| code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
from copy import copy
from decimal import DecimalTuple
import math
from hwt.doc_markers import internal
from hwt.hdl.value import HValue
from pyMathBitPrecise.floatt import FloattVal
class HFloatVal(HValue, FloattVal):
"""
HValue class for the float type
"""
@classmethod
def from_py(cls, typeObj, val, vld_mask=None):
assert vld_mask is None, vld_mask
if isinstance(val, int):
val = float(val)
if float(val) != val:
raise NotImplementedError("Need to implement better conversion method")
if val is None:
sign = 0
exp = 0
man = 0
assert vld_mask is None or vld_mask == 0
vld_mask = 0
elif isinstance(val, float):
man, exp = math.frexp(val)
man = abs(man)
man = int(man * (2 ** typeObj.mantisa_w))
sign = int(val < 0)
if vld_mask is None:
vld_mask = 1
elif isinstance(val, tuple):
sign, man, exp = val
if vld_mask is None:
vld_mask = 1
else:
raise TypeError(val)
return cls(typeObj, DecimalTuple(sign, man, exp), vld_mask)
def _is_full_valid(self):
return self.vld_mask == 1
def to_py(self):
"""
Convert to a Python float
"""
return float(self)
def _eq_val(self, other):
assert isinstance(other, HFloatVal)
return self.val == other.val
def _eq(self, other):
return self._eq_val(other)
def __copy__(self):
v = HValue.__copy__(self)
v.val = copy(v.val)
return v
@internal
def __hash__(self):
v = self.val
return hash((self._dtype, v))
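# Illustrative only (not part of the original module): a stdlib-only sketch of the
# sign/mantissa/exponent decomposition that from_py performs, using a hypothetical
# 52-bit mantissa width.
def _example_decompose(value, mantissa_w=52):
    import math
    man, exp = math.frexp(value)               # value == man * 2**exp, with 0.5 <= |man| < 1.0
    man = int(abs(man) * (2 ** mantissa_w))    # scale the fraction up to an integer mantissa
    sign = int(value < 0)
    return sign, man, exp
# _example_decompose(-6.75) -> (1, 3799912185593856, 3)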
| Nic30/HWToolkit | hwt/hdl/types/floatVal.py | Python | mit | 1,782 |
from pandac.PandaModules import *
from toontown.toonbase.ToontownGlobals import *
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from toontown.toon import LaffMeter
class MinigameAvatarScorePanel(DirectFrame):
def __init__(self, avId, avName):
self.avId = avId
if self.avId in base.cr.doId2do:
self.avatar = base.cr.doId2do[self.avId]
else:
self.avatar = None
DirectFrame.__init__(self, relief=None, image_color=GlobalDialogColor, image_scale=(0.4, 1.0, 0.24), image_pos=(0.0, 0.1, 0.0))
self['image'] = DGG.getDefaultDialogGeom()
self.scoreText = DirectLabel(self, relief=None, text='0', text_scale=TTLocalizer.MASPscoreText, pos=(0.1, 0.0, -0.09))
if self.avatar:
self.laffMeter = LaffMeter.LaffMeter(self.avatar.style, self.avatar.hp, self.avatar.maxHp)
self.laffMeter.reparentTo(self)
self.laffMeter.setPos(-0.085, 0, -0.035)
self.laffMeter.setScale(0.05)
self.laffMeter.start()
else:
self.laffMeter = None
self.nameText = DirectLabel(self, relief=None, text=avName, text_scale=TTLocalizer.MASPnameText, text_pos=(0.0, 0.06), text_wordwrap=7.5, text_shadow=(1, 1, 1, 1))
self.show()
return
def cleanup(self):
if self.laffMeter:
self.laffMeter.destroy()
del self.laffMeter
del self.scoreText
del self.nameText
self.destroy()
def setScore(self, score):
self.scoreText['text'] = str(score)
def getScore(self):
return int(self.scoreText['text'])
def makeTransparent(self, alpha):
self.setTransparency(1)
self.setColorScale(1, 1, 1, alpha)
| Spiderlover/Toontown | toontown/minigame/MinigameAvatarScorePanel.py | Python | mit | 1,759 |
'''
Created on Jul 26, 2014
@author: gigemjt
'''
class Form(object):
def __init__(self, inputFile, fileLength):
self._file = inputFile
self._fileLength = fileLength
self._formData = dict()
self.parseFile()
def parseFile(self):
bytesLeft = self._fileLength
leftOver = ""
while bytesLeft > 0:
bytesToRead = min(bytesLeft, 1024)
bytesLeft -= bytesToRead
read = self._file.read(bytesToRead)
contents = leftOver + read
leftOver = self.parseString(contents, bytesLeft)
def parseString(self, parseString, bytesLeft):
results = parseString.split('&')
leftOver = ""
if bytesLeft > 0:
leftOver = results[len(results) - 1] # grab last (possibly partial) section
results = results[:len(results) - 1] # cut off the partial ending
# parse each complete "key=value" pair into the form data dictionary
self._formData.update(pair.split('=', 1) for pair in results if '=' in pair)
return leftOver
@property
def formData(self):
return self._formData
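# Illustrative only (not part of the original module): the same "key=value&key2=value2"
# payload that Form parses incrementally can, for comparison, be parsed in one call
# with the standard library.
def _parse_with_stdlib(body):
    try:
        from urllib.parse import parse_qs   # Python 3
    except ImportError:
        from urlparse import parse_qs       # Python 2
    return parse_qs(body)
# _parse_with_stdlib("name=alice&age=30") -> {'name': ['alice'], 'age': ['30']}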
| dtracers/Development-Graph | Server/python/src/connection/form.py | Python | apache-2.0 | 983 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v9.common.types import criteria
from google.ads.googleads.v9.enums.types import criterion_type
__protobuf__ = proto.module(
package="google.ads.googleads.v9.resources",
marshal="google.ads.googleads.v9",
manifest={"SharedCriterion",},
)
class SharedCriterion(proto.Message):
r"""A criterion belonging to a shared set.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
resource_name (str):
Immutable. The resource name of the shared criterion. Shared
set resource names have the form:
``customers/{customer_id}/sharedCriteria/{shared_set_id}~{criterion_id}``
shared_set (str):
Immutable. The shared set to which the shared
criterion belongs.
This field is a member of `oneof`_ ``_shared_set``.
criterion_id (int):
Output only. The ID of the criterion.
This field is ignored for mutates.
This field is a member of `oneof`_ ``_criterion_id``.
type_ (google.ads.googleads.v9.enums.types.CriterionTypeEnum.CriterionType):
Output only. The type of the criterion.
keyword (google.ads.googleads.v9.common.types.KeywordInfo):
Immutable. Keyword.
This field is a member of `oneof`_ ``criterion``.
youtube_video (google.ads.googleads.v9.common.types.YouTubeVideoInfo):
Immutable. YouTube Video.
This field is a member of `oneof`_ ``criterion``.
youtube_channel (google.ads.googleads.v9.common.types.YouTubeChannelInfo):
Immutable. YouTube Channel.
This field is a member of `oneof`_ ``criterion``.
placement (google.ads.googleads.v9.common.types.PlacementInfo):
Immutable. Placement.
This field is a member of `oneof`_ ``criterion``.
mobile_app_category (google.ads.googleads.v9.common.types.MobileAppCategoryInfo):
Immutable. Mobile App Category.
This field is a member of `oneof`_ ``criterion``.
mobile_application (google.ads.googleads.v9.common.types.MobileApplicationInfo):
Immutable. Mobile application.
This field is a member of `oneof`_ ``criterion``.
"""
resource_name = proto.Field(proto.STRING, number=1,)
shared_set = proto.Field(proto.STRING, number=10, optional=True,)
criterion_id = proto.Field(proto.INT64, number=11, optional=True,)
type_ = proto.Field(
proto.ENUM,
number=4,
enum=criterion_type.CriterionTypeEnum.CriterionType,
)
keyword = proto.Field(
proto.MESSAGE,
number=3,
oneof="criterion",
message=criteria.KeywordInfo,
)
youtube_video = proto.Field(
proto.MESSAGE,
number=5,
oneof="criterion",
message=criteria.YouTubeVideoInfo,
)
youtube_channel = proto.Field(
proto.MESSAGE,
number=6,
oneof="criterion",
message=criteria.YouTubeChannelInfo,
)
placement = proto.Field(
proto.MESSAGE,
number=7,
oneof="criterion",
message=criteria.PlacementInfo,
)
mobile_app_category = proto.Field(
proto.MESSAGE,
number=8,
oneof="criterion",
message=criteria.MobileAppCategoryInfo,
)
mobile_application = proto.Field(
proto.MESSAGE,
number=9,
oneof="criterion",
message=criteria.MobileApplicationInfo,
)
__all__ = tuple(sorted(__protobuf__.manifest))
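# Illustrative only (not part of the generated file): a minimal sketch of constructing a
# SharedCriterion and exercising the mutually exclusive ``criterion`` oneof, assuming the
# google-ads package and its generated types are importable; the resource name is made up.
def _example_shared_criterion():
    sc = SharedCriterion(
        resource_name="customers/123/sharedCriteria/456~789",
        keyword=criteria.KeywordInfo(text="running shoes"),
    )
    # Setting another member of the same oneof clears the previously set member.
    sc.youtube_video = criteria.YouTubeVideoInfo(video_id="abc123")
    return sc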
| googleads/google-ads-python | google/ads/googleads/v9/resources/types/shared_criterion.py | Python | apache-2.0 | 4,471 |
from vmlp_multiclass import vmlp as multiclass_neural_network
from embedded_layer import EmbeddedLayer
import numpy
data = numpy.matrix([[0,0],[0,1],[1,0],[1,1]]) # input data
labels = numpy.matrix([[0,0,1],[1,0,0],[1,0,0],[0,0,1]]) # labels
user_model = multiclass_neural_network(data, labels, [2], 0.1, 2000)
# user_model.train()
# multiclass_neural_network.rawLabels()
# user_model.predictedLabels()
sparse_data = numpy.matrix([
[0, 0, 0, 0, 12, 108],
[0, 0, 0, 1, 12, 108],
[0, 0, 1, 0, 12, 108],
[0, 0, 1, 1, 12, 108],
[0, 1, 0, 0, 12, 108],
[0, 1, 0, 1, 12, 108],
[0, 1, 1, 0, 12, 108],
[0, 1, 1, 1, 12, 108],
[1, 0, 0, 0, 12, 108],
[1, 0, 0, 1, 12, 108],
[1, 0, 1, 0, 12, 108],
[1, 0, 1, 1, 12, 108],
[1, 1, 0, 0, 12, 108],
[1, 1, 0, 1, 12, 108],
[1, 1, 1, 0, 12, 108],
[1, 1, 1, 1, 12, 108],
])
sparse_labels = numpy.matrix([
[0, 0, 1],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
])
embedded_model = multiclass_neural_network(sparse_data, sparse_labels, [2], 0.1, 2000)
embedded_layer = EmbeddedLayer(4, 2, 2, 3)
embedded_model.embedLayer(embedded_layer)
embedded_model.train()
embedded_model.predictedLabels()
def hamm(length):
hammed_dist = 2 ** length
rays = numpy.zeros([hammed_dist, length])
for i in range(0, hammed_dist):
bin_vector = [int(b) for b in bin(i)[2:]]  # binary digits of i as ints
if len(bin_vector) < length:
padding = [0] * (length - len(bin_vector))
rays[i] = padding + bin_vector
else:
rays[i] = bin_vector
for i in range(0,len(rays)):
print(rays[i])
# hamm(4)
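# Illustrative only (not from the original file): hamm() above enumerates every binary
# vector of the given length; the same table can be built with itertools.product.
import itertools
def hamm_itertools(length):
    return numpy.array(list(itertools.product([0, 1], repeat=length)))
# hamm_itertools(4) enumerates the same 16 binary vectors that hamm(4) prints, in the same order.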
| dakoto747/Machine-Learning-Algorithms | examples.py | Python | mit | 1,807 |
# -*- coding: utf-8 -*-
# Copyright 2016 Serpent Consulting Services Pvt. Ltd
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
"name": "Open Invoices Reports",
"summary": "Open Invoices Reports",
"version": "8.0.1.0.0",
"category": "Accounting",
'website': 'http://www.serpentcs.com',
"author": """Serpent Consulting Services Pvt. Ltd.,
Agile Business Group,
Odoo Community Association (OCA)""",
"license": "AGPL-3",
"depends": [
"account",
],
'data': [
'views/res_company_view.xml',
'views/report_open_invoices_view.xml',
'views/report_paperformat.xml',
],
"installable": False,
}
| OpenPymeMx/account-financial-tools | partner_report_open_invoices/__openerp__.py | Python | agpl-3.0 | 717 |
"""
Handle the NBT (Named Binary Tag) data format
"""
from struct import Struct, error as StructError
from gzip import GzipFile
import zlib
try:
from collections.abc import MutableMapping, MutableSequence, Sequence
except ImportError: # compatibility for Python 2
from collections import MutableMapping, MutableSequence, Sequence
import os, io
try:
unicode
basestring
except NameError:
unicode = str # compatibility for Python 3
basestring = str # compatibility for Python 3
TAG_END = 0
TAG_BYTE = 1
TAG_SHORT = 2
TAG_INT = 3
TAG_LONG = 4
TAG_FLOAT = 5
TAG_DOUBLE = 6
TAG_BYTE_ARRAY = 7
TAG_STRING = 8
TAG_LIST = 9
TAG_COMPOUND = 10
TAG_INT_ARRAY = 11
class MalformedFileError(Exception):
"""Exception raised on parse error."""
pass
class TAG(object):
"""TAG, a variable with an intrinsic name."""
id = None
def __init__(self, value=None, name=None):
self.name = name
self.value = value
#Parsers and Generators
def _parse_buffer(self, buffer):
raise NotImplementedError(self.__class__.__name__)
def _render_buffer(self, buffer):
raise NotImplementedError(self.__class__.__name__)
#Printing and Formatting of tree
def tag_info(self):
"""Return Unicode string with class, name and unnested value."""
return self.__class__.__name__ + \
('(%r)' % self.name if self.name else "") + \
": " + self.valuestr()
def valuestr(self):
"""Return Unicode string of unnested value. For iterators, this returns a summary."""
return unicode(self.value)
def pretty_tree(self, indent=0):
"""Return formated Unicode string of self, where iterable items are recursively listed in detail."""
return ("\t"*indent) + self.tag_info()
# Python 2 compatibility; Python 3 uses __str__ instead.
def __unicode__(self):
"""Return a unicode string with the result in human readable format. Unlike valuestr(), the result is recursive for iterators till at least one level deep."""
return unicode(self.value)
def __str__(self):
"""Return a string (ascii formated for Python 2, unicode for Python 3) with the result in human readable format. Unlike valuestr(), the result is recursive for iterators till at least one level deep."""
return str(self.value)
# Unlike regular iterators, __repr__() is not recursive.
# Use pretty_tree for recursive results.
# iterators should use __repr__ or tag_info for each item, like regular iterators
def __repr__(self):
"""Return a string (ascii formated for Python 2, unicode for Python 3) describing the class, name and id for debugging purposes."""
return "<%s(%r) at 0x%x>" % (self.__class__.__name__,self.name,id(self))
class _TAG_Numeric(TAG):
"""_TAG_Numeric, comparable to int with an intrinsic name"""
def __init__(self, value=None, name=None, buffer=None):
super(_TAG_Numeric, self).__init__(value, name)
if buffer:
self._parse_buffer(buffer)
#Parsers and Generators
def _parse_buffer(self, buffer):
# Note: buffer.read() may raise an IOError, for example if buffer is a corrupt gzip.GzipFile
self.value = self.fmt.unpack(buffer.read(self.fmt.size))[0]
def _render_buffer(self, buffer):
buffer.write(self.fmt.pack(self.value))
class _TAG_End(TAG):
id = TAG_END
fmt = Struct(">b")
def _parse_buffer(self, buffer):
# Note: buffer.read() may raise an IOError, for example if buffer is a corrupt gzip.GzipFile
value = self.fmt.unpack(buffer.read(1))[0]
if value != 0:
raise ValueError("A Tag End must be rendered as '0', not as '%d'." % (value))
def _render_buffer(self, buffer):
buffer.write(b'\x00')
#== Value Tags ==#
class TAG_Byte(_TAG_Numeric):
"""Represent a single tag storing 1 byte."""
id = TAG_BYTE
fmt = Struct(">b")
class TAG_Short(_TAG_Numeric):
"""Represent a single tag storing 1 short."""
id = TAG_SHORT
fmt = Struct(">h")
class TAG_Int(_TAG_Numeric):
"""Represent a single tag storing 1 int."""
id = TAG_INT
fmt = Struct(">i")
"""Struct(">i"), 32-bits integer, big-endian"""
class TAG_Long(_TAG_Numeric):
"""Represent a single tag storing 1 long."""
id = TAG_LONG
fmt = Struct(">q")
class TAG_Float(_TAG_Numeric):
"""Represent a single tag storing 1 IEEE-754 floating point number."""
id = TAG_FLOAT
fmt = Struct(">f")
class TAG_Double(_TAG_Numeric):
"""Represent a single tag storing 1 IEEE-754 double precision floating point number."""
id = TAG_DOUBLE
fmt = Struct(">d")
class TAG_Byte_Array(TAG, MutableSequence):
"""
TAG_Byte_Array, comparable to a collections.UserList with
an intrinsic name whose values must be bytes
"""
id = TAG_BYTE_ARRAY
def __init__(self, name=None, buffer=None):
# TODO: add a value parameter as well
super(TAG_Byte_Array, self).__init__(name=name)
if buffer:
self._parse_buffer(buffer)
#Parsers and Generators
def _parse_buffer(self, buffer):
length = TAG_Int(buffer=buffer)
self.value = bytearray(buffer.read(length.value))
def _render_buffer(self, buffer):
length = TAG_Int(len(self.value))
length._render_buffer(buffer)
buffer.write(bytes(self.value))
# Mixin methods
def __len__(self):
return len(self.value)
def __iter__(self):
return iter(self.value)
def __contains__(self, item):
return item in self.value
def __getitem__(self, key):
return self.value[key]
def __setitem__(self, key, value):
# TODO: check type of value
self.value[key] = value
def __delitem__(self, key):
del(self.value[key])
def insert(self, key, value):
# TODO: check type of value, or is this done by self.value already?
self.value.insert(key, value)
#Printing and Formatting of tree
def valuestr(self):
return "[%i byte(s)]" % len(self.value)
def __unicode__(self):
return '['+",".join([unicode(x) for x in self.value])+']'
def __str__(self):
return '['+",".join([str(x) for x in self.value])+']'
class TAG_Int_Array(TAG, MutableSequence):
"""
TAG_Int_Array, comparable to a collections.UserList with
an intrinsic name whose values must be integers
"""
id = TAG_INT_ARRAY
def __init__(self, name=None, buffer=None):
# TODO: add a value parameter as well
super(TAG_Int_Array, self).__init__(name=name)
if buffer:
self._parse_buffer(buffer)
def update_fmt(self, length):
""" Adjust struct format description to length given """
self.fmt = Struct(">" + str(length) + "i")
#Parsers and Generators
def _parse_buffer(self, buffer):
length = TAG_Int(buffer=buffer).value
self.update_fmt(length)
self.value = list(self.fmt.unpack(buffer.read(self.fmt.size)))
def _render_buffer(self, buffer):
length = len(self.value)
self.update_fmt(length)
TAG_Int(length)._render_buffer(buffer)
buffer.write(self.fmt.pack(*self.value))
# Mixin methods
def __len__(self):
return len(self.value)
def __iter__(self):
return iter(self.value)
def __contains__(self, item):
return item in self.value
def __getitem__(self, key):
return self.value[key]
def __setitem__(self, key, value):
self.value[key] = value
def __delitem__(self, key):
del(self.value[key])
def insert(self, key, value):
self.value.insert(key, value)
#Printing and Formatting of tree
def valuestr(self):
return "[%i int(s)]" % len(self.value)
class TAG_String(TAG, Sequence):
"""
TAG_String, comparable to a collections.UserString with an
intrinsic name
"""
id = TAG_STRING
def __init__(self, value=None, name=None, buffer=None):
super(TAG_String, self).__init__(value, name)
if buffer:
self._parse_buffer(buffer)
#Parsers and Generators
def _parse_buffer(self, buffer):
length = TAG_Short(buffer=buffer)
read = buffer.read(length.value)
if len(read) != length.value:
raise StructError()
self.value = read.decode("utf-8")
def _render_buffer(self, buffer):
save_val = self.value.encode("utf-8")
length = TAG_Short(len(save_val))
length._render_buffer(buffer)
buffer.write(save_val)
# Mixin methods
def __len__(self):
return len(self.value)
def __iter__(self):
return iter(self.value)
def __contains__(self, item):
return item in self.value
def __getitem__(self, key):
return self.value[key]
#Printing and Formatting of tree
def __repr__(self):
return self.value
#== Collection Tags ==#
class TAG_List(TAG, MutableSequence):
"""
TAG_List, comparable to a collections.UserList with an intrinsic name
"""
id = TAG_LIST
def __init__(self, type=None, value=None, name=None, buffer=None):
super(TAG_List, self).__init__(value, name)
if type:
self.tagID = type.id
else:
self.tagID = None
self.tags = []
if buffer:
self._parse_buffer(buffer)
if self.tagID is None:
raise ValueError("No type specified for list: %s" % (name))
#Parsers and Generators
def _parse_buffer(self, buffer):
self.tagID = TAG_Byte(buffer=buffer).value
self.tags = []
length = TAG_Int(buffer=buffer)
for x in range(length.value):
self.tags.append(TAGLIST[self.tagID](buffer=buffer))
def _render_buffer(self, buffer):
TAG_Byte(self.tagID)._render_buffer(buffer)
length = TAG_Int(len(self.tags))
length._render_buffer(buffer)
for i, tag in enumerate(self.tags):
if tag.id != self.tagID:
raise ValueError("List element %d(%s) has type %d != container type %d" %
(i, tag, tag.id, self.tagID))
tag._render_buffer(buffer)
# Mixin methods
def __len__(self):
return len(self.tags)
def __iter__(self):
return iter(self.tags)
def __contains__(self, item):
return item in self.tags
def __getitem__(self, key):
return self.tags[key]
def __setitem__(self, key, value):
self.tags[key] = value
def __delitem__(self, key):
del(self.tags[key])
def insert(self, key, value):
self.tags.insert(key, value)
#Printing and Formatting of tree
def __repr__(self):
return "%i entries of type %s" % (len(self.tags), TAGLIST[self.tagID].__name__)
#Printing and Formatting of tree
def valuestr(self):
return "[%i %s(s)]" % (len(self.tags), TAGLIST[self.tagID].__name__)
def __unicode__(self):
return "["+", ".join([tag.tag_info() for tag in self.tags])+"]"
def __str__(self):
return "["+", ".join([tag.tag_info() for tag in self.tags])+"]"
def pretty_tree(self, indent=0):
output = [super(TAG_List, self).pretty_tree(indent)]
if len(self.tags):
output.append(("\t"*indent) + "{")
output.extend([tag.pretty_tree(indent + 1) for tag in self.tags])
output.append(("\t"*indent) + "}")
return '\n'.join(output)
class TAG_Compound(TAG, MutableMapping):
"""
TAG_Compound, comparable to a collections.OrderedDict with an
intrinsic name
"""
id = TAG_COMPOUND
def __init__(self, buffer=None, name=None):
# TODO: add a value parameter as well
super(TAG_Compound, self).__init__()
self.tags = []
self.name = ""
if buffer:
self._parse_buffer(buffer)
#Parsers and Generators
def _parse_buffer(self, buffer):
while True:
type = TAG_Byte(buffer=buffer)
if type.value == TAG_END:
#print("found tag_end")
break
else:
name = TAG_String(buffer=buffer).value
try:
tag = TAGLIST[type.value](buffer=buffer, name=name)
tag.name = name
self.tags.append(tag)
except KeyError:
raise ValueError("Unrecognised tag type")
def _render_buffer(self, buffer):
for tag in self.tags:
TAG_Byte(tag.id)._render_buffer(buffer)
TAG_String(tag.name)._render_buffer(buffer)
tag._render_buffer(buffer)
buffer.write(b'\x00') #write TAG_END
# Mixin methods
def __len__(self):
return len(self.tags)
def __iter__(self):
for key in self.tags:
yield key.name
def __contains__(self, key):
if isinstance(key, int):
return key < len(self.tags)
elif isinstance(key, basestring):
for tag in self.tags:
if tag.name == key:
return True
return False
elif isinstance(key, TAG):
return key in self.tags
return False
def __getitem__(self, key):
if isinstance(key, int):
return self.tags[key]
elif isinstance(key, basestring):
for tag in self.tags:
if tag.name == key:
return tag
else:
raise KeyError("Tag %s does not exist" % key)
else:
raise TypeError("key needs to be either name of tag, or index of tag, not a %s" % type(key).__name__)
def __setitem__(self, key, value):
assert isinstance(value, TAG), "value must be an nbt.TAG"
if isinstance(key, int):
# Just try it. The proper error will be raised if it doesn't work.
self.tags[key] = value
elif isinstance(key, basestring):
value.name = key
for i, tag in enumerate(self.tags):
if tag.name == key:
self.tags[i] = value
return
self.tags.append(value)
def __delitem__(self, key):
if isinstance(key, int):
del(self.tags[key])
elif isinstance(key, basestring):
self.tags.remove(self.__getitem__(key))
else:
raise ValueError("key needs to be either name of tag, or index of tag")
def keys(self):
return [tag.name for tag in self.tags]
def iteritems(self):
for tag in self.tags:
yield (tag.name, tag)
#Printing and Formatting of tree
def __unicode__(self):
return "{"+", ".join([tag.tag_info() for tag in self.tags])+"}"
def __str__(self):
return "{"+", ".join([tag.tag_info() for tag in self.tags])+"}"
def valuestr(self):
return '{%i Entries}' % len(self.tags)
def pretty_tree(self, indent=0):
output = [super(TAG_Compound, self).pretty_tree(indent)]
if len(self.tags):
output.append(("\t"*indent) + "{")
output.extend([tag.pretty_tree(indent + 1) for tag in self.tags])
output.append(("\t"*indent) + "}")
return '\n'.join(output)
TAGLIST = {TAG_END: _TAG_End, TAG_BYTE:TAG_Byte, TAG_SHORT:TAG_Short, TAG_INT:TAG_Int, TAG_LONG:TAG_Long, TAG_FLOAT:TAG_Float, TAG_DOUBLE:TAG_Double, TAG_BYTE_ARRAY:TAG_Byte_Array, TAG_STRING:TAG_String, TAG_LIST:TAG_List, TAG_COMPOUND:TAG_Compound, TAG_INT_ARRAY:TAG_Int_Array}
class NBTFile(TAG_Compound):
"""Represent an NBT file object."""
def __init__(self, filename=None, buffer=None, fileobj=None):
"""
Create a new NBTFile object.
Specify either a filename, file object or data buffer.
If a filename or file object is specified, the data should be GZip-compressed.
If a data buffer is specified, it is assumed to be uncompressed.
If filename is specified, the file is closed after reading and writing.
If file object is specified, the caller is responsible for closing the file.
"""
super(NBTFile, self).__init__()
self.filename = filename
self.type = TAG_Byte(self.id)
closefile = True
#make a file object
if filename:
self.filename = filename
self.file = GzipFile(filename, 'rb')
elif buffer:
if hasattr(buffer, 'name'):
self.filename = buffer.name
self.file = buffer
closefile = False
elif fileobj:
if hasattr(fileobj, 'name'):
self.filename = fileobj.name
self.file = GzipFile(fileobj=fileobj)
else:
self.file = None
closefile = False
#parse the file given initially
if self.file:
self.parse_file()
if closefile:
# Note: GzipFile().close() does NOT close the fileobj,
# So we are still responsible for closing that.
try:
self.file.close()
except (AttributeError, IOError):
pass
self.file = None
def parse_file(self, filename=None, buffer=None, fileobj=None):
"""Completely parse a file, extracting all tags."""
if filename:
self.file = GzipFile(filename, 'rb')
elif buffer:
if hasattr(buffer, 'name'):
self.filename = buffer.name
self.file = buffer
elif fileobj:
if hasattr(fileobj, 'name'):
self.filename = fileobj.name
self.file = GzipFile(fileobj=fileobj)
if self.file:
try:
type = TAG_Byte(buffer=self.file)
if type.value == self.id:
name = TAG_String(buffer=self.file).value
self._parse_buffer(self.file)
self.name = name
self.file.close()
else:
raise MalformedFileError("First record is not a Compound Tag")
except StructError as e:
raise MalformedFileError("Partial File Parse: file possibly truncated.")
else:
raise ValueError("NBTFile.parse_file(): Need to specify either a filename or a file object")
def write_file(self, filename=None, buffer=None, fileobj=None):
"""Write this NBT file to a file."""
closefile = True
if buffer:
self.filename = None
self.file = buffer
closefile = False
elif filename:
self.filename = filename
self.file = GzipFile(filename, "wb")
elif fileobj:
self.filename = None
self.file = GzipFile(fileobj=fileobj, mode="wb")
elif self.filename:
self.file = GzipFile(self.filename, "wb")
elif not self.file:
raise ValueError("NBTFile.write_file(): Need to specify either a filename or a file object")
#Render tree to file
TAG_Byte(self.id)._render_buffer(self.file)
TAG_String(self.name)._render_buffer(self.file)
self._render_buffer(self.file)
#make sure the file is complete
try:
self.file.flush()
except (AttributeError, IOError):
pass
if closefile:
try:
self.file.close()
except (AttributeError, IOError):
pass
def __repr__(self):
"""
Return a string (ascii formatted for Python 2, unicode
for Python 3) describing the class, name and id for
debugging purposes.
"""
if self.filename:
return "<%s(%r) with %s(%r) at 0x%x>" % (self.__class__.__name__, self.filename, \
TAG_Compound.__name__, self.name, id(self))
else:
return "<%s with %s(%r) at 0x%x>" % (self.__class__.__name__, \
TAG_Compound.__name__, self.name, id(self))
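# Illustrative only (not part of nbt.py): a minimal round-trip sketch using the classes
# above, writing an uncompressed NBT payload to an in-memory buffer and parsing it back.
if __name__ == "__main__":
    import io
    root = NBTFile()
    root.name = "hello world"
    root["name"] = TAG_String("Bananrama")
    root["count"] = TAG_Int(42)
    buf = io.BytesIO()
    root.write_file(buffer=buf)      # the buffer= path writes the payload uncompressed
    buf.seek(0)
    parsed = NBTFile(buffer=buf)     # the buffer= path likewise assumes uncompressed data
    print(parsed.pretty_tree())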
| cburschka/NBT | nbt/nbt.py | Python | mit | 20,029 |
# Copyright (c) 2013 Hortonworks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
import pkg_resources as pkg
from sahara import context
from sahara import exceptions as exc
from sahara.i18n import _
from sahara.i18n import _LE
from sahara.i18n import _LI
from sahara.i18n import _LW
from sahara.plugins import exceptions as ex
from sahara.plugins.hdp import clusterspec as cs
from sahara.plugins.hdp import configprovider as cfgprov
from sahara.plugins.hdp.versions import abstractversionhandler as avm
from sahara.plugins.hdp.versions.version_1_3_2 import edp_engine
from sahara.plugins.hdp.versions.version_1_3_2 import services
from sahara.utils import cluster_progress_ops as cpo
from sahara.utils import poll_utils
from sahara import version
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class VersionHandler(avm.AbstractVersionHandler):
config_provider = None
version = None
client = None
def _set_version(self, version):
self.version = version
def _get_config_provider(self):
if self.config_provider is None:
self.config_provider = cfgprov.ConfigurationProvider(
json.load(pkg.resource_stream(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'ambari-config-resource.json')),
hadoop_version='1.3.2')
return self.config_provider
def get_version(self):
return self.version
def get_ambari_client(self):
if not self.client:
self.client = AmbariClient(self)
return self.client
def get_config_items(self):
return self._get_config_provider().get_config_items()
def get_applicable_target(self, name):
return self._get_config_provider().get_applicable_target(name)
def get_cluster_spec(self, cluster, user_inputs,
scaled_groups=None, cluster_template=None):
if cluster_template:
cluster_spec = cs.ClusterSpec(cluster_template)
else:
if scaled_groups:
for ng in cluster.node_groups:
ng_id = ng['id']
if (ng_id in scaled_groups and
ng['count'] > scaled_groups[ng_id]):
raise ex.ClusterCannotBeScaled(
cluster.name,
_('The HDP plugin does not support '
'the decommissioning of nodes '
'for HDP version 1.3.2'))
cluster_spec = self.get_default_cluster_configuration()
cluster_spec.create_operational_config(
cluster, user_inputs, scaled_groups)
cs.validate_number_of_datanodes(
cluster, scaled_groups, self.get_config_items())
return cluster_spec
def get_default_cluster_configuration(self):
return cs.ClusterSpec(self._get_default_cluster_template())
def _get_default_cluster_template(self):
return pkg.resource_string(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'default-cluster.template')
def get_node_processes(self):
node_processes = {}
for service in self.get_default_cluster_configuration().services:
components = []
for component in service.components:
components.append(component.name)
node_processes[service.name] = components
return node_processes
def install_swift_integration(self, servers):
if servers:
cpo.add_provisioning_step(
servers[0].cluster_id, _("Install Swift integration"),
len(servers))
for server in servers:
server.install_swift_integration()
def get_services_processor(self):
return services
def get_edp_engine(self, cluster, job_type):
if job_type in edp_engine.EdpOozieEngine.get_supported_job_types():
return edp_engine.EdpOozieEngine(cluster)
return None
def get_edp_job_types(self):
return edp_engine.EdpOozieEngine.get_supported_job_types()
def get_edp_config_hints(self, job_type):
return edp_engine.EdpOozieEngine.get_possible_job_config(job_type)
def get_open_ports(self, node_group):
ports = [8660] # for Ganglia
ports_map = {
'AMBARI_SERVER': [8080, 8440, 8441],
'NAMENODE': [50070, 50470, 8020, 9000],
'DATANODE': [50075, 50475, 50010, 50020],
'SECONDARY_NAMENODE': [50090],
'JOBTRACKER': [50030, 8021],
'TASKTRACKER': [50060],
'HISTORYSERVER': [51111],
'HIVE_SERVER': [10000],
'HIVE_METASTORE': [9083],
'HBASE_MASTER': [60000, 60010],
'HBASE_REGIONSERVER': [60020, 60030],
'WEBHCAT_SERVER': [50111],
'GANGLIA_SERVER': [8661, 8662, 8663, 8651],
'MYSQL_SERVER': [3306],
'OOZIE_SERVER': [11000, 11001],
'ZOOKEEPER_SERVER': [2181, 2888, 3888],
'NAGIOS_SERVER': [80]
}
for process in node_group.node_processes:
if process in ports_map:
ports.extend(ports_map[process])
return ports
class AmbariClient(object):
def __init__(self, handler):
# add an argument for neutron discovery
self.handler = handler
def _get_http_session(self, host, port):
return host.remote().get_http_client(port)
def _get_standard_headers(self):
return {"X-Requested-By": "sahara"}
def _post(self, url, ambari_info, data=None):
if data:
LOG.debug('AmbariClient:_post call, url = {url} data = {data}'
.format(url=url, data=str(data)))
else:
LOG.debug('AmbariClient:_post call, url = {url}'.format(url=url))
session = self._get_http_session(ambari_info.host, ambari_info.port)
return session.post(url, data=data,
auth=(ambari_info.user, ambari_info.password),
headers=self._get_standard_headers())
def _delete(self, url, ambari_info):
LOG.debug('AmbariClient:_delete call, url = {url}'.format(url=url))
session = self._get_http_session(ambari_info.host, ambari_info.port)
return session.delete(url,
auth=(ambari_info.user, ambari_info.password),
headers=self._get_standard_headers())
def _put(self, url, ambari_info, data=None):
if data:
LOG.debug('AmbariClient:_put call, url = {url} data = {data}'
.format(url=url, data=str(data)))
else:
LOG.debug('AmbariClient:_put call, url = {url}'.format(url=url))
session = self._get_http_session(ambari_info.host, ambari_info.port)
auth = (ambari_info.user, ambari_info.password)
return session.put(url, data=data, auth=auth,
headers=self._get_standard_headers())
def _get(self, url, ambari_info):
LOG.debug('AmbariClient:_get call, url = {url}'.format(url=url))
session = self._get_http_session(ambari_info.host, ambari_info.port)
return session.get(url, auth=(ambari_info.user, ambari_info.password),
headers=self._get_standard_headers())
def _add_cluster(self, ambari_info, name):
add_cluster_url = 'http://{0}/api/v1/clusters/{1}'.format(
ambari_info.get_address(), name)
result = self._post(add_cluster_url, ambari_info,
data='{"Clusters": {"version" : "HDP-' +
self.handler.get_version() + '"}}')
if result.status_code != 201:
LOG.error(_LE('Create cluster command failed. {result}').format(
result=result.text))
raise ex.HadoopProvisionError(
_('Failed to add cluster: %s') % result.text)
@cpo.event_wrapper(True, step=_("Add configurations to cluster"),
param=('ambari_info', 2))
def _add_configurations_to_cluster(
self, cluster_spec, ambari_info, name):
existing_config_url = ('http://{0}/api/v1/clusters/{1}?fields='
'Clusters/desired_configs'.format(
ambari_info.get_address(), name))
result = self._get(existing_config_url, ambari_info)
json_result = json.loads(result.text)
existing_configs = json_result['Clusters']['desired_configs']
configs = cluster_spec.get_deployed_configurations()
if 'ambari' in configs:
configs.remove('ambari')
if len(configs) == len(existing_configs):
# nothing to do
return
config_url = 'http://{0}/api/v1/clusters/{1}'.format(
ambari_info.get_address(), name)
body = {}
clusters = {}
version = 1
body['Clusters'] = clusters
for config_name in configs:
if config_name in existing_configs:
if config_name == 'core-site' or config_name == 'global':
existing_version = (
existing_configs[config_name]['tag'].lstrip('v'))
version = int(existing_version) + 1
else:
continue
config_body = {}
clusters['desired_config'] = config_body
config_body['type'] = config_name
config_body['tag'] = 'v%s' % version
config_body['properties'] = (
cluster_spec.configurations[config_name])
result = self._put(config_url, ambari_info, data=json.dumps(body))
if result.status_code != 200:
LOG.error(
_LE('Set configuration command failed. {result}').format(
result=result.text))
raise ex.HadoopProvisionError(
_('Failed to set configurations on cluster: %s')
% result.text)
@cpo.event_wrapper(
True, step=_("Add services to cluster"), param=('ambari_info', 2))
def _add_services_to_cluster(self, cluster_spec, ambari_info, name):
services = cluster_spec.services
add_service_url = 'http://{0}/api/v1/clusters/{1}/services/{2}'
for service in services:
if service.deployed and service.name != 'AMBARI':
result = self._post(add_service_url.format(
ambari_info.get_address(), name, service.name),
ambari_info)
if result.status_code not in [201, 409]:
LOG.error(
_LE('Create service command failed. {result}').format(
result=result.text))
raise ex.HadoopProvisionError(
_('Failed to add services to cluster: %s')
% result.text)
@cpo.event_wrapper(
True, step=_("Add components to services"), param=('ambari_info', 2))
def _add_components_to_services(self, cluster_spec, ambari_info, name):
add_component_url = ('http://{0}/api/v1/clusters/{1}/services/{'
'2}/components/{3}')
for service in cluster_spec.services:
if service.deployed and service.name != 'AMBARI':
for component in service.components:
result = self._post(add_component_url.format(
ambari_info.get_address(), name, service.name,
component.name),
ambari_info)
if result.status_code not in [201, 409]:
LOG.error(
_LE('Create component command failed. {result}')
.format(result=result.text))
raise ex.HadoopProvisionError(
_('Failed to add components to services: %s')
% result.text)
@cpo.event_wrapper(
True, step=_("Add hosts and components"), param=('ambari_info', 3))
def _add_hosts_and_components(
self, cluster_spec, servers, ambari_info, name):
add_host_url = 'http://{0}/api/v1/clusters/{1}/hosts/{2}'
add_host_component_url = ('http://{0}/api/v1/clusters/{1}'
'/hosts/{2}/host_components/{3}')
for host in servers:
hostname = host.instance.fqdn().lower()
result = self._post(
add_host_url.format(ambari_info.get_address(), name, hostname),
ambari_info)
if result.status_code != 201:
LOG.error(
_LE('Create host command failed. {result}').format(
result=result.text))
raise ex.HadoopProvisionError(
_('Failed to add host: %s') % result.text)
node_group_name = host.node_group.name
# TODO(jspeidel): ensure that node group exists
node_group = cluster_spec.node_groups[node_group_name]
for component in node_group.components:
# don't add any AMBARI components
if component.find('AMBARI') != 0:
result = self._post(add_host_component_url.format(
ambari_info.get_address(), name, hostname, component),
ambari_info)
if result.status_code != 201:
LOG.error(
_LE('Create host_component command failed. '
'{result}').format(result=result.text))
raise ex.HadoopProvisionError(
_('Failed to add host component: %s')
% result.text)
@cpo.event_wrapper(
True, step=_("Install services"), param=('ambari_info', 2))
def _install_services(self, cluster_name, ambari_info):
ambari_address = ambari_info.get_address()
install_url = ('http://{0}/api/v1/clusters/{'
'1}/services?ServiceInfo/state=INIT'.format(
ambari_address, cluster_name))
body = ('{"RequestInfo" : { "context" : "Install all services" },'
'"Body" : {"ServiceInfo": {"state" : "INSTALLED"}}}')
result = self._put(install_url, ambari_info, data=body)
if result.status_code == 202:
json_result = json.loads(result.text)
request_id = json_result['Requests']['id']
success = self._wait_for_async_request(self._get_async_request_uri(
ambari_info, cluster_name, request_id),
ambari_info)
if success:
LOG.info(_LI("Hadoop stack installed successfully."))
self._finalize_ambari_state(ambari_info)
else:
LOG.error(_LE('Install command failed.'))
raise ex.HadoopProvisionError(
_('Installation of Hadoop stack failed.'))
elif result.status_code != 200:
LOG.error(
_LE('Install command failed. {result}').format(
result=result.text))
raise ex.HadoopProvisionError(
_('Installation of Hadoop stack failed.'))
def _get_async_request_uri(self, ambari_info, cluster_name, request_id):
return ('http://{0}/api/v1/clusters/{1}/requests/{'
'2}/tasks?fields=Tasks/status'.format(
ambari_info.get_address(), cluster_name,
request_id))
def _wait_for_async_request(self, request_url, ambari_info):
started = False
while not started:
result = self._get(request_url, ambari_info)
LOG.debug(
'async request {url} response: {response}'.format(
url=request_url, response=result.text))
json_result = json.loads(result.text)
started = True
for items in json_result['items']:
status = items['Tasks']['status']
if status == 'FAILED' or status == 'ABORTED':
return False
else:
if status != 'COMPLETED':
started = False
context.sleep(5)
return started
def _finalize_ambari_state(self, ambari_info):
persist_state_uri = 'http://{0}/api/v1/persist'.format(
ambari_info.get_address())
# this post data has non-standard format because persist
# resource doesn't comply with Ambari API standards
persist_data = ('{ "CLUSTER_CURRENT_STATUS":'
'"{\\"clusterState\\":\\"CLUSTER_STARTED_5\\"}" }')
result = self._post(persist_state_uri, ambari_info, data=persist_data)
if result.status_code != 201 and result.status_code != 202:
LOG.warning(_LW('Finalizing of Ambari cluster state failed. '
'{result}').format(result=result.text))
raise ex.HadoopProvisionError(_('Unable to finalize Ambari '
'state.'))
LOG.info(_LI('Ambari cluster state finalized.'))
@cpo.event_wrapper(
True, step=_("Start services"), param=('ambari_info', 3))
def start_services(self, cluster_name, cluster_spec, ambari_info):
start_url = ('http://{0}/api/v1/clusters/{1}/services?ServiceInfo/'
'state=INSTALLED'.format(
ambari_info.get_address(), cluster_name))
body = ('{"RequestInfo" : { "context" : "Start all services" },'
'"Body" : {"ServiceInfo": {"state" : "STARTED"}}}')
self._fire_service_start_notifications(
cluster_name, cluster_spec, ambari_info)
result = self._put(start_url, ambari_info, data=body)
if result.status_code == 202:
json_result = json.loads(result.text)
request_id = json_result['Requests']['id']
success = self._wait_for_async_request(
self._get_async_request_uri(ambari_info, cluster_name,
request_id), ambari_info)
if success:
LOG.info(
_LI("Successfully started Hadoop cluster '{name}'.")
.format(name=cluster_name))
LOG.info(_LI('Cluster name: {cluster_name}, '
'Ambari server address: {server_address}')
.format(cluster_name=cluster_name,
server_address=ambari_info.get_address()))
else:
LOG.error(_LE('Failed to start Hadoop cluster.'))
raise ex.HadoopProvisionError(
_('Start of Hadoop services failed.'))
elif result.status_code != 200:
LOG.error(
_LE('Start command failed. Status: {status}, '
'response: {response}').format(status=result.status_code,
response=result.text))
raise ex.HadoopProvisionError(
_('Start of Hadoop services failed.'))
def _exec_ambari_command(self, ambari_info, body, cmd_uri):
LOG.debug('PUT URI: {uri}'.format(uri=cmd_uri))
result = self._put(cmd_uri, ambari_info, data=body)
if result.status_code == 202:
LOG.debug(
'PUT response: {result}'.format(result=result.text))
json_result = json.loads(result.text)
href = json_result['href'] + '/tasks?fields=Tasks/status'
success = self._wait_for_async_request(href, ambari_info)
if success:
LOG.info(
_LI("Successfully changed state of Hadoop components "))
else:
LOG.error(_LE('Failed to change state of Hadoop components'))
raise ex.HadoopProvisionError(
_('Failed to change state of Hadoop components'))
else:
LOG.error(
_LE('Command failed. Status: {status}, response: '
'{response}').format(status=result.status_code,
response=result.text))
raise ex.HadoopProvisionError(_('Hadoop/Ambari command failed.'))
def _get_host_list(self, servers):
host_list = [server.instance.fqdn().lower() for server in servers]
return ",".join(host_list)
def _install_and_start_components(self, cluster_name, servers,
ambari_info, cluster_spec):
auth = (ambari_info.user, ambari_info.password)
self._install_components(ambari_info, auth, cluster_name, servers)
self.handler.install_swift_integration(servers)
self._start_components(ambari_info, auth, cluster_name,
servers, cluster_spec)
def _install_components(self, ambari_info, auth, cluster_name, servers):
# query for the host components on the given hosts that are in the
# INIT state
# TODO(jspeidel): provide request context
body = '{"HostRoles": {"state" : "INSTALLED"}}'
install_uri = ('http://{0}/api/v1/clusters/{'
'1}/host_components?HostRoles/state=INIT&'
'HostRoles/host_name.in({2})'.format(
ambari_info.get_address(), cluster_name,
self._get_host_list(servers)))
self._exec_ambari_command(ambari_info, body, install_uri)
LOG.info(_LI('Started Hadoop components while scaling up'))
LOG.info(_LI('Cluster name {cluster_name}, Ambari server ip {ip}')
.format(cluster_name=cluster_name,
ip=ambari_info.get_address()))
def _start_components(self, ambari_info, auth, cluster_name, servers,
cluster_spec):
# query for all the host components in the INSTALLED state,
# then get a list of the client services in the list
installed_uri = ('http://{0}/api/v1/clusters/{'
'1}/host_components?HostRoles/state=INSTALLED&'
'HostRoles/host_name.in({2})'.format(
ambari_info.get_address(), cluster_name,
self._get_host_list(servers)))
result = self._get(installed_uri, ambari_info)
if result.status_code == 200:
LOG.debug(
'GET response: {result}'.format(result=result.text))
json_result = json.loads(result.text)
items = json_result['items']
client_set = cluster_spec.get_components_for_type('CLIENT')
inclusion_list = list(set([x['HostRoles']['component_name']
for x in items
if x['HostRoles']['component_name']
not in client_set]))
# query and start all non-client components on the given set of
# hosts
# TODO(jspeidel): Provide request context
body = '{"HostRoles": {"state" : "STARTED"}}'
start_uri = ('http://{0}/api/v1/clusters/{'
'1}/host_components?HostRoles/state=INSTALLED&'
'HostRoles/host_name.in({2})'
'&HostRoles/component_name.in({3})'.format(
ambari_info.get_address(), cluster_name,
self._get_host_list(servers),
",".join(inclusion_list)))
self._exec_ambari_command(ambari_info, body, start_uri)
else:
raise ex.HadoopProvisionError(
_('Unable to determine installed service '
'components in scaled instances. status'
' code returned = {0}').format(result.status_code))
def _check_host_registrations(self, num_hosts, ambari_info):
url = 'http://{0}/api/v1/hosts'.format(ambari_info.get_address())
try:
result = self._get(url, ambari_info)
json_result = json.loads(result.text)
LOG.debug('Registered Hosts: {current_number} '
'of {final_number}'.format(
current_number=len(json_result['items']),
final_number=num_hosts))
for hosts in json_result['items']:
LOG.debug('Registered Host: {host}'.format(
host=hosts['Hosts']['host_name']))
return result and len(json_result['items']) >= num_hosts
except Exception:
LOG.debug('Waiting to connect to ambari server')
return False
@cpo.event_wrapper(True, step=_("Wait for all Ambari agents to register"),
param=('ambari_info', 2))
def wait_for_host_registrations(self, num_hosts, ambari_info):
cluster = ambari_info.get_cluster()
poll_utils.plugin_option_poll(
cluster, self._check_host_registrations,
cfgprov.HOST_REGISTRATIONS_TIMEOUT,
_("Wait for host registrations"), 5, {
'num_hosts': num_hosts, 'ambari_info': ambari_info})
def update_ambari_admin_user(self, password, ambari_info):
old_pwd = ambari_info.password
user_url = 'http://{0}/api/v1/users/admin'.format(
ambari_info.get_address())
update_body = ('{{"Users":{{"roles":"admin","password":"{0}",'
'"old_password":"{1}"}} }}'.format(password, old_pwd))
result = self._put(user_url, ambari_info, data=update_body)
if result.status_code != 200:
raise ex.HadoopProvisionError(_('Unable to update Ambari admin '
'user credentials: {0}').format(
result.text))
def add_ambari_user(self, user, ambari_info):
user_url = 'http://{0}/api/v1/users/{1}'.format(
ambari_info.get_address(), user.name)
create_body = ('{{"Users":{{"password":"{0}","roles":"{1}"}} }}'.
format(user.password, '%s' %
','.join(map(str, user.groups))))
result = self._post(user_url, ambari_info, data=create_body)
if result.status_code != 201:
raise ex.HadoopProvisionError(
_('Unable to create Ambari user: {0}').format(result.text))
def delete_ambari_user(self, user_name, ambari_info):
user_url = 'http://{0}/api/v1/users/{1}'.format(
ambari_info.get_address(), user_name)
result = self._delete(user_url, ambari_info)
if result.status_code != 200:
raise ex.HadoopProvisionError(
_('Unable to delete Ambari user: %(user_name)s'
' : %(text)s') %
{'user_name': user_name, 'text': result.text})
def configure_scaled_cluster_instances(self, name, cluster_spec,
num_hosts, ambari_info):
self.wait_for_host_registrations(num_hosts, ambari_info)
self._add_configurations_to_cluster(
cluster_spec, ambari_info, name)
self._add_services_to_cluster(
cluster_spec, ambari_info, name)
self._add_components_to_services(
cluster_spec, ambari_info, name)
self._install_services(name, ambari_info)
def start_scaled_cluster_instances(self, name, cluster_spec, servers,
ambari_info):
self.start_services(name, cluster_spec, ambari_info)
self._add_hosts_and_components(
cluster_spec, servers, ambari_info, name)
self._install_and_start_components(
name, servers, ambari_info, cluster_spec)
@cpo.event_wrapper(
True, step=_("Decommission nodes"), param=('cluster', 1))
def decommission_cluster_instances(self, cluster, clusterspec, instances,
ambari_info):
raise exc.InvalidDataException(_('The HDP plugin does not support '
'the decommissioning of nodes '
'for HDP version 1.3.2'))
def provision_cluster(self, cluster_spec, servers, ambari_info, name):
self._add_cluster(ambari_info, name)
self._add_configurations_to_cluster(cluster_spec, ambari_info, name)
self._add_services_to_cluster(cluster_spec, ambari_info, name)
self._add_components_to_services(cluster_spec, ambari_info, name)
self._add_hosts_and_components(
cluster_spec, servers, ambari_info, name)
self._install_services(name, ambari_info)
self.handler.install_swift_integration(servers)
def cleanup(self, ambari_info):
try:
ambari_info.host.remote().close_http_session(ambari_info.port)
except exc.NotFoundException:
LOG.warning(_LW("HTTP session is not cached"))
def _get_services_in_state(self, cluster_name, ambari_info, state):
services_url = ('http://{0}/api/v1/clusters/{1}/services?'
'ServiceInfo/state.in({2})'.format(
ambari_info.get_address(), cluster_name, state))
result = self._get(services_url, ambari_info)
json_result = json.loads(result.text)
services = []
for service in json_result['items']:
services.append(service['ServiceInfo']['service_name'])
return services
def _fire_service_start_notifications(self, cluster_name,
cluster_spec, ambari_info):
started_services = self._get_services_in_state(
cluster_name, ambari_info, 'STARTED')
for service in cluster_spec.services:
if service.deployed and service.name not in started_services:
service.pre_service_start(cluster_spec, ambari_info,
started_services)
| matips/iosr-2015 | sahara/plugins/hdp/versions/version_1_3_2/versionhandler.py | Python | apache-2.0 | 31,052 |
##############################################################################
#
# Copyright (C) Zenoss, Inc. 2015, all rights reserved.
#
# This content is made available according to terms specified in
# License.zenoss under the directory where your Zenoss product is installed.
#
##############################################################################
from . import zenpacklib
import os
if 'ZPL_YAML_FILENAME' in os.environ:
CFG = zenpacklib.load_yaml(os.environ['ZPL_YAML_FILENAME'])
else:
CFG = zenpacklib.load_yaml()
| vholer/zenpacklib | tests/data/zenpacks/ZenPacks.zenoss.ZPLTest1/ZenPacks/zenoss/ZPLTest1/__init__.py | Python | gpl-2.0 | 540 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
from django.core.management import execute_from_command_line
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "worldmap.settings")
execute_from_command_line(sys.argv)
| waybarrios/worldmap | manage.py | Python | gpl-3.0 | 253 |
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
from sqlalchemy import create_engine
from sqlalchemy.types import SchemaType
from sqlalchemy.engine import reflection
from sqlalchemy.schema import (
MetaData,
Table,
DropTable,
ForeignKeyConstraint,
DropConstraint,
)
from .base import Cli
__all__ = ['Sqla']
def drop_everything(engine):
'''Drop all tables and custom types (enums) using `engine`.
Taken from http://www.sqlalchemy.org/trac/wiki/UsageRecipes/DropEverything
This method is more robust than `metadata.drop_all(engine)`: when a table or
type is renamed, `drop_all` does not know about the old name, so the database
ends up holding unused entities.'''
conn = engine.connect()
# the transaction only applies if the DB supports
# transactional DDL, i.e. Postgresql, MS SQL Server
trans = conn.begin()
inspector = reflection.Inspector.from_engine(engine)
metadata = MetaData()
tbs = []
all_fks = []
types = []
for table_name in inspector.get_table_names():
fks = []
for fk in inspector.get_foreign_keys(table_name):
if not fk['name']:
continue
fks.append(ForeignKeyConstraint((), (), name=fk['name']))
for col in inspector.get_columns(table_name):
if isinstance(col['type'], SchemaType):
types.append(col['type'])
t = Table(table_name, metadata, *fks)
tbs.append(t)
all_fks.extend(fks)
try:
for fkc in all_fks:
conn.execute(DropConstraint(fkc))
for table in tbs:
conn.execute(DropTable(table))
for custom_type in types:
custom_type.drop(conn)
trans.commit()
except:
trans.rollback()
raise
class Sqla(Cli):
'''
SQLAlchemy database handling
:param session_maker: sqlalchemy session maker function
:param metadata: sqlalchemy metadata object, or a dictionary mapping names to
metadata objects for multi-database configurations
:param initial: a function accepting a sqlalchemy session and filling the
database with default initial data
:param dict generators: a dictionary of generator functions. Generator
functions should fill the database with "lorem ipsum" data.
They accept a sqlalchemy session and the number of objects to be created.
'''
def __init__(self, session_maker, metadata, initial=None, generators=None):
self.session = session_maker()
self.metadata = metadata
self.initial = initial
self.generators = generators or {}
def _schema(self, table):
from sqlalchemy.schema import CreateTable
engine = self.session.get_bind(clause=table)
return str(CreateTable(table, bind=engine))
def command_create_tables(self, meta_name=None, verbose=False):
'''
Create tables according to the sqlalchemy data model.
This is not a complex migration tool like alembic; it just creates the
tables that do not exist yet::
./manage.py sqla:create_tables [--verbose] [meta_name]
'''
def _create_metadata_tables(metadata):
for table in metadata.sorted_tables:
if verbose:
print(self._schema(table))
else:
print(' '+table.name)
engine = self.session.get_bind(clause=table)
metadata.create_all(bind=engine, tables=[table])
if isinstance(self.metadata, MetaData):
print('Creating tables...')
_create_metadata_tables(self.metadata)
else:
for current_meta_name, metadata in self.metadata.items():
if meta_name not in (current_meta_name, None):
continue
print('Creating tables for {}...'.format(current_meta_name))
_create_metadata_tables(metadata)
def command_drop_tables(self, meta_name=None):
'''
Drops all tables without dropping the database::
./manage.py sqla:drop_tables [meta_name]
'''
answer = raw_input('All data will lost. Are you sure? [y/N] ')
if answer.strip().lower()!='y':
sys.exit('Interrupted')
def _drop_metadata_tables(metadata):
table = next(metadata.tables.itervalues(), None)
if table is None:
print('Failed to find engine')
else:
engine = self.session.get_bind(clause=table)
drop_everything(engine)
print('Done')
if isinstance(self.metadata, MetaData):
print('Dropping tables... ', end='')
_drop_metadata_tables(self.metadata)
else:
for current_meta_name, metadata in self.metadata.items():
if meta_name not in (current_meta_name, None):
continue
print('Dropping tables for {}... '.format(current_meta_name),
end='')
_drop_metadata_tables(metadata)
def command_init(self):
'''
Runs init function::
./manage.py sqla:init
'''
if self.initial:
self.initial(self.session)
def command_reset(self):
'''
Drops all tables, creates tables and runs init function::
./manage.py sqla:reset
'''
self.command_drop_tables()
self.command_create_tables()
self.command_init()
def command_schema(self, name=None):
'''
Prints the current database schema (according to the sqlalchemy data model)::
./manage.py sqla:schema [name]
'''
meta_name = table_name = None
if name:
if isinstance(self.metadata, MetaData):
table_name = name
elif '.' in name:
meta_name, table_name = name.split('.', 1)
else:
meta_name = name
def _print_metadata_schema(metadata):
if table_name is None:
for table in metadata.sorted_tables:
print(self._schema(table))
else:
try:
table = metadata.tables[table_name]
except KeyError:
sys.exit('Table {} is not found'.format(name))
print(self._schema(table))
if isinstance(self.metadata, MetaData):
_print_metadata_schema(self.metadata)
else:
for current_meta_name, metadata in self.metadata.items():
if meta_name not in (current_meta_name, None):
continue
_print_metadata_schema(metadata)
def command_gen(self, *names):
'''
Runs generator functions.
Run `docs` generator function::
./manage.py sqla:gen docs
Run `docs` generator function with `count=10`::
./manage.py sqla:gen docs:10
'''
if not names:
raise Exception('Please provide generator names')
for name in names:
name, count = name, 0
if ':' in name:
name, count = name.split(':', 1)
count = int(count)
create = self.generators[name]
print('Generating `{0}` count={1}'.format(name, count))
create(self.session, count)
self.session.commit()
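# Illustrative only (not part of iktomi): a minimal sketch wiring the Sqla CLI to an
# in-memory SQLite engine; the engine URL and empty metadata here are placeholders.
def _example_cli():
    from sqlalchemy import create_engine, MetaData
    from sqlalchemy.orm import sessionmaker
    engine = create_engine('sqlite://')
    metadata = MetaData()
    Session = sessionmaker(bind=engine)
    cli = Sqla(Session, metadata)
    cli.command_create_tables(verbose=True)   # prints the schema of each table as it is created
    return cli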
| Lehych/iktomi | iktomi/cli/sqla.py | Python | mit | 7,471 |
"""
Common utility functions useful throughout the contentstore
"""
import logging
from contextlib import contextmanager
from datetime import datetime
from django.conf import settings
from django.urls import reverse
from django.utils import translation
from django.utils.translation import ugettext as _
from opaque_keys.edx.keys import CourseKey, UsageKey
from opaque_keys.edx.locator import LibraryLocator
from pytz import UTC
from common.djangoapps.student import auth
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.student.roles import CourseInstructorRole, CourseStaffRole
from openedx.core.djangoapps.django_comment_common.models import assign_default_role
from openedx.core.djangoapps.django_comment_common.utils import seed_permissions_roles
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.site_configuration.models import SiteConfiguration
from openedx.features.content_type_gating.models import ContentTypeGatingConfig
from openedx.features.content_type_gating.partitions import CONTENT_TYPE_GATING_SCHEME
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.partitions.partitions_service import get_all_partitions_for_course
log = logging.getLogger(__name__)
def add_instructor(course_key, requesting_user, new_instructor):
"""
Adds given user as instructor and staff to the given course,
after verifying that the requesting_user has permission to do so.
"""
# can't use auth.add_users here b/c it requires user to already have Instructor perms in this course
CourseInstructorRole(course_key).add_users(new_instructor)
auth.add_users(requesting_user, CourseStaffRole(course_key), new_instructor)
def initialize_permissions(course_key, user_who_created_course):
"""
Initializes a new course by enrolling the course creator as a student,
and initializing Forum by seeding its permissions and assigning default roles.
"""
# seed the forums
seed_permissions_roles(course_key)
# auto-enroll the course creator in the course so that "View Live" will work.
CourseEnrollment.enroll(user_who_created_course, course_key)
# set default forum roles (assign 'Student' role)
assign_default_role(course_key, user_who_created_course)
def remove_all_instructors(course_key):
"""
Removes all instructor and staff users from the given course.
"""
staff_role = CourseStaffRole(course_key)
staff_role.remove_users(*staff_role.users_with_role())
instructor_role = CourseInstructorRole(course_key)
instructor_role.remove_users(*instructor_role.users_with_role())
def delete_course(course_key, user_id, keep_instructors=False):
"""
    Delete the course from the module store and, if specified, remove user and
    group permissions from the course.
"""
_delete_course_from_modulestore(course_key, user_id)
if not keep_instructors:
_remove_instructors(course_key)
def _delete_course_from_modulestore(course_key, user_id):
"""
    Delete course from MongoDB. Deleting a course will fire a signal which will result in
deletion of the courseware associated with a course_key.
"""
module_store = modulestore()
with module_store.bulk_operations(course_key):
module_store.delete_course(course_key, user_id)
def _remove_instructors(course_key):
"""
In the django layer, remove all the user/groups permissions associated with this course
"""
print('removing User permissions from course....')
try:
remove_all_instructors(course_key)
except Exception as err: # lint-amnesty, pylint: disable=broad-except
log.error(f"Error in deleting course groups for {course_key}: {err}")
def get_lms_link_for_item(location, preview=False):
"""
Returns an LMS link to the course with a jump_to to the provided location.
:param location: the location to jump to
:param preview: True if the preview version of LMS should be returned. Default value is false.
"""
assert isinstance(location, UsageKey)
# checks LMS_BASE value in site configuration for the given course_org_filter(org)
# if not found returns settings.LMS_BASE
lms_base = SiteConfiguration.get_value_for_org(
location.org,
"LMS_BASE",
settings.LMS_BASE
)
if lms_base is None:
return None
if preview:
# checks PREVIEW_LMS_BASE value in site configuration for the given course_org_filter(org)
# if not found returns settings.FEATURES.get('PREVIEW_LMS_BASE')
lms_base = SiteConfiguration.get_value_for_org(
location.org,
"PREVIEW_LMS_BASE",
settings.FEATURES.get('PREVIEW_LMS_BASE')
)
return "//{lms_base}/courses/{course_key}/jump_to/{location}".format(
lms_base=lms_base,
course_key=str(location.course_key),
location=str(location),
)
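# Illustrative example (editor's note, not from the original source): for a usage
# key inside a hypothetical course "course-v1:edX+DemoX+Demo" and an LMS_BASE of
# "lms.example.com", the returned link would look like
# "//lms.example.com/courses/course-v1:edX+DemoX+Demo/jump_to/<usage key string>".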
def get_lms_link_for_certificate_web_view(course_key, mode):
"""
Returns the url to the certificate web view.
"""
assert isinstance(course_key, CourseKey)
# checks LMS_BASE value in SiteConfiguration against course_org_filter if not found returns settings.LMS_BASE
lms_base = SiteConfiguration.get_value_for_org(course_key.org, "LMS_BASE", settings.LMS_BASE)
if lms_base is None:
return None
return "//{certificate_web_base}/certificates/course/{course_id}?preview={mode}".format(
certificate_web_base=lms_base,
course_id=str(course_key),
mode=mode
)
def get_proctored_exam_settings_url(course_module):
"""
Gets course authoring microfrontend URL for links to proctored exam settings page
"""
course_authoring_microfrontend_url = ''
if settings.FEATURES.get('ENABLE_EXAM_SETTINGS_HTML_VIEW'):
course_authoring_microfrontend_url = configuration_helpers.get_value_for_org(
course_module.location.org,
'COURSE_AUTHORING_MICROFRONTEND_URL',
settings.COURSE_AUTHORING_MICROFRONTEND_URL
)
return course_authoring_microfrontend_url
# pylint: disable=invalid-name
def is_currently_visible_to_students(xblock):
"""
Returns true if there is a published version of the xblock that is currently visible to students.
This means that it has a release date in the past, and the xblock has not been set to staff only.
"""
try:
published = modulestore().get_item(xblock.location, revision=ModuleStoreEnum.RevisionOption.published_only)
# If there's no published version then the xblock is clearly not visible
except ItemNotFoundError:
return False
# If visible_to_staff_only is True, this xblock is not visible to students regardless of start date.
if published.visible_to_staff_only:
return False
# Check start date
if 'detached' not in published._class_tags and published.start is not None: # lint-amnesty, pylint: disable=protected-access
return datetime.now(UTC) > published.start
# No start date, so it's always visible
return True
def has_children_visible_to_specific_partition_groups(xblock):
"""
Returns True if this xblock has children that are limited to specific user partition groups.
Note that this method is not recursive (it does not check grandchildren).
"""
if not xblock.has_children:
return False
for child in xblock.get_children():
if is_visible_to_specific_partition_groups(child):
return True
return False
def is_visible_to_specific_partition_groups(xblock):
"""
Returns True if this xblock has visibility limited to specific user partition groups.
"""
if not xblock.group_access:
return False
for partition in get_user_partition_info(xblock):
if any(g["selected"] for g in partition["groups"]):
return True
return False
def find_release_date_source(xblock):
"""
Finds the ancestor of xblock that set its release date.
"""
# Stop searching at the section level
if xblock.category == 'chapter':
return xblock
parent_location = modulestore().get_parent_location(xblock.location,
revision=ModuleStoreEnum.RevisionOption.draft_preferred)
# Orphaned xblocks set their own release date
if not parent_location:
return xblock
parent = modulestore().get_item(parent_location)
if parent.start != xblock.start:
return xblock
else:
return find_release_date_source(parent)
def find_staff_lock_source(xblock):
"""
Returns the xblock responsible for setting this xblock's staff lock, or None if the xblock is not staff locked.
If this xblock is explicitly locked, return it, otherwise find the ancestor which sets this xblock's staff lock.
"""
# Stop searching if this xblock has explicitly set its own staff lock
if xblock.fields['visible_to_staff_only'].is_set_on(xblock):
return xblock
# Stop searching at the section level
if xblock.category == 'chapter':
return None
parent_location = modulestore().get_parent_location(xblock.location,
revision=ModuleStoreEnum.RevisionOption.draft_preferred)
# Orphaned xblocks set their own staff lock
if not parent_location:
return None
parent = modulestore().get_item(parent_location)
return find_staff_lock_source(parent)
def ancestor_has_staff_lock(xblock, parent_xblock=None):
"""
Returns True iff one of xblock's ancestors has staff lock.
Can avoid mongo query by passing in parent_xblock.
"""
if parent_xblock is None:
parent_location = modulestore().get_parent_location(xblock.location,
revision=ModuleStoreEnum.RevisionOption.draft_preferred)
if not parent_location:
return False
parent_xblock = modulestore().get_item(parent_location)
return parent_xblock.visible_to_staff_only
def reverse_url(handler_name, key_name=None, key_value=None, kwargs=None):
"""
Creates the URL for the given handler.
The optional key_name and key_value are passed in as kwargs to the handler.
"""
kwargs_for_reverse = {key_name: str(key_value)} if key_name else None
if kwargs:
kwargs_for_reverse.update(kwargs)
return reverse(handler_name, kwargs=kwargs_for_reverse)
def reverse_course_url(handler_name, course_key, kwargs=None):
"""
Creates the URL for handlers that use course_keys as URL parameters.
"""
return reverse_url(handler_name, 'course_key_string', course_key, kwargs)
def reverse_library_url(handler_name, library_key, kwargs=None):
"""
Creates the URL for handlers that use library_keys as URL parameters.
"""
return reverse_url(handler_name, 'library_key_string', library_key, kwargs)
def reverse_usage_url(handler_name, usage_key, kwargs=None):
"""
Creates the URL for handlers that use usage_keys as URL parameters.
"""
return reverse_url(handler_name, 'usage_key_string', usage_key, kwargs)
def get_split_group_display_name(xblock, course):
"""
Returns group name if an xblock is found in user partition groups that are suitable for the split_test module.
Arguments:
xblock (XBlock): The courseware component.
course (XBlock): The course descriptor.
Returns:
group name (String): Group name of the matching group xblock.
"""
for user_partition in get_user_partition_info(xblock, schemes=['random'], course=course):
for group in user_partition['groups']:
if 'Group ID {group_id}'.format(group_id=group['id']) == xblock.display_name_with_default:
return group['name']
def get_user_partition_info(xblock, schemes=None, course=None):
"""
Retrieve user partition information for an XBlock for display in editors.
* If a partition has been disabled, it will be excluded from the results.
* If a group within a partition is referenced by the XBlock, but the group has been deleted,
the group will be marked as deleted in the results.
Arguments:
xblock (XBlock): The courseware component being edited.
Keyword Arguments:
schemes (iterable of str): If provided, filter partitions to include only
schemes with the provided names.
course (XBlock): The course descriptor. If provided, uses this to look up the user partitions
instead of loading the course. This is useful if we're calling this function multiple
            times for the same course and want to minimize queries to the modulestore.
Returns: list
Example Usage:
>>> get_user_partition_info(block, schemes=["cohort", "verification"])
[
{
"id": 12345,
"name": "Cohorts"
"scheme": "cohort",
"groups": [
{
"id": 7890,
"name": "Foo",
"selected": True,
"deleted": False,
}
]
},
{
"id": 7292,
"name": "Midterm A",
"scheme": "verification",
"groups": [
{
"id": 1,
"name": "Completed verification at Midterm A",
"selected": False,
"deleted": False
},
{
"id": 0,
"name": "Did not complete verification at Midterm A",
"selected": False,
"deleted": False,
}
]
}
]
"""
course = course or modulestore().get_course(xblock.location.course_key)
if course is None:
log.warning(
"Could not find course %s to retrieve user partition information",
xblock.location.course_key
)
return []
if schemes is not None:
schemes = set(schemes)
partitions = []
for p in sorted(get_all_partitions_for_course(course, active_only=True), key=lambda p: p.name):
# Exclude disabled partitions, partitions with no groups defined
# The exception to this case is when there is a selected group within that partition, which means there is
# a deleted group
# Also filter by scheme name if there's a filter defined.
selected_groups = set(xblock.group_access.get(p.id, []) or [])
if (p.groups or selected_groups) and (schemes is None or p.scheme.name in schemes):
# First, add groups defined by the partition
groups = []
for g in p.groups:
                # Falsey group access for a partition means that all groups
# are selected. In the UI, though, we don't show the particular
# groups selected, since there's a separate option for "all users".
groups.append({
"id": g.id,
"name": g.name,
"selected": g.id in selected_groups,
"deleted": False,
})
# Next, add any groups set on the XBlock that have been deleted
all_groups = {g.id for g in p.groups}
missing_group_ids = selected_groups - all_groups
for gid in missing_group_ids:
groups.append({
"id": gid,
"name": _("Deleted Group"),
"selected": True,
"deleted": True,
})
# Put together the entire partition dictionary
partitions.append({
"id": p.id,
"name": str(p.name), # Convert into a string in case ugettext_lazy was used
"scheme": p.scheme.name,
"groups": groups,
})
return partitions
def get_visibility_partition_info(xblock, course=None):
"""
Retrieve user partition information for the component visibility editor.
This pre-processes partition information to simplify the template.
Arguments:
xblock (XBlock): The component being edited.
course (XBlock): The course descriptor. If provided, uses this to look up the user partitions
instead of loading the course. This is useful if we're calling this function multiple
            times for the same course and want to minimize queries to the modulestore.
Returns: dict
"""
selectable_partitions = []
# We wish to display enrollment partitions before cohort partitions.
enrollment_user_partitions = get_user_partition_info(xblock, schemes=["enrollment_track"], course=course)
    # For enrollment partitions, we only show them if there is a selected group
    # or if the number of groups > 1.
for partition in enrollment_user_partitions:
if len(partition["groups"]) > 1 or any(group["selected"] for group in partition["groups"]):
selectable_partitions.append(partition)
course_key = xblock.scope_ids.usage_id.course_key
is_library = isinstance(course_key, LibraryLocator)
if not is_library and ContentTypeGatingConfig.current(course_key=course_key).studio_override_enabled:
selectable_partitions += get_user_partition_info(xblock, schemes=[CONTENT_TYPE_GATING_SCHEME], course=course)
# Now add the cohort user partitions.
selectable_partitions = selectable_partitions + get_user_partition_info(xblock, schemes=["cohort"], course=course)
# Find the first partition with a selected group. That will be the one initially enabled in the dialog
# (if the course has only been added in Studio, only one partition should have a selected group).
selected_partition_index = -1
# At the same time, build up all the selected groups as they are displayed in the dialog title.
selected_groups_label = ''
for index, partition in enumerate(selectable_partitions):
for group in partition["groups"]:
if group["selected"]:
if len(selected_groups_label) == 0:
selected_groups_label = group['name']
else:
# Translators: This is building up a list of groups. It is marked for translation because of the
# comma, which is used as a separator between each group.
selected_groups_label = _('{previous_groups}, {current_group}').format(
previous_groups=selected_groups_label,
current_group=group['name']
)
if selected_partition_index == -1:
selected_partition_index = index
return {
"selectable_partitions": selectable_partitions,
"selected_partition_index": selected_partition_index,
"selected_groups_label": selected_groups_label,
}
def get_xblock_aside_instance(usage_key):
"""
    Returns: aside instance of an aside xblock
:param usage_key: Usage key of aside xblock
"""
try:
descriptor = modulestore().get_item(usage_key.usage_key)
for aside in descriptor.runtime.get_asides(descriptor):
if aside.scope_ids.block_type == usage_key.aside_type:
return aside
except ItemNotFoundError:
log.warning('Unable to load item %s', usage_key.usage_key)
def is_self_paced(course):
"""
Returns True if course is self-paced, False otherwise.
"""
return course and course.self_paced
def get_sibling_urls(subsection):
"""
Given a subsection, returns the urls for the next and previous units.
(the first unit of the next subsection or section, and
the last unit of the previous subsection/section)
"""
section = subsection.get_parent()
prev_url = next_url = ''
prev_loc = next_loc = None
last_block = None
siblings = list(section.get_children())
for i, block in enumerate(siblings):
if block.location == subsection.location:
if last_block:
try:
prev_loc = last_block.get_children()[0].location
except IndexError:
pass
try:
next_loc = siblings[i + 1].get_children()[0].location
except IndexError:
pass
break
last_block = block
if not prev_loc:
try:
# section.get_parent SHOULD return the course, but for some reason, it might not
sections = section.get_parent().get_children()
except AttributeError:
log.error("URL Retrieval Error # 1: subsection {subsection} included in section {section}".format(
section=section.location,
subsection=subsection.location
))
# This should not be a fatal error. The worst case is that the navigation on the unit page
# won't display a link to a previous unit.
else:
try:
prev_section = sections[sections.index(next(s for s in sections if s.location == section.location)) - 1]
prev_loc = prev_section.get_children()[-1].get_children()[-1].location
except IndexError:
pass
if not next_loc:
try:
sections = section.get_parent().get_children()
except AttributeError:
log.error("URL Retrieval Error # 2: subsection {subsection} included in section {section}".format(
section=section.location,
subsection=subsection.location
))
else:
try:
next_section = sections[sections.index(next(s for s in sections if s.location == section.location)) + 1]
next_loc = next_section.get_children()[0].get_children()[0].location
except IndexError:
pass
if prev_loc:
prev_url = reverse_usage_url('container_handler', prev_loc)
if next_loc:
next_url = reverse_usage_url('container_handler', next_loc)
return prev_url, next_url
@contextmanager
def translation_language(language):
"""Context manager to override the translation language for the scope
of the following block. Has no effect if language is None.
"""
if language:
previous = translation.get_language()
translation.activate(language)
try:
yield
finally:
translation.activate(previous)
else:
yield
|
EDUlib/edx-platform
|
cms/djangoapps/contentstore/utils.py
|
Python
|
agpl-3.0
| 22,783
|
#
# Copyright (C) 2013 Savoir-Faire Linux Inc.
#
# This file is part of Sageo
#
# Sageo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sageo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sageo. If not, see <http://www.gnu.org/licenses/>
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash, Module, current_app
import app.snapins as snapins
#from app.snapins.snapin import SnapinBase
sageo = current_app
def side():
snapin_objects = {}
for snapin in snapins.__all__:
#import ipdb;ipdb.set_trace()
__import__('app.snapins.' + snapin + '.' + snapin)
snapin_objects[snapin] = getattr(getattr(getattr(snapins, snapin), snapin),snapin)()
return snapin_objects
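# Illustrative note (editor's addition, not in the original file): for a
# hypothetical snapin named "hosts", the lookup above is equivalent to
#
#     import app.snapins.hosts.hosts
#     snapin_objects["hosts"] = app.snapins.hosts.hosts.hosts()
#
# i.e. each snapin package is expected to expose a module and a class named
# after the snapin itself.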
|
smlacombe/sageo
|
app/controllers/side.py
|
Python
|
gpl-3.0
| 1,232
|
import rectangle
class particle:
def __init__(self, x, y, w, h, c):
self.rectangle = rectangle.rectangle(x, y, w, h, c)
|
martflu/adaptive-audio
|
src/particle.py
|
Python
|
gpl-2.0
| 137
|
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
""" This file contains a dialog and widgets related to the module documentation
dialog, which displays the available documentation for a given VisTrails module.
QPortDocumentation
"""
from PyQt4 import QtCore, QtGui
from vistrails.core.vistrail.port import PortEndPoint
from vistrails.core.utils import VistrailsInternalError
class QPortDocumentation(QtGui.QDialog):
"""
QPortDocumentation is a dialog for showing port documentation. duh.
"""
def __init__(self, module, port_type, port_name, parent=None):
QtGui.QDialog.__init__(self, parent)
if not module.has_port_spec(port_name, port_type):
doc = None
else:
port_spec = module.get_port_spec(port_name, port_type)
doc = port_spec.docstring()
if doc is None:
descriptor = module.module_descriptor
# try the old method of accessing documentation
if port_type == 'output':
call_ = descriptor.module.provide_output_port_documentation
elif port_type == 'input':
call_ = descriptor.module.provide_input_port_documentation
else:
raise VistrailsInternalError("Invalid port type")
doc = call_(port_name)
self.setWindowTitle('Documentation for %s port %s in "%s"' %
(port_type, port_name, module.name))
layout = QtGui.QVBoxLayout()
layout.addStrut(600)
layout.addWidget(QtGui.QLabel("Port name: %s" % port_name))
layout.addWidget(QtGui.QLabel("Module name: %s" % module.name))
layout.addWidget(QtGui.QLabel("Module package: %s" % \
module.package))
self.textEdit = QtGui.QTextEdit(self)
layout.addWidget(self.textEdit, 1)
if doc:
self.textEdit.insertPlainText(doc)
else:
self.textEdit.insertPlainText("(Documentation not available)")
self.textEdit.setReadOnly(True)
self.textEdit.setTextCursor(QtGui.QTextCursor(self.textEdit.document()))
self.buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok)
layout.addWidget(self.buttonBox)
self.setLayout(layout)
self.connect(self.buttonBox, QtCore.SIGNAL('accepted()'), self.accept)
|
Nikea/VisTrails
|
vistrails/gui/port_documentation.py
|
Python
|
bsd-3-clause
| 4,253
|
#! /bin/env python
#
# Generate random fake participant data
#
import argparse
# pylint: disable=superfluous-parens
# pylint: disable=broad-except
import csv
import logging
import sys
from time import sleep
from rdr_service.tools.tool_libs import GCPProcessContext
from rdr_service.services.gcp_utils import gcp_get_app_access_token, gcp_get_app_host_name, gcp_make_auth_header
from rdr_service.services.system_utils import make_api_request, setup_logging, setup_i18n
_logger = logging.getLogger("rdr_logger")
tool_cmd = "random-gen"
tool_desc = "random participant data generator"
class RandomGeneratorClass(object):
MAX_PARTICIPANTS_PER_REQUEST = 25
MAX_CONSECUTIVE_ERRORS = 5
SLEEP_TIME_AFTER_ERROR_SECONDS = 3
_gen_url = "rdr/v1/DataGen"
_host = None
_oauth_token = None
def __init__(self, args, gcp_env):
self.args = args
self.gcp_env = gcp_env
if args:
self._host = gcp_get_app_host_name(self.args.project)
if self.args.port:
self._host = "{0}:{1}".format(self._host, self.args.port)
else:
if self._host in ["127.0.0.1", "localhost"]:
self._host = "{0}:{1}".format(self._host, 8080)
if self._host not in ["127.0.0.1", "localhost"]:
self._oauth_token = gcp_get_app_access_token()
def generate_fake_data(self):
total_participants_created = 0
while total_participants_created < self.args.num_participants:
participants_for_batch = min(
self.MAX_PARTICIPANTS_PER_REQUEST, self.args.num_participants - total_participants_created
)
request_body = {
"num_participants": participants_for_batch,
"include_physical_measurements": self.args.include_physical_measurements,
"include_biobank_orders": self.args.include_biobank_orders,
}
if self.args.hpo:
request_body["hpo"] = self.args.hpo
_logger.info("generating batch of [{0}] participants.".format(participants_for_batch))
num_consecutive_errors = 0
while num_consecutive_errors <= self.MAX_CONSECUTIVE_ERRORS:
code, resp = make_api_request(
self._host, self._gen_url, req_type="POST", json_data=request_body, headers=gcp_make_auth_header()
)
if code == 200:
break
_logger.error("{0} [{1}]".format(code, resp))
num_consecutive_errors += 1
sleep(self.SLEEP_TIME_AFTER_ERROR_SECONDS)
if num_consecutive_errors > self.MAX_CONSECUTIVE_ERRORS:
raise IOError("more than {0} consecutive errors; bailing out.".format(self.MAX_CONSECUTIVE_ERRORS))
total_participants_created += participants_for_batch
_logger.info("total participants created: [{0}].".format(total_participants_created))
if self.args.create_biobank_samples:
_logger.info("requesting Biobank sample generation.")
code, resp = make_api_request(
self._host,
self._gen_url,
req_type="POST",
json_data={"create_biobank_samples": True},
headers=gcp_make_auth_header(),
)
if code != 200:
_logger.error("request to generate biobank samples failed.")
else:
_logger.info(
"biobank samples are being generated asynchronously."
" wait until done, then use the cron tab in AppEngine to start the samples pipeline."
)
def _read_csv_lines(self, filepath):
with open(filepath, "r") as f:
reader = csv.reader(f)
return [line[0].strip() for line in reader]
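    # Illustrative sketch (editor's note, not part of the original file): the CSV
    # given to --create_samples_from_file is assumed to hold one participant ID per
    # line, prefixed with the environment identifier, e.g. (hypothetical values):
    #
    #     P100000001
    #     P100000002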
def generate_data_from_file(self):
reader = self._read_csv_lines(self.args.create_samples_from_file)
_logger.info("requesting pm&b for participants")
for item in reader:
# pylint: disable=unused-variable
code, resp = make_api_request(
self._host, self._gen_url, req_type="POST", json_data=item, headers=gcp_make_auth_header()
)
if code != 200:
_logger.error("request failed")
def run(self):
"""
Main program process
:return: Exit code value
"""
if self.args.create_samples_from_file:
self.generate_data_from_file()
else:
self.generate_fake_data()
return 0
def run():
# Set global debug value and setup application logging.
setup_logging(
_logger, tool_cmd, "--debug" in sys.argv, "{0}.log".format(tool_cmd) if "--log-file" in sys.argv else None
)
setup_i18n()
exit_code = 1
# Setup program arguments.
parser = argparse.ArgumentParser(prog=tool_cmd, description=tool_desc)
parser.add_argument("--debug", help="Enable debug output", default=False, action="store_true") # noqa
parser.add_argument("--log-file", help="write output to a log file", default=False, action="store_true") # noqa
parser.add_argument("--project", help="gcp project name", default="localhost") # noqa
parser.add_argument("--account", help="pmi-ops account", default=None) # noqa
parser.add_argument("--service-account", help="gcp iam service account", default=None) # noqa
parser.add_argument("--port", help="alternate ip port to connect to", default=None) # noqa
parser.add_argument("--num_participants", type=int, help="The number of participants to create.", default=0)
parser.add_argument(
"--include_physical_measurements", action="store_true", help="True if physical measurements should be created"
)
parser.add_argument(
"--include_biobank_orders", action="store_true", help="True if biobank orders should be created"
)
parser.add_argument("--hpo", help="The HPO name to assign participants to; defaults to random choice.")
parser.add_argument(
"--create_biobank_samples", action="store_true", help="True if biobank samples should be created"
)
parser.add_argument(
"--create_samples_from_file",
help="Creates PM&B for existing participants from a csv file; requires path"
        ' to file. File is expected to contain a single column of IDs with a '
"leading env. identifier. i.e. P",
)
args = parser.parse_args()
if args.num_participants == 0 and not args.create_biobank_samples and not args.create_samples_from_file:
parser.error("--num_participants must be nonzero unless --create_biobank_samples is true.")
exit(exit_code)
with GCPProcessContext(tool_cmd, args.project, args.account, args.service_account) as gcp_env:
# verify we're not getting pointed to production.
if gcp_env.project == "all-of-us-rdr-prod":
_logger.error("using spec generator in production is not allowed.")
return 1
process = RandomGeneratorClass(args, gcp_env)
exit_code = process.run()
return exit_code
# --- Main Program Call ---
if __name__ == "__main__":
sys.exit(run())
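# Illustrative invocation (editor's note, not part of the original file); the
# project name and counts below are hypothetical:
#
#     python random_data_generator.py --project localhost --num_participants 50 \
#         --include_physical_measurements --include_biobank_orders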
|
all-of-us/raw-data-repository
|
rdr_service/client/client_libs/random_data_generator.py
|
Python
|
bsd-3-clause
| 7,259
|
# -*- coding: utf-8 -*-
from flags import FLAGS
__all__ = (
"get_device",
)
device = None
class Device(object):
@property
def device_list(self):
"""
"""
devices = range(FLAGS.num_gpus)
        if len(devices) == 0:
devices = [0]
return devices
@property
def count(self):
"""
"""
c = FLAGS.num_gpus
if c == 0 and self.is_cpu():
return 1
return c
def is_cpu(self):
"""
"""
return FLAGS.num_gpus == 0
def is_gpu(self):
"""
"""
return FLAGS.num_gpus > 0
def make_device_name(self, id):
"""
"""
device_name = "/"
if self.is_gpu():
device_name += "gpu:"
elif self.is_cpu():
device_name += "cpu:"
device_name += str(id)
return device_name
def get_device():
global device
if device is None:
device = Device()
return device
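# Illustrative usage sketch (editor's note, not part of the original module),
# assuming FLAGS.num_gpus has already been parsed:
#
#     dev = get_device()
#     name = dev.make_device_name(dev.device_list[0])
#     # -> "/gpu:0" when num_gpus > 0, "/cpu:0" otherwise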
|
tokuda109/tensorflow-docker-skeleton
|
src/train/device.py
|
Python
|
mit
| 1,009
|
#!/opt/local/bin/python
# Python program to print prime factors
import sys
import math
import timeit
import time
# A function to print all prime factors of
# a given number n
def primeFactors(n):
factors = []
# Print the number of two's that divide n
while n & 1 == 0:
factors.append(2)
        print(2)
n = int(n / 2)
# n must be odd at this point
# so a skip of 2 ( i = i + 2) can be used
for i in range(3,int(math.sqrt(n))+1,2):
        # while i divides n, print i and divide n
while n % i == 0:
factors.append(i)
            print(i)
            n = n // i
# Condition if n is a prime
# number greater than 2
if n > 2:
factors.append(n)
print(n)
return(factors)
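# Quick sanity check (editor's note, not part of the original script):
#
#     primeFactors(13195)  # prints the factors 5, 7, 13, 29 and returns [5, 7, 13, 29]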
if len(sys.argv) == 2:
target = int(sys.argv[1])
else:
target = 4620
target = 600851475143
start_time = timeit.default_timer()
primeFactors(target)
print( timeit.default_timer() - start_time )
# print("We found %d primes factors in %d" % (prime_factors, target))
# print("In %f seconds." % ( timeit.default_timer() - start_time) )
#-----
|
perlygatekeeper/glowing-robot
|
Project_Euler/03_largest_prime_factor/prime_factors.py
|
Python
|
artistic-2.0
| 1,189
|
# from .db import db
from db import db
from Authors import *
from model import *
import uuid
class Posts(db.Model):
__tablename__ = 'posts'
post_id = db.Column(db.String(100), primary_key=True)
title = db.Column(db.String(64))
description = db.Column(db.String(128))
content_type = db.Column(db.String(33))
content = db.Column(db.String(800))
creation_time = db.Column(db.DateTime)
view_permission = db.Column(db.Integer)
post_type = db.Column(db.Integer)
numberOf_comments = db.Column(db.Integer)
numberOf_URL = db.Column(db.Integer)
numberOf_images = db.Column(db.Integer)
author_id = db.Column(db.String(100), db.ForeignKey('authors.author_id'))
comments = db.relationship('Comments', backref = 'comm', lazy = 'dynamic')
# UniqueConstraint('post_id', name = 'uix_1')
def __new__(cls, datum = None):
"""
Input: See comments in __init__
Description:
Checks whether post_id is inside datum dictionary.
If not found, then it returns None
"""
if datum == None:
return super(Posts, cls).__new__(cls)
        if 'post_id' not in datum or 'author_id' not in datum:
return None
else:
return super(Posts,cls).__new__(cls)
def __init__(self,datum=None):
"""
Input:
datum is a dictionary with keys as column names and values as their corresponding values.
eg,
Description:
This constructor sets the values of fields based on datum dictionary. If any field
is missing from datum, its default value will be inserted.
TODO:
"""
if datum == None:
self.post_id = str(uuid.uuid4())
return
self.post_id = datum["post_id"]
if "title" in datum.keys():
self.title = datum["title"]
else:
self.title = empty_string
if "content" in datum.keys():
self.content = datum["content"]
if "description" in datum.keys():
self.description = datum["description"]
if "creation_time" in datum.keys():
self.creation_time = datum["creation_time"]
if "view_permission" in datum.keys():
self.view_permission = datum["view_permission"]
if "content_type" in datum.keys():
self.content_type = datum["content_type"]
self.author_id = datum["author_id"]
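    # Illustrative example (editor's note, not part of the original model): a datum
    # dictionary with hypothetical values might look like
    #
    #     {
    #         "post_id": "d9b2d63d-a233-4123-847a-717d4c9e73fa",
    #         "author_id": "f3b1c5e2-9b1d-4c10-a3a4-0f7c2f6f2f6a",
    #         "title": "Hello",
    #         "content": "First post",
    #         "content_type": "text/plain",
    #     }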
def __repr__(self):
return '<User %r>' % (self.post_id)
# db.create_all()
|
CMPUT404-Fall2016/cmput404-project
|
Model/Posts.py
|
Python
|
gpl-3.0
| 2,713
|
import re
from cloudinary import CloudinaryResource, forms, uploader
from django.core.files.uploadedfile import UploadedFile
from django.db import models
from cloudinary.uploader import upload_options
from cloudinary.utils import upload_params
# Add introspection rules for South, if it's installed.
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^cloudinary.models.CloudinaryField"])
except ImportError:
pass
CLOUDINARY_FIELD_DB_RE = r'(?:(?P<resource_type>image|raw|video)/' \
r'(?P<type>upload|private|authenticated)/)?' \
r'(?:v(?P<version>\d+)/)?' \
r'(?P<public_id>.*?)' \
r'(\.(?P<format>[^.]+))?$'
def with_metaclass(meta, *bases):
"""
Create a base class with a metaclass.
This requires a bit of explanation: the basic idea is to make a dummy
metaclass for one level of class instantiation that replaces itself with
the actual metaclass.
Taken from six - https://pythonhosted.org/six/
"""
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
class CloudinaryField(models.Field):
description = "A resource stored in Cloudinary"
def __init__(self, *args, **kwargs):
self.default_form_class = kwargs.pop("default_form_class", forms.CloudinaryFileField)
self.type = kwargs.pop("type", "upload")
self.resource_type = kwargs.pop("resource_type", "image")
self.width_field = kwargs.pop("width_field", None)
self.height_field = kwargs.pop("height_field", None)
# Collect all options related to Cloudinary upload
self.options = {key: kwargs.pop(key) for key in set(kwargs.keys()) if key in upload_params + upload_options}
field_options = kwargs
field_options['max_length'] = 255
super(CloudinaryField, self).__init__(*args, **field_options)
def get_internal_type(self):
return 'CharField'
def value_to_string(self, obj):
"""
We need to support both legacy `_get_val_from_obj` and new `value_from_object` models.Field methods.
It would be better to wrap it with try -> except AttributeError -> fallback to legacy.
Unfortunately, we can catch AttributeError exception from `value_from_object` function itself.
Parsing exception string is an overkill here, that's why we check for attribute existence
:param obj: Value to serialize
:return: Serialized value
"""
if hasattr(self, 'value_from_object'):
value = self.value_from_object(obj)
else: # fallback for legacy django versions
value = self._get_val_from_obj(obj)
return self.get_prep_value(value)
def parse_cloudinary_resource(self, value):
m = re.match(CLOUDINARY_FIELD_DB_RE, value)
resource_type = m.group('resource_type') or self.resource_type
upload_type = m.group('type') or self.type
return CloudinaryResource(
type=upload_type,
resource_type=resource_type,
version=m.group('version'),
public_id=m.group('public_id'),
format=m.group('format')
)
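    # Illustrative example (editor's note, not part of the original field): a stored
    # value such as "image/upload/v1234/folder/sample.jpg" (hypothetical) parses into
    # CloudinaryResource(type="upload", resource_type="image", version="1234",
    # public_id="folder/sample", format="jpg").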
def from_db_value(self, value, expression, connection, *args, **kwargs):
# TODO: when dropping support for versions prior to 2.0, you may return
# the signature to from_db_value(value, expression, connection)
if value is not None:
return self.parse_cloudinary_resource(value)
def to_python(self, value):
if isinstance(value, CloudinaryResource):
return value
elif isinstance(value, UploadedFile):
return value
elif value is None or value is False:
return value
else:
return self.parse_cloudinary_resource(value)
def pre_save(self, model_instance, add):
value = super(CloudinaryField, self).pre_save(model_instance, add)
if isinstance(value, UploadedFile):
options = {"type": self.type, "resource_type": self.resource_type}
options.update(self.options)
if hasattr(value, 'seekable') and value.seekable():
value.seek(0)
instance_value = uploader.upload_resource(value, **options)
setattr(model_instance, self.attname, instance_value)
if self.width_field:
setattr(model_instance, self.width_field, instance_value.metadata.get('width'))
if self.height_field:
setattr(model_instance, self.height_field, instance_value.metadata.get('height'))
return self.get_prep_value(instance_value)
else:
return value
def get_prep_value(self, value):
if not value:
return self.get_default()
if isinstance(value, CloudinaryResource):
return value.get_prep_value()
else:
return value
def formfield(self, **kwargs):
options = {"type": self.type, "resource_type": self.resource_type}
options.update(kwargs.pop('options', {}))
defaults = {'form_class': self.default_form_class, 'options': options, 'autosave': False}
defaults.update(kwargs)
return super(CloudinaryField, self).formfield(**defaults)
|
JonnyWong16/plexpy
|
lib/cloudinary/models.py
|
Python
|
gpl-3.0
| 5,449
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Widget.progress'
db.add_column('workflows_widget', 'progress', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
def backwards(self, orm):
# Deleting field 'Widget.progress'
db.delete_column('workflows_widget', 'progress')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'workflows.abstractinput': {
'Meta': {'object_name': 'AbstractInput'},
'default': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'multi': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parameter_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'variable': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'widget': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inputs'", 'to': "orm['workflows.AbstractWidget']"})
},
'workflows.abstractoption': {
'Meta': {'object_name': 'AbstractOption'},
'abstract_input': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['workflows.AbstractInput']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'workflows.abstractoutput': {
'Meta': {'object_name': 'AbstractOutput'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'variable': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'widget': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'outputs'", 'to': "orm['workflows.AbstractWidget']"})
},
'workflows.abstractwidget': {
'Meta': {'object_name': 'AbstractWidget'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'widgets'", 'to': "orm['workflows.Category']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'has_progress_bar': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('workflows.thumbs.ThumbnailField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'interaction_view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'interactive': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'post_interact_action': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'widgets'", 'null': 'True', 'to': "orm['auth.User']"}),
'visualization_view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'wsdl': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'wsdl_method': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
},
'workflows.category': {
'Meta': {'ordering': "['name']", 'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['workflows.Category']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'categories'", 'null': 'True', 'to': "orm['auth.User']"})
},
'workflows.connection': {
'Meta': {'object_name': 'Connection'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'connections'", 'to': "orm['workflows.Input']"}),
'output': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'connections'", 'to': "orm['workflows.Output']"}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'connections'", 'to': "orm['workflows.Workflow']"})
},
'workflows.data': {
'Meta': {'object_name': 'Data'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'workflows.input': {
'Meta': {'object_name': 'Input'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inner_output': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'outer_input_rel'", 'null': 'True', 'to': "orm['workflows.Output']"}),
'multi_id': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'outer_output': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'inner_input_rel'", 'null': 'True', 'to': "orm['workflows.Output']"}),
'parameter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parameter_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'value': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'variable': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'widget': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inputs'", 'to': "orm['workflows.Widget']"})
},
'workflows.option': {
'Meta': {'object_name': 'Option'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['workflows.Input']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'workflows.output': {
'Meta': {'object_name': 'Output'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inner_input': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'outer_output_rel'", 'null': 'True', 'to': "orm['workflows.Input']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'outer_input': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'inner_output_rel'", 'null': 'True', 'to': "orm['workflows.Input']"}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'value': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'variable': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'widget': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'outputs'", 'to': "orm['workflows.Widget']"})
},
'workflows.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'active_workflow': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'users'", 'null': 'True', 'to': "orm['workflows.Workflow']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'userprofile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'workflows.widget': {
'Meta': {'object_name': 'Widget'},
'abstract_widget': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'instances'", 'null': 'True', 'to': "orm['workflows.AbstractWidget']"}),
'error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'finished': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interaction_waiting': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'progress': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'running': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'regular'", 'max_length': '50'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'widgets'", 'to': "orm['workflows.Workflow']"}),
'x': ('django.db.models.fields.IntegerField', [], {}),
'y': ('django.db.models.fields.IntegerField', [], {})
},
'workflows.workflow': {
'Meta': {'object_name': 'Workflow'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Untitled workflow'", 'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'workflows'", 'to': "orm['auth.User']"}),
'widget': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'workflow_link'", 'unique': 'True', 'null': 'True', 'to': "orm['workflows.Widget']"})
}
}
complete_apps = ['workflows']
|
janezkranjc/clowdflows
|
workflows/migrations/0021_auto__add_field_widget_progress.py
|
Python
|
gpl-3.0
| 14,878
|
from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from txircd.utils import ModeType, timestampStringFromTime, timestampStringFromTimeSeconds
from zope.interface import implementer
from datetime import datetime
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
irc.RPL_CREATIONTIME = "329"
@implementer(IPlugin, IModuleData)
class ModeCommand(ModuleData):
name = "ModeCommand"
core = True
def actions(self) -> List[Tuple[str, int, Callable]]:
return [ ("modemessage-channel", 1, self.sendChannelModesToUsers),
("modechanges-channel", 1, self.sendChannelModesToServers),
("modemessage-user", 1, self.sendUserModesToUsers),
("modechanges-user", 1, self.sendUserModesToServers),
("commandpermission-MODE", 1, self.restrictUse),
("buildisupport", 1, self.buildISupport) ]
def userCommands(self) -> List[Tuple[str, int, Command]]:
return [ ("MODE", 1, UserMode(self.ircd)) ]
def serverCommands(self) -> List[Tuple[str, int, Command]]:
return [ ("MODE", 1, ServerMode(self.ircd)) ]
def getOutputModes(self, modes: List[Tuple[bool, str, str, str, datetime]], useUUIDs: bool) -> List[List[str]]:
addInStr = None
modeStrList = []
params = []
modeLists = []
modeLen = 0
for modeData in modes:
adding, mode, param, setBy, setTime = modeData
paramLen = 0
if param is not None:
paramLen = len(param)
if modeLen + paramLen + 3 > 300: # Don't let the mode output get too long
modeLists.append(["".join(modeStrList)] + params)
addInStr = None
modeStrList = []
params = []
modeLen = 0
if adding != addInStr:
if adding:
modeStrList.append("+")
else:
modeStrList.append("-")
addInStr = adding
modeLen += 1
modeStrList.append(mode)
modeLen += 1
if param is not None:
if not useUUIDs and self.ircd.channelModeTypes[mode] == ModeType.Status:
param = self.ircd.users[param].nick
params.append(param)
modeLen += 1 + paramLen
modeLists.append(["".join(modeStrList)] + params)
return modeLists
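	# Illustrative example (editor's note, not part of the original module): given
	# modes = [(True, "o", "<target uuid>", setBy, setTime), (False, "m", None, setBy, setTime)]
	# and useUUIDs=True, this yields [["+o-m", "<target uuid>"]]; with useUUIDs=False the
	# status-mode parameter is translated to the target user's nick instead.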
def sendChannelModesToUsers(self, users: List["IRCUser"], channel: "IRCChannel", source: str, sourceName: str, modes: List[Tuple[bool, str, str, str, datetime]]) -> None:
modeOuts = self.getOutputModes(modes, False)
userSource = source in self.ircd.users
if userSource:
conditionalTags = {}
self.ircd.runActionStandard("sendingusertags", self.ircd.users[source], conditionalTags)
for modeOut in modeOuts:
modeStr = modeOut[0]
params = modeOut[1:]
for user in users:
tags = {}
if userSource:
tags = user.filterConditionalTags(conditionalTags)
user.sendMessage("MODE", modeStr, *params, prefix=sourceName, to=channel.name, tags=tags)
del users[:]
def sendChannelModesToServers(self, channel: "IRCChannel", source: str, sourceName: str, modes: List[Tuple[bool, str, str, str, datetime]]) -> None:
modeOuts = self.getOutputModes(modes, True)
if source[:3] == self.ircd.serverID:
fromServer = None
else:
fromServer = self.ircd.servers[source[:3]]
while fromServer.nextClosest != self.ircd.serverID:
fromServer = self.ircd.servers[fromServer.nextClosest]
for modeOut in modeOuts:
modeStr = modeOut[0]
params = modeOut[1:]
self.ircd.broadcastToServers(fromServer, "MODE", channel.name, timestampStringFromTime(channel.existedSince), modeStr, *params, prefix=source)
def sendUserModesToUsers(self, users: List["IRCUser"], user: "IRCUser", source: str, sourceName: str, modes: List[Tuple[bool, str, str, str, datetime]]) -> None:
modeOuts = self.getOutputModes(modes, False)
userSource = source in self.ircd.users
if userSource:
conditionalTags = {}
self.ircd.runActionStandard("sendingusertags", self.ircd.users[source], conditionalTags)
for modeOut in modeOuts:
modeStr = modeOut[0]
params = modeOut[1:]
for u in set(users):
tags = {}
if userSource:
tags = user.filterConditionalTags(conditionalTags)
u.sendMessage("MODE", modeStr, *params, prefix=sourceName, to=user.nick, tags=tags)
del users[:]
def sendUserModesToServers(self, user: "IRCUser", source: str, sourceName: str, modes: List[Tuple[bool, str, str, str, datetime]]) -> None:
if not user.isRegistered():
return # If the user isn't registered yet, it's a remote user for whom we just received modes
modeOuts = self.getOutputModes(modes, False)
if source[:3] == self.ircd.serverID:
fromServer = None
else:
fromServer = self.ircd.servers[source[:3]]
while fromServer.nextClosest != self.ircd.serverID:
fromServer = self.ircd.servers[fromServer.nextClosest]
for modeOut in modeOuts:
modeStr = modeOut[0]
params = modeOut[1:]
self.ircd.broadcastToServers(fromServer, "MODE", user.uuid, timestampStringFromTime(user.connectedSince), modeStr, *params, prefix=source)
def restrictUse(self, user: "IRCUser", data: Dict[Any, Any]) -> Optional[bool]:
if "channel" not in data or "modes" not in data:
return None
if not data["params"]:
for mode in data["modes"]:
if mode != "+" and mode != "-" and (mode not in self.ircd.channelModeTypes or self.ircd.channelModeTypes[mode] != ModeType.List):
break
else:
return None # All the modes are list modes, and there are no parameters, so we're listing list mode parameters
channel = data["channel"]
if not self.ircd.runActionUntilValue("checkchannellevel", "mode", channel, user, users=[user], channels=[channel]):
user.sendMessage(irc.ERR_CHANOPRIVSNEEDED, channel.name, "You do not have access to set channel modes")
return False
return None
def buildISupport(self, data: Dict[str, Union[str, int]]) -> None:
data["MODES"] = self.ircd.config.get("modes_per_line", 20)
@implementer(ICommand)
class UserMode(Command):
def __init__(self, ircd):
self.ircd = ircd
def parseParams(self, user: "IRCUser", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]:
if not params or not params[0]:
user.sendSingleError("ModeCmd", irc.ERR_NEEDMOREPARAMS, "MODE", "Not enough parameters")
return None
channel = None
if params[0] in self.ircd.channels:
channel = self.ircd.channels[params[0]]
elif params[0] in self.ircd.userNicks:
if self.ircd.userNicks[params[0]] != user:
user.sendSingleError("ModeCmd", irc.ERR_USERSDONTMATCH, "Can't operate on modes for other users")
return None
else:
user.sendSingleError("ModeCmd", irc.ERR_NOSUCHNICK, params[0], "No such nick/channel")
return None
if len(params) == 1:
if channel:
return {
"channel": channel
}
return {}
modeStr = params[1]
modeParams = params[2:]
if channel:
return {
"channel": channel,
"modes": modeStr,
"params": modeParams
}
return {
"modes": modeStr,
"params": modeParams
}
def affectedChannels(self, user: "IRCUser", data: Dict[Any, Any]) -> List["IRCChannel"]:
if "channel" in data:
return [ data["channel"] ]
return []
def execute(self, user: "IRCUser", data: Dict[Any, Any]) -> bool:
if "modes" not in data:
if "channel" in data:
channel = data["channel"]
user.sendMessage(irc.RPL_CHANNELMODEIS, channel.name, *(channel.modeString(user).split(" ")))
user.sendMessage(irc.RPL_CREATIONTIME, channel.name, timestampStringFromTimeSeconds(channel.existedSince))
return True
user.sendMessage(irc.RPL_UMODEIS, user.modeString(user))
return True
if "channel" in data:
channel = data["channel"]
channel.setModesByUser(user, data["modes"], data["params"])
return True
user.setModesByUser(user, data["modes"], data["params"])
return True
@implementer(ICommand)
class ServerMode(Command):
burstQueuePriority = 70
def __init__(self, ircd):
self.ircd = ircd
def parseParams(self, server: "IRCServer", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]:
if len(params) < 3:
return None
if prefix not in self.ircd.users and prefix not in self.ircd.servers:
if prefix in self.ircd.recentlyQuitUsers or prefix in self.ircd.recentlyQuitServers:
return {
"lostsource": True
}
return None # It's safe to say other servers shouldn't be sending modes sourced from us. That's our job! (That's why we don't test for that.)
if params[0] not in self.ircd.users and params[0] not in self.ircd.channels:
if params[0] in self.ircd.recentlyQuitUsers or params[0] in self.ircd.recentlyDestroyedChannels:
return {
"losttarget": True
}
return None
time = None
try:
time = datetime.utcfromtimestamp(float(params[1]))
except (TypeError, ValueError):
return None
modes = params[2]
parameters = params[3:]
parsedModes = []
modeTypes = {}
if params[0] in self.ircd.channels:
modeTypes = self.ircd.channelModeTypes
else:
modeTypes = self.ircd.userModeTypes
adding = True
for mode in modes:
if mode == "+":
adding = True
elif mode == "-":
adding = False
else:
if mode not in modeTypes:
return None # Uh oh, a desync!
modeType = modeTypes[mode]
parameter = None
if modeType in (ModeType.Status, ModeType.List, ModeType.ParamOnUnset) or (adding and modeType == ModeType.Param):
parameter = parameters.pop(0)
parsedModes.append((adding, mode, parameter))
return {
"source": prefix,
"target": params[0],
"time": time,
"modes": parsedModes
}
def execute(self, server: "IRCServer", data: Dict[Any, Any]) -> bool:
if "lostsource" in data or "losttarget" in data:
return True
source = data["source"]
target = data["target"]
targetTime = data["time"]
if target in self.ircd.channels:
channel = self.ircd.channels[target]
if targetTime > channel.existedSince:
return True
if targetTime < channel.existedSince:
channel.setCreationTime(targetTime, server)
# We'll need to transform the user parameters of status modes before we're done here
channel.setModes(data["modes"], source)
return True
user = self.ircd.users[target]
if targetTime > user.connectedSince:
return True
if targetTime < user.connectedSince:
modeUnsetList = []
for mode, param in user.modes.items():
if self.ircd.userModeTypes[mode] == ModeType.List:
for paramData in param:
modeUnsetList.append((False, mode, paramData[0]))
else:
modeUnsetList.append((False, mode, param))
if modeUnsetList:
user.setModes(modeUnsetList, source)
user.setModes(data["modes"], source)
return True
modeCommand = ModeCommand()
|
Heufneutje/txircd
|
txircd/modules/rfc/cmd_mode.py
|
Python
|
bsd-3-clause
| 10,635
|
# encoding: utf-8
"""
Test data for relationship-related unit tests.
"""
from __future__ import absolute_import
from docx.opc.constants import RELATIONSHIP_TYPE as RT
from docx.opc.rel import Relationships
from docx.opc.constants import NAMESPACE as NS
from docx.opc.oxml import parse_xml
class BaseBuilder(object):
"""
Provides common behavior for all data builders.
"""
@property
def element(self):
"""Return element based on XML generated by builder"""
return parse_xml(self.xml)
def with_indent(self, indent):
"""Add integer *indent* spaces at beginning of element XML"""
self._indent = indent
return self
class RelationshipsBuilder(object):
"""Builder class for test Relationships"""
partname_tmpls = {
RT.SLIDE_MASTER: '/ppt/slideMasters/slideMaster%d.xml',
RT.SLIDE: '/ppt/slides/slide%d.xml',
}
def __init__(self):
self.relationships = []
self.next_rel_num = 1
self.next_partnums = {}
def _next_partnum(self, reltype):
if reltype not in self.next_partnums:
self.next_partnums[reltype] = 1
partnum = self.next_partnums[reltype]
self.next_partnums[reltype] = partnum + 1
return partnum
@property
def next_rId(self):
rId = 'rId%d' % self.next_rel_num
self.next_rel_num += 1
return rId
def _next_tuple_partname(self, reltype):
partname_tmpl = self.partname_tmpls[reltype]
partnum = self._next_partnum(reltype)
return partname_tmpl % partnum
def build(self):
rels = Relationships()
for rel in self.relationships:
rels.add_rel(rel)
return rels
class CT_DefaultBuilder(BaseBuilder):
"""
Test data builder for CT_Default (Default) XML element that appears in
`[Content_Types].xml`.
"""
def __init__(self):
"""Establish instance variables with default values"""
self._content_type = 'application/xml'
self._extension = 'xml'
self._indent = 0
self._namespace = ' xmlns="%s"' % NS.OPC_CONTENT_TYPES
def with_content_type(self, content_type):
"""Set ContentType attribute to *content_type*"""
self._content_type = content_type
return self
def with_extension(self, extension):
"""Set Extension attribute to *extension*"""
self._extension = extension
return self
def without_namespace(self):
"""Don't include an 'xmlns=' attribute"""
self._namespace = ''
return self
@property
def xml(self):
"""Return Default element"""
tmpl = '%s<Default%s Extension="%s" ContentType="%s"/>\n'
indent = ' ' * self._indent
return tmpl % (indent, self._namespace, self._extension,
self._content_type)
class CT_OverrideBuilder(BaseBuilder):
"""
Test data builder for CT_Override (Override) XML element that appears in
`[Content_Types].xml`.
"""
def __init__(self):
"""Establish instance variables with default values"""
self._content_type = 'app/vnd.type'
self._indent = 0
self._namespace = ' xmlns="%s"' % NS.OPC_CONTENT_TYPES
self._partname = '/part/name.xml'
def with_content_type(self, content_type):
"""Set ContentType attribute to *content_type*"""
self._content_type = content_type
return self
def with_partname(self, partname):
"""Set PartName attribute to *partname*"""
self._partname = partname
return self
def without_namespace(self):
"""Don't include an 'xmlns=' attribute"""
self._namespace = ''
return self
@property
def xml(self):
"""Return Override element"""
tmpl = '%s<Override%s PartName="%s" ContentType="%s"/>\n'
indent = ' ' * self._indent
return tmpl % (indent, self._namespace, self._partname,
self._content_type)
class CT_RelationshipBuilder(BaseBuilder):
"""
Test data builder for CT_Relationship (Relationship) XML element that
appears in .rels files
"""
def __init__(self):
"""Establish instance variables with default values"""
self._rId = 'rId9'
self._reltype = 'ReLtYpE'
self._target = 'docProps/core.xml'
self._target_mode = None
self._indent = 0
self._namespace = ' xmlns="%s"' % NS.OPC_RELATIONSHIPS
def with_rId(self, rId):
"""Set Id attribute to *rId*"""
self._rId = rId
return self
def with_reltype(self, reltype):
"""Set Type attribute to *reltype*"""
self._reltype = reltype
return self
def with_target(self, target):
"""Set XXX attribute to *target*"""
self._target = target
return self
def with_target_mode(self, target_mode):
"""Set TargetMode attribute to *target_mode*"""
self._target_mode = None if target_mode == 'Internal' else target_mode
return self
def without_namespace(self):
"""Don't include an 'xmlns=' attribute"""
self._namespace = ''
return self
@property
def target_mode(self):
if self._target_mode is None:
return ''
return ' TargetMode="%s"' % self._target_mode
@property
def xml(self):
"""Return Relationship element"""
tmpl = '%s<Relationship%s Id="%s" Type="%s" Target="%s"%s/>\n'
indent = ' ' * self._indent
return tmpl % (indent, self._namespace, self._rId, self._reltype,
self._target, self.target_mode)
class CT_RelationshipsBuilder(BaseBuilder):
"""
Test data builder for CT_Relationships (Relationships) XML element, the
root element in .rels files.
"""
def __init__(self):
"""Establish instance variables with default values"""
self._rels = (
('rId1', 'http://reltype1', 'docProps/core.xml', 'Internal'),
('rId2', 'http://linktype', 'http://some/link', 'External'),
('rId3', 'http://reltype2', '../slides/slide1.xml', 'Internal'),
)
@property
def xml(self):
"""
Return XML string based on settings accumulated via method calls.
"""
xml = '<Relationships xmlns="%s">\n' % NS.OPC_RELATIONSHIPS
for rId, reltype, target, target_mode in self._rels:
xml += (a_Relationship().with_rId(rId)
.with_reltype(reltype)
.with_target(target)
.with_target_mode(target_mode)
.with_indent(2)
.without_namespace()
.xml)
xml += '</Relationships>\n'
return xml
class CT_TypesBuilder(BaseBuilder):
"""
Test data builder for CT_Types (<Types>) XML element, the root element in
[Content_Types].xml files
"""
def __init__(self):
"""Establish instance variables with default values"""
self._defaults = (
('xml', 'application/xml'),
('jpeg', 'image/jpeg'),
)
self._empty = False
self._overrides = (
('/docProps/core.xml', 'app/vnd.type1'),
('/ppt/presentation.xml', 'app/vnd.type2'),
('/docProps/thumbnail.jpeg', 'image/jpeg'),
)
def empty(self):
self._empty = True
return self
@property
def xml(self):
"""
Return XML string based on settings accumulated via method calls
"""
if self._empty:
return '<Types xmlns="%s"/>\n' % NS.OPC_CONTENT_TYPES
xml = '<Types xmlns="%s">\n' % NS.OPC_CONTENT_TYPES
for extension, content_type in self._defaults:
xml += (a_Default().with_extension(extension)
.with_content_type(content_type)
.with_indent(2)
.without_namespace()
.xml)
for partname, content_type in self._overrides:
xml += (an_Override().with_partname(partname)
.with_content_type(content_type)
.with_indent(2)
.without_namespace()
.xml)
xml += '</Types>\n'
return xml
def a_Default():
return CT_DefaultBuilder()
def a_Relationship():
return CT_RelationshipBuilder()
def a_Relationships():
return CT_RelationshipsBuilder()
def a_Types():
return CT_TypesBuilder()
def an_Override():
return CT_OverrideBuilder()
|
eruffaldi/python-docx
|
tests/opc/unitdata/rels.py
|
Python
|
mit
| 8,804
|
"""Helper functions
"""
import numpy as np
import datetime as dt
def bstr2seconds(data):
""" Converts a byte string to a timedelta object
Parameters
----------
data : bytestring
Contains a bytestring representing a timedelta
Returns
-------
dt.timedelta
timedelta object
"""
string = str(data)
# Use string starting from character 2 in order to remove
# the "b'" preceding the string in a byte string
hours, minutes, seconds, milliseconds = string[2:].split(':')
time = dt.timedelta(hours=int(hours), minutes=int(
minutes), seconds=int(seconds)).total_seconds()
return time
#return np.timedelta64(int(time), 's')
def str2timedelta(data):
""" Converts a string to a timedelta object
Parameters
----------
data : string
Contains a string representing a timedelta
Returns
-------
dt.timedelta
timedelta object
"""
string = str(data)
    # Unlike bstr2seconds, the input is a plain string, so no "b'" prefix
    # needs to be stripped before splitting on ':'
hours, minutes, seconds = string.split(':')
time = dt.timedelta(hours=int(hours), minutes=int(
minutes), seconds=int(seconds)).total_seconds()
return time
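# Illustrative usage sketch (not part of the original module); the time strings
# below are hypothetical examples of the formats the two helpers expect:
#
#     bstr2seconds(b'01:30:15:000')   # -> 5415.0 (milliseconds field is ignored)
#     str2timedelta('01:30:15')       # -> 5415.0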
|
mattihappy/mtibattery
|
mtibattery/helper.py
|
Python
|
gpl-3.0
| 1,265
|
# -*- Mode: Python -*-
# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
# Author: Sam Rushing <rushing@nightmare.com>
# ======================================================================
# Copyright 1996 by Sam Rushing
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
"""Basic infrastructure for asynchronous socket service clients and servers.
There are only two ways to have a program on a single processor do "more
than one thing at a time". Multi-threaded programming is the simplest and
most popular way to do it, but there is another very different technique
that lets you have nearly all the advantages of multi-threading, without
actually using multiple threads. It's really only practical if your program
is largely I/O bound. If your program is CPU bound, then pre-emptive
scheduled threads are probably what you really need. Network servers are
rarely CPU-bound, however.
If your operating system supports the select() system call in its I/O
library (and nearly all do), then you can use it to juggle multiple
communication channels at once; doing other work while your I/O is taking
place in the "background." Although this strategy can seem strange and
complex, especially at first, it is in many ways easier to understand and
control than multi-threaded programming. The module documented here solves
many of the difficult problems for you, making the task of building
sophisticated high-performance network servers and clients a snap.
"""
import select
import socket
import sys
import time
import warnings
import os
from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, EINVAL, \
ENOTCONN, ESHUTDOWN, EINTR, EISCONN, EBADF, ECONNABORTED, EPIPE, EAGAIN, \
errorcode
_DISCONNECTED = frozenset((ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE,
EBADF))
try:
socket_map
except NameError:
socket_map = {}
def _strerror(err):
try:
return os.strerror(err)
except (ValueError, OverflowError, NameError):
if err in errorcode:
return errorcode[err]
return "Unknown error %s" %err
class ExitNow(Exception):
pass
_reraised_exceptions = (ExitNow, KeyboardInterrupt, SystemExit)
def read(obj):
try:
obj.handle_read_event()
except _reraised_exceptions:
raise
except:
obj.handle_error()
def write(obj):
try:
obj.handle_write_event()
except _reraised_exceptions:
raise
except:
obj.handle_error()
def _exception(obj):
try:
obj.handle_expt_event()
except _reraised_exceptions:
raise
except:
obj.handle_error()
def readwrite(obj, flags):
try:
if flags & select.POLLIN:
obj.handle_read_event()
if flags & select.POLLOUT:
obj.handle_write_event()
if flags & select.POLLPRI:
obj.handle_expt_event()
if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
obj.handle_close()
except socket.error as e:
if e.args[0] not in _DISCONNECTED:
obj.handle_error()
else:
obj.handle_close()
except _reraised_exceptions:
raise
except:
obj.handle_error()
def poll(timeout=0.0, map=None):
if map is None:
map = socket_map
if map:
r = []; w = []; e = []
for fd, obj in list(map.items()):
is_r = obj.readable()
is_w = obj.writable()
if is_r:
r.append(fd)
# accepting sockets should not be writable
if is_w and not obj.accepting:
w.append(fd)
if is_r or is_w:
e.append(fd)
if [] == r == w == e:
time.sleep(timeout)
return
try:
r, w, e = select.select(r, w, e, timeout)
except select.error as err:
if err.args[0] != EINTR:
raise
else:
return
for fd in r:
obj = map.get(fd)
if obj is None:
continue
read(obj)
for fd in w:
obj = map.get(fd)
if obj is None:
continue
write(obj)
for fd in e:
obj = map.get(fd)
if obj is None:
continue
_exception(obj)
def poll2(timeout=0.0, map=None):
# Use the poll() support added to the select module in Python 2.0
if map is None:
map = socket_map
if timeout is not None:
# timeout is in milliseconds
timeout = int(timeout*1000)
pollster = select.poll()
if map:
for fd, obj in list(map.items()):
flags = 0
if obj.readable():
flags |= select.POLLIN | select.POLLPRI
# accepting sockets should not be writable
if obj.writable() and not obj.accepting:
flags |= select.POLLOUT
if flags:
# Only check for exceptions if object was either readable
# or writable.
flags |= select.POLLERR | select.POLLHUP | select.POLLNVAL
pollster.register(fd, flags)
try:
r = pollster.poll(timeout)
except select.error as err:
if err.args[0] != EINTR:
raise
r = []
for fd, flags in r:
obj = map.get(fd)
if obj is None:
continue
readwrite(obj, flags)
poll3 = poll2 # Alias for backward compatibility
def loop(timeout=30.0, use_poll=False, map=None, count=None):
if map is None:
map = socket_map
if use_poll and hasattr(select, 'poll'):
poll_fun = poll2
else:
poll_fun = poll
if count is None:
while map:
poll_fun(timeout, map)
else:
while map and count > 0:
poll_fun(timeout, map)
count = count - 1
class dispatcher:
debug = False
connected = False
accepting = False
closing = False
addr = None
ignore_log_types = frozenset(['warning'])
def __init__(self, sock=None, map=None):
if map is None:
self._map = socket_map
else:
self._map = map
self._fileno = None
if sock:
# Set to nonblocking just to make sure for cases where we
# get a socket from a blocking source.
sock.setblocking(0)
self.set_socket(sock, map)
self.connected = True
# The constructor no longer requires that the socket
# passed be connected.
try:
self.addr = sock.getpeername()
except socket.error as err:
if err.args[0] == ENOTCONN:
# To handle the case where we got an unconnected
# socket.
self.connected = False
else:
# The socket is broken in some unknown way, alert
# the user and remove it from the map (to prevent
# polling of broken sockets).
self.del_channel(map)
raise
else:
self.socket = None
def __repr__(self):
status = [self.__class__.__module__+"."+self.__class__.__name__]
if self.accepting and self.addr:
status.append('listening')
elif self.connected:
status.append('connected')
if self.addr is not None:
try:
status.append('%s:%d' % self.addr)
except TypeError:
status.append(repr(self.addr))
return '<%s at %#x>' % (' '.join(status), id(self))
__str__ = __repr__
def add_channel(self, map=None):
#self.log_info('adding channel %s' % self)
if map is None:
map = self._map
map[self._fileno] = self
def del_channel(self, map=None):
fd = self._fileno
if map is None:
map = self._map
if fd in map:
#self.log_info('closing channel %d:%s' % (fd, self))
del map[fd]
self._fileno = None
def create_socket(self, family, type):
self.family_and_type = family, type
sock = socket.socket(family, type)
sock.setblocking(0)
self.set_socket(sock)
def set_socket(self, sock, map=None):
self.socket = sock
## self.__dict__['socket'] = sock
self._fileno = sock.fileno()
self.add_channel(map)
def set_reuse_addr(self):
# try to re-use a server port if possible
try:
self.socket.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR,
self.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR) | 1
)
except socket.error:
pass
# ==================================================
# predicates for select()
# these are used as filters for the lists of sockets
# to pass to select().
# ==================================================
def readable(self):
return True
def writable(self):
return True
# ==================================================
# socket object methods.
# ==================================================
def listen(self, num):
self.accepting = True
if os.name == 'nt' and num > 5:
num = 5
return self.socket.listen(num)
def bind(self, addr):
self.addr = addr
return self.socket.bind(addr)
def connect(self, address):
self.connected = False
err = self.socket.connect_ex(address)
if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \
or err == EINVAL and os.name in ('nt', 'ce'):
return
if err in (0, EISCONN):
self.addr = address
self.handle_connect_event()
else:
raise socket.error(err, errorcode[err])
def accept(self):
# XXX can return either an address pair or None
try:
conn, addr = self.socket.accept()
except TypeError:
return None
except socket.error as why:
if why.args[0] in (EWOULDBLOCK, ECONNABORTED, EAGAIN):
return None
else:
raise
else:
return conn, addr
def send(self, data):
try:
result = self.socket.send(data)
return result
except socket.error as why:
if why.args[0] == EWOULDBLOCK:
return 0
elif why.args[0] in _DISCONNECTED:
self.handle_close()
return 0
else:
raise
def recv(self, buffer_size):
try:
data = self.socket.recv(buffer_size)
if not data:
# a closed connection is indicated by signaling
# a read condition, and having recv() return 0.
self.handle_close()
return b''
else:
return data
except socket.error as why:
# winsock sometimes throws ENOTCONN
if why.args[0] in _DISCONNECTED:
self.handle_close()
return b''
else:
raise
def close(self):
self.connected = False
self.accepting = False
self.del_channel()
try:
self.socket.close()
except socket.error as why:
if why.args[0] not in (ENOTCONN, EBADF):
raise
# cheap inheritance, used to pass all other attribute
# references to the underlying socket object.
def __getattr__(self, attr):
try:
retattr = getattr(self.socket, attr)
except AttributeError:
raise AttributeError("%s instance has no attribute '%s'"
%(self.__class__.__name__, attr))
else:
msg = "%(me)s.%(attr)s is deprecated; use %(me)s.socket.%(attr)s " \
"instead" % {'me' : self.__class__.__name__, 'attr' : attr}
warnings.warn(msg, DeprecationWarning, stacklevel=2)
return retattr
# log and log_info may be overridden to provide more sophisticated
# logging and warning methods. In general, log is for 'hit' logging
# and 'log_info' is for informational, warning and error logging.
def log(self, message):
sys.stderr.write('log: %s\n' % str(message))
def log_info(self, message, type='info'):
if type not in self.ignore_log_types:
print('%s: %s' % (type, message))
def handle_read_event(self):
if self.accepting:
# accepting sockets are never connected, they "spawn" new
# sockets that are connected
self.handle_accept()
elif not self.connected:
self.handle_connect_event()
self.handle_read()
else:
self.handle_read()
def handle_connect_event(self):
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
raise socket.error(err, _strerror(err))
self.handle_connect()
self.connected = True
def handle_write_event(self):
if self.accepting:
# Accepting sockets shouldn't get a write event.
# We will pretend it didn't happen.
return
if not self.connected:
#check for errors
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
raise socket.error(err, _strerror(err))
self.handle_connect_event()
self.handle_write()
def handle_expt_event(self):
# handle_expt_event() is called if there might be an error on the
# socket, or if there is OOB data
# check for the error condition first
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
# we can get here when select.select() says that there is an
# exceptional condition on the socket
# since there is an error, we'll go ahead and close the socket
# like we would in a subclassed handle_read() that received no
# data
self.handle_close()
else:
self.handle_expt()
def handle_error(self):
nil, t, v, tbinfo = compact_traceback()
# sometimes a user repr method will crash.
try:
self_repr = repr(self)
except:
self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
self.log_info(
'uncaptured python exception, closing channel %s (%s:%s %s)' % (
self_repr,
t,
v,
tbinfo
),
'error'
)
self.handle_close()
def handle_expt(self):
self.log_info('unhandled incoming priority event', 'warning')
def handle_read(self):
self.log_info('unhandled read event', 'warning')
def handle_write(self):
self.log_info('unhandled write event', 'warning')
def handle_connect(self):
self.log_info('unhandled connect event', 'warning')
def handle_accept(self):
pair = self.accept()
if pair is not None:
self.handle_accepted(*pair)
def handle_accepted(self, sock, addr):
sock.close()
self.log_info('unhandled accepted event', 'warning')
def handle_close(self):
self.log_info('unhandled close event', 'warning')
self.close()
# ---------------------------------------------------------------------------
# adds simple buffered output capability, useful for simple clients.
# [for more sophisticated usage use asynchat.async_chat]
# ---------------------------------------------------------------------------
class dispatcher_with_send(dispatcher):
def __init__(self, sock=None, map=None):
dispatcher.__init__(self, sock, map)
self.out_buffer = b''
def initiate_send(self):
num_sent = 0
num_sent = dispatcher.send(self, self.out_buffer[:512])
self.out_buffer = self.out_buffer[num_sent:]
def handle_write(self):
self.initiate_send()
def writable(self):
return (not self.connected) or len(self.out_buffer)
def send(self, data):
if self.debug:
self.log_info('sending %s' % repr(data))
self.out_buffer = self.out_buffer + data
self.initiate_send()
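# Illustrative client-side sketch using dispatcher_with_send (not part of the
# original file; the host name and request below are arbitrary examples). The
# request is sent from handle_connect() so nothing is written before the
# non-blocking connect completes:
#
#     import asyncore, socket
#
#     class HTTPClient(asyncore.dispatcher_with_send):
#         def __init__(self, host):
#             asyncore.dispatcher_with_send.__init__(self)
#             self.host = host
#             self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
#             self.connect((host, 80))
#         def handle_connect(self):
#             request = 'GET / HTTP/1.0\r\nHost: %s\r\n\r\n' % self.host
#             self.send(request.encode('ascii'))
#         def handle_read(self):
#             print(self.recv(8192))
#
#     HTTPClient('www.example.com')
#     asyncore.loop()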
# ---------------------------------------------------------------------------
# used for debugging.
# ---------------------------------------------------------------------------
def compact_traceback():
t, v, tb = sys.exc_info()
tbinfo = []
if not tb: # Must have a traceback
raise AssertionError("traceback does not exist")
while tb:
tbinfo.append((
tb.tb_frame.f_code.co_filename,
tb.tb_frame.f_code.co_name,
str(tb.tb_lineno)
))
tb = tb.tb_next
# just to be safe
del tb
file, function, line = tbinfo[-1]
info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo])
return (file, function, line), t, v, info
def close_all(map=None, ignore_all=False):
if map is None:
map = socket_map
for x in list(map.values()):
try:
x.close()
except OSError as x:
if x.args[0] == EBADF:
pass
elif not ignore_all:
raise
except _reraised_exceptions:
raise
except:
if not ignore_all:
raise
map.clear()
# Asynchronous File I/O:
#
# After a little research (reading man pages on various unixen, and
# digging through the linux kernel), I've determined that select()
# isn't meant for doing asynchronous file i/o.
# Heartening, though - reading linux/mm/filemap.c shows that linux
# supports asynchronous read-ahead. So _MOST_ of the time, the data
# will be sitting in memory for us already when we go to read it.
#
# What other OS's (besides NT) support async file i/o? [VMS?]
#
# Regardless, this is useful for pipes, and stdin/stdout...
if os.name == 'posix':
import fcntl
class file_wrapper:
# Here we override just enough to make a file
# look like a socket for the purposes of asyncore.
# The passed fd is automatically os.dup()'d
def __init__(self, fd):
self.fd = os.dup(fd)
def recv(self, *args):
return os.read(self.fd, *args)
def send(self, *args):
return os.write(self.fd, *args)
def getsockopt(self, level, optname, buflen=None):
if (level == socket.SOL_SOCKET and
optname == socket.SO_ERROR and
not buflen):
return 0
raise NotImplementedError("Only asyncore specific behaviour "
"implemented.")
read = recv
write = send
def close(self):
os.close(self.fd)
def fileno(self):
return self.fd
class file_dispatcher(dispatcher):
def __init__(self, fd, map=None):
dispatcher.__init__(self, None, map)
self.connected = True
try:
fd = fd.fileno()
except AttributeError:
pass
self.set_file(fd)
# set it to non-blocking mode
flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
flags = flags | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
def set_file(self, fd):
self.socket = file_wrapper(fd)
self._fileno = self.socket.fileno()
self.add_channel()
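# Illustrative sketch of the file-dispatcher pattern described above (not part
# of the original file): wrapping stdin (or any pipe) lets loop() multiplex it
# alongside sockets. writable() is overridden because a pure reader should not
# receive write events:
#
#     import asyncore, sys
#
#     class StdinReader(asyncore.file_dispatcher):
#         def writable(self):
#             return False
#         def handle_read(self):
#             data = self.recv(512)
#             if data:
#                 sys.stdout.write(data.decode('utf-8', 'replace'))
#
#     StdinReader(sys.stdin)
#     asyncore.loop()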
|
invisiblek/python-for-android
|
python3-alpha/python3-src/Lib/asyncore.py
|
Python
|
apache-2.0
| 21,009
|
from .common import generic_url_role
def url_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""Role for linking to url articles.
:url:`https://google.com` ->
link: https://google.com
text: https://google.com
:url:`Google <https://google.com>` ->
link: https://google.com
text: Google
:url:`*Google* <https://google.com>` ->
link: https://google.com
text (html): <em>Google</em>
"""
def url_handler(target):
return target
return generic_url_role(name, text, url_handler)
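# Illustrative registration sketch (an assumption, not shown in this module):
# a docutils role function like url_role is typically wired up via
# docutils.parsers.rst.roles, e.g.
#
#     from docutils.parsers.rst import roles
#     roles.register_local_role('url', url_role)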
|
tony/django-docutils
|
django_docutils/lib/roles/url.py
|
Python
|
mit
| 581
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^ourstory$', views.our_story, name='our_story'),
url(r'^ourteam$', views.our_team, name='our_team'),
url(r'^childrensprogram$', views.childrens_program, name='childrensprogram'),
url(r'^apply$', views.apply, name='apply'),
url(r'^teensprogram$', views.teens_program, name='teensprogram'),
url(r'^womensprogram$', views.womens_program, name='womensprogram'),
url(r'^artisanprogram$', views.artisan_program, name='artisanprogram'),
url(r'^ethics$', views.ethical_post, name='ethical_page'),
url(r'^peru$', views.why_peru, name='peru'),
url(r'^volunteerperu$', views.volunteer_peru, name='volunteer_peru'),
url(r'^financials$', views.financials, name='financials'),
url(r'^donations$', views.donations, name='donations'),
url(r'^chicago$', views.volunteer_chicago, name='volunteer_chicago'),
url(r'^donate$', views.donate, name='donate'),
]
|
DjangoGirlsSeoul/lightandleadership
|
spanish_content/urls.py
|
Python
|
apache-2.0
| 1,012
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from .. import Error, Tags, Warning, register
REFERRER_POLICY_VALUES = {
'no-referrer', 'no-referrer-when-downgrade', 'origin',
'origin-when-cross-origin', 'same-origin', 'strict-origin',
'strict-origin-when-cross-origin', 'unsafe-url',
}
SECRET_KEY_INSECURE_PREFIX = 'django-insecure-'
SECRET_KEY_MIN_LENGTH = 50
SECRET_KEY_MIN_UNIQUE_CHARACTERS = 5
W001 = Warning(
"You do not have 'django.middleware.security.SecurityMiddleware' "
"in your MIDDLEWARE so the SECURE_HSTS_SECONDS, "
"SECURE_CONTENT_TYPE_NOSNIFF, SECURE_BROWSER_XSS_FILTER, "
"SECURE_REFERRER_POLICY, and SECURE_SSL_REDIRECT settings will have no "
"effect.",
id='security.W001',
)
W002 = Warning(
"You do not have "
"'django.middleware.clickjacking.XFrameOptionsMiddleware' in your "
"MIDDLEWARE, so your pages will not be served with an "
"'x-frame-options' header. Unless there is a good reason for your "
"site to be served in a frame, you should consider enabling this "
"header to help prevent clickjacking attacks.",
id='security.W002',
)
W004 = Warning(
"You have not set a value for the SECURE_HSTS_SECONDS setting. "
"If your entire site is served only over SSL, you may want to consider "
"setting a value and enabling HTTP Strict Transport Security. "
"Be sure to read the documentation first; enabling HSTS carelessly "
"can cause serious, irreversible problems.",
id='security.W004',
)
W005 = Warning(
"You have not set the SECURE_HSTS_INCLUDE_SUBDOMAINS setting to True. "
"Without this, your site is potentially vulnerable to attack "
"via an insecure connection to a subdomain. Only set this to True if "
"you are certain that all subdomains of your domain should be served "
"exclusively via SSL.",
id='security.W005',
)
W006 = Warning(
"Your SECURE_CONTENT_TYPE_NOSNIFF setting is not set to True, "
"so your pages will not be served with an "
"'X-Content-Type-Options: nosniff' header. "
"You should consider enabling this header to prevent the "
"browser from identifying content types incorrectly.",
id='security.W006',
)
W008 = Warning(
"Your SECURE_SSL_REDIRECT setting is not set to True. "
"Unless your site should be available over both SSL and non-SSL "
"connections, you may want to either set this setting True "
"or configure a load balancer or reverse-proxy server "
"to redirect all connections to HTTPS.",
id='security.W008',
)
W009 = Warning(
"Your SECRET_KEY has less than %(min_length)s characters, less than "
"%(min_unique_chars)s unique characters, or it's prefixed with "
"'%(insecure_prefix)s' indicating that it was generated automatically by "
"Django. Please generate a long and random SECRET_KEY, otherwise many of "
"Django's security-critical features will be vulnerable to attack." % {
'min_length': SECRET_KEY_MIN_LENGTH,
'min_unique_chars': SECRET_KEY_MIN_UNIQUE_CHARACTERS,
'insecure_prefix': SECRET_KEY_INSECURE_PREFIX,
},
id='security.W009',
)
W018 = Warning(
"You should not have DEBUG set to True in deployment.",
id='security.W018',
)
W019 = Warning(
"You have "
"'django.middleware.clickjacking.XFrameOptionsMiddleware' in your "
"MIDDLEWARE, but X_FRAME_OPTIONS is not set to 'DENY'. "
"Unless there is a good reason for your site to serve other parts of "
"itself in a frame, you should change it to 'DENY'.",
id='security.W019',
)
W020 = Warning(
"ALLOWED_HOSTS must not be empty in deployment.",
id='security.W020',
)
W021 = Warning(
"You have not set the SECURE_HSTS_PRELOAD setting to True. Without this, "
"your site cannot be submitted to the browser preload list.",
id='security.W021',
)
W022 = Warning(
'You have not set the SECURE_REFERRER_POLICY setting. Without this, your '
'site will not send a Referrer-Policy header. You should consider '
'enabling this header to protect user privacy.',
id='security.W022',
)
E023 = Error(
'You have set the SECURE_REFERRER_POLICY setting to an invalid value.',
hint='Valid values are: {}.'.format(', '.join(sorted(REFERRER_POLICY_VALUES))),
id='security.E023',
)
E100 = Error(
"DEFAULT_HASHING_ALGORITHM must be 'sha1' or 'sha256'.",
id='security.E100',
)
def _security_middleware():
return 'django.middleware.security.SecurityMiddleware' in settings.MIDDLEWARE
def _xframe_middleware():
return 'django.middleware.clickjacking.XFrameOptionsMiddleware' in settings.MIDDLEWARE
@register(Tags.security, deploy=True)
def check_security_middleware(app_configs, **kwargs):
passed_check = _security_middleware()
return [] if passed_check else [W001]
@register(Tags.security, deploy=True)
def check_xframe_options_middleware(app_configs, **kwargs):
passed_check = _xframe_middleware()
return [] if passed_check else [W002]
@register(Tags.security, deploy=True)
def check_sts(app_configs, **kwargs):
passed_check = not _security_middleware() or settings.SECURE_HSTS_SECONDS
return [] if passed_check else [W004]
@register(Tags.security, deploy=True)
def check_sts_include_subdomains(app_configs, **kwargs):
passed_check = (
not _security_middleware() or
not settings.SECURE_HSTS_SECONDS or
settings.SECURE_HSTS_INCLUDE_SUBDOMAINS is True
)
return [] if passed_check else [W005]
@register(Tags.security, deploy=True)
def check_sts_preload(app_configs, **kwargs):
passed_check = (
not _security_middleware() or
not settings.SECURE_HSTS_SECONDS or
settings.SECURE_HSTS_PRELOAD is True
)
return [] if passed_check else [W021]
@register(Tags.security, deploy=True)
def check_content_type_nosniff(app_configs, **kwargs):
passed_check = (
not _security_middleware() or
settings.SECURE_CONTENT_TYPE_NOSNIFF is True
)
return [] if passed_check else [W006]
@register(Tags.security, deploy=True)
def check_ssl_redirect(app_configs, **kwargs):
passed_check = (
not _security_middleware() or
settings.SECURE_SSL_REDIRECT is True
)
return [] if passed_check else [W008]
@register(Tags.security, deploy=True)
def check_secret_key(app_configs, **kwargs):
try:
secret_key = settings.SECRET_KEY
except (ImproperlyConfigured, AttributeError):
passed_check = False
else:
passed_check = (
len(set(secret_key)) >= SECRET_KEY_MIN_UNIQUE_CHARACTERS and
len(secret_key) >= SECRET_KEY_MIN_LENGTH and
not secret_key.startswith(SECRET_KEY_INSECURE_PREFIX)
)
return [] if passed_check else [W009]
@register(Tags.security, deploy=True)
def check_debug(app_configs, **kwargs):
passed_check = not settings.DEBUG
return [] if passed_check else [W018]
@register(Tags.security, deploy=True)
def check_xframe_deny(app_configs, **kwargs):
passed_check = (
not _xframe_middleware() or
settings.X_FRAME_OPTIONS == 'DENY'
)
return [] if passed_check else [W019]
@register(Tags.security, deploy=True)
def check_allowed_hosts(app_configs, **kwargs):
return [] if settings.ALLOWED_HOSTS else [W020]
@register(Tags.security, deploy=True)
def check_referrer_policy(app_configs, **kwargs):
if _security_middleware():
if settings.SECURE_REFERRER_POLICY is None:
return [W022]
# Support a comma-separated string or iterable of values to allow fallback.
if isinstance(settings.SECURE_REFERRER_POLICY, str):
values = {v.strip() for v in settings.SECURE_REFERRER_POLICY.split(',')}
else:
values = set(settings.SECURE_REFERRER_POLICY)
if not values <= REFERRER_POLICY_VALUES:
return [E023]
return []
# RemovedInDjango40Warning
@register(Tags.security)
def check_default_hashing_algorithm(app_configs, **kwargs):
if settings.DEFAULT_HASHING_ALGORITHM not in {'sha1', 'sha256'}:
return [E100]
return []
|
wkschwartz/django
|
django/core/checks/security/base.py
|
Python
|
bsd-3-clause
| 8,149
|
from Products.DataCollector.plugins.CollectorPlugin import (
SnmpPlugin, GetTableMap,
)
class TrueNASDataset(SnmpPlugin):
relname = 'trueNASDatasets'
modname = 'ZenPacks.iXsystems.TrueNAS.TrueNASDataset'
deviceProperties = SnmpPlugin.deviceProperties + ('zTrueNASIgnorePools', 'zTrueNASIgnoreDatasets',)
snmpGetTableMaps = (
GetTableMap(
'datasetTable', '1.3.6.1.4.1.50536.1.2', {
'.1.1.2': 'datasetDescr',
# '.2': 'availableKB',
# '.3': 'usedKB',
#'.4': 'sizeKB',
#'.1.6.0': 'datasetAvailable',
'.1.1.4': 'datasetSize',
#'.1.1.5.0': 'datasetUsed',
'.1.1.3': 'datasetAllocationUnits',
#'.1.1.2.0': 'datasetDescr',
}
),
)
def process(self, device, results, log):
datasets = results[1].get('datasetTable', {})
rm = self.relMap()
for snmpindex, row in datasets.items():
allocationUnit = int(row.get('datasetAllocationUnits'))
hiddenSize = int(row.get('datasetSize')) * allocationUnit
ignore = False
name = row.get('datasetDescr')
toIgnoreDatasets = getattr(device, 'zTrueNASIgnoreDatasets', [])
for toIgnoreDataset in toIgnoreDatasets:
if toIgnoreDataset in name:
log.warn('Skipping dataset {0} since it is set to be ignored'.format(name))
ignore = True
if not ignore:
rm.append(self.objectMap({
'id': self.prepId(name),
'title': name,
'snmpindex': snmpindex.strip('.'),
'allocationUnit': allocationUnit,
'size': hiddenSize,
'hiddenSize': hiddenSize,
}))
return rm
|
N-faycal/ZenPacks.iXsystems.TrueNAS
|
build/lib/ZenPacks/iXsystems/TrueNAS/modeler/plugins/iXsystems/snmp/TrueNASDataset.py
|
Python
|
bsd-2-clause
| 1,799
|
#!/usr/bin/python
# uart-eg01.py
#
# to run on the other end of the UART
# screen /dev/ttyUSB1 115200
import serial
def readlineCR(uart):
line = b''
while True:
byte = uart.read()
line += byte
if byte == b'\r':
return line
uart = serial.Serial('/dev/ttyUSB0', baudrate=115200, timeout=1)
while True:
uart.write(b'\r\nSay something: ')
line = readlineCR(uart)
if line != b'exit\r':
lineStr = '\r\nYou sent : {}'.format(line.decode('utf-8'))
uart.write(lineStr.encode('utf-8'))
else:
uart.write(b'\r\nexiting\r\n')
uart.close()
exit(0)
|
CurtisLeeBolin/Examples_Python
|
UART01.py
|
Python
|
unlicense
| 567
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import unittest
import requests_mock
from airflow.operators.http_operator import SimpleHttpOperator
try:
from unittest import mock
except ImportError:
import mock
class SimpleHttpOpTests(unittest.TestCase):
def setUp(self):
os.environ['AIRFLOW_CONN_HTTP_EXAMPLE'] = 'http://www.example.com'
@requests_mock.mock()
def test_response_in_logs(self, m):
"""
Test that when using SimpleHttpOperator with 'GET',
the log contains 'Example Domain' in it
"""
m.get('http://www.example.com', text='Example.com fake response')
operator = SimpleHttpOperator(
task_id='test_HTTP_op',
method='GET',
endpoint='/',
http_conn_id='HTTP_EXAMPLE',
log_response=True,
)
with mock.patch.object(operator.log, 'info') as mock_info:
operator.execute(None)
mock_info.assert_called_with('Example.com fake response')
|
malmiron/incubator-airflow
|
tests/operators/test_http_operator.py
|
Python
|
apache-2.0
| 1,793
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import unittest
import json
from os.path import join as pjoin
from libcloud.utils.py3 import httplib, urlparse
from rackspace_monitoring.base import (MonitoringDriver, Entity,
NotificationPlan,
Notification, CheckType, Alarm, Check,
AlarmChangelog)
from rackspace_monitoring.drivers.rackspace import (RackspaceMonitoringDriver,
RackspaceMonitoringValidationError)
from test import MockResponse, MockHttpTestCase
from test.file_fixtures import FIXTURES_ROOT
from test.file_fixtures import FileFixtures
from secrets import RACKSPACE_PARAMS
FIXTURES_ROOT['monitoring'] = pjoin(os.getcwd(), 'test/fixtures')
class MonitoringFileFixtures(FileFixtures):
def __init__(self, sub_dir=''):
super(MonitoringFileFixtures, self).__init__(
fixtures_type='monitoring',
sub_dir=sub_dir)
class RackspaceTests(unittest.TestCase):
def setUp(self):
RackspaceMonitoringDriver.connectionCls.conn_classes = (
RackspaceMockHttp, RackspaceMockHttp)
RackspaceMonitoringDriver.connectionCls.auth_url = \
'https://auth.api.example.com/v1.1/'
RackspaceMockHttp.type = None
self.driver = RackspaceMonitoringDriver(key=RACKSPACE_PARAMS[0],
secret=RACKSPACE_PARAMS[1])
def test_list_monitoring_zones(self):
result = list(self.driver.list_monitoring_zones())
self.assertEqual(len(result), 1)
self.assertEqual(result[0].id, 'mzxJ4L2IU')
def test_list_entities(self):
result = list(self.driver.list_entities())
self.assertEqual(len(result), 6)
self.assertEqual(result[0].id, 'en8B9YwUn6')
self.assertEqual(result[0].label, 'bar')
def test_list_checks(self):
en = self.driver.list_entities()[0]
result = list(self.driver.list_checks(entity=en))
self.assertEqual(len(result), 1)
self.assertEqual(result[0].label, 'bar')
self.assertEqual(result[0].details['url'], 'http://www.foo.com')
self.assertEqual(result[0].details['method'], 'GET')
def test_list_alarms(self):
en = self.driver.list_entities()[0]
result = list(self.driver.list_alarms(entity=en))
self.assertEqual(len(result), 1)
self.assertEqual(result[0].check_id, 'chhJwYeArX')
self.assertEqual(result[0].notification_plan_id, 'npIXxOAn5')
def test_list_check_types(self):
result = list(self.driver.list_check_types())
self.assertEqual(len(result), 2)
self.assertEqual(result[0].id, 'remote.dns')
self.assertTrue(result[0].is_remote)
def test_list_metrics(self):
en = self.driver.list_entities()[0]
ch = self.driver.list_checks(entity=en)[0]
result = list(self.driver.list_metrics(entity_id=en.id, check_id=ch.id))
self.assertEqual(len(result), 3)
self.assertEqual(result[0].name, 'mzGRD.constdbl')
def test_list_notification_types(self):
result = list(self.driver.list_notification_types())
self.assertEqual(len(result), 1)
self.assertEqual(result[0].id, 'webhook')
def test_list_notifications(self):
result = list(self.driver.list_notifications())
self.assertEqual(len(result), 2)
self.assertEqual(result[0].type, 'webhook')
self.assertEqual(result[0].details['url'],
'http://www.postbin.org/lulz')
def test_list_notification_plans(self):
result = list(self.driver.list_notification_plans())
self.assertEqual(len(result), 8)
self.assertEqual(result[0].label, 'test-notification-plan')
def test_list_agents(self):
result = list(self.driver.list_agents())
self.assertEqual(len(result), 3)
self.assertEqual(result[0].id, '612deec7-1a3d-429f-c2a2-aadc59')
def test_list_agent_connections(self):
result = list(self.driver.list_agent_connections('612deec7-1a3d-429f-c2a2-aadc59'))
self.assertEqual(len(result), 2)
self.assertEqual(result[0].id, 'cn0ElI4abc')
self.assertEqual(result[0].agent_ip, '192.168.0.1')
self.assertEqual(result[1].id, 'cnAAAAAAAA')
self.assertEqual(result[1].agent_ip, '192.168.0.1')
def test_get_agent_host_info(self):
result = self.driver.get_agent_host_info('aaaaa', 'cpus')
self.assertEqual(len(result), 1)
self.assertEqual(result[0]['vendor'], 'AMD')
self.assertEqual(result[0]['name'], 'cpu.0')
self.assertEqual(result[0]['total_cores'], 1)
result = self.driver.get_agent_host_info('aaaaa', 'memory')
self.assertEqual(result['actual_free'], 2684153856)
self.assertEqual(result['free'], 236662784)
self.assertEqual(result['ram'], 4016)
self.assertEqual(result['total'], 4208316416)
self.assertEqual(result['used'], 3971653632)
self.assertEqual(result['used_percent'], 36.217869792422)
result = self.driver.get_agent_host_info('aaaaa', 'system')
self.assertEqual(result['name'], 'Linux')
self.assertEqual(result['arch'], 'x86_64')
self.assertEqual(result['version'], '2.6.32-33-server')
self.assertEqual(result['vendor'], 'Ubuntu')
self.assertEqual(result['vendor_version'], '10.04')
self.assertEqual(result['vendor_code_name'], 'lucid')
self.assertEqual(result['description'], 'Ubuntu 10.04')
result = self.driver.get_agent_host_info('aaaaa', 'network_interfaces')
self.assertEqual(len(result), 2)
self.assertEqual(result[0]['address'], '127.0.0.1')
self.assertEqual(result[0]['broadcast'], '0.0.0.0')
self.assertEqual(result[1]['address'], '192.168.0.2')
self.assertEqual(result[1]['broadcast'], '192.168.0.255')
result = self.driver.get_agent_host_info('aaaaa', 'processes')
self.assertEqual(len(result), 1)
self.assertEqual(result[0]['pid'], 13702)
self.assertEqual(result[0]['time_sys'], 570)
self.assertEqual(result[0]['memory_page_faults'], 37742)
result = self.driver.get_agent_host_info('aaaaa', 'disks')
self.assertEqual(len(result), 1)
self.assertEqual(result[0]['queue'], 0.024919932106766)
self.assertEqual(result[0]['name'], '/')
self.assertEqual(result[0]['wtime'], 517366712)
result = self.driver.get_agent_host_info('aaaaa', 'filesystems')
self.assertEqual(len(result), 1)
self.assertEqual(result[0]['dir_name'], '/')
self.assertEqual(result[0]['dev_name'], '/dev/xvda1')
self.assertEqual(result[0]['type_name'], 'local')
self.assertEqual(result[0]['sys_type_name'], 'ext3')
def test_get_entity_targets(self):
result = self.driver.get_entity_agent_targets('aaaaa', 'agent.disk')
self.assertEqual(len(result), 1)
self.assertEqual(result[0]['targets'][0], '/')
self.assertEqual(result[0]['targets'][1], '/dev')
def test_get_entity_host_info(self):
result = self.driver.get_entity_host_info('aaaaa', 'cpus')
self.assertEqual(len(result), 1)
self.assertEqual(result[0]['vendor'], 'AMD')
self.assertEqual(result[0]['name'], 'cpu.0')
self.assertEqual(result[0]['total_cores'], 1)
result = self.driver.get_entity_host_info('aaaaa', 'memory')
self.assertEqual(result['actual_free'], 2684153856)
self.assertEqual(result['free'], 236662784)
self.assertEqual(result['ram'], 4016)
self.assertEqual(result['total'], 4208316416)
self.assertEqual(result['used'], 3971653632)
self.assertEqual(result['used_percent'], 36.217869792422)
result = self.driver.get_entity_host_info('aaaaa', 'system')
self.assertEqual(result['name'], 'Linux')
self.assertEqual(result['arch'], 'x86_64')
self.assertEqual(result['version'], '2.6.32-33-server')
self.assertEqual(result['vendor'], 'Ubuntu')
self.assertEqual(result['vendor_version'], '10.04')
self.assertEqual(result['vendor_code_name'], 'lucid')
self.assertEqual(result['description'], 'Ubuntu 10.04')
result = self.driver.get_entity_host_info('aaaaa', 'network_interfaces')
self.assertEqual(len(result), 2)
self.assertEqual(result[0]['address'], '127.0.0.1')
self.assertEqual(result[0]['broadcast'], '0.0.0.0')
self.assertEqual(result[1]['address'], '192.168.0.2')
self.assertEqual(result[1]['broadcast'], '192.168.0.255')
result = self.driver.get_entity_host_info('aaaaa', 'processes')
self.assertEqual(len(result), 1)
self.assertEqual(result[0]['pid'], 13702)
self.assertEqual(result[0]['time_sys'], 570)
self.assertEqual(result[0]['memory_page_faults'], 37742)
result = self.driver.get_entity_host_info('aaaaa', 'disks')
self.assertEqual(len(result), 1)
self.assertEqual(result[0]['queue'], 0.024919932106766)
self.assertEqual(result[0]['name'], '/')
self.assertEqual(result[0]['wtime'], 517366712)
result = self.driver.get_entity_host_info('aaaaa', 'filesystems')
self.assertEqual(len(result), 1)
self.assertEqual(result[0]['dir_name'], '/')
self.assertEqual(result[0]['dev_name'], '/dev/xvda1')
self.assertEqual(result[0]['type_name'], 'local')
self.assertEqual(result[0]['sys_type_name'], 'ext3')
def test_ex_list_alarm_notification_history_checks(self):
entity = self.driver.list_entities()[0]
alarm = self.driver.list_alarms(entity=entity)[0]
result = self.driver.ex_list_alarm_notification_history_checks(
entity=entity,
alarm=alarm)
self.assertEqual(len(result['check_ids']), 2)
def test_ex_list_alarm_notification_history(self):
entity = self.driver.list_entities()[0]
alarm = self.driver.list_alarms(entity=entity)[0]
check = self.driver.list_checks(entity=entity)[0]
result = self.driver.ex_list_alarm_notification_history(entity=entity,
alarm=alarm, check=check)
self.assertEqual(len(result), 1)
self.assertTrue('timestamp' in result[0])
self.assertTrue('notification_plan_id' in result[0])
self.assertTrue('state' in result[0])
self.assertTrue('transaction_id' in result[0])
self.assertTrue('notification_results' in result[0])
def test_test_alarm(self):
entity = self.driver.list_entities()[0]
        criteria = ('if (metric[\"code\"] == \"404\") { return CRITICAL, '
                    ' \"not found\" } return OK')
check_data = []
result = self.driver.test_alarm(entity=entity, criteria=criteria,
check_data=check_data)
self.assertTrue('timestamp' in result[0])
self.assertTrue('computed_state' in result[0])
self.assertTrue('status' in result[0])
def test_check(self):
entity = self.driver.list_entities()[0]
check_data = {'label': 'test', 'monitoring_zones': ['mzA'],
'target_alias': 'default', 'details': {'url':
'http://www.google.com'}, 'type': 'remote.http'}
result = self.driver.test_check(entity=entity)
self.assertTrue('available' in result[0])
self.assertTrue('monitoring_zone_id' in result[0])
self.assertTrue('available' in result[0])
self.assertTrue('metrics' in result[0])
def test_delete_entity_success(self):
entity = self.driver.list_entities()[0]
result = self.driver.delete_entity(entity=entity)
self.assertTrue(result)
def test_delete_entity_children_exist(self):
entity = self.driver.list_entities()[1]
RackspaceMockHttp.type = 'CHILDREN_EXIST'
try:
self.driver.delete_entity(entity=entity)
except RackspaceMonitoringValidationError:
pass
else:
self.fail('Exception was not thrown')
def test_delete_check_success(self):
en = self.driver.list_entities()[0]
check = self.driver.list_checks(entity=en)[0]
check.delete()
def test_delete_alarm(self):
en = self.driver.list_entities()[0]
alarm = self.driver.list_alarms(entity=en)[0]
alarm.delete()
def test_create_notification_plan_with_metadata(self):
notification = self.driver.list_notifications()[0]
notif_plan = self.driver.create_notification_plan(
label="demo",
critical_state=[notification.id],
metadata={
"cli": "rackmoncli"
})
        if hasattr(self, 'assertIsNotNone'):
self.assertIsNotNone(notif_plan)
self.assertIsNotNone(notif_plan.metadata)
else:
self.assertTrue(notif_plan is not None)
self.assertTrue(notif_plan.metadata is not None)
self.assertEqual(notif_plan.metadata, {
"cli": "rackmoncli"
})
def test_create_notification_with_metadata(self):
notification = self.driver.create_notification(
label="demo notification",
type="email",
details={
"address": "test@me.com"
},
metadata={
"cli": "rackmoncli"
}
)
        if hasattr(self, 'assertIsNotNone'):
self.assertIsNotNone(notification)
self.assertIsNotNone(notification.metadata)
else:
self.assertTrue(notification is not None)
self.assertTrue(notification.metadata is not None)
self.assertEqual(notification.metadata, {
"cli": "rackmoncli"
})
def test_create_alarm_with_metadata(self):
notification_plan = self.driver.list_notification_plans()[0]
en = self.driver.list_entities()[0]
check = self.driver.list_checks(entity=en)[0]
alarm = self.driver.create_alarm(
en,
label="demo alarm",
check_id=check.id,
criteria=("if (metric[\"duration\"] >= 2) { "
"return new AlarmStatus(OK); } "
"return new AlarmStatus(CRITICAL);"),
notification_plan_id=notification_plan.id,
metadata={
"cli": "rackmoncli"
}
)
        if hasattr(self, 'assertIsNotNone'):
self.assertIsNotNone(alarm)
self.assertIsNotNone(alarm.metadata)
else:
self.assertTrue(alarm is not None)
self.assertTrue(alarm.metadata is not None)
self.assertEqual(alarm.metadata, {
"cli": "rackmoncli"
})
def test_delete_notification(self):
notification = self.driver.list_notifications()[0]
notification.delete()
def test_delete_notification_plan(self):
notification_plan = self.driver.list_notification_plans()[0]
notification_plan.delete()
def test_views_metric_list(self):
metric_list = self.driver.ex_views_metric_list()
self.assertTrue(len(metric_list) > 0)
def test_list_agent_tokens(self):
tokens = self.driver.list_agent_tokens()
fixture = RackspaceMockHttp.fixtures.load('agent_tokens.json')
fixture_tokens = json.loads(fixture)
first_token = fixture_tokens["values"][0]["token"]
self.assertEqual(tokens[0].token, first_token)
self.assertEqual(len(tokens), 11)
def test_delete_agent_token(self):
agent_token = self.driver.list_agent_tokens()[0]
self.assertTrue(self.driver.delete_agent_token(
agent_token=agent_token))
def test_get_monitoring_zone(self):
monitoring_zone = self.driver \
.get_monitoring_zone(monitoring_zone_id='mzord')
self.assertEqual(monitoring_zone.id, 'mzord')
self.assertEqual(monitoring_zone.label, 'ord')
self.assertEqual(monitoring_zone.country_code, 'US')
def test_ex_traceroute(self):
monitoring_zone = self.driver.list_monitoring_zones()[0]
result = self.driver.ex_traceroute(monitoring_zone=monitoring_zone,
target='google.com')
self.assertEqual(result[0]['number'], 1)
self.assertEqual(result[0]['rtts'], [0.572, 0.586, 0.683])
self.assertEqual(result[0]['ip'], '50.57.61.2')
def test__url_to_obj_ids(self):
pairs = [
['http://127.0.0.1:50000/v1.0/7777/entities/enSTkViNvw',
{'entity_id': 'enSTkViNvw'}],
['https://monitoring.api.rackspacecloud.com/v1.0/7777/entities/enSTkViNvw',
{'entity_id': 'enSTkViNvw'}],
['https://monitoring.api.rackspacecloud.com/v2.0/7777/entities/enSTkViNvu',
{'entity_id': 'enSTkViNvu'}],
['https://monitoring.api.rackspacecloud.com/v2.0/7777/alarms/alfoo',
{'alarm_id': 'alfoo'}],
['https://monitoring.api.rackspacecloud.com/v2.0/7777/entities/enFoo/checks/chBar',
{'entity_id': 'enFoo', 'check_id': 'chBar'}],
['https://monitoring.api.rackspacecloud.com/v2.0/7777/entities/enFoo/alarms/alBar',
{'entity_id': 'enFoo', 'alarm_id': 'alBar'}],
]
for url, expected in pairs:
result = self.driver._url_to_obj_ids(url)
self.assertEqual(result, expected)
def test_force_base_url(self):
RackspaceMonitoringDriver.connectionCls.conn_classes = (
RackspaceMockHttp, RackspaceMockHttp)
RackspaceMonitoringDriver.connectionCls.auth_url = \
'https://auth.api.example.com/v1.1/'
RackspaceMockHttp.type = None
driver = RackspaceMonitoringDriver(key=RACKSPACE_PARAMS[0],
secret=RACKSPACE_PARAMS[1],
ex_force_base_url='http://www.todo.com')
driver.list_entities()
self.assertEqual(driver.connection._ex_force_base_url,
'http://www.todo.com/23213')
def test_force_base_url_trailing_slash(self):
RackspaceMonitoringDriver.connectionCls.conn_classes = (
RackspaceMockHttp, RackspaceMockHttp)
RackspaceMonitoringDriver.connectionCls.auth_url = \
'https://auth.api.example.com/v1.1/'
RackspaceMockHttp.type = None
driver = RackspaceMonitoringDriver(key=RACKSPACE_PARAMS[0],
secret=RACKSPACE_PARAMS[1],
ex_force_base_url='http://www.todo.com/')
driver.list_entities()
self.assertEqual(driver.connection._ex_force_base_url,
'http://www.todo.com/23213')
def test_force_auth_token(self):
RackspaceMonitoringDriver.connectionCls.conn_classes = (
RackspaceMockHttp, RackspaceMockHttp)
RackspaceMonitoringDriver.connectionCls.auth_url = \
'https://auth.api.example.com/v1.1/'
RackspaceMockHttp.type = None
driver = RackspaceMonitoringDriver(key=RACKSPACE_PARAMS[0],
secret=RACKSPACE_PARAMS[1],
ex_force_base_url='http://www.todo.com',
ex_force_auth_token='matoken')
driver.list_entities()
self.assertEqual(driver.connection._ex_force_base_url,
'http://www.todo.com')
self.assertEqual(driver.connection.auth_token,
'matoken')
def test_force_base_url_is_none(self):
RackspaceMonitoringDriver.connectionCls.conn_classes = (
RackspaceMockHttp, RackspaceMockHttp)
RackspaceMonitoringDriver.connectionCls.auth_url = \
'https://auth.api.example.com/v1.1/'
RackspaceMockHttp.type = None
driver = RackspaceMonitoringDriver(key=RACKSPACE_PARAMS[0],
secret=RACKSPACE_PARAMS[1])
driver.list_entities()
self.assertEqual(driver.connection._ex_force_base_url,
'https://monitoring.api.rackspacecloud.com/v1.0/23213')
class RackspaceMockHttp(MockHttpTestCase):
auth_fixtures = MonitoringFileFixtures('rackspace/auth')
fixtures = MonitoringFileFixtures('rackspace/v1.0')
json_content_headers = {'content-type': 'application/json; charset=UTF-8'}
def _v2_0_tokens(self, method, url, body, headers):
body = self.auth_fixtures.load('_v2_0_tokens.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v2_0_tokens_CHILDREN_EXIST(self, method, url, body, headers):
body = self.auth_fixtures.load('_v2_0_tokens.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_monitoring_zones(self, method, url, body, headers):
body = self.fixtures.load('monitoring_zones.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_monitoring_zones_mzord(self, method, url, body, headers):
body = self.fixtures.load('get_monitoring_zone.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_monitoring_zones_mzxJ4L2IU_traceroute(self, method, url, body,
headers):
body = self.fixtures.load('ex_traceroute.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_entities(self, method, url, body, headers):
body = self.fixtures.load('entities.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_check_types(self, method, url, body, headers):
body = self.fixtures.load('check_types.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_notification_types(self, method, url, body, headers):
body = self.fixtures.load('notification_types.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_notifications(self, method, url, body, headers):
if method == 'POST':
# create method
create_json_content_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=UTF-8',
'location': 'http://example.com/v2.0/23213/notifications/ntQVm5IyiR'
}
body = self.fixtures.load('create_notification.json')
return (httplib.CREATED, body, create_json_content_headers,
httplib.responses[httplib.CREATED])
elif method == 'GET':
body = self.fixtures.load('notifications.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
else:
raise NotImplementedError(
'method {} for _v1_0_23213_notifications not defined'.format(
method))
def _v1_0_23213_notification_plans(self, method, url, body, headers):
if method == 'POST':
# create method
create_json_content_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=UTF-8',
'location': 'http://example.com/v2.0/23213/notification_plans/npIXxOAn5'
}
body = self.fixtures.load('create_notification_plan.json')
return (httplib.CREATED, body, create_json_content_headers,
httplib.responses[httplib.CREATED])
elif method == 'GET':
body = self.fixtures.load('notification_plans.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
else:
            raise NotImplementedError(
                'method {} for _v1_0_23213_notification_plans not defined'.format(
                    method))
def _v1_0_23213_entities_en8B9YwUn6_checks(self, method, url, body, headers):
body = self.fixtures.load('checks.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_entities_aaaaa_agent_check_types_agent_disk_targets(self,
method,
url,
body,
headers):
body = self.fixtures.load('agent_check_types_agent_disk_targets.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_entities_en8B9YwUn6_alarms(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('alarms.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
elif method == 'POST':
# create method
create_json_content_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=UTF-8',
'location': ('http://example.com/v2.0/23213/entities/'
'en8B9YwUn6/alarms/aldIpNY8t3')
}
body = self.fixtures.load('create_alarm.json')
return (httplib.CREATED, body, create_json_content_headers,
httplib.responses[httplib.CREATED])
else:
raise NotImplementedError(
("method {} for _v1_0_23213_entities_en8B9YwUn6_alarms"
"not defined").format(method))
def _v1_0_23213_entities_en8B9YwUn6_alarms_aldIpNY8t3_notification_history(self,
method,
url, body,
headers):
body = self.fixtures.load('list_alarm_history_checks.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_entities_en8B9YwUn6_alarms_aldIpNY8t3_notification_history_chhJwYeArX(self,
method,
url, body,
headers):
body = self.fixtures.load('list_alarm_history.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_entities_en8B9YwUn6_test_alarm(self, method, url, body,
headers):
body = self.fixtures.load('test_alarm.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_entities_en8B9YwUn6_test_check(self, method, url, body,
headers):
body = self.fixtures.load('test_check.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_entities_en8B9YwUn6(self, method, url, body, headers):
body = ''
if method == 'DELETE':
return (httplib.NO_CONTENT, body, self.json_content_headers,
httplib.responses[httplib.NO_CONTENT])
raise NotImplementedError('')
def _v1_0_23213_entities_en8Xmk5lv1_CHILDREN_EXIST(self, method, url, body,
headers):
if method == 'DELETE':
body = self.fixtures.load('error_children_exist.json')
return (httplib.BAD_REQUEST, body, self.json_content_headers,
httplib.responses[httplib.NO_CONTENT])
raise NotImplementedError('')
def _v1_0_23213_entities_en8B9YwUn6_checks_chhJwYeArX(self, method, url, body,
headers):
if method == 'DELETE':
body = ''
return (httplib.NO_CONTENT, body, self.json_content_headers,
httplib.responses[httplib.NO_CONTENT])
raise NotImplementedError('')
def _v1_0_23213_entities_en8B9YwUn6_alarms_aldIpNY8t3(self, method, url, body,
headers):
if method == 'DELETE':
body = ''
return (httplib.NO_CONTENT, body, self.json_content_headers,
httplib.responses[httplib.NO_CONTENT])
elif method == 'GET':
body = json.loads(
self.fixtures.load('alarms.json'))['values'][0]
return (httplib.OK, json.dumps(body), self.json_content_headers,
httplib.responses[httplib.OK])
raise NotImplementedError(
("method {} for _v1_0_23213_entities_en8B9YwUn6_alarms_aldIpNY8t3"
" dne").format(method))
def _v1_0_23213_notifications_ntQVm5IyiR(self, method, url, body, headers):
if method == 'DELETE':
body = ''
return (httplib.NO_CONTENT, body, self.json_content_headers,
httplib.responses[httplib.NO_CONTENT])
elif method == 'GET':
body = json.loads(
self.fixtures.load('notifications.json'))['values'][0]
return (httplib.OK, json.dumps(body), self.json_content_headers,
httplib.responses[httplib.OK])
raise NotImplementedError(
'method {} for _v1_0_23213_notifications_ntQVm5IyiR dne'.format(
method))
def _v1_0_23213_notification_plans_npIXxOAn5(self, method, url, body, headers):
if method == 'DELETE':
body = ''
return (httplib.NO_CONTENT, body, self.json_content_headers,
httplib.responses[httplib.NO_CONTENT])
elif method == 'GET':
body = json.loads(
self.fixtures.load('notification_plans.json'))['values'][0]
return (httplib.OK, json.dumps(body), self.json_content_headers,
httplib.responses[httplib.OK])
raise NotImplementedError('')
def _v1_0_23213_agent_tokens_at28OJNsRB(self, method, url, body, headers):
if method == 'DELETE':
body = ''
return (httplib.NO_CONTENT, body, self.json_content_headers,
httplib.responses[httplib.NO_CONTENT])
def _v1_0_23213_agent_tokens(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('agent_tokens.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_agents(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('agents.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_agents_612deec7_1a3d_429f_c2a2_aadc59_connections(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('agent_connections.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_agents_aaaaa_host_info_cpus(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('agent_host_info_cpus.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_agents_aaaaa_host_info_memory(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('agent_host_info_memory.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_agents_aaaaa_host_info_system(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('agent_host_info_system.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_agents_aaaaa_host_info_network_interfaces(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('agent_host_info_network_interfaces.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_agents_aaaaa_host_info_processes(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('agent_host_info_processes.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_agents_aaaaa_host_info_disks(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('agent_host_info_disks.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_agents_aaaaa_host_info_filesystems(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('agent_host_info_filesystems.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_entities_aaaaa_agent_host_info_cpus(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('agent_host_info_cpus.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_entities_aaaaa_agent_host_info_memory(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('agent_host_info_memory.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_entities_aaaaa_agent_host_info_system(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('agent_host_info_system.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_entities_aaaaa_agent_host_info_network_interfaces(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('agent_host_info_network_interfaces.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_entities_aaaaa_agent_host_info_processes(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('agent_host_info_processes.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_entities_aaaaa_agent_host_info_disks(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('agent_host_info_disks.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_entities_aaaaa_agent_host_info_filesystems(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('agent_host_info_filesystems.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_entities_en8B9YwUn6_checks_chhJwYeArX_metrics(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('metrics.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
def _v1_0_23213_views_metric_list(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('views_metric_list.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
|
racker/rackspace-monitoring
|
test/test_rackspace.py
|
Python
|
apache-2.0
| 37,921
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common code for ADB and Fastboot CLI.
Usage introspects the given class for methods, args, and docs to show the user.
StartCli handles connecting to a device, calling the expected method, and
outputting the results.
"""
from __future__ import absolute_import
from __future__ import print_function
import inspect
import io
import re
import sys
import types
import gflags
from . import usb_exceptions
gflags.DEFINE_integer('timeout_ms', 10000, 'Timeout in milliseconds.')
gflags.DEFINE_list('port_path', [], 'USB port path integers (eg 1,2 or 2,1,1)')
gflags.DEFINE_string('serial', None, 'Device serial to look for (host:port or USB serial)', short_name='s')
gflags.DEFINE_bool('output_port_path', False,
'Affects the devices command only, outputs the port_path '
'alongside the serial if true.')
FLAGS = gflags.FLAGS
_BLACKLIST = {
'Connect',
'Close',
'ConnectDevice',
'DeviceIsAvailable',
}
def Uncamelcase(name):
parts = re.split(r'([A-Z][a-z]+)', name)[1:-1:2]
return ('-'.join(parts)).lower()
def Camelcase(name):
return name.replace('-', ' ').title().replace(' ', '')
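# Illustrative conversions performed by the two helpers above:
#   Uncamelcase('RebootBootloader') -> 'reboot-bootloader'
#   Camelcase('reboot-bootloader')  -> 'RebootBootloader'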
def Usage(adb_dev):
methods = inspect.getmembers(adb_dev, inspect.ismethod)
print('Methods:')
for name, method in methods:
if name.startswith('_'):
continue
if not method.__doc__:
continue
if name in _BLACKLIST:
continue
argspec = inspect.getargspec(method)
args = argspec.args[1:] or ''
# Surround default'd arguments with []
defaults = argspec.defaults or []
if args:
args = (args[:-len(defaults)] +
['[%s]' % arg for arg in args[-len(defaults):]])
args = ' ' + ' '.join(args)
print(' %s%s:' % (Uncamelcase(name), args))
print(' %s' % method.__doc__)
def StartCli(argv, device_callback, kwarg_callback=None, list_callback=None,
**device_kwargs):
"""Starts a common CLI interface for this usb path and protocol."""
argv = argv[1:]
if len(argv) == 1 and argv[0] == 'devices' and list_callback is not None:
# To mimic 'adb devices' output like:
# ------------------------------
# List of devices attached
# 015DB7591102001A device
# Or with --output_port_path:
# 015DB7591102001A device 1,2
# ------------------------------
for device in list_callback():
if FLAGS.output_port_path:
print('%s\tdevice\t%s' % (
device.serial_number,
','.join(str(port) for port in device.port_path)))
else:
print('%s\tdevice' % device.serial_number)
return
port_path = [int(part) for part in FLAGS.port_path]
serial = FLAGS.serial
device_kwargs.setdefault('default_timeout_ms', FLAGS.timeout_ms)
try:
dev = device_callback(
port_path=port_path, serial=serial, banner='python-adb',
**device_kwargs)
except usb_exceptions.DeviceNotFoundError as e:
print('No device found: %s' % e, file=sys.stderr)
return
except usb_exceptions.CommonUsbError as e:
print('Could not connect to device: %s' % e, file=sys.stderr)
raise
if not argv:
Usage(dev)
return
kwargs = {}
# CamelCase method names, eg reboot-bootloader -> RebootBootloader
method_name = Camelcase(argv[0])
method = getattr(dev, method_name)
argspec = inspect.getargspec(method)
num_args = len(argspec.args) - 1 # self is the first one.
# Handle putting the remaining command line args into the last normal arg.
argv.pop(0)
# Flags -> Keyword args
if kwarg_callback:
kwarg_callback(kwargs, argspec)
try:
if num_args == 1:
# Only one argument, so join them all with spaces
result = method(' '.join(argv), **kwargs)
else:
result = method(*argv, **kwargs)
if result is not None:
if isinstance(result, io.StringIO):
sys.stdout.write(result.getvalue())
elif isinstance(result, (list, types.GeneratorType)):
for r in result:
r = str(r)
sys.stdout.write(r)
if not r.endswith('\n'):
sys.stdout.write('\n')
else:
sys.stdout.write(result)
sys.stdout.write('\n')
except Exception as e: # pylint: disable=broad-except
sys.stdout.write(str(e))
return
finally:
dev.Close()
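# Hypothetical caller sketch (MyDevice below is an illustrative placeholder,
# not part of this module): a device class exposes a connect callable and a
# device-listing callable, and the CLI entry point forwards sys.argv:
#   StartCli(sys.argv, MyDevice.Connect, list_callback=MyDevice.Devices)
# list_callback() is expected to yield objects exposing serial_number and
# port_path, matching how the 'devices' command above formats its output.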
|
luci/luci-py
|
appengine/third_party/python-adb/adb/common_cli.py
|
Python
|
apache-2.0
| 4,894
|
# coding: utf-8
import logging
from core.config import config
from core.utils import exception_handler
from core.clients.docker_client import DockerManageClient
log = logging.getLogger(__name__)
class DockerNetwork:
network = None
def __init__(self):
self.name = config.DOCKER_NETWORK_NAME
self.client = DockerManageClient()
self.network = self.create()
def create(self):
"""
:rtype: DNetwork
"""
self.delete(self.name)
return self.client.create_network(self.name)
@exception_handler()
def delete(self, name=None):
if not name and self.network and self.network.id:
name = self.network.id
if name:
self.client.delete_network(name)
else:
log.error("Can't delete network with name equals None")
def connect_container(self, container_id):
self.network.connect(container_id)
def disconnect_container(self, container_id):
self.network.disconnect(container_id)
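# Minimal usage sketch (assumes a reachable Docker daemon and that
# config.DOCKER_NETWORK_NAME is configured):
#   network = DockerNetwork()                  # recreates the named network
#   network.connect_container(container_id)
#   network.disconnect_container(container_id)
#   network.delete()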
|
2gis/vmmaster
|
core/network/__init__.py
|
Python
|
mit
| 1,033
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of Pyswisseph.
#
# Copyright (c) 2007-2021 Stanislas Marquis <stan@astrorigin.com>
#
# Pyswisseph is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyswisseph is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Pyswisseph. If not, see <https://www.gnu.org/licenses/>.
# This docstring is published at the PyPI
# and is used as Distutils long description:
"""Python extension to AstroDienst Swiss Ephemeris library.
The Swiss Ephemeris is the de-facto standard library for astrological
calculations. It is a high-precision ephemeris, based upon the DE431
ephemerides from NASA's JPL, and covering the time range 13201 BC to AD 17191.
::
>>> import swisseph as swe
>>> # first set path to ephemeris files
>>> swe.set_ephe_path('/usr/share/sweph/ephe')
>>> # find time of next lunar eclipse
>>> jd = swe.julday(2007, 3, 3) # julian day
>>> res = swe.lun_eclipse_when(jd)
>>> ecltime = swe.revjul(res[1][0])
>>> print(ecltime)
(2007, 3, 3, 23.347926892340183)
>>> # get ecliptic position of asteroid 13681 "Monty Python"
>>> jd = swe.julday(2008, 3, 21)
>>> xx, rflags = swe.calc_ut(jd, swe.AST_OFFSET+13681)
>>> # print longitude
>>> print(xx[0])
0.09843983166646618
:Documentation: https://astrorigin.com/pyswisseph
:Repository: https://github.com/astrorigin/pyswisseph
"""
import os.path, sys
from setuptools import setup, Extension
from glob import glob
# Pyswisseph version string
# Our version string gets the version of the swisseph library (X.XX.XX)
# and our increment as suffix (.X), plus an eventual pre-release tag (.devX).
#
# Note about Github Actions:
# Each push tagged with vX.XX.XX.X triggers a stable release on PyPI.
# Each push tagged with vX.XX.XX.X.devX triggers a pre-release on PyPI.
# Do not forget to: increment version string right here, and modify file
# pyswisseph.c (PYSWISSEPH_VERSION).
VERSION = '2.10.02.0.dev1'
# Corresponding swisseph version string (for pkg-config)
swe_version = '2.10.02'
# Debian libswe-dev detection
# Set to True to try and find libswe in system.
# Set to False to use bundled libswe.
# Disabled by default until that package is updated.
swe_detection = False
# Include additional functions and constants (contrib submodule)
use_swephelp = True
# Sqlite3 detection
# Set to True to try and find libsqlite3-dev in system.
# Set to False to use bundled sqlite3.
# This is relevant only for the contrib submodule.
sqlite3_detection = True
# Compile flags
cflags = []
if sys.platform in ['win32', 'win_amd64']: # Windows
cflags.append('-D_CRT_SECURE_NO_WARNINGS')
elif sys.platform == 'darwin': # OSX
cflags.append('-Wno-error=unused-command-line-argument-hard-error-in-future')
else: # Linux etc
pass
# Link flags
ldflags = []
# Should not modify below...
# Test for pkg-config
has_pkgconfig = False
if swe_detection or (use_swephelp and sqlite3_detection):
print('Searching for pkg-config...')
try:
import subprocess
try:
subprocess.check_output(['pkg-config'], stderr=subprocess.STDOUT)
except AttributeError: # Python < 2.7
print('Python < 2.7, skipping pkg-config')
except subprocess.CalledProcessError:
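        # pkg-config invoked with no arguments exits with a non-zero status
        # ("must specify package names"), so landing in this handler means
        # the binary exists and is runnable.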
has_pkgconfig = True
print('Found pkg-config')
except OSError:
print('pkg-config not found')
except ImportError: # Python < 2.4
print('Python < 2.4, skipping pkg-config')
#
# Find libswe-dev
libswe_found = False
if has_pkgconfig and swe_detection:
print('Searching for libswe-dev...')
try:
out = subprocess.check_output(
['pkg-config', '--cflags-only-I', 'libswe-'+swe_version],
stderr=subprocess.STDOUT).decode().strip().split(' ')
swe_includes = [x[2:] for x in out if x != '']
out = subprocess.check_output(
['pkg-config', '--libs-only-l', 'libswe-'+swe_version],
stderr=subprocess.STDOUT).decode().strip().split(' ')
swe_libs = [x[2:] for x in out if x != '']
swe_sources = []
swe_depends = []
swe_defines = [('PYSWE_DEFAULT_EPHE_PATH',
'"/usr/share/libswe/ephe2:/usr/share/libswe/ephe"')]
libswe_found = True
print('pkg-config found libswe-'+swe_version)
except subprocess.CalledProcessError:
print('pkg-config has not found libswe-dev')
#
if not libswe_found: # using internal libswe
print('Using internal libswe')
swe_includes = ['libswe']
swe_sources = [
'libswe/swecl.c',
'libswe/swedate.c',
'libswe/swehel.c',
'libswe/swehouse.c',
'libswe/swejpl.c',
'libswe/swemmoon.c',
'libswe/swemplan.c',
'libswe/sweph.c',
'libswe/swephlib.c']
swe_depends = [
'libswe/swedate.h',
'libswe/swehouse.h',
'libswe/swemptab.h',
'libswe/swenut2000a.h',
'libswe/sweph.h',
#'libswe/swedll.h',
'libswe/swejpl.h',
'libswe/sweodef.h',
'libswe/swephexp.h',
'libswe/swephlib.h']
swe_libs = []
swe_defines = []
# Find sqlite3
sqlite3_found = False
if has_pkgconfig and use_swephelp and sqlite3_detection:
print('Searching for libsqlite3-dev')
try:
out = subprocess.check_output(
['pkg-config', '--cflags-only-I', 'sqlite3'],
stderr=subprocess.STDOUT).decode().strip().split(' ')
sqlite3_includes = [x[2:] for x in out if x != '']
out = subprocess.check_output(
['pkg-config', '--libs-only-l', 'sqlite3'],
stderr=subprocess.STDOUT).decode().strip().split(' ')
sqlite3_libs = [x[2:] for x in out if x != '']
sqlite3_defines = []
sqlite3_sources = []
sqlite3_depends = []
sqlite3_found = True
print('pkg-config has found libsqlite3-dev')
except subprocess.CalledProcessError:
print('pkg-config has not found libsqlite3-dev')
#
if use_swephelp and not sqlite3_found: # using internal sqlite3
print('Using internal sqlite3')
sqlite3_defines = [
('SQLITE_DEFAULT_AUTOVACUUM', 1),
('SQLITE_DEFAULT_FOREIGN_KEYS', 1),
('SQLITE_DEFAULT_MEMSTATUS', 0),
('SQLITE_DEFAULT_WAL_SYNCHRONOUS', 1),
('SQLITE_DOESNT_MATCH_BLOBS', 1),
('SQLITE_DQS', 0),
('SQLITE_ENABLE_COLUMN_METADATA', 1),
('SQLITE_ENABLE_FTS4', 1),
('SQLITE_MAX_EXPR_DEPTH', 0),
('SQLITE_OMIT_DEPRECATED', 1),
('SQLITE_OMIT_SHARED_CACHE', 1),
('SQLITE_SECURE_DELETE', 1),
('SQLITE_THREADSAFE', 1)
]
sqlite3_includes = ['swephelp/sqlite3']
sqlite3_sources = ['swephelp/sqlite3/sqlite3.c']
sqlite3_depends = ['swephelp/sqlite3/sqlite3.h']
sqlite3_libs = []
# Defines
defines = swe_defines
if use_swephelp:
defines += sqlite3_defines
# Include paths
includes = swe_includes
if use_swephelp:
includes += ['swephelp']
includes += sqlite3_includes
# Sources
sources = ['pyswisseph.c'] + swe_sources
if use_swephelp:
sources += glob('swephelp/*.c')
sources += glob('swephelp/*.cpp')
sources += sqlite3_sources
# Depends
depends = swe_depends
if use_swephelp:
depends += glob('swephelp/*.h')
depends += glob('swephelp/*.hpp')
depends += sqlite3_depends
# Libraries
libraries = swe_libs
if use_swephelp:
libraries += sqlite3_libs
# Pyswisseph extension
swemodule = Extension(
'swisseph',
define_macros = defines,
depends = depends,
extra_compile_args = cflags,
extra_link_args = ldflags,
include_dirs = includes,
libraries = libraries,
sources = sources
)
setup(
name = 'pyswisseph',
version = VERSION,
description = 'Python extension to the Swiss Ephemeris',
long_description = __doc__,
author = 'Stanislas Marquis',
author_email = 'stan@astrorigin.com',
url = 'https://astrorigin.com/pyswisseph',
download_url = 'https://pypi.org/project/pyswisseph',
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Religion',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Programming Language :: C',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Religion',
'Topic :: Scientific/Engineering :: Astronomy',
'Topic :: Software Development :: Libraries :: Python Modules'
],
keywords = 'Astrology Ephemeris Swisseph',
ext_modules = [swemodule],
setup_requires = ['wheel'],
python_requires = '>=3.5',
test_suite = 'tests'
)
# vi: set fenc=utf-8 ff=unix et sw=4 ts=4 sts=4 :
|
astrorigin/pyswisseph
|
setup.py
|
Python
|
gpl-2.0
| 9,298
|
# (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst.
from django.conf import settings
from django.conf.urls import include
from django.conf.urls import patterns
from django.conf.urls import url
from django.contrib import admin
from lizard_ui.urls import debugmode_urlpatterns
from controlnext import views
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^redirect/$', views.RedirectAfterLoginView.as_view(),
name='controlnext-redirect'),
url(r'^selectbasin/$', views.SelectBasinView.as_view(),
name='controlnext-selectbasin'),
url(r'^403Error/$', views.Http403View.as_view(),
name='controlnext-403error'),
url(r'^(?P<random_url_slug>\w+)/$', views.BasinView.as_view(),
name='controlnext-basin'),
url(r'^data_service/(?P<random_url_slug>\w+)/$',
views.DataService.as_view(),
name='controlnext-data-service'),
url(r'^data_service/(?P<random_url_slug>\w+)/demand/$',
views.DemandView.as_view(),
name='controlnext-data-demand'),
url(r'^$', views.RedirectAfterLoginView.as_view(),
name='controlnext-redirect'),
)
if getattr(settings, 'LIZARD_CONTROLNEXT_STANDALONE', False):
admin.autodiscover()
urlpatterns += patterns(
'',
(r'^ui/', include('lizard_ui.urls')),
(r'^map/', include('lizard_map.urls')),
(r'^admin/', include(admin.site.urls)),
)
urlpatterns += debugmode_urlpatterns()
|
lizardsystem/controlnext
|
controlnext/urls.py
|
Python
|
gpl-3.0
| 1,448
|
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton
# from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot
class App(QWidget):
def __init__(self):
super().__init__()
self.title = 'PyQt5 button - pythonspot.com'
self.left = 600
self.top = 500
self.width = 360
self.height = 200
self.initUI()
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
# qtRectangle = self.frameGeometry()
# self.centerPoint = QDesktopWidget().availableGeometry().center()
# qtRectangle.moveCenter(self.centerPoint)
button = QPushButton('PyQt5 button', self)
button.setToolTip('This is an example button')
button.move(100, 70)
button.clicked.connect(self.on_click)
self.show()
@pyqtSlot()
def on_click(self):
print('PyQt5 button click')
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
sys.exit(app.exec_())
|
sidro/excodpy
|
python-exemple/design.py
|
Python
|
bsd-2-clause
| 1,073
|
#!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
setuptools.setup(
setup_requires=['pbr>=0.5.21,<1.0'],
pbr=True)
|
emonty/pymox
|
setup.py
|
Python
|
apache-2.0
| 794
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
from marionette_test import MarionetteTestCase
class TestState(MarionetteTestCase):
def test_isEnabled(self):
test_html = self.marionette.absolute_url("test.html")
self.marionette.navigate(test_html)
l = self.marionette.find_element("name", "myCheckBox")
self.assertTrue(l.is_enabled())
self.marionette.execute_script("arguments[0].disabled = true;", [l])
self.assertFalse(l.is_enabled())
def test_isDisplayed(self):
test_html = self.marionette.absolute_url("test.html")
self.marionette.navigate(test_html)
l = self.marionette.find_element("name", "myCheckBox")
self.assertTrue(l.is_displayed())
self.marionette.execute_script("arguments[0].hidden = true;", [l])
self.assertFalse(l.is_displayed())
class TestStateChrome(MarionetteTestCase):
def setUp(self):
MarionetteTestCase.setUp(self)
self.marionette.set_context("chrome")
self.win = self.marionette.current_window_handle
self.marionette.execute_script("window.open('chrome://marionette/content/test.xul', 'foo', 'chrome,centerscreen');")
self.marionette.switch_to_window('foo')
self.assertNotEqual(self.win, self.marionette.current_window_handle)
def tearDown(self):
self.assertNotEqual(self.win, self.marionette.current_window_handle)
self.marionette.execute_script("window.close();")
self.marionette.switch_to_window(self.win)
MarionetteTestCase.tearDown(self)
def test_isEnabled(self):
l = self.marionette.find_element("id", "textInput")
self.assertTrue(l.is_enabled())
self.marionette.execute_script("arguments[0].disabled = true;", [l])
self.assertFalse(l.is_enabled())
self.marionette.execute_script("arguments[0].disabled = false;", [l])
''' Switched on in Bug 896043 to be turned on in Bug 896046
def test_isDisplayed(self):
l = self.marionette.find_element("id", "textInput")
self.assertTrue(l.is_displayed())
self.marionette.execute_script("arguments[0].hidden = true;", [l])
self.assertFalse(l.is_displayed())
self.marionette.execute_script("arguments[0].hidden = false;", [l])
'''
|
sergecodd/FireFox-OS
|
B2G/gecko/testing/marionette/client/marionette/tests/unit/test_elementState.py
|
Python
|
apache-2.0
| 2,451
|
# -*- coding: utf-8 -*-
"""
=================================================================================
1D Wasserstein barycenter comparison between exact LP and entropic regularization
=================================================================================
This example illustrates the computation of regularized Wasserstein Barycenter
as proposed in [3] and exact LP barycenters using standard LP solver.
It reproduces approximately Figure 3.1 and 3.2 from the following paper:
Cuturi, M., & Peyré, G. (2016). A smoothed dual approach for variational
Wasserstein problems. SIAM Journal on Imaging Sciences, 9(1), 320-343.
[3] Benamou, J. D., Carlier, G., Cuturi, M., Nenna, L., & Peyré, G. (2015).
Iterative Bregman projections for regularized transportation problems
SIAM Journal on Scientific Computing, 37(2), A1111-A1138.
"""
# Author: Remi Flamary <remi.flamary@unice.fr>
#
# License: MIT License
import numpy as np
import matplotlib.pylab as pl
import ot
# necessary for 3d plot even if not used
from mpl_toolkits.mplot3d import Axes3D # noqa
from matplotlib.collections import PolyCollection # noqa
#import ot.lp.cvx as cvx
##############################################################################
# Gaussian Data
# -------------
#%% parameters
problems = []
n = 100 # nb bins
# bin positions
x = np.arange(n, dtype=np.float64)
# Gaussian distributions
a1 = ot.datasets.make_1D_gauss(n, m=20, s=5) # m= mean, s= std
a2 = ot.datasets.make_1D_gauss(n, m=60, s=8)
# creating matrix A containing all distributions
A = np.vstack((a1, a2)).T
n_distributions = A.shape[1]
# loss matrix + normalization
M = ot.utils.dist0(n)
M /= M.max()
#%% plot the distributions
pl.figure(1, figsize=(6.4, 3))
for i in range(n_distributions):
pl.plot(x, A[:, i])
pl.title('Distributions')
pl.tight_layout()
#%% barycenter computation
alpha = 0.5 # 0<=alpha<=1
weights = np.array([1 - alpha, alpha])
# l2bary
bary_l2 = A.dot(weights)
# wasserstein
reg = 1e-3
ot.tic()
bary_wass = ot.bregman.barycenter(A, M, reg, weights)
ot.toc()
ot.tic()
bary_wass2 = ot.lp.barycenter(A, M, weights, solver='interior-point', verbose=True)
ot.toc()
pl.figure(2)
pl.clf()
pl.subplot(2, 1, 1)
for i in range(n_distributions):
pl.plot(x, A[:, i])
pl.title('Distributions')
pl.subplot(2, 1, 2)
pl.plot(x, bary_l2, 'r', label='l2')
pl.plot(x, bary_wass, 'g', label='Reg Wasserstein')
pl.plot(x, bary_wass2, 'b', label='LP Wasserstein')
pl.legend()
pl.title('Barycenters')
pl.tight_layout()
problems.append([A, [bary_l2, bary_wass, bary_wass2]])
##############################################################################
# Dirac Data
# ----------
#%% parameters
a1 = 1.0 * (x > 10) * (x < 50)
a2 = 1.0 * (x > 60) * (x < 80)
a1 /= a1.sum()
a2 /= a2.sum()
# creating matrix A containing all distributions
A = np.vstack((a1, a2)).T
n_distributions = A.shape[1]
# loss matrix + normalization
M = ot.utils.dist0(n)
M /= M.max()
#%% plot the distributions
pl.figure(1, figsize=(6.4, 3))
for i in range(n_distributions):
pl.plot(x, A[:, i])
pl.title('Distributions')
pl.tight_layout()
#%% barycenter computation
alpha = 0.5 # 0<=alpha<=1
weights = np.array([1 - alpha, alpha])
# l2bary
bary_l2 = A.dot(weights)
# wasserstein
reg = 1e-3
ot.tic()
bary_wass = ot.bregman.barycenter(A, M, reg, weights)
ot.toc()
ot.tic()
bary_wass2 = ot.lp.barycenter(A, M, weights, solver='interior-point', verbose=True)
ot.toc()
problems.append([A, [bary_l2, bary_wass, bary_wass2]])
pl.figure(2)
pl.clf()
pl.subplot(2, 1, 1)
for i in range(n_distributions):
pl.plot(x, A[:, i])
pl.title('Distributions')
pl.subplot(2, 1, 2)
pl.plot(x, bary_l2, 'r', label='l2')
pl.plot(x, bary_wass, 'g', label='Reg Wasserstein')
pl.plot(x, bary_wass2, 'b', label='LP Wasserstein')
pl.legend()
pl.title('Barycenters')
pl.tight_layout()
#%% parameters
a1 = np.zeros(n)
a2 = np.zeros(n)
a1[10] = .25
a1[20] = .5
a1[30] = .25
a2[80] = 1
a1 /= a1.sum()
a2 /= a2.sum()
# creating matrix A containing all distributions
A = np.vstack((a1, a2)).T
n_distributions = A.shape[1]
# loss matrix + normalization
M = ot.utils.dist0(n)
M /= M.max()
#%% plot the distributions
pl.figure(1, figsize=(6.4, 3))
for i in range(n_distributions):
pl.plot(x, A[:, i])
pl.title('Distributions')
pl.tight_layout()
#%% barycenter computation
alpha = 0.5 # 0<=alpha<=1
weights = np.array([1 - alpha, alpha])
# l2bary
bary_l2 = A.dot(weights)
# wasserstein
reg = 1e-3
ot.tic()
bary_wass = ot.bregman.barycenter(A, M, reg, weights)
ot.toc()
ot.tic()
bary_wass2 = ot.lp.barycenter(A, M, weights, solver='interior-point', verbose=True)
ot.toc()
problems.append([A, [bary_l2, bary_wass, bary_wass2]])
pl.figure(2)
pl.clf()
pl.subplot(2, 1, 1)
for i in range(n_distributions):
pl.plot(x, A[:, i])
pl.title('Distributions')
pl.subplot(2, 1, 2)
pl.plot(x, bary_l2, 'r', label='l2')
pl.plot(x, bary_wass, 'g', label='Reg Wasserstein')
pl.plot(x, bary_wass2, 'b', label='LP Wasserstein')
pl.legend()
pl.title('Barycenters')
pl.tight_layout()
##############################################################################
# Final figure
# ------------
#
#%% plot
nbm = len(problems)
nbm2 = (nbm // 2)
pl.figure(2, (20, 6))
pl.clf()
for i in range(nbm):
A = problems[i][0]
bary_l2 = problems[i][1][0]
bary_wass = problems[i][1][1]
bary_wass2 = problems[i][1][2]
pl.subplot(2, nbm, 1 + i)
for j in range(n_distributions):
pl.plot(x, A[:, j])
if i == nbm2:
pl.title('Distributions')
pl.xticks(())
pl.yticks(())
pl.subplot(2, nbm, 1 + i + nbm)
pl.plot(x, bary_l2, 'r', label='L2 (Euclidean)')
pl.plot(x, bary_wass, 'g', label='Reg Wasserstein')
pl.plot(x, bary_wass2, 'b', label='LP Wasserstein')
if i == nbm - 1:
pl.legend()
if i == nbm2:
pl.title('Barycenters')
pl.xticks(())
pl.yticks(())
|
rflamary/POT
|
docs/source/auto_examples/plot_barycenter_lp_vs_entropic.py
|
Python
|
mit
| 5,934
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Embedding Projector plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imghdr
import os
import numpy as np
from google.protobuf import json_format
from google.protobuf import text_format
from tensorflow.contrib.tensorboard.plugins.projector import PROJECTOR_FILENAME
from tensorflow.contrib.tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.pywrap_tensorflow import NewCheckpointReader
from tensorflow.python.training.saver import checkpoint_exists
from tensorflow.python.training.saver import latest_checkpoint
from tensorflow.tensorboard.plugins.base_plugin import TBPlugin
# HTTP routes.
CONFIG_ROUTE = '/info'
TENSOR_ROUTE = '/tensor'
METADATA_ROUTE = '/metadata'
RUNS_ROUTE = '/runs'
BOOKMARKS_ROUTE = '/bookmarks'
SPRITE_IMAGE_ROUTE = '/sprite_image'
_IMGHDR_TO_MIMETYPE = {
'bmp': 'image/bmp',
'gif': 'image/gif',
'jpeg': 'image/jpeg',
'png': 'image/png'
}
_DEFAULT_IMAGE_MIMETYPE = 'application/octet-stream'
def _read_tensor_file(fpath):
with file_io.FileIO(fpath, 'r') as f:
tensor = []
for line in f:
if line:
tensor.append(map(float, line.rstrip('\n').split('\t')))
return np.array(tensor, dtype='float32')
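# _read_tensor_file above expects one embedding row per line, with float
# components separated by tabs, e.g. "0.1\t0.2\t0.3".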
def _latest_checkpoints_changed(configs, run_path_pairs):
"""Returns true if the latest checkpoint has changed in any of the runs."""
for run_name, logdir in run_path_pairs:
if run_name not in configs:
continue
config = configs[run_name]
if not config.model_checkpoint_path:
continue
# See if you can find a checkpoint file in the logdir.
ckpt_path = latest_checkpoint(logdir)
if not ckpt_path:
# See if you can find a checkpoint in the parent of logdir.
ckpt_path = latest_checkpoint(os.path.join(logdir, os.pardir))
if not ckpt_path:
continue
if config.model_checkpoint_path != ckpt_path:
return True
return False
def _parse_positive_int_param(request, query_params, param_name):
"""Parses and asserts a positive (>0) integer query parameter.
Args:
request: The http request object.
query_params: Dictionary of query parameters.
param_name: Name of the parameter.
Returns:
None if parameter not present. -1 if parameter is not a positive integer.
"""
param = query_params.get(param_name)
if not param:
return None
try:
param = int(param)
if param <= 0:
raise ValueError()
return param
except ValueError:
request.respond('query parameter "%s" must be integer > 0' % param_name,
'text/plain', 400)
return -1
class ProjectorPlugin(TBPlugin):
"""Embedding projector."""
def __init__(self):
self._handlers = None
self.readers = {}
self.run_paths = None
self.logdir = None
self._configs = None
self.old_num_run_paths = None
def get_plugin_handlers(self, run_paths, logdir):
self.run_paths = run_paths
self.logdir = logdir
self._handlers = {
RUNS_ROUTE: self._serve_runs,
CONFIG_ROUTE: self._serve_config,
TENSOR_ROUTE: self._serve_tensor,
METADATA_ROUTE: self._serve_metadata,
BOOKMARKS_ROUTE: self._serve_bookmarks,
SPRITE_IMAGE_ROUTE: self._serve_sprite_image
}
return self._handlers
@property
def configs(self):
"""Returns a map of run paths to `ProjectorConfig` protos."""
run_path_pairs = list(self.run_paths.items())
# If there are no summary event files, the projector should still work,
# treating the `logdir` as the model checkpoint directory.
if not run_path_pairs:
run_path_pairs.append(('.', self.logdir))
if (self._run_paths_changed() or
_latest_checkpoints_changed(self._configs, run_path_pairs)):
self.readers = {}
self._configs, self.config_fpaths = self._read_latest_config_files(
run_path_pairs)
self._augment_configs_with_checkpoint_info()
return self._configs
def _run_paths_changed(self):
num_run_paths = len(list(self.run_paths.keys()))
if num_run_paths != self.old_num_run_paths:
self.old_num_run_paths = num_run_paths
return True
return False
def _augment_configs_with_checkpoint_info(self):
for run, config in self._configs.items():
for embedding in config.embeddings:
# Normalize the name of the embeddings.
if embedding.tensor_name.endswith(':0'):
embedding.tensor_name = embedding.tensor_name[:-2]
# Find the size of embeddings associated with a tensors file.
if embedding.tensor_path and not embedding.tensor_shape:
tensor = _read_tensor_file(embedding.tensor_path)
embedding.tensor_shape.extend([len(tensor), len(tensor[0])])
reader = self._get_reader_for_run(run)
if not reader:
continue
# Augment the configuration with the tensors in the checkpoint file.
special_embedding = None
if config.embeddings and not config.embeddings[0].tensor_name:
special_embedding = config.embeddings[0]
config.embeddings.remove(special_embedding)
var_map = reader.get_variable_to_shape_map()
for tensor_name, tensor_shape in var_map.items():
if len(tensor_shape) != 2:
continue
embedding = self._get_embedding(tensor_name, config)
if not embedding:
embedding = config.embeddings.add()
embedding.tensor_name = tensor_name
if special_embedding:
embedding.metadata_path = special_embedding.metadata_path
embedding.bookmarks_path = special_embedding.bookmarks_path
if not embedding.tensor_shape:
embedding.tensor_shape.extend(tensor_shape)
# Remove configs that do not have any valid (2D) tensors.
runs_to_remove = []
for run, config in self._configs.items():
if not config.embeddings:
runs_to_remove.append(run)
for run in runs_to_remove:
del self._configs[run]
del self.config_fpaths[run]
def _read_latest_config_files(self, run_path_pairs):
"""Reads and returns the projector config files in every run directory."""
configs = {}
config_fpaths = {}
for run_name, logdir in run_path_pairs:
config = ProjectorConfig()
config_fpath = os.path.join(logdir, PROJECTOR_FILENAME)
if file_io.file_exists(config_fpath):
file_content = file_io.read_file_to_string(config_fpath).decode('utf-8')
text_format.Merge(file_content, config)
has_tensor_files = False
for embedding in config.embeddings:
if embedding.tensor_path:
has_tensor_files = True
break
if not config.model_checkpoint_path:
# See if you can find a checkpoint file in the logdir.
ckpt_path = latest_checkpoint(logdir)
if not ckpt_path:
# Or in the parent of logdir.
ckpt_path = latest_checkpoint(os.path.join(logdir, os.pardir))
if not ckpt_path and not has_tensor_files:
continue
if ckpt_path:
config.model_checkpoint_path = ckpt_path
# Sanity check for the checkpoint file.
if (config.model_checkpoint_path and
not checkpoint_exists(config.model_checkpoint_path)):
logging.warning('Checkpoint file %s not found',
config.model_checkpoint_path)
continue
configs[run_name] = config
config_fpaths[run_name] = config_fpath
return configs, config_fpaths
def _get_reader_for_run(self, run):
if run in self.readers:
return self.readers[run]
config = self._configs[run]
reader = None
if config.model_checkpoint_path:
try:
reader = NewCheckpointReader(config.model_checkpoint_path)
except Exception: # pylint: disable=broad-except
logging.warning('Failed reading %s', config.model_checkpoint_path)
self.readers[run] = reader
return reader
def _get_metadata_file_for_tensor(self, tensor_name, config):
embedding_info = self._get_embedding(tensor_name, config)
if embedding_info:
return embedding_info.metadata_path
return None
def _get_bookmarks_file_for_tensor(self, tensor_name, config):
embedding_info = self._get_embedding(tensor_name, config)
if embedding_info:
return embedding_info.bookmarks_path
return None
def _canonical_tensor_name(self, tensor_name):
if ':' not in tensor_name:
return tensor_name + ':0'
else:
return tensor_name
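  # e.g. _canonical_tensor_name('embedding/weights') -> 'embedding/weights:0';
  # names that already carry a ':<output_index>' suffix pass through unchanged.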
def _get_embedding(self, tensor_name, config):
if not config.embeddings:
return None
for info in config.embeddings:
if (self._canonical_tensor_name(info.tensor_name) ==
self._canonical_tensor_name(tensor_name)):
return info
return None
def _serve_runs(self, request, query_params):
"""Returns a list of runs that have embeddings."""
request.respond(list(self.configs.keys()), 'application/json')
def _serve_config(self, request, query_params):
run = query_params.get('run')
if run is None:
request.respond('query parameter "run" is required', 'text/plain', 400)
return
if run not in self.configs:
request.respond('Unknown run: %s' % run, 'text/plain', 400)
return
config = self.configs[run]
request.respond(json_format.MessageToJson(config), 'application/json')
def _serve_metadata(self, request, query_params):
run = query_params.get('run')
if run is None:
request.respond('query parameter "run" is required', 'text/plain', 400)
return
name = query_params.get('name')
if name is None:
request.respond('query parameter "name" is required', 'text/plain', 400)
return
num_rows = _parse_positive_int_param(request, query_params, 'num_rows')
if num_rows == -1:
return
if run not in self.configs:
request.respond('Unknown run: %s' % run, 'text/plain', 400)
return
config = self.configs[run]
fpath = self._get_metadata_file_for_tensor(name, config)
if not fpath:
request.respond(
'No metadata file found for tensor %s in the config file %s' %
(name, self.config_fpaths[run]), 'text/plain', 400)
return
if not file_io.file_exists(fpath) or file_io.is_directory(fpath):
request.respond('%s is not a file' % fpath, 'text/plain', 400)
return
num_header_rows = 0
with file_io.FileIO(fpath, 'r') as f:
lines = []
# Stream reading the file with early break in case the file doesn't fit in
# memory.
for line in f:
lines.append(line)
if len(lines) == 1 and '\t' in lines[0]:
num_header_rows = 1
if num_rows and len(lines) >= num_rows + num_header_rows:
break
request.respond(''.join(lines), 'text/plain')
def _serve_tensor(self, request, query_params):
run = query_params.get('run')
if run is None:
request.respond('query parameter "run" is required', 'text/plain', 400)
return
name = query_params.get('name')
if name is None:
request.respond('query parameter "name" is required', 'text/plain', 400)
return
num_rows = _parse_positive_int_param(request, query_params, 'num_rows')
if num_rows == -1:
return
if run not in self.configs:
request.respond('Unknown run: %s' % run, 'text/plain', 400)
return
reader = self._get_reader_for_run(run)
config = self.configs[run]
if reader is None:
# See if there is a tensor file in the config.
embedding = self._get_embedding(name, config)
if not embedding or not embedding.tensor_path:
request.respond('Tensor %s has no tensor_path in the config' %
name, 'text/plain', 400)
return
if not file_io.file_exists(embedding.tensor_path):
request.respond('Tensor file %s does not exist' %
embedding.tensor_path, 'text/plain', 400)
return
tensor = _read_tensor_file(embedding.tensor_path)
else:
if not reader.has_tensor(name):
request.respond('Tensor %s not found in checkpoint dir %s' %
(name, config.model_checkpoint_path),
'text/plain', 400)
return
tensor = reader.get_tensor(name)
if num_rows:
tensor = tensor[:num_rows]
if tensor.dtype != 'float32':
tensor = tensor.astype(dtype='float32', copy=False)
data_bytes = tensor.tobytes()
request.respond(data_bytes, 'application/octet-stream')
def _serve_bookmarks(self, request, query_params):
run = query_params.get('run')
if not run:
request.respond('query parameter "run" is required', 'text/plain', 400)
return
name = query_params.get('name')
if name is None:
request.respond('query parameter "name" is required', 'text/plain', 400)
return
if run not in self.configs:
request.respond('Unknown run: %s' % run, 'text/plain', 400)
return
config = self.configs[run]
fpath = self._get_bookmarks_file_for_tensor(name, config)
if not fpath:
request.respond(
'No bookmarks file found for tensor %s in the config file %s' %
(name, self.config_fpaths[run]), 'text/plain', 400)
return
if not file_io.file_exists(fpath) or file_io.is_directory(fpath):
request.respond('%s is not a file' % fpath, 'text/plain', 400)
return
bookmarks_json = None
with file_io.FileIO(fpath, 'r') as f:
bookmarks_json = f.read()
request.respond(bookmarks_json, 'application/json')
def _serve_sprite_image(self, request, query_params):
run = query_params.get('run')
if not run:
request.respond('query parameter "run" is required', 'text/plain', 400)
return
name = query_params.get('name')
if name is None:
request.respond('query parameter "name" is required', 'text/plain', 400)
return
if run not in self.configs:
request.respond('Unknown run: %s' % run, 'text/plain', 400)
return
config = self.configs[run]
embedding_info = self._get_embedding(name, config)
if not embedding_info or not embedding_info.sprite.image_path:
request.respond(
'No sprite image file found for tensor %s in the config file %s' %
(name, self.config_fpaths[run]), 'text/plain', 400)
return
fpath = embedding_info.sprite.image_path
if not file_io.file_exists(fpath) or file_io.is_directory(fpath):
request.respond(
'%s does not exist or is directory' % fpath, 'text/plain', 400)
return
f = file_io.FileIO(fpath, 'r')
encoded_image_string = f.read()
f.close()
image_type = imghdr.what(None, encoded_image_string)
mime_type = _IMGHDR_TO_MIMETYPE.get(image_type, _DEFAULT_IMAGE_MIMETYPE)
request.respond(encoded_image_string, mime_type)
|
DCSaunders/tensorflow
|
tensorflow/tensorboard/plugins/projector/plugin.py
|
Python
|
apache-2.0
| 15,720
|
import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
install_requires = [
'requests==2.8.1'
]
setup(
name='facebook-auth',
version='0.1',
packages=find_packages(),
include_package_data=True,
license='BSD License', # example license
    description='A simple Django app for Facebook authentication.',
long_description=README,
url='https://github.com/technoarch-softwares/facebook-auth',
author='Pankul Mittal',
author_email='mittal.pankul@gmail.com',
install_requires = install_requires,
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.8', # replace "X.Y" as appropriate
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
# Replace these appropriately if you are stuck on Python 2.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
zip_safe=False,
)
|
technoarch-softwares/facebook-auth
|
setup.py
|
Python
|
bsd-2-clause
| 1,410
|
#!/usr/bin/python
import redis
import time
## Connect local redis service
client =redis.Redis(host='127.0.0.1',port=6379,db=0)
print "Connection to server successfully!"
dicKeys = client.keys("*")
print dicKeys
### Redis hash command part Start ###
# hset: Set key to value with hash name,hset(self, name, key, value)
# hget: Return the value of ``key`` within the hash ``name``, hget(self, name, key)
client.hset('myhash','field1',"foo")
hashVal = client.hget('myhash','field1')
print "Get hash value:",hashVal
# Get none-value
hashVal = client.hget('myhash','field2')
print "None hash value:",hashVal
# hexists: Returns a boolean indicating if ``key`` exists within hash ``name``
keyList= ['field1','field2']
for key in keyList:
hexists = client.hexists('myhash',key)
if hexists :
print "Exist in redis-hash key:",key
else:
print "Not exist in redis-hash key:",key
# hgetall: Return a Python dict of the hash's name/value pairs
client.hset('myhash','field2',"bar")
valDict = client.hgetall('myhash')
print "Get python-dict from redis-hash",valDict
# hincrby: Increment the value of ``key`` in hash ``name`` by ``amount``
# default increment is 1,
client.hset('myhash','field',20)
client.hincrby('myhash','field')
print "Get incrby value(Default):",client.hget('myhash','field')
client.hincrby('myhash','field',2)
print "Get incrby value(step: 2):",client.hget('myhash','field')
client.hincrby('myhash','field',-3)
print "Get incrby value(step: -3):",client.hget('myhash','field')
# no method hincrbyfloat
#hkeys: Return the list of keys within hash ``name``
kL = client.hkeys('myhash')
print "Get redis-hash key list",kL
#hlen: Return the number of elements in hash ``name``
lenHash =client.hlen('myhash')
print "All hash length:",lenHash
#hmget: Returns a list of values ordered identically to ``keys``
#hmget(self, name, keys), keys should be python list data structure
val =client.hmget('myhash',['field','field1','field2','field3','fieldx'])
print "Get all redis-hash value list:",val
#hmset: Sets each key in the ``mapping`` dict to its corresponding value in the hash ``name``
hmDict={'field':'foo','field1':'bar'}
hmKeys=hmDict.keys()
client.hmset('hash',hmDict)
val = client.hmget('hash',hmKeys)
print "Get hmset value:",val
#hdel: Delete ``key`` from hash ``name``
client.hdel('hash','field')
print "Get delete result:",client.hget('hash','field')
#hvals: Return the list of values within hash ``name``
val = client.hvals('myhash')
print "Get redis-hash values with HVALS",val
#hsetnx: Set ``key`` to ``value`` within hash ``name`` if ``key`` does not exist.
# Returns 1 if HSETNX created a field, otherwise 0.
r=client.hsetnx('myhash','field',2)
print "Check hsetnx execute result:",r," Value:",client.hget('myhash','field')
r=client.hsetnx('myhash','field10',20)
print "Check hsetnx execute result:",r,"Value",client.hget('myhash','field10')
hashVal = client.hgetall('profile')
print hashVal
#Empty db
client.flushdb()
|
MarsBighead/mustang
|
Python/redis.hash.py
|
Python
|
mit
| 2,981
|
#!/usr/bin/env python
"""
Couchbase collector
Refer to the following cbstats documentation for more details:
http://docs.couchbase.com/couchbase-manual-2.1/#cbstats-tool
"""
import os
import sys
import time
import subprocess
import re
from collectors.etc import couchbase_conf
from collectors.lib import utils
CONFIG = couchbase_conf.get_config()
COLLECTION_INTERVAL = CONFIG['collection_interval']
COUCHBASE_INITFILE = CONFIG['couchbase_initfile']
KEYS = frozenset( [
'bucket_active_conns',
'cas_hits',
'cas_misses',
'cmd_get',
'cmd_set',
'curr_connections',
'curr_conns_on_port_11209',
'curr_conns_on_port_11210',
'ep_queue_size',
'ep_num_value_ejects',
'ep_num_eject_failures',
'ep_oom_errors',
'ep_tmp_oom_errors',
'get_hits',
'get_misses',
'mem_used',
'total_connections',
'total_heap_bytes',
'total_free_bytes',
'total_allocated_bytes',
'total_fragmentation_bytes',
'tcmalloc_current_thread_cache_bytes',
'tcmalloc_max_thread_cache_bytes',
'tcmalloc_unmapped_bytes',
] )
def find_couchbase_pid():
"""Find out the pid of couchbase"""
if not os.path.isfile(COUCHBASE_INITFILE):
return
try:
fd = open(COUCHBASE_INITFILE)
for line in fd:
if line.startswith("exec"):
init_script = line.split()[1]
fd.close()
except IOError:
utils.err("Check permission of file (%s)" % COUCHBASE_INITFILE)
return
try:
fd = open(init_script)
for line in fd:
if line.startswith("PIDFILE"):
pid_file = line.split("=")[1].rsplit()[0]
        fd.close()
        break
except IOError:
utils.err("Check permission of file (%s)" % init_script)
return
try:
fd = open(pid_file)
pid = fd.read()
fd.close()
except IOError:
utils.err("Couchbase-server is not running, since no pid file exists")
return
return pid.split()[0]
def find_conf_file(pid):
"""Returns config file for couchbase-server."""
try:
fd = open('/proc/%s/cmdline' % pid)
except IOError, e:
utils.err("Couchbase (pid %s) went away ? %s" % (pid, e))
return
try:
config = fd.read().split("config_path")[1].split("\"")[1]
return config
finally:
fd.close()
def find_bindir_path(config_file):
"""Returns the bin directory path"""
try:
fd = open(config_file)
except IOError, e:
utils.err("Error for Config file (%s): %s" % (config_file, e))
return None
try:
for line in fd:
if line.startswith("{path_config_bindir"):
return line.split(",")[1].split("\"")[1]
finally:
fd.close()
def list_bucket(bin_dir):
"""Returns the list of memcached or membase buckets"""
buckets = []
if not os.path.isfile("%s/couchbase-cli" % bin_dir):
return buckets
cli = ("%s/couchbase-cli" % bin_dir)
try:
buck = subprocess.check_output([cli, "bucket-list", "--cluster",
"localhost:8091"])
except subprocess.CalledProcessError:
return buckets
regex = re.compile("[\s\w]+:[\s\w]+$")
for i in buck.splitlines():
if not regex.match(i):
buckets.append(i)
return buckets
def collect_stats(bin_dir, bucket):
"""Returns statistics related to a particular bucket"""
if not os.path.isfile("%s/cbstats" % bin_dir):
return
cli = ("%s/cbstats" % bin_dir)
try:
ts = time.time()
stats = subprocess.check_output([cli, "localhost:11211", "-b", bucket,
"all"])
except subprocess.CalledProcessError:
return
for stat in stats.splitlines():
metric = stat.split(":")[0].lstrip(" ")
value = stat.split(":")[1].lstrip(" \t")
if metric in KEYS:
print ("couchbase.%s %i %s bucket=%s" % (metric, ts, value, bucket))
def main():
utils.drop_privileges()
pid = find_couchbase_pid()
if not pid:
utils.err("Error: Either couchbase-server is not running or file (%s)"
" doesn't exist" % COUCHBASE_INITFILE)
return 13
conf_file = find_conf_file(pid)
if not conf_file:
utils.err("Error: Can't find config file (%s)" % conf_file)
return 13
bin_dir = find_bindir_path(conf_file)
if not bin_dir:
utils.err("Error: Can't find bindir path in config file")
return 13
while True:
# Listing bucket everytime so as to start collecting datapoints
# of any new bucket.
buckets = list_bucket(bin_dir)
for b in buckets:
collect_stats(bin_dir, b)
time.sleep(COLLECTION_INTERVAL)
if __name__ == "__main__":
sys.exit(main())
|
wangy1931/tcollector
|
collectors/0/couchbase.py
|
Python
|
lgpl-3.0
| 4,853
|
# coding=utf-8
"""
The EepURL Report API endpoint
Documentation: http://developer.mailchimp.com/documentation/mailchimp/reference/reports/eepurl/
Schema: https://api.mailchimp.com/schema/3.0/Reports/Eepurl/Collection.json
"""
from __future__ import unicode_literals
from mailchimp3.baseapi import BaseApi
class ReportEepURL(BaseApi):
"""
Get a summary of social activity for the campaign, tracked by EepURL.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the endpoint
"""
super(ReportEepURL, self).__init__(*args, **kwargs)
self.endpoint = 'reports'
self.campaign_id = None
def all(self, campaign_id, **queryparams):
"""
Get a summary of social activity for the campaign, tracked by EepURL.
:param campaign_id: The unique id for the campaign.
:type campaign_id: :py:class:`str`
:param queryparams: The query string parameters
queryparams['fields'] = []
queryparams['exclude_fields'] = []
"""
self.campaign_id = campaign_id
return self._mc_client._get(url=self._build_path(campaign_id, 'eepurl'), **queryparams)
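# Illustrative usage sketch (constructor arguments vary between mailchimp3
# releases and the campaign id below is a placeholder, so treat this as an
# assumption rather than documented API):
#
#   from mailchimp3 import MailChimp
#   client = MailChimp(mc_api='your-api-key')
#   social_stats = client.reports.eepurl.all(campaign_id='abc123ef45',
#                                            fields='total_clicks,twitter')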
|
charlesthk/python-mailchimp
|
mailchimp3/entities/reporteepurl.py
|
Python
|
mit
| 1,175
|
from nose.tools import *
import glob
import json
import os
import sys
import cli
PY3 = sys.version_info[0] == 3
def load_commented_json(path):
lines = open(path).read().split('\n')
return json.loads('\n'.join(
[line for line in lines if not line.startswith('#')]))
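# For reference, load_commented_json simply drops lines starting with '#' and
# parses the remainder as JSON; e.g. a file containing (illustrative content,
# not a real fixture):
#   # what this case exercises
#   ["--flag", "value"]
# yields the Python list ['--flag', 'value'].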
def test_all():
specs = glob.glob('tests/cli/*.spec')
for idx, spec in enumerate(specs):
sys.stderr.write('%2d %s\n' % (idx, spec))
args = load_commented_json(spec)
if PY3:
expected = open(spec.replace('.spec', '.out.json'),encoding='utf-8').read()
else:
expected = open(spec.replace('.spec', '.out.json')).read().decode('utf8')
actual = cli.run(args)
if expected != actual:
            open('/tmp/expected.json', 'wb').write(expected.encode('utf8'))
            open('/tmp/actual.json', 'wb').write(actual.encode('utf8'))
eq_(expected, actual)
|
Digitalxero/pyjsonselect
|
tests/integration_test.py
|
Python
|
apache-2.0
| 942
|
# Make difference plots for the paper
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from astropy.visualization import LinearStretch, PercentileInterval
from astropy.visualization.mpl_normalize import ImageNormalize
import astropy.units as u
from astropy.coordinates import SkyCoord
import sunpy.map
from sunpy.time import parse_time
from sunpy import config
TIME_FORMAT = config.get("general", "time_format")
# Heliographic Stonyhurst grid ticklabel kwargs
hg_ticklabel_kwargs = {"color": 'blue', "style": 'italic', "fontsize": 9}
def subtract_maps(m1, m2):
s_data = (m1.data / m1.exposure_time) - (m2.data / m2.exposure_time)
return sunpy.map.Map(s_data, m2.meta)
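# Note: subtract_maps gives the per-pixel difference in DN/s, i.e.
# m1.data / m1.exposure_time - m2.data / m2.exposure_time, wrapped in a new
# map that reuses m2's metadata so the submap and plotting calls below keep
# m2's coordinate frame.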
#
#
# Where the data is
root = os.path.expanduser('~/Data/jets/2012-11-20')
# The images we wish to subtract
file_pairs = [['jet_region_A_0/SDO/AIA/1.5/fulldisk/171/AIA20121120_000011_0171.fits',
'jet_region_A_0/SDO/AIA/1.5/fulldisk/171/AIA20121120_000459_0171.fits'],
['jet_region_A_1/SDO/AIA/1.5/fulldisk/171/AIA20121120_012947_0171.fits',
'jet_region_A_1/SDO/AIA/1.5/fulldisk/171/AIA20121120_013447_0171.fits'],
['jet_region_A_5/SDO/AIA/1.5/fulldisk/171/AIA20121120_014011_0171.fits',
'jet_region_A_5/SDO/AIA/1.5/fulldisk/171/AIA20121120_014359_0171.fits'],
['jet_region_A_2/SDO/AIA/1.5/fulldisk/171/AIA20121120_023211_0171.fits',
'jet_region_A_2/SDO/AIA/1.5/fulldisk/171/AIA20121120_023747_0171.fits'],
['jet_region_A_4/SDO/AIA/1.5/fulldisk/171/AIA20121120_030959_0171.fits',
'jet_region_A_4/SDO/AIA/1.5/fulldisk/171/AIA20121120_031459_0171.fits'],
['jet_region_A_6/SDO/AIA/1.5/fulldisk/171/AIA20121120_053511_0171.fits',
'jet_region_A_6/SDO/AIA/1.5/fulldisk/171/AIA20121120_054011_0171.fits'],
]
# Submap location
lower_left_location = [685, -400] * u.arcsec
upper_right_location = [950, -220] * u.arcsec
# For each file pair, make a difference image
difference_maps = []
for file_pair in file_pairs:
m1 = sunpy.map.Map(os.path.join(root, file_pair[0]))
m2 = sunpy.map.Map(os.path.join(root, file_pair[1]))
# Create the sky coordinates
ll = SkyCoord(lower_left_location[0], lower_left_location[1], frame=m2.coordinate_frame)
ur = SkyCoord(upper_right_location[0], upper_right_location[1], frame=m2.coordinate_frame)
# Create the difference map of the region of interest
difference_map = (subtract_maps(m1, m2)).submap(ll, ur)
# Fix the color table and its scaling
difference_map.plot_settings['cmap'] = cm.PiYG
vmin, vmax = PercentileInterval(99.0).get_limits(difference_map.data)
vlim = np.max(np.abs([vmin, vmax]))
difference_map.plot_settings['norm'] = ImageNormalize(vmin=-vlim, vmax=vlim)
# Store the difference maps
difference_maps.append(difference_map)
# Plot the difference maps
"""
fig = plt.figure()
for i, dfm in enumerate(difference_maps):
# Top left plot
if i == 0:
pass
# Middle top and top right plots
if i == 1 or i == 2:
pass
# Bottom left plot
if i == 3:
pass
# Middle bottom and bottom right plots
if i == 4 or i == 5:
pass
pass
"""
"""
ax = plt.subplot(projection=difference_map)
difference_map.plot()
difference_map.draw_limb(color='black', linewidth=1, linestyle='solid')
title = "{nickname} {measurement} difference\n{date2:{tmf2}} - {date1:{tmf1}}".format(nickname=m1.nickname,
measurement=m1.measurement._repr_latex_(),
date2=parse_time(m2.date),
tmf2=TIME_FORMAT,
date1=parse_time(m1.date),
tmf1=TIME_FORMAT)
ax.set_title(title + '\n')
ax.grid(True)
ax.coords.grid(color='orange', linestyle='solid')
# Manually plot a heliographic overlay.
overlay = ax.get_coords_overlay('heliographic_stonyhurst')
lon = overlay[0]
lat = overlay[1]
lon.set_ticks_visible(True)
lon.set_ticks(color='blue')
lon.set_ticklabel_visible(True)
lon.set_ticklabel(color='blue')
lon.coord_wrap = 180
lon.set_major_formatter('dd')
lat.set_ticks_visible(True)
lat.set_ticks(color='blue')
lat.set_ticklabel_visible(True)
lat.set_ticklabel(color='blue')
overlay.grid(color='blue', linewidth=2, linestyle='dashed')
tx, ty = ax.coords
tx.set_major_formatter('s')
ty.set_major_formatter('s')
plt.colorbar(fraction=0.035, pad=0.03, shrink=0.75, label='change in DN/s')
#plt.tight_layout()
plt.show()
"""
nrows = 2
ncols = 3
plot_size_scale = 6
fig, axs = plt.subplots(nrows=nrows, ncols=ncols, subplot_kw=dict(projection=difference_map),
figsize=(ncols*plot_size_scale, nrows*plot_size_scale))
for row in range(0, nrows):
for col in range(0, ncols):
dfm_index = row*ncols + col
m1 = sunpy.map.Map(os.path.join(root, file_pairs[dfm_index][0]))
m2 = sunpy.map.Map(os.path.join(root, file_pairs[dfm_index][1]))
# Create the sky coordinates
ll = SkyCoord(lower_left_location[0], lower_left_location[1], frame=m2.coordinate_frame)
ur = SkyCoord(upper_right_location[0], upper_right_location[1], frame=m2.coordinate_frame)
# Create the difference map of the region of interest
dfm = (subtract_maps(m1, m2)).submap(ll, ur)
# Fix the color table and its scaling
dfm.plot_settings['cmap'] = cm.gray # cm.PiYG
vmin, vmax = PercentileInterval(99.0).get_limits(dfm.data)
vlim = np.max(np.abs([vmin, vmax]))
dfm.plot_settings['norm'] = ImageNormalize(vmin=-vlim, vmax=vlim)
ax = axs[row, col]
dfm.plot(axes=ax)
dfm.draw_limb(color='black', linewidth=1, linestyle='solid')
title = "{nickname} {measurement} difference\n{date2:{tmf2}} - {date1:{tmf1}}".format(nickname=m1.nickname,
measurement=m1.measurement._repr_latex_(),
date2=parse_time(m2.date),
tmf2=TIME_FORMAT,
date1=parse_time(m1.date),
tmf1=TIME_FORMAT)
ax.set_title(title + '\n')
ax.grid(True)
ax.coords.grid(color='orange', linestyle='solid')
# Manually plot a heliographic overlay.
overlay = ax.get_coords_overlay('heliographic_stonyhurst')
lon = overlay[0]
lat = overlay[1]
lon.set_ticks(spacing=10*u.degree)
lon.set_ticks_visible(False)
lon.set_ticklabel_visible(True)
lon.set_ticklabel(**hg_ticklabel_kwargs)
lon.coord_wrap = 180
lon.set_major_formatter('dd')
lat.set_ticks([-16, -21] * u.degree)
lat.set_ticks_visible(False)
lat.set_ticks(color='blue')
lat.set_ticklabel_visible(True)
lat.set_ticklabel(**hg_ticklabel_kwargs)
lat.set_major_formatter('dd')
tx, ty = ax.coords
tx.set_major_formatter('s')
ty.set_major_formatter('s')
# Top left
if col == 0 and row == 0:
ax.set_xlabel('')
tx.set_ticklabel_visible(False)
tx.set_ticks_visible(False)
lat.set_ticklabel_position('l')
# Top middle and right
if row == 0 and (col == 1 or col == 2):
ax.set_xlabel('')
ax.set_ylabel('')
tx.set_ticklabel_visible(False)
tx.set_ticks_visible(False)
ty.set_ticklabel_visible(False)
ty.set_ticks_visible(False)
# Bottom left
if row == 1 and col == 0:
lat.set_ticklabel_position('l')
# Bottom middle and right
if row == 1 and (col == 1 or col == 2):
ax.set_ylabel('')
ty.set_ticklabel_visible(False)
ty.set_ticks_visible(False)
overlay.grid(color='blue', linewidth=1, linestyle='dashed')
plt.tight_layout(rect=(0.05, 0.05, 1, 1))
plt.show()
|
wafels/jets
|
py/paper1_make_difference_maps_for_paper.py
|
Python
|
lgpl-3.0
| 8,372
|
#!/usr/bin/env python
from __future__ import division, print_function
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('ma', parent_package, top_path)
config.add_data_dir('tests')
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
config = configuration(top_path='').todict()
setup(**config)
|
DailyActie/Surrogate-Model
|
01-codes/numpy-master/numpy/ma/setup.py
|
Python
|
mit
| 433
|
# -*- encoding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility for caching master images.
"""
import os
import tempfile
import time
import uuid
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
import six
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common.i18n import _LI
from ironic.common.i18n import _LW
from ironic.common import images
from ironic.common import utils
from ironic.openstack.common import fileutils
LOG = logging.getLogger(__name__)
img_cache_opts = [
cfg.BoolOpt('parallel_image_downloads',
default=False,
help='Run image downloads and raw format conversions in '
'parallel.'),
]
CONF = cfg.CONF
CONF.register_opts(img_cache_opts)
# This would contain a sorted list of instances of ImageCache to be
# considered for cleanup. This list will be kept sorted in non-increasing
# order of priority.
_cache_cleanup_list = []
class ImageCache(object):
"""Class handling access to cache for master images."""
def __init__(self, master_dir, cache_size, cache_ttl,
image_service=None):
"""Constructor.
:param master_dir: cache directory to work on
:param cache_size: desired maximum cache size in bytes
:param cache_ttl: cache entity TTL in seconds
:param image_service: Glance image service to use, None for default
"""
self.master_dir = master_dir
self._cache_size = cache_size
self._cache_ttl = cache_ttl
self._image_service = image_service
if master_dir is not None:
fileutils.ensure_tree(master_dir)
def fetch_image(self, href, dest_path, ctx=None, force_raw=True):
"""Fetch image by given href to the destination path.
Does nothing if destination path exists and corresponds to a file that
exists.
Only creates a link if master image for this UUID is already in cache.
Otherwise downloads an image and also stores it in cache.
:param href: image UUID or href to fetch
:param dest_path: destination file path
:param ctx: context
:param force_raw: boolean value, whether to convert the image to raw
format
"""
img_download_lock_name = 'download-image'
if self.master_dir is None:
# NOTE(ghe): We don't share images between instances/hosts
if not CONF.parallel_image_downloads:
with lockutils.lock(img_download_lock_name, 'ironic-'):
_fetch(ctx, href, dest_path, self._image_service,
force_raw)
else:
_fetch(ctx, href, dest_path, self._image_service, force_raw)
return
# TODO(ghe): have hard links and counts the same behaviour in all fs
# NOTE(vdrok): File name is converted to UUID if it's not UUID already,
# so that two images with same file names do not collide
if service_utils.is_glance_image(href):
master_file_name = service_utils.parse_image_ref(href)[0]
else:
# NOTE(vdrok): Doing conversion of href in case it's unicode
# string, UUID cannot be generated for unicode strings on python 2.
href_encoded = href.encode('utf-8') if six.PY2 else href
master_file_name = str(uuid.uuid5(uuid.NAMESPACE_URL,
href_encoded))
master_path = os.path.join(self.master_dir, master_file_name)
if CONF.parallel_image_downloads:
img_download_lock_name = 'download-image:%s' % master_file_name
# TODO(dtantsur): lock expiration time
with lockutils.lock(img_download_lock_name, 'ironic-'):
if os.path.exists(dest_path):
# NOTE(vdrok): After rebuild requested image can change, so we
# should ensure that dest_path and master_path (if exists) are
# pointing to the same file
if (os.path.exists(master_path) and
(os.stat(dest_path).st_ino ==
os.stat(master_path).st_ino)):
LOG.debug("Destination %(dest)s already exists for "
"image %(uuid)s" %
{'uuid': href,
'dest': dest_path})
return
os.unlink(dest_path)
try:
# NOTE(dtantsur): ensure we're not in the middle of clean up
with lockutils.lock('master_image', 'ironic-'):
os.link(master_path, dest_path)
except OSError:
LOG.info(_LI("Master cache miss for image %(uuid)s, "
"starting download"),
{'uuid': href})
else:
LOG.debug("Master cache hit for image %(uuid)s",
{'uuid': href})
return
self._download_image(
href, master_path, dest_path, ctx=ctx, force_raw=force_raw)
# NOTE(dtantsur): we increased cache size - time to clean up
self.clean_up()
def _download_image(self, href, master_path, dest_path, ctx=None,
force_raw=True):
"""Download image by href and store at a given path.
This method should be called with uuid-specific lock taken.
:param href: image UUID or href to fetch
:param master_path: destination master path
:param dest_path: destination file path
:param ctx: context
:param force_raw: boolean value, whether to convert the image to raw
format
"""
# TODO(ghe): timeout and retry for downloads
# TODO(ghe): logging when image cannot be created
tmp_dir = tempfile.mkdtemp(dir=self.master_dir)
tmp_path = os.path.join(tmp_dir, href.split('/')[-1])
try:
_fetch(ctx, href, tmp_path, self._image_service, force_raw)
# NOTE(dtantsur): no need for global lock here - master_path
# will have link count >1 at any moment, so won't be cleaned up
os.link(tmp_path, master_path)
os.link(master_path, dest_path)
finally:
utils.rmtree_without_raise(tmp_dir)
@lockutils.synchronized('master_image', 'ironic-')
def clean_up(self, amount=None):
"""Clean up directory with images, keeping cache of the latest images.
Files with link count >1 are never deleted.
Protected by global lock, so that no one messes with master images
after we get listing and before we actually delete files.
:param amount: if present, amount of space to reclaim in bytes,
cleaning will stop, if this goal was reached,
even if it is possible to clean up more files
"""
if self.master_dir is None:
return
LOG.debug("Starting clean up for master image cache %(dir)s" %
{'dir': self.master_dir})
amount_copy = amount
listing = _find_candidates_for_deletion(self.master_dir)
survived, amount = self._clean_up_too_old(listing, amount)
if amount is not None and amount <= 0:
return
amount = self._clean_up_ensure_cache_size(survived, amount)
if amount is not None and amount > 0:
LOG.warn(_LW("Cache clean up was unable to reclaim %(required)d "
"MiB of disk space, still %(left)d MiB required"),
{'required': amount_copy / 1024 / 1024,
'left': amount / 1024 / 1024})
def _clean_up_too_old(self, listing, amount):
"""Clean up stage 1: drop images that are older than TTL.
This method removes files all files older than TTL seconds
unless 'amount' is non-None. If 'amount' is non-None,
it starts removing files older than TTL seconds,
oldest first, until the required 'amount' of space is reclaimed.
:param listing: list of tuples (file name, last used time)
:param amount: if not None, amount of space to reclaim in bytes,
cleaning will stop, if this goal was reached,
even if it is possible to clean up more files
:returns: tuple (list of files left after clean up,
amount still to reclaim)
"""
threshold = time.time() - self._cache_ttl
survived = []
for file_name, last_used, stat in listing:
if last_used < threshold:
try:
os.unlink(file_name)
except EnvironmentError as exc:
LOG.warn(_LW("Unable to delete file %(name)s from "
"master image cache: %(exc)s"),
{'name': file_name, 'exc': exc})
else:
if amount is not None:
amount -= stat.st_size
if amount <= 0:
amount = 0
break
else:
survived.append((file_name, last_used, stat))
return survived, amount
def _clean_up_ensure_cache_size(self, listing, amount):
"""Clean up stage 2: try to ensure cache size < threshold.
Try to delete the oldest files until conditions is satisfied
or no more files are eligible for deletion.
:param listing: list of tuples (file name, last used time)
:param amount: amount of space to reclaim, if possible.
if amount is not None, it has higher priority than
cache size in settings
:returns: amount of space still required after clean up
"""
# NOTE(dtantsur): Sort listing to delete the oldest files first
listing = sorted(listing,
key=lambda entry: entry[1],
reverse=True)
total_listing = (os.path.join(self.master_dir, f)
for f in os.listdir(self.master_dir))
total_size = sum(os.path.getsize(f)
for f in total_listing)
while listing and (total_size > self._cache_size or
(amount is not None and amount > 0)):
file_name, last_used, stat = listing.pop()
try:
os.unlink(file_name)
except EnvironmentError as exc:
LOG.warn(_LW("Unable to delete file %(name)s from "
"master image cache: %(exc)s"),
{'name': file_name, 'exc': exc})
else:
total_size -= stat.st_size
if amount is not None:
amount -= stat.st_size
if total_size > self._cache_size:
LOG.info(_LI("After cleaning up cache dir %(dir)s "
"cache size %(actual)d is still larger than "
"threshold %(expected)d"),
{'dir': self.master_dir, 'actual': total_size,
'expected': self._cache_size})
return max(amount, 0) if amount is not None else 0
def _find_candidates_for_deletion(master_dir):
"""Find files eligible for deletion i.e. with link count ==1.
:param master_dir: directory to operate on
:returns: iterator yielding tuples (file name, last used time, stat)
"""
for filename in os.listdir(master_dir):
filename = os.path.join(master_dir, filename)
stat = os.stat(filename)
if not os.path.isfile(filename) or stat.st_nlink > 1:
continue
# NOTE(dtantsur): Detect most recently accessed files,
# seeing atime can be disabled by the mount option
# Also include ctime as it changes when image is linked to
last_used_time = max(stat.st_mtime, stat.st_atime, stat.st_ctime)
yield filename, last_used_time, stat
def _free_disk_space_for(path):
"""Get free disk space on a drive where path is located."""
stat = os.statvfs(path)
return stat.f_frsize * stat.f_bavail
def _fetch(context, image_href, path, image_service=None, force_raw=False):
"""Fetch image and convert to raw format if needed."""
path_tmp = "%s.part" % path
images.fetch(context, image_href, path_tmp, image_service,
force_raw=False)
    # NOTE(yjiang5): If glance can provide the virtual size information,
    # then we can first clean the cache and then invoke images.fetch().
if force_raw:
required_space = images.converted_size(path_tmp)
directory = os.path.dirname(path_tmp)
_clean_up_caches(directory, required_space)
images.image_to_raw(image_href, path, path_tmp)
else:
os.rename(path_tmp, path)
def _clean_up_caches(directory, amount):
"""Explicitly cleanup caches based on their priority (if required).
:param directory: the directory (of the cache) to be freed up.
:param amount: amount of space to reclaim.
:raises: InsufficientDiskSpace exception, if we cannot free up enough space
after trying all the caches.
"""
free = _free_disk_space_for(directory)
if amount < free:
return
# NOTE(dtantsur): filter caches, whose directory is on the same device
st_dev = os.stat(directory).st_dev
caches_to_clean = [x[1]() for x in _cache_cleanup_list]
caches = (c for c in caches_to_clean
if os.stat(c.master_dir).st_dev == st_dev)
for cache_to_clean in caches:
cache_to_clean.clean_up(amount=(amount - free))
free = _free_disk_space_for(directory)
if amount < free:
break
else:
raise exception.InsufficientDiskSpace(path=directory,
required=amount / 1024 / 1024,
actual=free / 1024 / 1024,
)
def clean_up_caches(ctx, directory, images_info):
"""Explicitly cleanup caches based on their priority (if required).
This cleans up the caches to free up the amount of space required for the
images in images_info. The caches are cleaned up one after the other in
the order of their priority. If we still cannot free up enough space
after trying all the caches, this method throws exception.
:param ctx: context
:param directory: the directory (of the cache) to be freed up.
:param images_info: a list of tuples of the form (image_uuid,path)
for which space is to be created in cache.
:raises: InsufficientDiskSpace exception, if we cannot free up enough space
after trying all the caches.
"""
total_size = sum(images.download_size(ctx, uuid)
for (uuid, path) in images_info)
_clean_up_caches(directory, total_size)
def cleanup(priority):
"""Decorator method for adding cleanup priority to a class."""
def _add_property_to_class_func(cls):
_cache_cleanup_list.append((priority, cls))
_cache_cleanup_list.sort(reverse=True, key=lambda tuple_: tuple_[0])
return cls
return _add_property_to_class_func
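# Illustrative usage sketch (the directory, size and TTL values are made up
# for the example and are not taken from ironic's configuration):
#
#   cache = ImageCache('/var/lib/ironic/master_images',
#                      cache_size=5 * 1024 ** 3,   # 5 GiB
#                      cache_ttl=24 * 60 * 60)     # 1 day
#   cache.fetch_image('155d900f-4e14-4e4c-a73d-069cbf4541e6',
#                     '/var/lib/ironic/instances/node-0/disk')
#   cache.clean_up()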
|
naototty/vagrant-lxc-ironic
|
ironic/drivers/modules/image_cache.py
|
Python
|
apache-2.0
| 16,000
|
# Generated by Django 2.0.4 on 2018-04-13 08:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pa3', '0018_auto_20180413_1001'),
]
operations = [
migrations.AlterField(
model_name='newestnumberbatch',
name='src',
field=models.CharField(choices=[('pa_23', 'H 23'), ('pa_02', 'H 02'), ('pa_13', 'H 13'), ('pa_10', 'H 10')], max_length=50),
),
migrations.AlterField(
model_name='statisticaldata',
name='src',
field=models.CharField(choices=[(['H 19', 'H 23', 'H 25'], ['H 19', 'H 23', 'H 25']), (['H 02'], ['H 02']), (['Schalter 1/2', 'Schalter 3/4', 'Schalter 5/6', 'Schalter 7/8/9', 'Schalter 10/11'], ['Schalter 1/2', 'Schalter 3/4', 'Schalter 5/6', 'Schalter 7/8/9', 'Schalter 10/11']), (['H 10'], ['H 10'])], max_length=50),
),
migrations.AlterField(
model_name='waitingnumber',
name='src',
field=models.CharField(choices=[(['H 19', 'H 23', 'H 25'], ['H 19', 'H 23', 'H 25']), (['H 02'], ['H 02']), (['Schalter 1/2', 'Schalter 3/4', 'Schalter 5/6', 'Schalter 7/8/9', 'Schalter 10/11'], ['Schalter 1/2', 'Schalter 3/4', 'Schalter 5/6', 'Schalter 7/8/9', 'Schalter 10/11']), (['H 10'], ['H 10'])], max_length=50),
),
migrations.AlterField(
model_name='waitingnumberbatch',
name='src',
field=models.CharField(choices=[('pa_23', 'H 23'), ('pa_02', 'H 02'), ('pa_13', 'H 13'), ('pa_10', 'H 10')], db_index=True, max_length=50),
),
]
|
sistason/pa3
|
src/pa3_frontend/pa3_django/pa3/migrations/0019_auto_20180413_1012.py
|
Python
|
gpl-3.0
| 1,619
|
from django.core.management.base import BaseCommand
from optparse import make_option
from sam.models import Website
import os
class Command(BaseCommand):
    help = 'Creates Website objects from text file input, or writes existing Websites to a text file.'
option_list = BaseCommand.option_list + (
make_option('-r',
'--read',
dest='read',
default=False,
action='store_true',
help='This will create Website objects from a text file.'),
make_option('-w',
'--write',
dest='write',
default=False,
action='store_true',
help='This will create a text file with Website objects.'),
)
def handle(self, *args, **options):
write = options['write']
module_dir = os.path.dirname(__file__)
file_path = os.path.join(module_dir, 'websites.txt')
if not write:
site_file = open(file_path, 'r')
for line in site_file:
if len(line.strip()) > 0:
parts = line.split("-=-")
url = parts[0].strip()
display = parts[1].strip()
kind = parts[2].strip()
Website(url=url, display=display, kind=kind).save()
else:
site_file = open(file_path, 'w')
for site in Website.objects.all():
line = site.url + '-=-' + site.display + '-=-' + site.kind
                site_file.write(line + "\n")
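# Expected line format in websites.txt (the entry below is illustrative, not
# shipped with the repo):
#   https://example.com -=- Example Site -=- blog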
|
samolds/samster
|
sam/management/commands/websites.py
|
Python
|
mit
| 1,601
|
from . import purchase_order
|
OCA/stock-logistics-warehouse
|
stock_orderpoint_origin_mrp_link/models/__init__.py
|
Python
|
agpl-3.0
| 29
|
"""
========================================
The :mod:`array_split.split_plot` Module
========================================
Uses :mod:`matplotlib` to plot a split.
Classes and Functions
=====================
.. autosummary::
:toctree: generated/
SplitPlotter - Plots a split.
plot - Plots split shapes.
"""
from __future__ import absolute_import
from .license import license as _license, copyright as _copyright, version as _version
__author__ = "Shane J. Latham"
__license__ = _license()
__copyright__ = _copyright()
__version__ = _version()
class SplitPlotter(object):
"""
Plots a split.
"""
def __init__(self):
"""
"""
pass
def plot(split):
"""
Plots a split.
"""
|
array-split/array_split
|
array_split/split_plot.py
|
Python
|
mit
| 744
|
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, cross_val_score
from .common import Benchmark, Estimator, Predictor
from .datasets import _synth_classification_dataset
from .utils import make_gen_classif_scorers
class CrossValidationBenchmark(Benchmark):
"""
Benchmarks for Cross Validation.
"""
timeout = 20000
param_names = ['n_jobs']
params = (Benchmark.n_jobs_vals,)
def setup(self, *params):
n_jobs, = params
data = _synth_classification_dataset(n_samples=50000, n_features=100)
self.X, self.X_val, self.y, self.y_val = data
self.clf = RandomForestClassifier(n_estimators=50,
max_depth=10,
random_state=0)
cv = 16 if Benchmark.data_size == 'large' else 4
self.cv_params = {'n_jobs': n_jobs,
'cv': cv}
def time_crossval(self, *args):
cross_val_score(self.clf, self.X, self.y, **self.cv_params)
def peakmem_crossval(self, *args):
cross_val_score(self.clf, self.X, self.y, **self.cv_params)
def track_crossval(self, *args):
return float(cross_val_score(self.clf, self.X,
self.y, **self.cv_params).mean())
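# For reference, asv's naming convention applies here: methods prefixed with
# time_ are timed, peakmem_ methods have their peak memory recorded, and
# track_ methods report whatever value they return.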
class GridSearchBenchmark(Predictor, Estimator, Benchmark):
"""
Benchmarks for GridSearch.
"""
timeout = 20000
param_names = ['n_jobs']
params = (Benchmark.n_jobs_vals,)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
data = _synth_classification_dataset(n_samples=10000, n_features=100)
return data
def make_estimator(self, params):
n_jobs, = params
clf = RandomForestClassifier(random_state=0)
if Benchmark.data_size == 'large':
n_estimators_list = [10, 25, 50, 100, 500]
max_depth_list = [5, 10, None]
max_features_list = [0.1, 0.4, 0.8, 1.0]
else:
n_estimators_list = [10, 25, 50]
max_depth_list = [5, 10]
max_features_list = [0.1, 0.4, 0.8]
param_grid = {'n_estimators': n_estimators_list,
'max_depth': max_depth_list,
'max_features': max_features_list}
estimator = GridSearchCV(clf, param_grid, n_jobs=n_jobs, cv=4)
return estimator
def make_scorers(self):
make_gen_classif_scorers(self)
|
glemaitre/scikit-learn
|
asv_benchmarks/benchmarks/model_selection.py
|
Python
|
bsd-3-clause
| 2,511
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'NewsBlogConfig', fields ['type', 'namespace']
db.delete_unique(u'aldryn_newsblog_newsblogconfig', ['type', 'namespace'])
# Adding model 'NewsBlogConfigTranslation'
db.create_table(u'aldryn_newsblog_newsblogconfig_translation', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('language_code', self.gf('django.db.models.fields.CharField')(max_length=15, db_index=True)),
('app_title', self.gf('django.db.models.fields.CharField')(max_length=234)),
(u'master', self.gf('django.db.models.fields.related.ForeignKey')(related_name='translations', null=True, to=orm['aldryn_newsblog.NewsBlogConfig'])),
))
db.send_create_signal(u'aldryn_newsblog', ['NewsBlogConfigTranslation'])
# Adding unique constraint on 'NewsBlogConfigTranslation', fields ['language_code', u'master']
db.create_unique(u'aldryn_newsblog_newsblogconfig_translation', ['language_code', u'master_id'])
def backwards(self, orm):
# Removing unique constraint on 'NewsBlogConfigTranslation', fields ['language_code', u'master']
db.delete_unique(u'aldryn_newsblog_newsblogconfig_translation', ['language_code', u'master_id'])
# Deleting model 'NewsBlogConfigTranslation'
db.delete_table(u'aldryn_newsblog_newsblogconfig_translation')
# Adding unique constraint on 'NewsBlogConfig', fields ['type', 'namespace']
db.create_unique(u'aldryn_newsblog_newsblogconfig', ['type', 'namespace'])
models = {
u'aldryn_categories.category': {
'Meta': {'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'rgt': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
u'aldryn_newsblog.article': {
'Meta': {'ordering': "[u'-publishing_date']", 'object_name': 'Article'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_people.Person']", 'null': 'True', 'blank': 'True'}),
'categories': ('aldryn_categories.fields.CategoryManyToManyField', [], {'to': u"orm['aldryn_categories.Category']", 'symmetrical': 'False', 'blank': 'True'}),
'content': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'aldryn_newsblog_articles'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'featured_image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'namespace': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_newsblog.NewsBlogConfig']"}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'publishing_date': ('django.db.models.fields.DateTimeField', [], {})
},
u'aldryn_newsblog.articletranslation': {
'Meta': {'unique_together': "[(u'language_code', u'slug'), (u'language_code', u'master')]", 'object_name': 'ArticleTranslation', 'db_table': "u'aldryn_newsblog_article_translation'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'lead_in': ('djangocms_text_ckeditor.fields.HTMLField', [], {'default': "u''"}),
u'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['aldryn_newsblog.Article']"}),
'meta_description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'meta_title': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '234'})
},
u'aldryn_newsblog.latestentriesplugin': {
'Meta': {'object_name': 'LatestEntriesPlugin', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'latest_entries': ('django.db.models.fields.IntegerField', [], {'default': '5'})
},
u'aldryn_newsblog.newsblogconfig': {
'Meta': {'object_name': 'NewsBlogConfig'},
'app_data': ('app_data.fields.AppDataField', [], {'default': "'{}'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'namespace': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '100'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'aldryn_newsblog.newsblogconfigtranslation': {
'Meta': {'unique_together': "[(u'language_code', u'master')]", 'object_name': 'NewsBlogConfigTranslation', 'db_table': "u'aldryn_newsblog_newsblogconfig_translation'"},
'app_title': ('django.db.models.fields.CharField', [], {'max_length': '234'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
u'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['aldryn_newsblog.NewsBlogConfig']"})
},
u'aldryn_people.group': {
'Meta': {'object_name': 'Group'},
'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'default': "u''", 'max_length': '75', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'aldryn_people.person': {
'Meta': {'object_name': 'Person'},
'email': ('django.db.models.fields.EmailField', [], {'default': "u''", 'max_length': '75', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_people.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'vcard_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'visual': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['filer.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'all_files'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'owned_files'", 'null': 'True', 'to': u"orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_filer.file_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'filer.folder': {
'Meta': {'ordering': "(u'name',)", 'unique_together': "((u'parent', u'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'filer_owned_folders'", 'null': 'True', 'to': u"orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image'},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['aldryn_newsblog']
|
mkoistinen/aldryn-newsblog
|
aldryn_newsblog/south_migrations/0012_auto__add_newsblogconfigtranslation__add_unique_newsblogconfigtranslat.py
|
Python
|
bsd-3-clause
| 18,529
|
from clint.textui import puts
def as_table(data):
offset = data['offset']
limit = data['limit']
total = data['total_count']
first = offset + 1
last = offset + limit if offset + limit <= total else total
issues = data['issues']
if not total:
puts('No issues matching your query.')
else:
puts('Showing {first} - {last} of {total} total.'
.format(first=first, last=last, total=total))
for i in issues:
puts('{id} | {status} | {priority} | {project} | {subject}'
.format(id=i['id'],
status=i['status']['name'],
priority=i['priority']['name'],
project=i['project']['name'],
subject=i['subject']))
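# Worked example (numbers are illustrative): with offset=25, limit=25 and
# total_count=60, the header line reads "Showing 26 - 50 of 60 total."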
|
dmedvinsky/redcliff
|
redcliff/renderer/issues.py
|
Python
|
mit
| 794
|
# elog/mod_save_summary.py - elog dispatch module
# Copyright 2006-2007 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import codecs
import time
from portage import os
from portage import _encodings
from portage import _unicode_decode
from portage import _unicode_encode
from portage.data import portage_uid, portage_gid
from portage.localization import _
from portage.util import ensure_dirs, apply_permissions
def process(mysettings, key, logentries, fulltext):
if mysettings["PORT_LOGDIR"] != "":
elogdir = os.path.join(mysettings["PORT_LOGDIR"], "elog")
else:
elogdir = os.path.join(os.sep, "var", "log", "portage", "elog")
ensure_dirs(elogdir, uid=portage_uid, gid=portage_gid, mode=0o2770)
# TODO: Locking
elogfilename = elogdir+"/summary.log"
elogfile = codecs.open(_unicode_encode(elogfilename,
encoding=_encodings['fs'], errors='strict'),
mode='a', encoding=_encodings['content'], errors='backslashreplace')
apply_permissions(elogfilename, mode=0o60, mask=0)
time_str = time.strftime("%Y-%m-%d %H:%M:%S %Z",
time.localtime(time.time()))
# Avoid potential UnicodeDecodeError later.
time_str = _unicode_decode(time_str,
encoding=_encodings['content'], errors='replace')
elogfile.write(_(">>> Messages generated by process %(pid)d on %(time)s for package %(pkg)s:\n\n") %
{"pid": os.getpid(), "time": time_str, "pkg": key})
elogfile.write(fulltext)
elogfile.write("\n")
elogfile.close()
return elogfilename
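# Example of the header line appended to summary.log (pid, timestamp and
# package atom below are illustrative):
#   >>> Messages generated by process 12345 on 2007-01-01 12:00:00 UTC for package app-misc/foo-1.0: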
|
Neuvoo/legacy-portage
|
pym/portage/elog/mod_save_summary.py
|
Python
|
gpl-2.0
| 1,485
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for running fuzzers."""
import json
import os
import shutil
import stat
import sys
import tempfile
import unittest
from unittest import mock
import parameterized
from pyfakefs import fake_filesystem_unittest
import build_fuzzers
import fuzz_target
import run_fuzzers
# pylint: disable=wrong-import-position
INFRA_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(INFRA_DIR)
import helper
import test_helpers
# NOTE: This integration test relies on
# https://github.com/google/oss-fuzz/tree/master/projects/example project.
EXAMPLE_PROJECT = 'example'
# Location of files used for testing.
TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'test_data')
MEMORY_FUZZER_DIR = os.path.join(TEST_DATA_PATH, 'memory')
MEMORY_FUZZER = 'curl_fuzzer_memory'
UNDEFINED_FUZZER_DIR = os.path.join(TEST_DATA_PATH, 'undefined')
UNDEFINED_FUZZER = 'curl_fuzzer_undefined'
FUZZ_SECONDS = 10
class RunFuzzerIntegrationTestMixin: # pylint: disable=too-few-public-methods,invalid-name
"""Mixin for integration test classes that runbuild_fuzzers on builds of a
specific sanitizer."""
# These must be defined by children.
FUZZER_DIR = None
FUZZER = None
def setUp(self):
"""Patch the environ so that we can execute runner scripts."""
test_helpers.patch_environ(self, runner=True)
def _test_run_with_sanitizer(self, fuzzer_dir, sanitizer):
"""Calls run_fuzzers on fuzzer_dir and |sanitizer| and asserts
the run succeeded and that no bug was found."""
with test_helpers.temp_dir_copy(fuzzer_dir) as fuzzer_dir_copy:
config = test_helpers.create_run_config(fuzz_seconds=FUZZ_SECONDS,
workspace=fuzzer_dir_copy,
oss_fuzz_project_name='curl',
sanitizer=sanitizer)
result = run_fuzzers.run_fuzzers(config)
self.assertEqual(result, run_fuzzers.RunFuzzersResult.NO_BUG_FOUND)
@unittest.skipIf(not os.getenv('INTEGRATION_TESTS'),
'INTEGRATION_TESTS=1 not set')
class RunMemoryFuzzerIntegrationTest(RunFuzzerIntegrationTestMixin,
unittest.TestCase):
"""Integration test for build_fuzzers with an MSAN build."""
FUZZER_DIR = MEMORY_FUZZER_DIR
FUZZER = MEMORY_FUZZER
def test_run_with_memory_sanitizer(self):
"""Tests run_fuzzers with a valid MSAN build."""
self._test_run_with_sanitizer(self.FUZZER_DIR, 'memory')
@unittest.skipIf(not os.getenv('INTEGRATION_TESTS'),
'INTEGRATION_TESTS=1 not set')
class RunUndefinedFuzzerIntegrationTest(RunFuzzerIntegrationTestMixin,
unittest.TestCase):
"""Integration test for build_fuzzers with an UBSAN build."""
FUZZER_DIR = UNDEFINED_FUZZER_DIR
FUZZER = UNDEFINED_FUZZER
def test_run_with_undefined_sanitizer(self):
"""Tests run_fuzzers with a valid UBSAN build."""
self._test_run_with_sanitizer(self.FUZZER_DIR, 'undefined')
class BaseFuzzTargetRunnerTest(unittest.TestCase):
"""Tests BaseFuzzTargetRunner."""
def _create_runner(self, **kwargs): # pylint: disable=no-self-use
defaults = {
'fuzz_seconds': FUZZ_SECONDS,
'oss_fuzz_project_name': EXAMPLE_PROJECT
}
for default_key, default_value in defaults.items():
if default_key not in kwargs:
kwargs[default_key] = default_value
config = test_helpers.create_run_config(**kwargs)
return run_fuzzers.BaseFuzzTargetRunner(config)
def _test_initialize_fail(self, expected_error_args, **create_runner_kwargs):
with mock.patch('logging.error') as mock_error:
runner = self._create_runner(**create_runner_kwargs)
self.assertFalse(runner.initialize())
mock_error.assert_called_with(*expected_error_args)
@parameterized.parameterized.expand([(0,), (None,), (-1,)])
def test_initialize_invalid_fuzz_seconds(self, fuzz_seconds):
"""Tests initialize fails with an invalid fuzz seconds."""
expected_error_args = ('Fuzz_seconds argument must be greater than 1, '
'but was: %s.', fuzz_seconds)
with tempfile.TemporaryDirectory() as tmp_dir:
out_path = os.path.join(tmp_dir, 'build-out')
os.mkdir(out_path)
with mock.patch('utils.get_fuzz_targets') as mock_get_fuzz_targets:
mock_get_fuzz_targets.return_value = [
os.path.join(out_path, 'fuzz_target')
]
self._test_initialize_fail(expected_error_args,
fuzz_seconds=fuzz_seconds,
workspace=tmp_dir)
def test_initialize_no_out_dir(self):
"""Tests initialize fails with no out dir."""
with tempfile.TemporaryDirectory() as tmp_dir:
out_path = os.path.join(tmp_dir, 'build-out')
expected_error_args = ('Out directory: %s does not exist.', out_path)
self._test_initialize_fail(expected_error_args, workspace=tmp_dir)
def test_initialize_nonempty_artifacts(self):
"""Tests initialize with a file artifacts path."""
with tempfile.TemporaryDirectory() as tmp_dir:
out_path = os.path.join(tmp_dir, 'build-out')
os.mkdir(out_path)
os.makedirs(os.path.join(tmp_dir, 'out'))
artifacts_path = os.path.join(tmp_dir, 'out', 'artifacts')
with open(artifacts_path, 'w') as artifacts_handle:
artifacts_handle.write('fake')
expected_error_args = (
'Artifacts path: %s exists and is not an empty directory.',
artifacts_path)
self._test_initialize_fail(expected_error_args, workspace=tmp_dir)
def test_initialize_bad_artifacts(self):
"""Tests initialize with a non-empty artifacts path."""
with tempfile.TemporaryDirectory() as tmp_dir:
out_path = os.path.join(tmp_dir, 'build-out')
os.mkdir(out_path)
artifacts_path = os.path.join(tmp_dir, 'out', 'artifacts')
os.makedirs(artifacts_path)
artifact_path = os.path.join(artifacts_path, 'artifact')
with open(artifact_path, 'w') as artifact_handle:
artifact_handle.write('fake')
expected_error_args = (
'Artifacts path: %s exists and is not an empty directory.',
artifacts_path)
self._test_initialize_fail(expected_error_args, workspace=tmp_dir)
@mock.patch('utils.get_fuzz_targets')
@mock.patch('logging.error')
def test_initialize_empty_artifacts(self, mock_log_error,
mock_get_fuzz_targets):
"""Tests initialize with an empty artifacts dir."""
mock_get_fuzz_targets.return_value = ['fuzz-target']
with tempfile.TemporaryDirectory() as tmp_dir:
out_path = os.path.join(tmp_dir, 'build-out')
os.mkdir(out_path)
artifacts_path = os.path.join(tmp_dir, 'out', 'artifacts')
os.makedirs(artifacts_path)
runner = self._create_runner(workspace=tmp_dir)
self.assertTrue(runner.initialize())
mock_log_error.assert_not_called()
self.assertTrue(os.path.isdir(artifacts_path))
@mock.patch('utils.get_fuzz_targets')
@mock.patch('logging.error')
def test_initialize_no_artifacts(self, mock_log_error, mock_get_fuzz_targets):
"""Tests initialize with no artifacts dir (the expected setting)."""
mock_get_fuzz_targets.return_value = ['fuzz-target']
with tempfile.TemporaryDirectory() as tmp_dir:
out_path = os.path.join(tmp_dir, 'build-out')
os.mkdir(out_path)
runner = self._create_runner(workspace=tmp_dir)
self.assertTrue(runner.initialize())
mock_log_error.assert_not_called()
self.assertTrue(os.path.isdir(os.path.join(tmp_dir, 'out', 'artifacts')))
def test_initialize_no_fuzz_targets(self):
"""Tests initialize with no fuzz targets."""
with tempfile.TemporaryDirectory() as tmp_dir:
out_path = os.path.join(tmp_dir, 'build-out')
os.makedirs(out_path)
expected_error_args = ('No fuzz targets were found in out directory: %s.',
out_path)
self._test_initialize_fail(expected_error_args, workspace=tmp_dir)
class CiFuzzTargetRunnerTest(fake_filesystem_unittest.TestCase):
"""Tests that CiFuzzTargetRunner works as intended."""
def setUp(self):
self.setUpPyfakefs()
@mock.patch('clusterfuzz_deployment.OSSFuzz.upload_crashes')
@mock.patch('utils.get_fuzz_targets')
@mock.patch('run_fuzzers.CiFuzzTargetRunner.run_fuzz_target')
@mock.patch('run_fuzzers.CiFuzzTargetRunner.create_fuzz_target_obj')
def test_run_fuzz_targets_quits(self, mock_create_fuzz_target_obj,
mock_run_fuzz_target, mock_get_fuzz_targets,
mock_upload_crashes):
"""Tests that run_fuzz_targets quits on the first crash it finds."""
workspace = 'workspace'
out_path = os.path.join(workspace, 'build-out')
self.fs.create_dir(out_path)
config = test_helpers.create_run_config(
fuzz_seconds=FUZZ_SECONDS,
workspace=workspace,
oss_fuzz_project_name=EXAMPLE_PROJECT)
runner = run_fuzzers.CiFuzzTargetRunner(config)
mock_get_fuzz_targets.return_value = ['target1', 'target2']
runner.initialize()
testcase = os.path.join(workspace, 'testcase')
self.fs.create_file(testcase)
stacktrace = 'stacktrace'
corpus_dir = 'corpus'
self.fs.create_dir(corpus_dir)
mock_run_fuzz_target.return_value = fuzz_target.FuzzResult(
testcase, stacktrace, corpus_dir)
magic_mock = mock.MagicMock()
magic_mock.target_name = 'target1'
mock_create_fuzz_target_obj.return_value = magic_mock
self.assertTrue(runner.run_fuzz_targets())
self.assertEqual(mock_run_fuzz_target.call_count, 1)
self.assertEqual(mock_upload_crashes.call_count, 1)
class BatchFuzzTargetRunnerTest(fake_filesystem_unittest.TestCase):
"""Tests that BatchFuzzTargetRunnerTest works as intended."""
WORKSPACE = 'workspace'
STACKTRACE = 'stacktrace'
CORPUS_DIR = 'corpus'
def setUp(self):
self.setUpPyfakefs()
out_dir = os.path.join(self.WORKSPACE, 'build-out')
self.fs.create_dir(out_dir)
self.testcase1 = os.path.join(out_dir, 'testcase-aaa')
self.fs.create_file(self.testcase1)
self.testcase2 = os.path.join(out_dir, 'testcase-bbb')
self.fs.create_file(self.testcase2)
self.config = test_helpers.create_run_config(fuzz_seconds=FUZZ_SECONDS,
workspace=self.WORKSPACE,
cfl_platform='github')
@mock.patch('utils.get_fuzz_targets', return_value=['target1', 'target2'])
@mock.patch('clusterfuzz_deployment.ClusterFuzzLite.upload_crashes')
@mock.patch('run_fuzzers.BatchFuzzTargetRunner.run_fuzz_target')
@mock.patch('run_fuzzers.BatchFuzzTargetRunner.create_fuzz_target_obj')
def test_run_fuzz_targets_quits(self, mock_create_fuzz_target_obj,
mock_run_fuzz_target, mock_upload_crashes, _):
"""Tests that run_fuzz_targets doesn't quit on the first crash it finds."""
runner = run_fuzzers.BatchFuzzTargetRunner(self.config)
runner.initialize()
call_count = 0
def mock_run_fuzz_target_impl(_):
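      # Simulates two consecutive fuzzer runs that each report a crash with a
      # different testcase, so the batch runner is expected to keep going after
      # the first crash instead of quitting.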
nonlocal call_count
if call_count == 0:
testcase = self.testcase1
elif call_count == 1:
testcase = self.testcase2
assert call_count != 2
call_count += 1
if not os.path.exists(self.CORPUS_DIR):
self.fs.create_dir(self.CORPUS_DIR)
return fuzz_target.FuzzResult(testcase, self.STACKTRACE, self.CORPUS_DIR)
mock_run_fuzz_target.side_effect = mock_run_fuzz_target_impl
magic_mock = mock.MagicMock()
magic_mock.target_name = 'target1'
mock_create_fuzz_target_obj.return_value = magic_mock
self.assertTrue(runner.run_fuzz_targets())
self.assertEqual(mock_run_fuzz_target.call_count, 2)
self.assertEqual(mock_upload_crashes.call_count, 1)
class GetCoverageTargetsTest(unittest.TestCase):
"""Tests for get_coverage_fuzz_targets."""
def test_get_fuzz_targets(self):
"""Tests that get_coverage_fuzz_targets returns expected targets."""
with tempfile.TemporaryDirectory() as temp_dir:
# Setup.
fuzz_target_path = os.path.join(temp_dir, 'fuzz-target')
with open(fuzz_target_path, 'w') as file_handle:
file_handle.write('')
fuzz_target_st = os.stat(fuzz_target_path)
os.chmod(fuzz_target_path, fuzz_target_st.st_mode | stat.S_IEXEC)
non_fuzz_target1 = os.path.join(temp_dir, 'non-fuzz-target1')
with open(non_fuzz_target1, 'w') as file_handle:
file_handle.write('LLVMFuzzerTestOneInput')
subdir = os.path.join(temp_dir, 'subdir')
os.mkdir(subdir)
non_fuzz_target2 = os.path.join(subdir, 'non-fuzz-target1')
with open(non_fuzz_target2, 'w') as file_handle:
file_handle.write('LLVMFuzzerTestOneInput')
self.assertEqual(run_fuzzers.get_coverage_fuzz_targets(temp_dir),
[fuzz_target_path])
@unittest.skip('TODO(metzman): Fix this test')
@unittest.skipIf(not os.getenv('INTEGRATION_TESTS'),
'INTEGRATION_TESTS=1 not set')
class CoverageReportIntegrationTest(unittest.TestCase):
"""Integration tests for coverage reports."""
SANITIZER = 'coverage'
def setUp(self):
test_helpers.patch_environ(self, runner=True)
@mock.patch('filestore.github_actions._upload_artifact_with_upload_js')
def test_coverage_report(self, _):
"""Tests generation of coverage reports end-to-end, from building to
generation."""
with test_helpers.docker_temp_dir() as temp_dir:
shared = os.path.join(temp_dir, 'shared')
os.mkdir(shared)
copy_command = ('cp -r /opt/code_coverage /shared && '
'cp $(which llvm-profdata) /shared && '
'cp $(which llvm-cov) /shared')
assert helper.docker_run([
'-v', f'{shared}:/shared', 'gcr.io/oss-fuzz-base/base-runner', 'bash',
'-c', copy_command
])
os.environ['CODE_COVERAGE_SRC'] = os.path.join(shared, 'code_coverage')
os.environ['PATH'] += os.pathsep + shared
# Do coverage build.
build_config = test_helpers.create_build_config(
oss_fuzz_project_name=EXAMPLE_PROJECT,
project_repo_name='oss-fuzz',
workspace=temp_dir,
git_sha='0b95fe1039ed7c38fea1f97078316bfc1030c523',
base_commit='da0746452433dc18bae699e355a9821285d863c8',
sanitizer=self.SANITIZER,
cfl_platform='github',
# Needed for test not to fail because of permissions issues.
bad_build_check=False)
self.assertTrue(build_fuzzers.build_fuzzers(build_config))
# TODO(metzman): Get rid of this here and make 'compile' do this.
chmod_command = ('chmod -R +r /out && '
'find /out -type d -exec chmod +x {} +')
assert helper.docker_run([
'-v', f'{os.path.join(temp_dir, "build-out")}:/out',
'gcr.io/oss-fuzz-base/base-builder', 'bash', '-c', chmod_command
])
# Generate report.
run_config = test_helpers.create_run_config(fuzz_seconds=FUZZ_SECONDS,
workspace=temp_dir,
sanitizer=self.SANITIZER,
mode='coverage',
cfl_platform='github')
result = run_fuzzers.run_fuzzers(run_config)
self.assertEqual(result, run_fuzzers.RunFuzzersResult.NO_BUG_FOUND)
expected_summary_path = os.path.join(
TEST_DATA_PATH, 'example_coverage_report_summary.json')
with open(expected_summary_path) as file_handle:
expected_summary = json.loads(file_handle.read())
actual_summary_path = os.path.join(temp_dir, 'cifuzz-coverage',
'report', 'linux', 'summary.json')
with open(actual_summary_path) as file_handle:
actual_summary = json.loads(file_handle.read())
self.assertEqual(expected_summary, actual_summary)
@unittest.skipIf(not os.getenv('INTEGRATION_TESTS'),
'INTEGRATION_TESTS=1 not set')
class RunAddressFuzzersIntegrationTest(RunFuzzerIntegrationTestMixin,
unittest.TestCase):
"""Integration tests for build_fuzzers with an ASAN build."""
BUILD_DIR_NAME = 'cifuzz-latest-build'
def test_new_bug_found(self):
"""Tests run_fuzzers with a valid ASAN build."""
# Set the first return value to True, then the second to False to
# emulate a bug existing in the current PR but not on the downloaded
# OSS-Fuzz build.
with mock.patch('fuzz_target.FuzzTarget.is_reproducible',
side_effect=[True, False]):
with tempfile.TemporaryDirectory() as tmp_dir:
workspace = os.path.join(tmp_dir, 'workspace')
shutil.copytree(TEST_DATA_PATH, workspace)
config = test_helpers.create_run_config(
fuzz_seconds=FUZZ_SECONDS,
workspace=workspace,
oss_fuzz_project_name=EXAMPLE_PROJECT)
result = run_fuzzers.run_fuzzers(config)
self.assertEqual(result, run_fuzzers.RunFuzzersResult.BUG_FOUND)
@mock.patch('fuzz_target.FuzzTarget.is_reproducible',
side_effect=[True, True])
def test_old_bug_found(self, _):
"""Tests run_fuzzers with a bug found in OSS-Fuzz before."""
with tempfile.TemporaryDirectory() as tmp_dir:
workspace = os.path.join(tmp_dir, 'workspace')
shutil.copytree(TEST_DATA_PATH, workspace)
config = test_helpers.create_run_config(
fuzz_seconds=FUZZ_SECONDS,
workspace=workspace,
oss_fuzz_project_name=EXAMPLE_PROJECT)
result = run_fuzzers.run_fuzzers(config)
self.assertEqual(result, run_fuzzers.RunFuzzersResult.NO_BUG_FOUND)
def test_invalid_build(self):
"""Tests run_fuzzers with an invalid ASAN build."""
with tempfile.TemporaryDirectory() as tmp_dir:
out_path = os.path.join(tmp_dir, 'build-out')
os.mkdir(out_path)
config = test_helpers.create_run_config(
fuzz_seconds=FUZZ_SECONDS,
workspace=tmp_dir,
oss_fuzz_project_name=EXAMPLE_PROJECT)
result = run_fuzzers.run_fuzzers(config)
self.assertEqual(result, run_fuzzers.RunFuzzersResult.ERROR)
class GetFuzzTargetRunnerTest(unittest.TestCase):
"""Tests for get_fuzz_fuzz_target_runner."""
@parameterized.parameterized.expand([
('batch', run_fuzzers.BatchFuzzTargetRunner),
('code-change', run_fuzzers.CiFuzzTargetRunner),
('coverage', run_fuzzers.CoverageTargetRunner)
])
def test_get_fuzz_target_runner(self, mode, fuzz_target_runner_cls):
"""Tests that get_fuzz_target_runner returns the correct runner based on the
specified mode."""
with tempfile.TemporaryDirectory() as tmp_dir:
run_config = test_helpers.create_run_config(
fuzz_seconds=FUZZ_SECONDS,
workspace=tmp_dir,
oss_fuzz_project_name='example',
mode=mode)
runner = run_fuzzers.get_fuzz_target_runner(run_config)
self.assertTrue(isinstance(runner, fuzz_target_runner_cls))
if __name__ == '__main__':
unittest.main()
|
google/oss-fuzz
|
infra/cifuzz/run_fuzzers_test.py
|
Python
|
apache-2.0
| 19,890
|
#-*-coding:Utf-8 -*
# TheSecretTower
# Copyright (C) 2011 Pierre SURPLY
#
# This file is part of TheSecretTower.
#
# TheSecretTower is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# TheSecretTower is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with TheSecretTower. If not, see <http://www.gnu.org/licenses/>.
# Author: Pierre Surply
# Pygame
import pygame
from pygame.locals import *
import const
class Element:
""" Définit l'élément de base de la fentre """
def __init__(self):
self.x = 0
self.y = 0
self.rect = None
def changer_image(self, src):
if const.display:
self.image = src
if self.rect == None:
self.rect = self.image.get_rect()
def changer_text(self, text, font, color = (0,0,0)):
if const.display:
if isinstance(text, str):
self.image = font.render(text, 1, color)
if self.rect == None:
self.rect = self.image.get_rect()
def move_el(self, x, y):
self.x = self.x+x
self.y = self.y+y
if const.display:
self.rect = self.rect.move(x,y)
|
dusty141/TheSecretTower
|
element.py
|
Python
|
gpl-3.0
| 1,664
|
from PyQt5 import QtWidgets, QtCore, QtGui
from card import Card, Ranks, Suits
from enum import Enum
from random import shuffle
class DeckStrategy(Enum):
visible = 1
invisible = 2
class Deck(QtWidgets.QStackedWidget):
target = None
def __init__(self, strategy: DeckStrategy, parent=None):
super().__init__(parent=parent)
self._strategy = strategy
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
emptyDeck = QtWidgets.QLabel()
emptyDeck.setPixmap(QtGui.QPixmap("images/deck.png").scaled(Card.width, Card.height, QtCore.Qt.KeepAspectRatio))
self.addWidget(emptyDeck)
if self._strategy.value == DeckStrategy.invisible.value:
emptyDeck.setGeometry(0, 0, Card.width, Card.height)
self.setGeometry(20, 20, Card.width, Card.height)
self.mousePressEvent = self.leaf
cards = []
for rank in Ranks:
for suit in Suits:
cards.append(Card(rank, suit, parent=self))
shuffle(cards)
for card in cards:
card.leaf = 2
self.addWidget(card)
self.setCurrentIndex(self.count() - 1)
else:
emptyDeck.setGeometry(10, 10, Card.width, Card.height)
self.setGeometry(Card.width + 40, 10, Card.width + 20, Card.height + 20)
self.mousePressEvent = self.getCard
self.setCurrentWidget(emptyDeck)
self.__class__.target = self
self.show()
def __iter__(self):
return self
def __next__(self):
if self.parent().card != False and len(self.parent().card) == 1:
self.parent().card[0].toggleActive()
self.parent().card = False
if self.count() > 1:
card = self.currentWidget()
self.removeWidget(card)
self.setCurrentIndex(self.count() - 1)
return card
else:
raise StopIteration
def leaf(self, event):
if self.count() > 1:
card = self.__next__()
card.leaf = 0
card.setParent(self.__class__.target)
card.setGeometry(10, 10, Card.width, Card.height)
self.__class__.target.addWidget(card)
self.__class__.target.setCurrentWidget(card)
else:
self.parent().score -= 100
for card in self.__class__.target:
card.leaf = 2
card.setParent(self)
card.setGeometry(0, 0, Card.width, Card.height)
card.imageView()
self.addWidget(card)
self.setCurrentWidget(card)
def getCard(self, event):
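        # Pyramid-style pairing rule (inferred from the checks below): a king
        # (rank value 13) is removed on its own, while two selected cards whose
        # rank values sum to 13 are removed together as a pair.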
card = self.currentWidget()
if self.parent().card == False:
if card._rank.value == 13:
card.setParent(None)
return
card.toggleActive()
self.parent().card = [card]
else:
if self.parent().card[0] is card:
card.toggleActive()
self.parent().card = False
return
if self.parent().card[0]._rank.value + card._rank.value == 13:
self.parent().score += 50
for link in self.parent()._pyramid.getLinks(self.card[1], self.card[2]):
link.leaf = link.leaf - 1
self.parent().card[0].setParent(None)
card.setParent(None)
else:
card.toggleActive()
self.parent().card = False
|
EpicUsaMan/univer
|
deck.py
|
Python
|
mit
| 3,550
|
#!/usr/bin/env python
# This file is part of cxqwatch, released under the MIT license.
# The MIT License (MIT)
# Copyright (c) 2014, 2016 Christian T. Jacobs
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from gi.repository import Gtk, GObject, Gdk, GdkPixbuf
import signal
import logging
from toolbar import *
from cx import *
from jobbook import *
from connection_dialog import *
class CXQWatch(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="cxqwatch")
self.set_size_request(800, 600) # Default to an 800 x 600 resolution.
# Kills the application if the close button is clicked on the main window itself.
self.connect("delete-event", Gtk.main_quit)
# The CX1 handler
self.cx = CX()
vbox_outer = Gtk.VBox()
self.add(vbox_outer)
self.toolbar = Toolbar(self)
vbox_outer.pack_start(self.toolbar, False, False, 0)
self.jobbook = JobBook(self, self.cx)
self.jobbook.set_scrollable(True)
vbox_outer.pack_start(self.jobbook, True, True, 0)
self.show_all()
return
def cx_connect(self, widget=None):
dialog = ConnectionDialog(self)
response = dialog.run()
if(response == Gtk.ResponseType.OK):
user_details = dialog.get_sources()
username = user_details["USERNAME"].get_text()
password = user_details["PASSWORD"].get_text()
dialog.destroy()
else:
dialog.destroy()
return
self.cx.login_connect(username, password)
self.jobbook.jobs.populate() # Initial population of the jobs window.
# Refresh the window every few minutes.
self.query_event = GObject.timeout_add(300000, self.jobbook.jobs.refresh)
return
def cx_disconnect(self, widget=None):
self.cx.login_disconnect()
try:
GObject.source_remove(self.query_event)
except AttributeError:
logging.error("Cannot remove timer since it doesn't exist.")
return
if(__name__ == "__main__"):
logging.basicConfig(level=logging.INFO,
format="%(asctime)s %(levelname)s: %(message)s",
datefmt="%Y-%m-%d %H:%M:%S")
signal.signal(signal.SIGINT, signal.SIG_DFL) # Exit if a SIGINT signal is captured.
application = CXQWatch() # Populate the main window and show it.
Gtk.main() # Start up the event loop!
|
ctjacobs/cxqwatch
|
cxqwatch.py
|
Python
|
mit
| 3,467
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
import pandas as pd
# Bokeh imports
from bokeh.util.testing import verify_all
# Module under test
#import bokeh.sampledata.us_marriages_divorces as bsu
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
ALL = (
'data',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
Test___all__ = pytest.mark.sampledata(verify_all("bokeh.sampledata.us_marriages_divorces", ALL))
@pytest.mark.sampledata
def test_data():
import bokeh.sampledata.us_marriages_divorces as bsu
assert isinstance(bsu.data, pd.DataFrame)
# check detail for package data
assert len(bsu.data) == 145
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
|
Karel-van-de-Plassche/bokeh
|
bokeh/sampledata/tests/test_us_marriages_divorces.py
|
Python
|
bsd-3-clause
| 2,015
|
"""
Functions for the analysis of integral field spectroscopy.
Author: Daniel Ruschel Dutra
Website: https://github.com/danielrd6/ifscube
"""
from numpy import *
import pyfits as pf
import spectools as st
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.integrate import trapz
from copy import deepcopy
from voronoi_2d_binning import voronoi_2d_binning
from scipy.optimize import curve_fit
from scipy.optimize import minimize
from scipy.ndimage import gaussian_filter as gf
from scipy.integrate import trapz
from scipy.interpolate import interp1d
from scipy import ndimage
import profiles as lprof
import ppxf
import ppxf_util
def progress(x, xmax, steps=10):
try:
if x%(xmax/steps) == 0:
print '{:2.0f}%\r'.format(float(x)/float(xmax)*100)
except ZeroDivisionError:
pass
class gmosdc:
"""
A class for dealing with data cubes, originally written to work
with GMOS IFU.
"""
def __init__(self, fitsfile, redshift=None, vortab=None):
"""
Initializes the class and loads basic information onto the
object.
Parameters:
-----------
        fitsfile : string
Name of the FITS file containing the GMOS datacube. This
should be the standard output from the GFCUBE task of the
GEMINI-GMOS IRAF package.
redshift : float
Value of redshift (z) of the source, if no Doppler
correction has
been applied to the spectra yet.
vortab : string
Name of the file containing the Voronoi binning table
Returns:
--------
Nothing.
"""
if len(pf.open(fitsfile)) == 2:
dataext, hdrext = 1,0
elif len(pf.open(fitsfile)) == 1:
dataext, hdrext = 0,0
self.data = pf.getdata(fitsfile,ext=dataext)
self.header_data = pf.getheader(fitsfile, ext=dataext)
self.header = pf.getheader(fitsfile, ext=hdrext)
self.wl = st.get_wl(fitsfile, hdrext=dataext, dimension=0,
dwlkey='CD3_3', wl0key='CRVAL3', pix0key='CRPIX3')
if redshift == None:
try:
redshift = self.header['REDSHIFT']
except KeyError:
print 'WARNING! Redshift not given and not found in the image'\
+ ' header. Using redshift = 0.'
redshift = 0.0
self.restwl = self.wl/(1.+redshift)
try:
if self.header['VORBIN'] and vortab != None:
self.voronoi_tab = vortab
self.binned = True
elif self.header['VORBIN'] and vortab == None:
print 'WARNING! Data has been binned but no binning table has'\
+ ' been given.'
self.binned = True
except KeyError:
self.binned = False
self.fitsfile = fitsfile
self.redshift = redshift
self.spec_indices = column_stack([
ravel(indices(shape(self.data)[1:])[0]),
ravel(indices(shape(self.data)[1:])[1])
])
def continuum(self, writefits=False, outimage=None, fitting_window=None,
copts=None):
"""
Evaluates a polynomial continuum for the whole cube and stores
it in self.cont.
"""
if self.binned:
v = loadtxt(self.voronoi_tab)
xy = v[unique(v[:,2], return_index=True)[1],:2]
else:
xy = self.spec_indices
fw = fitting_window
fwidx = (self.restwl > fw[0]) & (self.restwl < fw[1])
wl = deepcopy(self.restwl[fwidx])
data = deepcopy(self.data[fwidx])
c = zeros(shape(data), dtype='float32')
nspec = len(xy)
if copts == None:
copts = {'degr':3, 'upper_threshold':2,
'lower_threshold':2, 'niterate':5}
try:
copts['returns']
except KeyError:
copts['returns'] = 'function'
for k,h in enumerate(xy):
i,j = h
s = deepcopy(data[:,i,j])
if any(s[:20]) and any(s[-20:]):
try:
cont = st.continuum(wl, s, **copts)
if self.binned:
for l,m in v[v[:,2] == k,:2]:
c[:,l,m] = cont[1]
else:
c[:,i,j] = cont[1]
except TypeError:
print 'Could not find a solution for {:d},{:d}.'\
.format(i,j)
return wl, s
else:
c[:,i,j] = zeros(len(wl), dtype='float32')
self.cont = c
if writefits:
if outimage == None:
outimage = self.fitsfile.replace('.fits','_continuum.fits')
hdr = deepcopy(self.header_data)
try:
hdr['REDSHIFT'] = self.redshift
except KeyError:
hdr.append(('REDSHIFT', self.redshift,
'Redshift used in GMOSDC'))
hdr['CRVAL3'] = wl[0]
hdr.append(('CONTDEGR', copts['degr'],
'Degree of continuum polynomial'))
hdr.append(('CONTNITE', copts['niterate'],
'Continuum rejection iterations'))
hdr.append(('CONTLTR', copts['lower_threshold'],
'Continuum lower threshold'))
hdr.append(('CONTHTR', copts['upper_threshold'],
'Continuum upper threshold'))
pf.writeto(outimage, data=c, header=hdr)
return c
def snr_eval(self, wl_range=[6050,6200], copts=None):
"""
Measures the signal to noise ratio (SNR) for each spectrum in a
data cube, returning an image of the SNR.
Parameters:
-----------
self : gmosdc instance
gmosdc object
wl_range : array like
An array like object containing two wavelength coordinates
that define the SNR window at the rest frame.
copts : dictionary
Options for the continuum fitting function.
Returns:
--------
snr : numpy.ndarray
Image of the SNR for each spectrum.
Description:
------------
This method evaluates the SNR for each spectrum in a data
cube by measuring the residuals of a polynomial continuum
fit. The function CONTINUUM of the SPECTOOLS package is used
to provide the continuum, with zero rejection iterations
        and a 3rd order polynomial.
"""
noise = zeros(shape(self.data)[1:], dtype='float32')
signal = zeros(shape(self.data)[1:], dtype='float32')
snrwindow = (self.restwl >= wl_range[0]) &\
(self.restwl <= wl_range[1])
data = deepcopy(self.data)
wl = self.restwl[snrwindow]
if copts == None:
copts = {'niterate':0, 'degr':3, 'upper_threshold':3,
'lower_threshold':3, 'returns':'function'}
else:
copts['returns'] = 'function'
for i,j in self.spec_indices:
if any(data[snrwindow,i,j]):
s = data[snrwindow,i,j]
cont = st.continuum(wl, s, **copts)[1]
noise[i,j] = nanstd(s - cont)
signal[i,j] = nanmean(cont)
else:
noise[i,j],signal[i,j] = nan, nan
self.noise = noise
self.signal = signal
return array([signal,noise])
def wlprojection(self, wl0, fwhm=10, filtertype='box', writefits=False,
outimage='wlprojection.fits'):
"""
Writes a projection of the data cube along the wavelength
coordinate, with the flux given by a given type of filter.
Parameters:
-----------
wl0 : float
Central wavelength at the rest frame.
fwhm : float
Full width at half maximum. See 'filtertype'.
filtertype : string
Type of function to be multiplied by the spectrum to return
the argument for the integral.
'box' = Box function that is zero everywhere and 1
between wl0-fwhm/2 and wl0+fwhm/2.
'gaussian' = Normalized gaussian function with center at
wl0 and sigma = fwhm/(2*sqrt(2*log(2)))
outimage : string
Name of the output image
Returns:
--------
Nothing.
"""
if filtertype == 'box':
arrfilt = array( (self.restwl >= wl0-fwhm/2.) &
(self.restwl <= wl0+fwhm/2.), dtype='float')
arrfilt /= trapz(arrfilt,self.restwl)
elif filtertype == 'gaussian':
s = fwhm/(2.*sqrt(2.*log(2.)))
arrfilt = 1./sqrt(2*pi)*exp(-(self.restwl-wl0)**2/2./s**2)
else:
print 'ERROR! Parameter filtertype "{:s}" not understood.'\
.format(filtertype)
outim = zeros(shape(self.data)[1:], dtype='float32')
for i,j in self.spec_indices:
outim[i,j] = trapz(self.data[:,i,j]*arrfilt, self.restwl)
if writefits:
hdr = deepcopy(self.header)
try:
hdr['REDSHIFT'] = self.redshift
except KeyError:
hdr.append(('REDSHIFT', self.redshift, 'Redshift used in GMOSDC'))
hdr.append(('WLPROJ', True, 'Processed by WLPROJECTION?'))
hdr.append(('WLPRTYPE', filtertype,
'Type of filter used in projection.'))
hdr.append(('WLPRWL0', wl0, 'Central wavelength of the filter.'))
hdr.append(('WLPRFWHM', fwhm, 'FWHM of the projection filter.'))
pf.writeto(outimage,data=outim,header=hdr)
return outim
def plotspec(self, x, y):
"""
Plots the spectrum at coordinates x,y.
Parameters
----------
x,y : numbers or tuple
If x and y are numbers plots the spectrum at the specific
spaxel. If x and y are two element tuples plots the average
between x[0],y[0] and x[1],y[1]
Returns
-------
Nothing.
"""
fig = plt.figure(1)
ax = plt.axes()
try:
if len(x) == 2 and len(y) == 2:
s = average(average(self.data[:,y[0]:y[1],x[0]:x[1]], 1), 1)
except TypeError:
s = self.data[:,y,x]
ax.plot(self.restwl, s)
plt.show()
def linefit(self, p0, function='gaussian', fitting_window=None,
writefits=False, outimage=None, variance=None,
constraints=(), bounds=None, inst_disp=1.0, individual_spec=False,
min_method='SLSQP', minopts=None, copts=None,
refit=False, spiral_loop=False, spiral_center=None,
fit_continuum=True, refit_radius=3):
"""
Fits a spectral feature with a gaussian function and returns a
map of measured properties. This is a wrapper for the scipy
minimize function that basically iterates over the cube,
has a formula for the reduced chi squared, and applies
an internal scale factor to the flux.
Parameters
----------
p0 : iterable
            Initial guess for the fitting function, consisting of a list
            of 3N parameters for N components of **function**. In the
            case of a gaussian function, these parameters must be given
as [amplitude0, center0, sigma0, amplitude1, center1, ...].
function : string
The function to be fitted to the spectral features.
Available options and respective parameters are:
'gaussian' : amplitude, central wavelength in angstroms,
sigma in angstroms
'gauss_hermite' : amplitude, central wavelength in
angstroms, sigma in angstroms, h3 and h4
fitting_window : iterable
Lower and upper wavelength limits for the fitting
algorithm. These limits should allow for a considerable
portion of continuum besides the desired spectral features.
writefits : boolean
Writes the results in a FITS file.
outimage : string
Name of the FITS file in which to write the results.
variance : float, 1D, 2D or 3D array
            The variance of the flux measurements. It can be given
            in one of four formats. If variance is a float it is
            applied as a constant to the whole spectrum. If given as a 1D
            array it is assumed to be a spectrum that will be applied to
            the whole cube. As a 2D array, each spaxel will be applied
            equally to all wavelengths. Finally the 3D array must
            represent the variance for each element of the data cube.
It defaults to None, in which case it does not affect the
minimization algorithm, and the returned Chi2 will be in
fact just the fit residuals.
inst_disp : number
Instrumental dispersion in pixel units. This argument is
            used to evaluate the reduced chi squared. If left at the default
it is assumed that each wavelength coordinate is a degree
of freedom. The physically sound way to do it is to use the
number of dispersion elements in a spectrum as the degrees
of freedom.
bounds : sequence
Bounds for the fitting algorithm, given as a list of
[xmin, xmax] pairs for each x parameter.
constraints : dict or sequence of dicts
See scipy.optimize.minimize
min_method : string
Minimization method. See scipy.optimize.minimize.
minopts : dict
Dictionary of options to be passed to the minimization
routine. See scipy.optimize.minimize.
individual_spec : False or x,y pair
Pixel coordinates for the spectrum you wish to fit
individually.
copts : dict
Arguments to be passed to the spectools.continuum function.
refit : boolean
            Use parameters from nearby successful fits as the initial
guess for the next fit.
spiral_loop : boolean
Begins the fitting with the central spaxel and continues
spiraling outwards.
spiral_center : iterable
Central coordinates for the beginning of the spiral given
as a list of two coordinates [x0, y0]
fit_continuum : boolean
If True fits the continuum just before attempting to fit
the emission lines. Setting this option to False will
cause the algorithm to look for self.cont, which should
contain a data cube of continua.
Returns
-------
sol : numpy.ndarray
A data cube with the solution for each spectrum occupying
the respective position in the image, and each position in
the first axis giving the different parameters of the fit.
See also
--------
scipy.optimize.curve_fit, scipy.optimize.leastsq
"""
if function == 'gaussian':
fit_func = lprof.gauss
self.fit_func = lprof.gauss
npars_pc = 3
elif function == 'gauss_hermite':
fit_func = lprof.gausshermite
self.fit_func = lprof.gausshermite
npars_pc = 5
else:
raise NameError('Unknown function "{:s}".'.format(function))
if fitting_window != None:
fw = (self.restwl > fitting_window[0]) &\
(self.restwl < fitting_window[1])
else:
fw = Ellipsis
if copts == None:
copts = {'niterate':5, 'degr':4, 'upper_threshold':2,
'lower_threshold':2}
copts['returns'] = 'function'
try:
minopts['eps']
except TypeError:
if minopts == None:
minopts = {'eps': 1e-3}
else:
minopts['eps'] = 1e-3
wl = deepcopy(self.restwl[fw])
scale_factor = median(self.data[fw,:,:])
data = deepcopy(self.data[fw,:,:])/scale_factor
fit_status = ones(shape(data)[1:], dtype='float32')*-1
if len(shape(variance)) == 0:
if variance == None:
variance = 1.0
else:
variance = deepcopy(variance)/scale_factor**2
vcube = ones(shape(data), dtype='float32')
if len(shape(variance)) == 0:
vcube *= variance
elif len(shape(variance)) == 1:
for i,j in self.spec_indices:
vcube[:,i,j] = variance
elif len(shape(variance)) == 2:
for i,j in enumerate(vcube):
vcube[i] = variance
elif len(shape(variance)) == 3:
vcube = variance
npars = len(p0)
nan_solution = array([nan for i in range(npars+1)])
sol = zeros((npars+1,shape(self.data)[1], shape(self.data)[2]),
dtype='float32')
self.fitcont = zeros(shape(data), dtype='float32')
self.fitwl = wl
self.fitspec = zeros(shape(data), dtype='float32')
self.resultspec = zeros(shape(data), dtype='float32')
if self.binned:
vor = loadtxt(self.voronoi_tab)
xy = vor[unique(vor[:,2],return_index=True)[1],:2]
else:
xy = self.spec_indices
# Scale factor for the flux. Needed to avoid problems with
# the minimization algorithm.
flux_sf = ones(npars, dtype='float32')
flux_sf[arange(0, npars, npars_pc)] *= scale_factor
p0 /= flux_sf
if bounds != None:
bounds = array(bounds)
for i,j in enumerate(bounds):
j /= flux_sf[i]
Y, X = indices(shape(data)[1:])
if individual_spec:
xy = [individual_spec[::-1]]
elif spiral_loop:
if self.binned:
y, x = xy[:,0], xy[:,1]
else:
y, x = self.spec_indices[:,0], self.spec_indices[:,1]
if spiral_center == None:
r = sqrt((x - x.max()/2.)**2 + (y - y.max()/2.)**2)
else:
r = sqrt((x - spiral_center[0])**2 + (y - spiral_center[1])**2)
t = arctan2(y - y.max()/2., x - x.max()/2.)
t[t < 0] += 2*pi
b = array([(ravel(r)[i], ravel(t)[i]) for i in\
range(len(ravel(r)))], dtype=[('radius', 'f8'),\
('angle', 'f8')])
s = argsort(b, axis=0, order=['radius', 'angle'])
xy = column_stack([ravel(y)[s], ravel(x)[s]])
nspec = len(xy)
for k, h in enumerate(xy):
progress(k, nspec, 10)
i, j = h
if self.binned:
binNum = vor[(vor[:,0] == i)&(vor[:,1] == j), 2]
if ~any(data[:20,i,j]) or ~any(data[-20:,i,j]):
sol[:,i,j] = nan_solution
continue
v = vcube[:,i,j]
if fit_continuum:
cont = st.continuum(wl, data[:,i,j], **copts)[1]
else:
cont = self.cont[:,i,j]/scale_factor
s = data[:,i,j] - cont
# Avoids fitting if the spectrum is null.
try:
res = lambda x : sum( (s-fit_func(self.fitwl, x))**2/v )
if refit and k != 0:
radsol = sqrt((Y - i)**2 + (X - j)**2)
nearsol = sol[:-1, (radsol < refit_radius) &\
(fit_status == 0)]
if shape(nearsol) == (5, 1):
p0 = deepcopy(nearsol.transpose()/flux_sf)
elif any(nearsol):
p0 = deepcopy(average(nearsol.transpose(), 0)/flux_sf)
r = minimize(res, x0=p0, method=min_method, bounds=bounds,
constraints=constraints, options=minopts)
if r.status != 0:
print h, r.message
# Reduced chi squared of the fit.
chi2 = res(r['x'])
nu = len(s)/inst_disp - npars - 1
red_chi2 = chi2 / nu
p = append(r['x']*flux_sf, red_chi2)
fit_status[i,j] = r.status
except RuntimeError:
print 'Optimal parameters not found for spectrum {:d},{:d}'\
.format(int(i),int(j))
p = nan_solution
if self.binned:
for l, m in vor[vor[:,2] == binNum,:2]:
sol[:,l,m] = p
self.fitcont[:,l,m] = cont*scale_factor
self.fitspec[:,l,m] = (s+cont)*scale_factor
self.resultspec[:,l,m] = (cont+fit_func(self.fitwl,
r['x']))*scale_factor
else:
sol[:,i,j] = p
self.fitcont[:,i,j] = cont*scale_factor
self.fitspec[:,i,j] = (s+cont)*scale_factor
self.resultspec[:,i,j] = (cont+fit_func(self.fitwl, r['x']))\
*scale_factor
self.em_model = sol
self.fit_status = fit_status
p0 *= flux_sf
if writefits:
# Basic tests and first header
if outimage == None:
outimage = self.fitsfile.replace('.fits',
'_linefit.fits')
hdr = deepcopy(self.header_data)
try:
hdr['REDSHIFT'] = self.redshift
except KeyError:
hdr.append(('REDSHIFT', self.redshift,
'Redshift used in GMOSDC'))
# Creates MEF output.
h = pf.HDUList()
h.append(pf.PrimaryHDU(header=hdr))
# Creates the fitted spectrum extension
hdr = pf.Header()
hdr.append(('object', 'spectrum', 'Data in this extension'))
hdr.append(('CRPIX3', 1, 'Reference pixel for wavelength'))
hdr.append(('CRVAL3', wl[0], 'Reference value for wavelength'))
hdr.append(('CD3_3', average(diff(wl)),
'CD3_3'))
h.append(pf.ImageHDU(data=self.fitspec, header=hdr))
# Creates the fitted continuum extension.
hdr['object'] = 'continuum'
h.append(pf.ImageHDU(data=self.fitcont, header=hdr))
# Creates the fitted function extension.
hdr['object'] = 'fit'
h.append(pf.ImageHDU(data=self.resultspec, header=hdr))
# Creates the solution extension.
hdr['object'] = 'parameters'
hdr.append(('function', function, 'Fitted function'))
hdr.append(('nfunc', len(p)/3, 'Number of functions'))
h.append(pf.ImageHDU(data=sol, header=hdr))
# Creates the minimize's exit status extension
hdr['object'] = 'status'
h.append(pf.ImageHDU(data=fit_status, header=hdr))
h.writeto(outimage)
if individual_spec:
return wl, s*scale_factor, cont*scale_factor,\
fit_func(wl, p[:-1]), r
else:
return sol
def loadfit(self, fname):
"""
Loads the result of a previous fit, and put it in the
appropriate variables for the plotfit function.
Parameters
----------
fname : string
Name of the FITS file generated by gmosdc.linefit.
Returns
-------
Nothing.
"""
self.fitwl = st.get_wl(fname, pix0key='crpix3', wl0key='crval3',
dwlkey='cd3_3', hdrext=1, dataext=1)
self.fitspec = pf.getdata(fname, ext=1)
self.fitcont = pf.getdata(fname, ext=2)
self.resultspec = pf.getdata(fname, ext=3)
func_name = pf.getheader(fname, ext=4)['function']
if func_name == 'gaussian':
self.fit_func = lprof.gauss
if func_name == 'gauss_hermite':
self.fit_func = lprof.gausshermite
self.em_model = pf.getdata(fname, ext=4)
def eqw(self, amp_index=0, center_index=1, sigma_index=2, sigma_limit=3):
"""
Evaluates the equivalent width of a previous linefit.
"""
xy = self.spec_indices
eqw_model = zeros(shape(self.em_model)[1:], dtype='float32')
eqw_direct = zeros(shape(self.em_model)[1:], dtype='float32')
        fit_func = lambda x, a, b, c: a*exp(-(x-b)**2/2./c**2)
for i,j in xy:
cond = (self.fitwl > self.em_model[center_index,i,j]\
- sigma_limit*self.em_model[sigma_index,i,j])\
& (self.fitwl < self.em_model[center_index,i,j]\
+ sigma_limit*self.em_model[sigma_index,i,j])
fit = fit_func(self.fitwl[cond],
*self.em_model[[amp_index, center_index, sigma_index], i, j])
cont = self.fitcont[cond,i,j]
eqw_model[i,j] = trapz(1. - (fit+cont)/cont, x=self.fitwl[cond])
eqw_direct[i,j] = trapz(1. - self.data[cond,i,j]/cont,
x=self.restwl[cond])
return array([eqw_model,eqw_direct])
def plotfit(self, x, y):
"""
Plots the spectrum and features just fitted.
Parameters
----------
x : number
Horizontal coordinate of the desired spaxel.
y : number
Vertical coordinate of the desired spaxel.
Returns
-------
Nothing.
"""
fig = plt.figure(1)
plt.clf()
ax = plt.axes()
p = self.em_model[:-1,y,x]
c = self.fitcont[:,y,x]
wl = self.fitwl
f = self.fit_func
s = self.fitspec[:,y,x]
ax.plot(wl, c + f(wl, p))
ax.plot(wl, c)
ax.plot(wl, s)
if self.fit_func == lprof.gauss:
npars = 3
parnames = ('A', 'wl', 's')
elif self.fit_func == lprof.gausshermite:
npars = 5
parnames = ('A', 'wl', 's', 'h3', 'h4')
else:
raise NameError('Unkown fit function.')
if len(p) > npars:
for i in arange(0, len(p), npars):
ax.plot(wl, c + f(wl, p[i:i+npars]), 'k--')
pars = (npars*'{:10s}'+'\n').format(*parnames)
for i in arange(0, len(p), npars):
pars += (('{:10.2e}'+(npars-1)*'{:10.2f}'+'\n')\
.format(*p[i:i+npars]))
print pars
plt.show()
def channelmaps(self, channels=6, lambda0=None, velmin=None, velmax=None,
continuum_width=300, continuum_opts=None, sigma=1e-16):
"""
Creates velocity channel maps from a data cube.
Parameters
----------
channels : integer
Number of channel maps to build
lambda0 : number
Central wavelength of the desired spectral feature
vmin : number
            Minimum velocity in kilometers per second
vmax : number
Maximum velocity in kilometers per second
continuum_width : number
Width in wavelength for the continuum evaluation window
continuum_opts : dictionary
            Dictionary of options to be passed to the
spectools.continuum function
Returns
-------
"""
# Converting from velocities to wavelength
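        # (non-relativistic Doppler relation: lambda = lambda0 * (1 + v/c), with c in km/s)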
wlmin, wlmax = lambda0*(array([velmin, velmax])/2.99792e+5 + 1.)
wlstep = (wlmax - wlmin)/channels
wl_limits = arange(wlmin, wlmax + wlstep, wlstep)
side = int(ceil(sqrt(channels))) # columns
otherside = int(ceil(channels/side)) # lines
fig = plt.figure()
plt.clf()
if continuum_opts == None:
continuum_opts = {'niterate' : 3, 'degr' : 5,
'upper_threshold' : 3, 'lower_threshold' : 3}
cp = continuum_opts
cw = continuum_width
fw = lambda0 + array([-cw/2., cw/2.])
        # self.continuum accepts the continuum-fit options through 'copts'.
        cont = self.continuum(fitting_window=fw, copts=cp)
contwl = self.wl[ (self.wl > fw[0]) & (self.wl < fw[1]) ]
cont_wl2pix = interp1d(contwl, arange(len(contwl)))
for i in arange(channels):
ax = fig.add_subplot(otherside, side, i+1)
wl = self.restwl
wl0, wl1 = wl_limits[i], wl_limits[i+1]
print wl[(wl > wl0) & (wl < wl1)]
wlc, wlwidth = average([wl0, wl1]), (wl1-wl0)
f = self.wlprojection(wlc, fwhm=wlwidth, writefits=False,
filtertype='box') - cont[int(round(cont_wl2pix(wlc)))]
f[f < sigma] = nan
cp = continuum_opts
ax.imshow(f, interpolation='none', aspect=1)
ax.annotate('{:.0f}'.format((wlc - lambda0)/lambda0*2.99792e+5),
xy=(0.1, 0.8), xycoords='axes fraction', color='k')
if i%side != 0:
ax.set_yticklabels([])
if i/float( (otherside-1)*side ) < 1:
ax.set_xticklabels([])
fig.subplots_adjust(wspace=0, hspace=0)
plt.show()
def voronoi_binning(self, targetsnr=10.0, writefits=False,
outfile=None, clobber=False, writevortab=True):
"""
Applies Voronoi binning to the data cube, using Cappellari's
Python implementation.
Parameters:
-----------
targetsnr : float
Desired signal to noise ratio of the binned pixels
writefits : boolean
Writes a FITS image with the output of the binning.
outfile : string
Name of the output FITS file. If 'None' then the name of
the original FITS file containing the data cube will be used
as a root name, with '.bin' appended to it.
clobber : boolean
Overwrites files with the same name given in 'outfile'.
writevortab : boolean
Saves an ASCII table with the binning recipe.
Returns:
--------
Nothing.
"""
try:
x = shape(self.noise)
except AttributeError:
            print 'This function requires prior execution of the snr_eval '\
+ 'method.'
return
valid_spaxels = ravel(~isnan(self.signal))
x = ravel(indices(shape(self.signal))[0])[valid_spaxels]
y = ravel(indices(shape(self.signal))[1])[valid_spaxels]
xnan = ravel(indices(shape(self.signal))[0])[~valid_spaxels]
ynan = ravel(indices(shape(self.signal))[1])[~valid_spaxels]
s, n = deepcopy(self.signal), deepcopy(self.noise)
s[s <= 0] = average(self.signal[self.signal > 0])
n[n <= 0] = average(self.signal[self.signal > 0])*.5
signal, noise = ravel(s)[valid_spaxels], ravel(n)[valid_spaxels]
binNum, xNode, yNode, xBar, yBar, sn, nPixels, scale = \
voronoi_2d_binning(x, y, signal, noise, targetsnr, plot=1, quiet=0)
v = column_stack([x, y, binNum])
if writevortab:
savetxt('voronoi_binning.dat', v, fmt='%.2f\t%.2f\t%d')
binned = zeros(shape(self.data), dtype='float32')
binned[:, xnan, ynan] = nan
for i in arange(binNum.max()+1):
samebin = v[:,2] == i
samebin_coords = v[samebin,:2]
binspec = average(self.data[:,samebin_coords[:,0],
samebin_coords[:,1]], axis=1)
for k in samebin_coords:
binned[:,k[0],k[1]] = binspec
if writefits:
hdr = deepcopy(self.header_data)
try:
hdr['REDSHIFT'] = self.redshift
except KeyError:
hdr.append(('REDSHIFT', self.redshift,
'Redshift used in GMOSDC'))
hdr.append(('VORBIN',True,'Processed by Voronoi binning?'))
hdr.append(('VORTSNR',targetsnr,'Target SNR for Voronoi binning.'))
if outfile == None:
outfile = '{:s}bin.fits'.format(self.fitsfile[:-4])
pf.writeto(outfile,data=binned,header=hdr,clobber=clobber)
self.binned_cube = binned
def write_binnedspec(self, dopcor=False, writefits=False):
"""
Writes only one spectrum for each bin in a FITS file.
"""
xy = self.spec_indices
unique_indices = xy[unique(self.data[1400,:,:], return_index=True)[1]]
if dopcor:
try:
shape(self.em_model)
except AttributeError:
print 'ERROR! This function requires the gmosdc.em_model'\
+ ' attribute to be defined.'
return
            for k, (i, j) in enumerate(unique_indices):
z = self.em_model[0,i,j]/2.998e+5
                interp_spec = interp1d(self.restwl/(1.+z), self.data[:, i, j])
if k == 0:
specs = interp_spec(self.restwl)
else:
specs = row_stack([specs,interp_spec(self.restwl)])
else:
specs = row_stack([self.data[:,i,j] for i,j in unique_indices])
return specs
def ppxf_kinematics(self, fitting_window, base_wl, base_spec,
base_cdelt, writefits=True, outimage=None,
vel=0, sigma=180, fwhm_gal=2, fwhm_model=1.8, noise=0.05,
individual_spec=False, plotfit=False, quiet=False,
deg=4):
"""
Executes pPXF fitting of the stellar spectrum over the whole
data cube.
Parameters
----------
fitting_window : array-like
Initial and final values of wavelength for fitting.
base_wl : array
Wavelength coordinates of the base spectra.
base_spec : array
Flux density coordinates of the base spectra.
base_cdelt : number
Step in wavelength coordinates.
Returns
-------
Nothing
Description
-----------
        This function is merely a wrapper for Michele Cappellari's pPXF
Python algorithm for penalized pixel fitting of stellar
spectra.
"""
w0, w1 = fitting_window
fw = (self.wl >= w0)&(self.wl < w1)
# Here we use the goodpixels as the fitting window
gp = arange(shape(self.data)[0])[fw]
lamRange1 = self.wl[[1, -1]]
gal_lin = deepcopy(self.data[:,0,0])
galaxy, logLam1, velscale = ppxf_util.log_rebin(lamRange1,
gal_lin)
lamRange2 = base_wl[[1,-1]]
ssp = base_spec[0]
sspNew, logLam2, velscale = ppxf_util.log_rebin(lamRange2, ssp,
velscale=velscale)
templates = empty((sspNew.size, len(base_spec)))
# Convolve the whole Vazdekis library of spectral templates
# with the quadratic difference between the SAURON and the
# Vazdekis instrumental resolution. Logarithmically rebin
# and store each template as a column in the array TEMPLATES.
# Quadratic sigma difference in pixels Vazdekis --> SAURON
# The formula below is rigorously valid if the shapes of the
# instrumental spectral profiles are well approximated by
# Gaussians.
FWHM_dif = sqrt(fwhm_gal**2 - fwhm_model**2)
# Sigma difference in pixels
sigma = FWHM_dif/2.355/base_cdelt
for j in range(len(base_spec)):
ssp = base_spec[j]
ssp = ndimage.gaussian_filter1d(ssp,sigma)
sspNew, logLam2, velscale = ppxf_util.log_rebin(lamRange2, ssp,
velscale=velscale)
# Normalizes templates
templates[:,j] = sspNew/median(sspNew)
c = 299792.458
dv = (logLam2[0]-logLam1[0])*c # km/s
z = exp(vel/c) - 1
# Here the actual fit starts.
start = [vel, 180.] # (km/s), starting guess for [V,sigma]
# Assumes uniform noise accross the spectrum
noise = zeros(shape(self.data)[0], dtype='float32') + noise
if self.binned:
vor = loadtxt(self.voronoi_tab)
xy = vor[unique(vor[:,2],return_index=True)[1],:2]
else:
xy = self.spec_indices
if individual_spec:
xy = [individual_spec[::-1]]
ppxf_sol = zeros((4, shape(self.data)[1], shape(self.data)[2]),
dtype='float32')
ppxf_spec = zeros(shape(self.data), dtype='float32')
ppxf_model = zeros(shape(ppxf_spec), dtype='float32')
nspec = len(xy)
for k, h in enumerate(xy):
progress(k, nspec, 10)
i, j = h
if self.binned:
binNum = vor[(vor[:,0] == i)&(vor[:,1] == j), 2]
gal_lin = deepcopy(self.data[:,i,j])
galaxy, logLam1, velscale = ppxf_util.log_rebin(lamRange1, gal_lin)
# Normalize spectrum to avoid numerical issues.
galaxy = galaxy/median(galaxy)
# Assume constant noise per pixel here.
# This should be changed in the future.
galaxy = deepcopy(self.data[:,i,j])
galaxy = galaxy/median(galaxy)
pp = ppxf.ppxf(templates, galaxy, noise, velscale, start,
goodpixels=gp, plot=plotfit, moments=4, degree=deg, vsyst=dv,
quiet=quiet)
if self.binned:
for l, m in vor[vor[:,2] == binNum,:2]:
ppxf_sol[:,l,m] = pp.sol
ppxf_spec[:,l,m] = pp.galaxy
ppxf_model[:,l,m] = pp.bestfit
else:
ppxf_sol[:,i,j] = pp.sol
ppxf_spec[:,i,j] = pp.galaxy
ppxf_model[:,i,j] = pp.bestfit
self.ppxf_sol = ppxf_sol
self.ppxf_spec = ppxf_spec
self.ppxf_model = ppxf_model
if writefits:
# Basic tests and first header
if outimage == None:
outimage = self.fitsfile.replace('.fits',
'_ppxf.fits')
hdr = deepcopy(self.header_data)
try:
hdr['REDSHIFT'] = self.redshift
except KeyError:
hdr.append(('REDSHIFT', self.redshift,
'Redshift used in GMOSDC'))
# Creates MEF output.
h = pf.HDUList()
h.append(pf.PrimaryHDU(header=hdr))
# Creates the fitted spectrum extension
hdr = pf.Header()
hdr.append(('object', 'spectrum', 'Data in this extension'))
hdr.append(('CRPIX3', 1, 'Reference pixel for wavelength'))
hdr.append(('CRVAL3', self.wl[0],
'Reference value for wavelength'))
hdr.append(('CD3_3', average(diff(self.wl)),
'CD3_3'))
h.append(pf.ImageHDU(data=self.ppxf_spec, header=hdr))
# Creates the fitted model extension.
hdr['object'] = 'model'
h.append(pf.ImageHDU(data=self.ppxf_model, header=hdr))
# Creates the solution extension.
hdr['object'] = 'parameters'
h.append(pf.ImageHDU(data=self.ppxf_sol, header=hdr))
h.writeto(outimage)
def lineflux(self, amplitude, sigma):
"""
Calculates the flux in a line given the amplitude and sigma
of the gaussian function that fits it.
"""
lf = amplitude * abs(sigma) * sqrt(2.*pi)
return lf
|
danielrd6/ifscube
|
cubetools.py
|
Python
|
gpl-3.0
| 39,584
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# Marche - A server control daemon
# Copyright (c) 2015-2016 by the authors, see LICENSE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Georg Brandl <g.brandl@fz-juelich.de>
#
# *****************************************************************************
""".. index:: nicos; job
NICOS job
=========
This is a job for controlling all NICOS_ services configured on the host.
.. _NICOS: http://nicos-controls.org/
This job has the following configuration parameters:
.. describe:: [job.xxx]
.. describe:: type
Must be ``nicos``.
.. describe:: root
The root of the NICOS installation, which should contain ``nicos.conf``.
If not given, it is derived from the init script
``/etc/init.d/nicos-system``, which is normally a symbolic link to the
file below the NICOS root.
.. describe:: permissions
pollinterval
The :ref:`standard parameters <standard-params>` present for all jobs.
No further configuration is necessary; the job will read the NICOS
configuration file ``nicos.conf`` and derive parameters like available
services and their logfiles from there.
"""
from os import path
from marche.six.moves import configparser
from marche.jobs import DEAD, RUNNING, WARNING
from marche.jobs.base import Job as BaseJob
from marche.utils import extract_loglines
class Job(BaseJob):
DEFAULT_INIT = '/etc/init.d/nicos-system'
def configure(self, config):
self._services = []
self._proc = None
if 'root' in config:
self._root = config['root']
self._script = path.join(self._root, 'etc', 'nicos-system')
else:
# determine the NICOS root from the init script, which is a symlink
# to the init script in the NICOS root
real_init = path.realpath(self.DEFAULT_INIT)
self._root = path.dirname(path.dirname(real_init))
self._script = self.DEFAULT_INIT
self._logpath = None
def check(self):
if not path.exists(self._script):
self.log.warning('%s missing' % self._script)
return False
return True
def init(self):
self._services = [('nicos-system', '')]
lines = self._sync_call('%s 2>&1' % self._script).stdout
prefix = 'Possible services are '
if len(lines) >= 2 and lines[-1].startswith(prefix):
self._services.extend(('nicos', entry.strip()) for entry in
lines[-1][len(prefix):].split(','))
BaseJob.init(self)
def get_services(self):
return self._services
def start_service(self, service, instance):
if service == 'nicos-system':
return self._async_start(None, self._script + ' start')
return self._async_start(None, self._script + ' start %s' % instance)
def stop_service(self, service, instance):
if service == 'nicos-system':
return self._async_stop(None, self._script + ' stop')
return self._async_stop(None, self._script + ' stop %s' % instance)
def restart_service(self, service, instance):
if service == 'nicos-system':
return self._async_start(None, self._script + ' restart')
return self._async_start(None, self._script + ' restart %s' % instance)
def service_status(self, service, instance):
async_st = self._async_status_only(None)
if async_st is not None:
return async_st, ''
if service == 'nicos-system':
output = self._sync_call('%s status' % self._script).stdout
something_dead = something_running = False
for line in output:
if 'dead' in line:
something_dead = True
if 'running' in line:
something_running = True
if something_dead and something_running:
return WARNING, 'only some services running'
elif something_running:
return RUNNING, ''
return DEAD, ''
else:
proc = self._sync_call(self._script + ' status %s' % instance)
return RUNNING if proc.retcode == 0 else DEAD, ''
def service_output(self, service, instance):
return list(self._output.get(None, []))
def service_logs(self, service, instance):
if service == 'nicos-system':
return {}
if self._logpath is None:
# extract nicos log directory
cfg = configparser.RawConfigParser()
cfg.read([path.join(self._root, 'nicos.conf')])
if cfg.has_option('nicos', 'logging_path'): # pragma: no cover
self._logpath = cfg.get('nicos', 'logging_path')
else:
self._logpath = path.join(self._root, 'log')
return extract_loglines(path.join(self._logpath, instance, 'current'))
|
birkenfeld/marche
|
marche/jobs/nicos.py
|
Python
|
gpl-2.0
| 5,669
|
#!/usr/bin/python
import xmlrpclib
server = xmlrpclib.Server("http://localhost:8086")
session = server.Session.do_login_with_password("user", "passwd")['Value']
server.VM.do_unpause(session, '7366a41a-e50e-b891-fa0c-ca5b4d2e3f1c')
|
sharady/xen-api
|
ocaml/idl/ocaml_backend/python/unpause_vm.py
|
Python
|
lgpl-2.1
| 233
|
# -*- coding: utf-8 -*-
"""
Helper functions used in views.
"""
import csv
from json import dumps
from functools import wraps
from datetime import datetime
from flask import Response
from main import app
import logging
log = logging.getLogger(__name__) # pylint: disable=invalid-name
def jsonify(function):
"""
Creates a response with the JSON representation of wrapped function result.
"""
@wraps(function)
def inner(*args, **kwargs):
"""
This docstring will be overridden by @wraps decorator.
"""
return Response(
dumps(function(*args, **kwargs)),
mimetype='application/json'
)
return inner
def get_data():
"""
Extracts presence data from CSV file and groups it by user_id.
It creates structure like this:
data = {
'user_id': {
datetime.date(2013, 10, 1): {
'start': datetime.time(9, 0, 0),
'end': datetime.time(17, 30, 0),
},
datetime.date(2013, 10, 2): {
'start': datetime.time(8, 30, 0),
'end': datetime.time(16, 45, 0),
},
}
}
"""
data = {}
with open(app.config['DATA_CSV'], 'r') as csvfile:
presence_reader = csv.reader(csvfile, delimiter=',')
for i, row in enumerate(presence_reader):
if len(row) != 4:
# ignore header and footer lines
continue
try:
user_id = int(row[0])
date = datetime.strptime(row[1], '%Y-%m-%d').date()
start = datetime.strptime(row[2], '%H:%M:%S').time()
end = datetime.strptime(row[3], '%H:%M:%S').time()
except (ValueError, TypeError):
log.debug('Problem with line %d: ', i, exc_info=True)
data.setdefault(user_id, {})[date] = {'start': start, 'end': end}
return data
def group_by_weekday(items):
"""
Groups presence entries by weekday.
"""
result = [[], [], [], [], [], [], []] # one list for every day in week
for date in items:
start = items[date]['start']
end = items[date]['end']
result[date.weekday()].append(interval(start, end))
return result
def group_by_start_end(items):
"""
Groups entries by weekday for start and end.
"""
result = {i: {'start': [], 'end': []} for i in range(7)}
for date in items:
start = items[date]['start']
end = items[date]['end']
result[date.weekday()]['start'].append(
seconds_since_midnight(start)
)
result[date.weekday()]['end'].append(
seconds_since_midnight(end)
)
return result
def seconds_since_midnight(time):
"""
Calculates amount of seconds since midnight.
"""
return time.hour * 3600 + time.minute * 60 + time.second
def interval(start, end):
"""
Calculates inverval in seconds between two datetime.time objects.
"""
return seconds_since_midnight(end) - seconds_since_midnight(start)
def mean(items):
"""
Calculates arithmetic mean. Returns zero for empty lists.
"""
return float(sum(items)) / len(items) if len(items) > 0 else 0
|
stxnext-kindergarten/presence-analyzer-dbrzoskowski
|
src/presence_analyzer/utils.py
|
Python
|
mit
| 3,251
|
import argparse
import subprocess
import ntpath
import tempfile
import shutil
parser = argparse.ArgumentParser(description='Compile to SPIRV and generate header/implementation')
parser.add_argument('files', metavar='files', nargs='+', help='list of glsl files')
parser.add_argument('--output', action='store', dest='output', help='output file')
parser.add_argument('--compiler', action='store', dest='compiler', help='location of spirv compiler')
parser.add_argument('--vulkan_version', action='store', dest='version', help='vulkan version')
args = parser.parse_args()
# create temp dir
dirpath = tempfile.mkdtemp()
def genCArray(file):
basename = ntpath.basename(file).replace('.', '_')
temp_file = dirpath + '/' + basename + '.txt'
try:
subprocess.check_output([args.compiler,'--target-env', 'vulkan' + args.version, '-V',file,'-x','-o',temp_file]).decode('utf-8')
except subprocess.CalledProcessError as e:
print(e.output)
content = None
with open(temp_file, 'r') as content_file:
content = content_file.read()
array = 'const uint32_t _' + basename + '[] = {\n' + content + '\n};\n'
spirv = 'Vortex::Renderer::SpirvBinary ' + basename + '(_' + basename + ');\n'
return array + spirv
def genCArrayDef(file):
basename = ntpath.basename(file).replace('.', '_')
return 'extern Vortex::Renderer::SpirvBinary ' + basename + ';\n'
output = ntpath.basename(args.output)
with open(args.output + '.h', 'w') as f:
f.write('/* This header is autogenerated */\n')
f.write('#ifndef GENERATED_' + output.upper() +'_H\n')
f.write('#define GENERATED_' + output.upper() + '_H\n\n')
f.write('''#include <cstdint>
namespace Vortex
{
namespace Renderer
{
class SpirvBinary;
}
namespace SPIRV
{
''')
for file in args.files:
f.write(genCArrayDef(file))
f.write('''
}
}
#endif
''')
with open(args.output + '.cpp', 'w') as f:
f.write('''/* This header is autogenerated */
#include <Vortex/Renderer/Device.h>
''')
f.write('#include "' + ntpath.basename(args.output) + '.h"\n\n')
f.write('''
namespace Vortex
{
namespace SPIRV
{
''')
for file in args.files:
f.write(genCArray(file))
f.write('''
}
}
''')
# remove temp dir
shutil.rmtree(dirpath)
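
# A hedged usage sketch (editor's illustration): the shader names, output path
# and compiler location below are assumed placeholders, not values from this
# project.
#
#   python GenerateSPIRV.py Shaders/fluid.vert Shaders/fluid.frag \
#       --output Generated/Shaders --compiler glslangValidator --vulkan_version 1.1
#
# This emits Generated/Shaders.h and Generated/Shaders.cpp with one SpirvBinary
# symbol per input shader, named after the file with '.' replaced by '_'.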
|
mmaldacker/Vortex2D
|
Scripts/GenerateSPIRV.py
|
Python
|
mit
| 2,206
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from measurements import session_restore
from telemetry import test
# crbug.com/325479: Disabling this test for now since it never ran before.
@test.Disabled('android', 'linux')
class SessionRestoreColdTypical25(test.Test):
tag = 'cold'
test = session_restore.SessionRestore
page_set = 'page_sets/typical_25.json'
options = {'cold': True,
'pageset_repeat': 5}
class SessionRestoreWarmTypical25(test.Test):
tag = 'warm'
test = session_restore.SessionRestore
page_set = 'page_sets/typical_25.json'
options = {'warm': True,
'pageset_repeat': 20}
|
patrickm/chromium.src
|
tools/perf/benchmarks/session_restore.py
|
Python
|
bsd-3-clause
| 752
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from json import loads
from collections import defaultdict
from qiita_core.util import execute_as_transaction
from qiita_core.qiita_settings import r_client
from qiita_db.util import generate_analyses_list_per_study
from qiita_db.metadata_template.sample_template import SampleTemplate
from qiita_db.exceptions import QiitaDBUnknownIDError
from qiita_db.exceptions import QiitaDBColumnError
from qiita_db.processing_job import ProcessingJob
from qiita_pet.handlers.api_proxy.util import check_access
SAMPLE_TEMPLATE_KEY_FORMAT = 'sample_template_%s'
def _check_sample_template_exists(samp_id):
"""Make sure a sample template exists in the system
Parameters
----------
samp_id : int or str castable to int
SampleTemplate id to check
Returns
-------
dict
{'status': status,
'message': msg}
"""
if not SampleTemplate.exists(int(samp_id)):
return {'status': 'error',
'message': 'Sample template %d does not exist' % int(samp_id)
}
return {'status': 'success',
'message': ''}
def sample_template_get_req(samp_id, user_id):
"""Gets the json of the full sample template
Parameters
----------
samp_id : int or int castable string
SampleTemplate id to get info for
user_id : str
User requesting the sample template info
Returns
-------
dict of objects
{'status': status,
'message': msg,
'template': dict of {str: {str: object, ...}, ...}
        template is a dictionary where the keys are the metadata samples
and the values are a dictionary of column and value.
Format {sample: {column: value, ...}, ...}
"""
exists = _check_sample_template_exists(int(samp_id))
if exists['status'] != 'success':
return exists
access_error = check_access(int(samp_id), user_id)
if access_error:
return access_error
template = SampleTemplate(int(samp_id))
access_error = check_access(template.study_id, user_id)
if access_error:
return access_error
df = template.to_dataframe()
return {'status': 'success',
'message': '',
'template': df.to_dict(orient='index')}
def sample_template_samples_get_req(samp_id, user_id):
"""Returns list of samples in the sample template
Parameters
----------
samp_id : int or str typecastable to int
SampleTemplate id to get info for
user_id : str
User requesting the sample template info
Returns
-------
dict
Returns summary information in the form
{'status': str,
'message': str,
'samples': list of str}
samples is list of samples in the template
"""
exists = _check_sample_template_exists(int(samp_id))
if exists['status'] != 'success':
return exists
access_error = check_access(samp_id, user_id)
if access_error:
return access_error
return {'status': 'success',
'message': '',
'samples': sorted(x for x in SampleTemplate(int(samp_id)))
}
def sample_template_meta_cats_get_req(samp_id, user_id):
"""Returns list of metadata categories in the sample template
Parameters
----------
samp_id : int or str typecastable to int
SampleTemplate id to get info for
user_id : str
User requesting the sample template info
Returns
-------
dict
Returns information in the form
{'status': str,
'message': str,
'categories': list of str}
samples is list of metadata categories in the template
"""
exists = _check_sample_template_exists(int(samp_id))
if exists['status'] != 'success':
return exists
access_error = check_access(samp_id, user_id)
if access_error:
return access_error
return {'status': 'success',
'message': '',
'categories': sorted(SampleTemplate(int(samp_id)).categories)
}
def sample_template_category_get_req(category, samp_id, user_id):
"""Returns dict of values for each sample in the given category
Parameters
----------
category : str
Metadata category to get values for
samp_id : int or str typecastable to int
SampleTemplate id to get info for
user_id : str
User requesting the sample template info
Returns
-------
dict
Returns information in the form
{'status': str,
'message': str,
'values': dict of {str: object}}
"""
exists = _check_sample_template_exists(int(samp_id))
if exists['status'] != 'success':
return exists
access_error = check_access(samp_id, user_id)
if access_error:
return access_error
st = SampleTemplate(int(samp_id))
try:
values = st.get_category(category)
except QiitaDBColumnError:
return {'status': 'error',
'message': 'Category %s does not exist in sample template' %
category}
return {'status': 'success',
'message': '',
'values': values}
def analyses_associated_with_study(study_id, user_id):
"""Returns all available analyses in study_id
Parameters
----------
study_id : int or str typecastable to int
Study id to get info for
user_id : str
User requesting the sample template info
Returns
-------
dict
Returns information in the form
{'status': str,
'message': str,
'values': list of [qiita_db.analysis.Analysis,
prep_ids for this study]}
"""
access_error = check_access(study_id, user_id)
if access_error:
return access_error
values = generate_analyses_list_per_study(study_id)
return {'status': 'success',
'message': '',
'values': values}
def get_sample_template_processing_status(st_id):
# Initialize variables here
processing = False
alert_type = ''
alert_msg = ''
job_info = r_client.get(SAMPLE_TEMPLATE_KEY_FORMAT % st_id)
if job_info:
job_info = defaultdict(lambda: '', loads(job_info))
job_id = job_info['job_id']
job = ProcessingJob(job_id)
job_status = job.status
processing = job_status not in ('success', 'error')
if processing:
alert_type = 'info'
alert_msg = 'This sample template is currently being processed'
elif job_status == 'error':
alert_type = 'danger'
alert_msg = job.log.msg.replace('\n', '</br>')
else:
alert_type = job_info['alert_type']
alert_msg = job_info['alert_msg'].replace('\n', '</br>')
return processing, alert_type, alert_msg
@execute_as_transaction
def sample_template_filepaths_get_req(study_id, user_id):
"""Returns all the filepaths attached to the sample template
Parameters
----------
study_id : int
The current study object id
user_id : str
The current user object id
Returns
-------
dict
Filepaths in the form
{'status': status,
'message': msg,
'filepaths': filepaths}
status can be success, warning, or error depending on result
message has the warnings or errors
filepaths is a list of tuple of int and str
All files in the sample template, as [(id, URL), ...]
"""
exists = _check_sample_template_exists(int(study_id))
if exists['status'] != 'success':
return exists
access_error = check_access(study_id, user_id)
if access_error:
return access_error
try:
template = SampleTemplate(int(study_id))
except QiitaDBUnknownIDError as e:
return {'status': 'error',
'message': str(e)}
return {'status': 'success',
'message': '',
'filepaths': template.get_filepaths()
}
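
# A minimal usage sketch (editor's illustration): the category name, template
# id and email below are assumed placeholders, not values from Qiita itself.
#
#   resp = sample_template_category_get_req('host_subject_id', 1, 'user@example.com')
#   if resp['status'] == 'success':
#       values = resp['values']   # dict of {sample_id: value}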
|
antgonza/qiita
|
qiita_pet/handlers/api_proxy/sample_template.py
|
Python
|
bsd-3-clause
| 8,355
|
from vsg.rules import token_case
from vsg.token import architecture_body as token
class rule_028(token_case):
'''
This rule checks the **architecture** keyword in the **end architecture** has proper case.
|configuring_uppercase_and_lowercase_rules_link|
**Violation**
.. code-block:: vhdl
end ARCHITECTURE;
end Architecture;
**Fix**
.. code-block:: vhdl
end architecture;
end architecture;
'''
def __init__(self):
token_case.__init__(self, 'architecture', '028', [token.end_architecture_keyword])
self.groups.append('case::keyword')
|
jeremiah-c-leary/vhdl-style-guide
|
vsg/rules/architecture/rule_028.py
|
Python
|
gpl-3.0
| 627
|
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
import unittest
from datetime import date
from dateutil.relativedelta import relativedelta
import holidays
class TestNZ(unittest.TestCase):
def setUp(self):
self.holidays = holidays.NZ(observed=True)
def test_new_years(self):
for year in range(1900, 2100):
dt = date(year, 1, 1)
self.assertIn(dt, self.holidays)
for year, day in enumerate(
[
1,
1,
1,
1,
3, # 2001-05
3,
1,
1,
1,
1, # 2006-10
3,
3,
1,
1,
1, # 2011-15
1,
3,
1,
1,
1,
1,
], # 2016-21
2001,
):
dt = date(year, 1, day)
self.assertIn(dt, self.holidays)
self.assertEqual(self.holidays[dt][:10], "New Year's")
self.assertNotIn("1893-01-01", self.holidays)
self.assertIn("1894-01-01", self.holidays)
def test_day_after_new_years(self):
for year in range(1900, 2100):
dt = date(year, 1, 2)
self.assertIn(dt, self.holidays)
for year, day in enumerate(
[
2,
2,
2,
2,
2, # 2001-05
2,
2,
2,
2,
4, # 2006-10
4,
2,
2,
2,
2, # 2011-15
4,
2,
2,
2,
2,
4,
], # 2016-21
2001,
):
dt = date(year, 1, day)
self.assertIn(dt, self.holidays)
self.assertEqual(self.holidays[dt][:10], "Day after ")
self.assertNotIn(date(2016, 1, 3), self.holidays)
def test_waitangi_day(self):
ntl_holidays = holidays.NZ(subdiv="Northland")
for year, day in enumerate([3, 8, 7, 6, 5], 1964):
dt = date(year, 2, day)
self.assertIn(dt, ntl_holidays, dt)
self.assertEqual(ntl_holidays[dt][:8], "Waitangi")
for year in range(1900, 1974):
dt = date(year, 2, 6)
self.assertNotIn(dt, self.holidays)
for year in range(1974, 2100):
dt = date(year, 2, 6)
self.assertIn(dt, self.holidays)
for year, day in enumerate(
[
6,
6,
6,
6,
6, # 2001-05
6,
6,
6,
6,
6, # 2006-10
6,
6,
6,
6,
6, # 2011-15
8,
6,
6,
6,
6,
8,
], # 2016-21
2001,
):
dt = date(year, 2, day)
self.assertIn(dt, self.holidays)
self.assertEqual(self.holidays[dt][:8], "Waitangi")
self.assertNotIn(date(2005, 2, 7), self.holidays)
self.assertNotIn(date(2010, 2, 8), self.holidays)
self.assertNotIn(date(2011, 2, 7), self.holidays)
def test_good_friday(self):
for dt in [
date(1900, 4, 13),
date(1901, 4, 5),
date(1902, 3, 28),
date(1999, 4, 2),
date(2000, 4, 21),
date(2010, 4, 2),
date(2018, 3, 30),
date(2019, 4, 19),
date(2020, 4, 10),
]:
self.assertIn(dt, self.holidays)
self.assertNotIn(dt + relativedelta(days=-1), self.holidays)
self.assertNotIn(dt + relativedelta(days=+1), self.holidays)
def test_easter_monday(self):
for dt in [
date(1900, 4, 16),
date(1901, 4, 8),
date(1902, 3, 31),
date(1999, 4, 5),
date(2010, 4, 5),
date(2018, 4, 2),
date(2019, 4, 22),
date(2020, 4, 13),
]:
self.assertIn(dt, self.holidays)
self.assertNotIn(dt + relativedelta(days=-1), self.holidays)
self.assertNotIn(dt + relativedelta(days=+1), self.holidays)
def test_anzac_day(self):
for year in range(1900, 1921):
dt = date(year, 4, 25)
self.assertNotIn(dt, self.holidays)
for year in range(1921, 2100):
dt = date(year, 4, 25)
self.assertIn(dt, self.holidays)
for year, day in enumerate(
[
25,
25,
25,
25,
25, # 2001-05
25,
25,
25,
25,
25, # 2006-10
25,
25,
25,
25,
27, # 2011-15
25,
25,
25,
25,
27,
26,
], # 2016-21
2001,
):
dt = date(year, 4, day)
self.assertIn(dt, self.holidays, dt)
self.assertEqual(self.holidays[dt][:5], "Anzac")
self.assertNotIn(date(2009, 4, 27), self.holidays)
self.assertNotIn(date(2010, 4, 26), self.holidays)
def test_sovereigns_birthday(self):
self.assertIn(date(1909, 11, 9), self.holidays)
self.assertIn(date(1936, 6, 23), self.holidays)
self.assertIn(date(1937, 6, 9), self.holidays)
self.assertIn(date(1940, 6, 3), self.holidays)
self.assertIn(date(1952, 6, 2), self.holidays)
for year in range(1912, 1936):
dt = date(year, 6, 3)
self.assertIn(dt, self.holidays)
self.assertEqual(self.holidays[dt], "King's Birthday")
for year, day in enumerate(
[
4,
3,
2,
7,
6, # 2001-05
5,
4,
2,
1,
7, # 2006-10
6,
4,
3,
2,
1, # 2011-15
6,
5,
4,
3,
1,
7,
], # 2016-21
2001,
):
dt = date(year, 6, day)
self.assertIn(dt, self.holidays, dt)
self.assertEqual(self.holidays[dt], "Queen's Birthday")
def test_matariki(self):
for dt in [
date(2022, 6, 24),
date(2023, 7, 14),
date(2024, 6, 28),
date(2025, 6, 20),
date(2026, 7, 10),
date(2027, 6, 25),
date(2028, 7, 14),
date(2029, 7, 6),
date(2030, 6, 21),
date(2031, 7, 11),
date(2032, 7, 2),
date(2033, 6, 24),
date(2034, 7, 7),
date(2035, 6, 29),
date(2036, 7, 18),
date(2037, 7, 10),
date(2038, 6, 25),
date(2039, 7, 15),
date(2040, 7, 6),
date(2041, 7, 19),
date(2042, 7, 11),
date(2043, 7, 3),
date(2044, 6, 24),
date(2045, 7, 7),
date(2046, 6, 29),
date(2047, 7, 19),
date(2048, 7, 3),
date(2049, 6, 25),
date(2050, 7, 15),
date(2051, 6, 30),
date(2052, 6, 21),
]:
self.assertIn(dt, self.holidays)
self.assertEqual(self.holidays[dt], "Matariki")
self.assertNotIn(dt + relativedelta(days=-1), self.holidays)
self.assertNotIn(dt + relativedelta(days=+1), self.holidays)
def test_labour_day(self):
for year, day in enumerate(
[
22,
28,
27,
25,
24, # 2001-05
23,
22,
27,
26,
25, # 2006-10
24,
22,
28,
27,
26, # 2011-15
24,
23,
22,
28,
26,
25,
], # 2016-21
2001,
):
dt = date(year, 10, day)
self.assertIn(dt, self.holidays, dt)
self.assertEqual(self.holidays[dt], "Labour Day")
def test_christmas_day(self):
self.holidays.observed = False
for year in range(1900, 2100):
dt = date(year, 12, 25)
self.assertIn(dt, self.holidays)
self.assertNotIn(dt + relativedelta(days=-1), self.holidays)
self.assertNotIn(date(2010, 12, 24), self.holidays)
self.assertNotEqual(
self.holidays[date(2011, 12, 26)], "Christmas Day (Observed)"
)
self.holidays.observed = True
self.assertEqual(
self.holidays[date(2011, 12, 27)], "Christmas Day (Observed)"
)
for year, day in enumerate(
[
25,
25,
25,
27,
27, # 2001-05
25,
25,
25,
25,
27, # 2006-10
27,
25,
25,
25,
25, # 2011-15
27,
25,
25,
25,
25,
25,
], # 2016-21
2001,
):
dt = date(year, 12, day)
self.assertIn(dt, self.holidays, dt)
self.assertEqual(self.holidays[dt][:9], "Christmas")
def test_boxing_day(self):
self.holidays.observed = False
for year in range(1900, 2100):
dt = date(year, 12, 26)
self.assertIn(dt, self.holidays)
self.assertNotIn(dt + relativedelta(days=+1), self.holidays)
self.assertNotIn(date(2009, 12, 28), self.holidays)
self.assertNotIn(date(2010, 12, 27), self.holidays)
self.holidays.observed = True
self.assertIn(date(2009, 12, 28), self.holidays)
self.assertIn(date(2010, 12, 27), self.holidays)
for year, day in enumerate(
[
26,
26,
26,
28,
26, # 2001-05
26,
26,
26,
28,
28, # 2006-10
26,
26,
26,
26,
28, # 2011-15
26,
26,
26,
26,
28,
28,
], # 2016-21
2001,
):
dt = date(year, 12, day)
self.assertIn(dt, self.holidays, dt)
self.assertEqual(self.holidays[dt][:6], "Boxing")
def test_auckland_anniversary_day(self):
auk_holidays = holidays.NZ(subdiv="Auckland")
for year, day in enumerate(
[
29,
28,
27,
26,
31, # 2001-05
30,
29,
28,
26,
1, # 2006-10
31,
30,
28,
27,
26, # 2011-15
1,
30,
29,
28,
27,
1,
], # 2016-21
2001,
):
dt = date(year, 2 if day < 9 else 1, day)
self.assertIn(dt, auk_holidays, dt)
self.assertEqual(auk_holidays[dt], "Auckland Anniversary Day")
def test_taranaki_anniversary_day(self):
tki_holidays = holidays.NZ(subdiv="Taranaki")
for year, day in enumerate(
[
12,
11,
10,
8,
14, # 2001-05
13,
12,
10,
9,
8, # 2006-10
14,
12,
11,
10,
9, # 2011-15
14,
13,
12,
11,
9,
8,
], # 2016-21
2001,
):
dt = date(year, 3, day)
self.assertIn(dt, tki_holidays, dt)
self.assertEqual(tki_holidays[dt], "Taranaki Anniversary Day")
def test_hawkes_bay_anniversary_day(self):
hkb_holidays = holidays.NZ(subdiv="Hawke's Bay")
for year, day in enumerate(
[
19,
25,
24,
22,
21, # 2001-05
20,
19,
24,
23,
22, # 2006-10
21,
19,
25,
24,
23, # 2011-15
21,
20,
19,
25,
23,
22,
], # 2016-21
2001,
):
dt = date(year, 10, day)
self.assertIn(dt, hkb_holidays, dt)
self.assertEqual(hkb_holidays[dt], "Hawke's Bay Anniversary Day")
def test_wellington_anniversary_day(self):
wgn_holidays = holidays.NZ(subdiv="Wellington")
for year, day in enumerate(
[
22,
21,
20,
19,
24, # 2001-05
23,
22,
21,
19,
25, # 2006-10
24,
23,
21,
20,
19, # 2011-15
25,
23,
22,
21,
20,
25,
], # 2016-21
2001,
):
dt = date(year, 1, day)
self.assertIn(dt, wgn_holidays, dt)
self.assertEqual(
wgn_holidays[dt], "Wellington Anniversary Day", dt
)
def test_marlborough_anniversary_day(self):
mbh_holidays = holidays.NZ(subdiv="Marlborough")
for year, day in enumerate(
[
29,
4,
3,
1,
31, # 2001-05
30,
29,
3,
2,
1, # 2006-10
31,
29,
4,
3,
2, # 2011-15
31,
30,
29,
4,
2,
1,
], # 2016-21
2001,
):
dt = date(year, 11 if day < 9 else 10, day)
self.assertIn(dt, mbh_holidays, dt)
self.assertEqual(
mbh_holidays[dt], "Marlborough Anniversary Day", dt
)
def test_nelson_anniversary_day(self):
nsn_holidays = holidays.NZ(subdiv="Nelson")
for year, day in enumerate(
[
29,
4,
3,
2,
31, # 2001-05
30,
29,
4,
2,
1, # 2006-10
31,
30,
4,
3,
2, # 2011-15
1,
30,
29,
4,
3,
1,
], # 2016-21
2001,
):
dt = date(year, 2 if day < 9 else 1, day)
self.assertIn(dt, nsn_holidays, dt)
self.assertEqual(nsn_holidays[dt], "Nelson Anniversary Day", dt)
def test_canterbury_anniversary_day(self):
can_holidays = holidays.NZ(subdiv="Canterbury")
for year, day in enumerate(
[
16,
15,
14,
12,
11, # 2001-05
17,
16,
14,
13,
12, # 2006-10
11,
16,
15,
14,
13, # 2011-15
11,
17,
16,
15,
13,
12,
], # 2016-21
2001,
):
dt = date(year, 11, day)
self.assertIn(dt, can_holidays, dt)
self.assertEqual(
can_holidays[dt], "Canterbury Anniversary Day", dt
)
def test_south_canterbury_anniversary_day(self):
stc_holidays = holidays.NZ(subdiv="South Canterbury")
for year, day in enumerate(
[
24,
23,
22,
27,
26, # 2001-05
25,
24,
22,
28,
27, # 2006-10
26,
24,
23,
22,
28, # 2011-15
26,
25,
24,
23,
28,
27,
], # 2016-21
2001,
):
dt = date(year, 9, day)
self.assertIn(dt, stc_holidays, dt)
self.assertEqual(
stc_holidays[dt], "South Canterbury Anniversary Day", dt
)
def test_westland_anniversary_day(self):
wtc_holidays = holidays.NZ(subdiv="Westland")
for year, day in enumerate(
[
3,
2,
1,
29,
5, # 2001-05
4,
3,
1,
30,
29, # 2006-10
28,
3,
2,
1,
30, # 2011-15
28,
4,
3,
2,
30,
29,
], # 2016-21
2001,
):
dt = date(year, 12 if day < 9 else 11, day)
self.assertIn(dt, wtc_holidays, dt)
self.assertEqual(
wtc_holidays[dt], "West Coast Anniversary Day", dt
)
def test_otago_anniversary_day(self):
ota_holidays = holidays.NZ(subdiv="Otago")
for year, day in enumerate(
[
26,
25,
24,
22,
21, # 2001-05
20,
26,
25,
23,
22, # 2006-10
21,
26,
25,
24,
23, # 2011-15
21,
20,
26,
25,
23,
22,
], # 2016-21
2001,
):
dt = date(year, 3, day)
self.assertIn(dt, ota_holidays, dt)
self.assertEqual(ota_holidays[dt], "Otago Anniversary Day", dt)
def test_southland_anniversary_day(self):
stl_holidays = holidays.NZ(subdiv="Southland")
for year, day in enumerate(
[15, 14, 20, 19, 17, 16, 15, 14, 19, 18, 17],
2001, # 2001-05 # 2006-11
):
dt = date(year, 1, day)
self.assertIn(dt, stl_holidays, dt)
self.assertEqual(stl_holidays[dt], "Southland Anniversary Day", dt)
for year, (month, day) in enumerate(
[
(4, 10),
(4, 2),
(4, 22),
(4, 7),
(3, 29),
(4, 18),
(4, 3),
(4, 23),
(4, 14),
(4, 6),
],
2012,
):
dt = date(year, month, day)
self.assertIn(dt, stl_holidays, dt)
self.assertEqual(
stl_holidays[dt], "Southland Anniversary Day", dt
)
def test_chatham_islands_anniversary_day(self):
cit_holidays = holidays.NZ(subdiv="Chatham Islands")
for year, day in enumerate(
[
3,
2,
1,
29,
28, # 2001-05
27,
3,
1,
30,
29, # 2006-10
28,
3,
2,
1,
30, # 2011-15
28,
27,
3,
2,
30,
29,
], # 2016-21
2001,
):
dt = date(year, 12 if day < 9 else 11, day)
self.assertIn(dt, cit_holidays, dt)
self.assertEqual(
cit_holidays[dt], "Chatham Islands Anniversary Day", dt
)
def test_all_holidays_present(self):
nz_1969 = sum(
holidays.NZ(years=[1969], subdiv=p)
for p in holidays.NZ.subdivisions
)
holidays_in_1969 = sum((nz_1969.get_list(key) for key in nz_1969), [])
nz_2015 = sum(
holidays.NZ(years=[2015], subdiv=p)
for p in holidays.NZ.subdivisions
)
holidays_in_2015 = sum((nz_2015.get_list(key) for key in nz_2015), [])
nz_1974 = sum(
holidays.NZ(years=[1974], subdiv=p)
for p in holidays.NZ.subdivisions
)
holidays_in_1974 = sum((nz_1974.get_list(key) for key in nz_1974), [])
all_holidays = [
"New Year's Day",
"Day after New Year's Day",
"Waitangi Day",
"Good Friday",
"Easter Monday",
"Anzac Day",
"Queen's Birthday",
"Labour Day",
"Christmas Day",
"Boxing Day",
"Auckland Anniversary Day",
"Taranaki Anniversary Day",
"Hawke's Bay Anniversary Day",
"Wellington Anniversary Day",
"Marlborough Anniversary Day",
"Nelson Anniversary Day",
"Canterbury Anniversary Day",
"South Canterbury Anniversary Day",
"West Coast Anniversary Day",
"Otago Anniversary Day",
"Southland Anniversary Day",
"Chatham Islands Anniversary Day",
"Queen's Birthday",
"Labour Day",
"Christmas Day",
"Boxing Day",
]
for holiday in all_holidays:
self.assertIn(holiday, holidays_in_1969, holiday)
self.assertIn(holiday, holidays_in_2015, holiday)
all_holidays.remove("Waitangi Day")
all_holidays.insert(2, "New Zealand Day")
for holiday in all_holidays:
self.assertIn(holiday, holidays_in_1974, holiday)
self.assertNotIn("Waitangi Day", holidays_in_1974)
|
dr-prodigy/python-holidays
|
test/countries/test_new_zealand.py
|
Python
|
mit
| 24,166
|
import os
import time
from django.conf import settings
from django.db import connections
from django.dispatch import receiver, Signal
from django.template import context
from django.utils import timezone
template_rendered = Signal(providing_args=["template", "context"])
setting_changed = Signal(providing_args=["setting", "value"])
# Most setting_changed receivers are supposed to be added below,
# except for cases where the receiver is related to a contrib app.
@receiver(setting_changed)
def update_connections_time_zone(**kwargs):
if kwargs['setting'] == 'TIME_ZONE':
# Reset process time zone
if hasattr(time, 'tzset'):
if kwargs['value']:
os.environ['TZ'] = kwargs['value']
else:
os.environ.pop('TZ', None)
time.tzset()
# Reset local time zone cache
timezone._localtime = None
# Reset the database connections' time zone
if kwargs['setting'] == 'USE_TZ' and settings.TIME_ZONE != 'UTC':
USE_TZ, TIME_ZONE = kwargs['value'], settings.TIME_ZONE
elif kwargs['setting'] == 'TIME_ZONE' and not settings.USE_TZ:
USE_TZ, TIME_ZONE = settings.USE_TZ, kwargs['value']
else:
            # no need to change the database connections' time zones
return
tz = 'UTC' if USE_TZ else TIME_ZONE
for conn in connections.all():
conn.settings_dict['TIME_ZONE'] = tz
tz_sql = conn.ops.set_time_zone_sql()
if tz_sql:
conn.cursor().execute(tz_sql, [tz])
@receiver(setting_changed)
def clear_context_processors_cache(**kwargs):
if kwargs['setting'] == 'TEMPLATE_CONTEXT_PROCESSORS':
context._standard_context_processors = None
@receiver(setting_changed)
def language_changed(**kwargs):
if kwargs['setting'] in ('LOCALE_PATHS', 'LANGUAGE_CODE'):
from django.utils.translation import trans_real
trans_real._default = None
if kwargs['setting'] == 'LOCALE_PATHS':
trans_real._translations = {}
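
# A minimal sketch (editor's illustration) of hooking an extra receiver into
# the same setting_changed signal; 'EXAMPLE_CACHE_TTL' is an assumed
# placeholder setting name, not one used by Django itself.
@receiver(setting_changed)
def example_cache_ttl_changed(**kwargs):
    if kwargs['setting'] == 'EXAMPLE_CACHE_TTL':
        # invalidate anything that cached the old TTL here
        pass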
|
azurestandard/django
|
django/test/signals.py
|
Python
|
bsd-3-clause
| 2,021
|
"""
Settings that are expected to be changed by the user. They influence the system as a whole
"""
import os
FULLTEXT_EXTRACT_PATH = "/vagrant/live"
FULLTEXT_EXTRACT_PATH_UNITTEST = "tests/test_unit/stub_data"
PROJ_HOME = os.path.dirname(os.path.realpath(__file__))
config = {
"FULLTEXT_EXTRACT_PATH": FULLTEXT_EXTRACT_PATH,
"FULLTEXT_EXTRACT_PATH_UNITTEST": os.path.join(PROJ_HOME, FULLTEXT_EXTRACT_PATH_UNITTEST),
}
CONSTANTS = {
"META_PATH": "meta_path",
"FILE_SOURCE": "ft_source",
"BIBCODE": "bibcode",
"PROVIDER": "provider",
"UPDATE": "UPDATE",
"FULL_TEXT": 'fulltext',
"FORMAT": "file_format",
"TIME_STAMP": 'index_date',
"ACKNOWLEDGEMENTS": 'acknowledgements',
"DATASET": 'dataset',
}
META_CONTENT = {
"xml": {
"fulltext": ['//body','//section[@type="body"]', '//journalarticle-body'],
"acknowledgements": ['//ack', '//section[@type="acknowledgments"]', '//subsection[@type="acknowledgement" or @type="acknowledgment"]'],
"dataset": ['//named-content[@content-type="dataset"]'],
},
"xmlelsevier": {
"fulltext": ['//body','//raw-text'],
"acknowledgements": ['//acknowledgment', '//ack', '//section[@type="acknowledgments"]',
'//subsection[@type="acknowledgement" or @type="acknowledgment"]',
'//*[local-name()="acknowledgment"]'],
"dataset": ['//named-content[@content-type="dataset"]'],
},
"html": {
"introduction": ['//h2[contains(.,"ntroduction")]',
'//h2[contains(.,"ntroduction")]',
'//p[contains(.,"Abstract")]',
],
"references": ['//h2[contains(.,"References")]'],
"table": ['//table'],
"table_links": ['//a[contains(@href, "TABLE_NAME")]'],
"head": ['//head']
},
"txt": {"fulltext": [""]},
"ocr": {"fulltext": [""]},
"http": {"fulltext": [""]},
"pdf": {"fulltext": [""]},
}
# For production/testing environment
try:
from local_settings import *
except ImportError as e:
pass
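
# A minimal usage sketch (editor's illustration): looking up the XPath
# expressions for a given file format and content type from META_CONTENT.
#
#   xpaths = META_CONTENT.get("xmlelsevier", {}).get("acknowledgements", [])
#   for xpath in xpaths:
#       pass  # try each expression in turn until one matches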
|
jonnybazookatone/adsfulltext_old
|
settings.py
|
Python
|
gpl-3.0
| 2,178
|
# -*- coding: utf-8 -*-
# memorystore.py
# Copyright (C) 2014 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
In-memory transient store for a LEAPIMAPServer.
"""
import contextlib
import logging
import threading
import weakref
from collections import defaultdict
from copy import copy
from enum import Enum
from twisted.internet import defer
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
from twisted.python import log
from zope.interface import implements
from leap.common.check import leap_assert_type
from leap.mail import size
from leap.mail.utils import empty, phash_iter
from leap.mail.messageflow import MessageProducer
from leap.mail.imap import interfaces
from leap.mail.imap.fields import fields
from leap.mail.imap.messageparts import MessagePartType, MessagePartDoc
from leap.mail.imap.messageparts import RecentFlagsDoc
from leap.mail.imap.messageparts import MessageWrapper
from leap.mail.imap.messageparts import ReferenciableDict
from leap.mail.decorators import deferred_to_thread
logger = logging.getLogger(__name__)
# The default period to do writebacks to the permanent
# soledad storage, in seconds.
SOLEDAD_WRITE_PERIOD = 15
FDOC = MessagePartType.fdoc.key
HDOC = MessagePartType.hdoc.key
CDOCS = MessagePartType.cdocs.key
DOCS_ID = MessagePartType.docs_id.key
@contextlib.contextmanager
def set_bool_flag(obj, att):
"""
Set a boolean flag to True while we're doing our thing.
Just to let the world know.
"""
setattr(obj, att, True)
try:
yield True
except RuntimeError as exc:
logger.exception(exc)
finally:
setattr(obj, att, False)
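
# Usage sketch (editor's illustration): the attribute named by `att` stays True
# only while the with-block runs, e.g.
#
#   with set_bool_flag(store, MemoryStore.WRITING_FLAG):
#       pass  # perform the writeback while other threads can see the flag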
DirtyState = Enum("none", "dirty", "new")
class MemoryStore(object):
"""
An in-memory store to where we can write the different parts that
we split the messages into and buffer them until we write them to the
permanent storage.
It uses MessageWrapper instances to represent the message-parts, which are
indexed by mailbox name and UID.
    It can also be passed a permanent storage as a parameter (any implementor
of IMessageStore, in this case a SoledadStore). In this case, a periodic
dump of the messages stored in memory will be done. The period of the
    writes to the permanent storage is controlled by the write_period parameter
in the constructor.
"""
implements(interfaces.IMessageStore,
interfaces.IMessageStoreWriter)
# TODO We will want to index by chash when we transition to local-only
# UIDs.
WRITING_FLAG = "_writing"
_last_uid_lock = threading.Lock()
_fdoc_docid_lock = threading.Lock()
def __init__(self, permanent_store=None,
write_period=SOLEDAD_WRITE_PERIOD):
"""
Initialize a MemoryStore.
:param permanent_store: a IMessageStore implementor to dump
messages to.
:type permanent_store: IMessageStore
:param write_period: the interval to dump messages to disk, in seconds.
:type write_period: int
"""
self.reactor = reactor
self._permanent_store = permanent_store
self._write_period = write_period
if permanent_store is None:
self._mbox_closed = defaultdict(lambda: False)
# Internal Storage: messages
"""
flags document store.
_fdoc_store[mbox][uid] = { 'content': 'aaa' }
"""
self._fdoc_store = defaultdict(lambda: defaultdict(
lambda: ReferenciableDict({})))
# Sizes
"""
{'mbox, uid': <int>}
"""
self._sizes = {}
# Internal Storage: payload-hash
"""
fdocs:doc-id store, stores document IDs for putting
the dirty flags-docs.
"""
self._fdoc_id_store = defaultdict(lambda: defaultdict(
lambda: ''))
# Internal Storage: content-hash:hdoc
"""
hdoc-store keeps references to
the header-documents indexed by content-hash.
{'chash': { dict-stuff }
}
"""
self._hdoc_store = defaultdict(lambda: ReferenciableDict({}))
# Internal Storage: payload-hash:cdoc
"""
content-docs stored by payload-hash
{'phash': { dict-stuff } }
"""
self._cdoc_store = defaultdict(lambda: ReferenciableDict({}))
# Internal Storage: content-hash:fdoc
"""
chash-fdoc-store keeps references to
the flag-documents indexed by content-hash.
{'chash': {'mbox-a': weakref.proxy(dict),
'mbox-b': weakref.proxy(dict)}
}
"""
self._chash_fdoc_store = defaultdict(lambda: defaultdict(lambda: None))
# Internal Storage: recent-flags store
"""
recent-flags store keeps one dict per mailbox,
with the document-id of the u1db document
and the set of the UIDs that have the recent flag.
{'mbox-a': {'doc_id': 'deadbeef',
'set': {1,2,3,4}
}
}
"""
# TODO this will have to transition to content-hash
# indexes after we move to local-only UIDs.
self._rflags_store = defaultdict(
lambda: {'doc_id': None, 'set': set([])})
"""
last-uid store keeps the count of the highest UID
per mailbox.
{'mbox-a': 42,
'mbox-b': 23}
"""
self._last_uid = defaultdict(lambda: 0)
"""
known-uids keeps a count of the uids that soledad knows for a given
mailbox
{'mbox-a': set([1,2,3])}
"""
self._known_uids = defaultdict(set)
"""
mbox-flags is a dict containing flags for each mailbox. this is
modified from mailbox.getFlags / mailbox.setFlags
"""
self._mbox_flags = defaultdict(set)
# New and dirty flags, to set MessageWrapper State.
self._new = set([])
self._new_queue = set([])
self._new_deferreds = {}
self._dirty = set([])
self._dirty_queue = set([])
self._dirty_deferreds = {}
self._rflags_dirty = set([])
# Flag for signaling we're busy writing to the disk storage.
setattr(self, self.WRITING_FLAG, False)
if self._permanent_store is not None:
# this producer spits its messages to the permanent store
# consumer using a queue. We will use that to put
# our messages to be written.
self.producer = MessageProducer(permanent_store,
period=0.1)
# looping call for dumping to SoledadStore
self._write_loop = LoopingCall(self.write_messages,
permanent_store)
# We can start the write loop right now, why wait?
self._start_write_loop()
else:
# We have a memory-only store.
self.producer = None
self._write_loop = None
def _start_write_loop(self):
"""
Start loop for writing to disk database.
"""
if self._write_loop is None:
return
if not self._write_loop.running:
self._write_loop.start(self._write_period, now=True)
def _stop_write_loop(self):
"""
Stop loop for writing to disk database.
"""
if self._write_loop is None:
return
if self._write_loop.running:
self._write_loop.stop()
# IMessageStore
# XXX this would work well for whole message operations.
# We would have to add a put_flags operation to modify only
# the flags doc (and set the dirty flag accordingly)
def create_message(self, mbox, uid, message, observer,
notify_on_disk=True):
"""
Create the passed message into this MemoryStore.
By default we consider that any message is a new message.
:param mbox: the mailbox
:type mbox: str or unicode
:param uid: the UID for the message
:type uid: int
:param message: a message to be added
:type message: MessageWrapper
:param observer: the deferred that will fire with the
UID of the message. If notify_on_disk is True,
this will happen when the message is written to
Soledad. Otherwise it will fire as soon as we've
added the message to the memory store.
:type observer: Deferred
:param notify_on_disk: whether the `observer` deferred should
wait until the message is written to disk to
be fired.
:type notify_on_disk: bool
"""
log.msg("Adding new doc to memstore %r (%r)" % (mbox, uid))
key = mbox, uid
self._add_message(mbox, uid, message, notify_on_disk)
self._new.add(key)
if observer is not None:
if notify_on_disk:
# We store this deferred so we can keep track of the pending
# operations internally.
# TODO this should fire with the UID !!! -- change that in
# the soledad store code.
self._new_deferreds[key] = observer
else:
# Caller does not care, just fired and forgot, so we pass
                # a deferred that will immediately have its callback triggered.
self.reactor.callFromThread(observer.callback, uid)
def put_message(self, mbox, uid, message, notify_on_disk=True):
"""
Put an existing message.
This will also set the dirty flag on the MemoryStore.
:param mbox: the mailbox
:type mbox: str or unicode
:param uid: the UID for the message
:type uid: int
:param message: a message to be added
:type message: MessageWrapper
:param notify_on_disk: whether the deferred that is returned should
wait until the message is written to disk to
be fired.
:type notify_on_disk: bool
:return: a Deferred. if notify_on_disk is True, will be fired
when written to the db on disk.
                 Otherwise it will fire immediately
:rtype: Deferred
"""
key = mbox, uid
d = defer.Deferred()
d.addCallback(lambda result: log.msg("message PUT save: %s" % result))
self._dirty.add(key)
self._dirty_deferreds[key] = d
self._add_message(mbox, uid, message, notify_on_disk)
return d
def _add_message(self, mbox, uid, message, notify_on_disk=True):
"""
Helper method, called by both create_message and put_message.
See those for parameter documentation.
"""
msg_dict = message.as_dict()
fdoc = msg_dict.get(FDOC, None)
if fdoc is not None:
fdoc_store = self._fdoc_store[mbox][uid]
fdoc_store.update(fdoc)
chash_fdoc_store = self._chash_fdoc_store
# content-hash indexing
chash = fdoc.get(fields.CONTENT_HASH_KEY)
chash_fdoc_store[chash][mbox] = weakref.proxy(
self._fdoc_store[mbox][uid])
hdoc = msg_dict.get(HDOC, None)
if hdoc is not None:
chash = hdoc.get(fields.CONTENT_HASH_KEY)
hdoc_store = self._hdoc_store[chash]
hdoc_store.update(hdoc)
cdocs = message.cdocs
for cdoc in cdocs.values():
phash = cdoc.get(fields.PAYLOAD_HASH_KEY, None)
if not phash:
continue
cdoc_store = self._cdoc_store[phash]
cdoc_store.update(cdoc)
# Update memory store size
# XXX this should use [mbox][uid]
# TODO --- this has to be deferred to thread,
# TODO add hdoc and cdocs sizes too
# it's slowing things down here.
# key = mbox, uid
# self._sizes[key] = size.get_size(self._fdoc_store[key])
def purge_fdoc_store(self, mbox):
"""
Purge the empty documents from a fdoc store.
Called during initialization of the SoledadMailbox
:param mbox: the mailbox
:type mbox: str or unicode
"""
# XXX This is really a workaround until I find the conditions
# that are making the empty items remain there.
# This happens, for instance, after running several times
# the regression test, that issues a store deleted + expunge + select
        # The items are being correctly deleted, but in successive appends
        # the empty items with previously deleted uids reappear as empty
        # documents. I suspect it's a timing condition with a previously
        # evaluated sequence being used after the items have been removed.
for uid, value in self._fdoc_store[mbox].items():
if empty(value):
del self._fdoc_store[mbox][uid]
def get_docid_for_fdoc(self, mbox, uid):
"""
Return Soledad document id for the flags-doc for a given mbox and uid,
or None of no flags document could be found.
:param mbox: the mailbox
:type mbox: str or unicode
:param uid: the message UID
:type uid: int
:rtype: unicode or None
"""
with self._fdoc_docid_lock:
doc_id = self._fdoc_id_store[mbox][uid]
if empty(doc_id):
fdoc = self._permanent_store.get_flags_doc(mbox, uid)
if empty(fdoc) or empty(fdoc.content):
return None
doc_id = fdoc.doc_id
self._fdoc_id_store[mbox][uid] = doc_id
return doc_id
def get_message(self, mbox, uid, dirtystate=DirtyState.none,
flags_only=False):
"""
Get a MessageWrapper for the given mbox and uid combination.
:param mbox: the mailbox
:type mbox: str or unicode
:param uid: the message UID
:type uid: int
:param dirtystate: DirtyState enum: one of `dirty`, `new`
or `none` (default)
:type dirtystate: enum
:param flags_only: whether the message should carry only a reference
to the flags document.
:type flags_only: bool
:return: MessageWrapper or None
"""
if dirtystate == DirtyState.dirty:
flags_only = True
key = mbox, uid
fdoc = self._fdoc_store[mbox][uid]
if empty(fdoc):
return None
new, dirty = False, False
if dirtystate == DirtyState.none:
new, dirty = self._get_new_dirty_state(key)
if dirtystate == DirtyState.dirty:
new, dirty = False, True
if dirtystate == DirtyState.new:
new, dirty = True, False
if flags_only:
return MessageWrapper(fdoc=fdoc,
new=new, dirty=dirty,
memstore=weakref.proxy(self))
else:
chash = fdoc.get(fields.CONTENT_HASH_KEY)
hdoc = self._hdoc_store[chash]
if empty(hdoc):
hdoc = self._permanent_store.get_headers_doc(chash)
if empty(hdoc):
return None
if not empty(hdoc.content):
self._hdoc_store[chash] = hdoc.content
hdoc = hdoc.content
cdocs = None
pmap = hdoc.get(fields.PARTS_MAP_KEY, None)
if new and pmap is not None:
# take the different cdocs for write...
cdoc_store = self._cdoc_store
cdocs_list = phash_iter(hdoc)
cdocs = dict(enumerate(
[cdoc_store[phash] for phash in cdocs_list], 1))
return MessageWrapper(fdoc=fdoc, hdoc=hdoc, cdocs=cdocs,
new=new, dirty=dirty,
memstore=weakref.proxy(self))
def remove_message(self, mbox, uid):
"""
Remove a Message from this MemoryStore.
:param mbox: the mailbox
:type mbox: str or unicode
:param uid: the message UID
:type uid: int
"""
# XXX For the moment we are only removing the flags and headers
# docs. The rest we leave there polluting your hard disk,
# until we think about a good way of deorphaning.
# XXX implement elijah's idea of using a PUT document as a
# token to ensure consistency in the removal.
try:
del self._fdoc_store[mbox][uid]
except KeyError:
pass
try:
key = mbox, uid
self._new.discard(key)
self._dirty.discard(key)
if key in self._sizes:
del self._sizes[key]
self._known_uids[mbox].discard(uid)
except KeyError:
pass
except Exception as exc:
logger.error("error while removing message!")
logger.exception(exc)
try:
with self._fdoc_docid_lock:
del self._fdoc_id_store[mbox][uid]
except KeyError:
pass
except Exception as exc:
logger.error("error while removing message!")
logger.exception(exc)
# IMessageStoreWriter
@deferred_to_thread
def write_messages(self, store):
"""
Write the message documents in this MemoryStore to a different store.
:param store: the IMessageStore to write to
:rtype: False if queue is not empty, None otherwise.
"""
# For now, we pass if the queue is not empty, to avoid duplicate
# queuing.
# We would better use a flag to know when we've already enqueued an
# item.
# XXX this could return the deferred for all the enqueued operations
if not self.producer.is_queue_empty():
return False
if any(map(lambda i: not empty(i), (self._new, self._dirty))):
logger.info("Writing messages to Soledad...")
# TODO change for lock, and make the property access
            # is acquired
with set_bool_flag(self, self.WRITING_FLAG):
for rflags_doc_wrapper in self.all_rdocs_iter():
self.producer.push(rflags_doc_wrapper,
state=self.producer.STATE_DIRTY)
for msg_wrapper in self.all_new_msg_iter():
self.producer.push(msg_wrapper,
state=self.producer.STATE_NEW)
for msg_wrapper in self.all_dirty_msg_iter():
self.producer.push(msg_wrapper,
state=self.producer.STATE_DIRTY)
# MemoryStore specific methods.
def get_uids(self, mbox):
"""
Get all uids for a given mbox.
:param mbox: the mailbox
:type mbox: str or unicode
:rtype: list
"""
return self._fdoc_store[mbox].keys()
def get_soledad_known_uids(self, mbox):
"""
Get all uids that soledad knows about, from the memory cache.
:param mbox: the mailbox
:type mbox: str or unicode
:rtype: list
"""
return self._known_uids.get(mbox, [])
# last_uid
def get_last_uid(self, mbox):
"""
Return the highest UID for a given mbox.
It will be the highest between the highest uid in the message store for
the mailbox, and the soledad integer cache.
:param mbox: the mailbox
:type mbox: str or unicode
:rtype: int
"""
uids = self.get_uids(mbox)
last_mem_uid = uids and max(uids) or 0
last_soledad_uid = self.get_last_soledad_uid(mbox)
return max(last_mem_uid, last_soledad_uid)
def get_last_soledad_uid(self, mbox):
"""
Get last uid for a given mbox from the soledad integer cache.
:param mbox: the mailbox
:type mbox: str or unicode
"""
return self._last_uid.get(mbox, 0)
def set_last_soledad_uid(self, mbox, value):
"""
Set last uid for a given mbox in the soledad integer cache.
SoledadMailbox should prime this value during initialization.
Other methods (during message adding) SHOULD call
`increment_last_soledad_uid` instead.
:param mbox: the mailbox
:type mbox: str or unicode
:param value: the value to set
:type value: int
"""
# can be long???
# leap_assert_type(value, int)
logger.info("setting last soledad uid for %s to %s" %
(mbox, value))
# if we already have a value here, don't do anything
with self._last_uid_lock:
if not self._last_uid.get(mbox, None):
self._last_uid[mbox] = value
def set_known_uids(self, mbox, value):
"""
        Set the value of the known-uids set for this mbox.
:param mbox: the mailbox
:type mbox: str or unicode
:param value: a sequence of integers to be added to the set.
:type value: tuple
"""
current = self._known_uids[mbox]
self._known_uids[mbox] = current.union(set(value))
def increment_last_soledad_uid(self, mbox):
"""
Increment by one the soledad integer cache for the last_uid for
this mbox, and fire a defer-to-thread to update the soledad value.
        The caller should lock the call to this method.
:param mbox: the mailbox
:type mbox: str or unicode
"""
with self._last_uid_lock:
self._last_uid[mbox] += 1
value = self._last_uid[mbox]
self.reactor.callInThread(self.write_last_uid, mbox, value)
return value
def write_last_uid(self, mbox, value):
"""
Increment the soledad integer cache for the highest uid value.
:param mbox: the mailbox
:type mbox: str or unicode
:param value: the value to set
:type value: int
"""
leap_assert_type(value, int)
if self._permanent_store:
self._permanent_store.write_last_uid(mbox, value)
def load_flag_docs(self, mbox, flag_docs):
"""
Load the flag documents for the given mbox.
Used during initial flag docs prefetch.
:param mbox: the mailbox
:type mbox: str or unicode
:param flag_docs: a dict with the content for the flag docs, indexed
by uid.
:type flag_docs: dict
"""
# We can do direct assignments cause we know this will only
# be called during initialization of the mailbox.
# TODO could hook here a sanity-check
# for duplicates
fdoc_store = self._fdoc_store[mbox]
chash_fdoc_store = self._chash_fdoc_store
for uid in flag_docs:
rdict = ReferenciableDict(flag_docs[uid])
fdoc_store[uid] = rdict
# populate chash dict too, to avoid fdoc duplication
chash = flag_docs[uid]["chash"]
chash_fdoc_store[chash][mbox] = weakref.proxy(
self._fdoc_store[mbox][uid])
def update_flags(self, mbox, uid, fdoc):
"""
Update the flag document for a given mbox and uid combination,
and set the dirty flag.
We could use put_message, but this is faster.
:param mbox: the mailbox
:type mbox: str or unicode
:param uid: the uid of the message
:type uid: int
:param fdoc: a dict with the content for the flag docs
:type fdoc: dict
"""
key = mbox, uid
self._fdoc_store[mbox][uid].update(fdoc)
self._dirty.add(key)
def load_header_docs(self, header_docs):
"""
        Load the given header documents into the in-memory header store.
        Used during header docs prefetch, and to cache a header document
        read from soledad when the hdoc property of a message did not
        find its value in here.
        :param header_docs: a dict with the content for the header docs,
                            indexed by content-hash.
        :type header_docs: dict
"""
hdoc_store = self._hdoc_store
for chash in header_docs:
hdoc_store[chash] = ReferenciableDict(header_docs[chash])
def all_flags(self, mbox):
"""
Return a dictionary with all the flags for a given mbox.
:param mbox: the mailbox
:type mbox: str or unicode
:rtype: dict
"""
fdict = {}
uids = self.get_uids(mbox)
fstore = self._fdoc_store[mbox]
for uid in uids:
try:
fdict[uid] = fstore[uid][fields.FLAGS_KEY]
except KeyError:
continue
return fdict
def all_headers(self, mbox):
"""
Return a dictionary with all the header docs for a given mbox.
:param mbox: the mailbox
:type mbox: str or unicode
:rtype: dict
"""
headers_dict = {}
uids = self.get_uids(mbox)
fdoc_store = self._fdoc_store[mbox]
hdoc_store = self._hdoc_store
for uid in uids:
try:
chash = fdoc_store[uid][fields.CONTENT_HASH_KEY]
hdoc = hdoc_store[chash]
if not empty(hdoc):
headers_dict[uid] = hdoc
except KeyError:
continue
return headers_dict
# Counting sheeps...
def count_new_mbox(self, mbox):
"""
Count the new messages by mailbox.
:param mbox: the mailbox
:type mbox: str or unicode
:return: number of new messages
:rtype: int
"""
return len([(m, uid) for m, uid in self._new if mbox == mbox])
# XXX used at all?
def count_new(self):
"""
Count all the new messages in the MemoryStore.
:rtype: int
"""
return len(self._new)
def count(self, mbox):
"""
Return the count of messages for a given mbox.
:param mbox: the mailbox
:type mbox: str or unicode
:return: number of messages
:rtype: int
"""
return len(self._fdoc_store[mbox])
def unseen_iter(self, mbox):
"""
Get an iterator for the message UIDs with no `seen` flag
for a given mailbox.
:param mbox: the mailbox
:type mbox: str or unicode
:return: iterator through unseen message doc UIDs
:rtype: iterable
"""
fdocs = self._fdoc_store[mbox]
return [uid for uid, value
in fdocs.items()
if fields.SEEN_FLAG not in value.get(fields.FLAGS_KEY, [])]
def get_cdoc_from_phash(self, phash):
"""
Return a content-document by its payload-hash.
:param phash: the payload hash to check against
:type phash: str or unicode
:rtype: MessagePartDoc
"""
doc = self._cdoc_store.get(phash, None)
# XXX return None for consistency?
# XXX have to keep a mapping between phash and its linkage
        # info, to know if this payload has already been saved or not.
# We will be able to get this from the linkage-docs,
# not yet implemented.
new = True
dirty = False
return MessagePartDoc(
new=new, dirty=dirty, store="mem",
part=MessagePartType.cdoc,
content=doc,
doc_id=None)
def get_fdoc_from_chash(self, chash, mbox):
"""
Return a flags-document by its content-hash and a given mailbox.
Used during content-duplication detection while copying or adding a
message.
:param chash: the content hash to check against
:type chash: str or unicode
:param mbox: the mailbox
:type mbox: str or unicode
:return: MessagePartDoc. It will return None if the flags document
has empty content or it is flagged as \\Deleted.
"""
fdoc = self._chash_fdoc_store[chash][mbox]
# a couple of special cases.
# 1. We might have a doc with empty content...
if empty(fdoc):
return None
# 2. ...Or the message could exist, but being flagged for deletion.
# We want to create a new one in this case.
# Hmmm what if the deletion is un-done?? We would end with a
# duplicate...
if fdoc and fields.DELETED_FLAG in fdoc.get(fields.FLAGS_KEY, []):
return None
uid = fdoc[fields.UID_KEY]
key = mbox, uid
new = key in self._new
dirty = key in self._dirty
return MessagePartDoc(
new=new, dirty=dirty, store="mem",
part=MessagePartType.fdoc,
content=fdoc,
doc_id=None)
def iter_fdoc_keys(self):
"""
Return a generator through all the mbox, uid keys in the flags-doc
store.
"""
fdoc_store = self._fdoc_store
for mbox in fdoc_store:
for uid in fdoc_store[mbox]:
yield mbox, uid
def all_new_msg_iter(self):
"""
Return generator that iterates through all new messages.
:return: generator of MessageWrappers
:rtype: generator
"""
gm = self.get_message
# need to freeze, set can change during iteration
new = [gm(*key, dirtystate=DirtyState.new) for key in tuple(self._new)]
# move content from new set to the queue
self._new_queue.update(self._new)
self._new.difference_update(self._new)
return new
def all_dirty_msg_iter(self):
"""
Return generator that iterates through all dirty messages.
:return: generator of MessageWrappers
:rtype: generator
"""
gm = self.get_message
# need to freeze, set can change during iteration
dirty = [gm(*key, flags_only=True, dirtystate=DirtyState.dirty)
for key in tuple(self._dirty)]
# move content from new and dirty sets to the queue
self._dirty_queue.update(self._dirty)
self._dirty.difference_update(self._dirty)
return dirty
def all_deleted_uid_iter(self, mbox):
"""
        Return a list with the UIDs for all messages
        with the deleted flag set in a given mailbox.
:param mbox: the mailbox
:type mbox: str or unicode
:return: list of integers
:rtype: list
"""
# This *needs* to return a fixed sequence. Otherwise the dictionary len
# will change during iteration, when we modify it
fdocs = self._fdoc_store[mbox]
return [uid for uid, value
in fdocs.items()
if fields.DELETED_FLAG in value.get(fields.FLAGS_KEY, [])]
# new, dirty flags
def _get_new_dirty_state(self, key):
"""
Return `new` and `dirty` flags for a given message.
:param key: the key for the message, in the form mbox, uid
:type key: tuple
:return: tuple of bools
:rtype: tuple
"""
# TODO change indexing of sets to [mbox][key] too.
# XXX should return *first* the news, and *then* the dirty...
# TODO should query in queues too , true?
#
return map(lambda _set: key in _set, (self._new, self._dirty))
def set_new_queued(self, key):
"""
Add the key value to the `new-queue` set.
:param key: the key for the message, in the form mbox, uid
:type key: tuple
"""
self._new_queue.add(key)
def unset_new_queued(self, key):
"""
Remove the key value from the `new-queue` set.
:param key: the key for the message, in the form mbox, uid
:type key: tuple
"""
self._new_queue.discard(key)
deferreds = self._new_deferreds
d = deferreds.get(key, None)
if d:
# XXX use a namedtuple for passing the result
# when we check it in the other side.
d.callback('%s, ok' % str(key))
deferreds.pop(key)
def set_dirty_queued(self, key):
"""
Add the key value to the `dirty-queue` set.
:param key: the key for the message, in the form mbox, uid
:type key: tuple
"""
self._dirty_queue.add(key)
def unset_dirty_queued(self, key):
"""
Remove the key value from the `dirty-queue` set.
:param key: the key for the message, in the form mbox, uid
:type key: tuple
"""
self._dirty_queue.discard(key)
deferreds = self._dirty_deferreds
d = deferreds.get(key, None)
if d:
# XXX use a namedtuple for passing the result
# when we check it in the other side.
d.callback('%s, ok' % str(key))
deferreds.pop(key)
# Recent Flags
def set_recent_flag(self, mbox, uid):
"""
Set the `Recent` flag for a given mailbox and UID.
:param mbox: the mailbox
:type mbox: str or unicode
:param uid: the message UID
:type uid: int
"""
self._rflags_dirty.add(mbox)
self._rflags_store[mbox]['set'].add(uid)
# TODO --- nice but unused
def unset_recent_flag(self, mbox, uid):
"""
Unset the `Recent` flag for a given mailbox and UID.
:param mbox: the mailbox
:type mbox: str or unicode
:param uid: the message UID
:type uid: int
"""
self._rflags_store[mbox]['set'].discard(uid)
def set_recent_flags(self, mbox, value):
"""
Set the value for the set of the recent flags.
Used from the property in the MessageCollection.
:param mbox: the mailbox
:type mbox: str or unicode
:param value: a sequence of flags to set
:type value: sequence
"""
self._rflags_dirty.add(mbox)
self._rflags_store[mbox]['set'] = set(value)
def load_recent_flags(self, mbox, flags_doc):
"""
Load the passed flags document in the recent flags store, for a given
mailbox.
:param mbox: the mailbox
:type mbox: str or unicode
:param flags_doc: A dictionary containing the `doc_id` of the Soledad
flags-document for this mailbox, and the `set`
of uids marked with that flag.
"""
self._rflags_store[mbox] = flags_doc
def get_recent_flags(self, mbox):
"""
Return the set of UIDs with the `Recent` flag for this mailbox.
:param mbox: the mailbox
:type mbox: str or unicode
:rtype: set, or None
"""
rflag_for_mbox = self._rflags_store.get(mbox, None)
if not rflag_for_mbox:
return None
return self._rflags_store[mbox]['set']
def all_rdocs_iter(self):
"""
Return an iterator through all in-memory recent flag dicts, wrapped
under a RecentFlagsDoc namedtuple.
Used for saving to disk.
:return: a generator of RecentFlagDoc
:rtype: generator
"""
# XXX use enums
DOC_ID = "doc_id"
SET = "set"
rflags_store = self._rflags_store
def get_rdoc(mbox, rdict):
mbox_rflag_set = rdict[SET]
recent_set = copy(mbox_rflag_set)
# zero it!
mbox_rflag_set.difference_update(mbox_rflag_set)
return RecentFlagsDoc(
doc_id=rflags_store[mbox][DOC_ID],
content={
fields.TYPE_KEY: fields.TYPE_RECENT_VAL,
fields.MBOX_KEY: mbox,
fields.RECENTFLAGS_KEY: list(recent_set)
})
return (get_rdoc(mbox, rdict) for mbox, rdict in rflags_store.items()
if not empty(rdict[SET]))
# Methods that mirror the IMailbox interface
def remove_all_deleted(self, mbox):
"""
Remove all messages flagged \\Deleted from this Memory Store only.
Called from `expunge`
:param mbox: the mailbox
:type mbox: str or unicode
:return: a list of UIDs
:rtype: list
"""
mem_deleted = self.all_deleted_uid_iter(mbox)
for uid in mem_deleted:
self.remove_message(mbox, uid)
return mem_deleted
def stop_and_flush(self):
"""
Stop the write loop and trigger a write to the producer.
"""
self._stop_write_loop()
if self._permanent_store is not None:
# XXX we should check if we did get a True value on this
# operation. If we got False we should retry! (queue was not empty)
self.write_messages(self._permanent_store)
self.producer.flush()
def expunge(self, mbox, observer):
"""
Remove all messages flagged \\Deleted, from the Memory Store
and from the permanent store also.
        It first queues up a last write, and waits for the deferreds to be done
before continuing.
:param mbox: the mailbox
:type mbox: str or unicode
:param observer: a deferred that will be fired when expunge is done
:type observer: Deferred
"""
soledad_store = self._permanent_store
if soledad_store is None:
            # just an in-memory store, easy then.
self._delete_from_memory(mbox, observer)
return
# We have a soledad storage.
try:
# Stop and trigger last write
self.stop_and_flush()
# Wait on the writebacks to finish
# XXX what if pending deferreds is empty?
pending_deferreds = (self._new_deferreds.get(mbox, []) +
self._dirty_deferreds.get(mbox, []))
d1 = defer.gatherResults(pending_deferreds, consumeErrors=True)
d1.addCallback(
self._delete_from_soledad_and_memory, mbox, observer)
except Exception as exc:
logger.exception(exc)
def _delete_from_memory(self, mbox, observer):
"""
Remove all messages marked as deleted from soledad and memory.
:param mbox: the mailbox
:type mbox: str or unicode
:param observer: a deferred that will be fired when expunge is done
:type observer: Deferred
"""
mem_deleted = self.remove_all_deleted(mbox)
observer.callback(mem_deleted)
def _delete_from_soledad_and_memory(self, result, mbox, observer):
"""
Remove all messages marked as deleted from soledad and memory.
:param result: ignored. the result of the deferredList that triggers
this as a callback from `expunge`.
:param mbox: the mailbox
:type mbox: str or unicode
:param observer: a deferred that will be fired when expunge is done
:type observer: Deferred
"""
all_deleted = []
soledad_store = self._permanent_store
try:
# 1. Delete all messages marked as deleted in soledad.
logger.debug("DELETING FROM SOLEDAD ALL FOR %r" % (mbox,))
sol_deleted = soledad_store.remove_all_deleted(mbox)
try:
self._known_uids[mbox].difference_update(set(sol_deleted))
except Exception as exc:
logger.exception(exc)
# 2. Delete all messages marked as deleted in memory.
logger.debug("DELETING FROM MEM ALL FOR %r" % (mbox,))
mem_deleted = self.remove_all_deleted(mbox)
all_deleted = set(mem_deleted).union(set(sol_deleted))
logger.debug("deleted %r" % all_deleted)
except Exception as exc:
logger.exception(exc)
finally:
self._start_write_loop()
observer.callback(all_deleted)
# Mailbox documents and attributes
# This could be also be cached in memstore, but proxying directly
# to soledad since it's not too performance-critical.
def get_mbox_doc(self, mbox):
"""
Return the soledad document for a given mailbox.
:param mbox: the mailbox
:type mbox: str or unicode
:rtype: SoledadDocument or None.
"""
if self.permanent_store is not None:
return self.permanent_store.get_mbox_document(mbox)
else:
return None
def get_mbox_closed(self, mbox):
"""
Return the closed attribute for a given mailbox.
:param mbox: the mailbox
:type mbox: str or unicode
:rtype: bool
"""
if self.permanent_store is not None:
return self.permanent_store.get_mbox_closed(mbox)
else:
return self._mbox_closed[mbox]
def set_mbox_closed(self, mbox, closed):
"""
Set the closed attribute for a given mailbox.
:param mbox: the mailbox
:type mbox: str or unicode
"""
if self.permanent_store is not None:
self.permanent_store.set_mbox_closed(mbox, closed)
else:
self._mbox_closed[mbox] = closed
def get_mbox_flags(self, mbox):
"""
Get the flags for a given mbox.
:rtype: list
"""
return sorted(self._mbox_flags[mbox])
def set_mbox_flags(self, mbox, flags):
"""
Set the mbox flags
"""
self._mbox_flags[mbox] = set(flags)
# TODO
# This should write to the permanent store!!!
# Rename flag-documents
def rename_fdocs_mailbox(self, old_mbox, new_mbox):
"""
Change the mailbox name for all flag documents in a given mailbox.
Used from account.rename
:param old_mbox: name for the old mbox
:type old_mbox: str or unicode
:param new_mbox: name for the new mbox
:type new_mbox: str or unicode
"""
fs = self._fdoc_store
keys = fs[old_mbox].keys()
for k in keys:
fdoc = fs[old_mbox][k]
fdoc['mbox'] = new_mbox
fs[new_mbox][k] = fdoc
fs[old_mbox].pop(k)
self._dirty.add((new_mbox, k))
# Dump-to-disk controls.
@property
def is_writing(self):
"""
Property that returns whether the store is currently writing its
internal state to a permanent storage.
        Used to evaluate whether the CHECK command can report that it is safe
        to proceed, or whether it needs to wait for the pending write
        operations to complete instead.
:rtype: bool
"""
# FIXME this should return a deferred !!!
# XXX ----- can fire when all new + dirty deferreds
# are done (gatherResults)
return getattr(self, self.WRITING_FLAG)
@property
def permanent_store(self):
return self._permanent_store
# Memory management.
def get_size(self):
"""
Return the size of the internal storage.
        Used for calculating the limit beyond which we should flush the store.
:rtype: int
"""
return reduce(lambda x, y: x + y, self._sizes, 0)
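# --- Hedged example (editor's sketch, not part of the original module) ---
# A standalone illustration of the "queued key + deferred" pattern used by
# set_new_queued()/unset_new_queued() above: a key sits in a set while its
# write-back is pending, and a deferred stored under that key fires once the
# key is unqueued. The names PendingWrites/enqueue/mark_done are illustrative
# only; they do not exist in leap_mail.
if __name__ == "__main__":
    from twisted.internet import defer
    class PendingWrites(object):
        def __init__(self):
            self.queue = set()
            self.deferreds = {}
        def enqueue(self, key):
            # track the key while its write-back is pending
            self.queue.add(key)
            d = defer.Deferred()
            self.deferreds[key] = d
            return d
        def mark_done(self, key):
            # untrack the key and fire the deferred that was waiting on it
            self.queue.discard(key)
            d = self.deferreds.pop(key, None)
            if d is not None:
                d.callback('%s, ok' % str(key))
    pending = PendingWrites()
    d = pending.enqueue(('inbox', 1))
    d.addCallback(lambda result: result)  # fires when the write-back completes
    pending.mark_done(('inbox', 1))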
|
laborautonomo/leap_mail
|
src/leap/mail/imap/memorystore.py
|
Python
|
gpl-3.0
| 44,071
|
# -*- coding: utf-8 -*-
import logging
import os
import sys
from datetime import datetime
import wormhole.errors
from PyQt5.QtCore import QFileInfo, Qt, QTimer, pyqtSignal
from PyQt5.QtGui import QFont, QIcon
from PyQt5.QtWidgets import (
QDialog,
QFileIconProvider,
QGridLayout,
QGroupBox,
QLabel,
QMessageBox,
QProgressBar,
QPushButton,
QSizePolicy,
QSpacerItem,
QToolButton,
QWidget,
)
from twisted.internet import reactor
from twisted.internet.defer import CancelledError
from gridsync import config_dir, resource
from gridsync.desktop import get_clipboard_modes, set_clipboard_text
from gridsync.gui.font import Font
from gridsync.gui.invite import InviteCodeWidget, show_failure
from gridsync.gui.pixmap import Pixmap
from gridsync.invite import InviteReceiver, InviteSender
from gridsync.preferences import get_preference
from gridsync.tor import TOR_PURPLE
from gridsync.util import b58encode, humanized_list
class InviteSenderDialog(QDialog):
done = pyqtSignal(QWidget)
closed = pyqtSignal(QWidget)
def __init__(self, gateway, gui, folder_names=None):
super().__init__()
self.gateway = gateway
self.gui = gui
self.folder_names = folder_names
self.folder_names_humanized = humanized_list(folder_names, "folders")
self.settings = {}
self.pending_invites = []
self.use_tor = self.gateway.use_tor
self.setMinimumSize(500, 300)
header_icon = QLabel(self)
if self.folder_names:
icon = QFileIconProvider().icon(
QFileInfo(
self.gateway.get_magic_folder_directory(
self.folder_names[0]
)
)
)
else:
icon = QIcon(os.path.join(gateway.nodedir, "icon"))
if not icon.availableSizes():
icon = QIcon(resource("tahoe-lafs.png"))
header_icon.setPixmap(icon.pixmap(50, 50))
header_text = QLabel(self)
if self.folder_names:
header_text.setText(self.folder_names_humanized)
else:
header_text.setText(self.gateway.name)
header_text.setFont(Font(18))
header_text.setAlignment(Qt.AlignCenter)
header_layout = QGridLayout()
header_layout.addItem(
QSpacerItem(0, 0, QSizePolicy.Expanding, 0), 1, 1
)
header_layout.addWidget(header_icon, 1, 2)
header_layout.addWidget(header_text, 1, 3)
header_layout.addItem(
QSpacerItem(0, 0, QSizePolicy.Expanding, 0), 1, 4
)
self.subtext_label = QLabel(self)
self.subtext_label.setFont(Font(10))
self.subtext_label.setStyleSheet("color: grey")
self.subtext_label.setWordWrap(True)
self.subtext_label.setAlignment(Qt.AlignCenter)
self.noise_label = QLabel()
font = Font(16)
font.setFamily("Courier")
font.setStyleHint(QFont.Monospace)
self.noise_label.setFont(font)
self.noise_label.setStyleSheet("color: grey")
self.noise_timer = QTimer()
self.noise_timer.timeout.connect(
lambda: self.noise_label.setText(b58encode(os.urandom(16)))
)
self.noise_timer.start(75)
self.code_label = QLabel()
self.code_label.setFont(Font(18))
self.code_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.code_label.hide()
self.box_title = QLabel(self)
self.box_title.setAlignment(Qt.AlignCenter)
self.box_title.setFont(Font(16))
self.box = QGroupBox()
self.box.setAlignment(Qt.AlignCenter)
self.box.setStyleSheet("QGroupBox {font-size: 16px}")
self.copy_button = QToolButton()
self.copy_button.setIcon(QIcon(resource("copy.png")))
self.copy_button.setToolTip("Copy to clipboard")
self.copy_button.setStyleSheet("border: 0px; padding: 0px;")
self.copy_button.hide()
box_layout = QGridLayout(self.box)
box_layout.addItem(QSpacerItem(0, 0, QSizePolicy.Expanding, 0), 1, 1)
box_layout.addWidget(self.noise_label, 1, 2)
box_layout.addWidget(self.code_label, 1, 3)
box_layout.addWidget(self.copy_button, 1, 4)
box_layout.addItem(QSpacerItem(0, 0, QSizePolicy.Expanding, 0), 1, 5)
self.close_button = QPushButton("Close and cancel invite")
self.close_button.setAutoDefault(False)
self.checkmark = QLabel()
self.checkmark.setPixmap(Pixmap("green_checkmark.png", 32))
self.checkmark.setAlignment(Qt.AlignCenter)
self.checkmark.hide()
self.tor_label = QLabel()
self.tor_label.setToolTip(
"This connection is being routed through the Tor network."
)
self.tor_label.setPixmap(Pixmap("tor-onion.png", 24))
self.tor_label.hide()
self.progress_bar = QProgressBar()
self.progress_bar.setMaximum(2)
self.progress_bar.setTextVisible(False)
self.progress_bar.hide()
layout = QGridLayout(self)
layout.addItem(QSpacerItem(0, 0, 0, QSizePolicy.Expanding), 0, 0)
layout.addItem(QSpacerItem(0, 0, QSizePolicy.Expanding, 0), 1, 1)
layout.addItem(QSpacerItem(0, 0, QSizePolicy.Expanding, 0), 1, 2)
layout.addItem(QSpacerItem(0, 0, QSizePolicy.Expanding, 0), 1, 3)
layout.addItem(QSpacerItem(0, 0, QSizePolicy.Expanding, 0), 1, 4)
layout.addItem(QSpacerItem(0, 0, QSizePolicy.Expanding, 0), 1, 5)
layout.addLayout(header_layout, 1, 3)
layout.addItem(QSpacerItem(0, 0, 0, QSizePolicy.Expanding), 2, 1)
layout.addWidget(self.box_title, 3, 2, 1, 3)
layout.addWidget(self.checkmark, 3, 3)
layout.addWidget(
self.tor_label, 4, 1, 1, 1, Qt.AlignRight | Qt.AlignVCenter
)
layout.addWidget(self.box, 4, 2, 1, 3)
layout.addWidget(self.progress_bar, 4, 2, 1, 3)
layout.addWidget(self.subtext_label, 5, 2, 1, 3)
layout.addItem(QSpacerItem(0, 0, 0, QSizePolicy.Expanding), 6, 1)
layout.addWidget(self.close_button, 7, 3)
layout.addItem(QSpacerItem(0, 0, 0, QSizePolicy.Expanding), 8, 1)
self.copy_button.clicked.connect(self.on_copy_button_clicked)
self.close_button.clicked.connect(self.close)
self.set_box_title("Generating invite code...")
self.subtext_label.setText("Creating folder invite(s)...\n\n")
if self.use_tor:
self.tor_label.show()
self.progress_bar.setStyleSheet(
"QProgressBar::chunk {{ background-color: {}; }}".format(
TOR_PURPLE
)
)
self.go() # XXX
def set_box_title(self, text):
if sys.platform == "darwin":
self.box_title.setText(text)
self.box_title.show()
else:
self.box.setTitle(text)
def on_copy_button_clicked(self):
code = self.code_label.text()
for mode in get_clipboard_modes():
set_clipboard_text(code, mode)
self.subtext_label.setText(
"Copied '{}' to clipboard!\n\n".format(code)
)
def on_got_code(self, code):
self.noise_timer.stop()
self.noise_label.hide()
self.set_box_title("Your invite code is:")
self.code_label.setText(code)
self.code_label.show()
self.copy_button.show()
if self.folder_names:
if len(self.folder_names) == 1:
abilities = 'download "{}" and modify its contents'.format(
self.folder_names[0]
)
else:
abilities = "download {} and modify their contents".format(
self.folder_names_humanized
)
else:
abilities = 'connect to "{}" and upload new folders'.format(
self.gateway.name
)
self.subtext_label.setText(
"Entering this code on another device will allow it to {}.\n"
"This code can only be used once.".format(abilities)
)
def on_got_introduction(self):
if sys.platform == "darwin":
self.box_title.hide()
self.box.hide()
self.progress_bar.show()
self.progress_bar.setValue(1)
self.subtext_label.setText("Connection established; sending invite...")
def on_send_completed(self):
self.box.hide()
self.progress_bar.show()
self.progress_bar.setValue(2)
self.checkmark.show()
self.close_button.setText("Finish")
if self.folder_names:
target = self.folder_names_humanized
else:
target = self.gateway.name
text = "Your invitation to {} was accepted".format(target)
self.subtext_label.setText(
"Invite successful!\n {} at {}".format(
text, datetime.now().strftime("%H:%M")
)
)
if get_preference("notifications", "invite") != "false":
self.gui.show_message("Invite successful", text)
if self.folder_names:
for view in self.gui.main_window.central_widget.views:
if view.gateway.name == self.gateway.name:
for folder in self.folder_names:
# Immediately tell the Model that there are at least 2
# members for this folder, i.e., that it is now shared
view.model().on_members_updated(folder, [None, None])
def handle_failure(self, failure):
if failure.type == wormhole.errors.LonelyError:
return
logging.error(str(failure))
show_failure(failure, self)
self.invite_sender.cancel()
self.close()
def on_created_invite(self):
self.subtext_label.setText("Opening wormhole...\n\n")
def go(self):
self.invite_sender = InviteSender(self.use_tor)
self.invite_sender.created_invite.connect(self.on_created_invite)
self.invite_sender.got_code.connect(self.on_got_code)
self.invite_sender.got_introduction.connect(self.on_got_introduction)
self.invite_sender.send_completed.connect(self.on_send_completed)
self.invite_sender.send(self.gateway, self.folder_names).addErrback(
self.handle_failure
)
def closeEvent(self, event):
if self.code_label.text() and self.progress_bar.value() < 2:
msg = QMessageBox(self)
msg.setIcon(QMessageBox.Question)
msg.setWindowTitle("Cancel invitation?")
msg.setText(
'Are you sure you wish to cancel the invitation to "{}"?'.format(
self.gateway.name
)
)
msg.setInformativeText(
'The invite code "{}" will no longer be valid.'.format(
self.code_label.text()
)
)
msg.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
msg.setDefaultButton(QMessageBox.No)
if msg.exec_() == QMessageBox.Yes:
self.invite_sender.cancel()
event.accept()
self.closed.emit(self)
else:
event.ignore()
else:
event.accept()
if self.noise_timer.isActive():
self.noise_timer.stop()
self.closed.emit(self)
def keyPressEvent(self, event):
if event.key() == Qt.Key_Escape:
self.close()
class InviteReceiverDialog(QDialog):
done = pyqtSignal(object) # Tahoe gateway
closed = pyqtSignal(QWidget)
def __init__(self, gateways):
super().__init__()
self.gateways = gateways
self.invite_receiver = None
self.joined_folders = []
self.setMinimumSize(500, 300)
self.mail_closed_icon = QLabel()
self.mail_closed_icon.setAlignment(Qt.AlignCenter)
self.mail_closed_icon.setPixmap(
Pixmap("mail-envelope-closed.png", 128)
)
self.mail_open_icon = QLabel()
self.mail_open_icon.setAlignment(Qt.AlignCenter)
self.mail_open_icon.setPixmap(Pixmap("mail-envelope-open.png", 128))
self.folder_icon = QLabel()
icon = QFileIconProvider().icon(QFileInfo(config_dir))
self.folder_icon.setPixmap(icon.pixmap(128, 128))
self.folder_icon.setAlignment(Qt.AlignCenter)
self.invite_code_widget = InviteCodeWidget(self)
self.invite_code_widget.lineedit.go.connect(self.go) # XXX
self.tor_label = QLabel()
self.tor_label.setToolTip(
"This connection is being routed through the Tor network."
)
self.tor_label.setPixmap(Pixmap("tor-onion.png", 24))
self.checkmark = QLabel()
self.checkmark.setAlignment(Qt.AlignCenter)
self.checkmark.setPixmap(Pixmap("green_checkmark.png", 32))
self.progressbar = QProgressBar(self)
self.progressbar.setValue(0)
self.progressbar.setMaximum(6) # XXX
self.progressbar.setTextVisible(False)
self.message_label = QLabel(" ")
self.message_label.setStyleSheet("color: grey")
self.message_label.setAlignment(Qt.AlignCenter)
self.error_label = QLabel()
self.error_label.setStyleSheet("color: red")
self.error_label.setAlignment(Qt.AlignCenter)
self.close_button = QPushButton("Close")
self.close_button.clicked.connect(self.close)
layout = QGridLayout(self)
layout.addItem(QSpacerItem(0, 0, 0, QSizePolicy.Expanding), 0, 0)
layout.addItem(QSpacerItem(0, 0, QSizePolicy.Expanding, 0), 1, 1)
layout.addItem(QSpacerItem(0, 0, QSizePolicy.Expanding, 0), 1, 2)
layout.addItem(QSpacerItem(0, 0, QSizePolicy.Expanding, 0), 1, 3)
layout.addWidget(self.mail_closed_icon, 1, 2, 1, 3)
layout.addWidget(self.mail_open_icon, 1, 2, 1, 3)
layout.addWidget(self.folder_icon, 1, 2, 1, 3)
layout.addItem(QSpacerItem(0, 0, QSizePolicy.Expanding, 0), 1, 4)
layout.addItem(QSpacerItem(0, 0, QSizePolicy.Expanding, 0), 1, 5)
layout.addWidget(self.invite_code_widget, 2, 2, 1, 3)
layout.addWidget(self.checkmark, 2, 3, 1, 1)
layout.addWidget(
self.tor_label, 3, 1, 1, 1, Qt.AlignRight | Qt.AlignVCenter
)
layout.addWidget(self.progressbar, 3, 2, 1, 3)
layout.addWidget(self.message_label, 5, 1, 1, 5)
layout.addWidget(self.error_label, 5, 2, 1, 3)
layout.addWidget(self.close_button, 6, 3)
layout.addItem(QSpacerItem(0, 0, 0, QSizePolicy.Expanding), 7, 1)
self.reset()
def reset(self):
self.mail_open_icon.hide()
self.folder_icon.hide()
self.mail_closed_icon.show()
self.progressbar.hide()
self.error_label.setText("")
self.error_label.hide()
self.close_button.hide()
self.tor_label.hide()
self.checkmark.hide()
self.progressbar.setStyleSheet("")
def show_error(self, text):
self.error_label.setText(text)
self.message_label.hide()
self.error_label.show()
reactor.callLater(3, self.error_label.hide)
reactor.callLater(3, self.message_label.show)
def update_progress(self, message):
step = self.progressbar.value() + 1
self.progressbar.setValue(step)
self.message_label.setText(message)
if step == 3:
self.mail_closed_icon.hide()
self.mail_open_icon.show()
def set_joined_folders(self, folders):
self.joined_folders = folders
if folders:
self.mail_open_icon.hide()
self.folder_icon.show()
def on_got_icon(self, path):
self.mail_open_icon.setPixmap(Pixmap(path, 128))
self.mail_closed_icon.hide()
self.mail_open_icon.show()
def on_done(self, gateway):
self.progressbar.setValue(self.progressbar.maximum())
self.close_button.show()
self.checkmark.show()
self.done.emit(gateway)
if self.joined_folders and len(self.joined_folders) == 1:
target = self.joined_folders[0]
self.message_label.setText(
'Successfully joined folder "{0}"!\n"{0}" is now available '
"for download".format(target)
)
elif self.joined_folders:
target = humanized_list(self.joined_folders, "folders")
self.message_label.setText(
"Successfully joined {0}!\n{0} are now available for "
"download".format(target)
)
self.close() # TODO: Cleanup
def on_grid_already_joined(self, grid_name):
QMessageBox.information(
self,
"Already connected",
'You are already connected to "{}"'.format(grid_name),
)
self.close()
def got_message(self, _):
self.update_progress("Reading invitation...") # 3
def got_welcome(self):
self.update_progress("Connected; waiting for message...") # 2
def handle_failure(self, failure):
logging.error(str(failure))
if failure.type == CancelledError and self.progressbar.value() > 2:
return
show_failure(failure, self)
self.close()
def go(self, code):
self.reset()
self.invite_code_widget.hide()
self.progressbar.show()
if self.invite_code_widget.tor_checkbox.isChecked():
use_tor = True
self.tor_label.show()
self.progressbar.setStyleSheet(
"QProgressBar::chunk {{ background-color: {}; }}".format(
TOR_PURPLE
)
)
else:
use_tor = False
self.update_progress("Verifying invitation...") # 1
self.invite_receiver = InviteReceiver(self.gateways, use_tor)
self.invite_receiver.got_welcome.connect(self.got_welcome)
self.invite_receiver.got_message.connect(self.got_message)
self.invite_receiver.grid_already_joined.connect(
self.on_grid_already_joined
)
self.invite_receiver.update_progress.connect(self.update_progress)
self.invite_receiver.got_icon.connect(self.on_got_icon)
self.invite_receiver.joined_folders.connect(self.set_joined_folders)
self.invite_receiver.done.connect(self.on_done)
d = self.invite_receiver.receive(code)
d.addErrback(self.handle_failure)
reactor.callLater(30, d.cancel)
def enterEvent(self, event):
event.accept()
self.invite_code_widget.lineedit.update_action_button() # XXX
def closeEvent(self, event):
event.accept()
try:
self.invite_receiver.cancel()
except AttributeError:
pass
self.closed.emit(self)
def keyPressEvent(self, event):
if event.key() == Qt.Key_Escape:
self.close()
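# --- Hedged example (editor's sketch, not part of gridsync) ---
# The animated "noise" label above sets its text to b58encode(os.urandom(16))
# every 75 ms. The helper below is an editor's stand-in showing what a base58
# encoding of random bytes looks like; gridsync's own b58encode (imported from
# gridsync.util) may differ in detail.
def _b58encode_sketch(data: bytes) -> str:
    alphabet = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
    num = int.from_bytes(data, "big")
    digits = ""
    while num > 0:
        num, rem = divmod(num, 58)
        digits = alphabet[rem] + digits
    # leading zero bytes are conventionally kept as leading '1' characters
    pad = len(data) - len(data.lstrip(b"\x00"))
    return "1" * pad + digits
# Example: _b58encode_sketch(os.urandom(16)) -> a short string of base58 noise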
|
gridsync/gridsync
|
gridsync/gui/share.py
|
Python
|
gpl-3.0
| 19,086
|
# (c) 2014, Brian Coca, Josh Drake, et al
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import time
import errno
try:
import simplejson as json
except ImportError:
import json
from ansible import constants as C
from ansible import utils
from ansible.cache.base import BaseCacheModule
class CacheModule(BaseCacheModule):
"""
A caching module backed by json files.
"""
def __init__(self, *args, **kwargs):
self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
self._cache = {}
self._cache_dir = C.CACHE_PLUGIN_CONNECTION # expects a dir path
if not self._cache_dir:
utils.exit("error, fact_caching_connection is not set, cannot use fact cache")
if not os.path.exists(self._cache_dir):
try:
os.makedirs(self._cache_dir)
except (OSError,IOError), e:
utils.warning("error while trying to create cache dir %s : %s" % (self._cache_dir, str(e)))
return None
    def get(self, key):
        if key in self._cache:
            return self._cache.get(key)
        if self.has_expired(key):
            raise KeyError
        cachefile = "%s/%s" % (self._cache_dir, key)
        try:
            f = open(cachefile, 'r')
        except (OSError,IOError), e:
            # reading, not writing -- the original message had read/write swapped
            utils.warning("error while trying to read %s : %s" % (cachefile, str(e)))
        else:
            try:
                value = json.load(f)
                self._cache[key] = value
                return value
            finally:
                # only close the handle if the open() above actually succeeded
                f.close()
    def set(self, key, value):
        self._cache[key] = value
        cachefile = "%s/%s" % (self._cache_dir, key)
        try:
            f = open(cachefile, 'w')
        except (OSError,IOError), e:
            # writing, not reading -- the original message had read/write swapped
            utils.warning("error while trying to write to %s : %s" % (cachefile, str(e)))
        else:
            try:
                f.write(utils.jsonify(value))
            finally:
                # only close the handle if the open() above actually succeeded
                f.close()
def has_expired(self, key):
cachefile = "%s/%s" % (self._cache_dir, key)
try:
st = os.stat(cachefile)
except (OSError,IOError), e:
if e.errno == errno.ENOENT:
return False
else:
utils.warning("error while trying to stat %s : %s" % (cachefile, str(e)))
if time.time() - st.st_mtime <= self._timeout:
return False
if key in self._cache:
del self._cache[key]
return True
def keys(self):
keys = []
for k in os.listdir(self._cache_dir):
if not (k.startswith('.') or self.has_expired(k)):
keys.append(k)
return keys
    def contains(self, key):
        if key in self._cache:
            return True
        if self.has_expired(key):
            return False
        cachefile = "%s/%s" % (self._cache_dir, key)
        try:
            os.stat(cachefile)
            return True
        except (OSError,IOError), e:
            if e.errno == errno.ENOENT:
                return False
            else:
                # cachefile was previously undefined in this branch (NameError)
                utils.warning("error while trying to stat %s : %s" % (cachefile, str(e)))
                return False
def delete(self, key):
        self._cache.pop(key, None)  # don't raise if the key was never loaded in memory
try:
os.remove("%s/%s" % (self._cache_dir, key))
except (OSError,IOError), e:
pass #TODO: only pass on non existing?
def flush(self):
self._cache = {}
for key in self.keys():
self.delete(key)
def copy(self):
ret = dict()
for key in self.keys():
ret[key] = self.get(key)
return ret
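# --- Hedged example (editor's sketch, not part of the plugin) ---
# A standalone illustration of the mtime-based expiry rule used by
# has_expired() above: an entry is considered fresh while the cache file's
# modification time is within `timeout` seconds of now. The paths and the
# timeout value below are made up for the demo; it never runs under Ansible.
if __name__ == '__main__':
    import tempfile
    demo_timeout = 2.0
    handle, path = tempfile.mkstemp()
    os.close(handle)
    def is_expired(path, timeout):
        try:
            st = os.stat(path)
        except OSError:
            return False            # missing file: nothing to expire
        return time.time() - st.st_mtime > timeout
    print(is_expired(path, demo_timeout))   # False: just created
    os.utime(path, (0, 0))                  # pretend the file is very old
    print(is_expired(path, demo_timeout))   # True: past the timeout
    os.remove(path)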
|
devopservices/ansible
|
lib/ansible/cache/jsonfile.py
|
Python
|
gpl-3.0
| 4,163
|
# Natural Language Toolkit: TIMIT Corpus Reader
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Haejoong Lee <haejoong@ldc.upenn.edu>
# Steven Bird <sb@ldc.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Read tokens, phonemes and audio data from the NLTK TIMIT Corpus.
This corpus contains selected portion of the TIMIT corpus.
* 16 speakers from 8 dialect regions
* 1 male and 1 female from each dialect region
* total 130 sentences (10 sentences per speaker. Note that some
sentences are shared among other speakers, especially sa1 and sa2
are spoken by all speakers.)
* total 160 recording of sentences (10 recordings per speaker)
* audio format: NIST Sphere, single channel, 16kHz sampling,
16 bit sample, PCM encoding
Module contents
---------------
The timit module provides 4 functions and 4 data items.
* items
List of items in the corpus. There are total 160 items, each of which
corresponds to a unique utterance of a speaker. Here's an example of an
item in the list:
      dr1-fvmh0:sx206
        - _----  _---
       | |  |    | |
       | |  |    | |
       | |  |    | `--- sentence number
       | |  |    `----- sentence type (a:all, i:shared, x:exclusive)
       | |  `---------- speaker ID
       | `------------- sex (m:male, f:female)
       `--------------- dialect region (1..8)
* speakers
List of speaker IDs. An example of speaker ID:
dr1-fvmh0
Note that if you split an item ID with colon and take the first element of
the result, you will get a speaker ID.
    >>> itemid = 'dr1-fvmh0:sx206'
>>> spkrid,sentid = itemid.split(':')
>>> spkrid
'dr1-fvmh0'
The second element of the result is a sentence ID.
* dictionary
Phonetic dictionary of words contained in this corpus. This is a Python
dictionary from words to phoneme lists.
* spkrinfo
Speaker information table. It's a Python dictionary from speaker IDs to
  records of 10 fields. Speaker IDs are the same as the ones in timit.speakers.
Each record is a dictionary from field names to values, and the fields are
as follows:
id speaker ID as defined in the original TIMIT speaker info table
sex speaker gender (M:male, F:female)
dr speaker dialect region (1:new england, 2:northern,
3:north midland, 4:south midland, 5:southern, 6:new york city,
7:western, 8:army brat (moved around))
use corpus type (TRN:training, TST:test)
in this sample corpus only TRN is available
recdate recording date
birthdate speaker birth date
ht speaker height
race speaker race (WHT:white, BLK:black, AMR:american indian,
SPN:spanish-american, ORN:oriental,???:unknown)
edu speaker education level (HS:high school, AS:associate degree,
BS:bachelor's degree (BS or BA), MS:master's degree (MS or MA),
PHD:doctorate degree (PhD,JD,MD), ??:unknown)
comments comments by the recorder
The 4 functions are as follows.
* raw(sentences=items, offset=False)
Given a list of items, returns an iterator of a list of word lists,
each of which corresponds to an item (sentence). If offset is set to True,
each element of the word list is a tuple of word(string), start offset and
end offset, where offset is represented as a number of 16kHz samples.
* phonetic(sentences=items, offset=False)
Given a list of items, returns an iterator of a list of phoneme lists,
each of which corresponds to an item (sentence). If offset is set to True,
each element of the phoneme list is a tuple of word(string), start offset
and end offset, where offset is represented as a number of 16kHz samples.
* audiodata(item, start=0, end=None)
Given an item, returns a chunk of audio samples formatted into a string.
    When the function is called, if start and end are omitted, the entire
samples of the recording will be returned. If only end is omitted,
samples from the start offset to the end of the recording will be returned.
* play(data)
Play the given audio samples. The audio samples can be obtained from the
timit.audiodata function.
"""
from en.parser.nltk_lite.corpora import get_basedir
from en.parser.nltk_lite import tokenize
from itertools import islice
import ossaudiodev, time
import sys, os, re
if sys.platform.startswith('linux') or sys.platform.startswith('freebsd'):
PLAY_ENABLED = True
else:
PLAY_ENABLED = False
__all__ = ["items", "raw", "phonetic", "speakers", "dictionary", "spkrinfo",
"audiodata", "play"]
PREFIX = os.path.join(get_basedir(),"timit")
speakers = []
items = []
dictionary = {}
spkrinfo = {}
for f in os.listdir(PREFIX):
if re.match("^dr[0-9]-[a-z]{4}[0-9]$", f):
speakers.append(f)
for g in os.listdir(os.path.join(PREFIX,f)):
if g.endswith(".txt"):
items.append(f+':'+g[:-4])
speakers.sort()
items.sort()
# read dictionary
for l in open(os.path.join(PREFIX,"timitdic.txt")):
if l[0] == ';': continue
a = l.strip().split(' ')
dictionary[a[0]] = a[1].strip('/').split()
# read spkrinfo
header = ['id','sex','dr','use','recdate','birthdate','ht','race','edu',
'comments']
for l in open(os.path.join(PREFIX,"spkrinfo.txt")):
if l[0] == ';': continue
rec = l[:54].split() + [l[54:].strip()]
key = "dr%s-%s%s" % (rec[2],rec[1].lower(),rec[0].lower())
spkrinfo[key] = dict([(header[i],rec[i]) for i in range(10)])
def _prim(ext, sentences=items, offset=False):
if isinstance(sentences,str):
sentences = [sentences]
for sent in sentences:
fnam = os.path.sep.join([PREFIX] + sent.split(':')) + ext
r = []
for l in open(fnam):
if not l.strip(): continue
a = l.split()
if offset:
r.append((a[2],int(a[0]),int(a[1])))
else:
r.append(a[2])
yield r
def raw(sentences=items, offset=False):
"""
Given a list of items, returns an iterator of a list of word lists,
each of which corresponds to an item (sentence). If offset is set to True,
each element of the word list is a tuple of word(string), start offset and
end offset, where offset is represented as a number of 16kHz samples.
@param sentences: List of items (sentences) for which tokenized word list
will be returned. In case there is only one item, it is possible to
pass the item id as a string.
@type sentences: list of strings or a string
    @param offset: If True, the start and end offsets are attached to each
word in the returned list. Note that here, an offset is represented by
the number of 16kHz samples.
@type offset: bool
@return: List of list of strings (words) if offset is False. List of list
    of tuples (word, start offset, end offset) if offset is True.
"""
return _prim(".wrd", sentences, offset)
def phonetic(sentences=items, offset=False):
"""
Given a list of items, returns an iterator of a list of phoneme lists,
each of which corresponds to an item (sentence). If offset is set to True,
each element of the phoneme list is a tuple of word(string), start offset
and end offset, where offset is represented as a number of 16kHz samples.
@param sentences: List of items (sentences) for which phoneme list
will be returned. In case there is only one item, it is possible to
pass the item id as a string.
@type sentences: list of strings or a string
    @param offset: If True, the start and end offsets are attached to each
phoneme in the returned list. Note that here, an offset is represented by
the number of 16kHz samples.
@type offset: bool
@return: List of list of strings (phonemes) if offset is False. List of
    list of tuples (phoneme, start offset, end offset) if offset is True.
"""
return _prim(".phn", sentences, offset)
def audiodata(item, start=0, end=None):
"""
Given an item, returns a chunk of audio samples formatted into a string.
    When the function is called, if start and end are omitted, the entire
samples of the recording will be returned. If only end is omitted,
samples from the start offset to the end of the recording will be returned.
@param start: start offset
@type start: integer (number of 16kHz frames)
@param end: end offset
@type end: integer (number of 16kHz frames) or None to indicate
the end of file
@return: string of sequence of bytes of audio samples
"""
assert(end is None or end > start)
headersize = 44
fnam = os.path.join(PREFIX,item.replace(':',os.path.sep)) + '.wav'
if end is None:
data = open(fnam).read()
else:
data = open(fnam).read(headersize+end*2)
return data[headersize+start*2:]
def play(data):
"""
Play the given audio samples.
@param data: audio samples
@type data: string of bytes of audio samples
"""
if not PLAY_ENABLED:
print >>sys.stderr, "sorry, currently we don't support audio playback on this platform:", sys.platform
return
try:
dsp = ossaudiodev.open('w')
except IOError, e:
print >>sys.stderr, "can't acquire the audio device; please activate your audio device."
print >>sys.stderr, "system error message:", str(e)
return
dsp.setfmt(ossaudiodev.AFMT_S16_LE)
dsp.channels(1)
dsp.speed(16000)
dsp.write(data)
dsp.close()
def demo():
from en.parser.nltk_lite.corpora import timit
print "6th item (timit.items[5])"
print "-------------------------"
itemid = timit.items[5]
spkrid, sentid = itemid.split(':')
print " item id: ", itemid
print " speaker id: ", spkrid
print " sentence id:", sentid
print
record = timit.spkrinfo[spkrid]
print " speaker information:"
print " TIMIT speaker id: ", record['id']
print " speaker sex: ", record['sex']
print " dialect region: ", record['dr']
print " data type: ", record['use']
print " recording date: ", record['recdate']
print " date of birth: ", record['birthdate']
print " speaker height: ", record['ht']
print " speaker race: ", record['race']
print " speaker education:", record['edu']
print " comments: ", record['comments']
print
print " words of the sentence:"
print " ", timit.raw(sentences=itemid).next()
print
print " words of the sentence with offsets (first 3):"
print " ", timit.raw(sentences=itemid, offset=True).next()[:3]
print
print " phonemes of the sentence (first 10):"
print " ", timit.phonetic(sentences=itemid).next()[:10]
print
print " phonemes of the sentence with offsets (first 3):"
print " ", timit.phonetic(sentences=itemid, offset=True).next()[:3]
print
print " looking up dictionary for words of the sentence..."
words = timit.raw(sentences=itemid).next()
for word in words:
print " %-5s:" % word, timit.dictionary[word]
print
print "audio playback:"
print "---------------"
print " playing sentence", sentid, "by speaker", spkrid, "(a.k.a. %s)"%record["id"], "..."
data = timit.audiodata(itemid)
timit.play(data)
print
print " playing words:"
words = timit.raw(sentences=itemid, offset=True).next()
for word, start, end in words:
print " playing %-10s in 1.5 seconds ..." % `word`
time.sleep(1.5)
data = timit.audiodata(itemid, start, end)
timit.play(data)
print
print " playing phonemes (first 10):"
phones = timit.phonetic(sentences=itemid, offset=True).next()
for phone, start, end in phones[:10]:
print " playing %-10s in 1.5 seconds ..." % `phone`
time.sleep(1.5)
data = timit.audiodata(itemid, start, end)
timit.play(data)
print
# play sentence sa1 of all female speakers
sentid = 'sa1'
for spkr in timit.speakers:
if timit.spkrinfo[spkr]['sex'] == 'F':
itemid = spkr + ':' + sentid
print " playing sentence %s of speaker %s ..." % (sentid, spkr)
data = timit.audiodata(itemid)
timit.play(data)
print
if __name__ == '__main__':
demo()
|
rossgoodwin/musapaedia
|
musapaedia/en/parser/nltk_lite/corpora/timit.py
|
Python
|
mit
| 12,505
|
"""
Class for accessing the swig process
"""
import shutil, subprocess, os
from pylib.logwrapper import LogWrapper
from pylib.process import Process
# Wrapper class for running the swig process
class SwigProcess(Process):
def __init__(self):
super().__init__()
self.log = LogWrapper.getlogger()
# Swig Process options
self.Namespace = None
self.IncludeDirectories = []
self.InputFile = None
def Start(self):
self.log.info("Starting generation of swig C# files")
# Setup Output directory
if os.path.exists(self.WorkingDir):
self.log.warn("Cleaning Output Directory: " + self.WorkingDir)
shutil.rmtree(self.WorkingDir, ignore_errors=True)
os.makedirs(self.WorkingDir)
if self.Options == None: self.Options = []
self.Options = self.Options + self.GenerateCmdLineOpts()
self.log.info("Swig: Launching:")
self.log.info("Swig: ExePath: " + self.ExePath)
self.log.info("Swig: RootNamespace: " + self.Namespace)
self.log.info("Swig: Command: " + " ".join(str(x) for x in self.Options))
super().Start()
return
# Generate Command Line Options
def GenerateCmdLineOpts(self):
ret = []
if self.Namespace != None:
ret.append("-namespace")
ret.append(self.Namespace)
for incdir in self.IncludeDirectories:
ret.append("-I" + incdir)
if self.InputFile != None: ret.append(self.InputFile)
return ret
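# --- Hedged example (editor's sketch, appended for illustration) ---
# What GenerateCmdLineOpts() assembles for a typical configuration: the C#
# namespace, the -I include directories, then the input interface file. The
# values below (MyLib, ./include, mylib.i) are made up, and running this demo
# assumes the repo's pylib package is importable.
if __name__ == "__main__":
    proc = SwigProcess()
    proc.Namespace = "MyLib"
    proc.IncludeDirectories = ["./include", "./swig"]
    proc.InputFile = "mylib.i"
    # Expected: ['-namespace', 'MyLib', '-I./include', '-I./swig', 'mylib.i']
    print(proc.GenerateCmdLineOpts())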
|
grbd/GBD.Audio.SoxSharp.Soxbuild
|
bin/pylib/subproc/swig_process.py
|
Python
|
lgpl-3.0
| 1,537
|
from django.urls import reverse
from django_comments.models import Comment
from fiscal.forms import OrganizationCreateForm
from workshops.models import Organization, Event
from workshops.tests.base import TestBase
class TestOrganization(TestBase):
def setUp(self):
super().setUp()
self._setUpUsersAndLogin()
def test_organization_delete(self):
"""Make sure deleted organization is longer accessible.
Additionally check on_delete behavior for Event."""
Event.objects.create(host=self.org_alpha,
administrator=self.org_beta,
slug='test-event')
for org_domain in [self.org_alpha.domain, self.org_beta.domain]:
rv = self.client.post(reverse('organization_delete', args=[org_domain, ]))
content = rv.content.decode('utf-8')
assert 'Failed to delete' in content
Event.objects.get(slug='test-event').delete()
for org_domain in [self.org_alpha.domain, self.org_beta.domain]:
rv = self.client.post(reverse('organization_delete', args=[org_domain, ]))
assert rv.status_code == 302
with self.assertRaises(Organization.DoesNotExist):
Organization.objects.get(domain=org_domain)
def test_organization_invalid_chars_in_domain(self):
r"""Ensure users can't put wrong characters in the organization's
domain field.
Invalid characters are any that match `[^\w\.-]+`, ie. domain is
allowed only to have alphabet-like chars, dot and dash.
The reason for only these chars lies in `workshops/urls.py`. The regex
for the organization_details URL has `[\w\.-]+` matching...
"""
data = {
'domain': 'http://beta.com/',
'fullname': self.org_beta.fullname,
'country': self.org_beta.country,
}
url = reverse('organization_edit', args=[self.org_beta.domain])
rv = self.client.post(url, data=data)
# make sure we're not updating to good values
assert rv.status_code == 200
def test_creating_event_with_no_comment(self):
"""Ensure that no comment is added when OrganizationCreateForm without
comment content is saved."""
self.assertEqual(Comment.objects.count(), 0)
data = {
'fullname': 'Test Organization',
'domain': 'test.org',
'comment': '',
}
form = OrganizationCreateForm(data)
form.save()
self.assertEqual(Comment.objects.count(), 0)
def test_creating_event_with_comment(self):
"""Ensure that a comment is added when OrganizationCreateForm with
comment content is saved."""
self.assertEqual(Comment.objects.count(), 0)
data = {
'fullname': 'Test Organization',
'domain': 'test.org',
'comment': 'This is a test comment.',
}
form = OrganizationCreateForm(data)
obj = form.save()
self.assertEqual(Comment.objects.count(), 1)
comment = Comment.objects.first()
self.assertEqual(comment.comment, 'This is a test comment.')
self.assertIn(comment, Comment.objects.for_model(obj))
|
swcarpentry/amy
|
amy/fiscal/tests/test_organization.py
|
Python
|
mit
| 3,256
|
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for ProcessIO event."""
from .running_process_event import RunningProcessEvent
class ProcessIO(RunningProcessEvent):
"""Event emitted when a process generates output on stdout or stderr, or if stdin is used."""
name = 'launch.events.process.ProcessIO'
def __init__(self, *, text: bytes, fd: int, **kwargs) -> None:
"""
Create a ProcessIO event.
Unmatched keyword arguments are passed to RunningProcessEvent, see it
for details on those arguments.
        :param text: the data (bytes) associated with the event
        :param fd: an integer that indicates which file descriptor the text is from
"""
super().__init__(**kwargs)
self.__text = text
self.__from_stdin = fd == 0
self.__from_stdout = fd == 1
self.__from_stderr = fd == 2
@property
def text(self) -> bytes:
"""Getter for text."""
return self.__text
@property
def from_stdin(self) -> bool:
"""Getter for from_stdin."""
return self.__from_stdin
@property
def from_stdout(self) -> bool:
"""Getter for from_stdout."""
return self.__from_stdout
@property
def from_stderr(self) -> bool:
"""Getter for from_stderr."""
return self.__from_stderr
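# --- Hedged example (editor's sketch, not part of launch) ---
# The fd argument maps onto the three boolean properties above: 0 -> stdin,
# 1 -> stdout, 2 -> stderr. The helper below only restates that convention;
# it does not construct a ProcessIO (the kwargs RunningProcessEvent requires
# are deliberately omitted here).
def _describe_fd(fd: int) -> str:
    """Name the stream a ProcessIO fd value refers to."""
    return {0: 'stdin', 1: 'stdout', 2: 'stderr'}.get(fd, 'unknown')
# _describe_fd(1) -> 'stdout'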
|
ros2/launch
|
launch/launch/events/process/process_io.py
|
Python
|
apache-2.0
| 1,913
|
from queries import queries
from datetime import datetime, date, time
import xml.parsers.expat
import httplib
class rest_request:
def __init__(self, from_, to, refdate = 'now'):
if refdate == 'now':
self._from = from_
self._to = to
else:
time = datetime.strptime(refdate, '%Y-%m-%dT%H:%M:%S')
diff = (datetime.now() - time).days
self._from = from_ + diff
self._to = to + diff
def run(self):
self._run_rest_query()
self._parse_xml()
def _element_handler(self, name, attrs):
if name == "moneyresults":
self.chargedValue = float(attrs['chargedValue'])
self.count = int(attrs['nbBankTransaction'])
self.paidValue = float(attrs['paidValue'])
self.moneyRaised = float(attrs['moneyRaised'])
def _run_rest_query(self):
conn = httplib.HTTPSConnection("localhost")
conn.request("GET", "/rest/moneyquery?from=%i&to=%i" % (self._from, self._to))
        response = conn.getresponse()
        self.data = response.read()
def _parse_xml(self):
p = xml.parsers.expat.ParserCreate()
p.StartElementHandler = self._element_handler
p.Parse(self.data)
class comment_rest_request:
def __init__(self, from_, to, refdate = 'now'):
if refdate == 'now':
self._from = from_
self._to = to
else:
time = datetime.strptime(refdate, '%Y-%m-%dT%H:%M:%S')
diff = (datetime.now() - time).days
self._from = from_ + diff
self._to = to + diff
def run(self):
self._run_rest_query()
self._parse_xml()
def _element_handler(self, name, attrs):
if name == "size":
self.size = float(attrs['size'])
def _run_rest_query(self):
conn = httplib.HTTPSConnection("localhost")
conn.request("GET", "/rest/comments?size=true&from=%i&to=%i" % (self._from, self._to))
        response = conn.getresponse()
        self.data = response.read()
def _parse_xml(self):
p = xml.parsers.expat.ParserCreate()
p.StartElementHandler = self._element_handler
p.Parse(self.data)
class dashboard_queries(queries):
def __init__(self, cursor, output, refdate = 'now'):
super(dashboard_queries, self).__init__(cursor, output, refdate)
self.nbvisits_last_month = 0
self.nbvisits_month = 0
self.nbvisits_day = 0
self.nbinscription_last_month = 0
self.nbinscription_month = 0
self.nbinscription_day = 0
def generate_dashboard(self):
self._generate_visits_array()
self._generate_visitors_array()
self._generate_members_array()
self._generate_inscription_array()
self._get_rest_data()
self._write_results()
def _generate_visits_array(self):
self.cursor.execute('''
SELECT count(distinct(visit.id))
FROM visit
WHERE begin_date > datetime(?, '-30 days', 'localtime')
AND real=1
''', (self.refdate,))
self.nbvisits_month = self.cursor.fetchone()[0]
self.cursor.execute('''
SELECT count(distinct(visit.id))
FROM visit
WHERE begin_date > datetime(?, '-1 day', 'localtime')
AND real=1
''', (self.refdate,))
self.nbvisits_day = self.cursor.fetchone()[0]
self.cursor.execute('''
SELECT count(distinct(visit.id))
FROM visit
WHERE begin_date < datetime(?, '-30 days', 'localtime')
AND begin_date > datetime(?, '-60 days', 'localtime')
AND real=1
''', (self.refdate,self.refdate))
self.nbvisits_last_month = self.cursor.fetchone()[0]
self.cursor.execute('''
SELECT count(distinct(visit.id))
FROM visit
WHERE real=1
''')
self.nbvisits_tot = self.cursor.fetchone()[0]
def _generate_inscription_array(self):
self.cursor.execute('''
SELECT count(distinct(url)) from request where url like '%member/doactivate%'
AND date > datetime(?, '-1 day', 'localtime')
''', (self.refdate,))
self.nbinscription_day = self.cursor.fetchone()[0]
self.cursor.execute('''
SELECT count(distinct(url)) from request where url like '%member/doactivate%'
AND date > datetime(?, '-30 days', 'localtime')
''', (self.refdate,))
self.nbinscription_month = self.cursor.fetchone()[0]
self.cursor.execute('''
SELECT count(distinct(url)) from request where url like '%member/doactivate%'
AND date < datetime(?, '-30 days', 'localtime')
AND date > datetime(?, '-60 days', 'localtime')
''', (self.refdate,self.refdate))
self.nbinscription_last_month = self.cursor.fetchone()[0]
self.cursor.execute('''
SELECT count(distinct(url)) from request where url like '%member/doactivate%'
''')
self.nbinscription_tot = self.cursor.fetchone()[0]
def _percent(self, a, b):
if b == 0:
return 0
return (a * 100) / b
def _get_rest_data(self):
r = rest_request(30, -1, self.refdate)
r.run()
self.charged_month = r.chargedValue
self.conv_month = r.count
self.moy_month = (r.count != 0) and (r.chargedValue / r.count) or 0
self.tot_month = r.paidValue - r.chargedValue
self.contrib_month = r.moneyRaised
r = rest_request(1, -1, self.refdate)
r.run()
self.charged_day = r.chargedValue
self.conv_day = self._percent(r.count, self.nbvisits_day)
self.moy_day = (r.count != 0) and (r.chargedValue / r.count) or 0
self.tot_day = r.paidValue - r.chargedValue
self.contrib_day = r.moneyRaised
r = rest_request(60, 30, self.refdate)
r.run()
self.charged_last_month = r.chargedValue
self.conv_last_month = self._percent(r.count, self.nbvisits_last_month)
self.moy_last_month = (r.count != 0) and (r.chargedValue / r.count) or 0
self.tot_last_month = r.paidValue - r.chargedValue
self.contrib_last_month = r.moneyRaised
r = rest_request(3000, -1, self.refdate)
r.run()
self.charged_tot = r.chargedValue
self.conv_tot = r.count
self.moy_tot = (r.count != 0) and (r.chargedValue / r.count) or 0
self.tot_tot = r.paidValue - r.chargedValue
self.contrib_tot = r.moneyRaised
u = comment_rest_request(30, -1, self.refdate)
u.run()
self.com_month = u.size
u = comment_rest_request(1, -1, self.refdate)
u.run()
self.com_day = u.size
u = comment_rest_request(60, 30, self.refdate)
u.run()
self.com_last_month = u.size
u = comment_rest_request(3000, -1, self.refdate)
u.run()
self.com_tot = u.size
def _write_results(self):
f = open(self.output + "/dashboard.js", "w")
f.write("var visits = [%i, %i, %f, %i, %i];"
% (self.nbvisits_month, self.nbvisits_day, self.nbvisits_month / 30, self.nbvisits_last_month, self.nbvisits_tot))
f.write("var visitors = [%i, %i, %f, %i, %i];"
% (self.nbvisitors_month, self.nbvisitors_day, self.nbvisitors_month / 30, self.nbvisitors_last_month, self.nbvisitors_tot))
f.write("var members = [%i, %i, %f, %i, %i];"
% (self.nbmembers_month, self.nbmembers_day, self.nbmembers_month / 30, self.nbmembers_last_month, self.nbmembers_tot))
f.write("var insc = [%i, %f, %f, %i, %i];" %
(self.nbinscription_month,
self._percent(self.nbinscription_day, self.nbvisits_day),
self._percent(self.nbinscription_day, self.nbvisits_day),
self._percent(self.nbinscription_last_month, self.nbvisits_last_month),
self.nbinscription_tot))
f.write("var conv = [%f, %f, %f, %f, %f];"
% (self.conv_month, self.conv_day, self.conv_month, self.conv_last_month, self.conv_tot))
f.write("var moy = [%i, %i, %f, %i, %i];"
% (self.moy_month, self.moy_day, self.moy_month, self.moy_last_month, self.moy_tot))
f.write("var tot = [%i, %i, %f, %i, %i];"
% (self.tot_month, self.tot_day, self.tot_month / 30, self.tot_last_month, self.tot_tot))
f.write("var contrib = [%i, %i, %f, %i, %i];"
% (self.contrib_month, self.contrib_day, self.contrib_month / 30, self.contrib_last_month, self.contrib_tot))
f.write("var com = [%i, %i, %f, %i, %i];"
% (self.com_month, self.com_day, self.com_month / 30, self.com_last_month, self.com_tot))
f.write("var stock = [%i, %i, %f, %i, %i];"
% (self.charged_month - self.contrib_month,
             self.charged_day - self.contrib_day,  # was charged_day - charged_day (always 0)
(self.charged_month-self.contrib_month) / 30,
self.charged_last_month - self.contrib_last_month,
self.charged_tot -self.contrib_tot))
def _generate_members_array(self):
self.cursor.execute('''
SELECT COUNT(DISTINCT(visitor.id))
FROM visitor LEFT JOIN visit on visitor.id = id_visitor
WHERE visitor.userid != -1
AND real=1
AND begin_date > datetime(?, '-30 days', 'localtime')
''', (self.refdate,))
self.nbmembers_month = self.cursor.fetchone()[0]
self.cursor.execute('''
SELECT COUNT(DISTINCT(visitor.id))
FROM visitor LEFT JOIN visit on visitor.id = id_visitor
WHERE visitor.userid != -1
AND real=1
AND begin_date > datetime(?, '-1 day', 'localtime')
''', (self.refdate,))
self.nbmembers_day = self.cursor.fetchone()[0]
self.cursor.execute('''
SELECT COUNT(DISTINCT(visitor.id))
FROM visitor LEFT JOIN visit on visitor.id = id_visitor
WHERE visitor.userid != -1
AND real=1
AND begin_date < datetime(?, '-30 days', 'localtime')
AND begin_date > datetime(?, '-60 days', 'localtime')
''', (self.refdate,self.refdate))
self.nbmembers_last_month = self.cursor.fetchone()[0]
self.cursor.execute('''
SELECT COUNT(DISTINCT(visitor.id))
FROM visitor LEFT JOIN visit on visitor.id = id_visitor
WHERE visitor.userid != -1
AND real=1
''')
self.nbmembers_tot = self.cursor.fetchone()[0]
def _generate_visitors_array(self):
self.cursor.execute('''
SELECT count(distinct(visitor.id))
FROM visit
JOIN visitor on visitor.id=id_visitor
WHERE real=1
AND begin_date > datetime(?, '-30 days', 'localtime')
''', (self.refdate,))
self.nbvisitors_month = self.cursor.fetchone()[0]
self.cursor.execute('''
SELECT count(distinct(visitor.id))
FROM visit
JOIN visitor on visitor.id=id_visitor
WHERE real=1
AND begin_date > datetime(?, '-1 day', 'localtime')
''', (self.refdate,))
self.nbvisitors_day = self.cursor.fetchone()[0]
self.cursor.execute('''
SELECT count(distinct(visitor.id))
FROM visit
JOIN visitor on visitor.id=id_visitor
WHERE real=1
AND begin_date < datetime(?, '-30 days', 'localtime')
AND begin_date > datetime(?, '-60 days', 'localtime')
''', (self.refdate,self.refdate))
self.nbvisitors_last_month = self.cursor.fetchone()[0]
self.cursor.execute('''
SELECT count(distinct(visitor.id))
FROM visit
JOIN visitor on visitor.id=id_visitor
WHERE real=1
''')
self.nbvisitors_tot = self.cursor.fetchone()[0]
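# --- Hedged example (editor's sketch, not part of the stats code) ---
# How rest_request shifts its [from, to] day window when a historical refdate
# is supplied: the number of whole days between now and refdate is added to
# both bounds, so "the last 30 days as of refdate" becomes a window expressed
# relative to today. The values below are made up for the demo.
if __name__ == '__main__':
    def shift_window(from_, to, refdate='now'):
        if refdate == 'now':
            return from_, to
        ref = datetime.strptime(refdate, '%Y-%m-%dT%H:%M:%S')
        diff = (datetime.now() - ref).days
        return from_ + diff, to + diff
    print(shift_window(30, -1))                          # unchanged: (30, -1)
    print(shift_window(30, -1, '2000-01-01T00:00:00'))   # both bounds shifted by the day diff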
|
niavok/elveos
|
stats/src/bloatitstats/queries/dashboard_queries.py
|
Python
|
agpl-3.0
| 12,021
|
#!/usr/bin/python3
# This program is intended to run in a cron schedule.
# If there is no action from user within 30mins the computer powers off.
# The action is detected through X by invoking xautolock(1).
# If xautolock is running already the process doesn't start.
# Dependencies:
#
# Linux OS
# python
# xautolock must be installed
# my_notifier.py must be in /usr/local/bin
import sys
import time
import os
currenttime = time.strftime("%Y%m%d%H%M%S").rstrip('\n')
try:
import subprocess
# Maximum minutes of no X user activity (30)
timeout = 30
# Seconds before shutdown (300)
margin = 300
# Program to run when timeout is reached
locker = "/sbin/poweroff"
#locker = "/bin/ls"
# Program to run $margin seconds before timeout
notifier = "/usr/local/bin/my_notifier.py"
#notifier = "./my_notifier.py"
#import os
#if os.path.isfile(notifier): print("lala")
logfile = "/var/log/check_idle.log"
log = open(logfile,"a")
my_env = os.environ
my_env['DISPLAY'] = ":0.0"
    my_env['XAUTHORITY'] = "/root/.Xauthority"
# xautolock executable
xautolock = "/usr/bin/xautolock"
opts = []
opts.append("-time")
opts.append("%d" %timeout)
if os.path.isfile(notifier):
opts.append("-notify")
opts.append("%d" % margin)
opts.append("-notifier")
opts.append("%s " % notifier)
else:
log.write("%s: Warning! Notifier (/usr/local/bin/my_notifier.py) not found.\n" % currenttime)
opts.append("-locker")
opts.append("%s" % locker)
opts.append("-secure")
opts.append("-detectsleep")
commandlist = []
commandlist.append(xautolock)
log.write("%s: Executing: %s\n" % (currenttime, " ".join(commandlist+opts)))
popen = subprocess.Popen(commandlist+opts,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=my_env)
stdout,stderr = popen.communicate()
log.write("%s: %s || %s\n" % (currenttime, stdout, stderr))
log.close()
#raise()
except:
logfile = "/var/log/check_idle.log"
log = open(logfile,"a")
log.write("%s: There was an error %s " % (currenttime, sys.exc_info()[0]))
log.close()
# import sys
# import smtplib
# from email.mime.text import MIMEText
#
# message = "ERROR: %s" % sys.exc_info()[0]
# msg = MIMEText(message)
# msg['Subject'] = '[check_idle] %s' % message[:20]
# msg['From'] = "check_idle@math.uoc.gr"
# msg['To'] = "sysadmin@math.uoc.gr"
#
# s = smtplib.SMTP('mta.uoc.gr')
# s.sendmail("sysadmin@math.uoc.gr", "sysadmin@math.uoc.gr", msg.as_string())
#s.quit()
|
kalopsik-math/scripts
|
labs/check_idle.py
|
Python
|
gpl-3.0
| 2,740
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
import logging
import logging.config
import os
import socket
import sys
import warnings
from os.path import dirname, expanduser
from shutil import which
import tornado.ioloop
from PIL import Image
from tornado.httpserver import HTTPServer
from tornado.netutil import bind_unix_socket
from thumbor.config import Config
from thumbor.console import get_server_parameters
from thumbor.context import Context
from thumbor.importer import Importer
from thumbor.signal_handler import setup_signal_handler
def get_as_integer(value):
try:
return int(value)
except (ValueError, TypeError):
return None
def get_config(config_path, use_environment=False):
if use_environment:
Config.allow_environment_variables()
lookup_paths = [os.curdir, expanduser("~"), "/etc/", dirname(__file__)]
return Config.load(config_path, conf_name="thumbor.conf", lookup_paths=lookup_paths)
def configure_log(config, log_level):
if config.THUMBOR_LOG_CONFIG and config.THUMBOR_LOG_CONFIG != "":
logging.config.dictConfig(config.THUMBOR_LOG_CONFIG)
else:
logging.basicConfig(
level=getattr(logging, log_level),
format=config.THUMBOR_LOG_FORMAT,
datefmt=config.THUMBOR_LOG_DATE_FORMAT,
)
def get_importer(config):
importer = Importer(config)
importer.import_modules()
if importer.error_handler_class is not None:
importer.error_handler = importer.error_handler_class(config)
return importer
def validate_config(config, server_parameters):
if server_parameters.security_key is None:
server_parameters.security_key = config.SECURITY_KEY
if not isinstance(server_parameters.security_key, (bytes, str)):
raise RuntimeError(
"No security key was found for this instance of thumbor. "
+ "Please provide one using the conf file or a security key file."
)
if config.ENGINE or config.USE_GIFSICLE_ENGINE:
# Error on Image.open when image pixel count is above MAX_IMAGE_PIXELS
warnings.simplefilter("error", Image.DecompressionBombWarning)
if config.USE_GIFSICLE_ENGINE:
server_parameters.gifsicle_path = which("gifsicle")
if server_parameters.gifsicle_path is None:
raise RuntimeError(
"If using USE_GIFSICLE_ENGINE configuration to True,"
" the `gifsicle` binary must be in the PATH "
"and must be an executable."
)
def get_context(server_parameters, config, importer):
return Context(server=server_parameters, config=config, importer=importer)
def get_application(context):
return context.modules.importer.import_class(context.app_class)(context)
def run_server(application, context):
server = HTTPServer(application, xheaders=True)
if context.server.fd is not None:
fd_number = get_as_integer(context.server.fd)
if fd_number is not None:
sock = socket.fromfd(
fd_number, socket.AF_INET | socket.AF_INET6, socket.SOCK_STREAM
)
else:
sock = bind_unix_socket(context.server.fd)
server.add_socket(sock)
logging.debug(
"thumbor starting at fd %s", context.server.fd
)
else:
server.bind(context.server.port, context.server.ip)
logging.debug(
"thumbor starting at %s:%d", context.server.ip, context.server.port
)
server.start(context.server.processes)
return server
def main(arguments=None):
"""Runs thumbor server with the specified arguments."""
if arguments is None:
arguments = sys.argv[1:]
server_parameters = get_server_parameters(arguments)
config = get_config(
server_parameters.config_path, server_parameters.use_environment
)
configure_log(config, server_parameters.log_level.upper())
validate_config(config, server_parameters)
importer = get_importer(config)
with get_context(server_parameters, config, importer) as context:
application = get_application(context)
server = run_server(application, context)
setup_signal_handler(server, config)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main(sys.argv[1:])
|
kkopachev/thumbor
|
thumbor/server.py
|
Python
|
mit
| 4,551
|
from pymongo.errors import DuplicateKeyError
from master.boostrap.db_client import SingleDBClient
from master.beans.pws_entries import PwsEntry
from master.encryption.encrypt_bfish import MyBlowFish
from pymongo import ASCENDING, DESCENDING
class PwsStore:
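    # MongoDB-backed store for encrypted password entries, uniquely keyed by
    # (owner, login, env_name).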
def __init__(self):
self.client = SingleDBClient().get_client()
self.db = self.client.pws
self.db.pws_col.ensure_index([('owner', ASCENDING), ('login', ASCENDING),
('env_name', DESCENDING)], unique=True)
def insert_new_pws(self, pws_object):
try:
self.db.pws_col.insert(pws_object.to_json())
except DuplicateKeyError:
return "Duplicate entry found"
return None
def get_pws_by_login_env(self, owner, login, env_name):
return PwsEntry.to_pws(self.db.pws_col.find_one({'owner': owner, 'login': login, 'env_name': env_name}))
def get_pws_by_owner(self, owner):
all_pws = []
for pws_entry in self.db.pws_col.find({'owner': owner}):
all_pws.append({'env_name': pws_entry['env_name'], 'login': pws_entry['login']})
return all_pws
def update_pws_password(self, owner, login, env, enc):
self.db.pws_col.update({'owner': owner, 'login': login, 'env_name': env}, {'$set': {'enc': enc}})
def close(self):
self.client.close()
def drop_pws_col(self):
self.client.drop_database('pws')
def delete_pws_entry(self, owner, pws_login):
self.db.pws_col.remove({'owner': owner, 'login': pws_login})
def delete_pws_by_owner(self, owner):
self.db.pws_col.remove({'owner': owner})
def delete_pws_by_owner_env(self, owner, env_name):
self.db.pws_col.remove({'owner': owner, 'env_name': env_name})
def change_all_pws_enc(self, owner, old_master_password, master_password):
"""
:param owner: string id of the owner
:param old_master_password: string old master password
:param master_password: string new master password
"""
updating_record = []
for pws_entry in self.db.pws_col.find({'owner': owner}):
each_clear_text = MyBlowFish(old_master_password).decrypt(pws_entry['enc'])
new_enc = MyBlowFish(master_password).encrypt(each_clear_text.decode('utf-8'))
updating_record.append({'login': pws_entry['login'],
'env_name': pws_entry['env_name'], 'enc': new_enc})
# updating records back
if len(updating_record) > 0:
for new_pws_entry in updating_record:
self.update_pws_password(owner, new_pws_entry['login'], new_pws_entry['env_name'], new_pws_entry['enc'])
|
hungh/masterpass
|
master/persistence/pws_store.py
|
Python
|
apache-2.0
| 2,729
|
#!/usr/bin/env python2
import json
import os
import sys
import django
django.setup()
# dashboard
from main import models
# This is the UUID of SIP from the `MetadataAppliesToTypes` table
INGEST_METADATA_TYPE = '3e48343d-e2d2-4956-aaa3-b54d26eb9761'
def main(sip_uuid, dc_path):
# If there's no metadata, that's not an error, and just keep going
if not os.path.exists(dc_path):
print "DC metadata not found; exiting", "(at", dc_path + ")"
return 0
print "Loading DC metadata from", dc_path
with open(dc_path) as json_data:
data = json.load(json_data)
dc = models.DublinCore(metadataappliestoidentifier=sip_uuid,
metadataappliestotype_id=INGEST_METADATA_TYPE)
for key, value in data.iteritems():
try:
setattr(dc, key, value)
except AttributeError:
print >> sys.stderr, "Invalid DC attribute:", key
dc.save()
return 0
if __name__ == '__main__':
sip_uuid = sys.argv[1]
dc_path = sys.argv[2]
sys.exit(main(sip_uuid, dc_path))
|
eckardm/archivematica
|
src/MCPClient/lib/clientScripts/loadDublinCore.py
|
Python
|
agpl-3.0
| 1,064
|
from pandac.PandaModules import PythonCallbackObject
from pandac.PandaModules import CallbackNode, VBase3
from direct.directbase import DirectStart
from OpenGL.GL import *
import sys
VSHADER = """
void main() {
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
}
"""
FSHADER = """
uniform sampler2D tex;
void main() {
gl_FragColor.rgb = 1.0 - texture2D(tex,gl_TexCoord[0].st).rgb;
}
"""
def init(cbdata):
"""We'll be compiling & assigning the shader here.
This method will only be called once."""
v = glCreateShader(GL_VERTEX_SHADER)
f = glCreateShader(GL_FRAGMENT_SHADER)
glShaderSource(v, VSHADER)
glShaderSource(f, FSHADER)
glCompileShader(v)
glCompileShader(f)
program = glCreateProgram()
glAttachShader(program,v)
glAttachShader(program,f)
glLinkProgram(program)
glUseProgram(program)
glDeleteShader(v)
glDeleteShader(f)
# We don't need to set the shader again. Clear the callback.
cbnode.clearDrawCallback()
# Make sure we're using the OpenGL pipe.
if base.pipe.getInterfaceName() != "OpenGL":
print "This program requires OpenGL."
sys.exit(1)
# Set up the callback object
cbnode = CallbackNode("cbnode")
cbnode.setDrawCallback(PythonCallbackObject(init))
cbnp = render.attachNewNode(cbnode)
# Load the panda and reparent it to the callback object.
panda = loader.loadModel("panda")
panda.reparentTo(cbnp)
# Let it rotate to show that transforms work well too.
panda.hprInterval(2.0, VBase3(360, 0, 0)).loop()
# Put the camera where it will be able to actually see something.
base.trackball.node().setPos(0, 30, -7)
run()
|
faruk/opticfoo
|
inspiration/glsl.py
|
Python
|
gpl-3.0
| 1,579
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) Spyder Project Contributors
#
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
# -----------------------------------------------------------------------------
"""Terminal Widget."""
from __future__ import print_function
import sys
from spyder.config.base import _, DEV
from qtpy.QtCore import (Qt, QUrl, Slot, QEvent, QTimer, Signal,
QObject)
from qtpy.QtWidgets import (QMenu, QFrame, QVBoxLayout, QWidget)
from qtpy.QtGui import QKeySequence
from spyder.widgets.browser import WebView
from spyder.utils import icon_manager as ima
from qtpy.QtWebEngineWidgets import QWebEnginePage, QWebEngineSettings
from spyder.utils.qthelpers import create_action, add_actions
from qtpy.QtWebEngineWidgets import WEBENGINE
if WEBENGINE:
from PyQt5.QtWebChannel import QWebChannel
class ChannelHandler(QObject):
"""QWebChannel handler for JS calls."""
sig_ready = Signal()
sig_closed = Signal()
def __init__(self, parent):
"""Handler main constructor."""
QObject.__init__(self, parent)
@Slot()
def ready(self):
"""Invoke signal when terminal prompt is ready."""
self.sig_ready.emit()
@Slot()
def close(self):
"""Invoke signal when terminal process was closed externally."""
self.sig_closed.emit()
class TerminalWidget(QFrame):
"""Terminal widget."""
terminal_closed = Signal()
terminal_ready = Signal()
def __init__(self, parent, port, path='~', font=None):
"""Frame main constructor."""
QWidget.__init__(self, parent)
url = 'http://127.0.0.1:{0}?path={1}'.format(port, path)
self.handler = ChannelHandler(self)
self.handler.sig_ready.connect(lambda: self.terminal_ready.emit())
self.handler.sig_closed.connect(lambda: self.terminal_closed.emit())
self.view = TermView(self, term_url=url, handler=self.handler)
self.font = font
self.initial_path = path
layout = QVBoxLayout()
layout.addWidget(self.view)
layout.setContentsMargins(0, 0, 0, 0)
self.setFrameStyle(QFrame.StyledPanel | QFrame.Sunken)
self.setLayout(layout)
self.body = self.view.document
self.view.page().loadFinished.connect(self.setup_term)
if not WEBENGINE:
QTimer.singleShot(250, self.__alive_loopback)
@Slot(bool)
def setup_term(self, finished):
"""Setup other terminal options after page has loaded."""
if finished:
            # This forces the black background to be displayed
print("\0", end='')
self.set_font(self.font)
self.set_dir(self.initial_path)
def eval_javascript(self, script):
"""Evaluate Javascript instructions inside view."""
return self.view.eval_javascript(script)
def set_dir(self, path):
"""Set terminal initial current working directory."""
self.eval_javascript('setcwd("{0}")'.format(path))
def set_font(self, font):
"""Set terminal font via CSS."""
self.font = font
self.eval_javascript('fitFont("{0}")'.format(self.font))
def get_fonts(self):
"""List terminal CSS fonts."""
return self.eval_javascript("getFonts()")
def exec_cmd(self, cmd):
"""Execute a command inside the terminal."""
self.eval_javascript('exec("{0}")'.format(cmd))
def __alive_loopback(self):
alive = self.is_alive()
if not alive:
self.terminal_closed.emit()
else:
QTimer.singleShot(250, self.__alive_loopback)
def is_alive(self):
"""Check if terminal process is alive."""
alive = self.eval_javascript('isAlive()')
return alive
class TermView(WebView):
"""XTerm Wrapper."""
def __init__(self, parent, term_url='http://127.0.0.1:8070',
handler=None):
"""Webview main constructor."""
WebView.__init__(self, parent)
self.parent = parent
self.copy_action = create_action(self, _("Copy text"),
icon=ima.icon('editcopy'),
triggered=self.copy,
shortcut='Ctrl+Shift+C')
self.paste_action = create_action(self, _("Paste text"),
icon=ima.icon('editpaste'),
triggered=self.paste,
shortcut='Ctrl+Shift+V')
if WEBENGINE:
self.channel = QWebChannel(self.page())
self.page().setWebChannel(self.channel)
self.channel.registerObject('handler', handler)
self.term_url = QUrl(term_url)
self.load(self.term_url)
if WEBENGINE:
self.document = self.page()
try:
self.document.profile().clearHttpCache()
except AttributeError:
pass
else:
self.document = self.page().mainFrame()
self.initial_y_pos = 0
self.setFocusPolicy(Qt.ClickFocus)
def copy(self):
"""Copy unicode text from terminal."""
self.triggerPageAction(QWebEnginePage.Copy)
def paste(self):
"""Paste unicode text into terminal."""
self.triggerPageAction(QWebEnginePage.Paste)
def contextMenuEvent(self, event):
"""Override Qt method."""
menu = QMenu(self)
actions = [self.pageAction(QWebEnginePage.SelectAll),
self.copy_action, self.paste_action, None,
self.zoom_in_action, self.zoom_out_action]
if DEV and not WEBENGINE:
settings = self.page().settings()
settings.setAttribute(QWebEngineSettings.DeveloperExtrasEnabled,
True)
actions += [None, self.pageAction(QWebEnginePage.InspectElement)]
add_actions(menu, actions)
menu.popup(event.globalPos())
event.accept()
def eval_javascript(self, script):
"""Evaluate Javascript instructions inside DOM."""
if WEBENGINE:
return self.document.runJavaScript("{}".format(script))
else:
return self.document.evaluateJavaScript("{}".format(script))
def wheelEvent(self, event):
"""Catch and process wheel scrolling events via Javascript."""
delta = event.angleDelta().y()
self.eval_javascript('scrollTerm({0})'.format(delta))
def event(self, event):
"""Grab all keyboard input."""
if event.type() == QEvent.ShortcutOverride:
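            # Rebuild the pressed combination as a portable key sequence
            # (e.g. 'Ctrl+Shift+C') so the terminal's copy/paste shortcuts are
            # handled here instead of being swallowed by the web view.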
key = event.key()
modifiers = event.modifiers()
if modifiers & Qt.ShiftModifier:
key += Qt.SHIFT
if modifiers & Qt.ControlModifier:
key += Qt.CTRL
if modifiers & Qt.AltModifier:
key += Qt.ALT
if modifiers & Qt.MetaModifier:
key += Qt.META
sequence = QKeySequence(key).toString(QKeySequence.PortableText)
if sequence == 'Ctrl+Alt+Shift+T':
event.ignore()
return False
elif sequence == 'Ctrl+Shift+C':
self.copy()
elif sequence == 'Ctrl+Shift+V':
self.paste()
event.accept()
return True
return WebView.event(self, event)
def test():
"""Plugin visual test."""
from spyder.utils.qthelpers import qapplication
app = qapplication(test_time=8)
    # TerminalWidget requires a port; 8070 matches TermView's default term_url.
    term = TerminalWidget(None, 8070)
# term.resize(900, 700)
term.show()
sys.exit(app.exec_())
if __name__ == "__main__":
test()
|
andfoy/spyder-terminal
|
spyder_terminal/widgets/terminalgui.py
|
Python
|
mit
| 7,823
|
# Copyright 2014 Objectif Libre
# Copyright 2015 DotHill Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import hashlib
import math
import time
from lxml import etree
from oslo_log import log as logging
from oslo_utils import units
import requests
import six
from cinder import exception
from cinder.i18n import _LE
LOG = logging.getLogger(__name__)
class DotHillClient(object):
def __init__(self, host, login, password, protocol, ssl_verify):
self._login = login
self._password = password
self._base_url = "%s://%s/api" % (protocol, host)
self._session_key = None
self.ssl_verify = ssl_verify
def _get_auth_token(self, xml):
"""Parse an XML authentication reply to extract the session key."""
self._session_key = None
tree = etree.XML(xml)
if tree.findtext(".//PROPERTY[@name='response-type']") == "success":
self._session_key = tree.findtext(".//PROPERTY[@name='response']")
def login(self):
"""Authenticates the service on the device."""
hash_ = "%s_%s" % (self._login, self._password)
if six.PY3:
hash_ = hash_.encode('utf-8')
hash_ = hashlib.md5(hash_)
digest = hash_.hexdigest()
url = self._base_url + "/login/" + digest
try:
xml = requests.get(url, verify=self.ssl_verify)
except requests.exceptions.RequestException:
raise exception.DotHillConnectionError
self._get_auth_token(xml.text.encode('utf8'))
if self._session_key is None:
raise exception.DotHillAuthenticationError
def _assert_response_ok(self, tree):
"""Parses the XML returned by the device to check the return code.
        Raises a DotHillRequestError if the return code is not 0
        or if the return code is missing.
"""
# Get the return code for the operation, raising an exception
# if it is not present.
return_code = tree.findtext(".//PROPERTY[@name='return-code']")
if not return_code:
raise exception.DotHillRequestError(message="No status found")
# If no error occurred, just return.
if return_code == '0':
return
# Format a message for the status code.
msg = "%s (%s)" % (tree.findtext(".//PROPERTY[@name='response']"),
return_code)
raise exception.DotHillRequestError(message=msg)
def _build_request_url(self, path, *args, **kargs):
url = self._base_url + path
if kargs:
url += '/' + '/'.join(["%s/%s" % (k.replace('_', '-'), v)
for (k, v) in kargs.items()])
if args:
url += '/' + '/'.join(args)
return url
def _request(self, path, *args, **kargs):
"""Performs an HTTP request on the device.
Raises a DotHillRequestError if the device returned but the status is
not 0. The device error message will be used in the exception message.
If the status is OK, returns the XML data for further processing.
"""
url = self._build_request_url(path, *args, **kargs)
LOG.debug("DotHill Request URL: %s", url)
headers = {'dataType': 'api', 'sessionKey': self._session_key}
try:
xml = requests.get(url, headers=headers, verify=self.ssl_verify)
tree = etree.XML(xml.text.encode('utf8'))
except Exception:
raise exception.DotHillConnectionError
if path == "/show/volumecopy-status":
return tree
self._assert_response_ok(tree)
return tree
def logout(self):
url = self._base_url + '/exit'
try:
requests.get(url, verify=self.ssl_verify)
return True
except Exception:
return False
def create_volume(self, name, size, backend_name, backend_type):
# NOTE: size is in this format: [0-9]+GB
path_dict = {'size': size}
if backend_type == "linear":
path_dict['vdisk'] = backend_name
else:
path_dict['pool'] = backend_name
self._request("/create/volume", name, **path_dict)
return None
def delete_volume(self, name):
self._request("/delete/volumes", name)
def extend_volume(self, name, added_size):
self._request("/expand/volume", name, size=added_size)
def create_snapshot(self, volume_name, snap_name):
self._request("/create/snapshots", snap_name, volumes=volume_name)
def delete_snapshot(self, snap_name):
self._request("/delete/snapshot", "cleanup", snap_name)
def backend_exists(self, backend_name, backend_type):
try:
if backend_type == "linear":
path = "/show/vdisks"
else:
path = "/show/pools"
self._request(path, backend_name)
return True
except exception.DotHillRequestError:
return False
def _get_size(self, size):
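        # Convert a size reported in 512-byte blocks to GiB, rounding up.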
return int(math.ceil(float(size) * 512 / (units.G)))
def backend_stats(self, backend_name, backend_type):
stats = {'free_capacity_gb': 0,
'total_capacity_gb': 0}
prop_list = []
if backend_type == "linear":
path = "/show/vdisks"
prop_list = ["size-numeric", "freespace-numeric"]
else:
path = "/show/pools"
prop_list = ["total-size-numeric", "total-avail-numeric"]
tree = self._request(path, backend_name)
size = tree.findtext(".//PROPERTY[@name='%s']" % prop_list[0])
if size:
stats['total_capacity_gb'] = self._get_size(size)
size = tree.findtext(".//PROPERTY[@name='%s']" % prop_list[1])
if size:
stats['free_capacity_gb'] = self._get_size(size)
return stats
def list_luns_for_host(self, host):
tree = self._request("/show/host-maps", host)
return [int(prop.text) for prop in tree.xpath(
"//PROPERTY[@name='lun']")]
def _get_first_available_lun_for_host(self, host):
luns = self.list_luns_for_host(host)
lun = 1
while True:
if lun not in luns:
return lun
lun += 1
def map_volume(self, volume_name, connector, connector_element):
if connector_element == 'wwpns':
lun = self._get_first_available_lun_for_host(connector['wwpns'][0])
host = ",".join(connector['wwpns'])
else:
host = connector['initiator']
host_status = self._check_host(host)
if host_status != 0:
hostname = self._safe_hostname(connector['host'])
self._request("/create/host", hostname, id=host)
lun = self._get_first_available_lun_for_host(host)
self._request("/map/volume",
volume_name,
lun=str(lun),
host=host,
access="rw")
return lun
def unmap_volume(self, volume_name, connector, connector_element):
if connector_element == 'wwpns':
host = ",".join(connector['wwpns'])
else:
host = connector['initiator']
self._request("/unmap/volume", volume_name, host=host)
def get_active_target_ports(self):
ports = []
tree = self._request("/show/ports")
for obj in tree.xpath("//OBJECT[@basetype='port']"):
port = {prop.get('name'): prop.text
for prop in obj.iter("PROPERTY")
if prop.get('name') in
["port-type", "target-id", "status"]}
if port['status'] == 'Up':
ports.append(port)
return ports
def get_active_fc_target_ports(self):
return [port['target-id'] for port in self.get_active_target_ports()
if port['port-type'] == "FC"]
def get_active_iscsi_target_iqns(self):
return [port['target-id'] for port in self.get_active_target_ports()
if port['port-type'] == "iSCSI"]
def linear_copy_volume(self, src_name, dest_name, dest_bknd_name):
"""Copy a linear volume."""
self._request("/volumecopy",
dest_name,
dest_vdisk=dest_bknd_name,
source_volume=src_name,
prompt='yes')
# The copy has started; now monitor until the operation completes.
count = 0
while True:
tree = self._request("/show/volumecopy-status")
return_code = tree.findtext(".//PROPERTY[@name='return-code']")
if return_code == '0':
status = tree.findtext(".//PROPERTY[@name='progress']")
progress = False
if status:
progress = True
LOG.debug("Volume copy is in progress: %s", status)
if not progress:
LOG.debug("Volume copy completed: %s", status)
break
else:
if count >= 5:
LOG.error(_LE('Error in copying volume: %s'), src_name)
raise exception.DotHillRequestError
time.sleep(1)
count += 1
time.sleep(5)
def copy_volume(self, src_name, dest_name, dest_bknd_name,
backend_type='virtual'):
"""Copy a linear or virtual volume."""
if backend_type == 'linear':
return self.linear_copy_volume(src_name, dest_name, dest_bknd_name)
# Copy a virtual volume to another in the same pool.
self._request("/copy/volume", src_name, name=dest_name)
LOG.debug("Volume copy of source_volume: %(src_name)s to "
"destination_volume: %(dest_name)s started.",
{'src_name': src_name, 'dest_name': dest_name, })
# Loop until this volume copy is no longer in progress.
while self.volume_copy_in_progress(src_name):
time.sleep(5)
# Once the copy operation is finished, check to ensure that
# the volume was not deleted because of a subsequent error. An
# exception will be raised if the named volume is not present.
self._request("/show/volumes", dest_name)
LOG.debug("Volume copy of source_volume: %(src_name)s to "
"destination_volume: %(dest_name)s completed.",
{'src_name': src_name, 'dest_name': dest_name, })
def volume_copy_in_progress(self, src_name):
"""Check if a volume copy is in progress for the named volume."""
# 'show volume-copies' always succeeds, even if none in progress.
tree = self._request("/show/volume-copies")
# Find 0 or 1 job(s) with source volume we're interested in
q = "OBJECT[PROPERTY[@name='source-volume']/text()='%s']" % src_name
joblist = tree.xpath(q)
if len(joblist) == 0:
return False
LOG.debug("Volume copy of volume: %(src_name)s is "
"%(pc)s percent completed.",
{'src_name': src_name,
'pc': joblist[0].findtext("PROPERTY[@name='progress']"), })
return True
def _check_host(self, host):
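        # Return 0 if the initiator is already defined on the array, -1 otherwise.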
host_status = -1
tree = self._request("/show/hosts")
for prop in tree.xpath("//PROPERTY[@name='host-id' and text()='%s']"
% host):
host_status = 0
return host_status
def _safe_hostname(self, hostname):
"""Modify an initiator name to match firmware requirements.
Initiator name cannot include certain characters and cannot exceed
15 bytes in 'T' firmware (32 bytes in 'G' firmware).
"""
for ch in [',', '"', '\\', '<', '>']:
if ch in hostname:
hostname = hostname.replace(ch, '')
index = len(hostname)
if index > 15:
index = 15
return hostname[:index]
def get_active_iscsi_target_portals(self):
# This function returns {'ip': status,}
portals = {}
prop = 'ip-address'
tree = self._request("/show/ports")
for el in tree.xpath("//PROPERTY[@name='primary-ip-address']"):
prop = 'primary-ip-address'
break
iscsi_ips = [ip.text for ip in tree.xpath(
"//PROPERTY[@name='%s']" % prop)]
if not iscsi_ips:
return portals
for index, port_type in enumerate(tree.xpath(
"//PROPERTY[@name='port-type' and text()='iSCSI']")):
status = port_type.getparent().findtext("PROPERTY[@name='status']")
if status == 'Up':
portals[iscsi_ips[index]] = status
return portals
def get_chap_record(self, initiator_name):
tree = self._request("/show/chap-records")
for prop in tree.xpath("//PROPERTY[@name='initiator-name' and "
"text()='%s']" % initiator_name):
chap_secret = prop.getparent().findtext("PROPERTY[@name='initiator"
"-secret']")
return chap_secret
def create_chap_record(self, initiator_name, chap_secret):
self._request("/create/chap-record",
name=initiator_name,
secret=chap_secret)
def get_serial_number(self):
tree = self._request("/show/system")
return tree.findtext(".//PROPERTY[@name='midplane-serial-number']")
def get_owner_info(self, backend_name, backend_type):
if backend_type == 'linear':
tree = self._request("/show/vdisks", backend_name)
else:
tree = self._request("/show/pools", backend_name)
return tree.findtext(".//PROPERTY[@name='owner']")
def modify_volume_name(self, old_name, new_name):
self._request("/set/volume", old_name, name=new_name)
def get_volume_size(self, volume_name):
tree = self._request("/show/volumes", volume_name)
size = tree.findtext(".//PROPERTY[@name='size-numeric']")
return self._get_size(size)
|
Hybrid-Cloud/cinder
|
cinder/volume/drivers/dothill/dothill_client.py
|
Python
|
apache-2.0
| 14,788
|
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import sys
import click
import click_spinner
import cli_common.cli
import please_cli.config
import please_cli.create_certs
import please_cli.shell
import please_cli.utils
CMD_HELP = '''
Run PROJECT in development mode.
\b
PROJECTS:
{projects}
'''.format(
projects=''.join([' - ' + i + '\n' for i in please_cli.config.PROJECTS]),
)
@click.command(
cls=please_cli.utils.ClickCustomCommand,
short_help='Run PROJECT in development mode.',
epilog='Happy hacking!',
help=CMD_HELP,
)
@click.argument(
'project',
required=True,
type=click.Choice(please_cli.config.PROJECTS),
)
@click.option(
'-q', '--quiet',
is_flag=True,
help='Don\'t display output of a command.',
)
@click.option(
'--nix-shell',
required=True,
default=please_cli.config.NIX_BIN_DIR + 'nix-shell',
help='`nix-shell` command',
)
@click.option(
'--interactive/--no-interactive',
default=True,
)
@cli_common.cli.taskcluster_options
@click.pass_context
def cmd(ctx,
project,
quiet,
nix_shell,
interactive,
taskcluster_secret,
taskcluster_client_id,
taskcluster_access_token,
):
project_config = please_cli.config.PROJECTS_CONFIG.get(project, {})
run_type = project_config.get('run')
run_options = project_config.get('run_options', {})
if not run_type:
raise click.ClickException(f'Application `{project}` is not configured to be runnable.')
host = run_options.get('host', os.environ.get('HOST', '127.0.0.1'))
port = str(run_options.get('port', 8000))
schema = 'https://'
project_name = project.replace('-', '_')
ca_cert_file = os.path.join(please_cli.config.TMP_DIR, 'certs', 'ca.crt')
server_cert_file = os.path.join(please_cli.config.TMP_DIR, 'certs', 'server.crt')
server_key_file = os.path.join(please_cli.config.TMP_DIR, 'certs', 'server.key')
os.environ['DEBUG'] = 'true'
os.environ['PROJECT_NAME'] = project_name
pg_host = please_cli.config.PROJECTS_CONFIG['postgresql']['run_options'].get('host', host)
pg_port = str(please_cli.config.PROJECTS_CONFIG['postgresql']['run_options']['port'])
redis_host = please_cli.config.PROJECTS_CONFIG['redis']['run_options'].get('host', host)
redis_port = str(please_cli.config.PROJECTS_CONFIG['redis']['run_options']['port'])
if 'postgresql' in project_config.get('requires', []):
dbname = 'services'
click.echo(f' => Checking if database `{dbname}` exists ... ', nl=False)
with click_spinner.spinner():
result, output, error = ctx.invoke(
please_cli.shell.cmd,
project=project,
quiet=True,
command=' '.join([
'psql',
'-lqt',
'-h', pg_host,
'-p', pg_port,
]),
nix_shell=nix_shell,
)
if result != 0 and 'psql: could not connect to server' in output:
click.secho('ERROR', fg='red')
raise click.UsageError(
'Could not connect to the database.\n\n'
'Please run:\n\n'
' ./please run postgresql\n\n'
'in a separate terminal.'
)
please_cli.utils.check_result(result, output, ask_for_details=interactive)
database_exists = False
for line in output.split('\n'):
column1 = line.split('|')[0].strip()
if column1 == dbname:
database_exists = True
break
if not database_exists:
            click.echo(f' => Creating `{dbname}` database ... ', nl=False)
with click_spinner.spinner():
result, output, error = ctx.invoke(
please_cli.shell.cmd,
project=project,
command=' '.join([
'createdb',
'-h', pg_host,
'-p', pg_port,
dbname,
]),
nix_shell=nix_shell,
)
please_cli.utils.check_result(result, output, ask_for_details=interactive)
os.environ['DATABASE_URL'] = f'postgresql://{pg_host}:{pg_port}/{dbname}'
if 'redis' in project_config.get('requires', []):
# Check redis is running
click.echo(' => Checking if redis is running... ', nl=False)
with click_spinner.spinner():
result, output, error = ctx.invoke(
please_cli.shell.cmd,
project=project,
quiet=True,
command=f'redis-cli -h {redis_host} -p {redis_port} ping',
nix_shell=nix_shell,
)
please_cli.utils.check_result(result, output, ask_for_details=interactive)
# Setup config for client application
os.environ['REDIS_URL'] = f'redis://{redis_host}:{redis_port}'
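    # Build the command to run inside nix-shell, depending on how the project
    # is declared to run (database service, Flask app, docs build, or frontend).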
if run_type == 'POSTGRESQL':
data_dir = run_options.get('data_dir', os.path.join(please_cli.config.TMP_DIR, 'postgresql'))
if not os.path.isdir(data_dir):
click.echo(f' => Initialize database folder `{data_dir}` ... ', nl=False)
with click_spinner.spinner():
result, output, error = ctx.invoke(please_cli.shell.cmd,
project=project,
command=f'initdb -D {data_dir} --auth=trust',
nix_shell=nix_shell,
)
please_cli.utils.check_result(result, output, ask_for_details=interactive)
schema = ''
command = [
'postgres',
'-D', data_dir,
'-h', host,
'-p', port,
]
elif run_type == 'REDIS':
data_dir = run_options.get('data_dir', os.path.join(please_cli.config.TMP_DIR, 'redis'))
if not os.path.isdir(data_dir):
os.makedirs(data_dir)
command = [
'redis-server',
'--dir', data_dir,
'--bind', host,
'--port', port,
]
elif run_type == 'FLASK':
for env_name, env_value in run_options.get('envs', {}).items():
env_name = please_cli.utils.normalize_name(env_name).upper()
os.environ[env_name] = env_value
if not os.path.exists(ca_cert_file) or \
not os.path.exists(server_cert_file) or \
not os.path.exists(server_key_file):
ctx.invoke(please_cli.create_certs.cmd,
certificates_dir=os.path.join(please_cli.config.TMP_DIR, 'certs'),
)
project_cache_dir = os.path.join(please_cli.config.TMP_DIR, 'cache', project_name)
if not os.path.isdir(project_cache_dir):
os.makedirs(project_cache_dir)
os.environ['CACHE_TYPE'] = 'filesystem'
os.environ['CACHE_DIR'] = project_cache_dir
os.environ['APP_SETTINGS'] = os.path.join(
please_cli.config.ROOT_DIR, 'src', project_name, 'settings.py')
os.environ['APP_URL'] = f'{schema}{host}:{port}'
os.environ['CORS_ORIGINS'] = '*'
command = [
'gunicorn',
please_cli.utils.normalize_name(project_name) + '.flask:app',
'--bind', f'{host}:{port}',
f'--ca-certs={ca_cert_file}',
f'--certfile={server_cert_file}',
f'--keyfile={server_key_file}',
'--workers', '2',
'--timeout', '3600',
'--reload',
'--reload-engine=poll',
'--log-file', '-',
]
elif run_type == 'SPHINX':
schema = 'http://'
command = [
'HOST=' + host,
'PORT=' + port,
'python', 'run.py',
]
elif run_type == 'ELM':
if not os.path.exists(ca_cert_file) or \
not os.path.exists(server_cert_file) or \
not os.path.exists(server_key_file):
ctx.invoke(please_cli.create_certs.cmd,
certificates_dir=os.path.join(please_cli.config.TMP_DIR, 'certs'),
)
os.environ['WEBPACK_RELEASE_VERSION'] = please_cli.config.VERSION
os.environ['WEBPACK_RELEASE_CHANNEL'] = 'development'
os.environ['SSL_CACERT'] = ca_cert_file
os.environ['SSL_CERT'] = server_cert_file
os.environ['SSL_KEY'] = server_key_file
os.environ['HOST'] = host
os.environ['PORT'] = port
for env_name, env_value in run_options.get('envs', {}).items():
env_name = 'WEBPACK_' + please_cli.utils.normalize_name(env_name).upper()
os.environ[env_name] = env_value
        # XXX: once we move please_cli.config.PROJECTS to nix we won't need this
for require in project_config.get('requires', []):
env_name = 'WEBPACK_{}_URL'.format(please_cli.utils.normalize_name(require).upper())
env_value = '{}://{}:{}'.format(
please_cli.config.PROJECTS_CONFIG[require]['run_options'].get('schema', 'https'),
please_cli.config.PROJECTS_CONFIG[require]['run_options'].get('host', host),
please_cli.config.PROJECTS_CONFIG[require]['run_options']['port'],
)
os.environ[env_name] = env_value
command = [
'webpack-dev-server',
'--host', host,
'--port', port,
'--config', os.path.join(please_cli.config.ROOT_DIR, 'src', project_name, 'webpack.config.js'),
]
elif run_type == 'NEUTRINO':
if not os.path.exists(ca_cert_file) or \
not os.path.exists(server_cert_file) or \
not os.path.exists(server_key_file):
ctx.invoke(please_cli.create_certs.cmd,
certificates_dir=os.path.join(please_cli.config.TMP_DIR, 'certs'),
)
envs = dict(
SSL_CACERT=ca_cert_file,
SSL_CERT=server_cert_file,
SSL_KEY=server_key_file,
HOST=host,
PORT=port,
RELEASE_VERSION=please_cli.config.VERSION,
RELEASE_CHANNEL='development',
)
for require in project_config.get('requires', []):
env_name = '{}_URL'.format(please_cli.utils.normalize_name(require).upper())
env_value = '{}://{}:{}'.format(
please_cli.config.PROJECTS_CONFIG[require]['run_options'].get('schema', 'https'),
please_cli.config.PROJECTS_CONFIG[require]['run_options'].get('host', host),
please_cli.config.PROJECTS_CONFIG[require]['run_options']['port'],
)
envs[env_name] = env_value
for env_name, env_value in run_options.get('envs', {}).items():
env_name = please_cli.utils.normalize_name(env_name).upper()
envs[env_name] = env_value
for env_name, env_value in envs.items():
os.environ[env_name] = env_value
command = ['yarn', 'start']
click.echo(f' => Running {project} on {schema}{host}:{port} ...')
returncode, output, error = ctx.invoke(please_cli.shell.cmd,
project=project,
quiet=quiet,
command=' '.join(command),
nix_shell=nix_shell,
taskcluster_secret=taskcluster_secret,
taskcluster_client_id=taskcluster_client_id,
taskcluster_access_token=taskcluster_access_token,
)
sys.exit(returncode)
if __name__ == '__main__':
cmd()
|
La0/mozilla-relengapi
|
lib/please_cli/please_cli/run.py
|
Python
|
mpl-2.0
| 12,167
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import, division, unicode_literals
from jx_base.expressions import GteOp as GteOp_
from jx_elasticsearch.es52.expressions.utils import _inequality_to_esfilter
class GteOp(GteOp_):
to_es = _inequality_to_esfilter
|
klahnakoski/ActiveData
|
vendor/jx_elasticsearch/es52/expressions/gte_op.py
|
Python
|
mpl-2.0
| 525
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-21 06:20
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
('laboratory', '0034_group_perms'),
]
operations = [
migrations.RemoveField(
model_name='laboratory',
name='related_labs',
),
migrations.AlterField(
model_name='laboratory',
name='organization',
field=mptt.fields.TreeForeignKey(on_delete=django.db.models.deletion.CASCADE, to='laboratory.OrganizationStructure'),
),
]
|
solvo/organilab
|
src/laboratory/migrations/0035_auto_20180621_0020.py
|
Python
|
gpl-3.0
| 694
|
# -*- coding: utf-8 -*-
########################################################################
#
# License: BSD
# Created: 2005-05-18
# Author: Francesc Alted - faltet@pytables.org
# Author: Ivan Vilata - ivan@selidor.net
#
# $Id$
#
########################################################################
"""Test module for nested types under PyTables"""
import sys
import unittest
import itertools
import numpy
import tables as t
from tables.utils import SizeType
from tables.tests import common
from tables.description import Description
minRowIndex = 10
# This is the structure of the table used for testing (DON'T PANIC!):
#
# +-+---------------------------------+-----+----------+-+-+
# |x|Info |color|info |y|z|
# | +-----+--+----------------+----+--+ +----+-----+ | |
# | |value|y2|Info2 |name|z2| |Name|Value| | |
# | | | +----+-----+--+--+ | | | | | | |
# | | | |name|value|y3|z3| | | | | | | |
# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+
#
# Please note that some fields are explicitly ordered while others are
# ordered alphabetically by name.
# The declaration of the nested table:
class Info(t.IsDescription):
_v_pos = 3
Name = t.StringCol(itemsize=2)
Value = t.ComplexCol(itemsize=16)
class TestTDescr(t.IsDescription):
"""A description that has several nested columns."""
x = t.Int32Col(dflt=0, shape=2, pos=0) #0
y = t.Float64Col(dflt=1, shape=(2, 2))
z = t.UInt8Col(dflt=1)
color = t.StringCol(itemsize=2, dflt=b" ", pos=2)
info = Info()
class Info(t.IsDescription): #1
_v_pos = 1
name = t.StringCol(itemsize=2)
value = t.ComplexCol(itemsize=16, pos=0) #0
y2 = t.Float64Col(dflt=1, pos=1) #1
z2 = t.UInt8Col(dflt=1)
class Info2(t.IsDescription):
y3 = t.Time64Col(dflt=1, shape=2)
z3 = t.EnumCol({'r':4, 'g':2, 'b':1}, 'r', 'int32', shape=2)
name = t.StringCol(itemsize=2)
value = t.ComplexCol(itemsize=16, shape=2)
# The corresponding nested array description:
testADescr = [
('x', '(2,)Int32'),
('Info', [
('value', 'Complex64'),
('y2', 'Float64'),
('Info2', [
('name', 'a2'),
('value', '(2,)Complex64'),
('y3', '(2,)Float64'),
('z3', '(2,)Int32')]),
('name', 'a2'),
('z2', 'UInt8')]),
('color', 'a2'),
('info', [
('Name', 'a2'),
('Value', 'Complex64')]),
('y', '(2,2)Float64'),
('z', 'UInt8')]
# The corresponding nested array description (brief version):
testADescr2 = [
('x', '(2,)i4'),
('Info', [
('value', '()c16'),
('y2', '()f8'),
('Info2', [
('name', '()S2'),
('value', '(2,)c16'),
('y3', '(2,)f8'),
('z3', '(2,)i4')]),
('name', '()S2'),
('z2', '()u1')]),
('color', '()S2'),
('info', [
('Name', '()S2'),
('Value', '()c16')]),
('y', '(2, 2)f8'),
('z', '()u1')]
# A nested array for testing:
testABuffer = [
# x Info color info y z
# value y2 Info2 name z2 Name Value
# name value y3 z3
((3, 2), (6j, 6., ('nn', (6j, 4j), (6., 4.), (1, 2)), 'NN', 8), 'cc', ('NN', 6j), ((6., 4.), (6., 4.)), 8),
((4, 3), (7j, 7., ('oo', (7j, 5j), (7., 5.), (2, 1)), 'OO', 9), 'dd', ('OO', 7j), ((7., 5.), (7., 5.)), 9),
]
testAData = numpy.array(testABuffer, dtype=testADescr)
# The name of the column to be searched:
testCondCol = 'Info/z2'
# The name of a nested column (it can not be searched):
testNestedCol = 'Info'
# The condition to be applied on the column (all but the last row match it):
testCondition = '(2 < col) & (col < 9)'
def areDescriptionsEqual(desc1, desc2):
"""
Are both `desc1` and `desc2` equivalent descriptions?
The arguments may be description objects (``IsDescription``,
``Description``) or dictionaries.
"""
if isinstance(desc1, t.Col):
# This is a rough comparison but it suffices here.
return (desc1.type == desc2.type
and desc2.dtype == desc2.dtype
and desc1._v_pos == desc2._v_pos
#and desc1.dflt == desc2.dflt)
and common.areArraysEqual(desc1.dflt, desc2.dflt))
if hasattr(desc1, '_v_colObjects'): # quacks like a Description
cols1 = desc1._v_colObjects
elif hasattr(desc1, 'columns'): # quacks like an IsDescription
cols1 = desc1.columns
else: # hope it quacks like a dictionary
cols1 = desc1
if hasattr(desc2, '_v_colObjects'): # quacks like a Description
cols2 = desc2._v_colObjects
elif hasattr(desc2, 'columns'): # quacks like an IsDescription
cols2 = desc2.columns
else: # hope it quacks like a dictionary
cols2 = desc2
if len(cols1) != len(cols2):
return False
for (colName, colobj1) in cols1.iteritems():
colobj2 = cols2[colName]
if colName == '_v_pos':
# The comparison may not be quite exhaustive!
return colobj1 == colobj2
if not areDescriptionsEqual(colobj1, colobj2):
return False
return True
# Test creating nested column descriptions
class DescriptionTestCase(common.PyTablesTestCase):
_TestTDescr = TestTDescr
_testADescr = testADescr
_testADescr2 = testADescr2
_testAData = testAData
def test00_instance(self):
"""Creating an instance of a nested description."""
self.assertTrue(
areDescriptionsEqual(self._TestTDescr, self._TestTDescr()),
"Table description does not match the given one.")
def test01_instance(self):
"""Checking attrs of an instance of a nested description."""
descr = Description(self._TestTDescr().columns)
if common.verbose:
print "Generated description:", descr._v_nestedDescr
print "Should look like:", self._testADescr2
self.assertEqual(self._testADescr2, descr._v_nestedDescr,
"Description._v_nestedDescr does not match.")
# Test creating a nested table and opening it
class CreateTestCase(common.TempFileMixin, common.PyTablesTestCase):
_TestTDescr = TestTDescr
_testABuffer = testABuffer
_testAData = testAData
def _checkColumns(self, cols, desc):
"""
Check that `cols` has all the accessors for `self._TestTDescr`.
"""
# ``_desc`` is a leaf column and ``cols`` a ``Column``.
if isinstance(desc, t.Col):
return isinstance(cols, t.Column)
# ``_desc`` is a description object and ``cols`` a ``Cols``.
descColumns = desc._v_colObjects
for colName in descColumns:
if colName not in cols._v_colnames:
return False
if not self._checkColumns(cols._f_col(colName),
descColumns[colName]):
return False
return True
def _checkDescription(self, table):
"""
Check that description of `table` matches `self._TestTDescr`.
"""
# Compare descriptions.
self.assertTrue(
areDescriptionsEqual(self._TestTDescr, table.description),
"Table description does not match the given one.")
# Check access to columns.
self._checkColumns(table.cols, table.description)
def _checkColinstances(self, table):
"""
Check that ``colinstances`` and ``cols`` of `table` match.
"""
for colpathname in table.description._v_pathnames:
self.assertTrue(table.colinstances[colpathname]
is table.cols._f_col(colpathname))
def test00_create(self):
"""Creating a nested table."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
self._checkDescription(tbl)
self._checkColinstances(tbl)
def test01_open(self):
"""Opening a nested table."""
self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
self._reopen()
tbl = self.h5file.root.test
self._checkDescription(tbl)
self._checkColinstances(tbl)
def test02_NestedRecArrayCompat(self):
"""Creating a compatible nested record array``."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
nrarr = numpy.array(testABuffer, dtype=tbl.description._v_nestedDescr)
self.assertTrue(common.areArraysEqual(nrarr, self._testAData),
"Can not create a compatible structured array.")
def test03_NRA(self):
"""Creating a table from a nested record array object."""
tbl = self.h5file.createTable(
'/', 'test', self._testAData, title=self._getMethodName())
tbl.flush()
readAData = tbl.read()
if common.verbose:
print "Read data:", readAData
print "Should look like:", self._testAData
self.assertTrue(common.areArraysEqual(self._testAData, readAData),
"Written and read values differ.")
def test04_NRA2(self):
"""Creating a table from a generated nested record array object."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
tbl.append(self._testAData)
readAData = tbl.read()
tbl2 = self.h5file.createTable(
'/', 'test2', readAData, title=self._getMethodName())
readAData2 = tbl2.read()
self.assertTrue(common.areArraysEqual(self._testAData, readAData2),
"Written and read values differ.")
# Test writing data in a nested table
class WriteTestCase(common.TempFileMixin, common.PyTablesTestCase):
_TestTDescr = TestTDescr
_testAData = testAData
_testCondition = testCondition
_testCondCol = testCondCol
_testNestedCol = testNestedCol
def _testCondVars(self, table):
"""Get condition variables for the given `table`."""
return {'col': table.cols._f_col(self._testCondCol)}
def _testNestedCondVars(self, table):
"""Get condition variables for the given `table`."""
return {'col': table.cols._f_col(self._testNestedCol)}
def _appendRow(self, row, index):
"""
Append the `index`-th row in `self._testAData` to `row`.
Values are set field-by-field (be it nested or not).
"""
record = self._testAData[index]
for fieldName in self._testAData.dtype.names:
row[fieldName] = record[fieldName]
row.append()
def test00_append(self):
"""Appending a set of rows."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
tbl.append(self._testAData)
tbl.flush()
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
readAData = tbl.read()
self.assertTrue(common.areArraysEqual(self._testAData, readAData),
"Written and read values differ.")
def test01_row(self):
"""Appending individual rows."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
row = tbl.row
# Add the first row
self._appendRow(row, 0)
# Add the rest of the rows field by field.
for i in range(1, len(self._testAData)):
self._appendRow(row, i)
tbl.flush()
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
readAData = tbl.read()
self.assertTrue(common.areArraysEqual(self._testAData, readAData),
"Written and read values differ.")
def test02_where(self):
"""Searching nested data."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
tbl.append(self._testAData)
tbl.flush()
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
searchedCoords = tbl.getWhereList(
self._testCondition, self._testCondVars(tbl))
# All but the last row match the condition.
searchedCoords.sort()
self.assertEqual(searchedCoords.tolist(),
range(len(self._testAData) - 1),
"Search returned incorrect results.")
def test02b_whereAppend(self):
"""Searching nested data and appending it to another table."""
tbl1 = self.h5file.createTable(
'/', 'test1', self._TestTDescr, title=self._getMethodName())
tbl1.append(self._testAData)
tbl1.flush()
tbl2 = self.h5file.createTable(
'/', 'test2', self._TestTDescr, title=self._getMethodName())
tbl1.whereAppend(
tbl2, self._testCondition, self._testCondVars(tbl1))
if self.reopen:
self._reopen()
tbl1 = self.h5file.root.test1
tbl2 = self.h5file.root.test2
searchedCoords = tbl2.getWhereList(
self._testCondition, self._testCondVars(tbl2))
# All but the last row match the condition.
searchedCoords.sort()
self.assertEqual(searchedCoords.tolist(),
range(len(self._testAData) - 1),
"Search returned incorrect results.")
def test03_colscond(self):
"""Searching on a column with nested columns."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
tbl.append(self._testAData)
tbl.flush()
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
self.assertRaises(
TypeError, tbl.getWhereList,
self._testCondition, self._testNestedCondVars(tbl))
def test04_modifyColumn(self):
"""Modifying one single nested column (modifyColumn)."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
tbl.append(self._testAData)
tbl.flush()
nColumn = self._testNestedCol
# Get the nested column data and swap the first and last rows.
raTable = self._testAData.copy()
raColumn = raTable[nColumn]
# The next will not work until NestedRecords supports copies
(raColumn[0], raColumn[-1]) = (raColumn[-1], raColumn[0])
# Write the resulting column and re-read the whole table.
tbl.modifyColumn(colname=nColumn, column=raColumn)
tbl.flush()
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
raReadTable = tbl.read()
if common.verbose:
print "Table read:", raReadTable
print "Should look like:", raTable
# Compare it to the written one.
self.assertTrue(common.areArraysEqual(raTable, raReadTable),
"Written and read values differ.")
def test05a_modifyColumns(self):
"""Modifying one nested column (modifyColumns)."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
tbl.append(self._testAData)
tbl.flush()
nColumn = self._testNestedCol
# Get the nested column data and swap the first and last rows.
raTable = self._testAData.copy()
raColumn = raTable[nColumn]
(raColumn[0], raColumn[-1]) = (raColumn[-1].copy(), raColumn[0].copy())
newdtype = numpy.dtype([(nColumn, raTable.dtype.fields[nColumn][0])])
self.assertTrue(newdtype is not None)
# Write the resulting column and re-read the whole table.
tbl.modifyColumns(names=[nColumn], columns=raColumn)
tbl.flush()
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
raReadTable = tbl.read()
if common.verbose:
print "Table read:", raReadTable
print "Should look like:", raTable
# Compare it to the written one.
self.assertTrue(common.areArraysEqual(raTable, raReadTable),
"Written and read values differ.")
def test05b_modifyColumns(self):
"""Modifying two nested columns (modifyColumns)."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
tbl.append(self._testAData)
tbl.flush()
# Get the nested column data and swap the first and last rows.
colnames = ['x', 'color'] # Get the first two columns
raCols = numpy.rec.fromarrays([self._testAData['x'].copy(),
self._testAData['color'].copy()],
dtype=[('x', '(2,)i4'), ('color', '1a2')])
#descr=tbl.description._v_nestedDescr[0:2])
# or...
# names=tbl.description._v_nestedNames[0:2],
# formats=tbl.description._v_nestedFormats[0:2])
(raCols[0], raCols[-1]) = (raCols[-1].copy(), raCols[0].copy())
# Write the resulting columns
tbl.modifyColumns(names=colnames, columns=raCols)
tbl.flush()
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
# Re-read the appropriate columns
raCols2 = numpy.rec.fromarrays([tbl.cols._f_col('x'),
tbl.cols._f_col('color')],
dtype=raCols.dtype)
if common.verbose:
print "Table read:", raCols2
print "Should look like:", raCols
# Compare it to the written one.
self.assertTrue(common.areArraysEqual(raCols, raCols2),
"Written and read values differ.")
def test06_modifyRows(self):
"Checking modifying several rows at once (using nested rec array)"
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
tbl.append(self._testAData)
tbl.flush()
# Get the nested record and swap the first and last rows.
raTable = self._testAData.copy()
(raTable[0], raTable[-1]) = (raTable[-1].copy(), raTable[0].copy())
# Write the resulting nested record and re-read the whole table.
tbl.modifyRows(start=0, stop=2, rows=raTable)
tbl.flush()
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
raReadTable = tbl.read()
if common.verbose:
print "Table read:", raReadTable
print "Should look like:", raTable
# Compare it to the written one.
self.assertTrue(common.areArraysEqual(raTable, raReadTable),
"Written and read values differ.")
def test07_index(self):
"""Checking indexes of nested columns"""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName(),
expectedrows = minRowIndex*2)
for i in range(minRowIndex):
tbl.append(self._testAData)
tbl.flush()
coltoindex = tbl.cols._f_col(self._testCondCol)
indexrows = coltoindex.createIndex()
self.assertTrue(indexrows is not None)
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
coltoindex = tbl.cols._f_col(self._testCondCol)
if common.verbose:
print "Number of written rows:", tbl.nrows
print "Number of indexed rows:", coltoindex.index.nelements
# Check indexing flags:
self.assertEqual(tbl.indexed, True, "Table not indexed")
self.assertNotEqual(coltoindex.index, None, "Column not indexed")
self.assertTrue(tbl.colindexed[self._testCondCol], "Column not indexed")
# Do a look-up for values
searchedCoords = tbl.getWhereList(
self._testCondition, self._testCondVars(tbl))
searchedCoords.sort()
expectedCoords = numpy.arange(0, minRowIndex*2, 2, SizeType)
if common.verbose:
print "Searched coords:", searchedCoords
print "Expected coords:", expectedCoords
# All even rows match the condition.
self.assertEqual(searchedCoords.tolist(), expectedCoords.tolist(),
"Search returned incorrect results.")
def test08_setNestedField(self):
"Checking modifying a nested field via natural naming."
# See ticket #93 (http://www.pytables.org/trac/ticket/93).
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
tbl.append(self._testAData)
tbl.flush()
oldvalue = tbl.cols.Info.z2[0]
tbl.cols.Info.z2[0] = oldvalue + 1
tbl.flush()
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
newvalue = tbl.cols.Info.z2[0]
self.assertEqual(newvalue, oldvalue + 1)
class WriteNoReopen(WriteTestCase):
reopen = 0
class WriteReopen(WriteTestCase):
reopen = 1
class ReadTestCase(common.TempFileMixin, common.PyTablesTestCase):
_TestTDescr = TestTDescr
_testABuffer = testABuffer
_testAData = testAData
_testNestedCol = testNestedCol
def test00a_repr(self):
"""Checking representation of a nested Table"""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title="test00")
tbl.append(self._testAData)
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
if common.verbose:
print "str(tbl)-->", str(tbl)
print "repr(tbl)-->", repr(tbl)
self.assertEqual(str(tbl), "/test (Table(2,)) 'test00'")
tblrepr = repr(tbl)
# Remove the platform-dependent information (i.e. byteorder)
tblrepr = "\n".join(tblrepr.split("\n")[:-2])+"\n"
if sys.version_info[0] < 3:
template = """/test (Table(2,)) 'test00'
description := {
"x": Int32Col(shape=(2,), dflt=0, pos=0),
"Info": {
"value": ComplexCol(itemsize=16, shape=(), dflt=0j, pos=0),
"y2": Float64Col(shape=(), dflt=1.0, pos=1),
"Info2": {
"name": StringCol(itemsize=2, shape=(), dflt='', pos=0),
"value": ComplexCol(itemsize=16, shape=(2,), dflt=0j, pos=1),
"y3": Time64Col(shape=(2,), dflt=1.0, pos=2),
"z3": EnumCol(enum=Enum({%s}), dflt='r', base=Int32Atom(shape=(), dflt=0), shape=(2,), pos=3)},
"name": StringCol(itemsize=2, shape=(), dflt='', pos=3),
"z2": UInt8Col(shape=(), dflt=1, pos=4)},
"color": StringCol(itemsize=2, shape=(), dflt=' ', pos=2),
"info": {
"Name": StringCol(itemsize=2, shape=(), dflt='', pos=0),
"Value": ComplexCol(itemsize=16, shape=(), dflt=0j, pos=1)},
"y": Float64Col(shape=(2, 2), dflt=1.0, pos=4),
"z": UInt8Col(shape=(), dflt=1, pos=5)}
"""
else:
template = """/test (Table(2,)) 'test00'
description := {
"x": Int32Col(shape=(2,), dflt=0, pos=0),
"Info": {
"value": ComplexCol(itemsize=16, shape=(), dflt=0j, pos=0),
"y2": Float64Col(shape=(), dflt=1.0, pos=1),
"Info2": {
"name": StringCol(itemsize=2, shape=(), dflt=b'', pos=0),
"value": ComplexCol(itemsize=16, shape=(2,), dflt=0j, pos=1),
"y3": Time64Col(shape=(2,), dflt=1.0, pos=2),
"z3": EnumCol(enum=Enum({%s}), dflt='r', base=Int32Atom(shape=(), dflt=0), shape=(2,), pos=3)},
"name": StringCol(itemsize=2, shape=(), dflt=b'', pos=3),
"z2": UInt8Col(shape=(), dflt=1, pos=4)},
"color": StringCol(itemsize=2, shape=(), dflt=b' ', pos=2),
"info": {
"Name": StringCol(itemsize=2, shape=(), dflt=b'', pos=0),
"Value": ComplexCol(itemsize=16, shape=(), dflt=0j, pos=1)},
"y": Float64Col(shape=(2, 2), dflt=1.0, pos=4),
"z": UInt8Col(shape=(), dflt=1, pos=5)}
"""
values = [template % ', '.join(items)
for items in itertools.permutations(("'r': 4", "'b': 1", "'g': 2"))]
self.assertTrue(tblrepr in values)
def test00b_repr(self):
"""Checking representation of a root Column."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title="test00")
tbl.append(self._testAData)
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
if common.verbose:
print "str(tbl.cols.y)-->'%s'" % str(tbl.cols.y)
print "repr(tbl.cols.y)-->'%s'" % repr(tbl.cols.y)
self.assertEqual(str(tbl.cols.y),
"/test.cols.y (Column(2, 2, 2), float64, idx=None)")
self.assertEqual(repr(tbl.cols.y),
"/test.cols.y (Column(2, 2, 2), float64, idx=None)")
def test00c_repr(self):
"""Checking representation of a nested Column."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title="test00")
tbl.append(self._testAData)
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
if common.verbose:
print "str(tbl.cols.Info.z2)-->'%s'" % str(tbl.cols.Info.z2)
print "repr(tbl.cols.Info.z2)-->'%s'" % repr(tbl.cols.Info.z2)
self.assertEqual(str(tbl.cols.Info.z2),
"/test.cols.Info.z2 (Column(2,), uint8, idx=None)")
self.assertEqual(repr(tbl.cols.Info.z2),
"/test.cols.Info.z2 (Column(2,), uint8, idx=None)")
def test01_read(self):
"""Checking Table.read with subgroups with a range index with step."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
tbl.append(self._testAData)
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
nrarr = numpy.rec.array(testABuffer,
dtype=tbl.description._v_nestedDescr)
tblcols = tbl.read(start=0, step=2, field='Info')
nrarrcols = nrarr['Info'][0::2]
if common.verbose:
print "Read cols:", tblcols
print "Should look like:", nrarrcols
self.assertTrue(common.areArraysEqual(nrarrcols, tblcols),
"Original array are retrieved doesn't match.")
def test01_read_out_arg(self):
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
tbl.append(self._testAData)
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
nrarr = numpy.rec.array(testABuffer,
dtype=tbl.description._v_nestedDescr)
# When reading an entire nested column, the output array must contain
# all fields in the table. The output buffer will contain the contents
# of all fields. The selected column alone will be returned from the
# method call.
all_cols = numpy.empty(1, tbl.dtype)
tblcols = tbl.read(start=0, step=2, field='Info', out=all_cols)
nrarrcols = nrarr['Info'][0::2]
if common.verbose:
print "Read cols:", tblcols
print "Should look like:", nrarrcols
self.assertTrue(common.areArraysEqual(nrarrcols, tblcols),
"Original array are retrieved doesn't match.")
self.assertTrue(common.areArraysEqual(nrarr[0::2], all_cols),
"Output buffer does not match full table.")
def test02_read(self):
"""Checking Table.read with a nested Column."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
tbl.append(self._testAData)
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
tblcols = tbl.read(start=0, step=2, field='Info/value')
nrarr = numpy.rec.array(testABuffer,
dtype=tbl.description._v_nestedDescr)
nrarrcols = nrarr['Info']['value'][0::2]
self.assertTrue(common.areArraysEqual(nrarrcols, tblcols),
"Original array are retrieved doesn't match.")
def test02_read_out_arg(self):
"""Checking Table.read with a nested Column."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
tbl.append(self._testAData)
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
tblcols = numpy.empty(1, dtype='c16')
tbl.read(start=0, step=2, field='Info/value', out=tblcols)
nrarr = numpy.rec.array(testABuffer,
dtype=tbl.description._v_nestedDescr)
nrarrcols = nrarr['Info']['value'][0::2]
self.assertTrue(common.areArraysEqual(nrarrcols, tblcols),
"Original array are retrieved doesn't match.")
class ReadNoReopen(ReadTestCase):
reopen = 0
class ReadReopen(ReadTestCase):
reopen = 1
# Checking the Table.Cols accessor
class ColsTestCase(common.TempFileMixin, common.PyTablesTestCase):
_TestTDescr = TestTDescr
_testABuffer = testABuffer
_testAData = testAData
_testNestedCol = testNestedCol
def test00a_repr(self):
"""Checking string representation of Cols."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title="test00")
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
if common.verbose:
print "str(tbl.cols)-->", str(tbl.cols)
print "repr(tbl.cols)-->", repr(tbl.cols)
self.assertEqual(str(tbl.cols), "/test.cols (Cols), 6 columns")
try:
self.assertEqual(repr(tbl.cols),
"""/test.cols (Cols), 6 columns
x (Column(0, 2), ('int32',(2,)))
Info (Cols(), Description)
color (Column(0,), |S2)
info (Cols(), Description)
y (Column(0, 2, 2), ('float64',(2, 2)))
z (Column(0,), uint8)
"""
)
except AssertionError:
self.assertEqual(repr(tbl.cols),
"""/test.cols (Cols), 6 columns
x (Column(0, 2), ('<i4', (2,)))
Info (Cols(), Description)
color (Column(0,), |S2)
info (Cols(), Description)
y (Column(0, 2, 2), ('<f8', (2, 2)))
z (Column(0,), uint8)
"""
)
def test00b_repr(self):
"""Checking string representation of nested Cols."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
if common.verbose:
print "str(tbl.cols.Info)-->", str(tbl.cols.Info)
print "repr(tbl.cols.Info)-->", repr(tbl.cols.Info)
self.assertEqual(str(tbl.cols.Info), "/test.cols.Info (Cols), 5 columns")
self.assertEqual(repr(tbl.cols.Info),
"""/test.cols.Info (Cols), 5 columns
value (Column(0,), complex128)
y2 (Column(0,), float64)
Info2 (Cols(), Description)
name (Column(0,), |S2)
z2 (Column(0,), uint8)
""")
def test01a_f_col(self):
"""Checking cols._f_col() with a subgroup."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
tblcol = tbl.cols._f_col(self._testNestedCol)
if common.verbose:
print "Column group name:", tblcol._v_desc._v_pathname
self.assertEqual(tblcol._v_desc._v_pathname, self._testNestedCol,
"Column group name doesn't match.")
def test01b_f_col(self):
"""Checking cols._f_col() with a column."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
tblcol = tbl.cols._f_col(self._testNestedCol+"/name")
if common.verbose:
print "Column name:", tblcol.name
self.assertEqual(tblcol.name, "name", "Column name doesn't match.")
def test01c_f_col(self):
"""Checking cols._f_col() with a nested subgroup."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
tblcol = tbl.cols._f_col(self._testNestedCol+"/Info2")
if common.verbose:
print "Column group name:", tblcol._v_desc._v_pathname
self.assertEqual(tblcol._v_desc._v_pathname,
self._testNestedCol+"/Info2",
"Column group name doesn't match.")
def test02a__len__(self):
"""Checking cols.__len__() in root level."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
length = len(tbl.cols)
if common.verbose:
print "Column group length:", length
self.assertEqual(length, len(tbl.colnames),
"Column group length doesn't match.")
def test02b__len__(self):
"""Checking cols.__len__() in subgroup level."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
length = len(tbl.cols.Info)
if common.verbose:
print "Column group length:", length
self.assertEqual(length, len(tbl.cols.Info._v_colnames),
"Column group length doesn't match.")
def test03a__getitem__(self):
"""Checking cols.__getitem__() with a single index."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
tbl.append(self._testAData)
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
nrarr = numpy.array(testABuffer, dtype=tbl.description._v_nestedDescr)
tblcols = tbl.cols[1]
nrarrcols = nrarr[1]
if common.verbose:
print "Read cols:", tblcols
print "Should look like:", nrarrcols
self.assertTrue(common.areArraysEqual(nrarrcols, tblcols),
"Original array are retrieved doesn't match.")
def test03b__getitem__(self):
"""Checking cols.__getitem__() with a range index."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
tbl.append(self._testAData)
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
nrarr = numpy.array(testABuffer, dtype=tbl.description._v_nestedDescr)
tblcols = tbl.cols[0:2]
nrarrcols = nrarr[0:2]
if common.verbose:
print "Read cols:", tblcols
print "Should look like:", nrarrcols
self.assertTrue(common.areArraysEqual(nrarrcols, tblcols),
"Original array are retrieved doesn't match.")
def test03c__getitem__(self):
"""Checking cols.__getitem__() with a range index with step."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
tbl.append(self._testAData)
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
nrarr = numpy.array(testABuffer, dtype=tbl.description._v_nestedDescr)
tblcols = tbl.cols[0::2]
nrarrcols = nrarr[0::2]
if common.verbose:
print "Read cols:", tblcols
print "Should look like:", nrarrcols
self.assertTrue(common.areArraysEqual(nrarrcols, tblcols),
"Original array are retrieved doesn't match.")
def test04a__getitem__(self):
"""Checking cols.__getitem__() with subgroups with a single index."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
tbl.append(self._testAData)
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
nrarr = numpy.array(testABuffer, dtype=tbl.description._v_nestedDescr)
tblcols = tbl.cols._f_col('Info')[1]
nrarrcols = nrarr['Info'][1]
if common.verbose:
print "Read cols:", tblcols
print "Should look like:", nrarrcols
self.assertTrue(common.areArraysEqual(nrarrcols, tblcols),
"Original array are retrieved doesn't match.")
def test04b__getitem__(self):
"""Checking cols.__getitem__() with subgroups with a range index."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
tbl.append(self._testAData)
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
nrarr = numpy.array(testABuffer, dtype=tbl.description._v_nestedDescr)
tblcols = tbl.cols._f_col('Info')[0:2]
nrarrcols = nrarr['Info'][0:2]
if common.verbose:
print "Read cols:", tblcols
print "Should look like:", nrarrcols
self.assertTrue(common.areArraysEqual(nrarrcols, tblcols),
"Original array are retrieved doesn't match.")
def test04c__getitem__(self):
"""Checking cols.__getitem__() with subgroups with a range index with step."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
tbl.append(self._testAData)
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
nrarr = numpy.array(testABuffer, dtype=tbl.description._v_nestedDescr)
tblcols = tbl.cols._f_col('Info')[0::2]
nrarrcols = nrarr['Info'][0::2]
if common.verbose:
print "Read cols:", tblcols
print "Should look like:", nrarrcols
self.assertTrue(common.areArraysEqual(nrarrcols, tblcols),
"Original array are retrieved doesn't match.")
def test05a__getitem__(self):
"""Checking cols.__getitem__() with a column with a single index."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
tbl.append(self._testAData)
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
nrarr = numpy.array(testABuffer, dtype=tbl.description._v_nestedDescr)
tblcols = tbl.cols._f_col('Info/value')[1]
nrarrcols = nrarr['Info']['value'][1]
if common.verbose:
print "Read cols:", tblcols
print "Should look like:", nrarrcols
self.assertEqual(nrarrcols, tblcols,
"Original array are retrieved doesn't match.")
def test05b__getitem__(self):
"""Checking cols.__getitem__() with a column with a range index."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
tbl.append(self._testAData)
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
nrarr = numpy.array(testABuffer, dtype=tbl.description._v_nestedDescr)
tblcols = tbl.cols._f_col('Info/value')[0:2]
nrarrcols = nrarr['Info']['value'][0:2]
if common.verbose:
print "Read cols:", tblcols
print "Should look like:", nrarrcols
self.assertTrue(common.areArraysEqual(nrarrcols, tblcols),
"Original array are retrieved doesn't match.")
def test05c__getitem__(self):
"""Checking cols.__getitem__() with a column with a range index with step."""
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
tbl.append(self._testAData)
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
nrarr = numpy.array(testABuffer, dtype=tbl.description._v_nestedDescr)
tblcols = tbl.cols._f_col('Info/value')[0::2]
nrarrcols = nrarr['Info']['value'][0::2]
if common.verbose:
print "Read cols:", tblcols
print "Should look like:", nrarrcols
self.assertTrue(common.areArraysEqual(nrarrcols, tblcols),
"Original array are retrieved doesn't match.")
def test_01a__iter__(self):
tbl = self.h5file.createTable(
'/', 'test', self._TestTDescr, title=self._getMethodName())
tbl.append(self._testAData)
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
nrarr = numpy.array(testABuffer, dtype=tbl.description._v_nestedDescr)
row_num = 0
for item in tbl.cols.Info.value:
self.assertEqual(item, nrarr['Info']['value'][row_num])
row_num += 1
self.assertEqual(row_num, len(nrarr))
class ColsNoReopen(ColsTestCase):
reopen = 0
class ColsReopen(ColsTestCase):
reopen = 1
class Nested(t.IsDescription):
uid = t.IntCol(pos=1)
value = t.FloatCol(pos=2)
class A_Candidate(t.IsDescription):
nested1 = Nested()
nested2 = Nested()
class B_Candidate(t.IsDescription):
nested1 = Nested
nested2 = Nested
class C_Candidate(t.IsDescription):
nested1 = Nested()
nested2 = Nested
Dnested = {'uid': t.IntCol(pos=1),
'value': t.FloatCol(pos=2),
}
D_Candidate = {"nested1": Dnested,
"nested2": Dnested,
}
E_Candidate = {"nested1": Nested,
"nested2": Dnested,
}
F_Candidate = {"nested1": Nested(),
"nested2": Dnested,
}
# Checking several nested columns declared in the same way
class SameNestedTestCase(common.TempFileMixin, common.PyTablesTestCase):
correct_names = ['', # The root of columns
'nested1', 'nested1/uid', 'nested1/value',
'nested2', 'nested2/uid', 'nested2/value']
def test01a(self):
"""Checking same nested columns (instance flavor)."""
tbl = self.h5file.createTable(
'/', 'test', A_Candidate, title=self._getMethodName())
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
names = [col._v_pathname for col in tbl.description._f_walk(type="All")]
if common.verbose:
print "Pathnames of columns:", names
print "Should look like:", self.correct_names
self.assertEqual(names, self.correct_names,
"Column nested names doesn't match.")
def test01b(self):
"""Checking same nested columns (class flavor)."""
tbl = self.h5file.createTable(
'/', 'test', B_Candidate, title=self._getMethodName())
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
names = [col._v_pathname for col in tbl.description._f_walk(type="All")]
if common.verbose:
print "Pathnames of columns:", names
print "Should look like:", self.correct_names
self.assertEqual(names, self.correct_names,
"Column nested names doesn't match.")
def test01c(self):
"""Checking same nested columns (mixed instance/class flavor)."""
tbl = self.h5file.createTable(
'/', 'test', C_Candidate, title=self._getMethodName())
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
names = [col._v_pathname for col in tbl.description._f_walk(type="All")]
if common.verbose:
print "Pathnames of columns:", names
print "Should look like:", self.correct_names
self.assertEqual(names, self.correct_names,
"Column nested names doesn't match.")
def test01d(self):
"""Checking same nested columns (dictionary flavor)."""
tbl = self.h5file.createTable(
'/', 'test', D_Candidate, title=self._getMethodName())
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
names = [col._v_pathname for col in tbl.description._f_walk(type="All")]
if common.verbose:
print "Pathnames of columns:", names
print "Should look like:", self.correct_names
self.assertEqual(names, self.correct_names,
"Column nested names doesn't match.")
def test01e(self):
"""Checking same nested columns (mixed dictionary/class flavor)."""
tbl = self.h5file.createTable(
'/', 'test', E_Candidate, title=self._getMethodName())
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
names = [col._v_pathname for col in tbl.description._f_walk(type="All")]
if common.verbose:
print "Pathnames of columns:", names
print "Should look like:", self.correct_names
self.assertEqual(names, self.correct_names,
"Column nested names doesn't match.")
def test01f(self):
"""Checking same nested columns (mixed dictionary/instance flavor)."""
tbl = self.h5file.createTable(
'/', 'test', F_Candidate, title=self._getMethodName())
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
names = [col._v_pathname for col in tbl.description._f_walk(type="All")]
if common.verbose:
print "Pathnames of columns:", names
print "Should look like:", self.correct_names
self.assertEqual(names, self.correct_names,
"Column nested names doesn't match.")
def test02a(self):
"""Indexing two simple columns under the same nested column."""
desc = {
'nested': {
'i1': t.Int32Col(),
'i2': t.Int32Col() } }
i1 = 'nested/i1'
i2 = 'nested/i2'
tbl = self.h5file.createTable(
'/', 'test', desc, title=self._getMethodName())
row = tbl.row
for i in xrange(1000):
row[i1] = i
row[i2] = i*2
row.append()
tbl.flush()
cols = {'i1': tbl.cols.nested.i1,
'i2': tbl.cols.nested.i2,}
cols['i1'].createIndex()
cols['i2'].createIndex()
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
# Redefine the cols dictionary
cols = {'i1': tbl.cols.nested.i1,
'i2': tbl.cols.nested.i2,}
i1res = [row[i1] for row in tbl.where('i1 < 10', cols)]
i2res = [row[i2] for row in tbl.where('i2 < 10', cols)]
if common.verbose:
print "Retrieved values (i1):", i1res
print "Should look like:", range(10)
print "Retrieved values (i2):", i2res
print "Should look like:", range(0, 10, 2)
self.assertEqual(i1res, range(10),
"Select for nested column (i1) doesn't match.")
self.assertEqual(i2res, range(0, 10, 2),
"Select for nested column (i2) doesn't match.")
def test02b(self):
"""Indexing two simple columns under the same (very) nested column."""
desc = {
'nested1': {
'nested2': {
'nested3': {
'i1': t.Int32Col(),
'i2': t.Int32Col() } } } }
i1 = 'nested1/nested2/nested3/i1'
i2 = 'nested1/nested2/nested3/i2'
tbl = self.h5file.createTable(
'/', 'test', desc, title=self._getMethodName())
row = tbl.row
for i in xrange(1000):
row[i1] = i
row[i2] = i*2
row.append()
tbl.flush()
cols = {'i1': tbl.cols.nested1.nested2.nested3.i1,
'i2': tbl.cols.nested1.nested2.nested3.i2,}
cols['i1'].createIndex()
cols['i2'].createIndex()
if self.reopen:
self._reopen()
tbl = self.h5file.root.test
# Redefine the cols dictionary
cols = {'i1': tbl.cols.nested1.nested2.nested3.i1,
'i2': tbl.cols.nested1.nested2.nested3.i2,}
i1res = [row[i1] for row in tbl.where('i1 < 10', cols)]
i2res = [row[i2] for row in tbl.where('i2 < 10', cols)]
if common.verbose:
print "Retrieved values (i1):", i1res
print "Should look like:", range(10)
print "Retrieved values (i2):", i2res
print "Should look like:", range(0, 10, 2)
self.assertEqual(i1res, range(10),
"Select for nested column (i1) doesn't match.")
self.assertEqual(i2res, range(0, 10, 2),
"Select for nested column (i2) doesn't match.")
class SameNestedNoReopen(SameNestedTestCase):
reopen = 0
class SameNestedReopen(SameNestedTestCase):
reopen = 1
class NestedTypesWithGaps(common.PyTablesTestCase):
correct_descr = \
"""{
"float": Float32Col(shape=(), dflt=0.0, pos=0),
"compound": {
"char": Int8Col(shape=(), dflt=0, pos=0),
"double": Float64Col(shape=(), dflt=0.0, pos=1)}}"""
def test01(self):
"""Opening a table with nested types with gaps."""
h5file = t.openFile(self._testFilename('nested-type-with-gaps.h5'))
tbl = h5file.getNode('/nestedtype')
type_descr = repr(tbl.description)
if common.verbose:
print "Type size with no gaps:", tbl.description._v_itemsize
print "And should be: 13"
print "Representation of the nested type:\n", type_descr
print "And should be:\n", self.correct_descr
self.assertEqual(tbl.description._v_itemsize, 13)
self.assertEqual(type_descr, self.correct_descr)
if common.verbose:
print "Great! Nested types with gaps recognized correctly."
h5file.close()
#----------------------------------------------------------------------
def suite():
"""Return a test suite consisting of all the test cases in the module."""
theSuite = unittest.TestSuite()
niter = 1
#common.heavy = 1 # uncomment this only for testing purposes
#theSuite.addTest(unittest.makeSuite(DescriptionTestCase))
#theSuite.addTest(unittest.makeSuite(WriteReopen))
for i in range(niter):
theSuite.addTest(unittest.makeSuite(DescriptionTestCase))
theSuite.addTest(unittest.makeSuite(CreateTestCase))
theSuite.addTest(unittest.makeSuite(WriteNoReopen))
theSuite.addTest(unittest.makeSuite(WriteReopen))
theSuite.addTest(unittest.makeSuite(ColsNoReopen))
theSuite.addTest(unittest.makeSuite(ColsReopen))
theSuite.addTest(unittest.makeSuite(ReadNoReopen))
theSuite.addTest(unittest.makeSuite(ReadReopen))
theSuite.addTest(unittest.makeSuite(SameNestedNoReopen))
theSuite.addTest(unittest.makeSuite(SameNestedReopen))
theSuite.addTest(unittest.makeSuite(NestedTypesWithGaps))
return theSuite
if __name__ == '__main__':
unittest.main( defaultTest='suite' )
## Local Variables:
## mode: python
## py-indent-offset: 4
## tab-width: 4
## fill-column: 72
## End:
|
cpcloud/PyTables
|
tables/tests/test_nestedtypes.py
|
Python
|
bsd-3-clause
| 52,114
|
# Copyright 2017,2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import nnabla as nn
import nnabla.initializer as I
import sys
import importlib
import time
from collections import namedtuple
import csv
Inspec = namedtuple("Inspec", ['shape', 'init', 'need_grad'])
Inspec.__new__.__defaults__ = (I.NormalInitializer(), True)
BenchmarkStat = namedtuple("Benchmark", ['mean_time', 'run_count'])
class Timer:
"""Timer.
See :func:`Timer.lap()`.
"""
def __init__(self):
self.start = time.time()
self.lap_time = self.start
def lap(self):
"""Calculate lap time.
Returns:
float: Lap time. The duration from the previous call of ``lap()``
or initialization at first call.
float: Total time. The duration from initialization.
"""
now = time.time()
lap_time = now - self.lap_time
total_time = now - self.start
self.lap_time = now
return lap_time, total_time
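# A hedged usage sketch of Timer (editor's addition; the workload calls are
# hypothetical placeholders, not part of this module):
#
#     timer = Timer()
#     run_workload()                 # any code you want to time
#     lap, total = timer.lap()       # seconds since construction (first lap)
#     run_workload()
#     lap, total = timer.lap()       # seconds since the previous lap() call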
class FunctionBenchmarkWriter:
"""Benchmark writer class.
It writes benchmark statistics taken by :class:`FunctionBenchmark`
in .ini file format. As it's not so readable,
use :class:`FunctionBenchmarkCsvWriter` instead.
Args:
file (Python file object): The benchmark will be written to the file.
"""
def __init__(self, file=sys.stdout):
self.file = file
self.write_header()
def write_header(self):
"""Writing header function that is called at initialization.
"""
pass
def _write_a_stat(self, k, v):
print('{} = ({:3.2f}, {})'.format(
k, v.mean_time * 1000, v.run_count), file=self.file)
def write(self, fb):
"""Write a single function benchmark.
Args:
fb (FunctionBenchmark): FunctionBenchmark class instance.
Before passing to this, you should call ``fb.benchmark()``.
"""
print('[{}.{}]'.format(fb.module, fb.func.__name__), file=self.file)
print('class = {}'.format(fb.func_ins.name), file=self.file)
print('inspecs = {}'.format(repr(fb.inspecs)), file=self.file)
print('func_args = {}'.format(repr(fb.func_args)), file=self.file)
print('func_kwargs = {}'.format(repr(fb.func_kwargs)), file=self.file)
print('ext = ({}, {})'.format(
repr(fb.ext), repr(fb.ext_kwargs)), file=self.file)
        if fb.setup_stat is not None:
            self._write_a_stat('setup', fb.setup_stat)
        if fb.forward_stat is not None:
            self._write_a_stat('forward', fb.forward_stat)
        if fb.backward_stat is not None:
            self._write_a_stat('backward', fb.backward_stat)
class FunctionBenchmarkCsvWriter(FunctionBenchmarkWriter):
"""CSV format benchmark writer.
It writes benchmark statistics taken by :class:`FunctionBenchmark`
in CSV file format.
Args:
file (Python file object): The benchmark will be written to the file.
"""
def __init__(self, file=sys.stdout):
self.file = file
self.write_header()
def write_header(self):
fields = [
'module', 'function', 'class', 'inspecs',
'func_args', 'func_kwargs', 'ext', 'ext_kwargs',
'setup [ms/run]', 'setup [run]',
'forward [ms/run]', 'forward [run]',
'backward [ms/run]', 'backward [run]',
]
writer = csv.writer(self.file)
writer.writerow(fields)
def _stat_as_a_list(self, stat):
return ['{:.2f}'.format(stat.mean_time * 1000), str(stat.run_count)]
def write(self, fb):
values = [
fb.module, fb.func.__name__, fb.func_ins.name,
repr(fb.inspecs),
repr(fb.func_args), repr(fb.func_kwargs),
fb.ext, repr(fb.ext_kwargs),
]
values.extend(self._stat_as_a_list(fb.setup_stat))
values.extend(self._stat_as_a_list(fb.forward_stat))
if fb.backward_stat is not None:
values.extend(self._stat_as_a_list(fb.backward_stat))
writer = csv.writer(self.file)
writer.writerow(values)
def create_inputs(inspecs):
"""Create input :obj:`nnabla.Variable` from :obj:`Inspec`.
Args:
inspecs (:obj:`list` of :obj:`Inspec`): A list of ``Inspec``.
Returns:
:obj:`list` of :obj:`nnabla.Variable`: Input variables.
"""
ret = []
for i in inspecs:
v = nn.Variable(i.shape, need_grad=i.need_grad)
v.d = i.init(v.shape)
ret.append(v)
return ret
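# A hedged usage sketch of Inspec/create_inputs (editor's addition; the shapes
# and the second initializer are illustrative only). Inspec defaults fill in
# I.NormalInitializer() and need_grad=True, so only the shape is mandatory:
#
#     inspecs = [Inspec((64, 128)),                                  # gradient input
#                Inspec((128, 32), I.UniformInitializer(), False)]   # fixed input
#     inputs = create_inputs(inspecs)   # -> list of nn.Variable with data filled in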
class FunctionBenchmark:
r"""Benchmarking a function of a parametric function.
This will calculate time to execute setup, forward and backward
methods of Function class.
Args:
func (function): It can be a function in either
:module:`nnabla.functions` or
:module:`nnabla.parametric_functions`.
inspecs (:obj:`list` of :obj:`Inspec`): A list of ``Inspec``.
They specify shape, initializer, and need_grad attributes.
func_args (list): A list of function arguments passed to func.
func_kwargs (dict): Keyword arguments passed to func.
ext (str): Extension module, e.g. 'cuda', 'cudnn'.
        ext_kwargs (dict): Keyword arguments passed to extension APIs,
            e.g. ``context(**kw)``, ``synchronize(**kw)``.
        min_run (int): Minimum number of times each method is called per
            measurement.
        min_time (float): Minimum total measurement time in seconds.
Note:
You should not pass any compositional function
(a function constructed by multiple Functions) to ``func`` argument.
Benchmark will take place only in the last function instance in the
chain of a compositional function.
"""
def __init__(self, func, inspecs, func_args, func_kwargs,
ext, ext_kwargs, min_run=1, min_time=1.0):
nn.clear_parameters()
self.inputs = None
self.outputs = None
self.func_ins = None
self.setup_stat = None
self.forward_stat = None
self.backward_stat = None
self.func = func
self.module = func.__module__
self.inspecs = inspecs
self.inputs_f = create_inputs(inspecs)
self.func_args = func_args
self.func_kwargs = func_kwargs
self.ext = ext
self.ext_kwargs = ext_kwargs
self.mod_ext = importlib.import_module(
'.' + ext, 'nnabla_ext')
self.ctx = self.mod_ext.context(**ext_kwargs)
self.min_run = min_run
self.min_time = min_time
def _calc_benchmark_stat(self, f):
timer = Timer()
i = 0
while True:
f()
i += 1
if i >= self.min_run:
_, elapsed = timer.lap()
if elapsed > self.min_time:
break
return BenchmarkStat(elapsed / i, i)
def clear(self):
"""Clear computation graph internally kept.
"""
self.inputs = None
self.outputs = None
self.func_ins = None
def _setup(self, delete=True):
"""Create a function instance and execute setup.
Args:
delete (bool): Delete buffered variables.
"""
if delete:
self.clear()
with nn.context_scope(self.ctx):
outputs = self.func(
*(self.inputs_f + self.func_args), **self.func_kwargs)
if not hasattr(outputs, '__iter__'):
self.outputs = [outputs]
else:
self.outputs = outputs
self.func_ins = self.outputs[0].parent
self.inputs = self.func_ins.inputs
def _forward(self):
"""Execute forward.
        This must be called after ``setup()`` has been called.
"""
self.func_ins.forward(self.inputs, self.outputs)
def _backward(self):
"""Execute backward.
        This should be called after ``setup()`` and ``forward()`` have been called once.
"""
self.func_ins.backward(self.inputs, self.outputs)
def benchmark_setup(self):
"""Benchmark setup execution.
"""
def f():
self._setup()
self.mod_ext.synchronize(**self.ext_kwargs)
f() # Ignore first
self.setup_stat = self._calc_benchmark_stat(f)
def benchmark_forward(self):
"""Benchmark forward execution.
"""
self._setup()
def f():
self._forward()
self.mod_ext.synchronize(**self.ext_kwargs)
f() # Ignore first
self.forward_stat = self._calc_benchmark_stat(f)
def _benchmark_backward(self):
self._setup()
self._forward()
for o in self.outputs:
o.grad.fill(1)
def f():
self._backward()
self.mod_ext.synchronize(**self.ext_kwargs)
f() # Ignore first
self.backward_stat = self._calc_benchmark_stat(f)
def benchmark_backward(self):
"""Benchmark backward execution.
Note:
If backward execution throws any exception,
this benchmark system considers the error is because the function
doesn't support backward operation, then set the benchmark
``None``.
"""
try:
self._benchmark_backward()
except RuntimeError as e:
# Seems like not implemented.
print(e)
self.mod_ext.synchronize(**self.ext_kwargs)
self.backward_stat = None
def benchmark(self):
"""Do all benchmarks of setup, forward and backward.
"""
self.benchmark_setup()
self.benchmark_forward()
self.benchmark_backward()
def write(self, writer=FunctionBenchmarkCsvWriter()):
"""Write the function benchmark results using a writer class.
The benchmark result will be written according to the format
defined in a given writer class.
Args:
writer (FunctionBenchmarkWriter): Writer class.
It is recommended to use :func:`FunctionBenchmarkCsvWriter`.
"""
writer.write(self)
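# A hedged end-to-end sketch (editor's addition; the chosen function, extension
# name and keyword arguments are illustrative and assume the corresponding
# nnabla extension is installed):
#
#     import nnabla.functions as F
#     fb = FunctionBenchmark(F.relu, [Inspec((64, 1000))], [], {},
#                            ext='cpu', ext_kwargs={})
#     fb.benchmark()                                    # setup + forward + backward
#     fb.write(FunctionBenchmarkCsvWriter(sys.stdout))  # one CSV row per function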
|
sony/nnabla
|
python/benchmark/function/function_benchmark.py
|
Python
|
apache-2.0
| 10,661
|
# test memoryview accessing maximum values for signed/unsigned elements
try:
from array import array
memoryview
except:
import sys
print("SKIP")
sys.exit()
print(list(memoryview(b'\x7f\x80\x81\xff')))
print(list(memoryview(array('b', [0x7f, -0x80]))))
print(list(memoryview(array('B', [0x7f, 0x80, 0x81, 0xff]))))
print(list(memoryview(array('h', [0x7f00, -0x8000]))))
print(list(memoryview(array('H', [0x7f00, 0x8000, 0x8100, 0xffff]))))
|
cwyark/micropython
|
tests/basics/memoryview2.py
|
Python
|
mit
| 460
|
# -*- coding: utf-8 -*-
"""
Username Resolver
~~~~~
:copyright: (c) 2015 by Openname.org
:license: MIT, see LICENSE for more details.
"""
from bs4 import BeautifulSoup
GITHUB_GIST_TAG = 'gist-description'
GITHUB_TEXT_TAG = 'blob-wrapper data type-text js-blob-data'
GITHUB_MARDOWN_TAG = 'blob-wrapper data type-markdown js-blob-data'
from .sites import SITES
# ---------------------------
def get_github_text(raw_html):
html = BeautifulSoup(raw_html)
gist_description = html.body.find('div', attrs={'class': GITHUB_GIST_TAG})
if gist_description is not None:
gist_description = gist_description.text
else:
gist_description = ''
file_text = html.body.find('div', attrs={'class': GITHUB_TEXT_TAG})
if file_text is not None:
file_text = file_text.text
else:
file_text = html.body.find('div', attrs={'class': GITHUB_MARDOWN_TAG})
if file_text is not None:
file_text = file_text.text
else:
file_text = ''
search_text = gist_description + ' ' + file_text
return search_text
# ---------------------------
def get_search_text(service, raw_html):
if service == 'facebook':
raw_html = raw_html.replace('<!--', '').replace('-->', '')
html_soup = BeautifulSoup(raw_html)
if service in SITES:
query_data = SITES[service]['html_query']
search_text = ''
if 'class' in query_data:
search_results = html_soup.body.find('div', class_=query_data['class'])
if search_results:
search_text = search_results.text
elif 'title' in query_data:
            search_results = html_soup.title.string
            if search_results:
                search_text = search_results
else:
search_results = html_soup.body
if search_results:
search_text = search_results.text
return search_text
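# Hedged sketch (editor's addition): the real SITES mapping lives in .sites and
# may differ, but get_search_text only relies on each service exposing an
# 'html_query' dict carrying either a 'class' or a 'title' key, roughly like:
#
#     SITES_EXAMPLE = {
#         'github': {'html_query': {'class': GITHUB_TEXT_TAG}},
#         'hackernews': {'html_query': {'title': True}},
#     }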
|
jetbox/resolver
|
server/proofcheck/htmlparsing.py
|
Python
|
mit
| 1,899
|
"""
module queue
~~~~~~~~~~~~~~
FIFO.
:copyright: (c) 2017 by 0xE8551CCB.
:license: MIT, see LICENSE for more details.
"""
class Queue(object):
def __init__(self):
self._elements = []
def __repr__(self):
return '<Queue size={!r}>'.format(len(self))
def enqueue(self, x):
self._elements.append(x)
def dequeue(self):
return self._elements.pop(0)
def is_empty(self):
return len(self) == 0
def __len__(self):
return len(self._elements)
def clear(self):
self._elements.clear()
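# A short usage sketch (editor's addition) showing the FIFO behaviour:
#
#     q = Queue()
#     q.enqueue('a')
#     q.enqueue('b')
#     q.dequeue()      # -> 'a' (first in, first out)
#     len(q)           # -> 1
#     q.is_empty()     # -> False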
|
AjithPanneerselvam/pyalgorithm
|
pyalgorithm/datastructures/queue.py
|
Python
|
mit
| 590
|
#!/usr/bin/env yamtbx.python
"""
(c) RIKEN 2015. All rights reserved.
Author: Keitaro Yamashita
This software is released under the new BSD License; see LICENSE.
"""
import sys, os, optparse, math
from collections import OrderedDict
from yamtbx.dataproc.scale_data import kBdecider
import iotbx.mtz
import iotbx.phil
from cctbx.array_family import flex
from cctbx import miller
master_params_str="""\
logscale = False
.type = bool
.help = log scale
dmin = None
.type = float
dmax = None
.type = float
nbins = 120
.type = int
show = true
.type = bool
.help = show graphics
output = "plot.pdf"
.type = path
take_anom_diff = False
.type = bool
.help = Use anomalous difference, I(+) - I(-)
over_sigma = False
.type = bool
.help = plot I/sigma rather than I
noscale = False
.type = bool
.help = Do not apply linear scale
extra = *cc rfactor rsplit no
.type = choice(multi=False)
.help = Plot CC or R-factor when two data given.
force_same_cell = False
.type = bool
.help = Use the same cell for all data
scale {
dmin = None
.type = float
dmax = None
.type = float
bscale = False
.type = bool
.help = B-factor scaling
}
"""
def commonalize(Is_in):
Is = [s[1] for s in Is_in]
new_Is = []
Is0 = Is[0]
for I in Is[1:]:
Is0, I = Is0.common_sets(I, assert_is_similar_symmetry=False)
new_Is.append(I)
Is = []
for I in new_Is:
I = I.common_set(Is0, assert_is_similar_symmetry=False)
Is.append(I)
res = [Is0,] + Is
return [[o[0], r, o[2]] for o, r in zip(Is_in, res)] # putting back original data
# commonalize()
def decide_label(labels):
labs = filter(lambda x:not x.upper().startswith(("SIG","PHI")), labels)
return ",".join(labs)
if __name__ == "__main__":
cmdline = iotbx.phil.process_command_line(args=sys.argv[1:],
master_string=master_params_str)
params = cmdline.work.extract()
args = cmdline.remaining_args
if len(args) == 0:
print "Usage: %s mtz1 lab1 mtz2 lab2 [mtz3 lab3...] param=value" % sys.argv[0]
print
print "Defaultparamters:"
print cmdline
cmdline.work.format(python_object=params).show(out=sys.stdout, prefix=" ", attributes_level=1)
quit()
print "Paramters:"
cmdline.work.format(python_object=params).show(out=sys.stdout, prefix=" ")
print
if params.over_sigma:
assert params.noscale
Is = [] # [[name, miller_array, scale], ..]
for mtzfile, label in ((args[2*i],args[2*i+1]) for i in xrange((len(args))//2)):
mtzobj = iotbx.mtz.object(file_name=mtzfile)
arrays = filter(lambda s: label in s.info().labels, mtzobj.as_miller_arrays())
if len(arrays) == 0:
print "ERROR! %s does not have column %s"%(mtzfile, label)
print "Candidates:", map(lambda x:x.info().labels, mtzobj.as_miller_arrays())
quit()
labels = arrays[0].info().labels
if params.take_anom_diff:
assert arrays[0].anomalous_flag()
data = arrays[0].as_intensity_array().anomalous_differences()
elif arrays[0].is_complex_array() or arrays[0].is_xray_reconstructed_amplitude_array() or arrays[0].is_xray_amplitude_array():
print "Warning - amplitude or complex array %s" % labels
data = arrays[0].as_intensity_array().as_non_anomalous_array().merge_equivalents(use_internal_variance=False).array()
elif arrays[0].is_integer_array() or arrays[0].is_real_array():
print "Warning - no experimental array %s" % labels
data = arrays[0].as_non_anomalous_array().as_double()
else:
raise "Can't plot %s" % labels
data = data.resolution_filter(d_max=params.dmax, d_min=params.dmin)
Is.append([mtzfile+":"+decide_label(labels), data, (1.0,0.0)])
print "loaded:", mtzfile, labels
#if "hkl" in mtzfile:
# Is[-1][1] = miller.array(miller_set=Is[-1][1], data= Is[-1][1].data() * flex.exp(4.8*Is[-1][1].d_star_sq().data()))
if params.force_same_cell:
for x in Is[1:]:
x[1] = x[1].customized_copy(crystal_symmetry=Is[0][1])
# Take common sets
Is = commonalize(Is) ####
# Decide scale
if not params.noscale:
for i in xrange(1, len(Is)):
I = Is[i][1].resolution_filter(d_max=params.scale.dmax, d_min=params.scale.dmin)
I0 = Is[0][1].resolution_filter(d_max=params.scale.dmax, d_min=params.scale.dmin)
I, I0 = I.common_sets(I0, assert_is_similar_symmetry=False)
if params.scale.bscale:
Is[i][2] = kBdecider(I0, I).run()
print "Scale for", Is[i][0], "is", Is[i][2]
else:
scale = flex.sum(I0.data()*I.data()) / flex.sum(flex.pow2(I.data()))
Is[i][2] = scale, 0
print "Scale for", Is[i][0], "is", scale
print Is[0][1].data().size()
# Prepare plot data
for_plot = OrderedDict() # {name: [mean, ...], ..}
binner = Is[0][1].setup_binner(n_bins=params.nbins)#reflections_per_bin=50)
for i_bin in binner.range_used():
for name, I, (scale, b) in Is:
dmax, dmin = binner.bin_d_range(i_bin)
Isel = I.resolution_filter(d_max=dmax, d_min=dmin)
#Isel = I.select(binner.bin_indices() == i_bin) # crash if not common
if params.over_sigma:
data = Isel.data() / Isel.sigmas()
else:
bfac = flex.exp(-b * Isel.d_star_sq().data()) if b != 0 else 1.
data = Isel.data() *scale*bfac
if len(data)==0:
print "WARNING: ", name, "No data in %f .. %f" % binner.bin_d_range(i_bin)
for_plot.setdefault(name, []).append(float("nan"))
elif params.logscale:
for_plot.setdefault(name, []).append(math.log(flex.mean(data))) # taking log<I>
else:
for_plot.setdefault(name, []).append(flex.mean(data))
# If only two data in, calc CC.
extra = []
if len(Is) == 2 and params.extra.lower() != "no":
for i_bin in binner.range_used():
dmax, dmin = binner.bin_d_range(i_bin)
#Isel0, Isel1 = map(lambda x:x[1].select(binner.bin_indices() == i_bin), Is)
Isel0, Isel1 = map(lambda x:x[1].resolution_filter(d_max=dmax, d_min=dmin), Is)
Isel0, Isel1 = Isel0.common_sets(Isel1, assert_is_similar_symmetry=False)
scale, b = Is[1][2][0], Is[1][2][1]
#bfac = flex.exp(-b * Isel.d_star_sq().data()) if b != 0 else 1.
#data = Isel.data() * scale*bfac
bfac = flex.exp(-b * Isel1.d_star_sq().data()) if b != 0 else 1.
if params.extra.lower() == "cc":
corr = flex.linear_correlation(Isel0.data(), Isel1.data()*bfac)
if corr.is_well_defined():
extra.append(corr.coefficient())
else:
extra.append(float("nan"))
elif params.extra.lower() == "rfactor":
# This R-factor is like model R-factor.
# but maybe this should be sum(|I1-I2|)/2sum(I1+I2) or something like that..?
denom = flex.sum(Isel0.data())
numer = flex.sum(flex.abs(Isel0.data() - Isel1.data()*scale*bfac))
if denom != 0:
extra.append(numer/denom)
else:
extra.append(float("nan"))
elif params.extra.lower() == "rsplit":
denom = flex.sum((Isel0.data()+Isel1.data()*scale*bfac)/2.)
numer = flex.sum(flex.abs(Isel0.data() - Isel1.data()*scale*bfac))
if denom != 0:
extra.append(1./math.sqrt(2.)*numer/denom)
else:
extra.append(float("nan"))
else:
raise "Never reaches here"
# Calc overall CC
Isel0, Isel1 = Is[0][1].common_sets(Is[1][1], assert_is_similar_symmetry=False)
scale, b = Is[1][2][0], Is[1][2][1]
bfac = flex.exp(-b * Isel1.d_star_sq().data()) if b != 0 else 1.
corr = flex.linear_correlation(Isel0.data(), Isel1.data()*bfac)
if corr.is_well_defined():
print "Overall CC=", corr.coefficient(), "with %d reflections" % len(Isel0.data())
# Calc overall R
denom = flex.sum(Isel0.data())
numer = flex.sum(flex.abs(Isel0.data() - Isel1.data()*scale*bfac))
print "Overall R=", numer/denom
plot_x = [binner.bin_d_range(i)[1]**(-2) for i in binner.range_used()]
print " d %s" % " ".join(for_plot.keys())
f = map(lambda x:"%"+"%d"%len(x)+".2f", for_plot.keys())
for i, x in enumerate(plot_x):
line = "%8.3f " % (1./math.sqrt(x))
line += " ".join([fj%for_plot[k][i] for fj, k in zip(f, for_plot)])
if extra != []:
line += " %.4f" % extra[i]
print line
# Plot
import matplotlib
matplotlib.use('Agg') # Allow to work without X
from pylab import *
from matplotlib.ticker import FuncFormatter
s2_formatter = lambda x,pos: "inf" if x<1e-10 else "%.2f" % (1./math.sqrt(x))
#from matplotlib.backends.backend_pdf import PdfPages
fig, ax1 = plt.subplots()
plots = {}
for name, vals in for_plot.items():
name = name[-30:].lstrip("_")
plots[name] = plot(plot_x, vals, label=name)
legend()
xlabel('resolution (d^-3)')
if params.logscale:
ylabel('log<I>')
else:
ylabel('<I/sigma>' if params.over_sigma else '<I>')
setp(gca().get_legend().get_texts(), fontsize="small")
plot_title = ""
if params.scale.bscale:
plot_title += ' Scaled with B-factors'
plot_title += ' (' + ", ".join(map(lambda x: "%.1f"%x[2][1], Is)) + ')'
if params.take_anom_diff:
plot_title += ' With anom diff'
title(plot_title)
gca().xaxis.set_major_formatter(FuncFormatter(s2_formatter))
if extra != []:
ax2 = ax1.twinx()
ax2.plot(plot_x, extra, 'r')
ax2.set_ylabel('CC' if params.extra.lower()=="cc" else "R-factor", color='r')
savefig(params.output)
if params.show:
show()
|
keitaroyam/yamtbx
|
yamtbx/dataproc/command_line/plot_I_d.py
|
Python
|
bsd-3-clause
| 10,298
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib2
import re
import json
class ItudouResolver:
def fetch_url(self, url):
return urllib2.urlopen(url).read()
def resolve(self,iid,res):
url="http://v2.tudou.com/v2/cdn?id="+ iid
html = self.fetch_url(url)
find1 = re.search(r'<f w=.*?brt="4">(.*?)<\/f>', html)
find2 = re.search(r'<f w=.*?brt="3">(.*?)<\/f>', html)
find3 = re.search(r'<f w=.*?brt="\d">(.*?)<\/f>', html)
if find1 and res >=3: #超清
return find1.group(1)
elif find2 and res >=2: #高清
return find2.group(1)
elif find3 and res >=1:
return find3.group(1)
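# Hedged usage sketch (editor's addition; the item id is a placeholder):
#
#     resolver = ItudouResolver()
#     video_url = resolver.resolve('some_tudou_item_id', 3)  # try 超清 first, then fall back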
|
HDPxbmc/script.module.urlresolvercn
|
lib/urlresolvercn/plugins/lib/ItudouResolver.py
|
Python
|
gpl-2.0
| 711
|
#!/usr/bin/env python
#coding=utf-8
import os
from app import create_app, db
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand
from app.model.users import User
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, User=User)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
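# Hedged usage sketch (editor's addition): typical Flask-Script invocations for
# this manage.py (the db sub-commands come from Flask-Migrate's MigrateCommand):
#
#     python manage.py shell        # interactive shell with app, db and User preloaded
#     python manage.py db migrate   # autogenerate a migration script
#     python manage.py db upgrade   # apply pending migrations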
|
youqingkui/wx.youqingkui.me
|
manage.py
|
Python
|
mit
| 543
|
#!/usr/bin/env python
#
# Copyright 2010 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Python client library for the Facebook Platform.
This client library is designed to support the Graph API and the official
Facebook JavaScript SDK, which is the canonical way to implement
Facebook authentication. Read more about the Graph API at
http://developers.facebook.com/docs/api. You can download the Facebook
JavaScript SDK at http://github.com/facebook/connect-js/.
If your application is using Google AppEngine's webapp framework, your
usage of this module might look like this:
user = facebook.get_user_from_cookie(self.request.cookies, key, secret)
if user:
graph = facebook.GraphAPI(user["access_token"])
profile = graph.get_object("me")
friends = graph.get_connections("me", "friends")
"""
import cgi
import hashlib
import time
import urllib
# Find a JSON parser
try:
import json
_parse_json = lambda s: json.loads(s)
except ImportError:
try:
import simplejson
_parse_json = lambda s: simplejson.loads(s)
except ImportError:
# For Google AppEngine
from django.utils import simplejson
_parse_json = lambda s: simplejson.loads(s)
class GraphAPI(object):
"""A client for the Facebook Graph API.
See http://developers.facebook.com/docs/api for complete documentation
for the API.
The Graph API is made up of the objects in Facebook (e.g., people, pages,
events, photos) and the connections between them (e.g., friends,
photo tags, and event RSVPs). This client provides access to those
primitive types in a generic way. For example, given an OAuth access
token, this will fetch the profile of the active user and the list
of the user's friends:
graph = facebook.GraphAPI(access_token)
user = graph.get_object("me")
friends = graph.get_connections(user["id"], "friends")
You can see a list of all of the objects and connections supported
by the API at http://developers.facebook.com/docs/reference/api/.
You can obtain an access token via OAuth or by using the Facebook
JavaScript SDK. See http://developers.facebook.com/docs/authentication/
for details.
If you are using the JavaScript SDK, you can use the
get_user_from_cookie() method below to get the OAuth access token
for the active user from the cookie saved by the SDK.
"""
def __init__(self, access_token=None):
self.access_token = access_token
def get_object(self, id, **args):
"""Fetchs the given object from the graph."""
return self.request(id, args)
def get_objects(self, ids, **args):
"""Fetchs all of the given object from the graph.
We return a map from ID to object. If any of the IDs are invalid,
we raise an exception.
"""
args["ids"] = ",".join(ids)
return self.request("", args)
def get_connections(self, id, connection_name, **args):
"""Fetchs the connections for given object."""
return self.request(id + "/" + connection_name, args)
def put_object(self, parent_object, connection_name, **data):
"""Writes the given object to the graph, connected to the given parent.
For example,
graph.put_object("me", "feed", message="Hello, world")
writes "Hello, world" to the active user's wall. Likewise, this
        will comment on the first post of the active user's feed:
feed = graph.get_connections("me", "feed")
post = feed["data"][0]
graph.put_object(post["id"], "comments", message="First!")
See http://developers.facebook.com/docs/api#publishing for all of
the supported writeable objects.
Most write operations require extended permissions. For example,
publishing wall posts requires the "publish_stream" permission. See
http://developers.facebook.com/docs/authentication/ for details about
extended permissions.
"""
assert self.access_token, "Write operations require an access token"
return self.request(parent_object + "/" + connection_name, post_args=data)
def put_wall_post(self, message, attachment={}, profile_id="me"):
"""Writes a wall post to the given profile's wall.
We default to writing to the authenticated user's wall if no
profile_id is specified.
attachment adds a structured attachment to the status message being
posted to the Wall. It should be a dictionary of the form:
{"name": "Link name"
"link": "http://www.example.com/",
"caption": "{*actor*} posted a new review",
"description": "This is a longer description of the attachment",
"picture": "http://www.example.com/thumbnail.jpg"}
"""
return self.put_object(profile_id, "feed", message=message, **attachment)
def put_comment(self, object_id, message):
"""Writes the given comment on the given post."""
return self.put_object(object_id, "comments", message=message)
def put_like(self, object_id):
"""Likes the given post."""
return self.put_object(object_id, "likes")
def delete_object(self, id):
"""Deletes the object with the given ID from the graph."""
self.request(id, post_args={"method": "delete"})
def fql(self, query):
return self.request(path='method/fql.query', domain='api', args={'query': query, 'format': 'json'})
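    # Hedged usage sketch (editor's addition; FQL has since been retired by
    # Facebook, so the query below is purely illustrative):
    #
    #     graph = GraphAPI(access_token)
    #     rows = graph.fql("SELECT uid, name FROM user WHERE uid = me()")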
def request(self, path, args=None, post_args=None, domain='graph'):
"""Fetches the given path in the Graph API.
We translate args to a valid query string. If post_args is given,
we send a POST request to the given path with the given arguments.
"""
if not args: args = {}
if self.access_token:
if post_args is not None:
post_args["access_token"] = self.access_token
else:
args["access_token"] = self.access_token
post_data = None if post_args is None else urllib.urlencode(post_args)
last_io_exception = None
for n in range(4):
try:
file = urllib.urlopen("https://" + domain + ".facebook.com/" + path + "?" +
urllib.urlencode(args), post_data)
try:
response = _parse_json(file.read())
finally:
file.close()
except IOError, io:
last_io_exception = io
time.sleep(n / 2.0) # Backoff a little and try again
else:
break
else:
raise last_io_exception
if isinstance(response, dict) and response.get("error"):
raise GraphAPIError(response["error"]["type"],
response["error"]["message"])
return response
class GraphAPIError(Exception):
def __init__(self, type, message):
Exception.__init__(self, message)
self.type = type
##### NEXT TWO FUNCTIONS PULLED FROM https://github.com/jgorset/facepy/blob/master/facepy/signed_request.py
import base64
import hmac
def urlsafe_b64decode(str):
"""Perform Base 64 decoding for strings with missing padding."""
l = len(str)
pl = l % 4
return base64.urlsafe_b64decode(str.ljust(l+pl, "="))
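# Hedged worked example (editor's addition): standard base64 of "abcd" is
# "YWJjZA==", but Facebook strips the trailing padding, so:
#
#     urlsafe_b64decode("YWJjZA")   # len 6, ljust pads to "YWJjZA==" -> returns "abcd"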
def parse_signed_request(signed_request, secret):
"""
Parse signed_request given by Facebook (usually via POST),
decrypt with app secret.
Arguments:
signed_request -- Facebook's signed request given through POST
    secret -- Application's app_secret required to decrypt signed_request
"""
if "." in signed_request:
esig, payload = signed_request.split(".")
else:
return {}
sig = urlsafe_b64decode(str(esig))
data = _parse_json(urlsafe_b64decode(str(payload)))
if not isinstance(data, dict):
raise SignedRequestError("Pyload is not a json string!")
return {}
if data["algorithm"].upper() == "HMAC-SHA256":
if hmac.new(secret, payload, hashlib.sha256).digest() == sig:
return data
else:
raise SignedRequestError("Not HMAC-SHA256 encrypted!")
return {}
def get_user_from_cookie(cookies, app_id, app_secret):
"""Parses the cookie set by the official Facebook JavaScript SDK.
cookies should be a dictionary-like object mapping cookie names to
cookie values.
If the user is logged in via Facebook, we return a dictionary with the
keys "uid" and "access_token". The former is the user's Facebook ID,
and the latter can be used to make authenticated requests to the Graph API.
If the user is not logged in, we return None.
Download the official Facebook JavaScript SDK at
http://github.com/facebook/connect-js/. Read more about Facebook
authentication at http://developers.facebook.com/docs/authentication/.
"""
cookie = cookies.get("fbsr_" + app_id, "")
if not cookie:
return None
response = parse_signed_request(cookie, app_secret)
if not response:
return None
args = dict(
code = response['code'],
client_id = app_id,
client_secret = app_secret,
redirect_uri = '',
)
file = urllib.urlopen("https://graph.facebook.com/oauth/access_token?" + urllib.urlencode(args))
try:
token_response = file.read()
finally:
file.close()
access_token = cgi.parse_qs(token_response)["access_token"][-1]
return dict(
uid = response["user_id"],
access_token = access_token,
)
|
canvasnetworks/canvas
|
common/facebook.py
|
Python
|
bsd-3-clause
| 10,217
|