prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
# coding: utf-8
from __future__ import print_function, absolute_import, division, unicode_literals
import sys
from .compat import no_limit_int # NOQA
if False: # MYPY
from typing import Text, Any, Dict, List # NOQA
__all__ = ["ScalarFloat", "ExponentialFloat", "ExponentialCapsFloat"]
class ScalarFloat(float):
    """Float subclass that records the lexical layout of the scalar it was
    parsed from (overall width, precision, mantissa sign, leading zeros,
    exponent and its width/sign, underscore positions) so the value can be
    re-serialized in its original form.
    """

    def __new__(cls, *args, **kw):
        # type: (Any, Any, Any) -> Any
        width = kw.pop('width', None)  # type: ignore
        prec = kw.pop('prec', None)  # type: ignore
        m_sign = kw.pop('m_sign', None)  # type: ignore
        m_lead0 = kw.pop('m_lead0', 0)  # type: ignore
        # BUG FIX: the 'exp' and 'underscore' pops were garbled by stray
        # "|" tokens in the previous revision, breaking the syntax.
        exp = kw.pop('exp', None)  # type: ignore
        e_width = kw.pop('e_width', None)  # type: ignore
        e_sign = kw.pop('e_sign', None)  # type: ignore
        underscore = kw.pop('underscore', None)  # type: ignore
        v = float.__new__(cls, *args, **kw)  # type: ignore
        v._width = width
        v._prec = prec
        v._m_sign = m_sign
        v._m_lead0 = m_lead0
        v._exp = exp
        v._e_width = e_width
        v._e_sign = e_sign
        v._underscore = underscore
        return v

    def _copy_layout(self, value):
        # type: (Any) -> Any
        """Wrap *value* in this type, carrying over the width and a copy of
        the underscore list (the attributes the in-place operators keep)."""
        x = type(self)(value)
        x._width = self._width  # type: ignore
        x._underscore = self._underscore[:] if self._underscore is not None else None  # type: ignore # NOQA
        return x

    def __iadd__(self, a):  # type: ignore
        # type: (Any) -> Any
        return self._copy_layout(self + a)

    def __ifloordiv__(self, a):  # type: ignore
        # type: (Any) -> Any
        return self._copy_layout(self // a)

    def __imul__(self, a):  # type: ignore
        # type: (Any) -> Any
        return self._copy_layout(self * a)

    def __ipow__(self, a):  # type: ignore
        # type: (Any) -> Any
        return self._copy_layout(self ** a)

    def __isub__(self, a):  # type: ignore
        # type: (Any) -> Any
        return self._copy_layout(self - a)

    def dump(self, out=sys.stdout):
        # type: (Any) -> Any
        """Write a one-line debug view of the value and its layout
        attributes to *out* (default: stdout)."""
        print('ScalarFloat({}| w:{}, p:{}, s:{}, lz:{}|{}, w:{}, s:{})'.format(
            self, self._width, self._prec, self._m_sign, self._m_lead0,  # type: ignore
            self._exp, self._e_width, self._e_sign), file=out)  # type: ignore
class ExponentialFloat(ScalarFloat):
    # Marker subclass: a float whose source used exponential notation
    # (presumably lowercase 'e', per the class name -- the dumper decides).
    def __new__(cls, value, width=None, underscore=None):
        # type: (Any, Any, Any) -> Any
        # Only width/underscore are remembered; the remaining layout
        # attributes default via ScalarFloat.__new__.
        return ScalarFloat.__new__(cls, value, width=width, underscore=underscore)
class ExponentialCapsFloat(ScalarFloat):
    # Marker subclass: a float whose source used exponential notation with
    # a capital 'E' (per the class name -- the dumper decides).
    def __new__(cls, value, width=None, underscore=None):
        # type: (Any, Any, Any) -> Any
        # Only width/underscore are remembered; the remaining layout
        # attributes default via ScalarFloat.__new__.
        return ScalarFloat.__new__(cls, value, width=width, underscore=underscore)
|
~~~~~~~~~~~~~~~~~~~
Lexers for semantic web and RDF query languages and markup.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, default
fro | m pygments.token import Keyword, Punctuation, String, Number, Operator, Generic, \
Whitespace, Name, Literal, Comment, Text
__all__ = [ | 'SparqlLexer', 'TurtleLexer']
class SparqlLexer(RegexLexer):
    """
    Lexer for `SPARQL <http://www.w3.org/TR/rdf-sparql-query/>`_ query language.

    .. versionadded:: 2.0
    """
    name = 'SPARQL'
    aliases = ['sparql']
    filenames = ['*.rq', '*.sparql']
    mimetypes = ['application/sparql-query']

    # character group definitions ::
    # Unicode ranges below follow the PN_CHARS_BASE production of the
    # SPARQL 1.1 grammar.
    PN_CHARS_BASE_GRP = (u'a-zA-Z'
                         u'\u00c0-\u00d6'
                         u'\u00d8-\u00f6'
                         u'\u00f8-\u02ff'
                         u'\u0370-\u037d'
                         u'\u037f-\u1fff'
                         u'\u200c-\u200d'
                         u'\u2070-\u218f'
                         u'\u2c00-\u2fef'
                         u'\u3001-\ud7ff'
                         u'\uf900-\ufdcf'
                         u'\ufdf0-\ufffd')
    PN_CHARS_U_GRP = (PN_CHARS_BASE_GRP + '_')
    PN_CHARS_GRP = (PN_CHARS_U_GRP +
                    r'\-' +
                    r'0-9' +
                    u'\u00b7' +
                    u'\u0300-\u036f' +
                    u'\u203f-\u2040')
    HEX_GRP = '0-9A-Fa-f'
    PN_LOCAL_ESC_CHARS_GRP = r' _~.\-!$&"()*+,;=/?#@%'

    # terminal productions ::
    PN_CHARS_BASE = '[' + PN_CHARS_BASE_GRP + ']'
    PN_CHARS_U = '[' + PN_CHARS_U_GRP + ']'
    PN_CHARS = '[' + PN_CHARS_GRP + ']'
    HEX = '[' + HEX_GRP + ']'
    PN_LOCAL_ESC_CHARS = '[' + PN_LOCAL_ESC_CHARS_GRP + ']'
    IRIREF = r'<(?:[^<>"{}|^`\\\x00-\x20])*>'
    BLANK_NODE_LABEL = '_:[0-9' + PN_CHARS_U_GRP + '](?:[' + PN_CHARS_GRP + \
                       '.]*' + PN_CHARS + ')?'
    PN_PREFIX = PN_CHARS_BASE + '(?:[' + PN_CHARS_GRP + '.]*' + PN_CHARS + ')?'
    VARNAME = u'[0-9' + PN_CHARS_U_GRP + '][' + PN_CHARS_U_GRP + \
              u'0-9\u00b7\u0300-\u036f\u203f-\u2040]*'
    PERCENT = '%' + HEX + HEX
    PN_LOCAL_ESC = r'\\' + PN_LOCAL_ESC_CHARS
    PLX = '(?:' + PERCENT + ')|(?:' + PN_LOCAL_ESC + ')'
    PN_LOCAL = ('(?:[' + PN_CHARS_U_GRP + ':0-9' + ']|' + PLX + ')' +
                '(?:(?:[' + PN_CHARS_GRP + '.:]|' + PLX + ')*(?:[' +
                PN_CHARS_GRP + ':]|' + PLX + '))?')
    EXPONENT = r'[eE][+-]?\d+'

    # Lexer token definitions ::
    tokens = {
        'root': [
            (r'\s+', Text),
            # keywords ::
            (r'((?i)select|construct|describe|ask|where|filter|group\s+by|minus|'
             r'distinct|reduced|from\s+named|from|order\s+by|desc|asc|limit|'
             r'offset|bindings|load|clear|drop|create|add|move|copy|'
             r'insert\s+data|delete\s+data|delete\s+where|delete|insert|'
             r'using\s+named|using|graph|default|named|all|optional|service|'
             r'silent|bind|union|not\s+in|in|as|having|to|prefix|base)\b', Keyword),
            (r'(a)\b', Keyword),
            # IRIs ::
            ('(' + IRIREF + ')', Name.Label),
            # blank nodes ::
            ('(' + BLANK_NODE_LABEL + ')', Name.Label),
            # # variables ::
            ('[?$]' + VARNAME, Name.Variable),
            # prefixed names ::
            (r'(' + PN_PREFIX + ')?(\:)(' + PN_LOCAL + ')?',
             bygroups(Name.Namespace, Punctuation, Name.Tag)),
            # function names ::
            (r'((?i)str|lang|langmatches|datatype|bound|iri|uri|bnode|rand|abs|'
             r'ceil|floor|round|concat|strlen|ucase|lcase|encode_for_uri|'
             r'contains|strstarts|strends|strbefore|strafter|year|month|day|'
             r'hours|minutes|seconds|timezone|tz|now|md5|sha1|sha256|sha384|'
             r'sha512|coalesce|if|strlang|strdt|sameterm|isiri|isuri|isblank|'
             r'isliteral|isnumeric|regex|substr|replace|exists|not\s+exists|'
             r'count|sum|min|max|avg|sample|group_concat|separator)\b',
             Name.Function),
            # boolean literals ::
            (r'(true|false)', Keyword.Constant),
            # double literals ::
            (r'[+\-]?(\d+\.\d*' + EXPONENT + '|\.?\d+' + EXPONENT + ')', Number.Float),
            # decimal literals ::
            (r'[+\-]?(\d+\.\d*|\.\d+)', Number.Float),
            # integer literals ::
            (r'[+\-]?\d+', Number.Integer),
            # operators ::
            (r'(\|\||&&|=|\*|\-|\+|/|!=|<=|>=|!|<|>)', Operator),
            # punctuation characters ::
            (r'[(){}.;,:^\[\]]', Punctuation),
            # line comments ::
            (r'#[^\n]*', Comment),
            # strings ::
            (r'"""', String, 'triple-double-quoted-string'),
            (r'"', String, 'single-double-quoted-string'),
            (r"'''", String, 'triple-single-quoted-string'),
            (r"'", String, 'single-single-quoted-string'),
        ],
        'triple-double-quoted-string': [
            (r'"""', String, 'end-of-string'),
            (r'[^\\]+', String),
            (r'\\', String, 'string-escape'),
        ],
        'single-double-quoted-string': [
            (r'"', String, 'end-of-string'),
            (r'[^"\\\n]+', String),
            (r'\\', String, 'string-escape'),
        ],
        'triple-single-quoted-string': [
            (r"'''", String, 'end-of-string'),
            (r'[^\\]+', String),
            # NOTE(review): this state tags the backslash String.Escape
            # while the other three states use plain String -- looks like an
            # unintended inconsistency; confirm upstream before changing.
            (r'\\', String.Escape, 'string-escape'),
        ],
        'single-single-quoted-string': [
            (r"'", String, 'end-of-string'),
            (r"[^'\\\n]+", String),
            (r'\\', String, 'string-escape'),
        ],
        'string-escape': [
            # \uXXXX / \UXXXXXXXX escapes, otherwise any single character.
            (r'u' + HEX + '{4}', String.Escape, '#pop'),
            (r'U' + HEX + '{8}', String.Escape, '#pop'),
            (r'.', String.Escape, '#pop'),
        ],
        'end-of-string': [
            # optional language tag (@en-US) or datatype marker (^^) after
            # a literal; either way pop both string states.
            (r'(@)([a-zA-Z]+(?:-[a-zA-Z0-9]+)*)',
             bygroups(Operator, Name.Function), '#pop:2'),
            (r'\^\^', Operator, '#pop:2'),
            default('#pop:2'),
        ],
    }
class TurtleLexer(RegexLexer):
"""
Lexer for `Turtle <http://www.w3.org/TR/turtle/>`_ data language.
.. versionadded:: 2.1
"""
name = 'Turtle'
aliases = ['turtle']
filenames = ['*.ttl']
mimetypes = ['text/turtle', 'application/x-turtle']
flags = re.IGNORECASE
patterns = {
'PNAME_NS': r'((?:[a-z][\w-]*)?\:)', # Simplified character range
'IRIREF': r'(<[^<>"{}|^`\\\x00-\x20]*>)'
}
# PNAME_NS PN_LOCAL (with simplified character range)
patterns['PrefixedName'] = r'%(PNAME_NS)s([a-z][\w-]*)' % patterns
tokens = {
'root': [
(r'\s+', Whitespace),
# Base / prefix
(r'(@base|BASE)(\s+)%(IRIREF)s(\s*)(\.?)' % patterns,
bygroups(Keyword, Whitespace, Name.Variable, Whitespace,
Punctuation)),
(r'(@prefix|PREFIX)(\s+)%(PNAME_NS)s(\s+)%(IRIREF)s(\s*)(\.?)' % patterns,
bygroups(Keyword, Whitespace, Name.Namespace, Whitespace,
Name.Variable, Whitespace, Punctuation)),
# The shorthand predicate 'a'
(r'(?<=\s)a(?=\s)', Keyword.Type),
# IRIREF
(r'%(IRIREF)s' % patterns, Name.Variable),
# PrefixedName
(r'%(PrefixedName)s' % patterns,
bygroups(Name.Namespace, Name.Tag)),
# Comment
(r'#[^\n]+', Comment),
(r'\b(true|false)\b', Literal),
(r'[+\-]?\d*\.\d+', Number.Float),
(r'[+\-]?\d*(:?\.\d+)?E[+\-]?\d+', Number.Float),
(r'[+\-]?\d+', Number.Integer),
(r'[\[\](){}.;,:^]', Punctuation),
(r'"""', String, 'triple-double-quoted-string'),
(r'"', String, 'single-double-quoted-string'),
(r"'''", String, 'triple-single-quoted-string'),
(r"'", String, 'single-single-quoted-string'),
],
'tripl |
from __future__ import print_function
# Time: O(n)
# Space: O(1)
#
# Given a sorted linked list, delete all nodes that have duplicate numbers,
# leaving only distinct numbers from the original list.
#
# For example,
# Given 1->2->3->3->4->4->5, return 1->2 | ->5.
# Given 1->1->1->2->3, return 2->3.
#
# Definition for singly-linked list.
class ListNode:
    """Singly-linked list node holding a value and a pointer to the next node."""

    def __init__(self, x):
        self.val = x
        self.next = None  # tail nodes keep next as None

    def __repr__(self):
        # BUG FIX: the old code tested ``self is None``, which is never true
        # for a bound method call, and then recursed via ``repr(self.next)``;
        # for the tail that invoked the builtin repr of None, so the intended
        # "Nil" terminator was unreachable. Test next explicitly instead.
        if self.next is None:
            return "{} -> Nil".format(self.val)
        return "{} -> {}".format(self.val, repr(self.next))
class Solution(object):
    def deleteDuplicates(self, head):
        """
        Remove every value that occurs more than once in a sorted list,
        keeping only the values that appeared exactly once.

        :type head: ListNode
        :rtype: ListNode
        """
        # Dummy head simplifies dropping duplicates at the front of the list.
        dummy = ListNode(0)
        pre, cur = dummy, head
        while cur:
            if cur.next and cur.next.val == cur.val:
                val = cur.val;
                # Skip the entire run of nodes sharing this value.
                while cur and cur.val == val:
                    cur = cur.next
                # Splice the run out; pre stays on the last kept node.
                pre.next = cur
            else:
                # Unique value: keep the node and advance both pointers.
                pre.next = cur
                pre = cur
                cur = cur.next
        return dummy.next
if __name__ == "__main__":
    # Build 1->2->3->3->4->4->5 and strip the duplicated runs (3s and 4s).
    values = [1, 2, 3, 3, 4, 4, 5]
    head = ListNode(values[0])
    tail = head
    for v in values[1:]:
        tail.next = ListNode(v)
        tail = tail.next
    print(Solution().deleteDuplicates(head))
|
f conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module containing the wiki service (WikiService class).
This service is used to convert some text to HTML. The converting
process, its rules, are described in the class documentation.
"""
import re
from service import Service
class WikiService(Service):
"""Class describing the wiki service.
This service is made to manipulate text and convert it to HTML format.
The converting rules are highly customizable by inheriting from this
class. Most of the constants used to convert text to HTML
are created in the constructor. Therefore, others may be added
very simply.
Here is a short example that shows how to add a new markup
to the wiki syntax. You can add a new 'wiki.py' file in your
'services' package placed in your bundle, and then paste the following
code into the file:
>>> from ext.aboard.service.default import WikiService
    >>> class Wiki(WikiService):  # It's important to name that class Wiki
    ...     def __init__(self):
    ...         WikiService.__init__(self)
    ...         self.add_markup("bold", "b", "<strong>{string}</strong>")
    You can also change the delimiters for the markup, add new regular
    expressions, delete markups, etc. Have a look at the class methods.
"""
name = "wiki"
def __init__(self, start="<", end=">", close="/"):
    """Service constructor.

    Expected arguments:
        start -- opening delimiter of a wiki markup (default "<")
        end -- closing delimiter of a wiki markup (default ">")
        close -- closing-tag marker placed after `start` (default "/")
    """
    Service.__init__(self)
    self.markup_delimiter_start = start
    self.markup_delimiter_end = end
    self.markup_delimiter_close = close
    self.expressions = []
    self.exceptions = []
    # Add the exceptions (spans that must not be interpreted).
    # NOTE(review): add_except_markup is not defined in this chunk --
    # presumably provided elsewhere in the class; confirm.
    self.add_except_expression("@", "@")
    self.add_except_markup("pre")
    # Add the expressions and markups
    self.add_expression("italic", "/(.*?)/", "<em>\\1</em>")
    self.add_expression("bold", r"\*(.*?)\*", "<strong>\\1</strong>")
    self.add_expression(
        "header1",
        r"^(\s*)h1\.\s+(.*?)\s*$",
        r"\1<h1>\2</h1>",
        re.MULTILINE
    )
    # BUG FIX: removed the leftover sample text that was assigned to a
    # local 'text' variable; it was explicitly marked "Test (to delete)"
    # and was never used.
def add_expression(self, name, regexp, replacement, options=0):
    """Register a new conversion rule.

    The regular expression is compiled and stored, together with its
    replacement, in ``self.expressions``.

    Expected arguments:
        name -- unique identifier of the rule (lower-cased)
        regexp -- the regular expression to compile
        replacement -- the substitution text
        options [optional] -- flags forwarded to ``re.compile``

    An expression name must be unique: to change the rule behind an
    existing identifier (for instance to redefine how bold text is
    produced), use the 'replace_expression' method instead.
    """
    key = name.lower()
    registered = set(entry[0] for entry in self.expressions)
    if key in registered:
        raise ValueError("the identifier {} already exists in the " \
                "expression list. Use the 'replace_expression' " \
                "method to replace it".format(repr(key)))
    self.expressions.append((key, re.compile(regexp, options), replacement))
def replace_expressions(self, name, regexp, replacement, options=0):
    """Replace an existing expression using its identifier.

    The expected arguments are the same as the 'add_expression' method.
    Instead of simply adding a new expression, though, it first deletes
    the expression registered under this name. This is very useful to
    define a new rule for certain formatting.

    Raises ValueError if the identifier is unknown.
    """
    name = name.lower()
    names = [line[0] for line in self.expressions]
    if name not in names:
        raise ValueError("the identifier {} doesn't exists in the " \
                "expression list. Use the 'add_expression' " \
                "method to add it".format(repr(name)))
    compiled = re.compile(regexp, options)
    # BUG FIX: lists have no 'find' method; 'index' is the correct call
    # (the membership test above guarantees it succeeds).
    exp_pos = names.index(name)
    del self.expressions[exp_pos]
    self.expressions.insert(exp_pos, (name, compiled, replacement))
def remove_expression(self, name):
    """Remove the expression identified by its name.

    Raises ValueError if the identifier is unknown.
    """
    name = name.lower()
    names = [line[0] for line in self.expressions]
    if name not in names:
        raise ValueError("the identifier {} doesn't exists in the " \
                "expression list.".format(repr(name)))
    # BUG FIX: lists have no 'find' method; 'index' is the correct call
    # (the membership test above guarantees it succeeds).
    exp_pos = names.index(name)
    del self.expressions[exp_pos]
def add_except_expression(self, start, end, options=0):
    """Add an expression for a Wiki exception.

    Exceptions are spans that must not be interpreted. If this expression
    is found, it is deleted and its content (the second group) is copied
    into a temporary field and pasted back into the original text,
    unchanged, at the end of the process.

    Note: only the (start, end, options) triple is stored here; the
    matching and restoration presumably happen in the conversion code
    elsewhere in this class -- confirm against that code.
    """
    self.exceptions.append((start, end, options))
def add_markup(self, name, markup, html):
    """Register a new HTML-like wiki markup.

    A markup opens with markup_delimiter_start + markup +
    markup_delimiter_end and closes with the same sequence carrying
    markup_delimiter_close before the markup name. These three delimiters
    are instance attributes set in the constructor, so a subclass can pick
    other symbols (brackets, for instance).

    Note: the 'html' parameter should contain the '{string}' placeholder
    marking where the matched text goes. For instance:
        >>> wiki.add_markup("italic", "i", "<em>{string}</em>")
    turns:
        We <i>made</i> it!
    into:
        We <em>made</em> it!
    """
    opening = self.markup_delimiter_start + markup + self.markup_delimiter_end
    closing = (self.markup_delimiter_start + self.markup_delimiter_close +
               markup + self.markup_delimiter_end)
    pattern = opening + "(.*?)" + closing
    self.add_expression(name, pattern, html.format(string="\\1"))
def replace_markup(self, name, markup, html):
"""Replace the identified by markup.
The expected arguments are the same ones as t |
OR PERFORMANCE OF THIS SOFTWARE.
"""
from libnl.attr import nla_policy, NLA_U16, NLA_U32, NLA_U64, NLA_U8
from libnl.misc import c_int8, c_uint8, SIZEOF_S8, SIZEOF_U8
from libnl.nl80211 import nl80211
from libnl.nl80211.iw_util import ampdu_space, get_ht_capability, get_ht_mcs, get_ssid
# IEEE 802.11 management-frame capability bits (mirrors iw's scan.c).
WLAN_CAPABILITY_ESS = 1 << 0
WLAN_CAPABILITY_IBSS = 1 << 1
WLAN_CAPABILITY_CF_POLLABLE = 1 << 2
WLAN_CAPABILITY_CF_POLL_REQUEST = 1 << 3
WLAN_CAPABILITY_PRIVACY = 1 << 4
WLAN_CAPABILITY_SHORT_PREAMBLE = 1 << 5
WLAN_CAPABILITY_PBCC = 1 << 6
WLAN_CAPABILITY_CHANNEL_AGILITY = 1 << 7
WLAN_CAPABILITY_SPECTRUM_MGMT = 1 << 8
WLAN_CAPABILITY_QOS = 1 << 9
WLAN_CAPABILITY_SHORT_SLOT_TIME = 1 << 10
WLAN_CAPABILITY_APSD = 1 << 11
WLAN_CAPABILITY_RADIO_MEASURE = 1 << 12
WLAN_CAPABILITY_DSSS_OFDM = 1 << 13
WLAN_CAPABILITY_DEL_BACK = 1 << 14
WLAN_CAPABILITY_IMM_BACK = 1 << 15
# DMG (60gHz) 802.11ad
WLAN_CAPABILITY_DMG_TYPE_MASK = 3 << 0
WLAN_CAPABILITY_DMG_TYPE_IBSS = 1 << 0  # Tx by: STA
WLAN_CAPABILITY_DMG_TYPE_PBSS = 2 << 0  # Tx by: PCP
WLAN_CAPABILITY_DMG_TYPE_AP = 3 << 0  # Tx by: AP
WLAN_CAPABILITY_DMG_CBAP_ONLY = 1 << 2
WLAN_CAPABILITY_DMG_CBAP_SOURCE = 1 << 3
WLAN_CAPABILITY_DMG_PRIVACY = 1 << 4
WLAN_CAPABILITY_DMG_ECPAC = 1 << 5
WLAN_CAPABILITY_DMG_SPECTRUM_MGMT = 1 << 8
WLAN_CAPABILITY_DMG_RADIO_MEASURE = 1 << 12
IEEE80211_COUNTRY_EXTENSION_ID = 201
# Membership selector values carried in the (extended) supported rates IE.
BSS_MEMBERSHIP_SELECTOR_VHT_PHY = 126
BSS_MEMBERSHIP_SELECTOR_HT_PHY = 127
# Vendor OUIs used to recognize cipher/auth suite selectors.
ms_oui = b'\x00\x50\xf2'
ieee80211_oui = b'\x00\x0f\xac'
wfa_oui = b'\x50\x6f\x9a'
# Human-readable label for the country-IE environment byte.
country_env_str = lambda e: {'I': 'Indoor only', 'O': 'Outdoor only', ' ': 'Indoor/Outdoor'}.get(e, 'bogus')
# Human-readable label for a WPS device password id.
wifi_wps_dev_passwd_id = lambda e: {0: 'Default (PIN)', 1: 'User-specified', 2: 'Machine-specified', 3: 'Rekey',
                                    4: 'PushButton', 5: 'Registrar-specified'}.get(e, '??')
# HT secondary-channel offset labels, indexed by the 2-bit field value.
ht_secondary_offset = ('no secondary', 'above', '[reserved!]', 'below')
class ieee80211_country_ie_triplet(object):
    """http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n60."""

    def __init__(self, data):
        """Constructor.

        Parses three bytes of a country-IE triplet. Each byte is exposed
        under two attribute names, mirroring the C union in iw's scan.c:
        the caller picks the 'chans' or 'ext' interpretation.
        """
        # Byte 1: first channel number, or the regulatory extension id.
        self.first_channel = c_uint8.from_buffer(data[:SIZEOF_U8]).value
        self.reg_extension_id = self.first_channel
        data = data[SIZEOF_U8:]
        # Byte 2: number of channels, or the regulatory class.
        self.num_channels = c_uint8.from_buffer(data[:SIZEOF_U8]).value
        self.reg_class = self.num_channels
        data = data[SIZEOF_U8:]
        # Byte 3: max power (signed dBm) or coverage class (unsigned) --
        # same byte read with both signednesses.
        self.max_power = c_int8.from_buffer(data[:SIZEOF_S8]).value
        self.coverage_class = c_uint8.from_buffer(data[:SIZEOF_U8]).value
        # Both union views point back at this object.
        self.chans = self.ext = self
def get_supprates(_, data):
    """http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n227.

    Positional arguments:
    data -- bytearray data to read.
    """
    rates = list()
    for octet in data:
        rate = octet & 0x7f
        basic = bool(octet & 0x80)
        if basic and rate == BSS_MEMBERSHIP_SELECTOR_VHT_PHY:
            label = 'VHT'
        elif basic and rate == BSS_MEMBERSHIP_SELECTOR_HT_PHY:
            label = 'HT'
        else:
            # Rate is stored in 500 kb/s units: whole Mb/s plus ".5" or ".0".
            label = '{0}.{1}'.format(int(rate / 2), int(5 * (rate & 1)))
        # The high bit marks a basic ("*") rate.
        rates.append('{0}{1}'.format(label, '*' if basic else ''))
    return rates
def get_country(_, data):
    """http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n267.

    Positional arguments:
    data -- bytearray data to read.

    Returns:
    Dict.
    """
    # Byte 2 of the country IE is the environment ('I'/'O'/' ').
    answers = {'Environment': country_env_str(chr(data[2]))}
    data = data[3:]
    # Walk the remaining 3-byte triplets.
    while len(data) >= 3:
        triplet = ieee80211_country_ie_triplet(data)
        # First byte >= 201 marks a regulatory-extension triplet.
        if triplet.ext.reg_extension_id >= IEEE80211_COUNTRY_EXTENSION_ID:
            answers['Extension ID'] = triplet.ext.reg_extension_id
            answers['Regulatory Class'] = triplet.ext.reg_class
            answers['Coverage class'] = triplet.ext.coverage_class
            answers['up to dm'] = triplet.ext.coverage_class * 450
            data = data[3:]
            continue
        # Channel triplet: channels are 1 apart below 14 (2 GHz), 4 apart
        # above (5 GHz numbering).
        if triplet.chans.first_channel <= 14:  # 2 GHz.
            end_channel = triplet.chans.first_channel + (triplet.chans.num_channels - 1)
        else:
            end_channel = triplet.chans.first_channel + (4 * (triplet.chans.num_channels - 1))
        # NOTE(review): later triplets overwrite earlier ones in the dict --
        # only the last channel triplet survives; confirm this is intended.
        answers['Channels dBm'] = triplet.chans.max_power
        answers['Channels'] = (triplet.chans.first_channel, end_channel)
        data = data[3:]
    return answers
def get_erp(_, data):
    """http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n323.

    Positional arguments:
    data -- bytearray data to read.

    Returns:
    String.
    """
    # BUG FIX: the final return carried a stray '|' corruption token which
    # broke the syntax; restored to a plain empty-string return.
    # NOTE(review): only the first flag set is reported (sequential
    # returns); confirm whether multiple flags should be joined instead.
    if data[0] == 0x00:
        return '<no flags>'
    if data[0] & 0x01:
        return 'NonERP_Present'
    if data[0] & 0x02:
        return 'Use_Protection'
    if data[0] & 0x04:
        return 'Barker_Preamble_Mode'
    return ''
def get_cipher(data):
    """http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n336.

    Positional arguments:
    data -- bytearray data to read.

    Returns:
    WiFi stream cipher used by the access point (string).
    """
    oui = bytes(data[:3])
    legend = {0: 'Use group cipher suite', 1: 'WEP-40', 2: 'TKIP', 4: 'CCMP', 5: 'WEP-104', }
    if oui == ieee80211_oui:
        # The IEEE OUI defines two extra suite selectors.
        legend[6] = 'AES-128-CMAC'
        legend[8] = 'GCMP'
        key = data[3]
    elif oui == ms_oui:
        key = data[3]
    else:
        # Unknown OUI: force the fall-through hex representation.
        key = None
    return legend.get(key, '{0:02x}-{1:02x}-{2:02x}:{3}'.format(data[0], data[1], data[2], data[3]))
def get_auth(data):
    """http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n393.

    Positional arguments:
    data -- bytearray data to read.

    Returns:
    WiFi authentication method used by the access point (string).
    """
    oui = bytes(data[:3])
    # NOTE(review): the stray '"' in 'IEEE 802.1X"' is reproduced as-is;
    # looks like a typo -- confirm against upstream before fixing.
    legend = {1: 'IEEE 802.1X"', 2: 'PSK', }
    if oui == ieee80211_oui:
        # The IEEE OUI defines additional AKM suite selectors.
        legend.update({3: 'FT/IEEE 802.1X', 4: 'FT/PSK', 5: 'IEEE 802.1X/SHA-256', 6: 'PSK/SHA-256', 7: 'TDLS/TPK', })
        key = data[3]
    elif oui == ms_oui:
        key = data[3]
    else:
        # Unknown OUI: force the fall-through hex representation.
        key = None
    return legend.get(key, '{0:02x}-{1:02x}-{2:02x}:{3}'.format(data[0], data[1], data[2], data[3]))
def get_rsn_ie(defcipher, defauth, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n441.
Positional arguments:
defcipher -- default cipher if not in data (string).
defauth -- default authentication suites if not in data (string).
data -- bytearray data to read.
Returns:
Dict.
"""
answers = dict()
answers['version'] = data[0] + (data[1] << 8)
data = data[2:]
if len(data) < 4:
answers['group_cipher'] = answers['pairwise_ciphers'] = defcipher
return answers
answers['group_cipher'] = get_cipher(data)
data = data[4:]
if len(data) < 2:
answers['pairwise_ciphers'] = defcipher
return answers
count = data[0] | (data[1] << 8)
if 2 + (count * 4) > len(data):
answers['bogus tail data'] = data
return answers
answers['pairwise_ciphers'] = ' '.join(get_cipher(data[2 + (i * 4):]) for i in range(count))
data = data[2 + (count * 4):]
if len(data) < 2:
answers['authentication_suites'] = defauth
return answers
count = data[0] | (data[1] << 8)
if 2 + (count * 4) > len(data):
answers['bogus tail data'] = data
return answers
answers['authentication_suites'] = ' '.join(get_auth(data[2 + (i * 4):]) for i in range(count))
data = data[2 + (count * 4):]
if len(data) >= 2:
capa = data[0] | (data[1] << 8)
answers['rsn_ie_capabilities'] = list()
if capa & 0x0001:
answers['rsn_ie_capabilities'].append('PreAuth')
if capa & 0x0002:
answers['rsn_ie_capabilities'].append('NoPairwise')
case = {0: '1-PTKSA-RC', 1: '2-PTKSA-RC', 2: '4-PTKSA-RC', 3: '16-PTKSA-RC'}.get((capa & 0x000c) >> 2)
if case:
answers['rsn_ie_capabilities'].append(case)
case = {0: '1-GTKSA-RC', 1: '2-GTKSA-RC', 2: '4-GTKSA-RC', 3: '16-GTKSA-RC'}.get((capa & 0x0030) >> 4)
if case:
answers['rsn_ie_capabilities'].append(case)
if capa & 0x0040:
|
#
# Project: MXCuBE
# https://github.com/mxcube
#
# This file is part of MXCuBE software.
#
# MXCuBE is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MXCuBE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with MXCuBE. If not, see <http://www.gnu.org/licenses/>.
import logging
from gui.utils import QtImport
from gui.BaseComponents import BaseWidget
from gui.widgets.task_toolbox_widget import TaskToolBoxWidget
from HardwareRepository import HardwareRepository as HWR
__credits__ = ["MXCuBE collaboration"]
__license__ = "LGPLv3+"
__category__ = "General"
class TaskToolBoxBrick(BaseWidget):
request_tree_brick = QtImport.pyqtSignal()
def __init__(self, *args):
    """Build the brick: declare its qt properties, signals and slots and
    lay out the embedded TaskToolBoxWidget."""
    BaseWidget.__init__(self, *args)
    # Internal values -----------------------------------------------------
    self.ispyb_logged_in = False
    self.tree_brick = None
    # Properties ----------------------------------------------------------
    self.add_property("useOscStartCbox", "boolean", False)
    self.add_property("useCompression", "boolean", False)
    #self.add_property("availableTasks", "string", "discrete char helical")
    self.add_property("showDiscreetTask", "boolean", True)
    self.add_property("showHelicalTask", "boolean", True)
    self.add_property("showCharTask", "boolean", True)
    self.add_property("showAdvancedTask", "boolean", True)
    self.add_property("showStillScanTask", "boolean", False)
    self.add_property("showCollectNowButton", "boolean", False)
    # Signals -------------------------------------------------------------
    self.define_signal("request_tree_brick", ())
    # Slots ---------------------------------------------------------------
    self.define_slot("logged_in", ())
    self.define_slot("set_session", ())
    self.define_slot("selection_changed", ())
    self.define_slot("user_group_saved", ())
    self.define_slot("set_tree_brick", ())
    # Graphic elements ----------------------------------------------------
    self.task_tool_box_widget = TaskToolBoxWidget(self)
    # Layout --------------------------------------------------------------
    self.main_layout = QtImport.QVBoxLayout(self)
    self.main_layout.addWidget(self.task_tool_box_widget)
    self.main_layout.setSpacing(0)
    self.main_layout.setContentsMargins(0, 0, 0, 0)
    self.setLayout(self.main_layout)
    # SizePolicies --------------------------------------------------------
    # self.setSizePolicy(QtImport.QSizePolicy.MinimumExpanding,
    #                    QtImport.QSizePolicy.MinimumExpanding)
    # Other ---------------------------------------------------------------
    HWR.beamline.sample_view.connect("pointSelected", self.point_selected)
def set_expert_mode(self, expert):
    """Forward the expert-mode flag to the embedded task toolbox widget."""
    self.task_tool_box_widget.set_expert_mode(expert)
def run(self):
    """Called when the brick starts: enable the widget if a LIMS session
    exists, ask for the tree brick and fit the toolbox to our width."""
    if HWR.beamline.session.session_id:
        self.setEnabled(True)
    #self.task_tool_box_widget.set_available_tasks(self["availableTasks"])
    self.request_tree_brick.emit()
    self.task_tool_box_widget.adjust_width(self.width())
def user_group_saved(self, new_user_group):
    """Store the new user group in the session, refresh the data-path
    model and log the resulting image directory."""
    group = str(new_user_group)
    HWR.beamline.session.set_user_group(group)
    self.task_tool_box_widget.update_data_path_model()
    base_dir = HWR.beamline.session.get_base_image_directory()
    path = base_dir + "/" + group
    msg = "Image path is: %s" % path
    logging.getLogger("GUI").info(msg)
@QtImport.pyqtSlot(BaseWidget)
def set_tree_brick(self, brick):
    """Store the tree brick and forward it to the task toolbox widget."""
    self.tree_brick = brick
    # "useCompression" is a boolean property; comparing to 1 treats a
    # true value as compression enabled.
    self.tree_brick.compression_state = self["useCompression"] == 1
    self.task_tool_box_widget.set_tree_brick(brick)
@QtImport.pyqtSlot(int, str, str, int, str, str, bool)
def set_session(
    self,
    session_id,
    t_prop_code=None,
    prop_number=None,
    prop_id=None,
    start_date=None,
    prop_code=None,
    is_inhouse=None,
):
    """
    Connected to the slot set_session and is called after a
    request to get the current session from LIMS (ISPyB) is
    made. The signal is normally emitted by the brick that
    handles LIMS login, ie ProposalBrick.

    The session_id is '' if no session could be retrieved.
    """
    # BUG FIX: 'session_id is ""' compared identity with a literal, which
    # depends on string interning (and is a SyntaxWarning on modern
    # Python); use equality instead.
    # NOTE(review): invoking logged_in(True) when NO session was retrieved
    # looks inverted -- confirm against ProposalBrick's contract.
    if session_id == "":
        self.logged_in(True)
@QtImport.pyqtSlot(bool)
def logged_in(self, logged_in):
    """
    Handles the signal logged_in from the brick that handles
    LIMS (ISPyB) login, ie ProposalBrick. The signal is
    emitted when a user was successfully logged in.
    """
    # NOTE(review): the parameter is unconditionally overridden to True,
    # so the brick always behaves as if the user is logged in -- looks
    # like a leftover debug hack; confirm before removing.
    logged_in = True
    # BUG FIX: this assignment was garbled by a stray '|' token
    # ('ispyb_lo | gged_in') in the previous revision.
    self.ispyb_logged_in = logged_in
    if HWR.beamline.session is not None:
        HWR.beamline.session.set_user_group("")
    self.setEnabled(logged_in)
    self.task_tool_box_widget.ispyb_logged_in(logged_in)
def property_changed(self, property_name, old_value, new_value):
    """React to a qt property change: toggle widget features, or hide the
    task page whose "show...Task" property was switched off."""
    widget = self.task_tool_box_widget
    if property_name == "useOscStartCbox":
        widget.use_osc_start_cbox(new_value)
    elif property_name == "useCompression":
        widget.enable_compression(new_value)
    elif property_name == "showCollectNowButton":
        widget.collect_now_button.setVisible(new_value)
    else:
        # Each "show...Task" property maps to one toolbox page attribute;
        # a false value hides that page (there is no un-hide path here).
        page_attrs = {
            "showDiscreetTask": "discrete_page",
            "showHelicalTask": "helical_page",
            "showCharTask": "char_page",
            "showAdvancedTask": "advanced_page",
            "showStillScanTask": "still_scan_page",
        }
        page_attr = page_attrs.get(property_name)
        if page_attr is not None and not new_value:
            widget.hide_task(getattr(widget, page_attr))
def selection_changed(self, items):
    """
    Connected to the signal "selection_changed" of the TreeBrick.
    Called when the selection in the tree changes.
    """
    # Forward the selected tree items to the task toolbox.
    self.task_tool_box_widget.selection_changed(items)
def point_selected(self, selected_position):
self.task_tool_box_widget.helical_page.centred_position_selection(
selected_position
)
self.task_tool_box_widget.discrete_page.centred_position_selection(
selected_position
)
self.task_tool_box_widget.char_page.centred_position_selection(
selected_position
)
self.task_tool_box_widget.energy_scan_page.centred_position_selection(
selected_position
)
self.task_tool_box_widget.xrf_spectrum_page.centred_position_selection(
selected_position
)
self.task_tool_box_widget.discrete_page.refresh_current_item()
self.task_tool_box_widget.helical_page.refresh_current_item()
self.task_tool_box_widget.char_page.refresh_current_item()
self.task_tool_box_widget.energy_scan_page.refresh_current_item()
self.task_tool_box_widget.xrf_spectrum_page.refresh |
from django.conf.urls import patterns, url

from . import views

# URL routes for the customer views: the list page and a per-pk detail page.
# BUG FIX: 'from . import views' and the second 'url(...)' call were garbled
# by stray '|' tokens in the previous revision.
# NOTE(review): 'patterns()' was removed in Django 1.10; kept because this
# file targets the legacy API it already imports.
urlpatterns = patterns(
    '',
    url(r'^$', views.customer_list, name='customers'),
    url(r'^(?P<pk>[0-9]+)/$', views.customer_details, name='customer-details')
)
|
#!/usr/bin/python
import numpy as np
#a = np.linspace(0.,10.,100)
#b = np.sqrt(a)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import csv
def import_text(filename, separator):
    """Yield each non-empty row of *filename*, split on *separator*.

    Spaces following the separator are skipped (skipinitialspace), and
    blank lines are dropped.
    """
    reader = csv.reader(open(filename), delimiter=separator,
                        skipinitialspace=True)
    for row in reader:
        if row:
            yield row
def to_num(s):
    """Parse *s* as an int when possible, otherwise as a float."""
    try:
        return int(s)
    except ValueError:
        return float(s)
def to_float(s):
    """Parse *s* as a float, falling back to int parsing on failure."""
    try:
        value = float(s)
    except ValueError:
        # Fallback kept for parity with the original; float() accepts
        # every string int() does, so this branch is effectively unused.
        value = int(s)
    return value
def column(matrix, i):
    """Return column *i* of *matrix* (a list of rows) as a list."""
    col = []
    for row in matrix:
        col.append(row[i])
    return col
def bandwidth(timings, sizes):
    """Return the transfer bandwidth in GB/s for each timing/size pair.

    The factor 2 counts each byte as both read and written (copy).
    """
    result = []
    for idx, timing in enumerate(timings):
        gbps = (2 * to_float(sizes[idx])) / (to_float(timing) * 1000000000.0)
        result.append(gbps)
    return result
#read data
# cudaMemcpy timing table: row 0 is the header; column 0 is the readable
# size label, column 1 the size in bytes, columns 2+ per-type timings.
table = []
for data in import_text('./0_cudamemcpy_offset1.dat', ' '):
    table.append(data)
#print column(table, 0)[1:]
size = column(table, 1)[1:]
size_string = column(table, 0)[1:]
#print size_string
# data
char_t = column(table, 2)[1:]
#short_t = column(table, 3)[1:]
#float_t = column(table, 4)[1:]
#double_t = column(table, 5)[1:]
#float3_t = column(table, 6)[1:]
#float4_t = column(table, 7)[1:]
# Only the char column of the cudaMemcpy table is plotted.
char_bw = bandwidth(char_t, size)
#short_bw = bandwidth(short_t, size)
#float_bw = bandwidth(float_t, size)
#double_bw = bandwidth(double_t, size)
#float3_bw = bandwidth(float3_t, size)
#float4_bw = bandwidth(float4_t, size)
# read other table
# d2d_direct kernel results; same layout.  NOTE: the sizes from the first
# table are reused below rather than re-read from this file.
di_table = []
for di_data in import_text('./1_direct_offset1.dat', ' '):
    di_table.append(di_data)
#print column(table, 0)[1:]
#size_string = column(table, 0)[1:]
#print size_string
# data
di_char_t = column(di_table, 2)[1:]
di_short_t = column(di_table, 3)[1:]
di_float_t = column(di_table, 4)[1:]
di_double_t = column(di_table, 5)[1:]
di_float3_t = column(di_table, 6)[1:]
di_float4_t = column(di_table, 7)[1:]
di_char_bw = bandwidth(di_char_t, size)
di_short_bw = bandwidth(di_short_t, size)
di_float_bw = bandwidth(di_float_t, size)
di_double_bw = bandwidth(di_double_t, size)
di_float3_bw = bandwidth(di_float3_t, size)
di_float4_bw = bandwidth(di_float4_t, size)
# Build the x-axis: every entry is overwritten with its 1-based index so
# the samples are evenly spaced; the readable labels are applied below.
# NOTE(review): `size` holds strings, so np.array(size) is a string array
# and `size_np[i] = i+1` stores the string form of the index -- confirm
# matplotlib handles this as intended (Python 2 script).
size_np = np.array(size)
# normalize the size
for i in range(0, len(size)):
    size_np[i] = i+1
#    size_np[len(size)-1-i] = to_num(to_num(size_np[len(size)-1-i])/to_num(size_np[0])) #to_float(size[i])/to_float(size[0])
#print to_float(size[11])
#print to_float(float4_t[11])
#print (to_float(2*sizes[i])/(to_float(timings[i])*1000000000.0))
#print char_bw
#print float_bw
#print float
# start drawing
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title("cuMemcpy v.s. d2d_direct_kernel (address not aligned)");
ax.set_xlabel(table[0][0])
ax.set_ylabel('Bandwidth (GB/sec)')
#print len(size_string)
#print len(char_bw)
fig.add_subplot(ax)
#ax.set_ylim([180,260])
print size_np
print size_string
#ax.set_xticklabels(size_np, range(len(size_np)))
# Label the evenly spaced x positions with the human-readable sizes.
ax.set_xticklabels(size_string)
#fig.xticks(size_np, size_string)
#ax.set_xticks(size_np, ('128K', '256K', '512K', '1M', '2M', '4M', '8M', '16M', '32M', '64M'))
#ax.set_autoscaley_on(False)
# Solid line: cudaMemcpy baseline; dotted lines: direct-kernel variants.
ax.plot(size_np, char_bw, linestyle = '-', color = 'blue', marker='o', linewidth = 1, label='cudaMemcpy')
#ax.plot(size, short_bw, linestyle = '-', color = 'red', linewidth = 1, label='cudaMemcpy_short')
#ax.plot(size, float_bw, linestyle = '-', color = 'c', linewidth = 1, label='cudaMemcpy_float')
#ax.plot(size, double_bw, linestyle = '-', color = 'm', linewidth = 1, label='cudaMemcpy_double')
#ax.plot(size, float3_bw, linestyle = '-', color = 'k', linewidth = 1, label='cudaMemcpy_float3')
#ax.plot(size, float4_bw, linestyle = '-', color = 'y', linewidth = 1, label='cudaMemcpy_float4')
ax.plot(size_np, di_char_bw, linestyle = ':', color = 'blue', marker='o', linewidth = 2, label='d2d_direct_char')
ax.plot(size_np, di_short_bw, linestyle = ':', color = 'red', marker='s', linewidth = 2, label='d2d_direct_short')
ax.plot(size_np, di_float_bw, linestyle = ':', color = 'c', marker='p', linewidth = 2, label='d2d_direct_float')
ax.plot(size_np, di_double_bw, linestyle = ':', color = 'm', marker='*', linewidth = 2, label='d2d_direct_double')
ax.plot(size_np, di_float3_bw, linestyle = ':', color = 'k', marker='h', linewidth = 2, label='d2d_direct_float3')
ax.plot(size_np, di_float4_bw, linestyle = ':', color = 'y', marker='x', linewidth = 2, label='d2d_direct_float4')
size_num=range(len(size))
#print size_num
print size_string
# Shrink the axes upward to leave room for the legend below the plot.
box = ax.get_position()
ax.set_position([box.x0, box.y0+box.height*0.1, box.width, box.height*0.9])
#ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05), ncol = 6, fancybox = True, shadow = True, prop={'size':9}, )
#ax.legend(loc='upper center', ncol = 3, fancybox = True, shadow = True, prop={'size':9}, )
#ax.legend(loc='upper left', ncol = 1, fancybox = True, shadow = True, prop={'size':9}, )
ax.legend(loc='upper center', ncol = 4, bbox_to_anchor=(0.5,-0.1), fancybox = True, shadow = True, prop={'size':9}, )
# With the Agg backend show() is a no-op; the PDF below is the real output.
plt.show()
fig.savefig('cudaMemcpy_vs_d2d_offset1.pdf')
|
# A Gui interface allowing the binary illiterate to figure out the ip address the Arduino has been assigned.
import os
import re
from PySide.QtCore import QFile, QMetaObject, QSignalMapper, Slot, QRegExp
from PySide.QtGui import QDialog, QPushButton, QRegExpValidator
from PySide.QtUiTools import QUiLoader
class IPHelper(QDialog):
    """Dialog for composing an IPv4 address octet-by-octet: eight 'bit'
    toggle buttons (Trellis 0-7) set the value of whichever octet is
    selected via four selector buttons (Trellis 12-15)."""
    def __init__(self, parent=None):
        super(IPHelper, self).__init__(parent)
        # Load the Designer layout onto this instance (loadUi is defined
        # later in this module; it is bound by the time this runs).
        f = QFile(os.path.join(os.path.split(__file__)[0], 'iphelper.ui'))
        loadUi(f, self)
        f.close()
        self.ipAddress = None
        # create validators
        # NOTE(review): \d{,3} accepts any 0-3 digit string, so octet
        # values up to 999 pass validation -- confirm out-of-range input
        # is handled elsewhere.
        validator = QRegExpValidator(QRegExp('\d{,3}'))
        self.uiFirstTetTXT.setValidator(validator)
        self.uiSecondTetTXT.setValidator(validator)
        self.uiThirdTetTXT.setValidator(validator)
        self.uiFourthTetTXT.setValidator(validator)
        # build a map of the buttons
        # Buttons are named uiTrellis<N>BTN in the .ui file; index 12-15
        # are the octet selectors and are routed through the signal mapper.
        self.buttons = [None]*16
        self.signalMapper = QSignalMapper(self)
        self.signalMapper.mapped.connect(self.tetMap)
        for button in self.findChildren(QPushButton):
            match = re.findall(r'^uiTrellis(\d{,2})BTN$', button.objectName())
            if match:
                i = int(match[0])
                self.buttons[i] = button
                if i >= 12:
                    self.signalMapper.setMapping(button, i)
                    button.clicked.connect(self.signalMapper.map)
        # Start with the fourth (rightmost) octet selected.
        self.tetMap(12)
    @Slot()
    def accept(self):
        # Compose the dotted-quad string from the four octet fields.
        self.ipAddress = '{}.{}.{}.{}'.format(self.uiFirstTetTXT.text(), self.uiSecondTetTXT.text(), self.uiThirdTetTXT.text(), self.uiFourthTetTXT.text())
        super(IPHelper, self).accept()
    @Slot(int)
    def tetMap(self, index):
        """Select octet `index` (12..15) and mirror its current text value
        onto the eight bit buttons."""
        button = self.buttons[index]
        if not button.isChecked():
            return
        # Uncheck the other three octet selectors.
        for i in range(12, 16):
            b = self.buttons[i]
            if b != button:
                b.setChecked(False)
        # update the buttons to match the current value of the text
        for edit in (self.uiFirstTetTXT, self.uiSecondTetTXT, self.uiThirdTetTXT, self.uiFourthTetTXT):
            edit.setProperty('active', False)
        # Index 12 maps to the fourth (rightmost) octet, 15 to the first.
        if index == 12:
            val = int(self.uiFourthTetTXT.text())
            self.uiFourthTetTXT.setProperty('active', True)
        elif index == 13:
            val = int(self.uiThirdTetTXT.text())
            self.uiThirdTetTXT.setProperty('active', True)
        elif index == 14:
            val = int(self.uiSecondTetTXT.text())
            self.uiSecondTetTXT.setProperty('active', True)
        elif index == 15:
            val = int(self.uiFirstTetTXT.text())
            self.uiFirstTetTXT.setProperty('active', True)
        # Reflect each bit of the octet on buttons 0-7 without re-entering
        # their click handlers.
        for i in range(8):
            b = self.buttons[i]
            b.blockSignals(True)
            b.setChecked(2**i & val)
            b.blockSignals(False)
        # force a refresh of the styleSheet
        self.setStyleSheet(self.styleSheet())
    @Slot()
    def buttonPressed(self):
        """Recompute the selected octet's value from the bit buttons and
        write it into the matching text field.
        NOTE(review): no explicit connect() for this slot appears in this
        module -- presumably wired up in the .ui file; confirm."""
        total = 0
        for i in range(8):
            if self.buttons[i].isChecked():
                total += 2**i
        # unicode(): this module is Python 2.
        total = unicode(total)
        if self.uiTrellis12BTN.isChecked():
            self.uiFourthTetTXT.setText(total)
        elif self.uiTrellis13BTN.isChecked():
            self.uiThirdTetTXT.setText(total)
        elif self.uiTrellis14BTN.isChecked():
            self.uiSecondTetTXT.setText(total)
        elif self.uiTrellis15BTN.isChecked():
            self.uiFirstTetTXT.setText(total)
# Code to load a ui file like using PyQt4
# https://www.mail-archive.com/pyside@lists.openbossa.org/msg01401.html
class MyQUiLoader(QUiLoader):
    """QUiLoader variant that builds the .ui widget tree onto an existing
    base instance, mimicking PyQt4's uic.loadUi."""
    def __init__(self, baseinstance):
        super(MyQUiLoader, self).__init__()
        self.baseinstance = baseinstance
    def createWidget(self, className, parent=None, name=""):
        widget = super(MyQUiLoader, self).createWidget(className, parent, name)
        if parent is None:
            # The .ui file's top-level widget is replaced by the base
            # instance itself.
            return self.baseinstance
        else:
            # Child widgets become attributes of the base instance, as
            # PyQt4's loadUi does.
            setattr(self.baseinstance, name, widget)
            return widget
def loadUi(uifile, baseinstance=None):
    """Load a Qt Designer .ui file, PyQt4-style.

    When *baseinstance* is given, the widget tree is created onto it and
    the instance itself is returned; slots are auto-connected by name.
    """
    widget = MyQUiLoader(baseinstance).load(uifile)
    QMetaObject.connectSlotsByName(widget)
    return widget
|
,
3: 'nzbs',
4: 'eztv',
5: 'nzbmatrix',
6: 'tvnzb',
7: 'ezrss',
8: 'thepiratebay',
9: 'kat'}
    def execute(self):
        """Rebuild `history` with a TEXT provider column.

        Renames the existing table aside, recreates the schema, then has
        upgradeHistory() copy the rows across, once per histMap entry --
        see the review note on upgradeHistory.
        """
        self.connection.action("ALTER TABLE history RENAME TO history_old;")
        self.connection.action("CREATE TABLE history (action NUMERIC, date NUMERIC, showid NUMERIC, season NUMERIC, episode NUMERIC, quality NUMERIC, resource TEXT, provider TEXT);")
        for x in self.histMap.keys():
            self.upgradeHistory(x, self.histMap[x])
    def upgradeHistory(self, number, name):
        """Copy rows from history_old into the new history table,
        translating numeric provider ids through self.histMap.

        NOTE(review): `number` and `name` are never used -- every call
        copies ALL rows of history_old, so calling this once per histMap
        entry (as execute() does) appears to insert each row multiple
        times.  Confirm against the upstream migration.
        NOTE(review): a provider value that parses as int but is missing
        from histMap raises KeyError (only ValueError is caught).
        """
        oldHistory = self.connection.action("SELECT * FROM history_old").fetchall()
        for curResult in oldHistory:
            sql = "INSERT INTO history (action, date, showid, season, episode, quality, resource, provider) VALUES (?,?,?,?,?,?,?,?)"
            provider = 'unknown'
            try:
                provider = self.histMap[int(curResult["provider"])]
            except ValueError:
                # Non-numeric provider names are carried over verbatim.
                provider = curResult["provider"]
            args = [curResult["action"], curResult["date"], curResult["showid"], curResult["season"], curResult["episode"], curResult["quality"], curResult["resource"], provider]
            self.connection.action(sql, args)
class NewQualitySettings (NumericProviders):
    """Migration to the composite quality/status scheme (db_version 1).

    Converts legacy flat quality and status constants to the composite
    values in `common`, then creates and seeds the db_version table.
    """
    def test(self):
        # Already applied once the db_version table exists.
        return self.hasTable("db_version")
    def execute(self):
        backupDatabase(0)
        # old stuff that's been removed from common but we need it to upgrade
        HD = 1
        SD = 3
        ANY = 2
        BEST = 4
        ACTION_SNATCHED = 1
        ACTION_PRESNATCHED = 2
        ACTION_DOWNLOADED = 3
        PREDOWNLOADED = 3
        MISSED = 6
        BACKLOG = 7
        DISCBACKLOG = 8
        SNATCHED_BACKLOG = 10
        ### Update default quality
        if sickbeard.QUALITY_DEFAULT == HD:
            sickbeard.QUALITY_DEFAULT = common.HD
        elif sickbeard.QUALITY_DEFAULT == SD:
            sickbeard.QUALITY_DEFAULT = common.SD
        elif sickbeard.QUALITY_DEFAULT == ANY:
            sickbeard.QUALITY_DEFAULT = common.ANY
        elif sickbeard.QUALITY_DEFAULT == BEST:
            sickbeard.QUALITY_DEFAULT = common.BEST
        ### Update episode statuses
        toUpdate = self.connection.select("SELECT episode_id, location, status FROM tv_episodes WHERE status IN (?, ?, ?, ?, ?, ?, ?)", [common.DOWNLOADED, common.SNATCHED, PREDOWNLOADED, MISSED, BACKLOG, DISCBACKLOG, SNATCHED_BACKLOG])
        didUpdate = False
        for curUpdate in toUpdate:
            # remember that we changed something
            didUpdate = True
            newStatus = None
            oldStatus = int(curUpdate["status"])
            if oldStatus == common.SNATCHED:
                newStatus = common.Quality.compositeStatus(common.SNATCHED, common.Quality.UNKNOWN)
            elif oldStatus == PREDOWNLOADED:
                newStatus = common.Quality.compositeStatus(common.DOWNLOADED, common.Quality.SDTV)
            elif oldStatus in (MISSED, BACKLOG, DISCBACKLOG):
                newStatus = common.WANTED
            elif oldStatus == SNATCHED_BACKLOG:
                newStatus = common.Quality.compositeStatus(common.SNATCHED, common.Quality.UNKNOWN)
            # Use an identity test for None (was `newStatus != None`).
            if newStatus is not None:
                self.connection.action("UPDATE tv_episodes SET status = ? WHERE episode_id = ? ", [newStatus, curUpdate["episode_id"]])
                continue
            # if we get here status should be == DOWNLOADED
            if not curUpdate["location"]:
                continue
            newQuality = common.Quality.nameQuality(curUpdate["location"])
            if newQuality == common.Quality.UNKNOWN:
                newQuality = common.Quality.assumeQuality(curUpdate["location"])
            self.connection.action("UPDATE tv_episodes SET status = ? WHERE episode_id = ?", [common.Quality.compositeStatus(common.DOWNLOADED, newQuality), curUpdate["episode_id"]])
        # if no updates were done then the backup is useless
        if didUpdate:
            os.remove(db.dbFilename(suffix='v0'))
        ### Update show qualities
        toUpdate = self.connection.select("SELECT * FROM tv_shows")
        for curUpdate in toUpdate:
            if not curUpdate["quality"]:
                continue
            if int(curUpdate["quality"]) == HD:
                newQuality = common.HD
            elif int(curUpdate["quality"]) == SD:
                newQuality = common.SD
            elif int(curUpdate["quality"]) == ANY:
                newQuality = common.ANY
            elif int(curUpdate["quality"]) == BEST:
                newQuality = common.BEST
            else:
                logger.log(u"Unknown show quality: " + str(curUpdate["quality"]), logger.WARNING)
                newQuality = None
            if newQuality:
                self.connection.action("UPDATE tv_shows SET quality = ? WHERE show_id = ?", [newQuality, curUpdate["show_id"]])
        ### Update history
        toUpdate = self.connection.select("SELECT * FROM history")
        for curUpdate in toUpdate:
            newAction = None
            newStatus = None
            # BUGFIX: the parenthesis was misplaced -- the original read
            # `int(curUpdate["action"] == ACTION_SNATCHED)`, converting the
            # *comparison result* to int instead of coercing the column
            # value before comparing.  Coerce first, then compare, so rows
            # whose column comes back as text still match.
            if int(curUpdate["action"]) == ACTION_SNATCHED:
                newStatus = common.SNATCHED
            elif int(curUpdate["action"]) == ACTION_DOWNLOADED:
                newStatus = common.DOWNLOADED
            elif int(curUpdate["action"]) == ACTION_PRESNATCHED:
                newAction = common.Quality.compositeStatus(common.SNATCHED, common.Quality.SDTV)
            if newAction is None and newStatus is None:
                continue
            if not newAction:
                # BUGFIX: same misplaced parenthesis for the quality column.
                if int(curUpdate["quality"]) == HD:
                    newAction = common.Quality.compositeStatus(newStatus, common.Quality.HDTV)
                elif int(curUpdate["quality"]) == SD:
                    newAction = common.Quality.compositeStatus(newStatus, common.Quality.SDTV)
                else:
                    newAction = common.Quality.compositeStatus(newStatus, common.Quality.UNKNOWN)
            self.connection.action("UPDATE history SET action = ? WHERE date = ? AND showid = ?", [newAction, curUpdate["date"], curUpdate["showid"]])
        self.connection.action("CREATE TABLE db_version (db_version INTEGER);")
        self.connection.action("INSERT INTO db_version (db_version) VALUES (?)", [1])
class DropOldHistoryTable(NewQualitySettings):
    """Schema version 2: drop the history_old backup table left behind by
    the provider migration."""
    def test(self):
        # Already applied once the stored schema version reaches 2.
        return self.checkDBVersion() >= 2
    def execute(self):
        self.connection.action("DROP TABLE history_old")
        self.incDBVersion()
class UpgradeHistoryForGenericProviders(DropOldHistoryTable):
    """Schema version 3: rewrite legacy provider labels in history rows."""
    def test(self):
        # Already applied once the stored schema version reaches 3.
        return self.checkDBVersion() >= 3
    def execute(self):
        # Old provider label -> the generic name used from now on.
        renames = {'NZBs': 'NZBs.org',
                   'BinReq': 'Bin-Req',
                   'NZBsRUS': '''NZBs'R'US''',
                   'EZTV': 'EZTV@BT-Chat'}
        for old_name, new_name in renames.items():
            self.connection.action("UPDATE history SET provider = ? WHERE provider = ?", [new_name, old_name])
        self.incDBVersion()
class AddAirByDateOption(UpgradeHistoryForGenericProviders):
    """Schema version 4: add the air_by_date flag column to tv_shows."""
    def test(self):
        return self.checkDBVersion() >= 4
    def execute(self):
        self.connection.action("ALTER TABLE tv_shows ADD air_by_date NUMERIC")
        self.incDBVersion()
class ChangeSabConfigFromIpToHost(AddAirByDateOption):
    """Schema version 5: rewrite the SABnzbd host setting as a full URL."""
    def test(self):
        return self.checkDBVersion() >= 5
    def execute(self):
        # NOTE(review): prepends the scheme/path unconditionally; the v6
        # migration (FixSabHostURL) trims the trailing path again.
        sickbeard.SAB_HOST = 'http://' + sickbeard.SAB_HOST + '/sabnzbd/'
        self.incDBVersion()
class FixSabHostURL(ChangeSabConfigFromIpToHost):
    """Schema version 6: strip the '/sabnzbd/' path appended by v5 and
    persist the corrected config."""
    def test(self):
        return self.checkDBVersion() >= 6
    def execute(self):
        if sickbeard.SAB_HOST.endswith('/sabnzbd/'):
            sickbeard.SAB_HOST = sickbeard.SAB_HOST.replace('/sabnzbd/', '/')
        sickbeard.save_config()
        self.incDBVersion()
class AddLang (FixSabHostURL):
    """Add the `lang` column to tv_shows (defaulting to 'fr').

    NOTE(review): unlike the surrounding migrations, test() keys off the
    column's existence rather than the schema version -- confirm this is
    intentional.
    """
    def test(self):
        return self.hasColumn("tv_shows", "lang")
    def execute(self):
        self.addColumn("tv_shows", "lang", "TEXT", "fr")
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for attention functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
import numpy as np
from seq2seq.decoders.attention import AttentionLayerDot
from seq2seq.decoders.attention import AttentionLayerBahdanau
class AttentionLayerTest(tf.test.TestCase):
  """
  Tests the AttentionLayer module.
  """
  def setUp(self):
    super(AttentionLayerTest, self).setUp()
    tf.logging.set_verbosity(tf.logging.INFO)
    # Fixture dimensions shared by all attention-layer subclasses.
    self.batch_size = 8
    self.attention_dim = 128
    self.input_dim = 16
    self.seq_len = 10
    self.state_dim = 32
  def _create_layer(self):
    """Creates the attention layer. Should be implemented by child classes"""
    raise NotImplementedError
  def _test_layer(self):
    """Tests Attention layer with a given score type"""
    inputs_pl = tf.placeholder(tf.float32, (None, None, self.input_dim))
    inputs_length_pl = tf.placeholder(tf.int32, [None])
    state_pl = tf.placeholder(tf.float32, (None, self.state_dim))
    attention_fn = self._create_layer()
    scores, context = attention_fn(
        query=state_pl,
        keys=inputs_pl,
        values=inputs_pl,
        values_length=inputs_length_pl)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      feed_dict = {}
      feed_dict[inputs_pl] = np.random.randn(self.batch_size, self.seq_len,
                                             self.input_dim)
      feed_dict[state_pl] = np.random.randn(self.batch_size, self.state_dim)
      # Row i has sequence length i+1, so every row but the last is padded.
      feed_dict[inputs_length_pl] = np.arange(self.batch_size) + 1
      scores_, context_ = sess.run([scores, context], feed_dict)
    # One score per (batch, timestep); one context vector per batch row.
    np.testing.assert_array_equal(scores_.shape,
                                  [self.batch_size, self.seq_len])
    np.testing.assert_array_equal(context_.shape,
                                  [self.batch_size, self.input_dim])
    for idx, batch in enumerate(scores_, 1):
      # All scores that are padded should be zero
      np.testing.assert_array_equal(batch[idx:], np.zeros_like(batch[idx:]))
    # Scores should sum to 1
    scores_sum = np.sum(scores_, axis=1)
    np.testing.assert_array_almost_equal(scores_sum, np.ones([self.batch_size]))
class AttentionLayerDotTest(AttentionLayerTest):
  """Tests the AttentionLayerDot class"""
  def _create_layer(self):
    # Dot-product scoring variant under test.
    return AttentionLayerDot(
        params={"num_units": self.attention_dim},
        mode=tf.contrib.learn.ModeKeys.TRAIN)
  def test_layer(self):
    self._test_layer()
class AttentionLayerBahdanauTest(AttentionLayerTest):
  """Tests the AttentionLayerBahdanau class"""
  def _create_layer(self):
    # Additive (Bahdanau) scoring variant under test.
    return AttentionLayerBahdanau(
        params={"num_units": self.attention_dim},
        mode=tf.contrib.learn.ModeKeys.TRAIN)
  def test_layer(self):
    self._test_layer()
# Standard TensorFlow test-runner entry point.
if __name__ == "__main__":
  tf.test.main()
|
# -*- coding: utf-8 -*-
from converters.circle import circle
from converters.currency import currency
from converters.electric import electric
from converters.force import force
from converters.pressure import pressure
from converters.speed import speed
fr | om converters.temperature import temperature
class UnitsManager(object):
    '''
    Registry of the unit converters known to this application.

    Iterating the manager yields the registered converters in order.
    '''
    _units = [
        circle,
        currency,
        electric,
        force,
        pressure,
        speed,
        temperature,
    ]
    def __iter__(self):
        return iter(self._units)
    def register(self, converter):
        """
        Add *converter* to the registry.

        Accepts any non-None callable and silently ignores everything
        else, so plugins can extend the application without touching
        its core.
        """
        if converter is not None and callable(converter):
            self._units.append(converter)
# Module-level singleton registry used by the rest of the application.
UNITS = UnitsManager()
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A very very simple mock object harness."""
from types import ModuleType
DONT_CARE = ''
class MockFunctionCall(object):
  """Records one expected call: its name, args, return value and hooks."""
  def __init__(self, name):
    self.name = name
    self.args = tuple()
    self.return_value = None
    self.when_called_handlers = []
  def WithArgs(self, *args):
    """Sets the expected positional args; returns self for chaining."""
    self.args = args
    return self
  def WillReturn(self, value):
    """Sets the value the mocked call hands back; returns self."""
    self.return_value = value
    return self
  def WhenCalled(self, handler):
    self.when_called_handlers.append(handler)
  def VerifyEquals(self, got):
    """Raises unless `got` matches this call's name and args.

    DONT_CARE placeholders in the expectation match any actual arg.
    """
    if self.name != got.name:
      raise Exception('Self %s, got %s' % (repr(self), repr(got)))
    if len(self.args) != len(got.args):
      raise Exception('Self %s, got %s' % (repr(self), repr(got)))
    for expected_arg, actual_arg in zip(self.args, got.args):
      if expected_arg == DONT_CARE:
        continue
      if expected_arg != actual_arg:
        raise Exception('Self %s, got %s' % (repr(self), repr(got)))
  def __repr__(self):
    def arg_to_text(a):
      return '_' if a == DONT_CARE else repr(a)
    args_text = ', '.join(arg_to_text(a) for a in self.args)
    if self.return_value in (None, DONT_CARE):
      return '%s(%s)' % (self.name, args_text)
    return '%s(%s)->%s' % (self.name, args_text, repr(self.return_value))
class MockTrace(object):
  """Shared record of programmed calls and how far playback has advanced."""
  def __init__(self):
    # Programmed MockFunctionCall objects, in the order they must occur.
    self.expected_calls = []
    # Index of the next expected call during playback.
    self.next_call_index = 0
class MockObject(object):
  """Object whose expected method calls are programmed up front.

  Mocks built from one root share a single MockTrace, so expectations
  across the whole object graph are verified in program order.
  """
  def __init__(self, parent_mock=None):
    if parent_mock:
      self._trace = parent_mock._trace  # pylint: disable=protected-access
    else:
      self._trace = MockTrace()
  def __setattr__(self, name, value):
    # Internal bookkeeping (before _trace exists) and installed hook
    # functions pass straight through; any other attribute must itself
    # be a MockObject.
    if (not hasattr(self, '_trace') or
        hasattr(value, 'is_hook')):
      object.__setattr__(self, name, value)
      return
    assert isinstance(value, MockObject)
    object.__setattr__(self, name, value)
  def SetAttribute(self, name, value):
    setattr(self, name, value)
  def ExpectCall(self, func_name, *args):
    # Expectations may only be programmed before playback has started.
    assert self._trace.next_call_index == 0
    if not hasattr(self, func_name):
      self._install_hook(func_name)
    call = MockFunctionCall(func_name)
    self._trace.expected_calls.append(call)
    call.WithArgs(*args)
    return call
  def _install_hook(self, func_name):
    # The hook records each actual call, checks it against the next
    # programmed expectation, fires any WhenCalled handlers, then hands
    # back the programmed return value.
    def handler(*args, **_):
      got_call = MockFunctionCall(
          func_name).WithArgs(*args).WillReturn(DONT_CARE)
      if self._trace.next_call_index >= len(self._trace.expected_calls):
        raise Exception(
            'Call to %s was not expected, at end of programmed trace.' %
            repr(got_call))
      expected_call = self._trace.expected_calls[
          self._trace.next_call_index]
      expected_call.VerifyEquals(got_call)
      self._trace.next_call_index += 1
      for h in expected_call.when_called_handlers:
        h(*args)
      return expected_call.return_value
    handler.is_hook = True
    setattr(self, func_name, handler)
class MockTimer(object):
  """ Fake clock that can stand in for a module's `time` object in tests.

  Args:
    module: optional module whose `time` attribute is patched to this
        mock until Restore() runs (also triggered by finalization).
  """
  def __init__(self, module=None):
    self._elapsed_time = 0
    self._module = module
    self._actual_time = None
    if module:
      assert isinstance(module, ModuleType)
      # Remember the real time object so Restore() can put it back.
      self._actual_time = module.time
      self._module.time = self
  def sleep(self, time):
    # Advances the fake clock instead of blocking.
    self._elapsed_time += time
  def time(self):
    return self._elapsed_time
  def SetTime(self, time):
    self._elapsed_time = time
  def __del__(self):
    self.Restore()
  def Restore(self):
    """Puts the real `time` object back on the patched module (idempotent)."""
    if self._module:
      self._module.time = self._actual_time
      self._module = None
      self._actual_time = None
|
# Natural Language Toolkit: Chunked Corpus Reader
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Steven Bird <stevenbird1@gmail.com>
# Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
A reader for corpora that contain chunked (and optionally tagged)
documents.
"""
import os.path, codecs
import nltk
from nltk.corpus.reader.bracket_parse import BracketParseCorpusReader
from nltk import compat
from nltk.tree import Tree
from nltk.tokenize import *
from nltk.chunk import tagstr2tree
from nltk.corpus.reader.util import *
from nltk.corpus.reader.api import *
class ChunkedCorpusReader(CorpusReader):
    """
    Reader for chunked (and optionally tagged) corpora. Paragraphs
    are split using a block reader. They are then tokenized into
    sentences using a sentence tokenizer. Finally, these sentences
    are parsed into chunk trees using a string-to-chunktree conversion
    function. Each of these steps can be performed using a default
    function or a custom function. By default, paragraphs are split
    on blank lines; sentences are listed one per line; and sentences
    are parsed into chunk trees using ``nltk.chunk.tagstr2tree``.
    """
    def __init__(self, root, fileids, extension='',
                 str2chunktree=tagstr2tree,
                 sent_tokenizer=RegexpTokenizer('\n', gaps=True),
                 para_block_reader=read_blankline_block,
                 encoding='utf8'):
        """
        :param root: The root directory for this corpus.
        :param fileids: A list or regexp specifying the fileids in this corpus.
        """
        # NOTE(review): `extension` is accepted but never used in this
        # constructor -- confirm whether it can be dropped.
        CorpusReader.__init__(self, root, fileids, encoding)
        self._cv_args = (str2chunktree, sent_tokenizer, para_block_reader)
        """Arguments for corpus views generated by this corpus: a tuple
        (str2chunktree, sent_tokenizer, para_block_tokenizer)"""
    def raw(self, fileids=None):
        """
        :return: the given file(s) as a single string.
        :rtype: str
        """
        if fileids is None: fileids = self._fileids
        elif isinstance(fileids, compat.string_types): fileids = [fileids]
        return concat([self.open(f).read() for f in fileids])
    # The four flag arguments passed to ChunkedCorpusView below are, in
    # order: (tagged, group_by_sent, group_by_para, chunked).
    def words(self, fileids=None):
        """
        :return: the given file(s) as a list of words
            and punctuation symbols.
        :rtype: list(str)
        """
        return concat([ChunkedCorpusView(f, enc, 0, 0, 0, 0, *self._cv_args)
                       for (f, enc) in self.abspaths(fileids, True)])
    def sents(self, fileids=None):
        """
        :return: the given file(s) as a list of
            sentences or utterances, each encoded as a list of word
            strings.
        :rtype: list(list(str))
        """
        return concat([ChunkedCorpusView(f, enc, 0, 1, 0, 0, *self._cv_args)
                       for (f, enc) in self.abspaths(fileids, True)])
    def paras(self, fileids=None):
        """
        :return: the given file(s) as a list of
            paragraphs, each encoded as a list of sentences, which are
            in turn encoded as lists of word strings.
        :rtype: list(list(list(str)))
        """
        return concat([ChunkedCorpusView(f, enc, 0, 1, 1, 0, *self._cv_args)
                       for (f, enc) in self.abspaths(fileids, True)])
    def tagged_words(self, fileids=None):
        """
        :return: the given file(s) as a list of tagged
            words and punctuation symbols, encoded as tuples
            ``(word,tag)``.
        :rtype: list(tuple(str,str))
        """
        return concat([ChunkedCorpusView(f, enc, 1, 0, 0, 0, *self._cv_args)
                       for (f, enc) in self.abspaths(fileids, True)])
    def tagged_sents(self, fileids=None):
        """
        :return: the given file(s) as a list of
            sentences, each encoded as a list of ``(word,tag)`` tuples.
        :rtype: list(list(tuple(str,str)))
        """
        return concat([ChunkedCorpusView(f, enc, 1, 1, 0, 0, *self._cv_args)
                       for (f, enc) in self.abspaths(fileids, True)])
    def tagged_paras(self, fileids=None):
        """
        :return: the given file(s) as a list of
            paragraphs, each encoded as a list of sentences, which are
            in turn encoded as lists of ``(word,tag)`` tuples.
        :rtype: list(list(list(tuple(str,str))))
        """
        return concat([ChunkedCorpusView(f, enc, 1, 1, 1, 0, *self._cv_args)
                       for (f, enc) in self.abspaths(fileids, True)])
    def chunked_words(self, fileids=None):
        """
        :return: the given file(s) as a list of tagged
            words and chunks. Words are encoded as ``(word, tag)``
            tuples (if the corpus has tags) or word strings (if the
            corpus has no tags). Chunks are encoded as depth-one
            trees over ``(word,tag)`` tuples or word strings.
        :rtype: list(tuple(str,str) and Tree)
        """
        return concat([ChunkedCorpusView(f, enc, 1, 0, 0, 1, *self._cv_args)
                       for (f, enc) in self.abspaths(fileids, True)])
    def chunked_sents(self, fileids=None):
        """
        :return: the given file(s) as a list of
            sentences, each encoded as a shallow Tree. The leaves
            of these trees are encoded as ``(word, tag)`` tuples (if
            the corpus has tags) or word strings (if the corpus has no
            tags).
        :rtype: list(Tree)
        """
        return concat([ChunkedCorpusView(f, enc, 1, 1, 0, 1, *self._cv_args)
                       for (f, enc) in self.abspaths(fileids, True)])
    def chunked_paras(self, fileids=None):
        """
        :return: the given file(s) as a list of
            paragraphs, each encoded as a list of sentences, which are
            in turn encoded as a shallow Tree. The leaves of these
            trees are encoded as ``(word, tag)`` tuples (if the corpus
            has tags) or word strings (if the corpus has no tags).
        :rtype: list(list(Tree))
        """
        return concat([ChunkedCorpusView(f, enc, 1, 1, 1, 1, *self._cv_args)
                       for (f, enc) in self.abspaths(fileids, True)])
    def _read_block(self, stream):
        # Parse one blank-line-delimited block directly into chunk trees.
        return [tagstr2tree(t) for t in read_blankline_block(stream)]
class ChunkedCorpusView(StreamBackedCorpusView):
    """
    Corpus view for chunked corpora.  The constructor flags (tagged,
    group_by_sent, group_by_para, chunked) select how much structure of
    each parsed block is kept when the view is read.
    """
    def __init__(self, fileid, encoding, tagged, group_by_sent,
                 group_by_para, chunked, str2chunktree, sent_tokenizer,
                 para_block_reader):
        StreamBackedCorpusView.__init__(self, fileid, encoding=encoding)
        self._tagged = tagged
        self._group_by_sent = group_by_sent
        self._group_by_para = group_by_para
        self._chunked = chunked
        self._str2chunktree = str2chunktree
        self._sent_tokenizer = sent_tokenizer
        self._para_block_reader = para_block_reader
    def read_block(self, stream):
        """Parse one paragraph block from `stream` into the configured shape."""
        block = []
        for para_str in self._para_block_reader(stream):
            para = []
            for sent_str in self._sent_tokenizer.tokenize(para_str):
                sent = self._str2chunktree(sent_str)
                # If requested, throw away the tags.
                if not self._tagged:
                    sent = self._untag(sent)
                # If requested, throw away the chunks.
                if not self._chunked:
                    sent = sent.leaves()
                # Add the sentence to `para`.
                if self._group_by_sent:
                    para.append(sent)
                else:
                    para.extend(sent)
            # Add the paragraph to `block`.
            if self._group_by_para:
                block.append(para)
            else:
                block.extend(para)
        # Return the block
        return block
    def _untag(self, tree):
        # Replace every (word, tag) leaf with just the word, in place.
        for i, child in enumerate(tree):
            if isinstance(child, Tree):
                self._untag(child)
            elif isinstance(child, tuple):
                tree[i] = child[0]
            else:
                raise ValueError('expected child to be Tree or tuple')
        return tree
import datetime
from dateutil import parser
from .numbers import is_number
from .strings import STRING_TYPES
DATE_TYPES = (datetime.date, datetime.datetime)
def parse_dates(d, default='today'):
    """ Parses one or more dates from d """
    # `default` may be the sentinel 'today' (meaning "now"), or any value
    # the caller wants returned for unparseable input.
    if default == 'today':
        default = datetime.datetime.today()
    if d is None:
        return default
    elif isinstance(d, DATE_TYPES):
        # Already a date/datetime: pass through unchanged.
        return d
    elif is_number(d):
        # Treat as milliseconds since 1970
        # NOTE(review): utcfromtimestamp() interprets its argument as
        # *seconds* since the epoch, so either this comment or the code
        # is wrong -- confirm the intended unit (no /1000 is applied).
        d = d if isinstance(d, float) else float(d)
        return datetime.datetime.utcfromtimestamp(d)
    elif not isinstance(d, STRING_TYPES):
        if hasattr(d, '__iter__'):
            # An iterable of inputs: parse each element recursively.
            return [parse_dates(s, default) for s in d]
        else:
            return default
    elif len(d) == 0:
        # Behaves like dateutil.parser < version 2.5
        return default
    else:
        try:
            return parser.parse(d)
        except (AttributeError, ValueError):
            return default
|
#!/usr/bin/python
import argparse
import requests,json
from requests.auth import HTTPBasicAuth
from subprocess import call
import time
import sys
import os
from vas_config_sw1 import *
# Controller connection defaults.
DEFAULT_PORT='8181'
USERNAME='admin'
PASSWORD='admin'
# Operational OVSDB topology, used below to discover bridge node-ids.
OPER_OVSDB_TOPO='/restconf/operational/network-topology:network-topology/topology/ovsdb:1'
def get(host, port, uri):
    """GET `uri` from the controller and return the decoded JSON body.

    NOTE(review): unlike put()/post(), no raise_for_status() here, so an
    HTTP error surfaces only as a JSON decode failure -- confirm intent.
    """
    url = 'http://' + host + ":" + port + uri
    #print url
    r = requests.get(url, auth=HTTPBasicAuth(USERNAME, PASSWORD))
    jsondata=json.loads(r.text)
    return jsondata
def put(host, port, uri, data, debug=False):
    '''Perform a PUT rest operation, using the URL and data provided'''
    url='http://'+host+":"+port+uri
    # RESTCONF expects the YANG JSON media type.
    headers = {'Content-type': 'application/yang.data+json',
               'Accept': 'application/yang.data+json'}
    if debug == True:
        print "PUT %s" % url
        print json.dumps(data, indent=4, sort_keys=True)
    r = requests.put(url, data=json.dumps(data), headers=headers, auth=HTTPBasicAuth(USERNAME, PASSWORD))
    if debug == True:
        print r.text
    # Fail loudly on any HTTP error status.
    r.raise_for_status()
def post(host, port, uri, data, debug=False):
    '''Perform a POST rest operation, using the URL and data provided'''
    url='http://'+host+":"+port+uri
    # RESTCONF expects the YANG JSON media type.
    headers = {'Content-type': 'application/yang.data+json',
               'Accept': 'application/yang.data+json'}
    if debug == True:
        print "POST %s" % url
        print json.dumps(data, indent=4, sort_keys=True)
    r = requests.post(url, data=json.dumps(data), headers=headers, auth=HTTPBasicAuth(USERNAME, PASSWORD))
    if debug == True:
        print r.text
    # Fail loudly on any HTTP error status.
    r.raise_for_status()
# Main definition - constants
# =======================
# MENUS FUNCTIONS
# =======================
# Main menu
# =======================
# MAIN PROGRAM
# =======================
# Main Program
NODE_ID_OVSDB = ''
SUBNET_2_LSW = {"10.0.35.1":"vswitch-1", "10.0. | 36.1":"vswitch-1"}
PORTIDX_OF_LSW = {"vswitch-1":1, "vswitch-2":1}
def rpc_create_logic_switch_uri():
    """RESTCONF path of the fabric-service create-logical-switch RPC."""
    return "/restconf/operations/fabric-service:create-logical-switch"
def rpc_create_logic_switch_data(name):
    """JSON body for create-logical-switch: switch *name* on fabric:1."""
    payload = {"fabric-id": "fabric:1", "name": name}
    return {"input": payload}
def rpc_create_logic_router_uri():
    """RESTCONF path of the fabric-service create-logical-router RPC."""
    return "/restconf/operations/fabric-service:create-logical-router"
def rpc_create_logic_router_data(name):
    """JSON body for create-logical-router: router *name* on fabric:1."""
    payload = {"fabric-id": "fabric:1", "name": name}
    return {"input": payload}
def rpc_create_logic_port_uri():
    """RESTCONF path of the fabric-service create-logical-port RPC."""
    return "/restconf/operations/fabric-service:create-logical-port"
def rpc_create_logic_port_data(deviceName, portName):
    """JSON body for create-logical-port: *portName* on *deviceName*."""
    return {
        "input": {
            "fabric-id": "fabric:1",
            "name": portName,
            "logical-device": deviceName,
        }
    }
def rpc_register_endpoint_uri():
    """RESTCONF path of the fabric-endpoint register-endpoint RPC."""
    return "/restconf/operations/fabric-endpoint:register-endpoint"
# printf-style instance-identifier templates into the ovsdb:1 topology:
# a whole bridge node (one %s: node-id) ...
BRIDGE_REF_P="/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='%s']"
# ... and a termination point on a bridge (two %s: node-id, tp-id).
TP_REF_P="/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='%s']/network-topology:termination-point[network-topology:tp-id='%s']"
def rpc_register_endpoint_data(host, nodeid):
    """Build the register-endpoint RPC body for one host entry.

    `host` is a dict with "mac", "ip" (CIDR), "gw", "switch" and
    "ofport" keys; `nodeid` is the OVSDB node the host attaches to.
    Side effect: consumes one logical-port index from the module-level
    PORTIDX_OF_LSW counter.
    """
    mac = host["mac"]
    # Strip the prefix length from the CIDR form.
    ip = host["ip"].split("/")[0]
    gw = host["gw"]
    # The gateway address selects the logical switch for this subnet.
    lsw = SUBNET_2_LSW[gw]
    lport = lsw + "-p-" + str(PORTIDX_OF_LSW[lsw])
    PORTIDX_OF_LSW[lsw] += 1
    #physical location
    # NOTE(review): `bridge` is computed but never used below -- confirm.
    bridge = host["switch"]
    port = host["switch"] + "-eth" + str(host["ofport"])
    noderef = BRIDGE_REF_P % (nodeid)
    tpref = TP_REF_P % (nodeid, port)
    return {
        "input" : {
            "fabric-id":"fabric:1",
            "mac-address":mac,
            "ip-address":ip,
            "gateway":gw,
            "logical-location" : {
                "node-id": lsw,
                "tp-id": lport
            },
            "location" : {
                "node-ref": noderef,
                "tp-ref": tpref,
                "access-type":"vlan",
                "access-segment":"111"
            }
        }
    }
def rpc_create_gateway_uri():
    """Return the RESTCONF RPC path that creates a gateway."""
    endpoint = "/restconf/operations/fabric-service:create-gateway"
    return endpoint
def rpc_create_gateway_data(ipaddr, network, switchName):
    """Build the create-gateway RPC payload.

    Attaches gateway *ipaddr* for *network* on logical switch *switchName*,
    always behind the fixed logical router vrouter-1.
    """
    body = {
        "fabric-id": "fabric:1",
        "ip-address": ipaddr,
        "network": network,
        "logical-router": "vrouter-1",
        "logical-switch": switchName,
    }
    return {"input": body}
def pause():
    """Block until the operator presses Enter (Python 2: raw_input)."""
    print "press Enter key to continue..."
    raw_input()
if __name__ == "__main__":
    # Entry point: provision a demo fabric on the controller named by $ODL.
    # Some sensible defaults
    controller = os.environ.get('ODL')
    if controller == None:
        sys.exit("No controller set.")
    # Step 1: discover the OVSDB node-ids of bridges sw1/sw2 from the
    # operational topology; they anchor the endpoints' physical locations.
    # NOTE(review): if either bridge is absent, node_sw1/node_sw2 stay
    # unbound and the registration loop below raises NameError -- confirm.
    print "get ovsdb node-id"
    ovsdb_topo = get(controller, DEFAULT_PORT,OPER_OVSDB_TOPO)["topology"]
    for topo_item in ovsdb_topo:
        if topo_item["node"] is not None:
            for ovsdb_node in topo_item["node"]:
                #if ovsdb_node.has_key("ovsdb:ovs-version"):
                if ovsdb_node.has_key("ovsdb:bridge-name") and ovsdb_node["ovsdb:bridge-name"] == "sw1":
                    #uuid_ovsdb = ovsdb_node["node-id"][13:]
                    #NODE_ID_OVSDB = ovsdb_node["node-id"]
                    node_sw1 = ovsdb_node["node-id"]
                    print "sw1=", node_sw1
                if ovsdb_node.has_key("ovsdb:bridge-name") and ovsdb_node["ovsdb:bridge-name"] == "sw2":
                    node_sw2 = ovsdb_node["node-id"]
                    print "sw2=", node_sw2
    # Step 2: create the logical switch.
    print "create_logic_switch ..."
    pause()
    post(controller, DEFAULT_PORT, rpc_create_logic_switch_uri(), rpc_create_logic_switch_data("vswitch-1"), True)
    # Step 3: create five logical ports on vswitch-1.
    print "create_logic_port ..."
    pause()
    post(controller, DEFAULT_PORT, rpc_create_logic_port_uri(), rpc_create_logic_port_data("vswitch-1", "vswitch-1-p-1"), True)
    post(controller, DEFAULT_PORT, rpc_create_logic_port_uri(), rpc_create_logic_port_data("vswitch-1", "vswitch-1-p-2"), True)
    post(controller, DEFAULT_PORT, rpc_create_logic_port_uri(), rpc_create_logic_port_data("vswitch-1", "vswitch-1-p-3"), True)
    post(controller, DEFAULT_PORT, rpc_create_logic_port_uri(), rpc_create_logic_port_data("vswitch-1", "vswitch-1-p-4"), True)
    post(controller, DEFAULT_PORT, rpc_create_logic_port_uri(), rpc_create_logic_port_data("vswitch-1", "vswitch-1-p-5"), True)
    # Step 4: register each known host as an endpoint, using the OVSDB
    # node-id of whichever physical bridge it sits on.
    print "registering endpoints ..."
    pause()
    for host in hosts:
        if host["switch"] == "sw1":
            post(controller, DEFAULT_PORT, rpc_register_endpoint_uri(), rpc_register_endpoint_data(host, node_sw1), True)
        if host["switch"] == "sw2":
            post(controller, DEFAULT_PORT, rpc_register_endpoint_uri(), rpc_register_endpoint_data(host, node_sw2), True)
|
from examples.isomorph import (
get_all_canonicals,
get_canonical, |
get_translation_dict,
)
from pokertools import cards_from_str as flop
def test_isomorph():
    """Exercise the isomorph helpers: canonical count, mapping, translation."""
    assert len(get_all_canonicals()) == 1755
    assert get_canonical(flop('6s 8d 7c')) == flop('6c 7d 8h')
    assert get_translation_dict(flop('6s 8d 7c')) == {
        'c': 'd', 'd': 'h', 'h': 's', 's': 'c'}
    assert get_canonical(flop('Qs Qd 4d')) == flop('4c Qc Qd')
    assert get_translation_dict(flop('Qs Qd 4d')) == {
        'c': 'h', 'd': 'c', 'h': 's', 's': 'd'}
|
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import rejviz.tests.utils as tutils
from rejviz import utils
class UtilsTest(tutils.TestCase):
    """Unit tests for rejviz.utils argument/key-value helpers."""

    def test_parse_keyvals(self):
        """parse_keyvals splits with default separators or custom ones."""
        expected = {'a': 'b', 'c': 'd'}
        self.assertEqual(expected, utils.parse_keyvals("a=b,c=d"))
        self.assertEqual(expected, utils.parse_keyvals("a:b/c:d", '/', ':'))

    def test_extract_domain_or_image_args(self):
        """-d/-a argument pairs are extracted; neither present raises."""
        args1 = ['--something', '-d', 'domain', 'somethingelse']
        args2 = ['-b', '--something', '-a', 'image', 'somethingelse']
        args3 = ['-b', '-c', '--something']
        self.assertEqual(['-d', 'domain'],
                         utils.extract_domain_or_image_args(args1))
        self.assertEqual(['-a', 'image'],
                         utils.extract_domain_or_image_args(args2))
        self.assertRaises(ValueError,
                          utils.extract_domain_or_image_args, args3)

    def test_extract_image_args_from_disks(self):
        """Image paths are pulled out of --disk specs (bare or path= form)."""
        args1 = ['--disk', '/path/to/image,opt1=val1,opt2=val2']
        args2 = ['--disk', 'opt1=val1,path=/path/to/image,opt2=val2']
        args3 = ['-b', '-c', '--something']
        self.assertEqual(['-a', '/path/to/image'],
                         utils.extract_image_args_from_disks(args1))
        self.assertEqual(['-a', '/path/to/image'],
                         utils.extract_image_args_from_disks(args2))
        # Bug fix: this test previously asserted against
        # extract_domain_or_image_args (copy-paste from the test above);
        # it must exercise the function under test here.
        self.assertRaises(ValueError,
                          utils.extract_image_args_from_disks, args3)
|
"""Support for HomematicIP Cloud weather devices."""
import logging
from homematicip.aio.device import (
AsyncWeatherSensor, AsyncWeatherSensorPlus, AsyncWeatherSensorPro)
from homematicip.aio.home import AsyncHome
from homeassistant.components.weather import WeatherEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from . import DOMAIN as HMIPC_DOMAIN, HMIPC_HAPID, HomematicipGenericDevice
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
        hass, config, async_add_entities, discovery_info=None):
    """Set up the HomematicIP Cloud weather sensor.

    Legacy YAML setup is deliberately a no-op; entities are created per
    config entry in async_setup_entry instead.
    """
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry,
                            async_add_entities) -> None:
    """Set up the HomematicIP weather sensor from a config entry."""
    home = hass.data[HMIPC_DOMAIN][config_entry.data[HMIPC_HAPID]].home
    entities = []
    for device in home.devices:
        # Check the pro sensor first: it gets the subclass with wind bearing.
        if isinstance(device, AsyncWeatherSensorPro):
            entities.append(HomematicipWeatherSensorPro(home, device))
        elif isinstance(device, (AsyncWeatherSensor, AsyncWeatherSensorPlus)):
            entities.append(HomematicipWeatherSensor(home, device))
    if entities:
        async_add_entities(entities)
class HomematicipWeatherSensor(HomematicipGenericDevice, WeatherEntity):
    """Weather entity backed by the basic & plus HomematicIP weather sensors."""

    def __init__(self, home: AsyncHome, device) -> None:
        """Initialize the weather sensor."""
        super().__init__(home, device)

    @property
    def name(self) -> str:
        """Return the device label as the entity name."""
        return self._device.label

    @property
    def temperature(self) -> float:
        """Return the measured temperature."""
        return self._device.actualTemperature

    @property
    def temperature_unit(self) -> str:
        """Return the unit of measurement (Celsius)."""
        return TEMP_CELSIUS

    @property
    def humidity(self) -> int:
        """Return the relative humidity."""
        return self._device.humidity

    @property
    def wind_speed(self) -> float:
        """Return the wind speed."""
        return self._device.windSpeed

    @property
    def attribution(self) -> str:
        """Return the data attribution."""
        return "Powered by Homematic IP"

    @property
    def condition(self) -> str:
        """Return the current condition: rain beats storm beats sunshine."""
        # Basic sensors have no 'raining' attribute, hence the default.
        if getattr(self._device, "raining", False):
            return 'rainy'
        if self._device.storm:
            return 'windy'
        if self._device.sunshine:
            return 'sunny'
        return ''
class HomematicipWeatherSensorPro(HomematicipWeatherSensor):
    """Weather entity for the pro-grade HomematicIP weather sensor."""

    @property
    def wind_bearing(self) -> float:
        """Return the wind direction reported by the device."""
        return self._device.windDirection
|
from bears.yml.RA | MLLintBear import RAMLLintBear
from tests.LocalBearTestHelper import verify_local_bear
# Minimal RAML 0.8 document expected to pass RAMLLintBear cleanly.
good_file = """
#%RAML 0.8
title: World Music API
baseUri: http://example.api.com/{version}
version: v1
"""
# RAML document placed in invalid_files, i.e. expected to yield findings.
bad_file = """#%RAML 0.8
title: Failing RAML
version: 1
baseUri: http://example.com
/resource:
  description: hello
  post:
"""
# Generated test case: runs the bear on both fixtures as .raml temp files.
RAMLLintBearTest = verify_local_bear(RAMLLintBear,
                                     valid_files=(good_file,),
                                     invalid_files=(bad_file,),
                                     tempfile_kwargs={"suffix": ".raml"})
|
import sys
import six
import logging
import ooxml
from ooxml import parse, serialize, importer
# Demo driver: parse the .docx given on the command line and dump the
# serialized HTML, CSS, used styles and used font sizes to stdout.
logging.basicConfig(filename='ooxml.log', level=logging.INFO)
if len(sys.argv) > 1:
    file_name = sys.argv[1]
    dfile = ooxml.read_from_file(file_name)
    six.print_("\n-[HTML]-----------------------------\n")
    six.print_(serialize.serialize(dfile.document))
    six.print_("\n-[CSS STYLE]------------------------\n")
    six.print_(serialize.serialize_styles(dfile.document))
    six.print_("\n-[USED STYLES]----------------------\n")
    six.print_(dfile.document.used_styles)
    six.print_("\n-[USED FONT SIZES]------------------\n")
    six.print_(dfile.document.used_font_size)
|
'one do I use?')
else:
# all good - the files are the same
# we can update our local sync info
self.state.markObjectAsSynced(f.path,
localFileInfo.hash,
localMD)
else:
# dates are the same, so we can assume the hash
# hasn't changed
if syncInfo.hash != f.hash:
# if the sync info is the same as the local file
# then it must mean the remote file has changed!
get_file_info = self.localStore.get_file_info
localFileInfo = get_file_info(localPath)
if localFileInfo.hash == syncInfo.hash:
self.replace_file(f, localPath)
else:
logging.info('remote hash: %r' % f.hash)
logging.info('local hash: %r' % localFileInfo.hash)
logging.info('sync hash: %r' % syncInfo.hash)
logging.warn('sync hash differs from local hash!')
else:
# sync hash is same as remote hash, and the file date
# hasn't changed. we assume this to mean, there have
# been no changes
pass
else:
# TODO: we need to do something here!
# the file exists locally, and remotely - but we don't have any
# record of having downloaded it
localFileInfo = self.localStore.get_file_info(localPath)
if localFileInfo.hash == f.hash:
localMD = self.localStore.get_last_modified_date(localPath)
self.state.markObjectAsSynced(f.path,
localFileInfo.hash,
localMD)
else:
# we don't have any history of this file - and the hash
# from local differs from remote! WHAT DO WE DO!
logging.error('TODO: HASH differs! Which is which????: %r'
% f.path)
pass
def replace_file(self, f, localPath):
self._set_hadWorkToDo(True)
head, tail = os.path.split(localPath)
self.outputQueue.put(messages.Status('Downloading %s' % tail))
tmpFile = self.get_tmp_filename()
if os.path.exists(tmpFile):
# if a temporary file with the same name exists, remove it
os.remove(tmpFile)
self.objectStore.download_object(f.path, tmpFile)
| send2 | trash(localPath)
os.rename(tmpFile, localPath)
localMD = self.localStore.get_last_modified_date(localPath)
self.state.markObjectAsSynced(f.path,
f.hash,
localMD)
self.outputQueue.put(self._get_working_message())
def get_tmp_filename(self):
return os.path.join(self.tempDownloadFolder, 'tmpfile')
def download_folder(self, folder):
if not self.running:
# return true, to indicate that children can be skipped
return True
# does the folder exist locally?
#logging.debug('download_folder(%s)' % folder.path)
localPath = self.get_local_path(folder.path)
downloadFolderContents = True
skipChildren = False
if not os.path.exists(localPath):
self._set_hadWorkToDo(True)
# the path exists online, but NOT locally
# we do one of two things, we either
# a) delete it remotely
# if we know for a fact we've already downloaded this folder,
# then it not being here, can only mean we've deleted it
# b) download it
# if we haven't marked this folder as being downloaded,
# then we get it now
if self.already_downloaded_folder(folder.path):
logging.info('we need to delete %r!' % folder.path)
self.delete_remote_folder(folder.path)
downloadFolderContents = False
skipChildren = True
logging.info('done deleting remote folder')
else:
#logging.info('creating: %r' % localPath)
os.makedirs(localPath)
localMD = self.localStore.get_last_modified_date(localPath)
self.state.markObjectAsSynced(folder.path,
None,
localMD)
#logging.info('done creating %r' % localPath)
if downloadFolderContents:
try:
#logging.debug('downloading folder
# 'contents for %s' % folder.path)
files = self.objectStore.list_dir(folder.path)
#logging.debug('got %r files' % len(files))
for f in files:
if folder.path.strip('/') != f.path.strip('/'):
if f.isFolder:
skipChildren = self.download_folder(f)
if skipChildren:
break
else:
self.download_file(f)
except:
logging.error('failed to download %s' % folder.path)
logging.error(traceback.format_exc())
return skipChildren
def get_local_path(self, remote_path):
return os.path.join(self.localSyncPath, remote_path)
def already_downloaded_folder(self, path):
""" Establish if this folder was downloaded before
typical use: the folder doesn't exist locally, but it
does exist remotely - that would imply that if we'd already
downloaded it, it can only be missing if it was deleted, and
thusly, we delete it remotely.
"""
alreadySynced = False
syncInfo = self.state.getObjectSyncInfo(path)
if syncInfo:
# if we have sync info for this path - it means we've
# already download
# or uploaded it
logging.info('we have sync info for %s' % path)
alreadySynced = True
else:
# if we don't have sync info for this path
# - it means we haven't downloaded it yet
#logging.info('no sync info for %s' % path)
pass
return alreadySynced
def already_synced_file(self, path):
""" See: already_downloaded_folder
"""
syncInfo = self.state.getObjectSyncInfo(path)
if syncInfo:
remoteFileInfo = self.objectStore.get_file_info(path)
if remoteFileInfo.hash == syncInfo.hash:
# the hash of the file we synced, is the
# same as the one online.
# this means, we've already synced this file!
return True
return False
else:
return False
def delete_remote_folder(self, path):
logging.info('delete_remote_folder(path = %r)' % path)
# a folder has children - and we need to remove those!
self._set_hadWorkToDo(True)
children = self.objectStore.list_dir(path)
#for child in children:
# logging.info('%s [child] %s' % (path, child.path))
for child in children:
if child.isFolder:
# remove this child folder
self.delete_remote_folder(child.path)
else:
# remove this child file
self.delete_remote_file(child.path)
logging.info('going to attempt to delete: %r' % path)
self.delete_remote_file(path)
def delete_remote_file(self, path):
self._set_hadWorkToDo(True)
logging.info('delete remote file: |
output
diff_output = ""
identical_hosts = [hosts[0]]
for (host, diff) in diffs:
if diff:
diff_output += "=" * 70 + "\n\n%s\n%s\n\n" % (host, diff)
else:
identical_hosts.append(host)
output = OUTPUT_TEMPLATE.render(
item=item,
hosts_count=len(hosts),
hosts=hosts,
identical_hosts=identical_hosts,
initial_output=initial_output
)
return output, diff_output, dict(item=item, hosts=hosts, identical_hosts=identical_hosts, initial_output=initial_output)
# output = item.name+"\n"
# output += "%s hosts with this issue\n" % len(hosts)
# output += "\n".join(str(i).split()[0] for i in hosts)
# output += "\n"+"-"*20+"\n"
# output += "\n".join(str(i) for i in identical_hosts) + "\n\n" + initial_output
# return output, diff_output
def show_nessus_item(self, item):
output, diff_output, _ = self.get_item_output(item)
diff_title = "Diffs"
self.delete_page_with_title(diff_title)
display = self.view.display
if diff_output:
self.add_output_page(diff_title, diff_output, font="Courier New")
display.SetValue(output)
    def generate_rst(self, event):
        """Prompt for a destination and export all merged findings as RST.

        Each finding is annotated with its rendered output, diffs and
        severity before the template is written.
        """
        saveas = SaveDialog(self.view, defaultDir=self._save_path, message="Save RST as...").get_choice()
        if saveas:
            merged_scans = MergedNessusReport(self.files)
            if not saveas.endswith(".rst"):
                saveas = saveas+".rst"
            sorted_tree_items = self.sorted_tree_items(merged_scans, merged_scans.criticals+merged_scans.highs+merged_scans.meds+merged_scans.lows+merged_scans.others)
            with open(saveas, "wb") as f:
                for item in sorted_tree_items:
                    issue, diffs, meta = self.get_item_output(item)
                    item.issue = meta
                    item.diffs = diffs
                    item.severity = SEVERITY[item.item.severity]
                    # NOTE(review): this write sits inside the loop, so the
                    # full template (all vulns) is rendered once per item --
                    # confirm whether it should run once, after the loop.
                    f.write(RST_TEMPLATE.render(
                        timestamp=datetime.now(),
                        hosts=merged_scans.hosts,
                        vulns=sorted_tree_items,
                        merged_scans=merged_scans,
                        )
                    )
    def generate_vulnxml(self, event):
        """Prompt for a destination and export all merged findings as VulnXML.

        Mirrors generate_rst but renders VULNXML_TEMPLATE to a .xml file.
        """
        saveas = SaveDialog(self.view, defaultDir=self._save_path, message="Save VulnXML as...").get_choice()
        if saveas:
            merged_scans = MergedNessusReport(self.files)
            if not saveas.endswith(".xml"):
                saveas = saveas+".xml"
            sorted_tree_items = self.sorted_tree_items(merged_scans, merged_scans.criticals+merged_scans.highs+merged_scans.meds+merged_scans.lows+merged_scans.others)
            with open(saveas, "wb") as f:
                for item in sorted_tree_items:
                    issue, diffs, meta = self.get_item_output(item)
                    item.issue = meta
                    item.diffs = diffs
                    item.severity = SEVERITY[item.item.severity]
                    # NOTE(review): as in generate_rst, the full template is
                    # rendered once per item -- confirm intended placement.
                    f.write(VULNXML_TEMPLATE.render(
                        timestamp=datetime.now(),
                        hosts=merged_scans.hosts,
                        vulns=sorted_tree_items,
                        merged_scans=merged_scans,
                        )
                    )
def generate_csv(self, event):
|
saveas = SaveDialog(self.view, defaultDir=self._save_path, message="Save csv as...").get_choice()
if saveas:
merged_scans = MergedNessusReport(self.files)
if not saveas.endswith(".csv"):
saveas = saveas+".csv"
sorted_tree_items = self.sorted_tree_items(merged_scans, merged_scans.criticals+merged_scans.highs+merged_scans.meds+merged_scans.low | s+merged_scans.others)
with open(saveas, "wb") as f:
csv_writer = csv.writer(f)
csv_writer.writerow(["PID","Severity","Hosts","Output","Diffs"])
for item in sorted_tree_items:
csv_writer.writerow([
item.pid,
SEVERITY[item.item.severity],
"\n".join(x.address for x in merged_scans.hosts_with_pid(item.pid)),
self.get_item_output(item)[0],
self.get_item_output(item)[1],
]
)
    def combine_files(self, event):
        """Merge all loaded scans and build the "Merged Files" tree branch.

        Creates Findings with one sub-hook per severity bucket, attaches
        the sorted item lists as PyData, and populates each bucket.
        """
        scans_hook = self.view.tree.GetRootItem()
        merged_scans = MergedNessusReport(self.files)
        if merged_scans.get_all_reports():
            merge_hook = self.view.tree.AppendItem(scans_hook, "Merged Files", 0)
            items_hook = self.view.tree.AppendItem(merge_hook, "Findings", 0)
            # The Findings node carries the full sorted list of all buckets.
            self.view.tree.SetPyData(items_hook, self.sorted_tree_items(merged_scans, merged_scans.criticals+merged_scans.highs+merged_scans.meds+merged_scans.lows+merged_scans.others))
            critical_hook = self.view.tree.AppendItem(items_hook, "Critical", 0)
            self.view.tree.SetPyData(critical_hook, self.sorted_tree_items(merged_scans, merged_scans.criticals))
            high_hook = self.view.tree.AppendItem(items_hook, "Highs", 0)
            self.view.tree.SetPyData(high_hook, self.sorted_tree_items(merged_scans, merged_scans.highs))
            med_hook = self.view.tree.AppendItem(items_hook, "Meds", 0)
            self.view.tree.SetPyData(med_hook, self.sorted_tree_items(merged_scans, merged_scans.meds))
            low_hook = self.view.tree.AppendItem(items_hook, "Lows", 0)
            self.view.tree.SetPyData(low_hook, self.sorted_tree_items(merged_scans, merged_scans.lows))
            other_hook = self.view.tree.AppendItem(items_hook, "Others", 0)
            self.view.tree.SetPyData(other_hook, self.sorted_tree_items(merged_scans, merged_scans.others))
            # Populate each bucket with its individual findings.
            # NOTE(review): "Others" is appended unsorted, unlike the other
            # four buckets -- confirm whether that asymmetry is intended.
            for crit in self.sorted_tree_items(merged_scans, merged_scans.criticals):
                item = self.view.tree.AppendItem(critical_hook, str(crit), 0)
                self.view.tree.SetPyData(item, crit)
            for high in self.sorted_tree_items(merged_scans, merged_scans.highs):
                item = self.view.tree.AppendItem(high_hook, str(high), 0)
                self.view.tree.SetPyData(item, high)
            for med in self.sorted_tree_items(merged_scans, merged_scans.meds):
                item = self.view.tree.AppendItem(med_hook, str(med), 0)
                self.view.tree.SetPyData(item, med)
            for low in self.sorted_tree_items(merged_scans, merged_scans.lows):
                item = self.view.tree.AppendItem(low_hook, str(low), 0)
                self.view.tree.SetPyData(item, low)
            for other in merged_scans.others:
                item = self.view.tree.AppendItem(other_hook, str(other), 0)
                self.view.tree.SetPyData(item, other)
            self.view.tree.Expand(scans_hook)
def bind_events(self):
# Toolbar events
self.view.Bind(wx.EVT_TOOL, self.load_files, id=ID_Load_Files)
self.view.Bind(wx.EVT_TOOL, self.combine_files, id=ID_Merge_Files)
self.view.Bind(wx.EVT_TOOL, self.generate_csv, id=ID_Generate_CSV)
self.view.Bind(wx.EVT_TOOL, self.generate_vulnxml, id=ID_Generate_VulnXML)
self.view.Bind(wx.EVT_TOOL, self.generate_rst, id=ID_Generate_RST)
# Tree clicking and selections
self.view.tree.Bind(wx.EVT_TREE_SEL_CHANGED, self.on_sel_changed, self.view.tree)
self.view.tree.Bind(wx.EVT_TREE_ITEM_MENU, self.on_right_click, self.view.tree)
# Tab close event - will prevent closing the output tab
self.view.Bind(wx.aui.EVT_AUINOTEBOOK_PAGE_CLOSE, self.on_page_close)
# Menu stuff
self.view.Bind(wx.EVT_MENU, self.load_files, id=wx.ID_OPEN)
self.view.Bind(wx.EVT_MENU, self.extract_results, id=ID_Save_Results)
self.view.Bind(wx.EVT_MENU, self.on_exit, id=wx.ID_EXIT)
self.view.Bind(wx.EVT_MENU, self.on_about, |
# -*- coding: utf-8 -*-
import re
import logging
from completor.utils import check_subseq
from .utils import parse_uri
# First run of word characters (used to pull the word out of a label).
word_pat = re.compile(r'([\d\w]+)', re.U)
# Trailing word at end of input (the word currently being typed).
word_ends = re.compile(r'[\d\w]+$', re.U)
logger = logging.getLogger("completor")
# [
# [{
# u'range': {
# | u'start': {u'line': 273, u'character': 5},
# u'end': {u'line': 273, u'character': 12}
# },
# u'uri': u'file:///home/linuxbrew/.linuxbrew/Cellar/go/1.12.4/libexec/src/fmt/print.go' # noqa
# }]
# ]
def gen_jump_list(ft, name, data):
    """Convert an LSP definition response into a Vim jump/quickfix list.

    *data* looks like [[{'range': ..., 'uri': ...}, ...]]; returns dicts
    with 1-based 'lnum'/'col' (LSP positions are 0-based).
    """
    jumps = []
    if not data or data[0] is None:
        return jumps
    for loc in data[0]:
        path = parse_uri(loc['uri'])
        # gopls percent-encodes '!' in module paths; undo it for Go.
        if ft == 'go':
            path = path.replace('%21', '!')
        begin = loc['range']['start']
        jumps.append({
            'filename': path,
            'lnum': begin['line'] + 1,
            'col': begin['character'] + 1,
            'name': name,
        })
    return jumps
# [
# [
# {
# u'newText': u'',
# u'range': {
# u'start': {u'line': 8, u'character': 0},
# u'end': {u'line': 9, u'character': 0}
# }
# }, {
# u'newText': u'',
# u'range': {
# u'start': {u'line': 9, u'character': 0},
# u'end': {u'line': 10, u'character': 0}
# }
# }, {
# u'newText': u'\tfmt.Println()\n',
# u'range': {
# u'start': {u'line': 10, u'character': 0},
# u'end': {u'line': 10, u'character': 0}
# }
# }, {
# u'newText': u'}\n',
# u'range': {
# u'start': {u'line': 10, u'character': 0},
# u'end': {u'line': 10, u'character': 0}
# }
# }
# ]
# ]
def format_text(data):
    """Apply textDocument/formatting edits.

    NOTE(review): currently a stub -- it iterates data[0] without applying
    any of the newText/range edits (see the example payload above).
    """
    if not data:
        return
    for item in data[0]:
        pass
def get_completion_word(item, insert_text):
    """Extract (word, start_column) from an LSP completion item.

    Prefers textEdit's newText unless insert mode is b'label'; falls back
    to the first word of the label with column -1.
    """
    if insert_text != b'label':
        try:
            edit = item['textEdit']
            return edit['newText'], edit['range']['start']['character']
        except KeyError:
            pass
    match = word_pat.match(item['label'].strip())
    if match:
        return match.groups()[0], -1
    return '', -1
# Hover-doc lines containing any of these markers are dropped from display.
hiddenLines = ["on pkg.go.dev"]
# Backslash escapes inserted by the markdown renderer; stripped for display.
escapes = re.compile(r'''\\([\\\x60*{}[\]()#+\-.!_>~|"$%&'\/:;<=?@^])''',
                     re.UNICODE)
# Filetypes whose hover docs get the escape/whitespace cleanup.
escape_types = ['go', 'json']
def _shouldHidden(line):
    """Return True when *line* contains any marker from hiddenLines."""
    return any(marker in line for marker in hiddenLines)
def gen_hover_doc(ft, value):
    """Clean up hover documentation text for filetype *ft*.

    Filetypes outside escape_types pass through unchanged; otherwise
    hidden lines are dropped and markdown escapes stripped.
    """
    if ft not in escape_types:
        return value
    lines = []
    for l in value.split("\n"):
        if _shouldHidden(l):
            continue
        # NOTE(review): the first replace() argument looks like it should be
        # a non-breaking space (U+00A0) normalized to a plain space -- the
        # character may have been mangled; confirm against upstream.
        lines.append(escapes.sub(r"\1", l).replace(' ', ' '))
    return "\n".join(lines)
def filter_items(items, input_data):
    """Filter completion *items* against the trailing word of *input_data*.

    Items are scored by check_subseq and returned sorted by score
    (stable sort, best score first); with no trailing word all items pass.
    """
    match = word_ends.search(input_data)
    target = match.group() if match else ''
    if not target:
        return items
    scored = []
    for entry in items:
        score = check_subseq(target, entry[1])
        if score is None:
            continue
        scored.append((entry, score))
    scored.sort(key=lambda pair: pair[1])
    return [entry for entry, _ in scored]
|
from ctypes import POINTER
from ctypes import c_long
from ctypes import c_uint32
from ctypes impor | t c_void_p
# Core Foundation scalar typedefs.
CFIndex = c_long
CFStringEncoding = c_uint32
# Opaque CF object handles, all represented as void pointers.
CFString = c_void_p
CFArray = c_void_p
CFDictionary = c_void_p
CFError = c_void_p
CFType = c_void_p
CFAllocatorRef = c_void_p
# NOTE(review): CF "Ref" types are themselves opaque pointers; declaring
# them as POINTER(c_void_p) adds an extra indirection level -- confirm
# against the ctypes prototypes these are used with.
CFStringRef = POINTER(CFString)
CFArrayRef = POINTER(CFArray)
CFDictionaryRef = POINTER(CFDictionary)
CFErrorRef = POINTER(CFError)
CFTypeRef = POINTER(CFType)
# kCFStringEncodingUTF8 from CFString.h.
kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
# CGWindowList options and the null window id (CoreGraphics constants).
kCGWindowListOptionAll = 0
kCGWindowListOptionOnScreenOnly = (1 << 0)
kCGNullWindowID = 0
|
#!/usr/bin/env python
from numpy import array, dtype, int32

# Paths to the example training/test data shipped with Shogun.
traindat = '../data/fm_train_real.dat'
testdat = '../data/fm_test_real.dat'
label_traindat = '../data/label_train_multiclass.dat'

# set both input attributes as continuous i.e. 2
feattypes = array([2, 2],dtype=int32)

parameter_list = [[traindat,testdat,label_traindat,feattypes]]

def multiclass_chaidtree_modular(train=traindat,test=testdat,labels=label_traindat,ft=feattypes):
    """Train a CHAID tree on multiclass data and classify the test set.

    Returns (tree, predicted_labels); returns None when the Shogun
    modules are not importable.
    """
    try:
        from modshogun import RealFeatures, MulticlassLabels, CSVFile, CHAIDTree
    except ImportError:
        print("Could not import Shogun modules")
        return

    # wrap features and labels into Shogun objects
    feats_train=RealFeatures(CSVFile(train))
    feats_test=RealFeatures(CSVFile(test))
    train_labels=MulticlassLabels(CSVFile(labels))

    # CHAID tree with nominal dependent variable (0) and max 10 bins.
    # Bug fix: use the 'ft' parameter here -- it was previously ignored in
    # favour of the module-level 'feattypes' global.
    c=CHAIDTree(0,ft,10)
    c.set_labels(train_labels)
    c.train(feats_train)

    # Classify test data
    output=c.apply_multiclass(feats_test).get_labels()
    return c,output

if __name__=='__main__':
    print('CHAIDTree')
    multiclass_chaidtree_modular(*parameter_list[0])
|
# Multi-test-case solution read from stdin: one line per case with
# integers A, B, C, D; prints "Yes"/"No" per case.
for _ in range(int(input())):
    A, B, C, D = map(int, input().split())
    # Immediate failure cases for the problem's bounds.
    if A < B or C + D < B:
        print("No")
        continue
    elif C >= B - 1:
        # Every residue can be topped up, so success is guaranteed.
        print("Yes")
        continue
    # Simulate: reduce mod B, top up by D while the residue is <= C.
    # Revisiting a residue means the process cycles forever -> "Yes".
    ret = []       # NOTE(review): trace list printed with the verdict --
    s_set = set()  # looks like leftover debugging output; confirm.
    now = A
    while True:
        now %= B
        if now in s_set:
            print("Yes", ret)
            break
        else:
            s_set.add(now)
            if now <= C:
                now += D
                ret.append(now)
            else:
                print("No", ret)
                break
|
import pytest
import pwny
# Anonymous-architecture targets pinned to each byte order, used to
# exercise the target= parameter of pack/unpack.
target_little_endian = pwny.Target(arch=pwny.Target.Arch.unknown, endian=pwny.Target.Endian.little)
target_big_endian = pwny.Target(arch=pwny.Target.Arch.unknown, endian=pwny.Target.Endian.big)
def test_pack():
    """pack() defaults to little-endian byte order."""
    packed = pwny.pack('I', 0x41424344)
    assert packed == b'DCBA'
def test_pack_format_with_endian():
    """A '>' prefix in the format string forces big-endian."""
    packed = pwny.pack('>I', 0x41424344)
    assert packed == b'ABCD'
def test_pack_explicit_endian():
    """The endian= keyword overrides the default byte order."""
    packed = pwny.pack('I', 0x41424344, endian=pwny.Target.Endian.big)
    assert packed == b'ABCD'
def test_pack_explicit_target():
    """A big-endian target= determines the byte order."""
    packed = pwny.pack('I', 0x41424344, target=target_big_endian)
    assert packed == b'ABCD'
@pytest.mark.xfail(raises=NotImplementedError)
def test_pack_invalid_endian():
    """An unknown endianness value must raise NotImplementedError."""
    pwny.pack('I', 1, endian='invalid')
def test_unpack():
    """unpack() defaults to little-endian byte order."""
    values = pwny.unpack('I', b'DCBA')
    assert values == (0x41424344,)
def test_unpack_format_with_endian():
    """A '>' prefix in the format string forces big-endian unpacking."""
    values = pwny.unpack('>I', b'ABCD')
    assert values == (0x41424344,)
def test_unpack_explicit_endian():
    """The endian= keyword overrides the default byte order."""
    values = pwny.unpack('I', b'ABCD', endian=pwny.Target.Endian.big)
    assert values == (0x41424344,)
def test_unpack_explicit_target():
    """A big-endian target= determines the byte order for unpacking."""
    values = pwny.unpack('I', b'ABCD', target=target_big_endian)
    assert values == (0x41424344,)
@pytest.mark.xfail(raises=NotImplementedError)
def test_unpack_invalid_endian():
    """An unknown endianness value must raise NotImplementedError."""
    pwny.unpack('I', 'AAAA', endian='invalid')
def test_pack_size():
    """pack_size sums raw field widths: 'b' (1) + 'q' (8) == 9, no padding."""
    assert pwny.pack_size('bq') == 9
# (bit width, value, big-endian byte pattern) triples; little-endian
# expectations are derived by reversing the byte string.
short_signed_data = [
    [8, -0x7f, b'\x81'],
    [16, -0x7fff, b'\x80\x01'],
    [32, -0x7fffffff, b'\x80\x00\x00\x01'],
    [64, -0x7fffffffffffffff, b'\x80\x00\x00\x00\x00\x00\x00\x01'],
]
short_unsigned_data = [
    [8, 0x61, b'a'],
    [16, 0x6162, b'ab'],
    [32, 0x61626364, b'abcd'],
    [64, 0x6162636465666768, b'abcdefgh'],
]
def test_short_form_pack():
    """Check p8/p16/p32/p64 and P8/P16/P32/P64 against known patterns.

    Converted from nose-style yield tests (removed in pytest 4) to direct
    calls so every case actually executes under modern pytest.
    """
    for width, num, bytestr in short_signed_data:
        f = 'p%d' % width
        check_short_form_pack(f, num, bytestr[::-1])
        check_short_form_pack_endian(f, num, bytestr[::-1], pwny.Target.Endian.little)
        check_short_form_pack_endian(f, num, bytestr, pwny.Target.Endian.big)
    for width, num, bytestr in short_unsigned_data:
        f = 'P%d' % width
        check_short_form_pack(f, num, bytestr[::-1])
        check_short_form_pack_endian(f, num, bytestr[::-1], pwny.Target.Endian.little)
        check_short_form_pack_endian(f, num, bytestr, pwny.Target.Endian.big)
def test_short_form_unpack():
    """Check u8/u16/u32/u64 and U8/U16/U32/U64 against known patterns.

    Converted from nose-style yield tests (removed in pytest 4) to direct
    calls so every case actually executes under modern pytest.
    """
    for width, num, bytestr in short_signed_data:
        f = 'u%d' % width
        check_short_form_unpack(f, num, bytestr[::-1])
        check_short_form_unpack_endian(f, num, bytestr[::-1], pwny.Target.Endian.little)
        check_short_form_unpack_endian(f, num, bytestr, pwny.Target.Endian.big)
    for width, num, bytestr in short_unsigned_data:
        f = 'U%d' % width
        check_short_form_unpack(f, num, bytestr[::-1])
        check_short_form_unpack_endian(f, num, bytestr[::-1], pwny.Target.Endian.little)
        check_short_form_unpack_endian(f, num, bytestr, pwny.Target.Endian.big)
def test_pointer_pack():
    """Check pointer-width p/P packing in both byte orders.

    Converted from nose-style yield tests (removed in pytest 4) to
    direct calls so the cases actually execute under modern pytest.
    """
    check_short_form_pack('p', -66052, b'\xfc\xfd\xfe\xff')
    check_short_form_pack_endian('p', -66052, b'\xfc\xfd\xfe\xff', pwny.Target.Endian.little)
    check_short_form_pack_endian('p', -66052, b'\xff\xfe\xfd\xfc', pwny.Target.Endian.big)
    check_short_form_pack('P', 4294901244, b'\xfc\xfd\xfe\xff')
    check_short_form_pack_endian('P', 4294901244, b'\xfc\xfd\xfe\xff', pwny.Target.Endian.little)
    check_short_form_pack_endian('P', 4294901244, b'\xff\xfe\xfd\xfc', pwny.Target.Endian.big)
def test_pointer_unpack():
    """Check pointer-width u/U unpacking in both byte orders.

    Converted from nose-style yield tests (removed in pytest 4) to
    direct calls so the cases actually execute under modern pytest.
    """
    check_short_form_unpack('u', -66052, b'\xfc\xfd\xfe\xff')
    check_short_form_unpack_endian('u', -66052, b'\xfc\xfd\xfe\xff', pwny.Target.Endian.little)
    check_short_form_unpack_endian('u', -66052, b'\xff\xfe\xfd\xfc', pwny.Target.Endian.big)
    check_short_form_unpack('U', 4294901244, b'\xfc\xfd\xfe\xff')
    check_short_form_unpack_endian('U', 4294901244, b'\xfc\xfd\xfe\xff', pwny.Target.Endian.little)
    check_short_form_unpack_endian('U', 4294901244, b'\xff\xfe\xfd\xfc', pwny.Target.Endian.big)
def check_short_form_pack(f, num, bytestr):
    """Assert that the short-form packer named *f* packs *num* to *bytestr*."""
    packer = getattr(pwny, f)
    assert packer(num) == bytestr
def check_short_form_pack_endian(f, num, bytestr, endian):
    """Assert that packer *f* with explicit *endian* packs *num* to *bytestr*."""
    packer = getattr(pwny, f)
    assert packer(num, endian=endian) == bytestr
def check_short_form_unpack(f, num, bytestr):
    """Assert that the short-form unpacker *f* reads *num* from *bytestr*."""
    unpacker = getattr(pwny, f)
    assert unpacker(bytestr) == num
def check_short_form_unpack_endian(f, num, bytestr, endian):
    """Assert that unpacker *f* with explicit *endian* reads *num* from *bytestr*."""
    unpacker = getattr(pwny, f)
    assert unpacker(bytestr, endian=endian) == num
|
from django.conf.urls import include, url
from django.views.generic import TemplateView
from kuma.attachments.feeds import AttachmentsFeed
from kuma.attachments.views import edit_attachment
from . import feeds, views
from .constants import DOCUMENT_PATH_RE
# These patterns inherit (?P<document_path>[^\$]+).
# URLs scoped to a single wiki document; '$'-prefixed suffixes select the
# action (history, edit, translate, ...) on that document.
document_patterns = [
    url(r'^$',
        views.document.document,
        name='wiki.document'),
    url(r'^\$revision/(?P<revision_id>\d+)$',
        views.revision.revision,
        name='wiki.revision'),
    url(r'^\$history$',
        views.list.revisions,
        name='wiki.document_revisions'),
    url(r'^\$edit$',
        views.edit.edit,
        name='wiki.edit'),
    url(r'^\$files$',
        edit_attachment,
        name='attachments.edit_attachment'),
    url(r'^\$edit/(?P<revision_id>\d+)$',
        views.edit.edit,
        name='wiki.new_revision_based_on'),
    url(r'^\$compare$',
        views.revision.compare,
        name='wiki.compare_revisions'),
    url(r'^\$children$',
        views.document.children,
        name='wiki.children'),
    url(r'^\$translate$',
        views.translate.translate,
        name='wiki.translate'),
    url(r'^\$locales$',
        views.translate.select_locale,
        name='wiki.select_locale'),
    url(r'^\$json$',
        views.document.as_json,
        name='wiki.json_slug'),
    url(r'^\$styles$',
        views.document.styles,
        name='wiki.styles'),
    url(r'^\$toc$',
        views.document.toc,
        name='wiki.toc'),
    url(r'^\$move$',
        views.document.move,
        name='wiki.move'),
    url(r'^\$quick-review$',
        views.revision.quick_review,
        name='wiki.quick_review'),
    url(r'^\$samples/(?P<sample_name>.+)/files/(?P<attachment_id>\d+)/(?P<filename>.+)$',
        views.code.raw_code_sample_file,
        name='wiki.raw_code_sample_file'),
    url(r'^\$samples/(?P<sample_name>.+)$',
        views.code.code_sample,
        name='wiki.code_sample'),
    url(r'^\$revert/(?P<revision_id>\d+)$',
        views.delete.revert_document,
        name='wiki.revert_document'),
    url(r'^\$repair_breadcrumbs$',
        views.document.repair_breadcrumbs,
        name='wiki.repair_breadcrumbs'),
    url(r'^\$delete$',
        views.delete.delete_document,
        name='wiki.delete_document'),
    url(r'^\$restore$',
        views.delete.restore_document,
        name='wiki.restore_document'),
    url(r'^\$purge$',
        views.delete.purge_document,
        name='wiki.purge_document'),
    # Un/Subscribe to document edit notifications.
    url(r'^\$subscribe$',
        views.document.subscribe,
        name='wiki.subscribe'),
    # Un/Subscribe to document tree edit notifications.
    url(r'^\$subscribe_to_tree$',
        views.document.subscribe_to_tree,
        name='wiki.subscribe_to_tree'),
]
# Top-level wiki URLs; the final pattern mounts document_patterns under
# a captured document path.
urlpatterns = [
    url(r'^/ckeditor_config.js$',
        views.misc.ckeditor_config,
        name='wiki.ckeditor_config'),
    # internals
    url(r'^.json$',
        views.document.as_json,
        name='wiki.json'),
    url(r'^/preview-wiki-content$',
        views.revision.preview,
        name='wiki.preview'),
    url(r'^/move-requested$',
        TemplateView.as_view(template_name='wiki/move_requested.html'),
        name='wiki.move_requested'),
    url(r'^/get-documents$',
        views.misc.autosuggest_documents,
        name='wiki.autosuggest_documents'),
    url(r'^/load/$',
        views.misc.load_documents,
        name='wiki.load_documents'),
    # Special pages
    url(r'^/templates$',
        views.list.templates,
        name='wiki.list_templates'),
    url(r'^/tags$',
        views.list.tags,
        name='wiki.list_tags'),
    url(r'^/tag/(?P<tag>.+)$',
        views.list.documents,
        name='wiki.tag'),
    url(r'^/new$',
        views.create.create,
        name='wiki.create'),
    url(r'^/all$',
        views.list.documents,
        name='wiki.all_documents'),
    url(r'^/with-errors$',
        views.list.with_errors,
        name='wiki.errors'),
    url(r'^/without-parent$',
        views.list.without_parent,
        name='wiki.without_parent'),
    url(r'^/top-level$',
        views.list.top_level,
        name='wiki.top_level'),
    url(r'^/needs-review/(?P<tag>[^/]+)$',
        views.list.needs_review,
        name='wiki.list_review_tag'),
    url(r'^/needs-review/?',
        views.list.needs_review,
        name='wiki.list_review'),
    url(r'^/localization-tag/(?P<tag>[^/]+)$',
        views.list.with_localization_tag,
        name='wiki.list_with_localization_tag'),
    url(r'^/localization-tag/?',
        views.list.with_localization_tag,
        name='wiki.list_with_localization_tags'),
    # Akismet Revision
    url(r'^/submit_akismet_spam$',
        views.akismet_revision.submit_akismet_spam,
        name='wiki.submit_akismet_spam'),
    # Feeds
    url(r'^/feeds/(?P<format>[^/]+)/all/?',
        feeds.DocumentsRecentFeed(),
        name="wiki.feeds.recent_documents"),
    url(r'^/feeds/(?P<format>[^/]+)/l10n-updates/?',
        feeds.DocumentsUpdatedTranslationParentFeed(),
        name="wiki.feeds.l10n_updates"),
    url(r'^/feeds/(?P<format>[^/]+)/tag/(?P<tag>[^/]+)',
        feeds.DocumentsRecentFeed(),
        name="wiki.feeds.recent_documents"),
    url(r'^/feeds/(?P<format>[^/]+)/needs-review/(?P<tag>[^/]+)',
        feeds.DocumentsReviewFeed(),
        name="wiki.feeds.list_review_tag"),
    url(r'^/feeds/(?P<format>[^/]+)/needs-review/?',
        feeds.DocumentsReviewFeed(),
        name="wiki.feeds.list_review"),
    url(r'^/feeds/(?P<format>[^/]+)/revisions/?',
        feeds.RevisionsFeed(),
        name="wiki.feeds.recent_revisions"),
    url(r'^/feeds/(?P<format>[^/]+)/files/?',
        AttachmentsFeed(),
        name="attachments.feeds.recent_files"),
    # Everything else is a document path; delegate to document_patterns.
    url(r'^/(?P<document_path>%s)' % DOCUMENT_PATH_RE.pattern,
        include(document_patterns)),
]
|
#!/usr/bin/env python
from datetime import timedelta
import numpy as np
from opendrift.readers import reader_basemap_landmask
from opendrift.readers import reader_netCDF_CF_generic
from opendrift.models.oceandrift import OceanDrift
# Drift demo: NorKyst-800 surface currents plus a Basemap landmask,
# re-seeding a 10x10 particle grid every six hours, then animating.
o = OceanDrift(loglevel=0)  # loglevel 0 -> full debug output
reader_norkyst = reader_netCDF_CF_generic.Reader(
    o.test_data_folder() +
    '16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc')
# Landmask (Basemap)
reader_basemap = reader_basemap_landmask.Reader(
    llcrnrlon=4.0, llcrnrlat=59.9,
    urcrnrlon=5.5, urcrnrlat=61.2,
    resolution='h', projection='merc')
o.add_reader([reader_basemap, reader_norkyst])
# Build a flattened 10x10 grid of seeding positions.
lons, lats = np.meshgrid(np.linspace(4.4, 4.6, 10),
                         np.linspace(60.0, 60.1, 10))
lons = lons.ravel()
lats = lats.ravel()
# Seed oil elements on the grid at a regular time interval.
start_time = reader_norkyst.start_time
time_step = timedelta(hours=6)
num_steps = 10
for step_index in range(num_steps + 1):
    o.seed_elements(lons, lats, radius=0, number=100,
                    time=start_time + step_index * time_step)
# Running model (until end of driver data)
o.run(steps=66*4, time_step=900)
# Print and plot results
print(o)
o.animation()
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name': 'Invoices Reference',
'version': '1.0',
'author': 'Camptocamp',
'maintainer': 'Camptocamp',
'license': 'AGPL-3',
'category': 'category',
'complexity': "easy",
'depends': ['account',
],
'description': """
Invoices Reference
==================
Aims to simplify the "references" fields on the invoices.
We observed difficulties for the users to file the references (name,
origin, free reference) and above all, to understand which field will be
copied in the reference field of the move and move lines.
The approach here is to state simple rules with one concern: consistency.
The reference of the move lines must be the number of the document at their very
origin (number of a sales order, of an external document like a supplier
invoice, ...). The goal is for the accountant to be able to trace to the
source document from a ledger).
The description of a line should always be... well, a description. Not a number
or a cryptic reference.
It particularly fits with other modules of the bank-statement-reconcile series
as account_advanced_reconcile_transaction_ref.
Fields
------
Enumerating the information we need in an invoice, we find that the
mandatory fields are:
* Invoice Number
* Description
* Internal Reference ("our reference")
* External Reference ("customer or supplier reference")
* Optionally, a technical transaction reference (credit card payment gateways,
SEPA, ...)
Now, on the move lines:
* Name
* Reference
* Optionally, a technical transaction reference (added by the module
`base_transaction_id`)
Let's see how the information will be organized with this module.
Customers Invoices / Refunds
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------+-----------------+------------------------------+
| Information | Invoice field | Instead of (in base modules) |
+=================+=================+==============================+
| Invoice number | Invoice number | Invoice number |
+-----------------+-----------------+------------------------------+
| Description | Name | -- |
+-----------------+-----------------+------------------------------+
| Internal Ref | Origin | Origin |
+-----------------+-----------------+------------------------------+
| External Ref | Reference | Name |
+-----------------+-----------------+------------------------------+
Information propagated to the move lines:
+-----------------+------------------------------------+
| Move line field | Invoice field |
+=================+====================================+
| Description | Name |
+-----------------+------------------------------------+
| Reference | Origin, or Invoice number if empty |
+-----------------+------------------------------------+
Supplier Invoices / Refunds
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Supplier invoices have an additional field `supplier_invoice_number`
that we consider as redundant with the reference field. This field is kept
and even set as mandatory, while the | reference field is hidden.
+-----------------+-----------------+------------------------------ | +
| Information | Invoice field | Instead of (in base modules) |
+=================+=================+==============================+
| Invoice number | Invoice number | Invoice number |
+-----------------+-----------------+------------------------------+
| Description | Name | -- |
+-----------------+-----------------+------------------------------+
| Internal Ref | Origin | Origin |
+-----------------+-----------------+------------------------------+
| External Ref | Supplier number | Supplier number |
+-----------------+-----------------+------------------------------+
The reference field is hidden when the reference type is "free reference",
because it is already filed in the Supplier invoice number.
Information propagated to the move lines:
+-----------------+---------------------------------------------+
| Move line field | Invoice field |
+=================+=============================================+
| Description | Name |
+-----------------+---------------------------------------------+
| Reference | Supplier number, or Invoice number if empty |
+-----------------+---------------------------------------------+
""",
'website': 'http://www.camptocamp.com',
'data': ['account_invoice_view.xml',
],
'test': ['test/out_invoice_with_origin.yml',
'test/out_invoice_without_origin.yml',
'test/in_invoice_with_supplier_number.yml',
'test/in_invoice_without_supplier_number.yml',
'test/out_refund_with_origin.yml',
'test/out_refund_without_origin.yml',
'test/in_refund_with_supplier_number.yml',
'test/in_refund_without_supplier_number.yml',
],
'installable': False,
'auto_install': False,
}
|
': ['glXGetVisualFromFBConfig'],
'arguments': 'Display* dpy, GLXFBConfig config', },
{ 'return_type': 'GLXWindow',
'names': ['glXCreateWindow'],
'arguments':
'Display* dpy, GLXFBConfig config, Window win, const int* attribList', },
{ 'return_type': 'void',
'names': ['glXDestroyWindow'],
'arguments': 'Display* dpy, GLXWindow window', },
{ 'return_type': 'GLXPixmap',
'names': ['glXCreatePixmap'],
'arguments': 'Display* dpy, GLXFBConfig config, '
'Pixmap pixmap, const int* attribList', },
{ 'return_type': 'void',
'names': ['glXDestroyPixmap'],
'arguments': 'Display* dpy, GLXPixmap pixmap', },
{ 'return_type': 'GLXPbuffer',
'names': ['glXCreatePbuffer'],
'arguments': 'Display* dpy, GLXFBConfig config, const int* attribList', },
{ 'return_type': 'void',
'names': ['glXDestroyPbuffer'],
'arguments': 'Display* dpy, GLXPbuffer pbuf', },
{ 'return_type': 'void',
'names': ['glXQueryDrawable'],
'arguments':
'Display* dpy, GLXDrawable draw, int attribute, unsigned int* value', },
{ 'return_type': 'GLXContext',
'names': ['glXCreateNewContext'],
'arguments': 'Display* dpy, GLXFBConfig config, int renderType, '
'GLXContext shareList, int direct', },
{ 'return_type': 'int',
'names': ['glXMakeContextCurrent'],
'arguments':
'Display* dpy, GLXDrawable draw, GLXDrawable read, GLXContext ctx', },
{ 'return_type': 'GLXDrawable',
'names': ['glXGetCurrentReadDrawable'],
'arguments': 'void', },
{ 'return_type': 'int',
'names': ['glXQueryContext'],
'arguments': 'Display* dpy, GLXContext ctx, int attribute, int* value', },
{ 'return_type': 'void',
'names': ['glXSelectEvent'],
'arguments': 'Display* dpy, GLXDrawable drawable, unsigned long mask', },
{ 'return_type': 'void',
'names': ['glXGetSelectedEvent'],
'arguments': 'Display* dpy, GLXDrawable drawable, unsigned long* mask', },
{ 'return_type': 'void',
'names': ['glXSwapIntervalMESA'],
'arguments': 'unsigned int interval', },
{ 'return_type': 'void',
'names': ['glXSwapIntervalEXT'],
'arguments': 'Display* dpy, GLXDrawable drawable, int interval', },
{ 'return_type': 'GLXFBConfig',
'names': ['glXGetFBConfigFromVis | ualSGIX'],
'arguments': 'Display* dpy, XVisualInfo* visualInfo', },
{ 'return_type': 'GLXContext',
'names': ['glXCreateContextAttribsARB'],
'arguments':
'Display* dpy, GLXFBConfig config, GLXContext share_context, int direct, '
'const int* attrib_list', },
]
FUNCTION_SETS = [
[GL_FUNCTIONS, 'gl', ['../../third_party/m | esa/MesaLib/include/GL/glext.h',
'../../third_party/khronos/GLES2/gl2ext.h'], []],
[OSMESA_FUNCTIONS, 'osmesa', [], []],
[EGL_FUNCTIONS, 'egl', ['../../third_party/khronos/EGL/eglext.h'],
[
'EGL_ANGLE_d3d_share_handle_client_buffer',
'EGL_ANGLE_surface_d3d_texture_2d_share_handle',
],
],
[WGL_FUNCTIONS, 'wgl', [
'../../third_party/mesa/MesaLib/include/GL/wglext.h'], []],
[GLX_FUNCTIONS, 'glx', [
'../../third_party/mesa/MesaLib/include/GL/glx.h',
'../../third_party/mesa/MesaLib/include/GL/glxext.h'], []],
]
def GenerateHeader(file, functions, set_name, used_extension_functions):
  """Generates gl_binding_autogen_x.h for one function set.

  Writes, in order: the fixed file header, function-pointer typedefs,
  extension-availability booleans, function-pointer declarations, the
  namespace close, invocation macros and the include-guard footer.
  """
  write = file.write
  upper_name = set_name.upper()
  # Fixed file header with the set name substituted in.
  write(
"""// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is automatically generated.
#ifndef UI_GFX_GL_GL_BINDINGS_AUTOGEN_%(name)s_H_
#define UI_GFX_GL_GL_BINDINGS_AUTOGEN_%(name)s_H_
namespace gfx {
class GLContext;
void InitializeGLBindings%(name)s();
void InitializeGLExtensionBindings%(name)s(GLContext* context);
void InitializeDebugGLBindings%(name)s();
void ClearGLBindings%(name)s();
""" % {'name': upper_name})
  # Function-pointer typedefs, always keyed on the canonical GL name.
  write('\n')
  for func in functions:
    write('typedef %s (GL_BINDING_CALL *%sProc)(%s);\n'
          % (func['return_type'], func['names'][0], func['arguments']))
  # Booleans indicating which extensions are available.
  write('\n')
  for extension, _ext_functions in used_extension_functions:
    write('GL_EXPORT extern bool g_%s;\n' % extension)
  # Function-pointer declarations, again under the canonical GL name.
  write('\n')
  for func in functions:
    gl_name = func['names'][0]
    write('GL_EXPORT extern %sProc g_%s;\n' % (gl_name, gl_name))
  write('\n')
  write('} // namespace gfx\n')
  # Macros that route each GL call through its function pointer.
  write('\n')
  for func in functions:
    gl_name = func['names'][0]
    write('#define %s ::gfx::g_%s\n' % (gl_name, gl_name))
  write('\n')
  write('#endif // UI_GFX_GL_GL_BINDINGS_AUTOGEN_%s_H_\n' % upper_name)
def GenerateSource(file, functions, set_name, used_extension_functions):
"""Generates gl_binding_autogen_x.cc"""
# Write file header.
file.write(
"""// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is automatically generated.
#include <string>
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_implementation.h"
using gpu::gles2::GLES2Util;
namespace gfx {
""")
# Write definitions for booleans indicating which extensions are available.
for extension, ext_functions in used_extension_functions:
file.write('bool g_%s;\n' % extension)
# Write definitions of function pointers.
file.write('\n')
file.write('static bool g_debugBindingsInitialized;\n')
file.write('static void UpdateDebugGLExtensionBindings();\n')
file.write('\n')
for func in functions:
file.write('%sProc g_%s;\n' % (func['names'][0], func['names'][0]))
file.write('\n')
for func in functions:
file.write('static %sProc g_debug_%s;\n' %
(func['names'][0], func['names'][0]))
# Write function to initialize the core function pointers. The code assumes
# any non-NULL pointer returned by GetGLCoreProcAddress() is valid, although
# it may be overwritten by an extension function pointer later.
file.write('\n')
file.write('void InitializeGLBindings%s() {\n' % set_name.upper())
for func in functions:
first_name = func['names'][0]
for i, name in enumerate(func['names']):
if i:
file.write(' if (!g_%s)\n ' % first_name)
file.write(
' g_%s = reinterpret_cast<%sProc>(GetGLCoreProcAddress("%s"));\n' %
(first_name, first_name, name))
file.write('}\n')
file.write('\n')
# Write function to initialize the extension function pointers. This function
# uses a current context to query which extensions are actually supported.
file.write('void InitializeGLExtensionBindings%s(GLContext* context) {\n' %
set_name.upper())
file.write(' DCHECK(context && context->IsCurrent(NULL));\n')
for extension, ext_functions in used_extension_functions:
file.write(' g_%s = context->HasExtension("%s");\n' %
(extension, extension))
file.write(' if (g_%s) {\n' %
(extension))
queried_entry_points = set()
for entry_point_name, function_name in ext_functions:
# Replace the pointer unconditionally unless this extension has several
# alternatives for the same entry point (e.g.,
# GL_ARB_blend_func_extended).
if entry_point_name in queried_entry_points:
file.write(' if (!g_%s)\n ' % entry_point_name)
file.write(
' g_%s = reinterpret_cast<%sProc>(GetGLProcAddress("%s"));\n' %
(entry_point_name, entry_point_name, function_name))
queried_entry_points.add(entry_point_name)
file.write(' }\n')
file.write(' if (g_debugBindingsInitialized)\n')
file.write(' UpdateDebugGLExtensionBindings();\n')
file.write('}\n')
file.write('\n')
# Write logging wrappers for each function.
file.write('extern "C" {\n')
for func in functi |
import sys
import numpy as np
def check_symmetric(a, tol=1e-8):
    """Return True when matrix ``a`` equals its transpose within ``tol``."""
    return np.allclose(a, np.transpose(a), atol=tol)
# Read one matrix per stdin line and report whether it is symmetric.
for line in sys.stdin:
    # NOTE(review): np.matrix is deprecated in favour of np.array; it is
    # used here because it parses the "a b; c d" string format directly.
    a = np.matrix(line)
    f = check_symmetric(a)
    if not f:
        print("Not symmetric")
    else:
        print("Symmetric")
|
from diofant import (Derivative, Function, Integral, bell, besselj, cos, exp,
legendre, oo, symbols)
from diofant.printing.conventions import requires_partial, split_super_sub
__all__ = ()
def test_super_sub():
    """split_super_sub: extract (name, superscripts, subscripts) from a symbol name."""
    cases = [
        ('beta_13_2', ('beta', [], ['13', '2'])),
        ('beta_132_20', ('beta', [], ['132', '20'])),
        ('beta_13', ('beta', [], ['13'])),
        ('x_a_b', ('x', [], ['a', 'b'])),
        ('x_1_2_3', ('x', [], ['1', '2', '3'])),
        ('x_a_b1', ('x', [], ['a', 'b1'])),
        ('x_a_1', ('x', [], ['a', '1'])),
        ('x_1_a', ('x', [], ['1', 'a'])),
        ('x_1^aa', ('x', ['aa'], ['1'])),
        ('x_1__aa', ('x', ['aa'], ['1'])),
        ('x_11^a', ('x', ['a'], ['11'])),
        ('x_11__a', ('x', ['a'], ['11'])),
        ('x_a_b_c_d', ('x', [], ['a', 'b', 'c', 'd'])),
        ('x_a_b^c^d', ('x', ['c', 'd'], ['a', 'b'])),
        ('x_a_b__c__d', ('x', ['c', 'd'], ['a', 'b'])),
        ('x_a^b_c^d', ('x', ['b', 'd'], ['a', 'c'])),
        ('x_a__b_c__d', ('x', ['b', 'd'], ['a', 'c'])),
        ('x^a^b_c_d', ('x', ['a', 'b'], ['c', 'd'])),
        ('x__a__b_c_d', ('x', ['a', 'b'], ['c', 'd'])),
        ('x^a^b^c^d', ('x', ['a', 'b', 'c', 'd'], [])),
        ('x__a__b__c__d', ('x', ['a', 'b', 'c', 'd'], [])),
        ('alpha_11', ('alpha', [], ['11'])),
        ('alpha_11_11', ('alpha', [], ['11', '11'])),
        ('', ('', [], [])),
    ]
    # Both '^' and '__' denote superscripts; '_' denotes subscripts.
    for text, expected in cases:
        assert split_super_sub(text) == expected
def test_requires_partial():
    """requires_partial: should a derivative print with the partial symbol?"""
    x, y, z, t, nu = symbols('x y z t nu')
    n = symbols('n', integer=True)
    # multivariate expression: partial symbol needed for either variable
    f = x * y
    assert requires_partial(Derivative(f, x)) is True
    assert requires_partial(Derivative(f, y)) is True
    # integrating out one of the variables
    assert requires_partial(Derivative(Integral(exp(-x * y), (x, 0, oo)), y, evaluate=False)) is False
    # bessel function with smooth parameter
    f = besselj(nu, x)
    assert requires_partial(Derivative(f, x)) is True
    assert requires_partial(Derivative(f, nu)) is True
    # bessel function with integer parameter
    f = besselj(n, x)
    assert requires_partial(Derivative(f, x)) is False
    # this is not really valid (differentiating with respect to an integer)
    # but there's no reason to use the partial derivative symbol there. make
    # sure we don't throw an exception here, though
    assert requires_partial(Derivative(f, n)) is False
    # bell polynomial
    f = bell(n, x)
    assert requires_partial(Derivative(f, x)) is False
    # again, invalid
    assert requires_partial(Derivative(f, n)) is False
    # legendre polynomial
    f = legendre(0, x)
    assert requires_partial(Derivative(f, x)) is False
    f = legendre(n, x)
    assert requires_partial(Derivative(f, x)) is False
    # again, invalid
    assert requires_partial(Derivative(f, n)) is False
    # power: single free continuous variable
    f = x ** n
    assert requires_partial(Derivative(f, x)) is False
    assert requires_partial(Derivative(Integral((x*y) ** n * exp(-x * y), (x, 0, oo)), y, evaluate=False)) is False
    # parametric equation
    f = (exp(t), cos(t))
    g = sum(f)
    assert requires_partial(Derivative(g, t)) is False
    # function of unspecified variables
    f = symbols('f', cls=Function)
    assert requires_partial(Derivative(f, x)) is False
    assert requires_partial(Derivative(f, x, y)) is True
|
#---------------------------------------------------------------------------
# Introdução a Programação de Computadores - IPC
# Universidade do Estado do Amazonas - UEA
# Prof. Jucimar Jr
# Gabriel de Queiroz Sousa 1715310044
# Lucas Gabriel Silveira Duarte 1715310053
# Matheus de Oliveira Marques 1515310514
# Rodrigo Duarte de Souza 1115140049
#
# Leet é uma forma de se escrever o alfabeto latino usando outros símbolos em lugar das letras,
# como números por exemplo. A própria pal | avra leet admite muitas variações, como l33t ou 1337.
# O uso do leet reflete uma subcultura relacionada ao mundo dos jogos de computador e internet,
# sendo muito usada para confundir os iniciantes e afirmar-se como parte de um grupo. Pesquise
# sobre as principais formas de traduzir as letras. Depois, faça um programa que peça uma texto
# e transforme-o para a grafia leet speak.
#---------------------------------------------------------------------------
# Leet-speak substitution table; each (plain, leet) pair is applied in order.
leet = (('a', '4'), ('l', '1'), ('e', '3'), ('s', '5'), ('g', '6'), ('r', '12'), ('t', '7'), ('q', '9'))
sring = input("Informe palavra = ")
print("Inicialmente: ", sring)
# Apply every substitution to a working copy of the input.
nova = sring
for plain_char, leet_char in leet:
    nova = nova.replace(plain_char, leet_char)
print("Finalmente = ", nova)
alityChanges] = self._empty_quality_changes
self._containers[_ContainerIndexes.Quality] = self._empty_quality
self._containers[_ContainerIndexes.Material] = self._empty_material
self._containers[_ContainerIndexes.Variant] = self._empty_variant
self.containersChanged.connect(self._onContainersChanged)
import cura.CuraApplication #Here to prevent circular imports.
self.setMetaDataEntry("setting_version", cura.CuraApplication.CuraApplication.SettingVersion)
# Qt-visible signal mirroring ContainerStack.containersChanged (the base
# signal is wired to _onContainersChanged in __init__).
pyqtContainersChanged = pyqtSignal()
## Set the user changes container.
#
# \param new_user_changes The new user changes container. It is expected to have a "type" metadata entry with the value "user".
def setUserChanges(self, new_user_changes: InstanceContainer) -> None:
    """Replace the UserChanges slot of the stack with ``new_user_changes``."""
    self.replaceContainer(_ContainerIndexes.UserChanges, new_user_changes)
## Get the user changes container.
#
# \return The user changes container. Should always be a valid container, but can be equal to the empty InstanceContainer.
@pyqtProperty(InstanceContainer, fset = setUserChanges, notify = pyqtContainersChanged)
def userChanges(self) -> InstanceContainer:
    """The container currently in the stack's UserChanges slot."""
    return cast(InstanceContainer, self._containers[_ContainerIndexes.UserChanges])
## Set the quality changes container.
#
# \param new_quality_changes The new quality changes container. It is expected to have a "type" metadata entry with the value "quality_changes".
def setQualityChanges(self, new_quality_changes: InstanceContainer, postpone_emit = False) -> None:
    """Replace the QualityChanges slot; ``postpone_emit`` is forwarded to replaceContainer."""
    self.replaceContainer(_ContainerIndexes.QualityChanges, new_quality_changes, postpone_emit = postpone_emit)
## Get the quality changes container.
#
# \return The quality changes container. Should always be a valid container, but can be equal to the empty InstanceContainer.
@pyqtProperty(InstanceContainer, fset = setQualityChanges, notify = pyqtContainersChanged)
def qualityChanges(self) -> InstanceContainer:
    """The container currently in the stack's QualityChanges slot."""
    return cast(InstanceContainer, self._containers[_ContainerIndexes.QualityChanges])
## Set the quality container.
#
# \param new_quality The new quality container. It is expected to have a "type" metadata entry with the value "quality".
def setQuality(self, new_quality: InstanceContainer, postpone_emit: bool = False) -> None:
    """Replace the Quality slot; ``postpone_emit`` is forwarded to replaceContainer."""
    self.replaceContainer(_ContainerIndexes.Quality, new_quality, postpone_emit = postpone_emit)
## Get the quality container.
#
# \return The quality container. Should always be a valid container, but can be equal to the empty InstanceContainer.
@pyqtProperty(InstanceContainer, fset = setQuality, notify = pyqtContainersChanged)
def quality(self) -> InstanceContainer:
    """The container currently in the stack's Quality slot."""
    return cast(InstanceContainer, self._containers[_ContainerIndexes.Quality])
## Set the material container.
#
# \param new_material The new material container. It is expected to have a "type" metadata entry with the value "material".
def setMaterial(self, new_material: InstanceContainer, postpone_emit: bool = False) -> None:
    """Replace the Material slot; ``postpone_emit`` is forwarded to replaceContainer."""
    self.replaceContainer(_ContainerIndexes.Material, new_material, postpone_emit = postpone_emit)
## Get the material container.
#
# \return The material container. Should always be a valid container, but can be equal to the empty InstanceContainer.
@pyqtProperty(InstanceContainer, fset = setMaterial, notify = pyqtContainersChanged)
def material(self) -> InstanceContainer:
    """The container currently in the stack's Material slot."""
    return cast(InstanceContainer, self._containers[_ContainerIndexes.Material])
## Set the variant container.
#
# \param new_variant The new variant container. It is expected to have a "type" metadata entry with the value "variant".
def setVariant(self, new_variant: InstanceContainer) -> None:
    """Replace the Variant slot of the stack with ``new_variant``."""
    self.replaceContainer(_ContainerIndexes.Variant, new_variant)
## Get the variant container.
#
# \return The variant container. Should always be a valid container, but can be equal to the empty InstanceContainer.
@pyqtProperty(InstanceContainer, fset = setVariant, notify = pyqtContainersChanged)
def variant(self) -> InstanceContainer:
    """The container currently in the stack's Variant slot."""
    return cast(InstanceContainer, self._containers[_ContainerIndexes.Variant])
## Set the definition changes container.
#
# \param new_definition_changes The new definition changes container. It is expected to have a "type" metadata entry with the value "definition_changes".
def setDefinitionChanges(self, new_definition_changes: InstanceContainer) -> None:
    """Replace the DefinitionChanges slot of the stack."""
    self.replaceContainer(_ContainerIndexes.DefinitionChanges, new_definition_changes)
## Get the definition changes container.
#
# \return The definition changes container. Should always be a valid container, but can be equal to the empty InstanceContainer.
@pyqtProperty(InstanceContainer, fset = setDefinitionChanges, notify = pyqtContainersChanged)
def definitionChanges(self) -> InstanceContainer:
    """The container currently in the stack's DefinitionChanges slot."""
    return cast(InstanceContainer, self._containers[_ContainerIndexes.DefinitionChanges])
## Set the definition container.
#
# \param new_definition The new definition container. It is expected to have a "type" metadata entry with the value "definition".
def setDefinition(self, new_definition: DefinitionContainerInterface) -> None:
    """Replace the Definition slot of the stack with ``new_definition``."""
    self.replaceContainer(_ContainerIndexes.Definition, new_definition)
def | getDefinition(self) -> "DefinitionContainer":
return cast(DefinitionContainer, self._containers[_ContainerIndexes.Definition])
definition = pyqtProperty(QObject, fget = getDefinition, fset = setDefinition, notify = pyqtContainersChanged)
@override(ContainerStack)
def getBottom(self) -> "DefinitionContainer":
    """The bottom of the stack: the definition container."""
    return self.definition
@override(ContainerStack)
def getTop(self) -> "InstanceContainer":
    """The top of the stack: the user changes container."""
    return self.userChanges
## Check whether the specified setting has a 'user' value.
#
# A user value here is defined as the setting having a value in either
# the UserChanges or QualityChanges container.
#
# \return True if the setting has a user value, False if not.
@pyqtSlot(str, result = bool)
def hasUserValue(self, key: str) -> bool:
    """Whether ``key`` has a "value" in the UserChanges or QualityChanges container."""
    user_level_slots = (_ContainerIndexes.UserChanges, _ContainerIndexes.QualityChanges)
    # any() short-circuits in the same order the original if-chain checked.
    return any(self._containers[slot].hasProperty(key, "value") for slot in user_level_slots)
## Set a property of a setting.
#
# This will set a property of a specified setting. Since the container stack does not contain
# any settings itself, it is required to specify a container to set the property on. The target
# container is matched by container type.
#
# \param key The key of the setting to set.
# \param property_name The name of the property to set.
# \param new_value The new value to set the property to.
def setProperty(self, key: str, property_name: str, property_value: Any, container: "ContainerInterface" = None, set_from_cache: bool = False) -> None:
    """Set ``property_name`` of setting ``key``; always written into the UserChanges container."""
    container_index = _ContainerIndexes.UserChanges
    self._containers[container_index].setProperty(key, property_name, property_value, container, set_from_cache)
## Overridden from ContainerStack
#
# Since we have a fixed order of containers in the stack and this method would modify the container
# ordering, we disallow this operation.
@override(ContainerStack)
def addContainer(self, container: ContainerInterface) -> None:
    """Disallowed: the stack has a fixed container order, so adding is an error."""
    raise Exceptions.InvalidOperationError("Cannot add a container to Global stack")
## Overridden from ContainerStack
#
# Since we have a fixed order of containers in the stack and this method would modify the container
# ordering, we disallow this operation.
@override(ContainerStack)
def insertContainer(self, index: int, container: ContainerInterface) -> None:
|
import nose
import angr
import logging
l = logging.getLogger("angr_tests.path_groups")
import os
location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests'))
addresses_fauxware = {
'armel': 0x8524,
'armhf': 0x104c9, # addr+1 to force thumb
#'i386': 0x8048524, # commenting out because of the freaking stack check
'mips': 0x400710,
'mipsel': 0x4006d0,
'ppc': 0x1000054c,
'ppc64': 0x10000698,
'x86_64': 0x400664
}
def run_fauxware(arch, threads):
    """Exercise angr path groups on the fauxware binary for ``arch``.

    Covers stepping, stashing, explore(), unstashing and merging.
    ``threads`` is the worker count for the path group (None = single-threaded).
    """
    p = angr.Project(location + '/' + arch + '/fauxware', load_options={'auto_load_libs': False})
    pg = p.factory.path_group(threads=threads)
    nose.tools.assert_equal(len(pg.active), 1)
    nose.tools.assert_equal(pg.active[0].length, 0)
    # step until the backdoor split occurs
    pg2 = pg.step(until=lambda lpg: len(lpg.active) > 1, step_func=lambda lpg: lpg.prune())
    nose.tools.assert_equal(len(pg2.active), 2)
    # exactly one of the two paths should carry the backdoor password on stdin
    nose.tools.assert_true(any("SOSNEAKY" in s for s in pg2.mp_active.state.posix.dumps(0).mp_items))
    nose.tools.assert_false(all("SOSNEAKY" in s for s in pg2.mp_active.state.posix.dumps(0).mp_items))
    # separate out the backdoor and normal paths
    pg3 = pg2.stash(lambda path: "SOSNEAKY" in path.state.posix.dumps(0), to_stash="backdoor").stash_all(to_stash="auth")
    nose.tools.assert_equal(len(pg3.active), 0)
    nose.tools.assert_equal(len(pg3.backdoor), 1)
    nose.tools.assert_equal(len(pg3.auth), 1)
    # step the backdoor path until it returns to main
    pg4 = pg3.step(until=lambda lpg: lpg.backdoor[0].jumpkinds[-1] == 'Ijk_Ret', stash='backdoor')
    main_addr = pg4.backdoor[0].addr
    nose.tools.assert_equal(len(pg4.active), 0)
    nose.tools.assert_equal(len(pg4.backdoor), 1)
    nose.tools.assert_equal(len(pg4.auth), 1)
    # now step the real path until the real authentication paths return to the same place
    pg5 = pg4.explore(find=main_addr, num_find=2, stash='auth').unstash_all(from_stash='found', to_stash='auth')
    nose.tools.assert_equal(len(pg5.active), 0)
    nose.tools.assert_equal(len(pg5.backdoor), 1)
    nose.tools.assert_equal(len(pg5.auth), 2)
    # now unstash everything; all three paths sit at the same address
    pg6 = pg5.unstash_all(from_stash='backdoor').unstash_all(from_stash='auth')
    nose.tools.assert_equal(len(pg6.active), 3)
    nose.tools.assert_equal(len(pg6.backdoor), 0)
    nose.tools.assert_equal(len(pg6.auth), 0)
    nose.tools.assert_equal(len(set(pg6.mp_active.addr.mp_items)), 1)
    # now merge them!
    pg7 = pg6.merge()
    nose.tools.assert_equal(len(pg7.active), 1)
    nose.tools.assert_equal(len(pg7.backdoor), 0)
    nose.tools.assert_equal(len(pg7.auth), 0)
    #import ipdb; ipdb.set_trace()
    #print pg2.mp_active.addr.mp_map(hex).mp_items
    # test selecting paths to step (immutable path groups return new objects)
    pg_a = p.factory.path_group(immutable=True)
    pg_b = pg_a.step(until=lambda lpg: len(lpg.active) > 1, step_func=lambda lpg: lpg.prune().drop(stash='pruned'))
    pg_c = pg_b.step(selector_func=lambda p: p is pg_b.active[0], step_func=lambda lpg: lpg.prune().drop(stash='pruned'))
    nose.tools.assert_is(pg_b.active[1], pg_c.active[0])
    nose.tools.assert_is_not(pg_b.active[0], pg_c.active[1])
    total_active = len(pg_c.active)
    # test special stashes (ALL and DROP)
    nose.tools.assert_equals(len(pg_c.stashed), 0)
    pg_d = pg_c.stash(filter_func=lambda p: p is pg_c.active[1], to_stash='asdf')
    nose.tools.assert_equals(len(pg_d.stashed), 0)
    nose.tools.assert_equals(len(pg_d.asdf), 1)
    nose.tools.assert_equals(len(pg_d.active), total_active-1)
    pg_e = pg_d.stash(from_stash=pg_d.ALL, to_stash='fdsa')
    nose.tools.assert_equals(len(pg_e.asdf), 0)
    nose.tools.assert_equals(len(pg_e.active), 0)
    nose.tools.assert_equals(len(pg_e.fdsa), total_active)
    pg_f = pg_e.stash(from_stash=pg_e.ALL, to_stash=pg_e.DROP)
    nose.tools.assert_true(all(len(s) == 0 for s in pg_f.stashes.values()))
def test_fauxware():
    """Yield (runner, arch, threads) combinations for every architecture."""
    for arch in addresses_fauxware:
        # Exercise both the single-threaded and the two-worker configuration.
        for thread_count in (None, 2):
            yield run_fauxware, arch, thread_count
if __name__ == "__main__":
    # Run every generated test case directly (outside the nose runner).
    for func, march, threads in test_fauxware():
        # print(...) call form is valid under both Python 2 and 3, unlike
        # the original 'print' statement syntax which breaks on Python 3.
        print('testing ' + march)
        func(march, threads)
|
#!/usr/bin/env python3
import os # makedirs
import sys # argv, exit
import csv # DictReader
def cutoffdict(cdict):
    """Return {key: winner} for every key whose top candidate holds a strict
    majority (> 50%) of that key's total votes.

    ``cdict`` maps key -> {candidate: vote_count}.
    """
    winners = dict()
    for key, candidates in cdict.items():
        top_candidate = max(candidates, key=candidates.get)
        # Strict majority required; ties and pluralities are dropped.
        if candidates[top_candidate] > 0.5 * sum(candidates.values()):
            winners[key] = top_candidate
    return winners
def groupbyprefix(src_path):
    """Tally "<district> <city>" votes per ipprefix in ``src_path`` and write
    the strict-majority winners to ./dbdays/<basename of src_path>.

    Input and output are CSV; the output has columns ipprefix/district/city
    and ./dbdays/ must already exist (main() creates it).
    """
    des_path = src_path.split('/')[-1]
    # 'with' closes both handles even on error (the originals leaked).
    with open(src_path, 'r') as src_file, \
            open('./dbdays/' + des_path, 'w') as des_file:
        src_csv = csv.DictReader(src_file)
        des_csv = csv.DictWriter(des_file, fieldnames=[
            'ipprefix', 'district', 'city'])
        des_csv.writeheader()
        cdict = dict()
        for row in src_csv:
            cprefix = row['ipprefix']
            ccity = row['district'] + ' ' + row['city']
            # Bug fix: the original rebuilt the inner dict on every row
            # ({ccity: count}), discarding the counts of all other candidate
            # locations for the prefix and defeating the majority vote in
            # cutoffdict(). Accumulate into the existing inner dict instead.
            candidates = cdict.setdefault(cprefix, dict())
            candidates[ccity] = candidates.get(ccity, 0) + 1
        wdict = cutoffdict(cdict)
        for prefix in wdict.keys():
            # Winner strings are "<district> <city>"; split once so a city
            # containing spaces is not truncated (the original kept only the
            # first city token).
            district, city = wdict[prefix].split(' ', 1)
            des_csv.writerow({'ipprefix': prefix,
                              'district': district,
                              'city': city})
def main(argv):
    """Process every regular, non-hidden file directly inside argv[1].

    Each file is aggregated by groupbyprefix(); results land in ./dbdays/.
    Exits with status 1 when the source directory argument is missing.
    """
    if len(argv) < 2:
        # Grammar fix ('1 argument') and a nonzero exit code: the original
        # exited with status 0 even though no work was done.
        print('We need 1 argument')
        print('.py [SRC]')
        sys.exit(1)
    src_path = argv[1]
    os.makedirs('./dbdays', exist_ok=True)
    sit = os.scandir(src_path)
    for entry in sit:
        if not entry.name.startswith('.') and entry.is_file():
            groupbyprefix(entry.path)

if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
# -*- coding: utf-8 -*-
"""The EWF image path specification implementation."""
from d | fvfs.lib import definitions
from dfvfs.path import factory
from dfvfs.path import path_spec
class EWFPathSpec(path_spec.PathSpec):
    """EWF (Expert Witness Format) image path specification."""

    TYPE_INDICATOR = definitions.TYPE_INDICATOR_EWF

    def __init__(self, parent=None, **kwargs):
        """Initializes a path specification.

        Note that the EWF file path specification must have a parent.

        Args:
            parent (Optional[PathSpec]): parent path specification.

        Raises:
            ValueError: when parent is not set.
        """
        if parent:
            super(EWFPathSpec, self).__init__(parent=parent, **kwargs)
        else:
            raise ValueError('Missing parent value.')
# Register the type with the dfvfs path-spec factory at import time.
factory.Factory.RegisterPathSpec(EWFPathSpec)
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
from django import forms
from django.utils.translation import ugettext_lazy as _
from envelope.forms import ContactForm
class ContactForm(ContactForm):
    # NOTE(review): this class deliberately shadows the imported envelope
    # ContactForm it extends; the super() call below still resolves correctly
    # because the name is rebound to this class before __init__ ever runs.
    template_name = "envelope/contact_email.txt"
    html_template_name = "envelope/contact_email.html"
    # Extra optional contact fields added on top of the envelope base form.
    phone = forms.CharField(label='Teléfono', required=False)
    country = forms.CharField(label='País', required=False)

    def __init__(self, *args, **kwargs):
        super(ContactForm, self).__init__(*args, **kwargs)
        # The sender e-mail is optional in this variant of the form.
        self.fields['email'].required = False
# Re-order the form's fields so they render in this fixed sequence.
ContactForm.base_fields = OrderedDict(
    (k, ContactForm.base_fields[k])
    for k in [
        'sender', 'subject', 'email', 'phone', 'country', 'message',
    ]
)
|
from dj | ango import template
# Template-tag registry for the tags defined in this module.
register = template.Library()
@register.assignment_tag(takes_context=True)
def has_bookmark_permission(context, action):
    """Checks if the current user can bookmark the action item.

    Returns a boolean.

    Syntax::

        {% has_bookmark_permission action %}
    """
    user = context['request'].user
    if not user.is_authenticated():
        return False
    # Targets that require approval may only be bookmarked by users with
    # access to all projects.
    if action.target.approval_required and not user.can_access_all_projects:
        return False
    return True
@register.assignment_tag(takes_context=True)
def get_existing_bookmark(context, action):
    """Return the current user's bookmark for the action's object, or None."""
    user = context['request'].user
    if not user.is_authenticated():
        return None
    return user.bookmark_set.filter(
        object_pk=action.action_object.pk,
        content_type=action.action_object_content_type,
    ).first()
|
# FIX: the line was truncated to "s import stdout", which is a syntax error;
# stdout is used by Dendrogram.show()'s display helper below.
from sys import stdout
from math import sqrt
try:
import numpy
except ImportError:
pass
from nltk.cluster.api import ClusterI
from nltk.compat import python_2_unicode_compatible
class VectorSpaceClusterer(ClusterI):
    """
    Abstract clusterer which takes tokens and maps them into a vector space.
    Optionally performs singular value decomposition to reduce the
    dimensionality.
    """

    def __init__(self, normalise=False, svd_dimensions=None):
        """
        :param normalise: should vectors be normalised to length 1
        :type normalise: boolean
        :param svd_dimensions: number of dimensions to use in reducing vector
            dimensionsionality with SVD
        :type svd_dimensions: int
        """
        # Transposed SVD projection matrix; set by cluster() when SVD is used.
        self._Tt = None
        self._should_normalise = normalise
        self._svd_dimensions = svd_dimensions

    def cluster(self, vectors, assign_clusters=False, trace=False):
        """
        Normalise and (optionally) SVD-reduce the vectors, then delegate to
        cluster_vectorspace().  When assign_clusters is true, return the
        cluster assigned to each input vector.
        """
        assert len(vectors) > 0

        # normalise the vectors
        if self._should_normalise:
            vectors = list(map(self._normalise, vectors))

        # use SVD to reduce the dimensionality
        if self._svd_dimensions and self._svd_dimensions < len(vectors[0]):
            [u, d, vt] = numpy.linalg.svd(numpy.transpose(numpy.array(vectors)))
            S = d[:self._svd_dimensions] * \
                numpy.identity(self._svd_dimensions, numpy.float64)
            T = u[:, :self._svd_dimensions]
            Dt = vt[:self._svd_dimensions, :]
            vectors = numpy.transpose(numpy.dot(S, Dt))
            self._Tt = numpy.transpose(T)

        # call abstract method to cluster the vectors
        self.cluster_vectorspace(vectors, trace)

        # assign the vectors to clusters
        if assign_clusters:
            # BUG FIX: removed a leftover debug print of self._Tt and the
            # (potentially huge) vector list to stdout.
            return [self.classify(vector) for vector in vectors]

    def cluster_vectorspace(self, vectors, trace):
        """
        Finds the clusters using the given set of vectors.
        """
        raise NotImplementedError()

    def classify(self, vector):
        """
        Classify the token's vector into a cluster name, applying the same
        normalisation and SVD projection used during clustering.
        """
        if self._should_normalise:
            vector = self._normalise(vector)
        if self._Tt is not None:
            vector = numpy.dot(self._Tt, vector)
        cluster = self.classify_vectorspace(vector)
        return self.cluster_name(cluster)

    def classify_vectorspace(self, vector):
        """
        Returns the index of the appropriate cluster for the vector.
        """
        raise NotImplementedError()

    def likelihood(self, vector, label):
        """
        Likelihood of `label` for the (normalised/projected) vector.
        """
        if self._should_normalise:
            vector = self._normalise(vector)
        if self._Tt is not None:
            vector = numpy.dot(self._Tt, vector)
        return self.likelihood_vectorspace(vector, label)

    def likelihood_vectorspace(self, vector, cluster):
        """
        Returns the likelihood of the vector belonging to the cluster.
        """
        predicted = self.classify_vectorspace(vector)
        return (1.0 if cluster == predicted else 0.0)

    def vector(self, vector):
        """
        Returns the vector after normalisation and dimensionality reduction
        """
        if self._should_normalise:
            vector = self._normalise(vector)
        if self._Tt is not None:
            vector = numpy.dot(self._Tt, vector)
        return vector

    def _normalise(self, vector):
        """
        Normalises the vector to unit length.
        """
        return vector / sqrt(numpy.dot(vector, vector))
def euclidean_distance(u, v):
    """
    Returns the euclidean distance between vectors u and v. This is equivalent
    to the length of the vector (u - v).
    """
    delta = u - v
    return sqrt(numpy.dot(delta, delta))
def cosine_distance(u, v):
    """
    Returns 1 minus the cosine of the angle between vectors v and u. This is equal to
    1 - (u.v / |u||v|).
    """
    norm_u = sqrt(numpy.dot(u, u))
    norm_v = sqrt(numpy.dot(v, v))
    return 1 - (numpy.dot(u, v) / (norm_u * norm_v))
class _DendrogramNode(object):
    """ Tree node of a dendrogram. """

    def __init__(self, value, *children):
        # Leaf nodes carry an item as `value`; internal nodes carry the merge
        # sequence number assigned by Dendrogram.merge().
        self._value = value
        self._children = children

    def leaves(self, values=True):
        """Return the leaf values (or leaf nodes when values is False),
        left to right."""
        if self._children:
            leaves = []
            for child in self._children:
                leaves.extend(child.leaves(values))
            return leaves
        elif values:
            return [self._value]
        else:
            return [self]

    def groups(self, n):
        """Split the tree into (up to) n groups of leaf values by undoing the
        latest merges first."""
        queue = [(self._value, self)]

        while len(queue) < n:
            priority, node = queue.pop()
            if not node._children:
                # BUG FIX: plain lists have no push(); re-insert the leaf with
                # append() instead of raising AttributeError.
                queue.append((priority, node))
                break
            for child in node._children:
                if child._children:
                    queue.append((child._value, child))
                else:
                    queue.append((0, child))

        # makes the earliest merges at the start, latest at the end
        queue.sort()

        groups = []
        for priority, node in queue:
            groups.append(node.leaves())
        return groups
@python_2_unicode_compatible
class Dendrogram(object):
"""
Represents a dendrogram, a tree with a specified branching order. This
must be initialised with the leaf items, then iteratively call merge for
each branch. This class constructs a tree representing the order of calls
to the merge function.
"""
def __init__(self, items=[]):
"""
:param items: the items at the leaves of the dendrogram
:type items: sequence of (any)
"""
self._items = [_DendrogramNode(item) for item in items]
self._original_items = copy.copy(self._items)
self._merge = 1
def merge(self, *indices):
"""
Merges nodes at given indices in the dendrogram. The nodes will be
combined which then replaces the first node specified. All other nodes
involved in the merge will be removed.
:param indices: indices of the items to merge (at least two)
:type indices: seq of int
"""
assert len(indices) >= 2
node = _DendrogramNode(self._merge, *[self._items[i] for i in indices])
self._merge += 1
self._items[indices[0]] = node
for i in indices[1:]:
del self._items[i]
def groups(self, n):
"""
Finds the n-groups of items (leaves) reachable from a cut at depth n.
:param n: number of groups
:type n: int
"""
if len(self._items) > 1:
root = _DendrogramNode(self._merge, *self._items)
else:
root = self._items[0]
return root.groups(n)
def show(self, leaf_labels=[]):
"""
Print the dendrogram in ASCII art to standard out.
:param leaf_labels: an optional list of strings to use for labeling the leaves
:type leaf_labels: list
"""
# ASCII rendering characters
JOIN, HLINK, VLINK = '+', '-', '|'
# find the root (or create one)
if len(self._items) > 1:
root = _DendrogramNode(self._merge, *self._items)
else:
root = self._items[0]
leaves = self._original_items
if leaf_labels:
last_row = leaf_labels
else:
last_row = ["%s" % leaf._value for leaf in leaves]
# find the bottom row and the best cell width
width = max(map(len, last_row)) + 1
lhalf = width / 2
rhalf = width - lhalf - 1
# display functions
def format(centre, left=' ', right=' '):
return '%s%s%s' % (lhalf*left, centre, right*rhalf)
def display(str):
stdout.write(str)
# for each merge, top down
queue = [(root._value, root)]
verticals = [ format(' ') for leaf in leaves ]
while queue:
priority, node = queue.pop()
child_left_leaf = list(map(lambda c: c.leaves(False)[0], node._children))
indices = list(map(leaves.index, child_left_leaf))
if child_left_leaf:
min_idx = min(indices)
|
#!/usr/bin/env python
# REQUIRES both rst2pdf and wikir project from google code.
import sys
import subprocess
sys.path.insert(0, '../../rson/py2x')
from rson import loads
from simplejson import dumps
# Build the PDF manual first (rst2pdf with the extensions the manual needs).
subprocess.call('../../rst2pdf/bin/rst2pdf manual.txt -e preprocess -e dotted_toc -o manual.pdf'.split())

# NOTE(review): the file is opened in binary mode, so on Python 3 these lines
# would be bytes and the `check in line` tests would need bytes patterns;
# this script targets Python 2 (see the py2x path inserted above).
lines = iter(open('manual.txt', 'rb').read().splitlines())
badstuff = 'page:: space:: footer:: ##Page## contents::'.split()
result = []
# Keep only lines that contain none of the rst2pdf-specific directives.
for line in lines:
    for check in badstuff:
        if check in line:
            break
    else:
        result.append(line)

result.append('')
result = '\n'.join(result)

from wikir import publish_string
result = publish_string(result)

f = open('manual.wiki', 'wb')
f.write(result)
f.close()
|
# -*- coding: utf-8 -*-
from requests import (get, post, delete)
from .base import Base
class System(Base):
    """Client for the REST API's system-properties and session-statistics
    endpoints."""

    def __init__(self, host, secret, endpoint='/plugins/restapi/v1/system/properties'):
        """
        :param host: Scheme://Host/ for API requests
        :param secret: Shared secret key for API requests
        :param endpoint: Endpoint for API requests
        """
        super(System, self).__init__(host, secret, endpoint)

    def get_props(self):
        """Retrieve all system properties."""
        return self._submit_request(get, self.endpoint)

    def get_prop(self, key):
        """Retrieve a single system property.

        :param key: The name of system property
        """
        return self._submit_request(get, '/'.join([self.endpoint, key]))

    def update_prop(self, key, value):
        """Create or update a system property.

        :param key: The name of system property
        :param value: The value of system property
        """
        return self._submit_request(
            post, self.endpoint, json={'@key': key, '@value': value})

    def delete_prop(self, key):
        """Delete a system property.

        :param key: The name of system property
        """
        return self._submit_request(delete, '/'.join([self.endpoint, key]))

    def get_concurrent_sessions(self):
        """Retrieve concurrent session statistics."""
        # Swap the trailing .../properties path segment for statistics/sessions.
        base = self.endpoint.rpartition('/')[0]
        return self._submit_request(get, '/'.join([base, 'statistics', 'sessions']))
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
from django.contrib.auth.models import User
from sentry.constants import MEMBER_USER
from sentry.models import Project
from sentry.web.helpers import get_project_list
from tests.base import TestCase
class GetProjectListTEst(TestCase):
    # NOTE(review): the class name carries a typo ("TEst"); left unchanged so
    # any external references to it keep working.

    def setUp(self):
        self.user = User.objects.create(username="admin", email="admin@localhost")
        # Expects exactly one pre-existing project, which must be public.
        self.project = Project.objects.get()
        assert self.project.public is True
        self.project2 = Project.objects.create(name='Test', slug='test', owner=self.user, public=False)

    @mock.patch('sentry.models.Team.objects.get_for_user', mock.Mock(return_value={}))
    def test_includes_public_projects_without_access(self):
        # No team access at all: only the public project is returned.
        project_list = get_project_list(self.user)
        self.assertEquals(len(project_list), 1)
        self.assertIn(self.project.id, project_list)

    @mock.patch('sentry.models.Team.objects.get_for_user', mock.Mock(return_value={}))
    def test_does_exclude_public_projects_without_access(self):
        # Requesting MEMBER_USER access filters out the public project.
        project_list = get_project_list(self.user, MEMBER_USER)
        self.assertEquals(len(project_list), 0)

    @mock.patch('sentry.models.Team.objects.get_for_user')
    def test_does_include_private_projects_without_access(self, get_for_user):
        # Team membership exposes the private project alongside the public one.
        get_for_user.return_value = {self.project2.team.id: self.project2.team}
        project_list = get_project_list(self.user)
        get_for_user.assert_called_once_with(self.user, None)
        self.assertEquals(len(project_list), 2)
        self.assertIn(self.project.id, project_list)
        self.assertIn(self.project2.id, project_list)

    @mock.patch('sentry.models.Team.objects.get_for_user')
    def test_does_exclude_public_projects_but_include_private_with_access(self, get_for_user):
        get_for_user.return_value = {self.project2.team.id: self.project2.team}
        project_list = get_project_list(self.user, MEMBER_USER)
        get_for_user.assert_called_once_with(self.user, MEMBER_USER)
        self.assertEquals(len(project_list), 1)
        self.assertIn(self.project2.id, project_list)
|
#!/usr/bin/python
# Written by Stjepan Horvat
# ( zvanstefan@gmail.com )
# by the exercises from David Lucal Burge - Perfect Pitch Ear Traning Supercourse
# Thanks to Wojciech M. Zabolotny ( wzab@ise.pw.edu.pl ) for snd-virmidi example
# ( wzab@ise.pw.edu.pl )
import random
import time
import sys
import re
# Raw ALSA virtual-MIDI device, opened once for reading and once for writing.
fname="/dev/snd/midiC2D0"
#fname=sys.argv[1]
fin=open(fname,"rb")
fout=open(fname,"wb")
#keymin=int(sys.argv[2])
#keymax=int(sys.argv[3])
#keymin=int(60)
#keymax=int(72)
#c major scale
print ("Exercise 10-17:")
print ("Aural chord analisys. First you have to unlock the sound by ear. And then you have to indentify. It's a very powerful tehnique to stabilize perfect pitch.")
#from c to c'' white tones
#c major scale
# All white keys (C major scale) from C2 (36) to C7 (96).
notes = [ 36, 38, 40, 41, 43, 45, 47, 48, 50, 52, 53, 55, 57, 59, 60, 62, 64, 65, 67, 69, 71, 72, 74, 76, 77, 79, 81, 83, 84, 86, 88, 89, 91, 93, 95, 96 ]
# Every C in that range; nameNote() works by offsetting from these.
noteC = [ 36, 48, 60, 72, 84, 96 ]
def playNote(noteOne, noteTwo, noteThree):
    """Play the three MIDI notes in sequence, holding each for 0.7 s."""
    def strike(note):
        # One note-on / hold / note-off cycle on MIDI channel 1.
        # BUG FIX: write the raw status bytes. Encoding chr(0x90)/chr(0x80)
        # as UTF-8 emitted two bytes (C2 90 / C2 80), corrupting the MIDI
        # stream; bytes([...]) sends each value verbatim.
        fout.write(bytes([0x90, note, 127]))
        fout.flush()
        time.sleep(0.7)
        fout.write(bytes([0x80, note, 127]))
        fout.flush()
    strike(noteOne)
    strike(noteTwo)
    strike(noteThree)
def nameNote(note):
    """Return the white-key name (C, D, E, F, G, A, H) of a MIDI note number
    covered by noteC's octaves, or None for anything else."""
    for offset, name in ((0, "C"), (2, "D"), (4, "E"), (5, "F"),
                         (7, "G"), (9, "A"), (11, "H")):
        if note - offset in noteC:
            return name
def name2Note(name):
    """Return the middle-octave MIDI number for a lowercase white-key name,
    or None for an unknown name."""
    return {"c": 60, "d": 62, "e": 64, "f": 65,
            "g": 67, "a": 69, "h": 71}.get(name)
usage = "Usage: 1-repeat, <note> <note> \"c d e\", ?-usage."
round = 1
# Answers must be three white-key names, e.g. "c d e".
a = re.compile("^[a-h] [a-h] [a-h]$")
try:
    print(usage)
    while True:
        # Draw three notes with pairwise-distinct letter names.
        noteOne = random.choice(notes)
        while True:
            while True:
                noteTwo = random.choice(notes)
                if nameNote(noteOne) != nameNote(noteTwo):
                    break
            while True:
                noteThree = random.choice(notes)
                # BUG FIX: this loop must re-draw until noteThree's name
                # differs from noteTwo's; it previously re-tested noteOne vs
                # noteTwo (always true at this point), so noteThree could
                # share noteTwo's name.
                if nameNote(noteThree) != nameNote(noteTwo):
                    break
            if nameNote(noteOne) != nameNote(noteThree):
                break
        match = False
        while not match:
            done = False
            playNote(noteOne, noteTwo, noteThree)
            while not done:
                n = input("? ")
                if n == "1":
                    # Repeat the chord.
                    playNote(noteOne, noteTwo, noteThree)
                if n == "?":
                    print(usage)
                if n == "help":
                    print(nameNote(noteOne).lower(), nameNote(noteTwo).lower(), nameNote(noteThree).lower())
                elif a.match(n):
                    splitNote = n.split()
                    if splitNote[0] == nameNote(noteOne).lower() and splitNote[1] == nameNote(noteTwo).lower() and splitNote[2] == nameNote(noteThree).lower():
                        round += 1
                        print("Correct. Next round. " + str(round) + ".:")
                        done = True
                        match = True
                    else:
                        # Wrong guess: play back what the user named.
                        playNote(name2Note(splitNote[0]), name2Note(splitNote[1]), name2Note(splitNote[2]))
except KeyboardInterrupt:
    pass
|
#!/usr/bin/env python
import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
#import matplotlib.dates as mdates
#import matplotlib.cbook as cbook
#from matplotlib import pyplot as plt
from matplotlib.dates import date2num
from statsmodels.distributions.empirical_distribution import ECDF
from collections import Counter
from ..guifiwrapper.guifiwrapper import *
#root = 3671
#root = 2444
# guifi.net zone id whose CNML data is analysed below.
root = 17711
g = CNMLWrapper(root)
import os
# Output directories for the generated figures.
basedir = os.path.join(os.getcwd(), 'figs')
baseservicesdir = os.path.join(basedir, 'services')
for d in [basedir, baseservicesdir]:
    if not os.path.exists(d):
        os.makedirs(d)
# Service-type labels split into user-facing and management categories.
user = ['meteo', 'radio', 'web', 'VPS', 'tv', 'wol', 'Proxy', 'mail', 'irc',
        'teamspeak', 'ftp', 'asterisk', 'apt-cache', 'AP', 'IM', 'p2p',
        'VPN', 'Streaming', 'games', 'cam']
mgmt = ['iperf', 'LDAP', 'DNS', 'SNPgraphs', 'NTP', 'AirControl']
# Extract user services and frequencies
#userServices = [s.type for s in g.services.values() if s.type in user]
#totalServices = len(userServices)
#userServices = Counter(userServices).items()
#userServicesNumber = len(userServices)
#userTypes = [typ for (typ,values) in userServices]
#userValues = [float(value)/float(totalServices) for (typ,value) in userServices]
# Extract mgmt services and frequencies
services = [s.type for s in g.services.values() if s.type in user]
totalServices = len(services)
# NOTE(review): list.sort() on .items() requires Python 2, where items()
# returns a list; on Python 3 this raises AttributeError.
services = Counter(services).items()
from operator import itemgetter
# NOTE(review): `sercices` is a typo and captures sort()'s None; the sort's
# in-place effect on `services` is what matters.
sercices = services.sort(key=itemgetter(1), reverse=True)
servicesNumber = len(services)
types = [typ for (typ, value) in services]
values = [float(value) / float(totalServices) for (typ, value) in services]
ind = np.arange(servicesNumber)
width = 0.35
fig = plt.figure()
fig.set_canvas(plt.gcf().canvas)
#ax = fig.add_subplot(121)
ax = fig.add_subplot(111)
rects = ax.bar(ind, values, width, color='black')
ax.set_xlim(-width, len(ind) + width)
ax.set_ylim(0, 0.7)
# ax.set_ylim(0,45)
ax.set_ylabel('Frequency')
#ax.set_xlabel('Service Type')
ax.set_title('User Services Frequency')
xTickMarks = [str(i) for i in types]
ax.set_xticks(ind + width)
xtickNames = ax.set_xticklabels(xTickMarks)
plt.setp(xtickNames, rotation=45, fontsize=13)
# Same frequency extraction for management services.
services1 = [s.type for s in g.services.values() if s.type in mgmt]
totalServices1 = len(services1)
services1 = Counter(services1).items()
sercices1 = services1.sort(key=itemgetter(1), reverse=True)
servicesNumber1 = len(services1)
types1 = [typ for (typ, value1) in services1]
values1 = [float(value) / float(totalServices1) for (typ, value) in services1]
if False:
    # Disable analytical mgmt frequency image
    ind1 = np.arange(servicesNumber1)
    ax1 = fig.add_subplot(122)
    rects = ax1.bar(ind1, values1, width, color='black')
    ax1.set_xlim(-width, len(ind1) + width)
    ax1.set_ylim(0, 0.7)
    # ax.set_ylim(0,45)
    # ax1.set_ylabel('Frequency')
    #ax1.set_xlabel('Service Type')
    ax1.set_title('Management Services Frequency')
    xTickMarks1 = [str(i) for i in types1]
    ax1.set_xticks(ind1 + width)
    xtickNames1 = ax1.set_xticklabels(xTickMarks1)
    plt.setp(xtickNames1, rotation=0, fontsize=13)
plt.show()
figfile = os.path.join(baseservicesdir, str(root) + "services_frequency")
fig.savefig(figfile, format='png', dpi=fig.dpi)
# Other categories
# Collapse every service into Management / Proxy / Other services.
for s in g.services.values():
    if s.type in mgmt:
        s.type = "Management"
    elif s.type != "Proxy":
        s.type = "Other services"
services = [s.type for s in g.services.values()]
totalServices = len(services)
services = Counter(services).items()
sercices = services.sort(key=itemgetter(1), reverse=True)
servicesNumber = len(services)
types = [typ for (typ, value) in services]
values = [float(value) / float(totalServices) for (typ, value) in services]
ind = np.arange(servicesNumber)
width = 0.35
fig = plt.figure()
fig.set_canvas(plt.gcf().canvas)
ax = fig.add_subplot(111)
rects = ax.bar(ind, values, width, color='black')
ax.set_xlim(-width, len(ind) + width)
ax.set_ylim(0, 0.7)
# ax.set_ylim(0,45)
ax.set_ylabel('Frequency')
#ax.set_xlabel('Service Type')
ax.set_title(' Service Categories Frequency')
xTickMarks = [str(i) for i in types]
ax.set_xticks(ind + width)
xtickNames = ax.set_xticklabels(xTickMarks)
plt.setp(xtickNames, rotation=0, fontsize=12)
plt.show()
figfile = os.path.join(
    baseservicesdir,
    str(root) +
    "services_frequency_categories")
fig.savefig(figfile, format='png', dpi=fig.dpi)
|
# -*- coding: utf-8 -*-
import gensim, logging
class SemanticVector:
    """Train, save and load a gensim word2vec model over a corpus exposed by
    `structure` (which must provide prepare_list_of_words_in_sentences())."""

    # Placeholder until model_word2vec() or load_model() assigns a real model.
    model = ''

    def __init__(self, structure):
        self.structure = structure

    def model_word2vec(self, min_count=15, window=15, size=100):
        """Train a word2vec model (CBOW, sg=0) and return it.

        :param min_count: ignore words occurring fewer times than this
        :param window: context window size
        :param size: embedding dimensionality
        """
        # FIX: parenthesized print works on Python 2 and 3; the bare print
        # statements used before are syntax errors on Python 3.
        print('preparing sentences list')
        sentences = self.structure.prepare_list_of_words_in_sentences()
        print('start modeling')
        self.model = gensim.models.Word2Vec(sentences, size=size, window=window, min_count=min_count, workers=4, sample=0.001, sg=0)
        return self.model

    def save_model(self, name):
        """Persist the trained model under the path `name`."""
        self.model.save(name)

    def load_model(self, name):
        """Replace self.model with one previously saved under `name`."""
        self.model = gensim.models.Word2Vec.load(name)
|
# Heap sort built on a list-backed binary heap (class version).
class DLinkHeap(object):
    def __init__(self, list=None, N=0):
        # Backing array and its logical size.
        self.dList = list
        self.lengthSize = N

    # Insert a value (call init_heap() afterwards to restore heap order).
    def insert_heap(self, data):
        self.dList.append(data)
        self.lengthSize += 1

    # Build a max-heap over the whole array.
    def init_heap(self):
        # BUG FIX: the original sifted only the root with a growing boundary,
        # which never establishes the heap property; standard bottom-up
        # heapify sifts every internal node, last parent first.
        n = self.lengthSize
        for i in range(n // 2 - 1, -1, -1):
            self._sift_down_from(i, n)

    # Swap two elements by index.
    def swap(self, a, b):
        tmp = self.dList[a]
        self.dList[a] = self.dList[b]
        self.dList[b] = tmp

    # Move dList[pos] down until both children within `size` are smaller.
    def _sift_down_from(self, pos, size):
        while True:
            left = 2 * pos + 1
            right = 2 * pos + 2
            largest = pos
            if left < size and self.dList[left] > self.dList[largest]:
                largest = left
            if right < size and self.dList[right] > self.dList[largest]:
                largest = right
            if largest == pos:
                break
            self.swap(pos, largest)
            pos = largest

    # Restore the max-heap property for the first `size` elements, starting
    # from the root (the only element out of place during heap_sort()).
    def sift_down(self, size):
        self._sift_down_from(0, size)

    # Move the last of the first `size` elements up to its position.
    def sift_up(self, size):
        # BUG FIX: the parent of node i is (i - 1) // 2, not i // 2; the
        # comparison direction now matches the max-heap used above.
        i = size - 1
        while i > 0:
            parent_i = (i - 1) // 2
            if self.dList[i] > self.dList[parent_i]:
                self.swap(i, parent_i)
                i = parent_i
            else:
                break

    # Sort ascending in place: repeatedly move the current maximum to the end.
    def heap_sort(self):
        n = self.lengthSize
        while n > 0:
            self.swap(0, n - 1)
            n -= 1
            self.sift_down(n)

    # Print the first `size` elements on one line.
    def print_heap(self, size):
        for idx in range(size):
            print(self.dList[idx], end=" ")
        print()
if __name__ == "__main__":
    # NOTE(review): k is never used.
    k = 0
    # Read the element count n.
    n = int(input())
    # Read the n space-separated integers.
    input_L = list(map(int, input().split()))
    L = input_L
    dLinkHeap = DLinkHeap(L, n)
    dLinkHeap.init_heap()
    dLinkHeap.print_heap(n)
    print("-----after sort-----")
    dLinkHeap.heap_sort()
    dLinkHeap.print_heap(n)
|
from . imp | ort slide_channel_technology_category
from . import slide_channel_technology
from | . import slide_channel
|
#!/usr/bin/python3
__author__ = 'ivan.shynkarenka'
import argp | arse
from TTWebClient.TickTraderWebClient import TickTraderWebClient
def main():
    """Fetch and print public currency information from a TickTrader server."""
    parser = argparse.ArgumentParser(description='TickTrader Web API sample')
    parser.add_argument('web_api_address', help='TickTrader Web API address')
    args = parser.parse_args()

    # Web API client bound to the requested server.
    client = TickTraderWebClient(args.web_api_address)

    # List every public currency, then show the precision of the first one.
    currencies = client.get_public_all_currencies()
    for entry in currencies:
        print('Currency: {0}'.format(entry['Name']))

    currency = client.get_public_currency(currencies[0]['Name'])
    print("{0} currency precision: {1}".format(currency[0]['Name'], currency[0]['Precision']))
if __name__ == '__main__':
    # Script entry point.
    main()
from django.contrib import admin
# Register your models here.
from django.c | ontrib import admin
from rango.models import Category, Page
class PageAdmin(admin.ModelAdmin) | :
list_display = ('title', 'category', 'url')
# Expose both models in the admin; Page uses the customised options.
admin.site.register(Category)
admin.site.register(Page,PageAdmin)
|
#!/usr/bin/env python
# coding: utf-8
# # rede_gephi_com_ipca_csv
# In[6]:
# The election year selects the schema name and the output directory.
ano_eleicao = '2014'
rede =f'rede{ano_eleicao}'
csv_dir = f'/home/neilor/{rede}'

# In[7]:

# Fully-qualified table names inside the per-year schema.
dbschema = f'rede{ano_eleicao}'
table_edges = f"{dbschema}.gephi_edges_com_ipca_2018"
table_nodes = f"{dbschema}.gephi_nodes_com_ipca_2018"
table_receitas = f"{dbschema}.receitas_com_ipca_2018"
table_candidaturas = f"{dbschema}.candidaturas_com_ipca_2018"
table_municipios = f"{dbschema}.municipios_{ano_eleicao}"

# In[8]:

import sys
sys.path.append('../')
import mod_tse as mtse

# In[9]:

import os
home = os.environ["HOME"]
local_dir = f'{home}/temp'

# In[10]:

# Reset the per-municipality "rede" flag before regenerating the exports.
mtse.execute_query(f"update {table_municipios} set rede= 'N';")

# ## REDE BRASIL

# In[11]:
def salva_rede_brasil(csv_dir,rede):
    """Export the nationwide edges/nodes/candidaturas/receitas tables to CSV
    files under {csv_dir}/{rede}_Brasil (server-side COPY: postgres writes
    the files directly)."""
    rede_dir_BR = f'{csv_dir}/{rede}_Brasil'
    os.makedirs(rede_dir_BR)
    edges_csv_query=f"""copy
           (
            select * from {table_edges}
           )
           TO '{rede_dir_BR}/{rede}_Brasil_edges.csv' DELIMITER ';' CSV HEADER;
        """
    mtse.execute_query(edges_csv_query)
    nodes_csv_query=f"""copy
           (
            select * from {table_nodes}
           )
           TO '{rede_dir_BR}/{rede}_Brasil_nodes.csv' DELIMITER ';' CSV HEADER;
        """
    mtse.execute_query(nodes_csv_query)
    candidaturas_csv_query=f"""copy
           (
            select * from {table_candidaturas}
           )
           TO '{rede_dir_BR}/{rede}_Brasil_candidaturas.csv' DELIMITER ';' CSV HEADER;
        """
    mtse.execute_query(candidaturas_csv_query)
    receitas_csv_query=f"""copy
           (
            select * from {table_receitas}
           )
           TO '{rede_dir_BR}/{rede}_Brasil_receitas.csv' DELIMITER ';' CSV HEADER;
        """
    mtse.execute_query(receitas_csv_query)
# ## REDES POR ESTADO

# In[12]:

def salva_rede_csv_uf(csv_dir,rede,sg_uf):
    """Export the four tables filtered to one state (sg_uf) into
    {csv_dir}/{rede}_{sg_uf} (server-side COPY, as above)."""
    rede_dir_uf = f'{csv_dir}/{rede}_{sg_uf}'
    os.makedirs(rede_dir_uf)
    edges_query=f"""copy
           (
            select * from {table_edges} where ue ='{sg_uf}'
           )
           TO '{rede_dir_uf}/{rede}_{sg_uf}_edges.csv' DELIMITER ';' CSV HEADER;
        """
    mtse.execute_query(edges_query)
    nodes_query=f"""copy
           (
            select * from {table_nodes} where ue ='{sg_uf}'
           )
           TO '{rede_dir_uf}/{rede}_{sg_uf}_nodes.csv' DELIMITER ';' CSV HEADER;
        """
    mtse.execute_query(nodes_query)
    candidaturas_csv_query=f"""copy
           (
            select * from {table_candidaturas} where sg_uf ='{sg_uf}'
           )
           TO '{rede_dir_uf}/{rede}_{sg_uf}_candidaturas.csv' DELIMITER ';' CSV HEADER;
        """
    mtse.execute_query(candidaturas_csv_query)
    receitas_csv_query=f"""copy
           (
            select * from {table_receitas} where receptor_uf ='{sg_uf}'
           )
           TO '{rede_dir_uf}/{rede}_{sg_uf}_receitas.csv' DELIMITER ';' CSV HEADER;
        """
    mtse.execute_query(receitas_csv_query)
# In[13]:

import pandas as pd
import shutil

# Start from a clean output tree, then export Brazil plus one dir per state.
if os.path.exists(csv_dir):
    shutil.rmtree(csv_dir)
os.makedirs(csv_dir)

salva_rede_brasil(csv_dir,rede)

df_uf = mtse.pandas_query(f'select sg_uf from {table_candidaturas} group by sg_uf order by sg_uf')
for index, row in df_uf.iterrows():
    sg_uf = row['sg_uf']
    salva_rede_csv_uf(csv_dir,rede,sg_uf)

# In[14]:

import datetime
print(datetime.datetime.now())

# In[ ]:
|
import pytest
from django.db import connection, IntegrityError
from .models import MyTree
def flush_constraints():
    """Force deferred database constraints to be checked immediately."""
    # the default db setup is to have constraints DEFERRED.
    # So IntegrityErrors only happen when the transaction commits.
    # Django's testcase thing does eventually flush the constraints but to
    # actually test it *within* a testcase we have to flush it manually.
    connection.cursor().execute("SET CONSTRAINTS ALL IMMEDIATE")
def test_node_creation_simple(db):
    # Two distinct root labels must coexist without error.
    MyTree.objects.create(label='root1')
    MyTree.objects.create(label='root2')
def test_node_creation_with_no_label(db):
    """An empty, None, or missing label must be rejected."""
    for bad_label in ('', None):
        with pytest.raises(ValueError):
            MyTree.objects.create(label=bad_label)
    with pytest.raises(ValueError):
        MyTree.objects.create()
def test_root_node_already_exists(db):
    # Duplicate root labels violate the uniqueness constraint.
    MyTree.objects.create(label='root1')
    with pytest.raises(IntegrityError):
        MyTree.objects.create(label='root1')
def test_same_label_but_different_parent(db):
    # The same label is allowed when the parents differ.
    root1 = MyTree.objects.create(label='root1')
    MyTree.objects.create(label='root1', parent=root1)
def test_same_label_as_sibling(db):
    # Siblings may not share a label.
    root1 = MyTree.objects.create(label='root1')
    MyTree.objects.create(label='child', parent=root1)
    with pytest.raises(IntegrityError):
        MyTree.objects.create(label='child', parent=root1)
def test_parent_is_self_errors(db):
    # A node may not be its own parent; the constraint is deferred, so it is
    # flushed explicitly to surface the IntegrityError inside the test.
    root1 = MyTree.objects.create(label='root1')
    root1.parent = root1
    with pytest.raises(IntegrityError):
        root1.save()
        flush_constraints()
def test_parent_is_remote_ancestor_errors(db):
    # Re-parenting a node onto a remote ancestor must be rejected.
    root1 = MyTree.objects.create(label='root1')
    child2 = MyTree.objects.create(label='child2', parent=root1)
    desc3 = MyTree.objects.create(label='desc3', parent=child2)
    with pytest.raises(IntegrityError):
        # To test this integrity error, have to update table without calling save()
        # (because save() changes `ltree` to match `parent_id`)
        MyTree.objects.filter(pk=desc3.pk).update(parent=root1)
        flush_constraints()
def test_parent_i | s_descendant_errors(db):
root1 = MyTree.objects.create(label='root1')
child2 = MyTree.objects.create(label='child2', parent=root1)
desc3 = MyTree.objects.create(label='desc3', parent=child2)
child2.parent = desc3
with pytest.raises(IntegrityError):
child2.save()
flush_constraints()
|
"""Unit test for the SNES nonlinear solver"""
# Copyright (C) 2012 Patrick E. Farrell
#
# This file is par | t of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public L | icense for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2012-10-17
# Last changed: 2012-10-26
"""Solve the Yamabe PDE which arises in the differential geometry of general
relativity. http://arxiv.org/abs/1107.0360.
The Yamabe equation is highly nonlinear and supports many solutions. However,
only one of these is of physical relevance -- the positive solution.
This unit test demonstrates the capability of the SNES solver to accept bounds
on the resulting solution. The plain Newton method converges to an unphysical
negative solution, while the SNES solution with {sign: nonnegative} converges
to the physical positive solution.
"""
from dolfin import *
import unittest
# SNES bounds support requires the PETSc backend; skip cleanly otherwise.
try:
    parameters["linear_algebra_backend"] = "PETSc"
except RuntimeError:
    import sys; sys.exit(0)

parameters["form_compiler"]["quadrature_degree"] = 5

mesh = Mesh("doughnut.xml.gz")
V = FunctionSpace(mesh, "CG", 1)
bcs = [DirichletBC(V, 1.0, "on_boundary")]
u = Function(V)
v = TestFunction(V)
# Start far on the negative side so plain Newton finds the negative root.
u.interpolate(Constant(-1000.0))

# Radially-dependent coefficient of the Yamabe equation.
r = sqrt(triangle.x[0]**2 + triangle.x[1]**2)
rho = 1.0/r**3

F = (8*inner(grad(u), grad(v))*dx +
     rho * inner(u**5, v)*dx +
     (-1.0/8.0)*inner(u, v)*dx)

newton_solver_parameters = {"nonlinear_solver": "newton",
                            "linear_solver": "lu",
                            "newton_solver": {"maximum_iterations": 100,
                                              "report": False}}

# Same solve, but SNES with a nonnegativity bound on the solution.
snes_solver_parameters = {"nonlinear_solver": "snes",
                          "linear_solver": "lu",
                          "snes_solver": {"maximum_iterations": 100,
                                          "sign": "nonnegative",
                                          "report": False}}
class SNESSolverTester(unittest.TestCase):
    def test_snes_solver(self):
        # SNES with the nonnegative sign bound finds the physical solution.
        solve(F == 0, u, bcs, solver_parameters=snes_solver_parameters)
        self.assertTrue(u.vector().min() >= 0)

    def test_newton_solver(self):
        # Plain Newton converges to the unphysical negative solution.
        solve(F == 0, u, bcs, solver_parameters=newton_solver_parameters)
        self.assertTrue(u.vector().min() < 0)
if __name__ == "__main__":
    # Turn off DOLFIN output
    set_log_active(False)
    # FIX: parenthesized print works on both Python 2 and 3; the bare print
    # statements used before are syntax errors on Python 3.
    print("")
    print("Testing DOLFIN nls/PETScSNESSolver interface")
    print("--------------------------------------------")
    unittest.main()
|
# Copyright (c) 2016-2017 Dustin Doloff
# Licensed under Apache License v2.0
import argparse
import difflib
import hashlib
import os
import subprocess
import zipfile
# ANSI SGR escape sequences used to colorize the diff output.
# Resets color formatting
COLOR_END = '\33[0m'

# Modifies characters or color
COLOR_BOLD = '\33[1m'
COLOR_DISABLED = '\33[02m' # Mostly just means darker

# Sets the text color
COLOR_GREEN = '\33[32m'
COLOR_YELLOW = '\33[33m'
COLOR_RED = '\33[31m'
def parse_args():
    """Parse --stamp (writable stamp file) and --files (one or more paths;
    main() additionally requires at least two)."""
    parser = argparse.ArgumentParser(description='Asserts files are the same')
    parser.add_argument('--stamp', type=argparse.FileType('w+'), required=True,
            help='Stamp file to record action completed')
    parser.add_argument('--files', type=str, nargs='+', required=True)
    return parser.parse_args()
def bytes_to_str(bytes):
    """Decode UTF-8, rendering undecodable bytes as backslash escapes."""
    return str(bytes, 'utf-8', 'backslashreplace')
def color_diff(text_a, text_b):
    """
    Compares two pieces of text and returns a tuple
    The first value is a colorized diff of the texts.
    The second value is a boolean, True if there was a diff, False if there wasn't.
    """
    matcher = difflib.SequenceMatcher(None, text_a, text_b)
    pieces = []
    changed = False
    for opcode, a0, a1, b0, b1 in matcher.get_opcodes():
        if opcode == 'equal':
            pieces.append(bytes_to_str(matcher.a[a0:a1]))
            continue
        if opcode == 'insert':
            pieces.append(COLOR_BOLD + COLOR_GREEN + bytes_to_str(matcher.b[b0:b1]) + COLOR_END)
        elif opcode == 'delete':
            pieces.append(COLOR_BOLD + COLOR_RED + bytes_to_str(matcher.a[a0:a1]) + COLOR_END)
        elif opcode == 'replace':
            # Show the removed text followed by the inserted text, dimmed.
            pieces.append(COLOR_BOLD + COLOR_YELLOW + bytes_to_str(matcher.a[a0:a1]) +
                          COLOR_DISABLED + bytes_to_str(matcher.b[b0:b1]) + COLOR_END)
        else:
            raise RuntimeError('unexpected opcode ' + opcode)
        changed = True
    return ''.join(pieces), changed
def hash_file(file):
    """
    Computes the SHA-256 hash of the file
    file - The file to hash
    """
    digest = hashlib.sha256()
    with open(file, 'rb') as handle:
        # Read in fixed-size chunks so arbitrarily large files fit in memory.
        while True:
            chunk = handle.read(1024)
            if not chunk:
                break
            digest.update(chunk)
    return digest.digest()
def summarize(file):
    """
    Summarizes a file via its metadata to provide structured text for diffing
    """
    summary = None
    if zipfile.is_zipfile(file):
        with zipfile.ZipFile(file) as archive:
            entry_lines = []
            for entry in archive.infolist():
                fields = ', '.join(
                    attr + ': ' + repr(getattr(entry, attr)) for attr in entry.__slots__)
                entry_lines.append('Entry: (' + fields + ') ' + os.linesep)
            summary = ''.join(entry_lines)
    # Only zip archives are currently summarizable.
    assert summary is not None, 'Unable to summarize %s' % file
    return summary
def main():
    """Entry point: assert that all --files are identical, then write --stamp."""
    args = parse_args()
    files = args.files
    assert len(files) >= 2, 'There must be at least two files to compare'
    files_hashes = set()
    max_file_size = 0
    for file in files:
        files_hashes.add(hash_file(file))
        max_file_size = max(max_file_size, os.stat(file).st_size)
    # Check hashes first
    if len(files_hashes) != 1:
        # Hashes disagree: diff adjacent pairs to produce a readable report.
        for i in range(len(files) - 1):
            file_a = files[i]
            file_b = files[i + 1]
            file_a_contents = None
            file_b_contents = None
            # For files over 1 MiB, diff a structural summary rather than the
            # raw bytes to keep the diff tractable.
            if max_file_size > 1024 * 1024:
                file_a_contents = summarize(file_a)
                file_b_contents = summarize(file_b)
            else:
                with open(file_a, 'rb') as a:
                    file_a_contents = a.read()
                with open(file_b, 'rb') as b:
                    file_b_contents = b.read()
            diff, problem = color_diff(file_a_contents, file_b_contents)
            assert not problem, 'File {a} does not match {b}:{newline}{diff}'.format(
                a = file_a,
                b = file_b,
                newline = os.linesep,
                diff = diff)
        # Reached only when the hashes differ but no pairwise diff surfaced
        # (e.g. identical summaries of differing archives); still a failure.
        assert False, 'File hashes don\'t match.'
    with args.stamp as stamp_file:
        stamp_file.write(str(args))
# Allow use both as a script and as an importable module.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
# Copyright 2015 Dietrich Epp.
# This file is part of SGGL. SGGL is | licensed under the terms of the
# 2-cl | ause BSD license. For more information, see LICENSE.txt.
import glgen.__main__

# Guard the entry point so importing this wrapper module has no side effects;
# previously main() ran unconditionally at import time.
if __name__ == '__main__':
    glgen.__main__.main()
|
# Copyright 2015 Google Inc. All Rights Reserved.
"""Command for setting target pools of instance group manager."""
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.compute.lib import base_classes
from googlecloudsdk.compute.lib import utils
class SetTargetPools(base_classes.BaseAsyncMutator):
  """Set instances target pools of instance group manager."""

  @staticmethod
  def Args(parser):
    """Register the command-line flags for this command."""
    parser.add_argument('instance_group_manager',
                        help='Instance group manager name.')
    mutually_exclusive_group = parser.add_mutually_exclusive_group()
    mutually_exclusive_group.add_argument(
        '--clear-target-pools',
        action='store_true',
        help='Do not add instances to any Compute Engine Target Pools.')
    mutually_exclusive_group.add_argument(
        '--target-pools',
        type=arg_parsers.ArgList(min_length=1),
        action=arg_parsers.FloatingListValuesCatcher(),
        metavar='TARGET_POOL',
        # Help text fixed: "must can specified" -> "can be specified", and the
        # example now uses the real flag name (--target-pools).
        help=('Compute Engine Target Pools to add the instances to. '
              'Target Pools can be specified by name or by URL. Example: '
              '--target-pools target-pool-1,target-pool-2'))
    utils.AddZoneFlag(
        parser,
        resource_type='instance group manager',
        operation_type='set target pools')

  @property
  def method(self):
    # API method invoked on the service below.
    return 'SetTargetPools'

  @property
  def service(self):
    return self.compute.instanceGroupManagers

  @property
  def resource_type(self):
    return 'instanceGroupManagers'

  def _ValidateArgs(self, args):
    """Require exactly one of --target-pools / --clear-target-pools."""
    if not args.clear_target_pools and args.target_pools is None:
      raise exceptions.InvalidArgumentException(
          '--target-pools', 'not passed but --clear-target-pools not present '
          'either.')

  def CreateRequests(self, args):
    """Build the SetTargetPools request for the managed instance group."""
    self._ValidateArgs(args)
    ref = self.CreateZonalReference(args.instance_group_manager, args.zone)
    # Target pools are regional resources; derive the region from the zone.
    region = utils.ZoneNameToRegionName(ref.zone)
    if args.clear_target_pools:
      pool_refs = []
    else:
      pool_refs = self.CreateRegionalReferences(
          args.target_pools, region, resource_type='targetPools')
    pools = [pool_ref.SelfLink() for pool_ref in pool_refs]
    request = (
        self.messages.ComputeInstanceGroupManagersSetTargetPoolsRequest(
            instanceGroupManager=ref.Name(),
            instanceGroupManagersSetTargetPoolsRequest=(
                self.messages.InstanceGroupManagersSetTargetPoolsRequest(
                    targetPools=pools,
                )
            ),
            project=self.project,
            zone=ref.zone,
        )
    )
    return [request]
SetTargetPools.detailed_help = {
    # 'brief' previously read "Set instance template for instance group
    # manager." — a copy/paste from a different subcommand; this command
    # sets target pools (see DESCRIPTION below).
    'brief': 'Set target pools of instance group manager.',
    'DESCRIPTION': """
        *{command}* sets the target pools for an existing instance group
manager.
The new target pools won't apply to existing instances in the group
unless they are recreated using the recreate-instances command. But any
new instances created in the managed instance group will be added to all
of the provided target pools for load balancing purposes.
""",
}
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import | models, migrations
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
    """Initial migration: creates the Incident model."""
    # No dependencies: this is the app's first migration.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Incident',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=150)),
                ('description', models.TextField(max_length=1000)),
                # Two-letter severity codes; 'ME' (Medium) is the default.
                ('severity', models.CharField(default='ME', max_length=2, choices=[('UR', 'Urgent'), ('HI', 'High'), ('ME', 'Medium'), ('LO', 'Low'), ('IN', 'Info')])),
                ('closed', models.BooleanField(default=False)),
                # Geographic point in WGS84 (srid=4326).
                ('location', django.contrib.gis.db.models.fields.PointField(srid=4326)),
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
|
which fit the specified height.
:rtype: list
"""
items_sub = []
fm = QFontMetrics(self.items_font)
for i in items:
sz = self.items_size(items_sub)
if sz.height() > height:
break
items_sub.append(i)
return items_sub
def _font_height(self, font, text):
"""
Computes the height for the given font object.
:param font: Font object.
:type font: QFont
:param text: Text
:type text: str
:return: Returns the minimum height for the given font object.
:rtype: int
"""
fm = QFontMetrics(font)
return fm.size(Qt.TextSingleLine, text).height()
def draw_text(self, painter, text, font, bounds, alignment=Qt.AlignCenter):
"""
Provides a device independent mechanism for rendering fonts
regardless of the device's resolution. By default, the text will be
centred. This is a workaround for the font scaling issue for devices
with different resolutions.
:param painter: Painter object.
:type painter: QPainter
:param text: Text to be rendered.
:type text: str
:param font: Font for rendering the text.
:type font: QFont
:param bounds: Rect object which will provide the reference point for
drawing the text.
:type bounds: QRectF
:param alignment: Qt enums used to describe alignment. AlignCenter is
the default. Accepts bitwise OR for horizontal and vertical flags.
:type alignment: int
"""
layout = QTextLayout(text, font)
layout.beginLayout()
# Create the required number of lines in the layout
while layout.createLine().isValid():
pass
layout.endLayout()
y = 0
max_width = 0
# Set line positions relative to the layout
for i in range(layout.lineCount()):
line = layout.lineAt(i)
max_width = max(max_width, line.naturalTextWidth())
line.setPosition(QPointF(0, y))
y += line.height()
# Defaults
start_x = bounds.left()
start_y = bounds.top()
# Horizontal flags
if (alignment & Qt.AlignLeft) == Qt.AlignLeft:
start_x = bounds.left()
elif (alignment & Qt.AlignCenter) == Qt.AlignCenter or \
(alignment & Qt.AlignHCenter) == Qt.AlignHCenter:
start_x = bounds.left() + (bounds.width() - max_width) / 2.0
# Vertical flags
if (alignment == Qt.AlignTop) == Qt.AlignTop:
start_y = bounds.top()
elif (alignment & Qt.AlignCenter) == Qt.AlignCenter or \
(alignment & Qt.AlignVCenter) == Qt.AlignVCenter:
start_y = bounds.top() + (bounds.height() - y) / 2.0
layout.draw(painter, QPointF(start_x, start_y))
def paint(self, painter, option, widget=None):
"""
Performs the painting of the tenure item based on the object's
attributes.
:param painter: Performs painting operation on the item.
:type painter: QPainter
:param option: Provides style option for the item.
:type option: QStyleOptionGraphicsItem
:param widget: Provides points to the widget that is being painted on.
:type widget: QWidget
"""
shadow_start_pos = self._start_pos + self.shadow_thickness
# Use height of subsections to compute the appropriate height
header_height = self._font_height(self.header_font, self.header) + 7
items_title_height = self._font_height(
self.items_title_font,
self.items_title
)
margin = 1
fixed_height = header_height + items_title_height + (6 * margin)
if self.auto_adjust_height():
items_height = self.items_size(self.items).height() + 2
main_item_height = max(self._side, fixed_height + items_height)
else:
items_height = self._side - fixed_height
main_item_height = self._side
self._height = main_item_height
shadow_rect = QRect(
shadow_start_pos,
shadow_start_pos,
self._side,
main_item_height
)
main_item_rect = QRect(
self._start_pos,
self._start_pos,
self._side,
main_item_height
)
painter_pen = painter.pen()
painter_pen.setColor(self._normal_text_color)
painter_pen.setWidth(0)
# Create shadow effect using linear gradient
painter.setBrush(self._shadow_gradient)
painter.setPen(Qt.NoPen)
painter.drawRect(shadow_rect)
painter.setPen(self.pen)
painter.setBrush(self._brush)
# Main item outline
painter.drawRect(main_item_rect)
line_y_pos = header_height + margin * 2
painter.drawLine(
self._start_pos,
self._start_pos + line_y_pos,
self._start_pos + self._side,
self._start_pos + line_y_pos
)
# Draw header text
header_start_pos = self._start_pos + margin
header_rect = QRect(
header_start_pos,
header_start_pos,
self._side - (margin * 2),
header_height
)
# Adjust header text area if there is an icon renderer
if not self.icon_renderer is None:
init_width = header_rect.width()
adj_width = init_width - (self.icon_renderer.width + 6)
header_rect.setWidth(adj_width)
# Draw header icon if renderer is available
if not self.icon_renderer is None:
if isinstance(self.icon_renderer, BaseIconRender):
self.icon_renderer.draw(painter, self)
painter.setFont(self.header_font)
if self.header == self._default_header:
painter.setPen(self._text_highlight_color)
else:
painter.setPen(self._normal_text_color)
elided_header = self._elided_text(
self.header_font,
self.header,
header_rect.width()
)
# print(elided_header)
self.draw_text(painter, elided_header, self.header_font, header_rect)
# Draw items header
items_title_rect = QRect(
header_start_pos + 1,
header_height + items_title_height - 1,
self._side - (margin * 4),
items_title_height
)
painter.setFont(self.items_title_font)
painter.setPen(QColor('#c3b49c'))
items_title_brush = QBrush(self._gradient_dark)
painter.setBrush(items_title_brush)
painter.drawRect(items_title_rect)
# Adjust left margin of items title
items_title_rect.adjust(1, 0, 0, 0)
painter.setPen(self._normal_text_color)
self.draw_text(
painter,
self.items_title,
self.items_title_font,
items_title_rect
)
# Items listing
items_margin = 6
items_vertical_pos = header_height + items_title_height + 16
items_w = self._side - (items_margin * 2)
items_rect = QRect(
header_start_pos + items_margin,
items_vertical_pos,
items_w,
items_height
)
# Draw if there are items
if len(self.items) > 0:
painter.setFont(self.items_font)
painter.setPen(self._text_item_color)
multiline_items = self._elided_items(self.items_font, items_w)
# If auto-adjust is disabled then extract subset that will fit
if not self.auto_adjust_height():
multiline_items = self.items_by_height(
items_height,
multiline_items
)
# QTextL | ayout requires the unicode character of the line separator
multiline_items = '\u2028' | .join(multiline_items)
self.draw_text(
painter,
multiline_items,
self.items_font,
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-SVC (Support Vector Classification)
=========================================================
The classification application of the SVM is used below. The
`Iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_
dataset has been used for this example
The decision boundaries, are shown with all the points in the training-set.
"""
# NOTE: Python 2 script — `print __doc__` uses statement syntax.
print __doc__
# NOTE(review): `sys` is imported but never used in this script.
import sys
import numpy as np
import pylab as pl
from sklearn import svm, datasets
# import some data to play with
#iris = datasets.load_iris()
#X = iris.data[:, :2] # we only take the first two features.
#Y = iris.target
# Load the training set from an svmlight/libsvm formatted file instead.
XTmp, Y = datasets.load_svmlight_file("../SVMData.txt")
X = XTmp.toarray()
h = .02 # step size in the mesh
clf = svm.SVC(C=1.0, kernel='linear')
# we create an instance of SVM Classifier and fit the data.
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure(1, figsize=(4, 3))
pl.pcolormesh(xx, yy, Z, cmap=pl.cm.Paired)
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired)
# Axis labels assume Iris-style features; with SVMData.txt they are
# just the first two feature columns.
pl.xlabel('Sepal length')
pl.ylabel('Sepal width')
pl.xlim(xx.min(), xx.max())
pl.ylim(yy.min(), yy.max())
pl.xticks(())
pl.yticks(())
pl.show()
|
r.db import manage
from pulp.server.db.migrate import models
from pulp.server.db.model import MigrationTracker
import pulp.plugins.types.database as types_db
import migration_packages.a
import migration_packages.b
import migration_packages.duplicate_versions
import migration_packages.platform
import migration_packages.raise_exception
import migration_packages.version_gap
import migration_packages.version_zero
import migration_packages.z
# This is used for mocking
_test_type_json = '''{"types": [{
"id" : "test_type_id",
"display_name" : "Test Type",
"description" : "Test Type",
"unit_key" : ["attribute_1", "attribute_2", "attribute_3"],
"search_indexes" : ["attribute_1", "attribute_3"]
}]}'''
# This is used to mock the entry_point system for discovering migration packages.
def iter_entry_points(name):
    """Stand-in for pkg_resources.iter_entry_points used to feed the tests
    a fixed set of fake migration packages."""
    class FakeEntryPoint(object):
        def __init__(self, migration_package):
            self._migration_package = migration_package
        def load(self):
            return self._migration_package
    # Only the migrations entry point is faked; anything else gets nothing.
    if name != models.MIGRATIONS_ENTRY_POINT:
        return []
    packages_under_test = [
        migration_packages.a,
        migration_packages.duplicate_versions,
        migration_packages.raise_exception,
        migration_packages.version_gap,
        migration_packages.version_zero,
        migration_packages.z,
    ]
    return [FakeEntryPoint(pkg) for pkg in packages_under_test]
# Mock 1.0.0 has a built in mock_open, and one day when we upgrade to 1.0.0 we can use that. In the
# meantime, I've included the example for mock_open as listed in the Mock 0.8 docs, slightly
# modified to allow read_da | ta to just be a str.
# http://www.voidspace.org.uk/python/mock/0.8/examples.html?highlight=open#mocking-open
if inPy3k:
    # Python 3 removed the `file` builtin; spec the mock with the attribute
    # names of io text-file objects instead.
    file_spec = [
        '_CHUNK_SIZE', '__enter__', '__eq__', '__exit__',
        '__format__', '__ge__', '__gt__', '__hash__', '__iter__', '__le__',
        '__lt__', '__ne__', '__next__', '__repr__', '__str__',
        '_checkClosed', '_checkReadable', '_checkSeekable',
        '_checkWritable', 'buffer', 'close', 'closed', 'detach',
        'encoding', 'errors', 'fileno', 'flush', 'isatty',
        'line_buffering', 'mode', 'name',
        'newlines', 'peek', 'raw', 'read', 'read1', 'readable',
        'readinto', 'readline', 'readlines', 'seek', 'seekable', 'tell',
        'truncate', 'writable', 'write', 'writelines']
else:
    # Python 2: the `file` builtin type provides the spec directly.
    file_spec = file
def mock_open(mock=None, read_data=None):
    """Build a MagicMock suitable for patching the `open` builtin.
    Based on the mock 0.8 docs example, adjusted so read_data may be a str.
    """
    if mock is None:
        mock = MagicMock(spec=file_spec)
    handle = MagicMock(spec=file_spec)
    handle.write.return_value = None
    fake_file = StringIO(read_data)
    if read_data is None:
        # No canned data: `with open(...)` yields the handle itself.
        if hasattr(handle, '__enter__'):
            handle.__enter__.return_value = handle
    else:
        # Canned data: the context manager yields a real StringIO, and the
        # bare handle's read() is wired to the same buffer.
        if hasattr(handle, '__enter__'):
            handle.__enter__.return_value = fake_file
        handle.read = fake_file.read
    mock.return_value = handle
    return mock
class MigrationTest(base.PulpServerTests):
    """Base test case that also clears MigrationTracker state between tests."""
    def clean(self):
        super(MigrationTest, self).clean()
        # Make sure each test doesn't have any lingering MigrationTrackers
        MigrationTracker.objects().delete()
class TestMigrateDatabase(MigrationTest):
    @patch('pulp.server.db.manage.logging.getLogger')
    @patch('pulp.server.db.migrate.models.get_migration_packages', auto_spec=True)
    def test_migration_removed(self, mock_get_packages, mock_getLogger):
        """
        ensure that if a migration raises the MigrationRemovedError, it bubbles up.
        """
        # One fake package whose single unapplied migration raises
        # MigrationRemovedError when applied.
        mock_package = MagicMock()
        mock_package.current_version = 6
        mock_package.latest_available_version = 7
        mock_package.name = 'foo'
        mock_migration = MagicMock()
        mock_migration.version = 7
        mock_package.unapplied_migrations = [mock_migration]
        e = models.MigrationRemovedError('0006', '1.2.0', '1.1.0', 'foo')
        mock_package.apply_migration.side_effect = e
        mock_get_packages.return_value = [mock_package]
        options = MagicMock()
        options.dry_run = False
        with self.assertRaises(models.MigrationRemovedError) as assertion:
            manage.migrate_database(options)
        # The very exception instance raised must propagate unwrapped.
        self.assertTrue(assertion.exception is e)
class TestManageDB(MigrationTest):
def clean(self):
super(self.__class__, self).clean()
types_db.clean()
    @patch.object(manage, 'PluginManager')
    @patch.object(manage, 'model')
    def test_ensure_database_indexes(self, mock_model, mock_plugin_manager):
        """
        Make sure that the ensure_indexes method is called for all
        the appropriate platform models
        """
        # One fake plugin-provided unit model; ensure_database_indexes must
        # call ensure_indexes() on it exactly once.
        test_model = MagicMock()
        mock_plugin_manager.return_value.unit_models.items.return_value = [('test-unit',
                                                                            test_model)]
        manage.ensure_database_indexes()
        test_model.ensure_indexes.assert_called_once_with()
    @patch.object(manage, 'PluginManager')
    @patch.object(manage, 'model')
    def test_ensure_database_indexes_throws_exception(self, mock_model, mock_plugin_manager):
        """
        Make sure that the ensure_indexes method is called for all
        the appropriate platform models
        """
        # A unit model that illegally declares an index over its own unit key.
        test_model = MagicMock()
        test_model.unit_key_fields = ('1', '2', '3')
        unit_key_index = {'fields': test_model.unit_key_fields, 'unique': True}
        test_model._meta.__getitem__.side_effect = [[unit_key_index]]
        mock_plugin_manager.return_value.unit_models.items.return_value = [('test-unit',
                                                                            test_model)]
        with self.assertRaises(ValueError) as context:
            manage.ensure_database_indexes()
        # NOTE(review): "handlesit" matches the message produced by the code
        # under test — do not "fix" the expected string here.
        self.assertEqual(context.exception.message, "Content unit type 'test-unit' explicitly "
                                                    "defines an index for its unit key. This is "
                                                    "not allowed because the platform handlesit "
                                                    "for you.")
    @patch.object(manage, 'ensure_database_indexes')
    @patch('logging.config.fileConfig')
    @patch('pkg_resources.iter_entry_points', iter_entry_points)
    @patch('pulp.server.db.manage.connection.initialize')
    @patch('pulp.server.db.manage.factory')
    @patch('pulp.server.db.manage.logging.getLogger')
    @patch('pulp.server.db.manage.RoleManager.ensure_super_user_role')
    @patch('pulp.server.db.manage.managers.UserManager.ensure_admin')
    @patch('pulp.server.db.migrate.models.pulp.server.db.migrations',
           migration_packages.platform)
    @patch('sys.argv', ["pulp-manage-db"])
    @patch.object(models.MigrationPackage, 'apply_migration')
    def test_admin_is_ensured(self, apply_migration, ensure_admin, ensure_super_user_role,
                              getLogger, factory, initialize, fileConfig, ensure_db_indexes):
        """
        pulp-manage-db is responsible for making sure the admin user and role are in place. This
        test makes sure the manager methods that do that are called.
        """
        logger = MagicMock()
        getLogger.return_value = logger
        code = manage.main()
        self.assertEqual(code, os.EX_OK)
        # Make sure all the right logging happens
        expected_messages = ('Ensuring the admin role and user are in place.',
                             'Admin role and user are in place.')
        # Collapse all info() calls into one string so ordering doesn't matter.
        info_messages = ''.join([mock_call[1][0] for mock_call in logger.info.mock_calls])
        for msg in expected_messages:
            self.assertTrue(msg in info_messages)
        # Make sure the admin user and role creation methods were called. We'll leave it up to other
        # tests to make sure they work.
        ensure_admin.assert_called_once_with()
        ensure_super_user_role.assert_called_once_with()
        # Also, make sure the factory was initialized
        factory.initialize.assert_called_once_with()
        initialize.assert_called_once_with(max_timeout=1)
@patch('logging.config.fileConfig')
|
from flask import Flask
from flask import request
from flask import jsonify
from flask import abort
import time
app = Flask(__name__)
@app.route('/api/1', defaults={'path': ''}, methods=['GET', 'POST'])
@app.route('/api/1/<path:path>', methods=['GET', 'POST'])
def api1(path):
    """Mock endpoint: respond slowly (20 s) with a fixed user-info payload."""
    time.sleep(20)
    payload = {
        'userinfo': {
            'username': 'zhouyang',
            'pk': 10,
            'birthday': '2010101'
        }
    }
    return jsonify(payload)
@app.route('/api/2', defaults={'path': ''}, methods=['GET', 'POST'])
@app.route('/api/2/<path:path>', methods=['GET', 'POST'])
def api2(path):
    """Mock endpoint: always fail with HTTP 400."""
    return abort(400, 'you did a bad request')
@app.route('/api/3', defaults={'path': ''}, methods=['GET', 'POST'])
@app.route('/api/3/<path:path>', methods=['GET', 'POST'])
def api3(path):
    """Mock endpoint: echo the userId query parameter back as JSON."""
    user_id = request.args.get('userId')
    return jsonify({'userinfo': {'userId': user_id}})
@app.route('/usercenter/userinfo', methods=[' | GET', 'POST'])
def api4():
return jsonify({
'userinfo': {
'username': 'zhouyang'
}
})
if __name__ == '__main__':
    # Bind to all interfaces so the mock API is reachable from other hosts.
    app.run(port=1330, host='0.0.0.0')
|
"""
Simple utility code for animations.
"""
# Author: Prabhu Ramachandran <prabhu at aerodotiitbdotacdotin>
# Copyright (c) 2009, Enthought, Inc.
# License: BSD Style.
import types
from functools import wraps
try:
    from decorator import decorator
    HAS_DECORATOR = True
except ImportError:
    # Optional dependency: fall back to functools.wraps when the
    # `decorator` package is unavailable.
    HAS_DECORATOR = False
from pyface.timer.api import Timer
from traits.api import HasTraits, Button, Instance, Range
from traitsui.api import View, Group, Item
###############################################################################
# `Animator` class.
###############################################################################
class Animator(HasTraits):
    """ Convenience class to manage a timer and present a convenient
    UI. This is based on the code in `tvtk.tools.visual`.
    Here is a simple example of using this class::
        >>> from mayavi import mlab
        >>> def anim():
        ...     f = mlab.gcf()
        ...     while 1:
        ...         f.scene.camera.azimuth(10)
        ...         f.scene.render()
        ...         yield
        ...
        >>> anim = anim()
        >>> t = Animator(500, anim.next)
        >>> t.edit_traits()
    This makes it very easy to animate your visualizations and control
    it from a simple UI.
    **Notes**
    If you want to modify the data plotted by an `mlab` function call,
    please refer to the section on: :ref:`mlab-animating-data`
    """
    ########################################
    # Traits.
    # UI buttons that start/stop the managed timer.
    start = Button('Start Animation')
    stop = Button('Stop Animation')
    # Interval between timer ticks, in milliseconds.
    delay = Range(10, 100000, 500,
                  desc='frequency with which timer is called')
    # The internal timer we manage.
    timer = Instance(Timer)
    ######################################################################
    # User interface view
    traits_view = View(Group(Item('start'),
                             Item('stop'),
                             show_labels=False),
                       Item('_'),
                       Item(name='delay'),
                       title='Animation Controller',
                       buttons=['OK'])
    ######################################################################
    # Initialize object
    def __init__(self, millisec, callable, *args, **kwargs):
        """Constructor.
        **Parameters**
        :millisec: int specifying the delay in milliseconds
                   between calls to the callable.
        :callable: callable function to call after the specified
                   delay.
        :\*args: optional arguments to be passed to the callable.
        :\*\*kwargs: optional keyword arguments to be passed to the callable.
        """
        HasTraits.__init__(self)
        self.delay = millisec
        self.ui = None
        # The timer starts ticking immediately upon construction.
        self.timer = Timer(millisec, callable, *args, **kwargs)
    ######################################################################
    # `Animator` protocol.
    ######################################################################
    def show(self):
        """Show the animator UI.
        """
        self.ui = self.edit_traits()
    def close(self):
        """Close the animator UI.
        """
        if self.ui is not None:
            self.ui.dispose()
    ######################################################################
    # Non-public methods, Event handlers
    def _start_fired(self):
        self.timer.Start(self.delay)
    def _stop_fired(self):
        self.timer.Stop()
    def _delay_changed(self, value):
        # Restart the timer with the new interval if it is currently running.
        t = self.timer
        if t is None:
            return
        if t.IsRunning():
            t.Stop()
            t.Start(value)
###############################################################################
# Decorators.
def animate(func=None, delay=500, ui=True):
    """ A convenient decorator to animate a generator that performs an
    animation. The `delay` parameter specifies the delay (in
    milliseconds) between calls to the decorated function. If `ui` is
    True, then a simple UI for the animator is also popped up. The
    decorated function will return the `Animator` instance used and a
    user may call its `Stop` method to stop the animation.
    If an ordinary function is decorated a `TypeError` will be raised.
    **Parameters**
    :delay: int specifying the time interval in milliseconds between
            calls to the function.
    :ui: bool specifying if a UI controlling the animation is to be
         provided.
    **Returns**
    The decorated function returns an `Animator` instance.
    **Examples**
    Here is the example provided in the Animator class documentation::
        >>> from mayavi import mlab
        >>> @mlab.animate
        ... def anim():
        ...     f = mlab.gcf()
        ...     while 1:
        ...         f.scene.camera.azimuth(10)
        ...         f.scene.render()
        ...         yield
        ...
        >>> a = anim() # Starts the animation.
    For more specialized use you can pass arguments to the decorator::
        >>> from mayavi import mlab
        >>> @mlab.animate(delay=500, ui=False)
        ... def anim():
        ...     f = mlab.gcf()
        ...     while 1:
        ...         f.scene.camera.azimuth(10)
        ...         f.scene.render()
        ...         yield
        ...
        >>> a = anim() # Starts the animation without a UI.
    **Notes**
    If you want to modify the data plotted by an `mlab` function call,
    please refer to the section on: :ref:`mlab-animating-data`.
    """
    class Wrapper(object):
        # The wrapper which calls the decorated function.
        def __init__(self, function):
            self.func = function
            self.ui = ui
            self.delay = delay
        def __call__(self, *args, **kw):
            # Accept either an already-created generator or a function that
            # returns one when called.
            if isinstance(self.func, types.GeneratorType):
                f = self.func
            else:
                f = self.func(*args, **kw)
            if isinstance(f, types.GeneratorType):
                # NOTE(review): f.next is Python 2 generator API; Python 3
                # would need next(f) / f.__next__ — confirm target version.
                a = Animator(self.delay, f.next)
                if self.ui:
                    a.show()
                return a
            else:
                msg = 'The function "%s" must be a generator '\
                      '(use yield)!' % (self.func.__name__)
                raise TypeError(msg)
        def decorator_call(self, func, *args, **kw):
            return self(*args, **kw)
    def _wrapper(function):
        # Needed to create the Wrapper in the right scope.
        if HAS_DECORATOR:
            # The decorator calls a callable with (func, *args, **kw) signature
            return decorator(Wrapper(function).decorator_call, function)
        else:
            return wraps(function)(Wrapper(function))
    # Support both bare @animate and parameterized @animate(delay=..., ui=...).
    if func is None:
        return _wrapper
    else:
        return _wrapper(func)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'NewMangaDialog.ui'
#
# Created: Wed Jul 24 19:06:21 2013
# by: PyQt4 UI code generator 4.10.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # PyQt API v2 / Python 3: QString is gone; plain str is already unicode.
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Newer Qt drops the encoding argument of QApplication.translate().
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_NewMangaDialog(object):
    # Auto-generated by pyuic4 from 'NewMangaDialog.ui' — regenerate rather
    # than hand-edit (see the header warning: changes here will be lost).
    def setupUi(self, NewMangaDialog):
        NewMangaDialog.setObjectName(_fromUtf8("NewMangaDialog"))
        NewMangaDialog.resize(231, 78)
        self.gridLayout = QtGui.QGridLayout(NewMangaDialog)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.splitter = QtGui.QSplitter(NewMangaDialog)
        self.splitter.setOrientation(QtCore.Qt.Horizontal)
        self.splitter.setObjectName(_fromUtf8("splitter"))
        self.label = QtGui.QLabel(self.splitter)
        self.label.setObjectName(_fromUtf8("label"))
        self.mangaLineEdit = QtGui.QLineEdit(self.splitter)
        self.mangaLineEdit.setObjectName(_fromUtf8("mangaLineEdit"))
        self.gridLayout.addWidget(self.splitter, 0, 0, 1, 1)
        self.buttonBox = QtGui.QDialogButtonBox(NewMangaDialog)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        self.gridLayout.addWidget(self.buttonBox, 1, 0, 1, 1)
        self.retranslateUi(NewMangaDialog)
        # Standard accept/reject wiring for the dialog's button box.
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), NewMangaDialog.accept)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), NewMangaDialog.reject)
        QtCore.QMetaObject.connectSlotsByName(NewMangaDialog)
    def retranslateUi(self, NewMangaDialog):
        NewMangaDialog.setWindowTitle(_translate("NewMangaDialog", "Dialog", None))
        self.label.setText(_translate("NewMangaDialog", "Manga:", None))
|
"""engine.SCons.Platform.darwin
Platform-specific initialization for Mac OS X systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# dis | tribute, sublicense, and/or sell copies of the | Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/darwin.py 5023 2010/06/14 22:05:46 scons"
import posix
def generate(env):
    """Layer Mac OS X specifics on top of the generic POSIX environment."""
    posix.generate(env)
    # Mac OS X shared libraries use .dylib rather than .so.
    env['SHLIBSUFFIX'] = '.dylib'
    # Append the Fink installation prefix so its tools can be found.
    env['ENV']['PATH'] += ':/sw/bin'
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
# -*- coding: utf-8 -*-
# This file is part of https://github.com/26fe/jsonstat.py
# Copyright (C) 2016-2021 gf <gf@26fe.com>
# See LICENSE file
# stdlib
import time
import os
import hashlib
# packages
import requests
# jsonstat
from jsonstat.exceptions import JsonStatException
class Downloader:
    """Helper class to download json stat files.

    Implements a very simple disk cache: pages are written under
    *cache_dir* and re-served from there while still fresh.
    """

    def __init__(self, cache_dir="./data", time_to_live=None):
        """initialize downloader

        :param cache_dir: directory where to store downloaded files;
            ``None`` disables storing entirely
        :param time_to_live: how many seconds a stored file stays valid;
            ``None`` means forever, ``0`` means never reuse from disk
        """
        self.__cache_dir = None if cache_dir is None else os.path.abspath(cache_dir)
        self.__time_to_live = time_to_live
        self.__session = requests.session()

    def cache_dir(self):
        """Return the absolute cache directory, or None when caching is off."""
        return self.__cache_dir

    def download(self, url, filename=None, time_to_live=None):
        """Download *url* from the internet and return its content.

        The content is stored into <cache_dir>/<filename>; when a fresh
        cached copy already exists it is returned straight from disk.

        :param url: page to be downloaded
        :param filename: file name used to store the content; None derives
            a stable name from the url
        :param time_to_live: 0 forces a fresh download; any other value
            keeps the downloader's default freshness policy
        :returns: the content of url (str type)
        """
        pathname = self.__build_pathname(filename, url)
        # note: html must be a str type not byte type
        if time_to_live != 0 and self.__is_cached(pathname):
            return self.__read_page_from_file(pathname)
        response = self.__session.get(url)
        response.raise_for_status()
        page_text = response.text
        self.__write_page_to_cache(pathname, page_text)
        return page_text

    def __build_pathname(self, filename, url):
        """Map (filename, url) to a path inside the cache dir (None = off)."""
        if self.__cache_dir is None:
            return None
        if filename is None:
            # no explicit name: derive a stable one from the url
            filename = hashlib.md5(url.encode('utf-8')).hexdigest()
        return os.path.join(self.__cache_dir, filename)

    def __is_cached(self, pathname):
        """Return True when *pathname* holds a still-valid cached copy."""
        if pathname is None or not os.path.exists(pathname):
            return False
        if self.__time_to_live is None:
            return True
        age = time.time() - os.stat(pathname).st_mtime
        return age < self.__time_to_live

    def __write_page_to_cache(self, pathname, content):
        """Persist *content* at *pathname*, creating the cache dir lazily."""
        if pathname is None:
            return
        # create cache directory only the first time it is needed
        if not os.path.exists(self.__cache_dir):
            os.makedirs(self.__cache_dir)
        if not os.path.isdir(self.__cache_dir):
            msg = "cache_dir '{}' is not a directory".format(self.__cache_dir)
            raise JsonStatException(msg)
        # note:
        # in python 3 file must be open without b (binary) option to write string
        # otherwise the following error will be generated
        # TypeError: a bytes-like object is required, not 'str'
        with open(pathname, 'w') as f:
            f.write(content)

    @staticmethod
    def __read_page_from_file(pathname):
        """Read a previously cached page back from disk as str."""
        with open(pathname, 'r') as f:
            return f.read()
|
# Us | e default debug configuration or local configuration
try:
from .config_local import *
except ImportError:
from .config_default impo | rt *
|
#-------------------------------------------------------------------------------
# Name: ModSlaveSettingsRTU
# Purpose:
#
# Author: ElBar
#
# Created: 17/04/2012
# Copyright: (c) ElBar 2012
# Licence: <your licence>
#--------------------------------- | ----------------------------------------------
#!/usr/bin/env python
from PyQt4 import QtGui,QtCore
from Ui_settingsModbusRTU import Ui_SettingsModbusRTU
import Utils
#add logging capability
import logging
#--- | ----------------------------------------------------------------------------
class ModSlaveSettingsRTUWindow(QtGui.QDialog):
    """ Class wrapper for RTU settings ui """

    def __init__(self):
        super(ModSlaveSettingsRTUWindow,self).__init__()
        #init value
        # Defaults used until the user accepts the dialog.
        self.rtu_port = 1
        self.baud_rate = 9600
        self.byte_size = 8
        self.parity = 'None'
        self.stop_bits = '1'
        self._logger = logging.getLogger("modbus_tk")
        self.setupUI()

    def setupUI(self):
        """Build the dialog from the generated Ui class and wire signals."""
        #create window from ui
        self.ui=Ui_SettingsModbusRTU()
        self.ui.setupUi(self)
        #set init values
        self._set_values()
        #signals-slots
        # accepted/rejected are emitted by the dialog's OK / Cancel buttons.
        self.accepted.connect(self._OK_pressed)
        self.rejected.connect(self._cancel_pressed)

    def _set_values(self):
        """set param values to ui"""
        self._logger.info("Set param values to UI")
        self.ui.cmbPort.setEditText(str(self.rtu_port))
        self.ui.cmbBaud.setCurrentIndex(self.ui.cmbBaud.findText(str(self.baud_rate)))
        self.ui.cmbDataBits.setCurrentIndex(self.ui.cmbDataBits.findText(str(self.byte_size)))
        self.ui.cmbParity.setCurrentIndex(self.ui.cmbParity.findText(self.parity))
        self.ui.cmbStopBits.setCurrentIndex(self.ui.cmbStopBits.findText(str(self.stop_bits)))

    def _get_values(self):
        """get param values from ui"""
        self._logger.info("Get param values from UI")
        self.rtu_port = int(self.ui.cmbPort.currentText())
        # NOTE(review): baud_rate/byte_size/parity/stop_bits are stored as the
        # combo boxes' current text (Qt string type), not converted to int --
        # confirm downstream consumers expect that.
        self.baud_rate = self.ui.cmbBaud.currentText()
        self.byte_size = self.ui.cmbDataBits.currentText()
        self.parity = self.ui.cmbParity.currentText()
        self.stop_bits = self.ui.cmbStopBits.currentText()

    def _OK_pressed(self):
        """new values are accepted"""
        port = str(self.ui.cmbPort.currentText())
        if (port.isdigit() and int(port) >= 1 and int(port) <= 16):#port must be an integer
            self._get_values()
        else:
            # Invalid port: reset it to 1, push the reset back to the UI and
            # re-read so all cached fields stay consistent, then report.
            self.rtu_port = 1
            self._set_values()
            self._get_values()
            self._logger.error("Port must be an integer between 1 and 16")
            Utils.errorMessageBox("Port must be an integer between 1 and 16")

    def _cancel_pressed(self):
        """new values are rejected"""
        # Restore the widgets from the last accepted values.
        self._set_values()

    def showEvent(self,QShowEvent):
        """set values for controls"""
        self._set_values()
#------------------------------------------------------------------------------- |
for views.py
"""
from base_handler import base_handler
import traceback
import app.model
from flask import g, render_template
class single_access_handler(base_handler):
def __init__(self):
    """
    Manages all the operations that are involved with a single port association with EPGs
    (for virtual port channel association the vpc_access_handler is used)

    Any connection error is kept in self.exception so that each AJAX
    handler can report it instead of crashing at construction time.
    :return:
    """
    try:
        # init_connections() is provided by the handler hierarchy
        # (defined outside this file).
        self.cobra_apic_object = single_access_handler.init_connections()
        self.exception = None
    except Exception as e:
        self.exception = e
        # BUG FIX: traceback.print_exc() writes the traceback to stderr and
        # returns None; the old ``print traceback.print_exc()`` additionally
        # printed that None. Call it directly instead.
        traceback.print_exc()
def get_create_single_access_networks(self, obj_response, form_values):
    """Populate the network <select> with the EPGs of the chosen group.

    Renders the EPGs under the selected group's application profile into
    #sel_create_single_access_network (sijax AJAX handler).

    :param obj_response: sijax response object used to push JS/HTML back
    :param form_values: dict of submitted form fields
    """
    # Check if there has been connection errors
    if self.exception is not None:
        obj_response.script("create_notification('Connection problem', '" + str(self.exception).replace("'", "").
                            replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
        return
    # Load the sel_create_single_access_network select with the networks within the selected group
    try:
        network_ap = self.cobra_apic_object.get_nca_ap(form_values['sel_create_single_access_group'])
        item_list = []
        if network_ap is not None:
            networks = self.cobra_apic_object.get_epg_by_ap(str(network_ap.dn))
            for network in networks:
                # Creates a dynamic object holding the option's value/label
                network_do = type('network_do', (object,), {})
                network_do.key = str(network.dn)
                network_do.text = network.name
                item_list.append(network_do)
        html_response = render_template('select_partial.html', item_list=item_list)
        obj_response.html("#sel_create_single_access_network", html_response)
    except Exception as e:
        # BUG FIX: traceback.print_exc() prints to stderr and returns None;
        # the old ``print traceback.print_exc()`` also printed that None.
        traceback.print_exc()
        obj_response.script("create_notification('Can not retrieve networks', '" + str(e).replace("'", "").
                            replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
    finally:
        g.db.close()
        obj_response.html("#create_single_access_response", '')
def get_create_single_access_ports(self, obj_response, form_values):
    """Populate the port <select> with the free ports of the chosen leaf.

    :param obj_response: sijax response object used to push JS/HTML back
    :param form_values: dict of submitted form fields
    """
    # Check if there has been connection errors
    if self.exception is not None:
        obj_response.script("create_notification('Connection problem', '" + str(self.exception).replace("'", "").
                            replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
        return
    # Load the sel_create_single_access_port select with the available ports within the selected leaf
    try:
        # ports[0] holds the option values, ports[1] the display names
        # (parallel lists as returned by get_available_ports).
        ports = self.cobra_apic_object.get_available_ports(form_values['sel_create_single_access_leaf'])
        item_list = []
        for i in range(0, len(ports[0])):
            # Creates a dynamic object holding the option's value/label
            port_do = type('port_do', (object,), {})
            port_do.key = ports[0][i]
            port_do.text = ports[1][i]
            item_list.append(port_do)
        html_response = render_template('select_partial.html', item_list=item_list)
        obj_response.html("#sel_create_single_access_port", html_response)
    except Exception as e:
        # BUG FIX: traceback.print_exc() prints to stderr and returns None;
        # the old ``print traceback.print_exc()`` also printed that None.
        traceback.print_exc()
        obj_response.script("create_notification('Can not retrieve ports', '" + str(e).replace("'", "").
                            replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
    finally:
        g.db.close()
        obj_response.html("#create_single_access_response", '')
def create_single_access(self, obj_response, form_values):
    """Create the APIC policies binding a single port to one or more EPGs.

    Depending on 'create_port_access_type' either one network
    ('single_vlan') or every network of a profile ('vlan_profile') is
    statically bound to the selected leaf/port.

    :param obj_response: sijax response object used to push JS/HTML back
    :param form_values: dict of submitted form fields
    """
    # Check if there has been connection errors
    if self.exception is not None:
        obj_response.script("create_notification('Connection problem', '" + str(self.exception).replace("'", "").
                            replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
        return
    # Creates switch profiles, interface profiles, policy groups and static bindings to associate a port
    # to an EPG
    try:
        # e.g. '...[eth1/10]' -> 'eth1-10'; both ids are used to build
        # deterministic policy names below.
        port_id = form_values['sel_create_single_access_port'].split('[')[-1][:-1].replace('/','-')
        switch_id = form_values['sel_create_single_access_leaf'].split('/')[-1]
        if form_values['create_port_access_type'] == 'single_vlan':
            network_o = app.model.network.select().where(app.model.network.epg_dn ==
                                                         form_values['sel_create_single_access_network'])
            if len(network_o) > 0:
                self.cobra_apic_object.create_single_access(network_o[0].epg_dn,
                                                            form_values['sel_create_single_access_leaf'],
                                                            form_values['sel_create_single_access_port'],
                                                            network_o[0].encapsulation,
                                                            'migration-tool',
                                                            'if_policy_' + switch_id + '_' + port_id,
                                                            'single_access_' + switch_id + '_' + port_id)
                obj_response.script("create_notification('Assigned', '', 'success', 5000)")
            else:
                obj_response.script(
                    "create_notification('Network not found in local database', '', 'danger', 0)")
        elif form_values['create_port_access_type'] == 'vlan_profile':
            network_profilexnetworks = app.model.network_profilexnetwork.select().where(
                app.model.network_profilexnetwork.network_profile == int(form_values['sel_profile_create_port_access']))
            for network_profile in network_profilexnetworks:
                network_o = app.model.network.select().where(app.model.network.id == network_profile.network.id)
                if len(network_o) > 0:
                    self.cobra_apic_object.create_single_access(network_o[0].epg_dn,
                                                                form_values['sel_create_single_access_leaf'],
                                                                form_values['sel_create_single_access_port'],
                                                                network_o[0].encapsulation,
                                                                'migration-tool',
                                                                'if_policy_' + switch_id + '_' + port_id,
                                                                'single_access_' + switch_id + '_' + port_id)
                else:
                    # BUG FIX: the message used to be attached via ex.message
                    # on a bare Exception(), so str(e) in the handler below
                    # was empty and the notification carried no text. Pass
                    # the message to the constructor instead.
                    raise Exception('Some networks where not assigned because they are not in the local database')
            obj_response.script("create_notification('Assigned', '', 'success', 5000)")
    except Exception as e:
        # BUG FIX: traceback.print_exc() prints to stderr and returns None;
        # the old ``print traceback.print_exc()`` also printed that None.
        traceback.print_exc()
        obj_response.script("create_notification('Can not create single access', '" + str(e).replace("'", "").
                            replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
    finally:
        g.db.close()
        obj_response.html("#create_single_access_response", '')
def get_delete_single_access_networks(self, obj_response, form_values):
# Check if there has been connection errors
if self.exception is not None:
obj_response.script("create_notification('Connection problem', '" + str(self.exception).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
return
# Load the sel_delete_single_access_network select with the network within the selected group
try:
network_ap = self.cobra_apic_object.get_nca_ap(form_values['sel_delete_single_access_group |
datetime.now()
t0 = datetime.datetime.now()
ideal_path = 'ideal_coordinates.pickle'
#if no paths have been done before, start afresh!
if not os.path.exists(ideal_path):
M = 1e5
many_points = ps.points_in_shape(shape_path, M)
coords = cluster_points(many_points,N)
#else import already processed coordinates if the program has already done so.
else:
f = open(name=ideal_path, mode='rb')
coords = pickle.load(f)
f.close()
#generate N kmeans cluster points from massive M number of randomly distributed
#points inside the shape file.
lonmin = np.floor(min(coords[:,0]))
lonmax = np.ceil(max(coords[:,0]))
latmin = np.floor(min(coords[:,1]))
latmax = np.ceil(max(coords[:,1]))
print lonmin,lonmax,latmin,latmax
#coords1 = [coord1 for coord1 in coords for coord2 in coords]
#coords2 = [coord2 for coord1 in coords for coord2 in coords]
#columns = np.column_stack((coords1, coords2))
kappa = [np.vstack([[coord1[0],coord1[1],coord2[0],coord2[1]]\
for coord2 in coords]) for coord1 in coords]
def spread_paths(nets):
    """Apply paths_func to every row of *nets* and return the results.

    *nets* is an N x 4 array of [lon1, lat1, lon2, lat2] rows linking one
    coordinate to every other coordinate; paths_func is defined elsewhere
    in this script.

    NOTE(review): relies on Python 2 ``map()`` returning a list (the result
    is later mutated/appended as a list); under Python 3 this would need
    ``list(map(paths_func, nets))``.
    """
    #pool = mp.Pool()
    #paths = pool.map(new_paths, nets)
    #pool.close()
    #pool.join()
    paths = map(paths_func, nets)
    #create a flattened numpy array of size 2xN from the paths created!
    #paths = np.asarray(list(itertools.chain(*paths)))
    #keep all but the repeated coordinates by keeping only unique whole rows!
    #method is slowed without the b contiguous array
    #b = np.ascontiguousarray(paths).view(np.dtype((np.void, paths.dtype.itemsize * paths.shape[1])))
    #_, idx = np.unique(b, return_index=True)
    #paths = np.unique(b).view(paths.dtype).reshape(-1, paths.shape[1])
    #plt.figure()
    #plt.scatter(paths[:,0],paths[:,1])
    #name = uuid.uuid4()
    #plt.savefig('{}.png'.format(name))
    return paths
t0 = datetime.datetime.now()
pool = mp.Pool()
paths = pool.map(spread_paths, kappa)
pool.close()
pool.join()
t1 = datetime.datetime.now()
print t1-t0
#paths = list(paths)
counter = 0
#cd Desktop/Link\ to\ SIMULATIONS/Network_Tracks/smarter_model/
grad_ideal, grad_check1, grad_check2, H_avg1, H_avg2 = 0, 0, 0, 0, 0
SHAPE = (1,1)
counter2 = 0
perc_high = 0.01
#counter of how many times the points
#have been chosen from the lowest path density spots
low_counter = 0
#counter of how many times the points
#have been chosen from the random spots.
random_counter = 0
new_coord = 0
infinite_counter = 0
while infinite_counter <= 1:
t0 = datetime.datetime.now()
#the following while loop is a work around fix to a:
#new paths shape: (130, 100, 4) rather than being (130,) like it should be!
while SHAPE != (130,):
#if counter2 >= len(paths)-1:
# counter2 = 0
#cycle through paths
#----------------------------------------------------------------------
#old_path = paths[counter2]
#del paths[counter2]
#old_coord = [old_path[0][0][0],old_path[0][0][1]]
#itemindex = np.where(coords==old_coord)[0][0]
#coords = list | (coords)
#find index of array in nested array to remove!
#del coords[itemindex]
#print(counter2)
#----------------------------------------------- | -----------------------
#or random selection of paths?!
#----------------------------------------------------------------------
#remove a random set of paths associated with a single one of the N coordinates
rand_int = random.randint(0,len(paths)-1)
old_path = paths[rand_int]
#figure out which old coordinate to remove from the coordinates list
old_coord = [old_path[0][0][0],old_path[0][0][1]]
#print "old coord:", old_coord
#NEED TO REMOVE OLD POINT FROM COORDS!
#find index of array in nested array to remove!
itemindex = np.where(coords==old_coord)[0][0]
coords = list(coords)
#find index of array in nested array to remove!
del coords[itemindex]
coords = np.asarray(coords)
new_coord_first = new_coord
#----------------------------------------------------------------------
#generate new point coordinate
if not counter >= 1:
new_coord = ps.points_in_shape(shape_path, 1)[0]
else:
new_coord = new_coord
#place new coordinate in old set of coordinates
coords = np.append(coords, [new_coord], axis=0)
#generate new array of points in conjunction with the new randomly generated point!
new_coord_set = np.vstack([[new_coord[0],new_coord[1],coord1[0],\
coord1[1]] for coord1 in coords])
#generate new random point in place of all 'popped' points!
new_paths = map(paths_func, new_coord_set)
SHAPE = np.asarray(new_paths).shape
if not SHAPE == (130,):
#remove substitude back the old coordinate for the new coordinate!
coords = list(coords)
#find index of array in nested array to remove!
del coords[-1]
coords = np.asarray(coords)
#place new coordinate in old set of coordinates
coords = np.append(coords, [old_coord], axis=0)
#print "new paths shape:", SHAPE
#paths = np.asarray(paths)
#if np.asarray(new_paths).shape != (130,):
# print("This one's trouble")
# print np.asarray(new_paths).shape
# new_paths = np.asarray(new_paths[0]).reshape(130,)
del paths[rand_int]
SHAPE = (1,1)
#place new_paths in original path set!
#paths = np.insert(paths, [1], [new_paths], axis=0)
paths = np.append(paths, [new_paths], axis=0)
#paths = paths.append(new_paths)
#paths = np.concatenate((paths, [new_paths]), axis=0)
#paths = np.append(paths, new_paths, axis=0)
#create a flattened numpy array of size 2xN from the paths created!
paths_density_check = list(itertools.chain(*paths))
paths_density_check = np.asarray(list(itertools.chain(*paths_density_check)))
#keep all but the repeated coordinates by keeping only unique whole rows!
#method is slowed without the b contiguous array
b = np.ascontiguousarray(paths_density_check).view(np.dtype\
((np.void, paths_density_check.dtype.itemsize * \
paths_density_check.shape[1])))
_, idx = np.unique(b, return_index=True)
paths_density_check = np.unique(b).view(paths_density_check.dtype)\
.reshape(-1, paths_density_check.shape[1])
#plt.figure()
#plt.scatter(paths_density_check[:,0],paths_density_check[:,1])
#plt.savefig('{}.png'.format(counter))
#remove 3rd and 4th columns
#paths_density_check = np.column_stack((paths_density_check[:,0],
# paths_density_check[:,1]))
#remove all path points that lay outside the shape file polygon
#paths_density_check = ps.paths_in_shape(paths_density_check)
paths = list(paths)
# Estimate the 2D histogram
H, xedges, yedges = np.histogram2d(paths_density_check[:,0],
paths_density_check[:,1],
bins=nbins)
#edges_new = ps.paths_in_shape(np.column_stack((xedges,yedges)))
GRAD = np.abs(np.asarray(np.gradient(H)[0]))
# H needs to be rotated and flipped
H = np.rot90(H)
GRAD = np.rot90(GRAD)
H = np.flipud(H)
GRAD = np.flipud(GRAD)
# Mask zeros
H = np.ma.masked_where(H==0,H) # Mask pixels with a value of zero
GRAD = np.ma.masked_where(GRAD==0,GRAD) # Mask pixels with a value of zero
H_avg1 = np.average(H)
grad_check1 = np.std(GRAD)
rand_indicator = random.randint(1,10)
if 0 < rand_indicator <= 5:
#half the time move the coordinates to low density locations.
WHERE = np.where(H < perc_high*H_avg1)
#scale these points with respect to the lat-lon limits!
|
"
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
import os
import re
from shutil import move
from tempfile import mkstemp

from django.conf import settings
from django.db import transaction
from rest_framework.response import Response

import rest_framework_custom as rfc
from storageadmin.models import (NetworkInterface, Appliance)
from storageadmin.serializers import NetworkInterfaceSerializer
from storageadmin.util import handle_exception
from system.osi import (config_network_device, get_net_config, update_issue)
from system.samba import update_samba_discovery
from system.services import superctl
logger = logging.getLogger(__name__)
class NetworkMixin(object):
    """Helpers shared by the network interface views."""

    @staticmethod
    def _update_ni_obj(nio, values):
        """Copy connection attributes from the *values* dict onto *nio*
        (a NetworkInterface object) and return it.

        Missing keys fall back to None, except ``method`` ('manual') and
        ``autoconnect`` ('no').
        """
        nio.dname = values.get('dname', None)
        nio.mac = values.get('mac', None)
        nio.method = values.get('method', 'manual')
        nio.autoconnect = values.get('autoconnect', 'no')
        nio.netmask = values.get('netmask', None)
        nio.ipaddr = values.get('ipaddr', None)
        nio.gateway = values.get('gateway', None)
        nio.dns_servers = values.get('dns_servers', None)
        nio.ctype = values.get('ctype', None)
        nio.dtype = values.get('dtype', None)
        nio.dspeed = values.get('dspeed', None)
        nio.state = values.get('state', None)
        return nio

    @staticmethod
    def _update_nginx(ipaddr=None):
        """Rewrite nginx's listen directives to bind to *ipaddr* (or to all
        addresses when None) and restart the service."""
        #update nginx config and restart the service
        conf = '%s/etc/nginx/nginx.conf' % settings.ROOT_DIR
        fo, npath = mkstemp()
        # BUG FIX: mkstemp() hands back an open OS-level file descriptor
        # which was never closed, leaking one fd per call. Close it here;
        # the temp file is reopened by name just below.
        os.close(fo)
        with open(conf) as ifo, open(npath, 'w') as tfo:
            for line in ifo.readlines():
                if (re.search('listen.*80 default_server', line) is not None):
                    substr = 'listen 80'
                    if (ipaddr is not None):
                        substr = 'listen %s:80' % ipaddr
                    line = re.sub(r'listen.*80', substr, line)
                elif (re.search('listen.*443 default_server', line) is not None):
                    substr = 'listen 443'
                    if (ipaddr is not None):
                        substr = 'listen %s:443' % ipaddr
                    line = re.sub(r'listen.*443', substr, line)
                tfo.write(line)
        # atomically replace the config with the rewritten copy
        move(npath, conf)
        superctl('nginx', 'restart')
class NetworkListView(rfc.GenericView, NetworkMixin):
    """List endpoint for network interfaces; rescans the system on access."""
    serializer_class = NetworkInterfaceSerializer

    def get_queryset(self, *args, **kwargs):
        with self._handle_exception(self.request):
            # Refresh the db from the live system configuration first.
            self._net_scan()
            #to be deprecated soon
            update_samba_discovery()
            return NetworkInterface.objects.all()

    @classmethod
    @transaction.atomic
    def _net_scan(cls):
        """Synchronize NetworkInterface rows with get_net_config() output:
        update existing rows, create missing ones, delete rows whose device
        has disappeared, and return the surviving rows serialized."""
        config_d = get_net_config(all=True)
        for dconfig in config_d.values():
            ni = None
            if (NetworkInterface.objects.filter(
                    name=dconfig['name']).exists()):
                # Known device: refresh its attributes in place.
                ni = NetworkInterface.objects.get(name=dconfig['name'])
                ni = cls._update_ni_obj(ni, dconfig)
            else:
                # New device: create a row from the reported config.
                ni = NetworkInterface(name=dconfig.get('name', None),
                                      dname=dconfig.get('dname', None),
                                      dtype=dconfig.get('dtype', None),
                                      dspeed=dconfig.get('dspeed', None),
                                      mac=dconfig.get('mac', None),
                                      method=dconfig.get('method', None),
                                      autoconnect=dconfig.get('autoconnect', None),
                                      netmask=dconfig.get('netmask', None),
                                      ipaddr=dconfig.get('ipaddr', None),
                                      gateway=dconfig.get('gateway', None),
                                      dns_servers=dconfig.get('dns_servers', None),
                                      ctype=dconfig.get('ctype', None),
                                      state=dconfig.get('state', None))
            ni.save()
        devices = []
        for ni in NetworkInterface.objects.all():
            # NOTE(review): stale rows are detected by ni.dname against
            # config_d's keys, while rows are created/looked up by ni.name --
            # confirm dname actually matches the config_d key space, or
            # valid rows could be deleted here.
            if (ni.dname not in config_d):
                logger.debug('network interface(%s) does not exist in the '
                             'system anymore. Removing from db' % (ni.name))
                ni.delete()
            else:
                devices.append(ni)
        serializer = NetworkInterfaceSerializer(devices, many=True)
        return Response(serializer.data)

    def post(self, request):
        # POST simply triggers a rescan and returns the refreshed list.
        with self._handle_exception(request):
            return self._net_scan()
class NetworkDetailView(rfc.GenericView, NetworkMixin):
serializer_class = NetworkInterfaceSerializer
def get(self, *args, **kwargs):
    """Return the serialized interface named in the URL kwargs.

    A missing interface (or any other failure) yields an empty response,
    preserving the endpoint's best-effort behavior.
    """
    try:
        data = NetworkInterface.objects.get(name=self.kwargs['iname'])
        serialized_data = NetworkInterfaceSerializer(data)
        return Response(serialized_data.data)
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallowed SystemExit/
        # KeyboardInterrupt; narrowed to Exception, behavior otherwise kept.
        return Response()
@transaction.atomic
def delete(self, request, iname):
    """Delete the NetworkInterface row named *iname*, if it exists.

    A missing interface is not treated as an error; the endpoint always
    answers with an empty response.
    """
    with self._handle_exception(request):
        if (NetworkInterface.objects.filter(name=iname).exists()):
            NetworkInterface.objects.get(name=iname).delete()
        return Response()
def _validate_netmask(self, request):
netmask = request.data.get('netmask', None)
e_msg = ('Provided netmask value(%s) is invalid. You can provide it '
'in a IP address format(eg: 255.255.255.0) or number of '
'bits(eg: 24)' % netmask)
if (netmask is None):
handle_exception(Exception(e_msg), request)
bits = | 0
try:
bits = int(netmask)
except ValueError:
#assume ip address format was provided
bits = sum([bin(int(x)).count('1') for x in '255.255.255'.split('.')])
if (bits < 1 or bits > 32):
e_msg = ('Provided netmask value(%s) is invalid. Number of '
'bits in netmask must be between 1-32' % netmask)
handle_exception(Exception(e_msg), request)
return bits
@transaction.atomic
def put(self, request, iname):
with self._handle_exception(request):
if (not NetworkInterface.objects.filter(name=iname).exists()):
e_msg = ('Netowrk interface(%s) does not exist.' % iname)
handle_exception(Exception(e_msg), request)
ni = NetworkInterface.objects.get(name=iname)
itype = request.data.get('itype')
if (itype != 'management'):
itype = 'io'
method = request.data.get('method')
ni.onboot = 'yes'
if (method == 'auto'):
config_network_device(ni.name)
elif (method == 'manual'):
ipaddr = request.data.get('ipaddr')
for i in NetworkInterface.objects.filter(ipaddr=ipaddr):
if (i.id != ni.id):
e_msg = ('IP: %s already in use by another '
'interface: %s' % (ni.ipaddr, i.name))
handle_exception(Exception(e_msg), request)
netmask = self._validate_netmask(request)
gateway = request.data.get('gateway', None)
dns_servers = request.data.get('dns_servers', None)
config_network_device(ni.name, dtype=ni.dtype, method='manual',
|
from unittest import TestCase
from rfxcom.protocol.temperature import Temperature
from rfxcom.exceptions import (InvalidPacketLength, UnknownPacketSubtype,
UnknownPacketType)
class TemperatureTestCase(TestCase):
    """Tests for the rfxcom Temperature packet parser (packet type 0x50).

    Uses assertEqual throughout: assertEquals is a deprecated alias.
    """

    def setUp(self):
        # subtype 2 packet: id 0x7002, +16.7 C, signal 8, battery 9
        self.data = bytearray(b'\x08\x50\x02\x11\x70\x02\x00\xA7\x89')
        self.parser = Temperature()

    def test_parse_bytes(self):
        """Positive temperature, subtype 2."""
        self.assertTrue(self.parser.validate_packet(self.data))
        self.assertTrue(self.parser.can_handle(self.data))
        result = self.parser.load(self.data)
        self.assertEqual(result, {
            'packet_length': 8,
            'packet_type': 80,
            'packet_type_name': 'Temperature sensors',
            'sequence_number': 17,
            'packet_subtype': 2,
            'packet_subtype_name':
                'THC238/268,THN132,THWR288,THRN122,THN122,AW129/131',
            'temperature': 16.7,
            'id': '0x7002',
            # 'channel': 2, TBC
            'signal_level': 8,
            'battery_level': 9
        })
        self.assertEqual(str(self.parser), "<Temperature ID:0x7002>")

    def test_parse_bytes2(self):
        """Positive temperature, subtype 3."""
        self.data = bytearray(b'\x08\x50\x03\x02\xAE\x01\x00\x63\x59')
        self.assertTrue(self.parser.validate_packet(self.data))
        self.assertTrue(self.parser.can_handle(self.data))
        result = self.parser.load(self.data)
        self.assertEqual(result, {
            'packet_length': 8,
            'packet_type': 80,
            'packet_type_name': 'Temperature sensors',
            'sequence_number': 2,
            'packet_subtype': 3,
            'packet_subtype_name': 'THWR800',
            'temperature': 9.9,
            'id': '0xAE01',
            # 'channel': 1, TBC
            'signal_level': 5,
            'battery_level': 9
        })
        self.assertEqual(str(self.parser), "<Temperature ID:0xAE01>")

    def test_parse_bytes_negative_temp(self):
        """Sign bit set in the temperature field yields a negative value."""
        self.data = bytearray(b'\x08\x50\x06\x02\xAE\x01\x80\x55\x59')
        self.assertTrue(self.parser.validate_packet(self.data))
        self.assertTrue(self.parser.can_handle(self.data))
        result = self.parser.load(self.data)
        self.assertEqual(result, {
            'packet_length': 8,
            'packet_type': 80,
            'packet_type_name': 'Temperature sensors',
            'sequence_number': 2,
            'packet_subtype': 6,
            'packet_subtype_name': 'TS15C',
            'temperature': -8.5,
            'id': '0xAE01',
            # 'channel': 1, TBC
            'signal_level': 5,
            'battery_level': 9
        })
        self.assertEqual(str(self.parser), "<Temperature ID:0xAE01>")

    def test_validate_bytes_short(self):
        """Truncated packets are rejected."""
        data = self.data[:1]
        with self.assertRaises(InvalidPacketLength):
            self.parser.validate_packet(data)

    def test_validate_unkown_packet_type(self):
        """A foreign packet type is neither handled nor validated."""
        self.data[1] = 0xFF
        self.assertFalse(self.parser.can_handle(self.data))
        with self.assertRaises(UnknownPacketType):
            self.parser.validate_packet(self.data)

    def test_validate_unknown_sub_type(self):
        """An unknown subtype is neither handled nor validated."""
        self.data[2] = 0xEE
        self.assertFalse(self.parser.can_handle(self.data))
        with self.assertRaises(UnknownPacketSubtype):
            self.parser.validate_packet(self.data)

    def test_log_name(self):
        self.assertEqual(self.parser.log.name, 'rfxcom.protocol.Temperature')
|
#! /usr/bin/env python3
# Filter: copy stdin to stdout, dropping every block that starts with a
# line beginning "class" and ends with a line beginning "};" (inclusive).
import sys

inside_class = False
for line in sys.stdin:
    if line.startswith("class"):
        inside_class = True
    if not inside_class:
        print(line, end='')
    elif line.startswith("};"):
        # closing line of the dropped class block; resume copying after it
        inside_class = False
|
:
try:
ret = "1"
ret += sessionString.split()[1][-2:] # last 2 digits of year
tempMap = (("fall", "9"), ("winter", "1"), ("spring", "5"))
for season in tempMap:
if season[0] in sessionString.lower():
ret += season[1]
return ret
except:
return None
def getWebData(self, course):
    """submits a POST query, initializes HTMLParser"""
    # Any failure (network, HTTP, parsing) is collapsed into the sentinel
    # string "WebPageError" for the caller to check.
    # NOTE(review): the bare except also hides programming errors -- consider
    # narrowing it.
    try:
        params = urllib.urlencode({"sess": course.session,
                                   "subject": course.subject,
                                   "cournum": course.catalogNumber})
        page = urllib.urlopen(WebParser.requestURL, params)
        parser = CustomHTMLParser(self.webData)
        # we use .replace() because HTMLParser ignores " ",
        # which would screw up our table
        # NOTE(review): as written this replace substitutes a space for a
        # space (a no-op); it presumably targeted '&nbsp;' or u'\xa0' and was
        # mangled in transit -- confirm against the original source.
        parser.feed(page.read().replace(" ", " "))
    except:
        return "WebPageError"
def parseWebData(self):
    """We try to find the beginning of the desired table"""
    # now, we find the start index and pass that on along
    # with the webData
    # Scan for the first cell pair "<subject> ... <catalog number>"; the
    # later processing methods start from self.index.
    for i in xrange(len(self.webData)-3):
        if self.webData[i] == self.thisCourse.subject \
                and self.webData[i+2] == self.thisCourse.catalogNumber:
            self.index = i
            break
    # NOTE(review): assumes self.index was initialized to -1 elsewhere
    # (initialization is outside this view) -- confirm.
    if self.index == -1:  # website not found
        return "CourseNotFound"
def processCourseInfo(self):
    """now, we do the heavy-duty processing of the data table"""
    # sets basic attrs of thisCourse; offsets are fixed positions relative
    # to the table start found by parseWebData
    self.thisCourse.units = self.webData[self.index+4]
    self.thisCourse.title = self.webData[self.index+6]
    # skip ahead to the end of the header row, which finishes with
    # the "Instructor" column label
    while self.webData[self.index] != "Instructor":
        self.index += 1
    # processing row-by-row until the end-of-course marker cell
    while not self.endOfRow(self.webData[self.index]):
        if self.webData[self.index] != "":
            # non-empty cell: may start a LEC/LAB/TUT/RESERVE row
            self.processSlot()
        self.index += 1
        if self.index == len(self.webData):
            # ran off the end of the table without an end marker
            return
def processSlot(self):
    """we check to see if this is the BEGINNING of a valid row"""
    # A lecture/lab row has LEC or LAB in the section cell and no ONLINE
    # marker in the location cell.
    if (self.webData[self.index+1][:3].upper() == "LEC"
            or self.webData[self.index+1][:3].upper() == "LAB") \
            and "ONLINE" not in self.webData[self.index+2]:
        # we don't want online classes!
        # processing a lecture row
        lec = Lecture()
        # processClass returns a truthy error string (e.g. "NoTimeError")
        # when the row cannot be parsed; such rows are skipped entirely.
        if self.processClass(lec, self.index, self.webData):
            return
        self.thisCourse.lectures.append(lec)
    elif self.webData[self.index+1][:3].upper() == "TUT":
        # processing a tutorial row
        tut = Tutorial()
        if self.processClass(tut, self.index, self.webData):
            return
        self.thisCourse.tutorials.append(tut)
    elif self.webData[self.index][:7].upper() == "RESERVE":
        # processing a reserve row; it belongs to the most recently
        # parsed lecture, if there is one
        res = Reserve()
        self.processReserve(res, self.index, self.webData)
        if self.thisCourse.lectures:
            self.thisCourse.lectures[-1].reserves.append(res)
    # note: we leave out the TST (exam?) times for now
def processReserve(self, res, index, webData):
    """Populate *res* from a reservation row: the target-group name plus
    the reserved enrollment capacity and total."""
    # strip the fixed "Reserve: " prefix to get the group name
    group = webData[index][9:]
    # drop the pointless trailing " only"
    if "only" in group:
        group = group[:-5]
    # ... and the trailing " students"/" Students"
    if "students" in group or "Students" in group:
        group = group[:-9]
    res.name = group
    # advance to the first all-digit cell: the enrollment numbers
    while not webData[index].isdigit():
        index += 1
    res.enrlCap = int(webData[index])
    res.enrlTotal = int(webData[index + 1])
def | processClass(self, lec, index, webData):
"""we process a typica | l lecture or tutorial row"""
attr1 = ["classNumber", "compSec", "campusLocation"]
for i in xrange(len(attr1)):
setattr(lec, attr1[i], webData[index+i].strip())
index += 6
attr2 = ["enrlCap", "enrlTotal", "waitCap", "waitTotal"]
for i in xrange(len(attr2)):
setattr(lec, attr2[i], int(webData[index+i]))
index += 4
# parsing the "Times Days/Date" field
match = re.search(r"([:\d]+)-([:\d]+)(\w+)", webData[index])
if not match:
# we return an error message in the "TBA" case
return "NoTimeError"
attr3 = ["startTime", "endTime", "days"]
for i in xrange(len(attr3)):
setattr(lec, attr3[i], match.group(i+1).strip())
index += 1
if len(webData[index].split()) == 2:
# sometimes, no building, room, and instructor will be given
# this is mostly for Laurier courses
lec.building, lec.room = webData[index].split()
lec.instructor = webData[index+1].strip()
def endOfRow(self, data):
    """returns true if the current data-cell is the last cell
    of this course; else - false

    The last cell is either a date range of the form ##/##-##/## or the
    "Information last updated" footer text.
    """
    # Idiom fix: return the boolean directly instead of the redundant
    # if/else returning literal True/False.
    return bool(re.search(r"\d+/\d+-\d+/\d+", data) or
                "Information last updated" in data)
def postProcess(self, course):
"""this function will convert the class times to minutes-past-
the-previous-midnight, and converts the days to numbers.
Also, some reservation-postprocessing"""
map(lambda x: x.calcMiscSeats(), course.lectures)
for lec in course.lectures:
lec.courseID = course.subject + " " + course.catalogNumber
for tut in course.tutorials:
tut.courseID = course.subject + " " + course.catalogNumber
for slot in course.lectures + course.tutorials:
# first, we convert time to 24hr time
# earliest start time for a class is 8:30am
# night classes start at/before 7:00pm
if 1 <= int(slot.startTime.split(":")[0]) <= 7:
slot.startTime, slot.endTime = \
map(lambda x: "{}:{}".format(str(int(x.split(":")[0])
+ 12), x[-2:]), [slot.startTime,
slot.endTime])
elif int(slot.startTime.split(":")[0]) > int(
slot.endTime.split(":")[0]):
# e.g. 12:00 to 1:00
slot.endTime = "{}:{}".format(str(int(
slot.endTime.split(":")[0])+12), slot.endTime[-2:])
# now, we write to slot.sTime, slot.eTime
# (minutes-past-midnight...)
slot.sTime, slot.eTime = map(lambda x: int(x[:2]) * 60 +
int(x[-2:]),
[slot.startTime, slot.endTime])
# we write to slot.ndays, where ndays is a string of numbers,
# 0->4
if "M" in slot.days:
slot.ndays += "0"
i = slot.days.find("T")
if i != -1 and (i == len(slot.days) - 1 or
slot.days[i+1] != 'h'):
# basically, if not Th (for Thursday)
slot.ndays += "1"
# now, for the rest of the days...
for i in [("W", "2"), ("Th", "3"), ("F", "4")]:
if i[0] in slot.days:
slot.ndays += i[1]
# we make a small adjustment to campusLocation,
# removing whitespace
slot.campusLocation = slot.campusLocation.split()[0]
# we make the prof name "first last" instead of
# "last,first middle"
if slot.instructor != "":
s = slot.instructor.split(" ")
for i in s:
if "," in i:
# we want the 2 words connected by the ","
slot |
# coding=utf-8
'''
cron trigger
@author: Huiyugeng
'''
import datetime
import trigger
class CronTrigger(trigger.Trigger):
    """Trigger that fires when the current time matches a cron expression."""

    def __init__(self, cron):
        # Delay 0, interval 1: the base trigger polls once per second.
        trigger.Trigger.__init__(self, 0, 1)
        self.cron = cron

    def _is_match(self):
        """Return True when 'now' satisfies the stored cron expression."""
        today = datetime.date.today()
        now = datetime.datetime.now()
        return CronParser(self.cron)._is_match(today, now)
class CronParser():
    """Parser for cron expressions of the form

        second minute hour day-of-month month day-of-week [year]

    Each field supports '*', comma-separated lists, 'a-b' ranges and an
    optional '/interval' suffix.
    """

    def __init__(self, cron):
        cron_item = cron.split(' ')
        # BUGFIX: default to "any year" so that 6-field expressions
        # (no year field) no longer crash _is_match with AttributeError.
        self.year_set = None
        if len(cron_item) == 6 or len(cron_item) == 7:
            self.second_set = self._parse_integer(cron_item[0], 0, 59)
            self.minute_set = self._parse_integer(cron_item[1], 0, 59)
            self.hour_set = self._parse_integer(cron_item[2], 0, 23)
            self.day_of_month_set = self._parse_integer(cron_item[3], 1, 31)
            self.month_set = self._parse_month(cron_item[4])
            self.day_of_week_set = self._parse_day_of_week(cron_item[5])
            if len(cron_item) == 7:
                self.year_set = self._parse_integer(cron_item[6], 1970, 2100)

    def _parse_integer(self, value, min_val, max_val):
        """Expand one cron field into the list of matching integers."""
        result = []
        # A field may be a comma-separated list of range items.
        range_items = value.split(',') if ',' in value else [value]
        for range_item in range_items:
            temp_result = []
            interval = 1
            # 'item/interval' keeps every interval-th value of the item.
            if '/' in range_item:
                parts = range_item.split('/')
                range_item = parts[0]
                interval = int(parts[1])
                if interval < 1:
                    interval = 1
            if '*' in range_item:
                temp_result.extend(self._add_to_set(min_val, max_val))
            elif '-' in range_item:
                low, high = range_item.split('-')
                temp_result.extend(self._add_to_set(int(low), int(high)))
            else:
                temp_result.append(int(range_item))
            for count, item in enumerate(temp_result):
                if count % interval == 0:
                    result.append(item)
        return result

    def _add_to_set(self, start, end):
        """Return the inclusive integer range [start, end] as a list."""
        return list(range(start, end + 1))

    def _parse_month(self, value):
        """Translate month names (JAN..DEC) to 1..12, then expand."""
        months = ["JAN", "FEB", "MAR", "APR", "MAY", "JUN",
                  "JUL", "AUG", "SEP", "OCT", "NOV", "DEC"]
        for i, name in enumerate(months):
            value = value.replace(name, str(i + 1))
        return self._parse_integer(value, 1, 12)

    def _parse_day_of_week(self, value):
        """Translate day names (MON..SUN) to 1..7, then expand."""
        day_of_weeks = ["MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"]
        for i, name in enumerate(day_of_weeks):
            value = value.replace(name, str(i + 1))
        return self._parse_integer(value, 1, 7)

    def _is_match(self, _date, _time):
        """Return True when _date/_time satisfy every parsed field."""
        # Python's weekday() has Monday == 0; shift so Monday == 1 to
        # line up with the MON..SUN -> 1..7 mapping above.
        day_of_week = _date.weekday() + 1
        return (_time.second in self.second_set and
                _time.minute in self.minute_set and
                _time.hour in self.hour_set and
                _date.day in self.day_of_month_set and
                _date.month in self.month_set and
                # year_set is None when no year field was supplied.
                (self.year_set is None or _date.year in self.year_set) and
                day_of_week in self.day_of_week_set)
|
't be properly parsed
elif buf[0] == 128:
ssl2 = True
# in SSLv2 we need to read 2 bytes in total to know the size of
# header, we already read 1
result = None
for result in self._sockRecvAll(1):
if result in (0, 1):
yield result
else: break
assert result is not None
buf += result
else:
raise TLSIllegalParameterException(
"Record header type doesn't specify known type")
#Parse the record header
if ssl2:
record = RecordHeader2().parse(Parser(buf))
else:
record = RecordHeader3().parse(Parser(buf))
yield record
    def recv(self):
        """
        Read a single record from socket, handle SSLv2 and SSLv3 record layer

        @rtype: generator
        @return: generator that returns 0 or 1 in case the read would be
        blocking or a tuple containing record header (object) and record
        data (bytearray) read from socket
        @raise socket.error: In case of network error
        @raise TLSAbruptCloseError: When the socket was closed on the other
        side in middle of record receiving
        @raise TLSRecordOverflow: When the received record was longer than
        allowed by TLS
        @raise TLSIllegalParameterException: When the record header was
        malformed
        """
        # _recvHeader is itself a generator: it yields 0/1 while the socket
        # read would block, then yields the parsed record header object.
        record = None
        for record in self._recvHeader():
            if record in (0, 1):
                yield record
            else: break
        assert record is not None
        #Check the record header fields
        # 18432 = 2**14 (basic record size limit) + 1024 (maximum compression
        # overhead) + 1024 (maximum encryption overhead)
        if record.length > 18432:
            raise TLSRecordOverflow()
        #Read the record contents
        buf = bytearray(0)
        result = None
        # Same pass-through protocol: forward 0/1 "would block" markers until
        # the full record.length-byte payload has been read.
        for result in self._sockRecvAll(record.length):
            if result in (0, 1):
                yield result
            else: break
        assert result is not None
        buf += result
        yield (record, buf)
class ConnectionState(object):
    """Holds the per-direction record-layer state: MAC and encryption
    contexts, AEAD fixed nonce and the record sequence number."""

    def __init__(self):
        """Start with no keys: all contexts empty, sequence number zero."""
        self.seqnum = 0
        self.macContext = None
        self.encContext = None
        self.fixedNonce = None

    def getSeqNumBytes(self):
        """Encode the current sequence number (8 bytes), then advance it."""
        current = self.seqnum
        self.seqnum = current + 1
        encoder = Writer()
        encoder.add(current, 8)
        return encoder.bytes
class RecordLayer(object):
"""
Implementation of TLS record layer protocol
@ivar version: the TLS version to use (tuple encoded as on the wire)
@ivar sock: underlying socket
@ivar client: whether the connection should use encryption
@ivar encryptThenMAC: use the encrypt-then-MAC mechanism for record
integrity
"""
    def __init__(self, sock):
        """Wrap ``sock`` with a record-level framing socket and create
        empty cipher states for both directions."""
        self.sock = sock
        self._recordSocket = RecordSocket(sock)
        # (0, 0) means "TLS version not negotiated yet".
        self._version = (0, 0)
        self.client = True
        # Active states, plus the pending states that become active after
        # a ChangeCipherSpec.
        self._writeState = ConnectionState()
        self._readState = ConnectionState()
        self._pendingWriteState = ConnectionState()
        self._pendingReadState = ConnectionState()
        self.fixedIVBlock = None
        self.encryptThenMAC = False
@property
def blockSize(self):
"""Return the size of block used by current symmetric cipher (R/O)"""
return self._writeState.encContext.block_size
@property
def version(self):
"""Return the TLS version used by record layer"""
return self._version
@version.setter
def version(self, val):
"""Set the TLS version used by record layer"""
self._version = val
self._recordSocket.version = val
def getCipherName(self):
"""
Return the name of the bulk cipher used by this connection
@rtype: str
@return: The name of the cipher, like 'aes128', 'rc4', etc.
"""
if self._writeState.encContext is None:
return None
return self._writeState.encContext.name
def getCipherImplementation(self):
"""
Return the name of the implementation used for the connection
'python' for tlslite internal implementation, 'openssl' for M2crypto
and 'pycrypto' for pycrypto
@rtype: str
@return: Name of cipher implementation used, None if not initialised
"""
if self._writeState.encContext is None:
return None
return self._writeState.encContext.implementation
def shutdown(self):
"""Clear read and write states"""
self._writeState = ConnectionState()
self._readState = ConnectionState()
self._pendingWriteState = ConnectionState()
self._pendingReadState = ConnectionState()
def isCBCMode(self):
"""Returns true if cipher uses CBC mode"""
if self._writeState and self._writeState.encContext and \
self._writeState.encContext.isBlockCipher:
return True
else:
return False
#
# sending messages
#
def addPadding(self, data):
"""Add padding to data so that | it is multiple of block size"""
currentLength = len(data)
blockLength = self.blockSize
paddingLength = blockLength - 1 - (currentLength % blockLength)
paddingBytes = bytearray([paddingLength] * (paddingLength+1))
data += paddingBytes
return data
    def calculateMAC(self, mac, seqnumBytes, contentType, data):
        """Calculate the SSL/TLS version of a MAC.

        MAC input is: seqnum || content type || (TLS only: protocol
        version) || 2-byte big-endian data length || data.
        """
        mac.update(compatHMAC(seqnumBytes))
        mac.update(compatHMAC(bytearray([contentType])))
        assert self.version in ((3, 0), (3, 1), (3, 2), (3, 3))
        # SSLv3 (3, 0) omits the protocol version from the MAC input.
        if self.version != (3, 0):
            mac.update(compatHMAC(bytearray([self.version[0]])))
            mac.update(compatHMAC(bytearray([self.version[1]])))
        # Two-byte big-endian payload length.
        mac.update(compatHMAC(bytearray([len(data)//256])))
        mac.update(compatHMAC(bytearray([len(data)%256])))
        mac.update(compatHMAC(data))
        return bytearray(mac.digest())
    def _macThenEncrypt(self, data, contentType):
        """MAC, pad then encrypt data (classic MAC-then-encrypt scheme:
        the MAC is computed over the plaintext and appended before
        padding/encryption)."""
        if self._writeState.macContext:
            seqnumBytes = self._writeState.getSeqNumBytes()
            # Copy the context so the keyed template is not consumed.
            mac = self._writeState.macContext.copy()
            macBytes = self.calculateMAC(mac, seqnumBytes, contentType, data)
            data += macBytes
        #Encrypt for Block or Stream Cipher
        if self._writeState.encContext:
            #Add padding (for Block Cipher):
            if self._writeState.encContext.isBlockCipher:
                #Add TLS 1.1 fixed block (explicit per-record IV, >= (3, 2))
                if self.version >= (3, 2):
                    data = self.fixedIVBlock + data
                data = self.addPadding(data)
            #Encrypt
            data = self._writeState.encContext.encrypt(data)
        return data
    def _encryptThenMAC(self, buf, contentType):
        """Pad, encrypt and then MAC the data (RFC 7366 encrypt-then-MAC:
        the MAC is computed over the ciphertext and appended after
        encryption)."""
        if self._writeState.encContext:
            # add IV for TLS1.1+ (explicit per-record IV)
            if self.version >= (3, 2):
                buf = self.fixedIVBlock + buf
            buf = self.addPadding(buf)
            buf = self._writeState.encContext.encrypt(buf)
        # add MAC
        if self._writeState.macContext:
            seqnumBytes = self._writeState.getSeqNumBytes()
            # Copy the context so the keyed template is not consumed.
            mac = self._writeState.macContext.copy()
            # append MAC computed over the ciphertext
            macBytes = self.calculateMAC(mac, seqnumBytes, contentType, buf)
            buf += macBytes
        return buf
def _encryptThenSeal(self, buf, contentType):
"""Encrypt with AEAD cipher"""
#Assemble the authenticated data.
seqNumBytes = self._writeState.getSeqNumBytes()
authData = seqNumBytes + bytearray([contentType,
self.version[0],
|
from django.contrib import admin
from django.contrib.admin.filters import RelatedFieldListFilter
from .models import ClientLog, Client, Feedback
def client_id(obj):
    """Admin list column: the external id of the record's related client."""
    related = obj.client
    return related.externid
class AliveClientsRelatedFieldListFilter(RelatedFieldListFilter):
    """Related-field list filter that only offers clients with
    STATUS_ALIVE as filter choices."""
    def __init__(self, field, request, *args, **kwargs):
        # Restrict the related field's choices before the parent builds
        # the choice list.
        # NOTE(review): field.rel is the pre-Django-2.0 API; confirm the
        # installed Django version still exposes it.
        field.rel.limit_choices_to = {'status': Client.STATUS_ALIVE }
        super(AliveClientsRelatedFieldListFilter, self).__init__(field, request, *args, **kwargs)
class ClientLogAdmin(admin.ModelAdmin):
    """Admin for per-client log entries, newest first."""
    list_display = ('client', 'tag', 'log', 'updated')
    list_filter = ('client', )
    ordering = ('-updated',)
    search_fields = ("client__ip", "client__externid", "log", "tag",)
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Limit the client dropdown on the edit form to alive clients only.
        if db_field.name == "client":
            kwargs["queryset"] = Client.objects.filter(status = Client.STATUS_ALIVE)
        return super(ClientLogAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
admin.site.register | (ClientLog, ClientLogAdmin)
class ClientAdmin(admin.ModelAdmin):
    """Admin for clients: alive-first, most recently seen at the top.

    NOTE(review): "complets" in list_filter mirrors the model field name;
    confirm it is not a typo on the Client model itself.
    """
    list_display = ("status", "externid", "ip", "updated", "created", "useragent")
    list_filter = ("status", "useragent", "failures", "complets")
    ordering = ("status", "-updated", "-created", )
    search_fields = ("ip", "useragent", "externid", )
admin.site.register(Client, ClientAdmin)
class | FeedbackAdmin(admin.ModelAdmin):
list_display = ("id", "useremail", "ip", "created")
ordering = ("-id",)
admin.site.register(Feedback, FeedbackAdmin)
|
from sqlalchemy import Column, String, BigInteger
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
import time
BaseModel = declarative_base()
class Video(BaseModel):
    """Declarative model for one scraped video record."""
    __tablename__ = 'video'
    id = Column(BigInteger, primary_key=True, autoincrement=True)
    name = Column(String(80), nullable=False)
    image = Column(String(200))
    desc = Column(String(100))
    play_num = Column(String(50))
    update_num = Column(String(50))
    link = Column(String(200))
    score = Column(String(10))
    platform = Column(String(10), nullable=False)  # source platform
    video_category = Column(String(10), nullable=False)  # broad category: TV series, movie, variety show
    series_region = Column(String(20))  # TV series region: all trending, mainland, web series, Korean, American
    movie_region = Column(String(20))  # movie region: all trending, in cinemas, mainland, Hong Kong, USA
    veriety_region = Column(String(20))  # variety show category: trending (sic: "veriety" is part of the schema)
    created_at = Column(BigInteger, default=time.time)  # creation time, epoch seconds
engine = create_engi | ne('mysql+pymysql://root:123456@localhost:3306/videoSpider?charset=utf8mb4')
BaseModel.metadata.create_all(engine)
"""
data = {
'name' : name.get_text(),
'image' : 'http:' + image.get('r-lazyload'),
'desc' : ' '.join(desc.get_text().strip().split()),
'play_number' : num.get_text(),
'update_status' : status,
'link' : link.get('href')
}
# 视频类型:电视剧、电影、综艺
Video_large_type = Enum('Video_large_type', ('Series', 'Movies', 'Variety'))
# 电视剧类型:全部热播、内地、网剧、韩剧、美剧
Series_region = Enum('Series_region', ('All', 'Local', 'Net', 'SouthKorea', 'EuropeAndAmerica'))
# 电影类型:全部热播、院线、内地、香港、美国
Movie_region = Enum('Movie_region', ('All', 'Cinemas', 'Local', 'HongKong', 'America'))
# 综艺类型:全部热播
Variety_type = Enum('Variety_type', ('Hot'))
"""
class RequestModel(object):
    """Value object describing one crawl request: the page URL plus the
    platform/category labels to store alongside the scraped rows."""

    def __init__(self, source_url, platform, video_category, *, series_region=None, movie_region=None, veriety_region=None):
        self.source_url = source_url
        self.platform = platform
        self.video_category = video_category
        # Region labels are optional and depend on the category.
        self.veriety_region = veriety_region
        self.movie_region = movie_region
        self.series_region = series_region
|
#!/usr/bin/env python
#coding=utf-8
import sys
sys.path.append("..")
import urllib
import myjson
from datetime import datetime, date, timedelta
import time
from define import *
from data_interface.stock_dataset import stock_dataset
class turtle(object):
    """
    Turtle trading model: rolling mean/extrema helpers plus two
    rule-based trading-plan generators.
    """
    def get_mean(self, data, end_index, k):
        """Mean of the k values data[end_index-k+1 .. end_index]
        (inclusive of end_index); 0 when there is not enough history."""
        if end_index < k - 1:
            return 0
        window = data[end_index - k + 1 : end_index + 1]
        return float(sum(window)) / k

    def get_max(self, data, end_index, k):
        """Maximum of the k values *before* end_index (exclusive);
        0 when there is not enough history."""
        if end_index < k:
            return 0
        return max(data[end_index - k : end_index])

    def get_max_date(self, dataset, end_index, k):
        """(max close price, its date) over the k bars before end_index
        (exclusive); 0 when there is not enough history. Ties keep the
        earliest bar, as before."""
        if end_index < k:
            return 0
        best = max(dataset.data[end_index - k : end_index],
                   key=lambda bar: bar.close_price)
        return (best.close_price, best.date)

    def get_min(self, data, end_index, k):
        """Minimum of the k values *before* end_index (exclusive);
        0 when there is not enough history."""
        if end_index < k:
            return 0
        return min(data[end_index - k : end_index])

    def get_min_date(self, dataset, end_index, k):
        """(min close price, its date) over the k bars before end_index
        (exclusive); 0 when there is not enough history."""
        if end_index < k:
            return 0
        best = min(dataset.data[end_index - k : end_index],
                   key=lambda bar: bar.close_price)
        return (best.close_price, best.date)

    def get_trading_plan(self, dataset, date_str):
        """Trading plan of the 10/100-mean, 50-high/low breakout model;
        returns a dict whose "choise" key encodes the decision.

        choise codes:
          -3 not enough data    -2 date_str error    -1 unknown problem
           0 sell all    1 sell half    2 hold (close price unsatisfied)
           3 hold (mean lines unsatisfied)    4 buy

        Strategy (hourly chart, trend-following index level): buy when
        the close breaks the previous 50-bar high while the 10-bar mean
        is above the 100-bar mean; sell half when the close drops below
        the previous 25-bar low, sell all below the previous 50-bar low.
        """
        result = {}
        result["choise"] = -1
        # Get stock data by date_str. If not present, report the error.
        data = dataset.get_data(date_str)
        if data is None:
            result["choise"] = -2
            return result
        data_index = dataset.get_data_index(date_str)
        close_prices = [item.close_price for item in dataset.data]
        result["close_price"] = close_prices[data_index]
        result["10_mean"] = self.get_mean(close_prices, data_index, 10)
        result["100_mean"] = self.get_mean(close_prices, data_index, 100)
        result["50_max"] = self.get_max(close_prices, data_index, 50)
        result["50_min"] = self.get_min(close_prices, data_index, 50)
        result["25_min"] = self.get_min(close_prices, data_index, 25)
        # Any 0 indicator means the window did not have enough history.
        if (result["10_mean"] == 0 or result["100_mean"] == 0 or
                result["50_max"] == 0 or result["50_min"] == 0 or
                result["25_min"] == 0):
            result["choise"] = -3
        elif result["close_price"] < result["50_min"]:
            result["choise"] = 0
        elif result["close_price"] < result["25_min"]:
            result["choise"] = 1
        elif result["close_price"] > result["50_max"]:
            if result["10_mean"] < result["100_mean"]:
                result["choise"] = 3
            else:
                result["choise"] = 4
        else:
            result["choise"] = 2
        return result

    def get_trading_plan3(self, dataset, date_str):
        """Breakout plan (https://www.jisilu.cn/question/66127):
        buy when the close exceeds the high of the last BUY_DAYS bars;
        sell all when it drops below the low of the last SELL_DAYS bars;
        otherwise hold. choise codes as in get_trading_plan.

        NOTE(review): the source strategy talks about intraday highs and
        lows, but get_max_date/get_min_date scan close prices — confirm
        which is intended.
        """
        result = {}
        # dataset.data[-1] is today's bar appended after loading the
        # file, so the last on-file date sits at index -2.
        result["file_end_date"] = dataset.data[-2].date
        result["start_buy_date"] = dataset.data[0 - BUY_DAYS].date
        result["start_sell_date"] = dataset.data[0 - SELL_DAYS].date
        result["date"] = date_str
        result["BUY_DAYS"] = str(BUY_DAYS)
        result["SELL_DAYS"] = str(SELL_DAYS)
        result["choise"] = -1
        result["info"] = "unknown problem, do not trade"
        data = dataset.get_data(date_str)
        if data is None:
            result["choise"] = -2
            result["info"] = "date_str error"
            return result
        data_index = dataset.get_data_index(date_str)
        result["close_price"] = dataset.data[data_index].close_price
        result["max_date"] = self.get_max_date(dataset, data_index, BUY_DAYS)
        result["min_date"] = self.get_min_date(dataset, data_index, SELL_DAYS)
        if result["close_price"] > result["max_date"][0]:
            result["choise"] = 4
            result["info"] = "buy"
        elif result["close_price"] < result["min_date"][0]:
            result["choise"] = 0
            result["info"] = "sell all"
        elif result["min_date"][0] < result["close_price"] < result["max_date"][0]:
            result["choise"] = 2
            result["info"] = "hold on"
        return result
if __name__ == '__main__':
pass
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.python.target_types import PythonLibrary, PythonTests
from pants.engine.target import BoolField
class SkipIsortField(BoolField):
    # Per-target opt-out flag consumed by the isort backend rules.
    alias = "skip_isort"
    default = False
    help = "If true, don't run isort on this target's code."
def rules():
    """Register the `skip_isort` field on all Python target types."""
    python_targets = (PythonLibrary, PythonTests)
    return [target.register_plugin_field(SkipIsortField)
            for target in python_targets]
|
import requests
import csv |
from configparser import ConfigParser
config = ConfigParser()
config.read("config.cfg")

token = config.get("auth", "token")
domain = config.get("instance", "domain")
headers = {"Authorization": "Bearer %s" % token}

source_course_id = 311693
# NOTE(review): csv_file is empty — point it at the CSV listing the SIS
# course ids before running.
csv_file = ""

payload = {'migration_type': 'course_copy_importer',
           'settings[source_course_id]': source_course_id}

# The CSV is expected to hold one SIS course id per row (first column).
# BUGFIX: the file mixes Python 3 configparser with Python 2 print
# syntax, opened the CSV in binary mode, interpolated the whole csv row
# (a list) into the URL, and concatenated an int status code to a str.
with open(csv_file, 'r', newline='') as courses:
    coursesreader = csv.reader(courses)
    for course in coursesreader:
        sis_id = course[0]
        uri = domain + "/api/v1/courses/sis_course_id:%s/content_migrations" % sis_id
        r = requests.post(uri, headers=headers, data=payload)
        print("%s %s" % (r.status_code, sis_id))
from django_nose.tools import assert_equal
from pontoon.base.tests import TestCase
from pontoon.base.utils import NewlineEscapePlaceable, mark_placeables
class PlaceablesTests(TestCase):
    """Checks for newline-escape placeable detection and markup."""

    def test_newline_escape_placeable(self):
        """Test detecting newline escape sequences"""
        placeable = NewlineEscapePlaceable
        expected = placeable([u'\\n'])
        assert_equal(placeable.parse(u'A string\\n')[1], expected)
        assert_equal(placeable.parse(u'\\nA string')[0], expected)
        assert_equal(placeable.parse(u'A\\nstring')[1], expected)
        # A plain string or a literal newline must not be detected.
        assert_equal(placeable.parse(u'A string'), None)
        assert_equal(placeable.parse(u'A\nstring'), None)

    def test_mark_newline_escape_placeables(self):
        """Test marking newline escape sequences"""
        mark = u'<mark class="placeable" title="Escaped newline">\\n</mark>'
        cases = [
            (u'A string\\n', u'A string' + mark),
            (u'\\nA string', mark + u'A string'),
            (u'A\\nstring', u'A' + mark + u'string'),
            # No escape sequence: the string passes through untouched.
            (u'A string', u'A string'),
            (u'A\nstring', u'A\nstring'),
        ]
        for source, expected in cases:
            assert_equal(mark_placeables(source), expected)
|
# Copyright (C) 2009 - TODAY Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from odoo import api, fields, models
class AccountTax(models.Model):
    _inherit = 'account.tax'

    fiscal_tax_ids = fields.Many2many(
        comodel_name='l10n_br_fiscal.tax',
        relation='l10n_br_fiscal_account_tax_rel',
        # BUGFIX: kwargs were misspelled 'colunm1'/'colunm2', so Odoo
        # ignored them and generated default relation column names.
        column1='account_tax_id',
        column2='fiscal_tax_id',
        string='Fiscal Taxes',
    )

    @api.multi
    def compute_all(
            self,
            price_unit,
            currency=None,
            quantity=None,
            product=None,
            partner=None,
            fiscal_taxes=None,
            operation_line=False,
            ncm=None,
            nbm=None,
            cest=None,
            discount_value=None,
            insurance_value=None,
            other_costs_value=None,
            freight_value=None,
            fiscal_price=None,
            fiscal_quantity=None,
            uot=None,
            icmssn_range=None
    ):
        """ Returns all information required to apply taxes
            (in self + their children in case of a tax group).
            We consider the sequence of the parent for group of taxes.
                Eg. considering letters as taxes and alphabetic order
                as sequence :
                [G, B([A, D, F]), E, C] will be computed as [A, D, F, C, E, G]
        RETURN: {
            'total_excluded': 0.0,    # Total without taxes
            'total_included': 0.0,    # Total with taxes
            'taxes': [{               # One dict for each tax in self
                                      # and their children
                'id': int,
                'name': str,
                'amount': float,
                'sequence': int,
                'account_id': int,
                'refund_account_id': int,
                'analytic': boolean,
            }]
        } """
        # Standard (account) tax computation first, then overlay the
        # Brazilian fiscal tax results on top of it.
        taxes_results = super().compute_all(
            price_unit, currency, quantity, product, partner)
        if not fiscal_taxes:
            fiscal_taxes = self.env['l10n_br_fiscal.tax']
        product = product or self.env['product.product']
        # FIXME Should get company from document?
        fiscal_taxes_results = fiscal_taxes.compute_taxes(
            company=self.env.user.company_id,
            partner=partner,
            product=product,
            price_unit=price_unit,
            quantity=quantity,
            uom_id=product.uom_id,
            fiscal_price=fiscal_price or price_unit,
            fiscal_quantity=fiscal_quantity or quantity,
            uot_id=uot or product.uot_id,
            ncm=ncm or product.ncm_id,
            nbm=nbm or product.nbm_id,
            cest=cest or product.cest_id,
            discount_value=discount_value,
            insurance_value=insurance_value,
            other_costs_value=other_costs_value,
            freight_value=freight_value,
            operation_line=operation_line,
            icmssn_range=icmssn_range)

        # Map each account tax to its fiscal tax domain so the two
        # result sets can be matched up below.
        account_taxes_by_domain = {}
        for tax in self:
            tax_domain = tax.tax_group_id.fiscal_tax_group_id.tax_domain
            account_taxes_by_domain.update({tax.id: tax_domain})

        for account_tax in taxes_results['taxes']:
            fiscal_tax = fiscal_taxes_results.get(
                account_taxes_by_domain.get(account_tax.get('id'))
            )
            if fiscal_tax:
                tax = self.filtered(lambda t: t.id == account_tax.get('id'))
                # Price-excluded, non-deductible taxes increase the total.
                if not fiscal_tax.get('tax_include') and not tax.deductible:
                    taxes_results['total_included'] += fiscal_tax.get(
                        'tax_value')
                account_tax.update({
                    'id': account_tax.get('id'),
                    'name': '{0} ({1})'.format(
                        account_tax.get('name'),
                        fiscal_tax.get('name')
                    ),
                    'amount': fiscal_tax.get('tax_value'),
                    'base': fiscal_tax.get('base'),
                    'tax_include': fiscal_tax.get('tax_include'),
                })
                # Deductible taxes are booked with a negative amount.
                if tax.deductible:
                    account_tax.update({
                        'amount': fiscal_tax.get('tax_value', 0.0) * -1,
                    })
        return taxes_results
|
ict['txIn_txin_script_len'] .append(self.txin_script_len)
txDict['txIn_scriptSig'] = txDict.get('txIn_scriptSig', [])
txDict['txIn_scriptSig'].append(get_hexstring(self.scriptSig))
txDict['txIn_sequence_no'] = txDict.get('txIn_sequence_no', [])
txDict['txIn_sequence_no'].append(self.sequence_no)
return txDict
    def __str__(self):
        # Human-readable dump of every input field; hashes and scripts
        # are rendered as hex strings.
        return 'PrevHash: %s \nPrev Tx out index: %d \nTxin Script Len: %d \nscriptSig: %s \nSequence: %8x' % \
                (get_hexstring(self.prevhash),
                self.prevtx_out_idx,
                self.txin_script_len,
                get_hexstring(self.scriptSig),
                self.sequence_no)
def __repr__(self):
return __str__(self)
class Tx_Output(object):
    """One transaction output: the value in satoshis and its scriptPubKey."""
    def __init__(self):
        super(Tx_Output, self).__init__()
        pass
    def parse(self, stream):
        """Read value, script length and scriptPubKey from the stream."""
        # NOTE(review): read_uint8 presumably reads the 8-byte value
        # field here — confirm against the helper's definition.
        self.value = read_uint8(stream)
        self.txout_script_len = read_varint(stream)
        self.scriptPubKey = stream.read(self.txout_script_len)
    def updateTxDict(self,txDict):
        '''txDict holds arrays of Tx_Output values'''
        txDict['txOut_value'] = txDict.get('txOut_value', [])
        txDict['txOut_value'].append(self.value)
        txDict['txOut_script_len'] = txDict.get('txOut_script_len', [])
        txDict['txOut_script_len'].append(self.txout_script_len)
        txDict['txOut_scriptPubKey'] = txDict.get('txOut_scriptPubKey', [])
        txDict['txOut_scriptPubKey'].append(get_hexstring(self.scriptPubKey))
        return txDict
    def __str__(self):
        return 'Value (satoshis): %d (%f btc)\nTxout Script Len: %d\nscriptPubKey: %s' %\
                (self.value, (1.0*self.value)/100000000.00,
                self.txout_script_len,
                get_hexstring(self.scriptPubKey))
    def __repr__(self):
        # BUGFIX: `__str__(self)` raised NameError (no such global);
        # delegate to the instance's __str__ instead.
        return str(self)
class Transaction(object):
    """Holds one Transaction as part of a block"""
    def __init__(self):
        super(Transaction, self).__init__()
        # All fields are populated by parse().
        self.version = None
        self.in_cnt = None
        self.inputs = None
        self.out_cnt = None
        self.outputs = None
        self.lock_time = None
    def parse(self,stream):
        """Read one wire-format transaction from the byte stream."""
        #TODO: error checking
        self.version = read_uint4(stream)
        # varint input count, then that many Tx_Input records.
        self.in_cnt = read_varint(stream)
        self.inputs = []
        if self.in_cnt > 0:
            for i in range(0, self.in_cnt):
                input = Tx_Input()
                input.parse(stream)
                self.inputs.append(input)
        # varint output count, then that many Tx_Output records.
        self.out_cnt = read_varint(stream)
        self.outputs = []
        if self.out_cnt > 0:
            for i in range(0, self.out_cnt):
                output = Tx_Output()
                output.parse(stream)
                self.outputs.append(output)
        self.lock_time = read_uint4(stream)
    def updateTxDict(self,txDict):
        """Merge this transaction's fields — and those of its inputs and
        outputs — into txDict; returns the updated dict."""
        txDict['tx_version'] = self.version
        txDict['in_cnt'] = self.in_cnt
        txDict['out_cnt'] = self.out_cnt
        txDict['lock_time'] = self.lock_time
        for i in range(self.in_cnt):
            txDict = self.inputs[i].updateTxDict(txDict)
        for i in range(self.out_cnt):
            txDict = self.outputs[i].updateTxDict(txDict)
        return txDict
    def __str__(self):
        s = 'Version: %d\nInputs count: %d\n---Inputs---\n%s\nOutputs count: %d\n---Outputs---\n%s\nLock_time:%8x' % (self.version, self.in_cnt,
                '\n'.join(str(i) for i in self.inputs),
                self.out_cnt,
                '\n'.join(str(o) for o in self.outputs),
                self.lock_time)
        return s
class BlockHeader(object):
    """BlockHeader represents the header of the block"""
    def __init__(self):
        super( BlockHeader, self).__init__()
        # All fields are populated by parse().
        self.version = None
        self.prevhash = None
        self.merklehash = None
        self.time = None
        self.bits = None
        self.nonce = None
        self.blockprefix = None
        self.blockhash = None
    def parse(self, stream):
        """Read an 80-byte block header from the stream and compute the
        double-SHA256 block hash."""
        #TODO: error checking
        self.version = read_uint4(stream)
        self.prevhash = read_hash32(stream)
        self.merklehash = read_merkle32(stream)
        self.time = read_time(stream)
        self.bits = read_uint4(stream)
        self.nonce = read_uint4(stream)
        # construct the prefix (fields in little-endian wire order) and
        # hash it twice with SHA-256, displaying the digest reversed.
        self.blockprefix = ( struct.pack("<L", self.version) + self.prevhash[::-1] + \
            self.merklehash[::-1] + struct.pack("<LLL", self.time, self.bits, self.nonce))
        self.blockhash = hashlib.sha256(hashlib.sha256(self.blockprefix).digest()).digest()[::-1]
    def updateTxDict(self, txDict):
        """Copy the header fields into txDict; returns the updated dict."""
        txDict['version'] = self.version
        txDict['prevhash'] = get_hexstring(self.prevhash)
        txDict['merklehash'] = get_hexstring(self.merklehash)
        txDict['time'] = self.time
        txDict['bits'] = self.bits
        txDict['nonce'] = self.nonce
        txDict['blockprefix'] = get_hexstring(self.blockprefix)
        txDict['blockhash'] = get_hexstring(self.blockhash)
        return txDict
    def __str__(self):
        return "\n\t\tVersion: %d \n\t\tPreviousHash: %s \n\t\tMerkle: %s \n\t\tTime: %8x \n\t\tBits: %8x \n\t\tNonce: %8x \n\t\tPrefix: %s \n\t\tBlockHash: %s \n\t\t" % (self.version, \
                get_hexstring(self.prevhash), \
                get_hexstring(self.merklehash), \
                self.time, \
                self.bits, \
                self.nonce, \
                get_hexstring(self.blockprefix), \
                get_hexstring(self.blockhash))
    def __repr__(self):
        # BUGFIX: `__str__(self)` raised NameError (no such global);
        # delegate to the instance's __str__ instead.
        return str(self)
class Block(object):
    """A block to be parsed from file"""
    def __init__(self):
        # magic_no of -1 / blocksize of 0 marks an unparsed (or
        # end-of-stream) block; see parseBlock().
        self.magic_no = -1
        self.blocksize = 0
        self.blockheader = None
        self.transaction_cnt = 0
        self.transactions = None
    def parseBlock(self, bf):
        """Scan to the next magic number in the stream, then read the
        size, header and all transactions of that block."""
        self.magic_no = find_magic_number(bf)
        if self.magic_no != None:
            self.blocksize = read_uint4(bf)
            self.blockheader = BlockHeader()
            self.blockheader.parse(bf)
            self.transaction_cnt = read_varint(bf)
            self.transactions = []
            #print 'List of transactions'
            for i in range(0, self.transaction_cnt):
                tx = Transaction()
                tx.parse(bf)
                self.transactions.append(tx)
    def printBlock(self):
        # Dump the whole block (header plus every transaction) to stdout.
        # NOTE: Python 2 print statements — this module is Python 2 only.
        print 'magic_no:\t0x%8x' % self.magic_no
        print 'size: \t%u bytes' % self.blocksize
        print 'Block header:\t%s' % self.blockheader
        print 'Transactions: \t%d' % self.transaction_cnt
        for i in range(0, self.transaction_cnt):
            print '='*50
            print '  TX NUMBER: %d' % (i+1)
            print '='*50
            print self.transactions[i]
            print '\n'
    def updateTxDict(self,idx,txDict):
        '''Return data for a specific transaction as a dict'''
        '''Each transaction record will also contain all information about the block as well'''
        txDict['magic_no'] = self.magic_no
        txDict['blocksize'] = self.blocksize
        txDict['transaction_cnt'] = self.transaction_cnt
        txDict = self.blockheader.updateTxDict(txDict)
        txDict = self.transactions[idx].updateTxDict(txDict)
        return txDict
    def getBlockHash(self):
        # Hex string of the (display-order) double-SHA256 block hash.
        return get_hexstring(self.blockheader.blockhash)
    def getBlockPrevHash(self):
        return get_hexstring(self.blockheader.prevhash)
    def getBlockDifficulty(self):
        # Compact-format difficulty target ("bits") from the header.
        return self.blockheader.bits
    def getNumTxs(self):
        return self.transaction_cnt
def parseBlockBytes(bytestream):
    """Parse consecutive blocks from ``bytestream`` until no further
    magic number is found; returns the list of Block objects."""
    blocks = []
    while True:
        curBlock = Block()
        curBlock.parseBlock(bytestream)
        # parseBlock leaves blocksize at 0 when no magic number was
        # found, i.e. at end of stream.
        if curBlock.blocksize == 0:
            break
        blocks.append(curBlock)
    return blocks
def parseBlockFile(blockfile):
    """Open ``blockfile`` in binary mode and parse every block in it."""
    with open(blockfile, 'rb') as handle:
        return parseBlockBytes(handle)
def printBlockFile(blockfile):
    # Parse every block in the file and dump each one to stdout with a
    # running 1-based counter.
    # NOTE: Python 2 print statements — this module is Python 2 only.
    print 'Parsing block file: %s\n' % blockfile
    blocks = parseBlockFile(blockfile)
    count = 0;
    for blk in blocks:
        count = count + 1
        print("Block Count: " + str(count))
        blk.printBlock()
if __name__ == "__main__":
import sys
usage = "Usage: python {0} "
if len(sys.argv) < 2:
print usa |
import json
import logging
import webapp2
from datetime import datetime
from google.appengine.ext import ndb
from controllers.api.api_base_controller import ApiBaseController
from database.event_query import EventListQuery
from helpers.award_helper import AwardHelper
from helpers.district_helper import DistrictHelper
from helpers.event_insights_helper import EventInsightsHelper
from helpers.model_to_dict import ModelToDict
from models.event import Event
class ApiEventController(ApiBaseController):
    """APIv2 endpoint that renders a single event as JSON."""
    CACHE_KEY_FORMAT = "apiv2_event_controller_{}"  # (event_key)
    CACHE_VERSION = 4
    CACHE_HEADER_LENGTH = 60 * 60
    def __init__(self, *args, **kw):
        super(ApiEventController, self).__init__(*args, **kw)
        # Event key comes from the URL route; it also keys the cache.
        self.event_key = self.request.route_kwargs["event_key"]
        self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
    @property
    def _validators(self):
        # Validate the event key before rendering.
        return [("event_id_validator", self.event_key)]
    def _set_event(self, event_key):
        # Load the event; respond 404 with a JSON error body when absent.
        self.event = Event.get_by_id(event_key)
        if self.event is None:
            self._errors = json.dumps({"404": "%s event not found" % self.event_key})
            self.abort(404)
    def _track_call(self, event_key):
        self._track_call_defer('event', event_key)
    def _render(self, event_key):
        # Serialize the event model into the public API dict shape.
        self._set_event(event_key)
        event_dict = ModelToDict.eventConverter(self.event)
        return json.dumps(event_dict, ensure_ascii=True)
class ApiEventTeamsController(ApiEventController):
    """Returns the teams attending an event as a JSON list."""
    CACHE_KEY_FORMAT = "apiv2_event_teams_controller_{}"  # (event_key)
    CACHE_VERSION = 3
    CACHE_HEADER_LENGTH = 60 * 60 * 24

    def __init__(self, *args, **kw):
        super(ApiEventTeamsController, self).__init__(*args, **kw)
        self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)

    def _track_call(self, event_key):
        self._track_call_defer('event/teams', event_key)

    def _render(self, event_key):
        self._set_event(event_key)
        # Drop unresolved (None) team references before converting.
        present_teams = [team for team in self.event.teams if team]
        team_dicts = [ModelToDict.teamConverter(team) for team in present_teams]
        return json.dumps(team_dicts, ensure_ascii=True)
class ApiEventMatchesController(ApiEventController):
    """Returns an event's match list as JSON."""
    CACHE_KEY_FORMAT = "apiv2_event_matches_controller_{}"  # (event_key)
    CACHE_VERSION = 2
    CACHE_HEADER_LENGTH = 61

    def __init__(self, *args, **kw):
        super(ApiEventMatchesController, self).__init__(*args, **kw)
        self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)

    def _track_call(self, event_key):
        self._track_call_defer('event/matches', event_key)

    def _render(self, event_key):
        self._set_event(event_key)
        converted = [ModelToDict.matchConverter(m) for m in self.event.matches]
        return json.dumps(converted, ensure_ascii=True)
class ApiEventStatsController(ApiEventController):
    """Returns an event's match statistics plus year-specific insights."""
    CACHE_KEY_FORMAT = "apiv2_event_stats_controller_{}"  # (event_key)
    CACHE_VERSION = 5
    CACHE_HEADER_LENGTH = 61

    def __init__(self, *args, **kw):
        super(ApiEventStatsController, self).__init__(*args, **kw)
        self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)

    def _track_call(self, event_key):
        self._track_call_defer('event/stats', event_key)

    def _render(self, event_key):
        self._set_event(event_key)
        # Start from the stored matchstats (if any), then attach
        # year-specific insights computed from the matches.
        combined = dict(self.event.matchstats) if self.event.matchstats else {}
        insights = EventInsightsHelper.calculate_event_insights(self.event.matches, self.event.year)
        if insights:
            combined['year_specific'] = insights
        return json.dumps(combined)
class ApiEventRankingsController(ApiEventController):
    """Returns an event's rankings as JSON ('[]' when none exist)."""
    CACHE_KEY_FORMAT = "apiv2_event_rankings_controller_{}"  # (event_key)
    CACHE_VERSION = 1
    CACHE_HEADER_LENGTH = 61

    def __init__(self, *args, **kw):
        super(ApiEventRankingsController, self).__init__(*args, **kw)
        self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)

    def _track_call(self, event_key):
        self._track_call_defer('event/rankings', event_key)

    def _render(self, event_key):
        self._set_event(event_key)
        # Fix: reuse the Event loaded by _set_event instead of issuing a
        # second, redundant Event.get_by_id datastore fetch.
        ranks = json.dumps(self.event.rankings)
        if ranks is None or ranks == 'null':
            # No rankings stored -> present an empty list to clients.
            return '[]'
        else:
            return ranks
class ApiEventAwardsController(ApiEventController):
    """Returns the awards handed out at an event as JSON."""
    CACHE_KEY_FORMAT = "apiv2_event_awards_controller_{}"  # (event_key)
    CACHE_VERSION = 4
    CACHE_HEADER_LENGTH = 60 * 60

    def __init__(self, *args, **kw):
        super(ApiEventAwardsController, self).__init__(*args, **kw)
        self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)

    def _track_call(self, event_key):
        self._track_call_defer('event/awards', event_key)

    def _render(self, event_key):
        self._set_event(event_key)
        organized = AwardHelper.organizeAwards(self.event.awards)
        award_dicts = [ModelToDict.awardConverter(award) for award in organized]
        return json.dumps(award_dicts, ensure_ascii=True)
class ApiEventDistrictPointsController(ApiEventController):
    """Returns the district points awarded at an event as JSON."""
    CACHE_KEY_FORMAT = "apiv2_event_district_points_controller_{}"  # (event_key)
    CACHE_VERSION = 1
    CACHE_HEADER_LENGTH = 61

    def __init__(self, *args, **kw):
        super(ApiEventDistrictPointsController, self).__init__(*args, **kw)
        # Fix: was `self.partial_cache_key` (no leading underscore), so the
        # `_partial_cache_key` used by the caching layer — and set by every
        # sibling controller — was never overridden for this endpoint.
        self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)

    def _track_call(self, event_key):
        self._track_call_defer('event/district_points', event_key)

    def _render(self, event_key):
        self._set_event(event_key)
        points = DistrictHelper.calculate_event_points(self.event)
        return json.dumps(points, ensure_ascii=True)
class ApiEventListController(ApiBaseController):
    """Returns the list of events for a year as JSON."""
    CACHE_KEY_FORMAT = "apiv2_event_list_controller_{}"  # (year)
    CACHE_VERSION = 2
    CACHE_HEADER_LENGTH = 60 * 60 * 24

    def __init__(self, *args, **kw):
        super(ApiEventListController, self).__init__(*args, **kw)
        self.year = int(self.request.route_kwargs.get("year") or datetime.now().year)
        self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.year)

    @property
    def _validators(self):
        # Year is validated inline in _render; no key validators needed.
        return []

    def _track_call(self, *args, **kw):
        self._track_call_defer('event/list', self.year)

    def _render(self, year=None):
        # Accept seasons from 1992 through next year only.
        if not (1992 <= self.year <= datetime.now().year + 1):
            self._errors = json.dumps({"404": "No events found for %s" % self.year})
            self.abort(404)
        events = EventListQuery(self.year).fetch()
        return json.dumps(
            [ModelToDict.eventConverter(event) for event in events],
            ensure_ascii=True)
|
"""
Check the measured process sizes. If we are on a platform which supports
multiple measuring facilities (e.g. Linux), check if the reported sizes match.
This should help to protect against scaling errors (e.g. Byte vs KiB) or using
the wrong value for a different measure (e.g. resident in physical memory vs
virtual memory size).
"""
import sys
import unittest
from unittest import mock
from pympler import process
class ProcessMemoryTests(unittest.TestCase):
    """Cross-check pympler's process-size backends against each other."""

    def _match_sizes(self, pi1, pi2, ignore=[]):
        """
        Match sizes by comparing each set field. Process size may change
        inbetween two measurements.

        `ignore` lists field names to skip (e.g. 'rss' when the backends
        are known to report it differently).
        """
        # NOTE(review): mutable default `ignore=[]` is safe here because
        # it is only read, never mutated.
        if pi1.available and pi2.available:
            for arg in ('vsz', 'rss', 'data_segment', 'shared_segment',
                        'stack_segment', 'code_segment'):
                if arg in ignore:
                    continue
                size1 = getattr(pi1, arg)
                size2 = getattr(pi2, arg)
                # Only compare fields both backends actually filled in.
                if size1 and size2:
                    delta = abs(size1 - size2)
                    # Allow for a difference of the size of two pages or 5%
                    if delta > pi1.pagesize * 2 and delta > size1 * 0.05:
                        self.fail("%s mismatch: %d != %d" % (arg, size1, size2))
        if pi1.pagefaults and pi2.pagefaults:
            # If both records report pagefaults compare the reported
            # number. If a pagefault happens after taking the first
            # snapshot and before taking the second the latter will show a
            # higher pagefault number. In that case take another snapshot
            # with the first variant and check it's now reporting a higher
            # number as well. We assume pagefaults statistics are
            # monotonic.
            if pi1.pagefaults < pi2.pagefaults:
                pi1.update()
                if pi1.pagefaults < pi2.pagefaults:
                    pf1 = pi1.pagefaults
                    pf2 = pi2.pagefaults
                    self.fail("Pagefault mismatch: %d != %d" % (pf1, pf2))
            else:
                self.assertEqual(pi1.pagefaults, pi2.pagefaults)
        if pi1.pagesize and pi2.pagesize:
            # Both backends must agree on the system page size.
            self.assertEqual(pi1.pagesize, pi2.pagesize)

    def test_ps_vs_proc_sizes(self):
        '''Test process sizes match: ps util vs /proc/self/stat
        '''
        psinfo = process._ProcessMemoryInfoPS()
        procinfo = process._ProcessMemoryInfoProc()
        self._match_sizes(psinfo, procinfo)

    def test_ps_vs_getrusage(self):
        '''Test process sizes match: ps util vs getrusage
        '''
        psinfo = process._ProcessMemoryInfoPS()
        try:
            resinfo = process._ProcessMemoryInfoResource()
        except AttributeError:
            # resource module not available on this platform; nothing to compare.
            pass
        else:
            # rss is compared separately: getrusage reports the peak, so it
            # must be at least as large as the ps snapshot.
            self._match_sizes(psinfo, resinfo, ignore=['rss'])
            if psinfo.available and resinfo.available:
                self.assertTrue(resinfo.rss >= psinfo.rss)

    def test_proc_vs_getrusage(self):
        '''Test process sizes match: /proc/self/stat util vs getrusage
        '''
        procinfo = process._ProcessMemoryInfoProc()
        try:
            resinfo = process._ProcessMemoryInfoResource()
        except AttributeError:
            pass
        else:
            self._match_sizes(procinfo, resinfo, ignore=['rss'])
            if procinfo.available and resinfo.available:
                self.assertTrue(resinfo.rss >= procinfo.rss)

    def test_get_current_threads(self):
        '''Test thread info is extracted.'''
        tinfos = process.get_current_threads()
        for tinfo in tinfos:
            self.assertEqual(type(tinfo.ident), int)
            self.assertEqual(type(tinfo.name), type(''))
            self.assertEqual(type(tinfo.daemon), type(True))
            self.assertNotEqual(tinfo.ident, 0)

    def test_proc(self):
        '''Test reading proc stats with mock data.'''
        # Canned /proc/self/stat and /proc/self/status contents; the two
        # mocked open() calls are consumed in that order by the backend.
        mock_stat = mock.mock_open(read_data='22411 (cat) R 22301 22411 22301 34818 22411 4194304 82 0 0 0 0 0 0 0 20 0 1 0 709170 8155136 221 18446744073709551615 94052544688128 94052544719312 140729623469552 0 0 0 0 0 0 0 0 0 17 6 0 0 0 0 0 94052546816624 94052546818240 94052566347776 140729623473446 140729623473466 140729623473466 140729623478255 0')
        mock_status = mock.mock_open(read_data='Name: cat\n\nVmData: 2 kB\nMultiple colons: 1:1')
        with mock.patch('builtins.open', new_callable=mock.mock_open) as mock_file:
            mock_file.side_effect = [mock_stat.return_value, mock_status.return_value]
            procinfo = process._ProcessMemoryInfoProc()
        self.assertTrue(procinfo.available)
        # vsz comes from field 23 of /proc/self/stat (bytes);
        # data_segment from VmData in /proc/self/status (kB -> bytes).
        self.assertEqual(procinfo.vsz, 8155136)
        self.assertEqual(procinfo.data_segment, 2048)
if __name__ == "__main__":
    # Build a suite from every test_* method and exit non-zero on failure
    # so this script can be used directly in CI.
    suite = unittest.TestSuite()
    tclasses = [ ProcessMemoryTests, ]
    for tclass in tclasses:
        names = unittest.getTestCaseNames(tclass, 'test_')
        suite.addTests(map(tclass, names))
    if not unittest.TextTestRunner().run(suite).wasSuccessful():
        sys.exit(1)
|
"""Collection of | fixtures and functions for the HomeKit tests."""
from unittest.mock import patch
def patch_debounce():
    """Return a patch that replaces the debounce decorator with a pass-through."""
    def _passthrough_decorator(func):
        def _wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        return _wrapper
    return patch(
        "homeassistant.components.homekit.accessories.debounce",
        _passthrough_decorator,
    )
| |
# python-jinjatools
#
# Various tools for Jinja2,
# including new filters and tests based on python-moretools,
# a JinjaLoader class for Django,
# and a simple JinjaBuilder class for SCons.
#
# Copyright (C) 2011-2015 Stefan Zimmermann <zimmermann.code@gmail.com>
#
# python-jinjatools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-jinjatools is distributed in the hope that it will be useful,
# but | WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with python-jinjatools. If not, see <http://www.gnu.org/licenses/>.
# Fix: was `all = [...]`, which shadowed the builtin all() and did not
# declare the public API; the dunder name is what `from ... import *` uses.
__all__ = ['Environment']
from itertools import chain
import jinja2
class Environment | (jinja2.Environment):
def __init__(self, filters={}, tests={}, globals={}, **kwargs):
jinja2.Environment.__init__(self, **kwargs)
morefilters = __import__('jinjatools.filters').filters.filters
for name, func in chain(morefilters.items(), filters.items()):
self.filters[name] = func
for name, func in tests.items():
self.tests[name] = func
for name, value in globals.items():
self.globals[name] = value
# from .filters import filters as morefilters
|
sessions"
description:
- allows the addition, modification and deletion of sessions in a consul
cluster. These sessions can then be used in conjunction with key value pairs
to implement distributed locks. In depth documentation for working with
sessions can be found here http://www.consul.io/docs/internals/sessions.html
requirements:
- "python >= 2.6"
- python-consul
- requests
version_added: "2.0"
author: "Steve Gargan @sgargan"
options:
state:
description:
- whether the session should be present i.e. created if it doesn't
exist, or absent, removed if present. If created, the ID for the
session is returned in the output. If absent, the name or ID is
required to remove the session. Info for a single session, all the
sessions for a node or all available sessions can be retrieved by
specifying info, node or list for the state; for node or info, the
node name or session id is required as parameter.
required: false
choices: ['present', 'absent', 'info', 'node', 'list']
default: present
name:
description:
- the name that should be associated with the session. This is opaque
to Consul and not required.
required: false
default: None
delay:
description:
- the optional lock delay that can be attached to the session when it
is created. Locks for invalidated sessions are blocked from being
acquired until this delay has expired. Durations are in seconds
default: 15
required: false
node:
description:
- the name of the node with which the session will be associated.
by default this is the name of the agent.
required: false
default: None
datacenter:
description:
- name of the datacenter in which the session exists or should be
created.
required: false
default: None
checks:
description:
- a list of checks that will be used to verify the session health. If
all the checks fail, the session will be invalidated and any locks
associated with the session will be release and can be acquired once
the associated lock delay has expired.
required: false
default: None
host:
description:
- host of the consul agent defaults to localhost
required: false
default: localhost
port:
description:
- the port on which the consul agent is running
required: false
default: 8500
scheme:
description:
- the protocol scheme on which the consul agent is running
required: false
default: http
version_added: "2.1"
validate_certs:
description:
- whether to verify the tls certificate of the consul agent
required: false
default: True
version_added: "2.1"
behavior:
description:
- the optional behavior that can be attached to the session when it
is created. This can be set to either 'release' or 'delete'. This
controls the behavior when a session is invalidated.
default: release
required: false
version_added: "2.2"
"""
EXAMPLES = '''
- name: register basic session with consul
consul_session:
name: session1
- name: register a session with an existing check
consul_session:
name: session_with_check
checks:
- existing_check_name
- name: register a session with lock_delay
consul_session:
name: session_with_delay
delay: 20s
- name: retrieve info about session by id
consul_session: id=session_id state=info
- name: retrieve active sessions
consul_session: state=list
'''
try:
import consul
from requests.exceptions import ConnectionError
python_consul_installed = True
except ImportError:
python_consul_installed = False
def execute(module):
    """Dispatch to the handler that matches the requested state."""
    state = module.params.get('state')
    if state == 'present':
        update_session(module)
    elif state in ('info', 'list', 'node'):
        lookup_sessions(module)
    else:
        remove_session(module)
def lookup_sessions(module):
    """Retrieve session information and exit the module with the result.

    state 'list' -> all sessions in the datacenter;
    state 'node' -> sessions for a named node (node param required);
    state 'info' -> a single session by id (id param required).
    Fails the module on any consul error.
    """
    datacenter = module.params.get('datacenter')
    state = module.params.get('state')
    consul_client = get_consul_api(module)
    try:
        if state == 'list':
            sessions_list = consul_client.session.list(dc=datacenter)
            # ditch the index, this can be grabbed from the results
            if sessions_list and sessions_list[1]:
                sessions_list = sessions_list[1]
            module.exit_json(changed=True,
                             sessions=sessions_list)
        elif state == 'node':
            node = module.params.get('node')
            if not node:
                module.fail_json(
                    msg="node name is required to retrieve sessions for node")
            sessions = consul_client.session.node(node, dc=datacenter)
            module.exit_json(changed=True,
                             node=node,
                             sessions=sessions)
        elif state == 'info':
            session_id = module.params.get('id')
            if not session_id:
                module.fail_json(
                    # Fix: corrected "indvidual" typo in the error message.
                    msg="session_id is required to retrieve individual session info")
            session_by_id = consul_client.session.info(session_id, dc=datacenter)
            module.exit_json(changed=True,
                             session_id=session_id,
                             sessions=session_by_id)
    except Exception as e:
        module.fail_json(msg="Could not retrieve session info %s" % e)
def update_session(module):
    """Create (register) a session from the module parameters and exit
    with the new session id; fail the module on any consul error."""
    params = module.params
    consul_client = get_consul_api(module)
    try:
        created_id = consul_client.session.create(
            name=params.get('name'),
            behavior=params.get('behavior'),
            node=params.get('node'),
            lock_delay=params.get('delay'),
            dc=params.get('datacenter'),
            checks=params.get('checks')
        )
        module.exit_json(changed=True,
                         session_id=created_id,
                         name=params.get('name'),
                         behavior=params.get('behavior'),
                         delay=params.get('delay'),
                         checks=params.get('checks'),
                         node=params.get('node'))
    except Exception as e:
        module.fail_json(msg="Could not create/update session %s" % e)
def remove_session(module):
    """Destroy the session whose id is given in the module parameters."""
    session_id = module.params.get('id')
    if not session_id:
        module.fail_json(msg="""A session id must be supplied in order to
    remove a session.""")
    consul_client = get_consul_api(module)
    try:
        consul_client.session.destroy(session_id)
    except Exception as e:
        module.fail_json(msg="Could not remove session with id '%s' %s" % (
            session_id, e))
    else:
        module.exit_json(changed=True,
                         session_id=session_id)
def get_consul_api(module):
    """Build a Consul client from the module's connection parameters.

    Fix: honor the documented 'scheme' and 'validate_certs' options,
    which were previously ignored (the client always used the library's
    http defaults regardless of what the task specified).
    """
    return consul.Consul(host=module.params.get('host'),
                         port=module.params.get('port'),
                         scheme=module.params.get('scheme'),
                         verify=module.params.get('validate_certs'))
def test_dependencies(module):
    """Fail fast when the python-consul client library is missing."""
    if python_consul_installed:
        return
    module.fail_json(msg="python-consul required for this module. "
                         "see http://python-consul.readthedocs.org/en/latest/#installation")
def main():
argument_spec = dict(
checks=dict(default=None, required=False, type='list'),
delay=dict(required=False,type='int', default='15'),
behavior=dict(required=False,type='str', default='release',
choices=['release', 'delete']),
host=dict(default='localhost'),
port=dict(default=8500, type='int'),
scheme=dict(required=False, default='http'),
validate_certs=dict(required=False, default=True),
id=dict(required=False),
name=dict(required=False),
node=dict(require |
"""Tests for asyncio/sslproto.py."""
try:
import ssl
except ImportError:
ssl = None
import trollius as asyncio
from trollius import ConnectionResetError
from trollius import sslproto
from trollius import test_utils
from trollius.test_utils import mock
from trollius.test_utils import unittest
@unittest.skipIf(ssl is None, 'No ssl module')
class SslProtoHandshakeTests(test_utils.TestCase):
    """Regression tests for SSLProtocol handshake edge cases."""

    def setUp(self):
        self.loop = asyncio.new_event_loop()
        self.set_event_loop(self.loop)

    def ssl_protocol(self, waiter=None):
        # Build an SSLProtocol wrapping a plain Protocol; ensure its app
        # transport is closed during teardown.
        sslcontext = test_utils.dummy_ssl_context()
        app_proto = asyncio.Protocol()
        proto = sslproto.SSLProtocol(self.loop, app_proto, sslcontext, waiter)
        self.addCleanup(proto._app_transport.close)
        return proto

    def connection_made(self, ssl_proto, do_handshake=None):
        # Drive connection_made with a mocked _SSLPipe so no real TLS
        # handshake happens; `do_handshake` overrides the pipe's behavior.
        transport = mock.Mock()
        sslpipe = mock.Mock()
        sslpipe.shutdown.return_value = b''
        if do_handshake:
            sslpipe.do_handshake.side_effect = do_handshake
        else:
            def mock_handshake(callback):
                return []
            sslpipe.do_handshake.side_effect = mock_handshake
        with mock.patch('trollius.sslproto._SSLPipe', return_value=sslpipe):
            ssl_proto.connection_made(transport)

    def test_cancel_handshake(self):
        # Python issue #23197: cancelling an handshake must not raise an
        # exception or log an error, even if the handshake failed
        waiter = asyncio.Future(loop=self.loop)
        ssl_proto = self.ssl_protocol(waiter)
        handshake_fut = asyncio.Future(loop=self.loop)

        def do_handshake(callback):
            exc = Exception()
            callback(exc)
            handshake_fut.set_result(None)
            return []

        # Cancel the waiter *before* the handshake completes.
        waiter.cancel()
        self.connection_made(ssl_proto, do_handshake)
        with test_utils.disable_logger():
            self.loop.run_until_complete(handshake_fut)

    def test_eof_received_waiter(self):
        # EOF before the handshake finishes must resolve the waiter with
        # ConnectionResetError rather than leaving it pending.
        waiter = asyncio.Future(loop=self.loop)
        ssl_proto = self.ssl_protocol(waiter)
        self.connection_made(ssl_proto)
        ssl_proto.eof_received()
        test_utils.run_briefly(self.loop)
        self.assertIsInstance(waiter.exception(), ConnectionResetError)
if __name__ == '__main__':
    # Run the handshake tests via unittest's CLI entry point.
    unittest.main()
|
import tensorflow as tf
from tensorflow.contrib import slim as slim
from avb.ops import *
import math
def encoder(x, config, is_training=True):
    """Fully-connected AVB encoder with an IAF context head.

    Args:
        x: input batch (values assumed in [0, 1] — rescaled below).
        config: dict providing 'z_dim' (latent size) and 'iaf_a_dim'
            (IAF context size).
        is_training: unused here; kept for interface parity with other
            encoders.

    Returns:
        (zmean, log_zstd, a): latent mean, latent log-std, IAF context.
    """
    # Fix: removed the unused `df_dim` local read.
    z_dim = config['z_dim']
    a_dim = config['iaf_a_dim']
    # Center x at 0: map [0, 1] -> [-1, 1]
    x = 2*x - 1
    net = flatten_spatial(x)
    net = slim.fully_connected(net, 300, activation_fn=tf.nn.softplus, scope="fc_0")
    net = slim.fully_connected(net, 300, activation_fn=tf.nn.softplus, scope="fc_1")
    zmean = slim.fully_connected(net, z_dim, activation_fn=None)
    log_zstd = slim.fully_connected(net, z_dim, activation_fn=None)
    a = slim.fully_connected(net, a_dim, activation_fn=None)
    return zmean, log_zstd, a
|
"""
2015 gupon.jp
Connector for C4D Python Generator
"""
import c4d, math, itertools, random
from c4d.modules import mograph as mo
#userdata id
ID_SPLINE_TYPE = 2
ID_SPLINE_CLOSED = 4
ID_SPLINE_INTERPOLATION = 5
ID_SPLINE_SUB = 6
ID_SPLINE_ANGLE = 8
ID_SPLINE_MAXIMUMLENGTH = 9
ID_USE_SCREEN_DIST = 10
ID_USE_MAXSEG = 15
ID_MAXSEG_NUM = 13
ID_USE_CENTER = 19
ID_CENTER_OBJ = 18
class Point:
    """A world-space position plus its cached screen-space projection."""

    def __init__(self, p):
        self.world = p
        self.screen = c4d.Vector(0)

    def calc2D(self, bd):
        """Project the world position through BaseDraw `bd`, dropping depth."""
        projected = bd.WS(self.world)
        projected.z = 0
        self.screen = projected
class PointGroup:
    """An ordered collection of Points with pairing helpers."""

    def __init__(self):
        self.points = []

    def AddPoint(self, point):
        """Wrap a raw position in a Point and append it."""
        self.points.append(Point(point))

    def Make2DPoints(self):
        """Project every stored point into screen space."""
        view = doc.GetRenderBaseDraw()
        for pt in self.points:
            pt.calc2D(view)

    def MakeCombsWith(self, target):
        """All cross pairs [a, b]: a from this group, b from `target`."""
        return [[own, other]
                for own in self.points
                for other in target.points]

    def MakeCombsInOrder(self):
        """Consecutive pairs, with the last point wrapping back to the first."""
        if not self.points:
            return []
        rotated = self.points[1:] + self.points[:1]
        return [[a, b] for a, b in zip(self.points, rotated)]

    def GetPoint(self, index):
        return self.points[index]

    def GetAllPoints(self):
        return self.points

    def GetNumPoints(self):
        return len(self.points)
def SetSplineGUI():
    """Show/hide the generator's user-data fields to match the chosen
    intermediate-points mode (mirrors Cinema 4D's native spline UI)."""
    UD = op.GetUserDataContainer()
    intermediatePoints = op[c4d.ID_USERDATA, ID_SPLINE_INTERPOLATION]
    for id, bc in UD:
        # Subdivision count only applies to Natural/Uniform interpolation.
        if id[1].id == ID_SPLINE_SUB:
            if intermediatePoints == c4d.SPLINEOBJECT_INTERPOLATION_NATURAL \
            or intermediatePoints == c4d.SPLINEOBJECT_INTERPOLATION_UNIFORM:
                bc[c4d.DESC_HIDE] = False
            else:
                bc[c4d.DESC_HIDE] = True
        # Angle only applies to Adaptive/Subdivided interpolation.
        if id[1].id == ID_SPLINE_ANGLE:
            if intermediatePoints == c4d.SPLINEOBJECT_INTERPOLATION_ADAPTIVE \
            or intermediatePoints == c4d.SPLINEOBJECT_INTERPOLATION_SUBDIV:
                bc[c4d.DESC_HIDE] = False
            else:
                bc[c4d.DESC_HIDE] = True
        # Maximum length only applies to Subdivided interpolation.
        if id[1].id == ID_SPLINE_MAXIMUMLENGTH:
            if intermediatePoints == c4d.SPLINEOBJECT_INTERPOLATION_SUBDIV:
                bc[c4d.DESC_HIDE] = False
            else:
                bc[c4d.DESC_HIDE] = True
        # Dependent options follow their enabling checkboxes.
        if id[1].id == ID_MAXSEG_NUM:
            bc[c4d.DESC_HIDE] = not op[c4d.ID_USERDATA, ID_USE_MAXSEG]
        if id[1].id == ID_CENTER_OBJ:
            bc[c4d.DESC_HIDE] = not op[c4d.ID_USERDATA, ID_USE_CENTER]
        # Write the (possibly modified) description back for this entry.
        op.SetUserDataContainer(id, bc)
def SetSplineAttributes(obj):
    """Copy the generator's user-data spline settings onto `obj` and
    notify it that its data changed."""
    attribute_map = (
        (c4d.SPLINEOBJECT_TYPE, ID_SPLINE_TYPE),
        (c4d.SPLINEOBJECT_CLOSED, ID_SPLINE_CLOSED),
        (c4d.SPLINEOBJECT_INTERPOLATION, ID_SPLINE_INTERPOLATION),
        (c4d.SPLINEOBJECT_SUB, ID_SPLINE_SUB),
        (c4d.SPLINEOBJECT_ANGLE, ID_SPLINE_ANGLE),
        (c4d.SPLINEOBJECT_MAXIMUMLENGTH, ID_SPLINE_MAXIMUMLENGTH),
    )
    for spline_id, data_id in attribute_map:
        obj[spline_id] = op[c4d.ID_USERDATA, data_id]
    obj.Message(c4d.MSG_UPDATE)
def GetPointsFromObjects(targetList):
    """Collect one PointGroup per target object, with positions expressed
    relative to the generator's own matrix."""
    # Sampling step: only every `step`-th clone of a MoGraph object is used.
    step = op[c4d.ID_USERDATA, 12]
    # add every points to list
    pointGroups = []
    baseMg = op.GetMg()
    for target in targetList:
        if target != None :
            group = PointGroup()
            moData = mo.GeGetMoData(target)
            if moData==None:
                # Plain object: use its global offset transformed into the
                # generator's local space.
                group.AddPoint(target.GetMg().off * ~baseMg)
            else:
                # MoGraph object: sample the clone matrices.
                if not moData.GetCount():
                    continue
                moList = moData.GetArray(c4d.MODATA_MATRIX)
                clonerMg = target.GetMg()
                for i,data in enumerate(moList):
                    if i % step == 0:
                        group.AddPoint(data.off * clonerMg * ~baseMg)
            pointGroups.append(group)
    return pointGroups
def SetCombinations(pointGroups, obj):
    """Build the connector spline: pair up points (within the distance
    limit), then write the pairs into `obj` as 2-point segments."""
    bd = doc.GetRenderBaseDraw()
    maxDist = op[c4d.ID_USERDATA, 1]
    excludeSame = op[c4d.ID_USERDATA, 11]
    maxSegNum = op[c4d.ID_USERDATA, 13]
    useMaxSeg = op[c4d.ID_USERDATA, 15]
    useCenter = op[c4d.ID_USERDATA, ID_USE_CENTER]
    useScreenDist = op[c4d.ID_USERDATA, 10]
    if useScreenDist:
        # Distance check happens in screen space; rescale maxDist so it is
        # expressed in thousandths of the render frame's width.
        for group in pointGroups:
            group.Make2DPoints()
        frame = bd.GetSafeFrame()
        baseLength = frame["cr"] - frame["cl"]
        maxDist = baseLength * maxDist/1000
    _combs = []
    inOrder = False
    # if inOrder:
    #     for group in pointGroups:
    #         _combs = _combs + group.MakeCombsInOrder()
    if useCenter:
        # Star topology: every point connects to the chosen center object.
        target = op[c4d.ID_USERDATA, ID_CENTER_OBJ]
        if target:
            pA = Point(target.GetMg().off * ~op.GetMg())
            for group in pointGroups:
                for pB in group.GetAllPoints():
                    _combs.append([pA, pB])
        else:
            print "no target found"
            return
    else:
        if excludeSame:
            # Only pair points from *different* groups.
            numGroups = len(pointGroups)
            for i in range(numGroups-1):
                groupA = pointGroups[i]
                for j in range(i+1, numGroups):
                    groupB = pointGroups[j]
                    _combs = _combs + groupA.MakeCombsWith(groupB)
        else:
            # Pair every point with every other point, regardless of group.
            allPoints = []
            for group in pointGroups:
                allPoints = allPoints + group.GetAllPoints()
            numPoints = len(allPoints)
            for i in range(numPoints-1):
                for j in range(i+1, numPoints):
                    _combs.append([allPoints[i], allPoints[j]])
    # Keep only pairs within the distance limit (screen or world space).
    combs = []
    for comb in _combs:
        v0 = comb[0].screen if useScreenDist else comb[0].world
        v1 = comb[1].screen if useScreenDist else comb[1].world
        if c4d.Vector(v1 - v0).GetLength() < maxDist:
            combs.append(comb)
    # Shuffle so the per-point segment cap below doesn't always favor the
    # same pairs (seeded in main() for stable results).
    random.shuffle(combs)
    obj.ResizeObject(len(combs) * 2)
    for i, comb in enumerate(combs):
        a = comb[0].world
        b = comb[1].world
        addP = True
        if useMaxSeg:
            # Skip the pair if either endpoint already appears in
            # `maxSegNum` segments (0 disables all segments).
            if maxSegNum:
                acnt = 0
                bcnt = 0
                for p in obj.GetAllPoints():
                    if p == a: acnt += 1
                    if p == b: bcnt += 1
                    if acnt >= maxSegNum or bcnt >= maxSegNum:
                        addP = False
                        break
            else:
                addP = False
        if addP:
            obj.SetPoint(i * 2 + 0, a)
            obj.SetPoint(i * 2 + 1, b)
    # One 2-point segment per (kept) combination.
    # NOTE(review): skipped pairs still occupy zeroed point slots and get a
    # segment entry — appears intentional/tolerated by the generator.
    obj.MakeVariableTag(c4d.Tsegment, len(combs))
    for i in range(len(combs)):
        obj.SetSegment(i, 2, False)
def main():
    """Python Generator entry point: returns the connector spline object."""
    # Fixed seed keeps the shuffled segment order stable across redraws.
    random.seed(100)
    obj = c4d.BaseObject(c4d.Ospline)
    targetListData = op[c4d.ID_USERDATA, 3]
    numTargets = targetListData.GetObjectCount()
    if numTargets < 1:
        # No targets configured: return an empty spline.
        return obj
    targetList = []
    for i in range(numTargets):
        targetList.append(targetListData.ObjectFromIndex(doc, i))
    pointGroups = GetPointsFromObjects(targetList)
    if len(pointGroups) < 1:
        return obj
    SetCombinations(pointGroups, obj)
    SetSplineGUI()
    SetSplineAttributes(obj)
    return obj
import pandas as pd
import numpy as np
from dateutil.relativedelta import relativedelta
#### Utilities
def get_first_visit_date(data_patient):
    ''' Determines the first visit for a given patient'''
    #IDEA Could be parallelized in Dask
    earliest = min(data_patient['visit_date'])
    data_patient['first_visit_date'] = earliest
    return data_patient
def subset_analysis_data(data, date_analysis):
    ''' Function that subsets the full dataset to only the data available for a certain analysis date'''
    # Coerce the entry dates once if they are still raw strings.
    if type(data.date_entered.iloc[0]) is str :
        data['date_entered'] = pd.to_datetime(data['date_entered'])
    return data[data['date_entered'] < date_analysis]
def subset_cohort(data, horizon_date, horizon_time, bandwidth):
    ''' Function that subsets data from a cohort that has initiated care a year before the horizon_date, and after a year + bandwith'''
    anchor = pd.to_datetime(horizon_date)
    data['first_visit_date'] = pd.to_datetime(data['first_visit_date'])
    # Half-open enrollment window: [anchor - (horizon_time + bandwidth), anchor - horizon_time)
    window_start = anchor - relativedelta(days=horizon_time + bandwidth)
    window_end = anchor - relativedelta(days=horizon_time)
    in_window = ((data['first_visit_date'] >= window_start) &
                 (data['first_visit_date'] < window_end))
    return data[in_window]
#### Standard reporting
def status_patient(data_patient, reference_date, grace_period):
    ''' Determines the status of a patient at a given reference_date, given the data available at a given analysis_date

    Returns a one-row DataFrame with status ('Followed', 'LTFU', or the
    recorded discontinuation reason), timing fields and the facility.
    TODO Also select the available data for Death and Transfer and other outcomes based on data entry time
    '''
    #IDEA Could be parallelized in Dask
    data_patient = get_first_visit_date(data_patient)
    date_out = pd.NaT
    # Latest scheduled appointment defines how "late" the patient is.
    date_last_appointment = pd.to_datetime(max(data_patient.next_visit_date))
    late_time = reference_date - date_last_appointment
    if late_time.days > grace_period:
        status = 'LTFU'
        date_out = date_last_appointment
    if late_time.days <= grace_period:
        status = 'Followed'
    # A recorded discharge before the reference date overrides either status.
    # NOTE(review): `is not np.nan` relies on NaN identity — presumably holds
    # for this data source, but confirm reasonDescEn uses the np.nan singleton.
    if (data_patient.reasonDescEn.iloc[0] is not np.nan) & (pd.to_datetime(data_patient.discDate.iloc[0]) < reference_date):
        status = data_patient.reasonDescEn.iloc[0]
        date_out = pd.to_datetime(data_patient.discDate.iloc[0])
    return pd.DataFrame([{'status': status,
                          'late_time': late_time,
                          'last_appointment': date_last_appointment,
                          'date_out':date_out ,
                          'first_visit_date':data_patient.first_visit_date.iloc[0],
                          'facility':data_patient.facility.iloc[0]}])
def horizon_outcome(data_cohort, reference_date, horizon_time):
    """Assign each cohort member their status at `horizon_time` days after
    their first visit; exits after the horizon still count as 'Followed'."""
    # TODO Make sure dates are dates
    data_cohort['first_visit_date'] = pd.to_datetime(data_cohort['first_visit_date']) #TODO This conversion should happen earlier
    data_cohort.loc[:, 'horizon_date'] = data_cohort['first_visit_date'] + np.timedelta64(horizon_time, 'D')
    data_cohort.loc[: , 'horizon_status'] = data_cohort['status']
    # If the patient exited the cohort after his horizon date, still consider him followed
    # BUG This is marginally invalid, for example if a patient was considered LTFU before he died
    data_cohort.horizon_status[~(data_cohort['status'] == 'Followed') & (data_cohort['date_out'] > data_cohort['horizon_date'])] = 'Followed'
    return data_cohort
## Transversal description only
def n_visits(data, month):
    """Count the visits in `data` whose visit_date falls in `month` (a pd.Period)."""
    visit_months = pd.to_datetime(data['visit_date']).dt.to_period('M')
    return (visit_months == month).sum()
def make_report(data, reference_date, date_analysis, grace_period, horizon_time, cohort_width):
    """Produce the monthly report dict with transversal status counts,
    longitudinal cohort outcomes and per-facility visit counts.

    Returns None when no data had been entered before `date_analysis`.
    """
    assert reference_date <= date_analysis, 'You should not analyze a period before you have the data (date of analysis is before reference date)'
    if type(reference_date) is str :
        reference_date = pd.to_datetime(reference_date)
    if type(date_analysis) is str:
        date_analysis = pd.to_datetime(date_analysis)
    # Only use records that were entered before the analysis date.
    report_data = subset_analysis_data(data, date_analysis)
    if len(report_data) > 0:
        # Report on the month preceding the reference date.
        month = reference_date.to_period('M') - 1
        n_visits_month = report_data.groupby('facility').apply(n_visits, month)
        # NOTE(review): grace period is hard-coded to 90 here, ignoring the
        # `grace_period` argument — confirm whether that is intentional.
        df_status = report_data.groupby('patient_id').apply(status_patient, reference_date, 90)
        cohort_data = subset_cohort(df_status, reference_date, horizon_time, cohort_width)
        # print(df_status.head())
        # NOTE(review): horizon_outcome gets `month` (a Period) as its
        # reference_date and a hard-coded 365 instead of horizon_time — verify.
        horizon_outcome_data = horizon_outcome(cohort_data, month, 365)
        transversal_reports = df_status.groupby('facility').status.value_counts()
        longitudinal_reports = horizon_outcome_data.groupby('facility').status.value_counts()
        out_reports = {'transversal':transversal_reports,
                       'longitudinal':longitudinal_reports,
                       'n_visits':n_visits_month}
        return out_reports
# QUESTION What are the form_types
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import latex_plot_inits
# Example configurations: (n, distance, learn_rate, max_iter, num_threads,
# seed, nperceptrons)
parameter_list = [[20, 5, 1., 1000, 1, None, 5], [100, 5, 1., 1000, 1, None, 10]]
def classifier_perceptron_graphical(n=100, distance=5, learn_rate=1., max_iter=1000, num_threads=1, seed=None, nperceptrons=5):
    """Train `nperceptrons` randomly-initialized perceptrons on synthetic
    2D two-class data and plot each learned decision boundary.

    Returns the last trained Perceptron.
    """
    from shogun import RealFeatures, BinaryLabels
    from shogun import Perceptron
    from shogun import MSG_INFO

    # 2D data
    _DIM = 2

    # To get the nice message that the perceptron has converged
    dummy = BinaryLabels()
    dummy.io.set_loglevel(MSG_INFO)

    np.random.seed(seed)

    # Produce some (probably) linearly separable training data by hand
    # Two Gaussians at a far enough distance
    X = np.array(np.random.randn(_DIM,n))+distance
    Y = np.array(np.random.randn(_DIM,n))
    label_train_twoclass = np.hstack((np.ones(n), -np.ones(n)))

    fm_train_real = np.hstack((X,Y))
    feats_train = RealFeatures(fm_train_real)
    labels = BinaryLabels(label_train_twoclass)

    perceptron = Perceptron(feats_train, labels)
    perceptron.set_learn_rate(learn_rate)
    perceptron.set_max_iter(max_iter)
    # Weights/bias are set by hand below, so skip auto-initialization.
    perceptron.set_initialize_hyperplane(False)

    # Find limits for visualization
    x_min = min(np.min(X[0,:]), np.min(Y[0,:]))
    x_max = max(np.max(X[0,:]), np.max(Y[0,:]))
    y_min = min(np.min(X[1,:]), np.min(Y[1,:]))
    y_max = max(np.max(X[1,:]), np.max(Y[1,:]))

    for i in xrange(nperceptrons):
        # Initialize randomly weight vector and bias
        perceptron.set_w(np.random.random(2))
        perceptron.set_bias(np.random.random())

        # Run the perceptron algorithm
        perceptron.train()

        # Construct the hyperplane for visualization
        # Equation of the decision boundary is w^T x + b = 0
        b = perceptron.get_bias()
        w = perceptron.get_w()

        hx = np.linspace(x_min-1,x_max+1)
        hy = -w[1]/w[0] * hx

        plt.plot(hx, -1/w[1]*(w[0]*hx+b))

    # Plot the two-class data
    plt.scatter(X[0,:], X[1,:], s=40, marker='o', facecolors='none', edgecolors='b')
    plt.scatter(Y[0,:], Y[1,:], s=40, marker='s', facecolors='none', edgecolors='r')

    # Customize the plot
    plt.axis([x_min-1, x_max+1, y_min-1, y_max+1])
    plt.title('Rosenblatt\'s Perceptron Algorithm')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.show()

    return perceptron
if __name__=='__main__':
    # Run the demo with the first example parameter set.
    print('Perceptron graphical')
    classifier_perceptron_graphical(*parameter_list[0])
|
from typing import Optional
from thinc.api import Model
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .lex_attrs import LEX_ATTRS
from .lemmatizer import RussianLemmatizer
from ...language import Language
class RussianDefaults(Language.Defaults):
    """Language-specific defaults for the Russian pipeline: tokenizer
    special cases, lexical attribute getters, and stop words."""
    tokenizer_exceptions = TOKENIZER_EXCEPTIONS
    lex_attr_getters = LEX_ATTRS
    stop_words = STOP_WORDS
class Russian(Language):
    """spaCy Language subclass for Russian (ISO code "ru")."""
    lang = "ru"
    Defaults = RussianDefaults
@Russian.factory(
    "lemmatizer",
    assigns=["token.lemma"],
    default_config={"model": None, "mode": "pymorphy2", "overwrite": False},
    default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
    nlp: Language,
    model: Optional[Model],
    name: str,
    mode: str,
    overwrite: bool,
):
    """Construct the Russian lemmatizer pipeline component.

    Registered on the Russian language via ``@Russian.factory``; spaCy
    calls this with values from ``default_config`` unless overridden.

    nlp: the pipeline's Language object (its vocab is passed through).
    model: optional trainable model (defaults to None).
    name: component name within the pipeline.
    mode: lemmatization mode (default "pymorphy2").
    overwrite: whether existing lemma annotations are overwritten.
    """
    return RussianLemmatizer(nlp.vocab, model, name, mode=mode, overwrite=overwrite)
__all__ = ["Russian"]
|
if (not found_next_field):
print ("Skipping hit at " + hex(hit) + " - cannot find PHONE2 field")
continue # can't find next field so skip this hit
funi.seek(fb.tell())
phonestring2 = read_nullterm_unistring(funi)
if (phonestring2 == ""):
print ("Skipping hit at " + hex(hit) + " - cannot read PHONE2 field")
continue # skip this hit if empty string
# print phonestring2
offset_after_string = funi.tell()
found_next_field = goto_next_field(fb, offset_after_string, 3)
if (not found_next_field):
print ("Skipping hit at " + hex(hit) + " - cannot find PHONE3 field")
continue # can't find next field so skip this hit
funi.seek(fb.tell())
phonestring3 = read_nullterm_unistring(funi)
if (phonestring3 == ""):
print ("Skipping hit at " + hex(hit) + " - cannot read PHONE3 field")
continue # skip this hit if empty string
# print phonestring3
# print "Number(s): " + phonestring1 + ", " + phonestring2 + ", " + phonestring3
offset_after_string = funi.tell()
found_next_field = goto_next_field(fb, offset_after_string, 3)
if (not found_next_field):
| print ("Skipping hit at " + hex(hit) + " - cannot find Received text field")
continue # can't find next field so skip this hit
| string_offset = fb.tell()
funi.seek(string_offset)
unistring = read_nullterm_unistring(funi)
# print "Text (" + hex(string_offset).rstrip("L") +"): " + unistring
timeval = 0
if (nums_listed == 0):
# Original method: Manual adjustment of FILETIME2 offset value
# Offsets between begin of FILETIME2 and begin of "SMStext" string for Sent SMS
# MAD: 0xBF | OH: 0xB4 | DUB1: 0xBF | DUB2: 0xB4 bytes
# WARNING: Might need to adjust the 0xBF value to suit your data ...
# Note: Remember there's no PHONE0 field to account for in Sent SMS.
# filetime2_offset = 0xBF
# fb.seek(hit - filetime2_offset)
# timeval = read_filetime(fb)
# Experimental method. Use test data offsets +/-5
# From test data, minimum offset was 0xB4.
# Allowing for some tolerance => 0xB4 - 5 = 0xAF as min offset
# From test data, maximum offset was 0xBF.
# Allowing for some tolerance => 0xBF + 5 = 0xC4 as max offset
# Some adjustment may be required for other data sets
fb.seek(hit)
# timeval = find_timestamp(fb, 0xC4, 0xAF)
timeval = find_timestamp(fb, 0xEA+0x5, 0x7D) # Based on 30AUG DUB data, change the max offset to 0xEA + 5,
# Based on 1SEP data change min to x7D (from 0xAF)
if (nums_listed == 1):
# Old method: This doesnt handle variable length phone numbers
# Offsets between begin of FILETIME2 and begin of "SMStext" string for Recvd SMS
# MAD: 0xEA | OH: 0xDF | DUB1: 0xEC | DUB2: 0xDF bytes
# fb.seek(hit - 0xEA)
# timeval = read_filetime(fb)
#
# Updated method of calculating FILETIME2 offset using the "PHONE0" field length.
# This means the script can handle received SMS with variable length phone numbers
# offset = length of string in bytes + (NULL bytes + "IPM." + 0x01 byte = 0xB) + offset from beginning of
# FILETIME2 to start of phonestring (=0xC7)
# This assumes "PHONE0" is same length as "PHONE1" (phonestring)
# WARNING: Might need to adjust the 0xC7 value to suit your data ...
# 0xEA = 12 digit phone number (0x18 bytes) + 0xB + 0xC7
# 0xEC = 13 digit phone number (0x1A bytes) + 0xB + 0xC7
# filetime2_offset = len(phonestring)*2 + (0xB) + 0xC7
# print "filetime2_offset = " + hex(filetime2_offset)
# fb.seek(hit - filetime2_offset)
# timeval = read_filetime(fb)
# Experimental method: Use projected min/max from test data
# From the test data, we can see a maximum offset of 0xEC (236 dec) for 13 digits (ie DUB1).
# So for the theoretical maximum of 15 digits, this projects to 0xD4 (240 dec) for 15 digits.
# Add in some tolerance and we will use 0xFA (250 dec) for our max offset between FILETIME2 and "SMStext"
# From the test data, we can see a minimum offset of 0xDF (223 dec) for 13 digits (ie DUB2).
# So for the theoretical minimum of 1 digit, this projects to 0xC7 (199 dec).
# Add in some tolerance and we will use 0xBD (189 dec) for our min offset between FILETIME2 and "SMStext"
fb.seek(hit)
# timeval = find_timestamp(fb, 0xFA, 0xBD)
timeval = find_timestamp(fb, 0xFA, 0x9B) # Based on 30AUG DUB data, change the min offset to 0xB8 - 5
# Based on MPD log file data, changed min offset to 0x9B
timestring = ""
if (timeval != 0):
# print "timeval = " + hex(timeval)
try:
# returns time referenced to local system timezone
# timestring = datetime_advanced.datetime_advanced.fromtimestamp(timeval).isoformat()
# returns UTC time
timestring = datetime.datetime.utcfromtimestamp(timeval).isoformat()
except (ValueError):
timestring = "Error"
else:
# something bad happened reading time
timestring = "Error"
# print "Time2 (UTC) = " + timestring + "\n"
# If no number listed (ie sent SMS), try grabbing the PHONEX phone number based on the FILETIME2 timestamp retrieved
if (nums_listed == 0 and timestring != "Error"):
phonestring = "Unknown"
if (timestring in smslogdict.keys()):
phonestring = smslogdict[timestring]
# Store parsed data in dictionary keyed by SMS string offset
sms_entries[string_offset] = (timestring, sentflag, phonestring, unistring)
# ends for hits loop
print ("\nProcessed " + str(len(hits)) + " SMStext hits\n")
# sort by filetime
sorted_messages_keys = sorted(sms_entries, key=lambda x: (sms_entries[x][0]))
# print to TSV
# open contacts output file if reqd
if ("sms.tsv" is not None):
tsvof = None
try:
tsvof = open("sms.tsv", "w")
except (IOError, ValueError):
print ("Trouble Opening TSV Output File")
exit(-1)
tsvof.write("Text_Offset\tUTC_Time2\tDirection\tPhone_No\tText\n")
for key in sorted_messages_keys:
tsvof.write(
hex(key).rstrip("L") + "\t" +
sms_entries[key][0] + "\t" +
sms_entries[key][1] + "\t" +
sms_entries[key][2] + "\t" +
sms_entries[key][3] + "\n")
print ("\nFinished writing out " + str(len(sorted_messages_keys)) + " TSV entries\n")
tsvof.close()
funi.close()
fb.close()
# print("Running " + __version__ + "\n")
usage = " %prog -f dump -o database -d directory"
# Handle command line args
parser = OptionParser(usage=usage)
parser.add_option("-f", dest="dump",
action="store", type="string",
help="Input File To Be Searched")
parser.add_option("-o", dest="database",
action="store", type="string",
help="sqlite3 database holding processed phone data")
parser.add_option("-d", dest="directory",
action="store", type="string",
help="base directory for other arguments (default current directory)")
(options, cmd_args) = parser.parse_args()
fb = None
# Check if no arguments given by user, exit
if len(sys.argv) == 1:
parser.print_help()
exit(-1)
if (options.dump is None):
parser.print_help()
print ("\nInput filename incorrectly specified!")
|
"""Support for tracking consumption over given periods of time."""
from datetime import timedelta
import logging
from croniter import croniter
import voluptuous as vol
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import CONF_NAME
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import RestoreEntity
from .const import (
ATTR_TARIFF,
CONF_CRON_PATTERN,
CONF_METER,
CONF_METER_DELTA_VALUES,
CONF_METER_NET_CONSUMPTION,
CONF_METER_OFFSET,
CONF_METER_TYPE,
CONF_SOURCE_SENSOR,
CONF_TARIFF,
CONF_TARIFF_ENTITY,
CONF_TARIFFS,
DATA_TARIFF_SENSORS,
DATA_UTILITY,
DOMAIN,
METER_TYPES,
SERVICE_RESET,
SERVICE_SELECT_NEXT_TARIFF,
SERVICE_SELECT_TARIFF,
SIGNAL_RESET_METER,
)
_LOGGER = logging.getLogger(__name__)

# Icon shown for the tariff selector entity.
TARIFF_ICON = "mdi:clock-outline"

# State-attribute key listing all configured tariffs.
ATTR_TARIFFS = "tariffs"

# Default meter reset offset (no offset).
DEFAULT_OFFSET = timedelta(hours=0)
def validate_cron_pattern(pattern):
    """Return *pattern* unchanged when it is a valid cron expression.

    Raises vol.Invalid otherwise.
    """
    if not croniter.is_valid(pattern):
        raise vol.Invalid("Invalid pattern")
    return pattern
def period_or_cron(config):
    """Validate that a cron pattern is not combined with a meter type
    or a non-default meter offset; returns the config unchanged."""
    has_cron = CONF_CRON_PATTERN in config
    if has_cron and CONF_METER_TYPE in config:
        raise vol.Invalid(f"Use <{CONF_CRON_PATTERN}> or <{CONF_METER_TYPE}>")
    offset_given = (
        CONF_METER_OFFSET in config and config[CONF_METER_OFFSET] != DEFAULT_OFFSET
    )
    if has_cron and offset_given:
        raise vol.Invalid(
            f"When <{CONF_CRON_PATTERN}> is used <{CONF_METER_OFFSET}> has no meaning"
        )
    return config
def max_28_days(config):
    """Validate that the offset period spans fewer than 28 days.

    Longer offsets are ambiguous across months; a cron pattern must be
    used instead.

    :param config: a ``datetime.timedelta`` offset to validate
    :returns: the unchanged offset when valid
    :raises vol.Invalid: when the offset is 28 days or more
    """
    if config.days >= 28:
        # Fixed grammar of the user-facing message ("then" -> "than").
        raise vol.Invalid(
            "Unsupported offset of more than 28 days, please use a cron pattern."
        )
    return config
# Schema for a single meter entry: a source sensor plus either a classic
# meter type/offset or a cron reset pattern. The two styles are mutually
# exclusive, enforced by period_or_cron; offsets are capped by max_28_days.
METER_CONFIG_SCHEMA = vol.Schema(
    vol.All(
        {
            vol.Required(CONF_SOURCE_SENSOR): cv.entity_id,
            vol.Optional(CONF_NAME): cv.string,
            vol.Optional(CONF_METER_TYPE): vol.In(METER_TYPES),
            vol.Optional(CONF_METER_OFFSET, default=DEFAULT_OFFSET): vol.All(
                cv.time_period, cv.positive_timedelta, max_28_days
            ),
            vol.Optional(CONF_METER_DELTA_VALUES, default=False): cv.boolean,
            vol.Optional(CONF_METER_NET_CONSUMPTION, default=False): cv.boolean,
            vol.Optional(CONF_TARIFFS, default=[]): vol.All(
                cv.ensure_list, [cv.string]
            ),
            vol.Optional(CONF_CRON_PATTERN): validate_cron_pattern,
        },
        period_or_cron,
    )
)

# Top-level schema: a mapping of slug -> meter configuration.
CONFIG_SCHEMA = vol.Schema(
    {DOMAIN: vol.Schema({cv.slug: METER_CONFIG_SCHEMA})}, extra=vol.ALLOW_EXTRA
)
async def async_setup(hass, config):
    """Set up an Utility Meter.

    For each configured meter: without tariffs a single sensor platform
    entry is created; with tariffs a TariffSelect entity plus one sensor
    per tariff are created. Tariff services are registered only when at
    least one meter uses tariffs.
    """
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    hass.data[DATA_UTILITY] = {}
    register_services = False

    for meter, conf in config.get(DOMAIN).items():
        _LOGGER.debug("Setup %s.%s", DOMAIN, meter)

        # Shared per-meter state; sensors append themselves to
        # DATA_TARIFF_SENSORS from the platform side.
        hass.data[DATA_UTILITY][meter] = conf
        hass.data[DATA_UTILITY][meter][DATA_TARIFF_SENSORS] = []

        if not conf[CONF_TARIFFS]:
            # only one entity is required
            hass.async_create_task(
                discovery.async_load_platform(
                    hass,
                    SENSOR_DOMAIN,
                    DOMAIN,
                    [{CONF_METER: meter, CONF_NAME: conf.get(CONF_NAME, meter)}],
                    config,
                )
            )
        else:
            # create tariff selection
            await component.async_add_entities(
                [TariffSelect(meter, list(conf[CONF_TARIFFS]))]
            )
            # Entity id of the selector, looked up later by the sensors.
            hass.data[DATA_UTILITY][meter][CONF_TARIFF_ENTITY] = "{}.{}".format(
                DOMAIN, meter
            )

            # add one meter for each tariff
            tariff_confs = []
            for tariff in conf[CONF_TARIFFS]:
                tariff_confs.append(
                    {
                        CONF_METER: meter,
                        CONF_NAME: f"{meter} {tariff}",
                        CONF_TARIFF: tariff,
                    }
                )

            hass.async_create_task(
                discovery.async_load_platform(
                    hass, SENSOR_DOMAIN, DOMAIN, tariff_confs, config
                )
            )
            # Tariff entities exist, so tariff services must be registered.
            register_services = True

    if register_services:
        component.async_register_entity_service(SERVICE_RESET, {}, "async_reset_meters")
        component.async_register_entity_service(
            SERVICE_SELECT_TARIFF,
            {vol.Required(ATTR_TARIFF): cv.string},
            "async_select_tariff",
        )
        component.async_register_entity_service(
            SERVICE_SELECT_NEXT_TARIFF, {}, "async_next_tariff"
        )

    return True
class TariffSelect(RestoreEntity):
    """Entity that tracks and switches the active tariff of a meter."""

    def __init__(self, name, tariffs):
        """Initialize a tariff selector."""
        self._name = name
        self._current_tariff = None
        self._tariffs = tariffs
        self._icon = TARIFF_ICON

    async def async_added_to_hass(self):
        """Restore the previously selected tariff when added to hass."""
        await super().async_added_to_hass()
        if self._current_tariff is not None:
            return
        state = await self.async_get_last_state()
        restored = state.state if state else None
        # Fall back to the first tariff when nothing valid was restored.
        self._current_tariff = (
            restored if restored in self._tariffs else self._tariffs[0]
        )

    @property
    def should_poll(self):
        """No polling needed; state changes are pushed."""
        return False

    @property
    def name(self):
        """Return the name of the select input."""
        return self._name

    @property
    def icon(self):
        """Return the icon to be used for this entity."""
        return self._icon

    @property
    def state(self):
        """Return the currently selected tariff."""
        return self._current_tariff

    @property
    def extra_state_attributes(self):
        """Expose the full tariff list as a state attribute."""
        return {ATTR_TARIFFS: self._tariffs}

    async def async_reset_meters(self):
        """Reset all sensors of this meter."""
        _LOGGER.debug("reset meter %s", self.entity_id)
        async_dispatcher_send(self.hass, SIGNAL_RESET_METER, self.entity_id)

    async def async_select_tariff(self, tariff):
        """Switch to the given tariff if it is configured."""
        if tariff in self._tariffs:
            self._current_tariff = tariff
            self.async_write_ha_state()
            return
        _LOGGER.warning(
            "Invalid tariff: %s (possible tariffs: %s)",
            tariff,
            ", ".join(self._tariffs),
        )

    async def async_next_tariff(self):
        """Advance to the next tariff in configured order (wraps around)."""
        idx = self._tariffs.index(self._current_tariff)
        self._current_tariff = self._tariffs[(idx + 1) % len(self._tariffs)]
        self.async_write_ha_state()
|
'like'] = self.like
js['family'] = self.family
js['packager'] = self.packager
js['start_system'] = self.start_system
js['has_os_release'] = self.has_os_release
js['fallback_detection'] = self.fallback_detection
return js
class PackageInfo(object):
    """
    Basic information about particular package
    """
    def __init__(self, name, version, arch, repo, size=None, section=None):
        self._version = None
        self.name = name
        self.version = version  # routed through the setter -> Version instance
        self.arch = arch
        self.repo = repo
        self.size = size
        self.section = section

    @property
    def version(self):
        """Package version, stored as a Version object."""
        return self._version

    @version.setter
    def version(self, val):
        self._version = Version(val)

    def __str__(self):
        return '%s-%s.%s' % (self.name, self.version, self.arch)

    def __repr__(self):
        return 'PackageInfo(name=%r, version=%r, arch=%r, repo=%r, size=%r, section=%r)' \
               % (self.name, self.version, self.arch, self.repo, self.size, self.section)

    def to_json(self):
        """
        Converts to the JSON
        :return:
        """
        pairs = [
            ('name', self.name),
            ('version', str(self.version)),
            ('arch', self.arch),
            ('repo', self.repo),
        ]
        if self.size is not None:
            pairs.append(('size', self.size))
        if self.section is not None:
            pairs.append(('section', self.section))
        return collections.OrderedDict(pairs)

    @classmethod
    def from_json(cls, js):
        """
        Converts json dict to the object
        :param js:
        :return:
        """
        obj = cls(name=js['name'], version=js['version'], arch=js['arch'], repo=js['repo'])
        obj.size = js.get('size', obj.size)
        obj.section = js.get('section', obj.section)
        return obj
def get_os():
    """
    Returns basic information about the OS.
    :return: OSInfo
    """
    info = OSInfo()

    # Primary source: systemd os-release file.
    release_path = '/etc/os-release'
    if os.path.isfile(release_path):
        info.name = _get_systemd_os_release_var("ID", filepath=release_path)
        info.version = _get_systemd_os_release_var("VERSION_ID", filepath=release_path)
        info.like = _get_systemd_os_release_var("ID_LIKE", release_path).split(" ")
        info.long_name = _get_systemd_os_release_var("PRETTY_NAME", filepath=release_path)
        info.has_os_release = True
        if not info.long_name:
            info.long_name = _get_systemd_os_release_var("NAME", filepath=release_path)

    # Fallbacks when os-release is missing or incomplete:
    # /etc/redhat-release, /etc/debian_version and /etc/issue.
    if not info.has_os_release or info.like is None or info.version is None or info.name is None:
        os_redhat_release(info)
        os_debian_version(info)
        os_issue(info)

    # Derived fields: likeness, family, major version.
    os_like_detect(info)
    os_family_detect(info)
    os_major_version(info)

    # Packager detection - yum / apt-get
    os_packager(info)

    # Start system - init.d / systemd
    os_start_system(info)
    return info
def os_family_detect(ros):
    """
    OS Family (redhat, debian, ...)

    Sets ros.family from the "like" field first, then refines it from the
    OS name when a family was already detected.
    :param ros:
    :return:
    """
    if util.startswith(ros.like, YUMS):
        ros.family = FAMILY_REDHAT
    if util.startswith(ros.like, DEBS):
        ros.family = FAMILY_DEBIAN
    # Guard against a missing name: the original called ros.name.lower()
    # unconditionally and raised AttributeError when name was None
    # (sibling os_packager guards this case explicitly).
    if ros.family is not None and ros.name is not None:
        name = ros.name.lower()
        if any(name.startswith(x) for x in YUMS):
            ros.family = FAMILY_REDHAT
        if any(name.startswith(x) for x in DEBS):
            ros.family = FAMILY_DEBIAN
    return
def os_packager(ros):
    """
    Detects the package manager (yum / apt) and stores it in ros.packager.

    Detection order: the "like" field, then the OS name, then filesystem
    markers (/etc/yum, /etc/apt/sources.list).
    :param ros:
    :return: ros (now returned consistently from every path; the original
             returned ros, bare None and implicit None depending on branch)
    """
    if ros.like is not None:
        if util.startswith(ros.like, YUMS):
            ros.packager = PKG_YUM
        if util.startswith(ros.like, DEBS):
            ros.packager = PKG_APT
        return ros
    if ros.name is not None:
        name = ros.name.lower()
        if any(name.startswith(x) for x in YUMS):
            ros.packager = PKG_YUM
        if any(name.startswith(x) for x in DEBS):
            ros.packager = PKG_APT
        return ros
    if os.path.exists('/etc/yum'):
        ros.packager = PKG_YUM
    if os.path.exists('/etc/apt/sources.list'):
        ros.packager = PKG_APT
    return ros
def os_start_system(ros):
    """Detect the init system (systemd vs. classic init.d) from /etc/systemd."""
    ros.start_system = START_SYSTEMD if os.path.exists('/etc/systemd') else START_INITD
    return ros
def os_issue(ros):
    """Extract long name / version hints from the first line of /etc/issue."""
    if not os.path.exists('/etc/issue'):
        return ros
    with open('/etc/issue', 'r') as fh:
        issue = fh.readline().strip()
    # Strip getty escape sequences such as \n, \l before matching.
    issue = re.sub(r'\\[a-z]', '', issue).strip()
    release_style = re.match(r'^(.+?)\s+release\s+(.+?)$', issue, re.IGNORECASE)
    numeric_style = re.match(r'^(.+?)\s+([0-9.]+)\s*(LTS)?$', issue, re.IGNORECASE)
    if release_style:
        ros.long_name = release_style.group(1).strip()
        ros.version = release_style.group(2).strip()
    elif numeric_style:
        ros.long_name = numeric_style.group(1).strip()
        ros.version = numeric_style.group(2).strip()
    else:
        ros.long_name = issue
    return ros
def os_debian_version(ros):
    """Read /etc/debian_version; marks the OS as debian-like."""
    if not os.path.exists('/etc/debian_version'):
        return ros
    with open('/etc/debian_version', 'r') as fh:
        debian_ver = fh.readline().strip()
    ros.like = 'debian'
    ros.family = FAMILY_DEBIAN
    if ros.version is None:
        ros.version = debian_ver
    return ros
def os_redhat_release(ros):
    """Parse /etc/redhat-release; marks the OS as redhat-like."""
    if not os.path.exists('/etc/redhat-release'):
        return ros
    with open('/etc/redhat-release', 'r') as fh:
        release_line = fh.readline().strip()
    ros.like = 'redhat'
    ros.family = FAMILY_REDHAT
    match = re.match(r'^(.+?)\s+release\s+(.+?)$', release_line, re.IGNORECASE)
    if match is not None:
        ros.long_name = match.group(1).strip()
        ros.version = match.group(2).strip()
    else:
        ros.long_name = release_line
    return ros
def os_like_detect(ros):
    """
    Fills ros.like from the FLAVORS map (by name, then long name) when not
    already set.
    :param ros:
    :return: ros

    The original used bare ``except:`` clauses, which silently swallowed
    every error (including NameError/KeyboardInterrupt); only a missing
    FLAVORS key is expected here, so catch KeyError explicitly.
    """
    if not ros.like and ros.name is not None:
        try:
            ros.like = FLAVORS[ros.name.lower()]
        except KeyError:
            pass
    if not ros.like and ros.long_name is not None:
        try:
            ros.like = FLAVORS[ros.long_name.lower()]
        except KeyError:
            pass
    return ros
def os_major_version(ros):
    """Store the major version (text before the first '.' or '/') in
    ros.version_major, leaving it untouched when no separator exists."""
    if ros.version is None:
        return ros
    match = re.match(r'(.+?)[/.]', ros.version)
    if match:
        ros.version_major = match.group(1)
    return ros
def get_os_info(filepath="/etc/os-release"):
    """
    Get OS name and version
    :param str filepath: File path of os-release file
    :returns: (os_name, os_version)
    :rtype: `tuple` of `str`
    """
    if os.path.isfile(filepath):
        # Prefer systemd os-release parsing when the file is present.
        name, version = get_systemd_os_info(filepath=filepath)
        if name:
            return (name, version)
    # Fall back to the platform module
    return get_python_os_info()
def get_os_info_ua(filepath="/etc/os-release"):
    """
    Get OS name and version string for User Agent
    :param str filepath: File path of os-release file
    :returns: os_ua
    :rtype: `str`
    """
    if os.path.isfile(filepath):
        # PRETTY_NAME preferred, NAME as a secondary choice.
        os_ua = _get_systemd_os_release_var(
            "PRETTY_NAME", filepath=filepath
        ) or _get_systemd_os_release_var("NAME", filepath=filepath)
        if os_ua:
            return os_ua
    # Fallback
    return " ".join(get_python_os_info())
def get_systemd_os_info(filepath="/etc/os-release"):
    """
    Parse systemd /etc/os-release for distribution information
    :param str filepath: File path of os-release file
    :returns: (os_name, os_version)
    :rtype: `tuple` of `str`
    """
    return (
        _get_systemd_os_release_var("ID", filepath=filepath),
        _get_systemd_os_release_var("VERSION_ID", filepath=filepath),
    )
def get_systemd_os_like(filepath="/etc/os-release"):
"""
Get a list of strings that indicate the distribution likeness to
other |
#!/Users/wuga/Documents/website/wuga/env/bin/python2.7
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
#
# an image viewer
class UI(tkinter.Label):
    """Tk label widget displaying a PIL image."""

    def __init__(self, master, im):
        if im.mode == "1":
            # 1-bit bitmaps render white-on-black
            self.image = ImageTk.BitmapImage(im, foreground="white")
            extra = {"bg": "black"}
        else:
            # everything else goes through a PhotoImage
            self.image = ImageTk.PhotoImage(im)
            extra = {}
        tkinter.Label.__init__(self, master, image=self.image, bd=0, **extra)
#
# script interface
if __name__ == "__main__":
    # Require exactly one argument: the image file to display.
    if not sys.argv[1:]:
        print("Syntax: python viewer.py imagefile")
        sys.exit(1)

    path = sys.argv[1]
    root = tkinter.Tk()
    root.title(path)
    UI(root, Image.open(path)).pack()
    root.mainloop()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.