# -*- test-case-name: twisted.mail.test.test_imap.IMAP4HelperTests -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An IMAP4 protocol implementation
@author: Jp Calderone
To do::
Suspend idle timeout while server is processing
Use an async message parser instead of buffering in memory
Figure out a way to not queue multi-message client requests (Flow? A simple callback?)
Clarify some API docs (Query, etc)
Make APPEND recognize (again) non-existent mailboxes before accepting the literal
"""
import binascii
import codecs
import copy
import functools
import re
import string
import tempfile
import time
import uuid
import email.utils
from itertools import chain
from io import BytesIO
from zope.interface import implementer
from twisted.protocols import basic
from twisted.protocols import policies
from twisted.internet import defer
from twisted.internet import error
from twisted.internet.defer import maybeDeferred
from twisted.python import log, text
from twisted.python.compat import (
_bytesChr, unichr as chr, _b64decodebytes as decodebytes,
_b64encodebytes as encodebytes,
intToBytes, iterbytes, long, nativeString, networkString, unicode,
_matchingString, _PY3, _get_async_param,
)
from twisted.internet import interfaces
from twisted.cred import credentials
from twisted.cred.error import UnauthorizedLogin, UnhandledCredentials
# Re-exported for compatibility reasons
from twisted.mail.interfaces import (
IClientAuthentication, INamespacePresenter,
IAccountIMAP as IAccount,
IMessageIMAPPart as IMessagePart,
IMessageIMAP as IMessage,
IMessageIMAPFile as IMessageFile,
ISearchableIMAPMailbox as ISearchableMailbox,
IMessageIMAPCopier as IMessageCopier,
IMailboxIMAPInfo as IMailboxInfo,
IMailboxIMAP as IMailbox,
ICloseableMailboxIMAP as ICloseableMailbox,
IMailboxIMAPListener as IMailboxListener
)
from twisted.mail._cred import (
CramMD5ClientAuthenticator,
LOGINAuthenticator, LOGINCredentials,
PLAINAuthenticator, PLAINCredentials)
from twisted.mail._except import (
IMAP4Exception, IllegalClientResponse, IllegalOperation, MailboxException,
IllegalMailboxEncoding, MailboxCollision, NoSuchMailbox, ReadOnlyMailbox,
UnhandledResponse, NegativeResponse, NoSupportedAuthentication,
IllegalIdentifierError, IllegalQueryError, MismatchedNesting,
MismatchedQuoting, IllegalServerResponse,
)
# locale-independent month names to use instead of strftime's
_MONTH_NAMES = dict(zip(
range(1, 13),
"Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec".split()))
def _swap(this, that, ifIs):
"""
Swap C{this} with C{that} if C{this} is C{ifIs}.
@param this: The object that may be replaced.
@param that: The object that may replace C{this}.
@param ifIs: An object whose identity will be compared to
C{this}.
"""
return that if this is ifIs else this
def _swapAllPairs(of, that, ifIs):
"""
Swap each element in each pair in C{of} with C{that} if it is
C{ifIs}.
@param of: A list of 2-L{tuple}s, whose members may be the object
C{that}
@type of: L{list} of 2-L{tuple}s
@param ifIs: An object whose identity will be compared to members
of each pair in C{of}
@return: A L{list} of 2-L{tuple}s with all occurrences of C{ifIs}
replaced with C{that}
"""
return [(_swap(first, that, ifIs), _swap(second, that, ifIs))
for first, second in of]
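# Illustrative example of the helpers above (a sketch, not part of the
# original module):
#   _swapAllPairs([(1, None), (None, 5)], that=float('inf'), ifIs=None)
#   == [(1, float('inf')), (float('inf'), 5)]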
class MessageSet(object):
"""
A set of message identifiers usable by both L{IMAP4Client} and
L{IMAP4Server} via L{IMailboxIMAP.store} and
L{IMailboxIMAP.fetch}.
These identifiers can be either message sequence numbers or unique
identifiers. See Section 2.3.1, "Message Numbers", RFC 3501.
This represents the C{sequence-set} described in Section 9,
"Formal Syntax" of RFC 3501:
- A L{MessageSet} can describe a single identifier, e.g.
C{MessageSet(1)}
- A L{MessageSet} can describe C{*} via L{None}, e.g.
C{MessageSet(None)}
- A L{MessageSet} can describe a range of identifiers, e.g.
C{MessageSet(1, 2)}. The range is inclusive and unordered
(see C{seq-range} in RFC 3501, Section 9), so that
C{MessageSet(2, 1)} is equivalent to C{MessageSet(1, 2)}, and
both describe messages 1 and 2. Ranges can include C{*} by
specifying L{None}, e.g. C{MessageSet(None, 1)}. In all
cases ranges are normalized so that the smallest identifier
comes first, and L{None} always comes last; C{MessageSet(2, 1)}
becomes C{MessageSet(1, 2)} and C{MessageSet(None, 1)}
becomes C{MessageSet(1, None)}
- A L{MessageSet} can describe a sequence of single
identifiers and ranges, constructed by addition.
C{MessageSet(1) + MessageSet(5, 10)} refers to the message
identified by C{1} and the messages identified by C{5}
through C{10}.
B{NB: The meaning of * varies, but it always represents the
largest number in use}.
B{For servers}: Your L{IMailboxIMAP} provider must set
L{MessageSet.last} to the highest-valued identifier (unique or
message sequence) before iterating over it.
B{For clients}: C{*} consumes ranges smaller than it, e.g.
C{MessageSet(1, 100) + MessageSet(50, None)} is equivalent to
C{1:*}.
@type getnext: Function taking L{int} returning L{int}
@ivar getnext: A function that returns the next message number,
used when iterating through the L{MessageSet}. By default, a
function returning the next integer is supplied, but as this
can be rather inefficient for sparse UID iterations, it is
recommended to supply one when messages are requested by UID.
The argument is provided as a hint to the implementation and
may be ignored if it makes sense to do so (eg, if an iterator
is being used that maintains its own state, it is guaranteed
that it will not be called out-of-order).
"""
_empty = []
_infinity = float('inf')
def __init__(self, start=_empty, end=_empty):
"""
Create a new MessageSet()
@type start: Optional L{int}
@param start: Start of range, or only message number
@type end: Optional L{int}
@param end: End of range.
"""
self._last = self._empty # Last message/UID in use
self.ranges = [] # List of ranges included
self.getnext = lambda x: x+1 # A function which will return the next
# message id. Handy for UID requests.
if start is self._empty:
return
if isinstance(start, list):
self.ranges = start[:]
self.clean()
else:
self.add(start,end)
# Ooo. A property.
def last():
def _setLast(self, value):
if self._last is not self._empty:
raise ValueError("last already set")
self._last = value
for i, (l, h) in enumerate(self.ranges):
if l is None:
l = value
if h is None:
h = value
if l > h:
l, h = h, l
self.ranges[i] = (l, h)
self.clean()
def _getLast(self):
return self._last
doc = '''
Replaces all occurrences of "*". This should be the
largest number in use. Must be set before attempting to
use the MessageSet as a container.
@raises: L{ValueError} if a largest value has already
been set.
'''
return _getLast, _setLast, None, doc
last = property(*last())
def add(self, start, end=_empty):
"""
Add another range
@type start: L{int}
@param start: Start of range, or only message number
@type end: Optional L{int}
@param end: End of range.
"""
if end is self._empty:
end = start
if self._last is not self._empty:
if start is None:
start = self.last
if end is None:
end = self.last
start, end = sorted(
[start, end],
key=functools.partial(_swap, that=self._infinity, ifIs=None))
self.ranges.append((start, end))
self.clean()
def __add__(self, other):
if isinstance(other, MessageSet):
ranges = self.ranges + other.ranges
return MessageSet(ranges)
else:
res = MessageSet(self.ranges)
if self.last is not self._empty:
res.last = self.last
try:
res.add(*other)
except TypeError:
res.add(other)
return res
def extend(self, other):
"""
Extend our messages with another message or set of messages.
@param other: The messages to include.
@type other: L{MessageSet}, L{tuple} of two L{int}s, or a
single L{int}
"""
if isinstance(other, MessageSet):
self.ranges.extend(other.ranges)
self.clean()
else:
try:
self.add(*other)
except TypeError:
self.add(other)
return self
def clean(self):
"""
Clean ranges list, combining adjacent ranges
"""
ranges = sorted(_swapAllPairs(self.ranges,
that=self._infinity,
ifIs=None))
mergedRanges = [(float('-inf'), float('-inf'))]
for low, high in ranges:
previousLow, previousHigh = mergedRanges[-1]
if previousHigh < low - 1:
mergedRanges.append((low, high))
continue
mergedRanges[-1] = (min(previousLow, low),
max(previousHigh, high))
self.ranges = _swapAllPairs(mergedRanges[1:],
that=None,
ifIs=self._infinity)
def _noneInRanges(self):
"""
Is there a L{None} in our ranges?
L{MessageSet.clean} merges overlapping or consecutive ranges.
L{None} represents a value larger than any number. There are
thus two cases:
1. C{(x, *) + (y, z)} such that C{x} is smaller than C{y}
2. C{(z, *) + (x, y)} such that C{z} is larger than C{y}
(Other cases, such as C{y < x < z}, can be split into these
two cases; for example C{(y - 1, y)} + C{(x, x) + (z, z + 1)})
In case 1, C{* > y} and C{* > z}, so C{(x, *) + (y, z) = (x,
*)}
In case 2, C{z > x and z > y}, so the intervals do not merge,
and the ranges are sorted as C{[(x, y), (z, *)]}. C{*} is
represented as C{(*, *)}, so this is the same as case 2, but with
a C{z} that is greater than everything.
The result is that there is a maximum of two L{None}s, and one
of them has to be the high element in the last tuple in
C{self.ranges}. That means checking if C{self.ranges[-1][-1]}
is L{None} suffices to check if I{any} element is L{None}.
@return: L{True} if L{None} is in some range in ranges and
L{False} otherwise.
"""
return self.ranges[-1][-1] is None
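# Illustrative sketch of the reasoning above: MessageSet(5, None) +
# MessageSet(1, 2) keeps two ranges, [(1, 2), (5, None)], while
# MessageSet(1, None) + MessageSet(2, 5) collapses to [(1, None)]; in either
# case any surviving None is the high element of the last range, which is all
# __contains__ and __iter__ below need to check.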
def __contains__(self, value):
"""
May raise TypeError if we encounter an open-ended range
@param value: Is this in our ranges?
@type value: L{int}
"""
if self._noneInRanges():
raise TypeError(
"Can't determine membership; last value not set")
for low, high in self.ranges:
if low <= value <= high:
return True
return False
def _iterator(self):
for l, h in self.ranges:
l = self.getnext(l-1)
while l <= h:
yield l
l = self.getnext(l)
def __iter__(self):
if self._noneInRanges():
raise TypeError("Can't iterate; last value not set")
return self._iterator()
def __len__(self):
res = 0
for l, h in self.ranges:
if l is None:
res += 1
elif h is None:
raise TypeError("Can't size object; last value not set")
else:
res += (h - l) + 1
return res
def __str__(self):
p = []
for low, high in self.ranges:
if low == high:
if low is None:
p.append('*')
else:
p.append(str(low))
elif high is None:
p.append('%d:*' % (low,))
else:
p.append('%d:%d' % (low, high))
return ','.join(p)
def __repr__(self):
return '<MessageSet %s>' % (str(self),)
def __eq__(self, other):
if isinstance(other, MessageSet):
return self.ranges == other.ranges
return False
def __ne__(self, other):
return not self.__eq__(other)
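# A short usage sketch for MessageSet (illustrative only; the values shown
# follow from the implementation above):
#
#   >>> MessageSet(1, 5) + MessageSet(4, 10)      # overlapping ranges merge
#   <MessageSet 1:10>
#   >>> str(MessageSet(1) + MessageSet(5, None))  # None renders as "*"
#   '1,5:*'
#   >>> ms = MessageSet(3, None)
#   >>> ms.last = 7            # replaces "*" so the set can be iterated
#   >>> list(ms)
#   [3, 4, 5, 6, 7]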
class LiteralString:
def __init__(self, size, deferred):
self.size = size
self.data = []
self.defer = deferred
def write(self, data):
self.size -= len(data)
passon = None
if self.size > 0:
self.data.append(data)
else:
if self.size:
data, passon = data[:self.size], data[self.size:]
else:
passon = b''
if data:
self.data.append(data)
return passon
def callback(self, line):
"""
Call deferred with data and rest of line
"""
self.defer.callback((b''.join(self.data), line))
class LiteralFile:
_memoryFileLimit = 1024 * 1024 * 10
def __init__(self, size, deferred):
self.size = size
self.defer = deferred
if size > self._memoryFileLimit:
self.data = tempfile.TemporaryFile()
else:
self.data = BytesIO()
def write(self, data):
self.size -= len(data)
passon = None
if self.size > 0:
self.data.write(data)
else:
if self.size:
data, passon = data[:self.size], data[self.size:]
else:
passon = b''
if data:
self.data.write(data)
return passon
def callback(self, line):
"""
Call deferred with data and rest of line
"""
self.data.seek(0,0)
self.defer.callback((self.data, line))
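# Sketch of how the two literal helpers above are driven (illustrative): the
# protocol feeds raw bytes to write() until the declared octet count has been
# consumed; write() then returns the surplus so line-mode parsing can resume,
# and callback() fires the waiting Deferred with (data, rest-of-line).
#
#   d = defer.Deferred()
#   ls = LiteralString(5, d)
#   ls.write(b'helloXYZ')   # returns b'XYZ'; b'hello' is buffered for d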
class WriteBuffer:
"""
Buffer up a bunch of writes before sending them all to a transport at once.
"""
def __init__(self, transport, size=8192):
self.bufferSize = size
self.transport = transport
self._length = 0
self._writes = []
def write(self, s):
self._length += len(s)
self._writes.append(s)
if self._length > self.bufferSize:
self.flush()
def flush(self):
if self._writes:
self.transport.writeSequence(self._writes)
self._writes = []
self._length = 0
class Command:
_1_RESPONSES = (b'CAPABILITY', b'FLAGS', b'LIST', b'LSUB', b'STATUS', b'SEARCH', b'NAMESPACE')
_2_RESPONSES = (b'EXISTS', b'EXPUNGE', b'FETCH', b'RECENT')
_OK_RESPONSES = (b'UIDVALIDITY', b'UNSEEN', b'READ-WRITE', b'READ-ONLY', b'UIDNEXT', b'PERMANENTFLAGS')
defer = None
def __init__(self, command, args=None, wantResponse=(),
continuation=None, *contArgs, **contKw):
self.command = command
self.args = args
self.wantResponse = wantResponse
self.continuation = lambda x: continuation(x, *contArgs, **contKw)
self.lines = []
def __repr__(self):
return "<imap4.Command {!r} {!r} {!r} {!r} {!r}>".format(
self.command, self.args, self.wantResponse, self.continuation,
self.lines
)
def format(self, tag):
if self.args is None:
return b' '.join((tag, self.command))
return b' '.join((tag, self.command, self.args))
def finish(self, lastLine, unusedCallback):
send = []
unuse = []
for L in self.lines:
names = parseNestedParens(L)
N = len(names)
if (N >= 1 and names[0] in self._1_RESPONSES or
N >= 2 and names[1] in self._2_RESPONSES or
N >= 2 and names[0] == b'OK' and isinstance(names[1], list)
and names[1][0] in self._OK_RESPONSES):
send.append(names)
else:
unuse.append(names)
d, self.defer = self.defer, None
d.callback((send, lastLine))
if unuse:
unusedCallback(unuse)
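# Illustrative classification performed by Command.finish above: an untagged
# line parsed as [b'3', b'FETCH', [...]] is kept because names[1] appears in
# _2_RESPONSES, and [b'OK', [b'UIDNEXT', b'4392'], ...] is kept via
# _OK_RESPONSES; anything else is handed to unusedCallback().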
# Some constants to help define what an atom is and is not - see the grammar
# section of the IMAP4 RFC - <https://tools.ietf.org/html/rfc3501#section-9>.
# Some definitions (SP, CTL, DQUOTE) are also from the ABNF RFC -
# <https://tools.ietf.org/html/rfc2234>.
_SP = b' '
_CTL = b''.join(_bytesChr(ch) for ch in chain(range(0x21), range(0x80, 0x100)))
# It is easier to define ATOM-CHAR in terms of what it does not match than in
# terms of what it does match.
_nonAtomChars = b']\\\\(){%*"' + _SP + _CTL
# _nonAtomRE is only used in Query, so it uses native strings.
if _PY3:
# On Python 3, decode the bytes so the pattern is a native (unicode) string.
_nativeNonAtomChars = _nonAtomChars.decode('charmap')
else:
_nativeNonAtomChars = _nonAtomChars
_nonAtomRE = re.compile('[' + _nativeNonAtomChars + ']')
# This is all the bytes that match the ATOM-CHAR from the grammar in the RFC.
_atomChars = b''.join(_bytesChr(ch) for ch in list(range(0x100)) if _bytesChr(ch) not in _nonAtomChars)
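# For example (illustrative), the native-string regex above distinguishes
# atoms from strings that would need quoting:
#   _nonAtomRE.search('UNSEEN') is None         # every char is an ATOM-CHAR
#   _nonAtomRE.search('two words') is not None  # SP is not an ATOM-CHAR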
@implementer(IMailboxListener)
class IMAP4Server(basic.LineReceiver, policies.TimeoutMixin):
"""
Protocol implementation for an IMAP4rev1 server.
The server can be in any of four states:
- Non-authenticated
- Authenticated
- Selected
- Logout
"""
# Identifier for this server software
IDENT = b'Twisted IMAP4rev1 Ready'
# Number of seconds before idle timeout
# Initially 1 minute. Raised to 30 minutes after login.
timeOut = 60
POSTAUTH_TIMEOUT = 60 * 30
# Whether STARTTLS has been issued successfully yet or not.
startedTLS = False
# Whether our transport supports TLS
canStartTLS = False
# Mapping of tags to commands we have received
tags = None
# The object which will handle logins for us
portal = None
# The account object for this connection
account = None
# Logout callback
_onLogout = None
# The currently selected mailbox
mbox = None
# Command data to be processed when literal data is received
_pendingLiteral = None
# Maximum length to accept for a "short" string literal
_literalStringLimit = 4096
# IChallengeResponse factories for AUTHENTICATE command
challengers = None
# Search terms the implementation of which needs to be passed both the last
# message identifier (UID) and the last sequence id.
_requiresLastMessageInfo = set([b"OR", b"NOT", b"UID"])
state = 'unauth'
parseState = 'command'
def __init__(self, chal = None, contextFactory = None, scheduler = None):
if chal is None:
chal = {}
self.challengers = chal
self.ctx = contextFactory
if scheduler is None:
scheduler = iterateInReactor
self._scheduler = scheduler
self._queuedAsync = []
def capabilities(self):
cap = {b'AUTH': list(self.challengers.keys())}
if self.ctx and self.canStartTLS:
if not self.startedTLS and interfaces.ISSLTransport(self.transport, None) is None:
cap[b'LOGINDISABLED'] = None
cap[b'STARTTLS'] = None
cap[b'NAMESPACE'] = None
cap[b'IDLE'] = None
return cap
def connectionMade(self):
self.tags = {}
self.canStartTLS = interfaces.ITLSTransport(self.transport, None) is not None
self.setTimeout(self.timeOut)
self.sendServerGreeting()
def connectionLost(self, reason):
self.setTimeout(None)
if self._onLogout:
self._onLogout()
self._onLogout = None
def timeoutConnection(self):
self.sendLine(b'* BYE Autologout; connection idle too long')
self.transport.loseConnection()
if self.mbox:
self.mbox.removeListener(self)
cmbx = ICloseableMailbox(self.mbox, None)
if cmbx is not None:
maybeDeferred(cmbx.close).addErrback(log.err)
self.mbox = None
self.state = 'timeout'
def rawDataReceived(self, data):
self.resetTimeout()
passon = self._pendingLiteral.write(data)
if passon is not None:
self.setLineMode(passon)
# Avoid processing commands while buffers are being dumped to
# our transport
blocked = None
def _unblock(self):
commands = self.blocked
self.blocked = None
while commands and self.blocked is None:
self.lineReceived(commands.pop(0))
if self.blocked is not None:
self.blocked.extend(commands)
def lineReceived(self, line):
if self.blocked is not None:
self.blocked.append(line)
return
self.resetTimeout()
f = getattr(self, 'parse_' + self.parseState)
try:
f(line)
except Exception as e:
self.sendUntaggedResponse(b'BAD Server error: ' + networkString(str(e)))
log.err()
def parse_command(self, line):
args = line.split(None, 2)
rest = None
if len(args) == 3:
tag, cmd, rest = args
elif len(args) == 2:
tag, cmd = args
elif len(args) == 1:
tag = args[0]
self.sendBadResponse(tag, b'Missing command')
return None
else:
self.sendBadResponse(None, b'Null command')
return None
cmd = cmd.upper()
try:
return self.dispatchCommand(tag, cmd, rest)
except IllegalClientResponse as e:
self.sendBadResponse(tag, b'Illegal syntax: ' + networkString(str(e)))
except IllegalOperation as e:
self.sendNegativeResponse(tag, b'Illegal operation: ' + networkString(str(e)))
except IllegalMailboxEncoding as e:
self.sendNegativeResponse(tag, b'Illegal mailbox name: ' + networkString(str(e)))
def parse_pending(self, line):
d = self._pendingLiteral
self._pendingLiteral = None
self.parseState = 'command'
d.callback(line)
def dispatchCommand(self, tag, cmd, rest, uid=None):
f = self.lookupCommand(cmd)
if f:
fn = f[0]
parseargs = f[1:]
self.__doCommand(tag, fn, [self, tag], parseargs, rest, uid)
else:
self.sendBadResponse(tag, b'Unsupported command')
def lookupCommand(self, cmd):
return getattr(self, '_'.join((self.state, nativeString(cmd.upper()))), None)
def __doCommand(self, tag, handler, args, parseargs, line, uid):
for (i, arg) in enumerate(parseargs):
if callable(arg):
parseargs = parseargs[i+1:]
maybeDeferred(arg, self, line).addCallback(
self.__cbDispatch, tag, handler, args,
parseargs, uid).addErrback(self.__ebDispatch, tag)
return
else:
args.append(arg)
if line:
# Too many arguments
raise IllegalClientResponse("Too many arguments for command: " + repr(line))
if uid is not None:
handler(uid=uid, *args)
else:
handler(*args)
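# Sketch of the dispatch path for a line like b'01 LOGIN alice sesame'
# (illustrative): parse_command() splits off the tag and command,
# lookupCommand() finds unauth_LOGIN = (do_LOGIN, arg_astring,
# arg_finalastring), and __doCommand() runs each parser in turn (possibly via
# a Deferred when a literal is involved), finally calling
# do_LOGIN(self, b'01', b'alice', b'sesame'). Non-callable entries in such a
# tuple (e.g. the 1 and b'SELECT' in auth_SELECT) are passed straight through
# to the handler as extra arguments.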
def __cbDispatch(self, result, tag, fn, args, parseargs, uid):
(arg, rest) = result
args.append(arg)
self.__doCommand(tag, fn, args, parseargs, rest, uid)
def __ebDispatch(self, failure, tag):
if failure.check(IllegalClientResponse):
self.sendBadResponse(tag, b'Illegal syntax: ' + networkString(str(failure.value)))
elif failure.check(IllegalOperation):
self.sendNegativeResponse(tag, b'Illegal operation: ' +
networkString(str(failure.value)))
elif failure.check(IllegalMailboxEncoding):
self.sendNegativeResponse(tag, b'Illegal mailbox name: ' +
networkString(str(failure.value)))
else:
self.sendBadResponse(tag, b'Server error: ' + networkString(str(failure.value)))
log.err(failure)
def _stringLiteral(self, size):
if size > self._literalStringLimit:
raise IllegalClientResponse(
"Literal too long! I accept at most %d octets" %
(self._literalStringLimit,))
d = defer.Deferred()
self.parseState = 'pending'
self._pendingLiteral = LiteralString(size, d)
self.sendContinuationRequest(
networkString('Ready for %d octets of text' % size))
self.setRawMode()
return d
def _fileLiteral(self, size):
d = defer.Deferred()
self.parseState = 'pending'
self._pendingLiteral = LiteralFile(size, d)
self.sendContinuationRequest(
networkString('Ready for %d octets of data' % size))
self.setRawMode()
return d
def arg_finalastring(self, line):
"""
Parse an astring from line that represents a command's final
argument. This special case exists to enable parsing empty
string literals.
@param line: A line that contains a string literal.
@type line: L{bytes}
@return: A 2-tuple containing the parsed argument and any
trailing data, or a L{Deferred} that fires with that
2-tuple
@rtype: L{tuple} of (L{bytes}, L{bytes}) or a L{Deferred}
@see: https://twistedmatrix.com/trac/ticket/9207
"""
return self.arg_astring(line, final=True)
def arg_astring(self, line, final=False):
"""
Parse an astring from the line, return (arg, rest), possibly
via a deferred (to handle literals)
@param line: A line that contains a string literal.
@type line: L{bytes}
@param final: Is this the final argument?
@type final: L{bool}
@return: A 2-tuple containing the parsed argument and any
trailing data, or a L{Deferred} that fires with that
2-tuple
@rtype: L{tuple} of (L{bytes}, L{bytes}) or a L{Deferred}
"""
line = line.strip()
if not line:
raise IllegalClientResponse("Missing argument")
d = None
arg, rest = None, None
if line[0:1] == b'"':
try:
spam, arg, rest = line.split(b'"',2)
rest = rest[1:] # Strip space
except ValueError:
raise IllegalClientResponse("Unmatched quotes")
elif line[0:1] == b'{':
# literal
if line[-1:] != b'}':
raise IllegalClientResponse("Malformed literal")
try:
size = int(line[1:-1])
except ValueError:
raise IllegalClientResponse(
"Bad literal size: " + repr(line[1:-1]))
if final and not size:
return (b'', b'')
d = self._stringLiteral(size)
else:
arg = line.split(b' ',1)
if len(arg) == 1:
arg.append(b'')
arg, rest = arg
return d or (arg, rest)
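# Illustrative results of arg_astring (assuming no literal continuation is
# pending):
#   arg_astring(b'"hello world" rest')  ->  (b'hello world', b'rest')
#   arg_astring(b'hello rest')          ->  (b'hello', b'rest')
#   arg_astring(b'{5}')                 ->  a Deferred that later fires with
#                                           (the 5 literal octets, the rest
#                                           of the continuation line)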
# ATOM: Any CHAR except ( ) { % * " \ ] CTL SP (CHAR is 7bit)
atomre = re.compile(b'(?P<atom>[' + re.escape(_atomChars) + b']+)( (?P<rest>.*$)|$)')
def arg_atom(self, line):
"""
Parse an atom from the line
"""
if not line:
raise IllegalClientResponse("Missing argument")
m = self.atomre.match(line)
if m:
return m.group('atom'), m.group('rest')
else:
raise IllegalClientResponse("Malformed ATOM")
def arg_plist(self, line):
"""
Parse a (non-nested) parenthesised list from the line
"""
if not line:
raise IllegalClientResponse("Missing argument")
if line[:1] != b"(":
raise IllegalClientResponse("Missing parenthesis")
i = line.find(b")")
if i == -1:
raise IllegalClientResponse("Mismatched parenthesis")
return (parseNestedParens(line[1:i],0), line[i+2:])
def arg_literal(self, line):
"""
Parse a literal from the line
"""
if not line:
raise IllegalClientResponse("Missing argument")
if line[:1] != b'{':
raise IllegalClientResponse("Missing literal")
if line[-1:] != b'}':
raise IllegalClientResponse("Malformed literal")
try:
size = int(line[1:-1])
except ValueError:
raise IllegalClientResponse(
"Bad literal size: {!r}".format(line[1:-1]))
return self._fileLiteral(size)
def arg_searchkeys(self, line):
"""
searchkeys
"""
query = parseNestedParens(line)
# XXX Should really use list of search terms and parse into
# a proper tree
return (query, b'')
def arg_seqset(self, line):
"""
sequence-set
"""
rest = b''
arg = line.split(b' ',1)
if len(arg) == 2:
rest = arg[1]
arg = arg[0]
try:
return (parseIdList(arg), rest)
except IllegalIdentifierError as e:
raise IllegalClientResponse("Bad message number " + str(e))
def arg_fetchatt(self, line):
"""
fetch-att
"""
p = _FetchParser()
p.parseString(line)
return (p.result, b'')
def arg_flaglist(self, line):
"""
Flag part of store-att-flag
"""
flags = []
if line[0:1] == b'(':
if line[-1:] != b')':
raise IllegalClientResponse("Mismatched parenthesis")
line = line[1:-1]
while line:
m = self.atomre.search(line)
if not m:
raise IllegalClientResponse("Malformed flag")
if line[0:1] == b'\\' and m.start() == 1:
flags.append(b'\\' + m.group('atom'))
elif m.start() == 0:
flags.append(m.group('atom'))
else:
raise IllegalClientResponse("Malformed flag")
line = m.group('rest')
return (flags, b'')
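# Illustrative results of arg_flaglist:
#   arg_flaglist(b'(\\Seen \\Deleted)')  ->  ([b'\\Seen', b'\\Deleted'], b'')
#   arg_flaglist(b'Custom')              ->  ([b'Custom'], b'')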
def arg_line(self, line):
"""
Command line of UID command
"""
return (line, b'')
def opt_plist(self, line):
"""
Optional parenthesised list
"""
if line.startswith(b'('):
return self.arg_plist(line)
else:
return (None, line)
def opt_datetime(self, line):
"""
Optional date-time string
"""
if line.startswith(b'"'):
try:
spam, date, rest = line.split(b'"',2)
except ValueError:
raise IllegalClientResponse("Malformed date-time")
return (date, rest[1:])
else:
return (None, line)
def opt_charset(self, line):
"""
Optional charset of SEARCH command
"""
if line[:7].upper() == b'CHARSET':
arg = line.split(b' ',2)
if len(arg) == 1:
raise IllegalClientResponse("Missing charset identifier")
if len(arg) == 2:
arg.append(b'')
spam, arg, rest = arg
return (arg, rest)
else:
return (None, line)
def sendServerGreeting(self):
msg = (b'[CAPABILITY ' + b' '.join(self.listCapabilities()) + b'] ' +
self.IDENT)
self.sendPositiveResponse(message=msg)
def sendBadResponse(self, tag = None, message = b''):
self._respond(b'BAD', tag, message)
def sendPositiveResponse(self, tag = None, message = b''):
self._respond(b'OK', tag, message)
def sendNegativeResponse(self, tag = None, message = b''):
self._respond(b'NO', tag, message)
def sendUntaggedResponse(self, message, isAsync=None, **kwargs):
isAsync = _get_async_param(isAsync, **kwargs)
if not isAsync or (self.blocked is None):
self._respond(message, None, None)
else:
self._queuedAsync.append(message)
def sendContinuationRequest(self, msg = b'Ready for additional command text'):
if msg:
self.sendLine(b'+ ' + msg)
else:
self.sendLine(b'+')
def _respond(self, state, tag, message):
if state in (b'OK', b'NO', b'BAD') and self._queuedAsync:
lines = self._queuedAsync
self._queuedAsync = []
for msg in lines:
self._respond(msg, None, None)
if not tag:
tag = b'*'
if message:
self.sendLine(b' '.join((tag, state, message)))
else:
self.sendLine(b' '.join((tag, state)))
def listCapabilities(self):
caps = [b'IMAP4rev1']
for c, v in self.capabilities().items():
if v is None:
caps.append(c)
elif len(v):
caps.extend([(c + b'=' + cap) for cap in v])
return caps
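# Illustrative: if capabilities() returned
#   {b'AUTH': [b'CRAM-MD5', b'LOGIN'], b'NAMESPACE': None, b'IDLE': None}
# this would yield
#   [b'IMAP4rev1', b'AUTH=CRAM-MD5', b'AUTH=LOGIN', b'NAMESPACE', b'IDLE']
# (ordering follows the dictionary's iteration order).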
def do_CAPABILITY(self, tag):
self.sendUntaggedResponse(b'CAPABILITY ' + b' '.join(self.listCapabilities()))
self.sendPositiveResponse(tag, b'CAPABILITY completed')
unauth_CAPABILITY = (do_CAPABILITY,)
auth_CAPABILITY = unauth_CAPABILITY
select_CAPABILITY = unauth_CAPABILITY
logout_CAPABILITY = unauth_CAPABILITY
def do_LOGOUT(self, tag):
self.sendUntaggedResponse(b'BYE Nice talking to you')
self.sendPositiveResponse(tag, b'LOGOUT successful')
self.transport.loseConnection()
unauth_LOGOUT = (do_LOGOUT,)
auth_LOGOUT = unauth_LOGOUT
select_LOGOUT = unauth_LOGOUT
logout_LOGOUT = unauth_LOGOUT
def do_NOOP(self, tag):
self.sendPositiveResponse(tag, b'NOOP No operation performed')
unauth_NOOP = (do_NOOP,)
auth_NOOP = unauth_NOOP
select_NOOP = unauth_NOOP
logout_NOOP = unauth_NOOP
def do_AUTHENTICATE(self, tag, args):
args = args.upper().strip()
if args not in self.challengers:
self.sendNegativeResponse(tag, b'AUTHENTICATE method unsupported')
else:
self.authenticate(self.challengers[args](), tag)
unauth_AUTHENTICATE = (do_AUTHENTICATE, arg_atom)
def authenticate(self, chal, tag):
if self.portal is None:
self.sendNegativeResponse(tag, b'Temporary authentication failure')
return
self._setupChallenge(chal, tag)
def _setupChallenge(self, chal, tag):
try:
challenge = chal.getChallenge()
except Exception as e:
self.sendBadResponse(tag, b'Server error: ' + networkString(str(e)))
else:
coded = encodebytes(challenge)[:-1]
self.parseState = 'pending'
self._pendingLiteral = defer.Deferred()
self.sendContinuationRequest(coded)
self._pendingLiteral.addCallback(self.__cbAuthChunk, chal, tag)
self._pendingLiteral.addErrback(self.__ebAuthChunk, tag)
def __cbAuthChunk(self, result, chal, tag):
try:
uncoded = decodebytes(result)
except binascii.Error:
raise IllegalClientResponse("Malformed Response - not base64")
chal.setResponse(uncoded)
if chal.moreChallenges():
self._setupChallenge(chal, tag)
else:
self.portal.login(chal, None, IAccount).addCallbacks(
self.__cbAuthResp,
self.__ebAuthResp,
(tag,), None, (tag,), None
)
def __cbAuthResp(self, result, tag):
(iface, avatar, logout) = result
assert iface is IAccount, "IAccount is the only supported interface"
self.account = avatar
self.state = 'auth'
self._onLogout = logout
self.sendPositiveResponse(tag, b'Authentication successful')
self.setTimeout(self.POSTAUTH_TIMEOUT)
def __ebAuthResp(self, failure, tag):
if failure.check(UnauthorizedLogin):
self.sendNegativeResponse(tag, b'Authentication failed: unauthorized')
elif failure.check(UnhandledCredentials):
self.sendNegativeResponse(tag, b'Authentication failed: server misconfigured')
else:
self.sendBadResponse(tag, b'Server error: login failed unexpectedly')
log.err(failure)
def __ebAuthChunk(self, failure, tag):
self.sendNegativeResponse(tag, b'Authentication failed: ' + networkString(str(failure.value)))
def do_STARTTLS(self, tag):
if self.startedTLS:
self.sendNegativeResponse(tag, b'TLS already negotiated')
elif self.ctx and self.canStartTLS:
self.sendPositiveResponse(tag, b'Begin TLS negotiation now')
self.transport.startTLS(self.ctx)
self.startedTLS = True
self.challengers = self.challengers.copy()
if b'LOGIN' not in self.challengers:
self.challengers[b'LOGIN'] = LOGINCredentials
if b'PLAIN' not in self.challengers:
self.challengers[b'PLAIN'] = PLAINCredentials
else:
self.sendNegativeResponse(tag, b'TLS not available')
unauth_STARTTLS = (do_STARTTLS,)
def do_LOGIN(self, tag, user, passwd):
if b'LOGINDISABLED' in self.capabilities():
self.sendBadResponse(tag, b'LOGIN is disabled before STARTTLS')
return
maybeDeferred(self.authenticateLogin, user, passwd
).addCallback(self.__cbLogin, tag
).addErrback(self.__ebLogin, tag
)
unauth_LOGIN = (do_LOGIN, arg_astring, arg_finalastring)
def authenticateLogin(self, user, passwd):
"""
Lookup the account associated with the given parameters
Override this method to define the desired authentication behavior.
The default behavior is to defer authentication to C{self.portal}
if it is not None, or to deny the login otherwise.
@type user: L{str}
@param user: The username to lookup
@type passwd: L{str}
@param passwd: The password to login with
"""
if self.portal:
return self.portal.login(
credentials.UsernamePassword(user, passwd),
None, IAccount
)
raise UnauthorizedLogin()
def __cbLogin(self, result, tag):
(iface, avatar, logout) = result
if iface is not IAccount:
self.sendBadResponse(tag, b'Server error: login returned unexpected value')
log.err("__cbLogin called with %r, IAccount expected" % (iface,))
else:
self.account = avatar
self._onLogout = logout
self.sendPositiveResponse(tag, b'LOGIN succeeded')
self.state = 'auth'
self.setTimeout(self.POSTAUTH_TIMEOUT)
def __ebLogin(self, failure, tag):
if failure.check(UnauthorizedLogin):
self.sendNegativeResponse(tag, b'LOGIN failed')
else:
self.sendBadResponse(tag, b'Server error: ' + networkString(str(failure.value)))
log.err(failure)
def do_NAMESPACE(self, tag):
personal = public = shared = None
np = INamespacePresenter(self.account, None)
if np is not None:
personal = np.getPersonalNamespaces()
public = np.getSharedNamespaces()
shared = np.getSharedNamespaces()
self.sendUntaggedResponse(b'NAMESPACE ' + collapseNestedLists([personal, public, shared]))
self.sendPositiveResponse(tag, b"NAMESPACE command completed")
auth_NAMESPACE = (do_NAMESPACE,)
select_NAMESPACE = auth_NAMESPACE
def _selectWork(self, tag, name, rw, cmdName):
if self.mbox:
self.mbox.removeListener(self)
cmbx = ICloseableMailbox(self.mbox, None)
if cmbx is not None:
maybeDeferred(cmbx.close).addErrback(log.err)
self.mbox = None
self.state = 'auth'
name = _parseMbox(name)
maybeDeferred(self.account.select, _parseMbox(name), rw
).addCallback(self._cbSelectWork, cmdName, tag
).addErrback(self._ebSelectWork, cmdName, tag
)
def _ebSelectWork(self, failure, cmdName, tag):
self.sendBadResponse(tag, cmdName + b" failed: Server error")
log.err(failure)
def _cbSelectWork(self, mbox, cmdName, tag):
if mbox is None:
self.sendNegativeResponse(tag, b'No such mailbox')
return
if '\\noselect' in [s.lower() for s in mbox.getFlags()]:
self.sendNegativeResponse(tag, b'Mailbox cannot be selected')
return
flags = [networkString(flag) for flag in mbox.getFlags()]
self.sendUntaggedResponse(intToBytes(mbox.getMessageCount()) + b' EXISTS')
self.sendUntaggedResponse(intToBytes(mbox.getRecentCount()) + b' RECENT')
self.sendUntaggedResponse(b'FLAGS (' + b' '.join(flags) + b')')
self.sendPositiveResponse(None, b'[UIDVALIDITY ' + intToBytes(mbox.getUIDValidity()) + b']')
s = mbox.isWriteable() and b'READ-WRITE' or b'READ-ONLY'
mbox.addListener(self)
self.sendPositiveResponse(tag, b'[' + s + b'] ' + cmdName + b' successful')
self.state = 'select'
self.mbox = mbox
auth_SELECT = ( _selectWork, arg_astring, 1, b'SELECT' )
select_SELECT = auth_SELECT
auth_EXAMINE = ( _selectWork, arg_astring, 0, b'EXAMINE' )
select_EXAMINE = auth_EXAMINE
def do_IDLE(self, tag):
self.sendContinuationRequest(None)
self.parseTag = tag
self.lastState = self.parseState
self.parseState = 'idle'
def parse_idle(self, *args):
self.parseState = self.lastState
del self.lastState
self.sendPositiveResponse(self.parseTag, b"IDLE terminated")
del self.parseTag
select_IDLE = ( do_IDLE, )
auth_IDLE = select_IDLE
def do_CREATE(self, tag, name):
name = _parseMbox(name)
try:
result = self.account.create(name)
except MailboxException as c:
self.sendNegativeResponse(tag, networkString(str(c)))
except:
self.sendBadResponse(tag, b"Server error encountered while creating mailbox")
log.err()
else:
if result:
self.sendPositiveResponse(tag, b'Mailbox created')
else:
self.sendNegativeResponse(tag, b'Mailbox not created')
auth_CREATE = (do_CREATE, arg_finalastring)
select_CREATE = auth_CREATE
def do_DELETE(self, tag, name):
name = _parseMbox(name)
if name.lower() == 'inbox':
self.sendNegativeResponse(tag, b'You cannot delete the inbox')
return
try:
self.account.delete(name)
except MailboxException as m:
self.sendNegativeResponse(tag, str(m).encode("imap4-utf-7"))
except:
self.sendBadResponse(tag, b"Server error encountered while deleting mailbox")
log.err()
else:
self.sendPositiveResponse(tag, b'Mailbox deleted')
auth_DELETE = (do_DELETE, arg_finalastring)
select_DELETE = auth_DELETE
def do_RENAME(self, tag, oldname, newname):
oldname, newname = [_parseMbox(n) for n in (oldname, newname)]
if oldname.lower() == 'inbox' or newname.lower() == 'inbox':
self.sendNegativeResponse(tag, b'You cannot rename the inbox, or rename another mailbox to inbox.')
return
try:
self.account.rename(oldname, newname)
except TypeError:
self.sendBadResponse(tag, b'Invalid command syntax')
except MailboxException as m:
self.sendNegativeResponse(tag, networkString(str(m)))
except:
self.sendBadResponse(tag, b"Server error encountered while renaming mailbox")
log.err()
else:
self.sendPositiveResponse(tag, b'Mailbox renamed')
auth_RENAME = (do_RENAME, arg_astring, arg_finalastring)
select_RENAME = auth_RENAME
def do_SUBSCRIBE(self, tag, name):
name = _parseMbox(name)
try:
self.account.subscribe(name)
except MailboxException as m:
self.sendNegativeResponse(tag, networkString(str(m)))
except:
self.sendBadResponse(tag, b"Server error encountered while subscribing to mailbox")
log.err()
else:
self.sendPositiveResponse(tag, b'Subscribed')
auth_SUBSCRIBE = (do_SUBSCRIBE, arg_finalastring)
select_SUBSCRIBE = auth_SUBSCRIBE
def do_UNSUBSCRIBE(self, tag, name):
name = _parseMbox(name)
try:
self.account.unsubscribe(name)
except MailboxException as m:
self.sendNegativeResponse(tag, networkString(str(m)))
except:
self.sendBadResponse(tag, b"Server error encountered while unsubscribing from mailbox")
log.err()
else:
self.sendPositiveResponse(tag, b'Unsubscribed')
auth_UNSUBSCRIBE = (do_UNSUBSCRIBE, arg_finalastring)
select_UNSUBSCRIBE = auth_UNSUBSCRIBE
def _listWork(self, tag, ref, mbox, sub, cmdName):
mbox = _parseMbox(mbox)
ref = _parseMbox(ref)
maybeDeferred(self.account.listMailboxes, ref, mbox
).addCallback(self._cbListWork, tag, sub, cmdName
).addErrback(self._ebListWork, tag
)
def _cbListWork(self, mailboxes, tag, sub, cmdName):
for (name, box) in mailboxes:
if not sub or self.account.isSubscribed(name):
flags = [networkString(flag) for flag in box.getFlags()]
delim = box.getHierarchicalDelimiter().encode('imap4-utf-7')
resp = (DontQuoteMe(cmdName), map(DontQuoteMe, flags), delim, name.encode('imap4-utf-7'))
self.sendUntaggedResponse(collapseNestedLists(resp))
self.sendPositiveResponse(tag, cmdName + b' completed')
def _ebListWork(self, failure, tag):
self.sendBadResponse(tag, b"Server error encountered while listing mailboxes.")
log.err(failure)
auth_LIST = (_listWork, arg_astring, arg_astring, 0, b'LIST')
select_LIST = auth_LIST
auth_LSUB = (_listWork, arg_astring, arg_astring, 1, b'LSUB')
select_LSUB = auth_LSUB
def do_STATUS(self, tag, mailbox, names):
nativeNames = []
for name in names:
nativeNames.append(nativeString(name))
mailbox = _parseMbox(mailbox)
maybeDeferred(self.account.select, mailbox, 0
).addCallback(self._cbStatusGotMailbox, tag, mailbox, nativeNames
).addErrback(self._ebStatusGotMailbox, tag
)
def _cbStatusGotMailbox(self, mbox, tag, mailbox, names):
if mbox:
maybeDeferred(mbox.requestStatus, names).addCallbacks(
self.__cbStatus, self.__ebStatus,
(tag, mailbox), None, (tag, mailbox), None
)
else:
self.sendNegativeResponse(tag, b"Could not open mailbox")
def _ebStatusGotMailbox(self, failure, tag):
self.sendBadResponse(tag, b"Server error encountered while opening mailbox.")
log.err(failure)
auth_STATUS = (do_STATUS, arg_astring, arg_plist)
select_STATUS = auth_STATUS
def __cbStatus(self, status, tag, box):
# STATUS names should only be ASCII
line = networkString(' '.join(['%s %s' % x for x in status.items()]))
self.sendUntaggedResponse(b'STATUS ' + box.encode('imap4-utf-7') + b' ('+ line + b')')
self.sendPositiveResponse(tag, b'STATUS complete')
def __ebStatus(self, failure, tag, box):
self.sendBadResponse(tag, b'STATUS '+ box + b' failed: ' +
networkString(str(failure.value)))
def do_APPEND(self, tag, mailbox, flags, date, message):
mailbox = _parseMbox(mailbox)
maybeDeferred(self.account.select, mailbox
).addCallback(self._cbAppendGotMailbox, tag, flags, date, message
).addErrback(self._ebAppendGotMailbox, tag
)
def _cbAppendGotMailbox(self, mbox, tag, flags, date, message):
if not mbox:
self.sendNegativeResponse(tag, b'[TRYCREATE] No such mailbox')
return
decodedFlags = [nativeString(flag) for flag in flags]
d = mbox.addMessage(message, decodedFlags, date)
d.addCallback(self.__cbAppend, tag, mbox)
d.addErrback(self.__ebAppend, tag)
def _ebAppendGotMailbox(self, failure, tag):
self.sendBadResponse(tag, b"Server error encountered while opening mailbox.")
log.err(failure)
auth_APPEND = (do_APPEND, arg_astring, opt_plist, opt_datetime,
arg_literal)
select_APPEND = auth_APPEND
def __cbAppend(self, result, tag, mbox):
self.sendUntaggedResponse(intToBytes(mbox.getMessageCount()) + b' EXISTS')
self.sendPositiveResponse(tag, b'APPEND complete')
def __ebAppend(self, failure, tag):
self.sendBadResponse(tag, b'APPEND failed: ' +
networkString(str(failure.value)))
def do_CHECK(self, tag):
d = self.checkpoint()
if d is None:
self.__cbCheck(None, tag)
else:
d.addCallbacks(
self.__cbCheck,
self.__ebCheck,
callbackArgs=(tag,),
errbackArgs=(tag,)
)
select_CHECK = (do_CHECK,)
def __cbCheck(self, result, tag):
self.sendPositiveResponse(tag, b'CHECK completed')
def __ebCheck(self, failure, tag):
self.sendBadResponse(tag, b'CHECK failed: ' +
networkString(str(failure.value)))
def checkpoint(self):
"""
Called when the client issues a CHECK command.
This should perform any checkpoint operations required by the server.
It may be a long running operation, but may not block. If it returns
a deferred, the client will only be informed of success (or failure)
when the deferred's callback (or errback) is invoked.
"""
return None
def do_CLOSE(self, tag):
d = None
if self.mbox.isWriteable():
d = maybeDeferred(self.mbox.expunge)
cmbx = ICloseableMailbox(self.mbox, None)
if cmbx is not None:
if d is not None:
d.addCallback(lambda result: cmbx.close())
else:
d = maybeDeferred(cmbx.close)
if d is not None:
d.addCallbacks(self.__cbClose, self.__ebClose, (tag,), None, (tag,), None)
else:
self.__cbClose(None, tag)
select_CLOSE = (do_CLOSE,)
def __cbClose(self, result, tag):
self.sendPositiveResponse(tag, b'CLOSE completed')
self.mbox.removeListener(self)
self.mbox = None
self.state = 'auth'
def __ebClose(self, failure, tag):
self.sendBadResponse(tag, b'CLOSE failed: ' +
networkString(str(failure.value)))
def do_EXPUNGE(self, tag):
if self.mbox.isWriteable():
maybeDeferred(self.mbox.expunge).addCallbacks(
self.__cbExpunge, self.__ebExpunge, (tag,), None, (tag,), None
)
else:
self.sendNegativeResponse(tag, b'EXPUNGE ignored on read-only mailbox')
select_EXPUNGE = (do_EXPUNGE,)
def __cbExpunge(self, result, tag):
for e in result:
self.sendUntaggedResponse(intToBytes(e) + b' EXPUNGE')
self.sendPositiveResponse(tag, b'EXPUNGE completed')
def __ebExpunge(self, failure, tag):
self.sendBadResponse(tag, b'EXPUNGE failed: ' +
networkString(str(failure.value)))
log.err(failure)
def do_SEARCH(self, tag, charset, query, uid=0):
sm = ISearchableMailbox(self.mbox, None)
if sm is not None:
maybeDeferred(sm.search, query, uid=uid
).addCallback(self.__cbSearch, tag, self.mbox, uid
).addErrback(self.__ebSearch, tag)
else:
# that's not the ideal way to get all messages, there should be a
# method on mailboxes that gives you all of them
s = parseIdList(b'1:*')
maybeDeferred(self.mbox.fetch, s, uid=uid
).addCallback(self.__cbManualSearch,
tag, self.mbox, query, uid
).addErrback(self.__ebSearch, tag)
select_SEARCH = (do_SEARCH, opt_charset, arg_searchkeys)
def __cbSearch(self, result, tag, mbox, uid):
if uid:
result = map(mbox.getUID, result)
ids = networkString(' '.join([str(i) for i in result]))
self.sendUntaggedResponse(b'SEARCH ' + ids)
self.sendPositiveResponse(tag, b'SEARCH completed')
def __cbManualSearch(self, result, tag, mbox, query, uid,
searchResults=None):
"""
Apply the search filter to a set of messages. Send the response to the
client.
@type result: L{list} of L{tuple} of (L{int}, provider of
L{imap4.IMessage})
@param result: A list of two-tuples of messages with their sequence ids,
sorted by the ids in descending order.
@type tag: L{str}
@param tag: A command tag.
@type mbox: Provider of L{imap4.IMailbox}
@param mbox: The searched mailbox.
@type query: L{list}
@param query: A list representing the parsed form of the search query.
@param uid: A flag indicating whether the search is over message
sequence numbers or UIDs.
@type searchResults: L{list}
@param searchResults: The search results so far or L{None} if no
results yet.
"""
if searchResults is None:
searchResults = []
i = 0
# result is a list of tuples (sequenceId, Message)
lastSequenceId = result and result[-1][0]
lastMessageId = result and result[-1][1].getUID()
for (i, (msgId, msg)) in list(zip(range(5), result)):
# searchFilter and singleSearchStep will mutate the query. Dang.
# Copy it here or else things will go poorly for subsequent
# messages.
if self._searchFilter(copy.deepcopy(query), msgId, msg,
lastSequenceId, lastMessageId):
if uid:
searchResults.append(intToBytes(msg.getUID()))
else:
searchResults.append(intToBytes(msgId))
if i == 4:
from twisted.internet import reactor
reactor.callLater(
0, self.__cbManualSearch, list(result[5:]), tag, mbox, query, uid,
searchResults)
else:
if searchResults:
self.sendUntaggedResponse(b'SEARCH ' + b' '.join(searchResults))
self.sendPositiveResponse(tag, b'SEARCH completed')
def _searchFilter(self, query, id, msg, lastSequenceId, lastMessageId):
"""
Pop search terms from the beginning of C{query} until there are none
left and apply them to the given message.
@param query: A list representing the parsed form of the search query.
@param id: The sequence number of the message being checked.
@param msg: The message being checked.
@type lastSequenceId: L{int}
@param lastSequenceId: The highest sequence number of any message in
the mailbox being searched.
@type lastMessageId: L{int}
@param lastMessageId: The highest UID of any message in the mailbox
being searched.
@return: Boolean indicating whether all of the query terms match the
message.
"""
while query:
if not self._singleSearchStep(query, id, msg,
lastSequenceId, lastMessageId):
return False
return True
def _singleSearchStep(self, query, msgId, msg, lastSequenceId, lastMessageId):
"""
Pop one search term from the beginning of C{query} (possibly more than
one element) and return whether it matches the given message.
@param query: A list representing the parsed form of the search query.
@param msgId: The sequence number of the message being checked.
@param msg: The message being checked.
@param lastSequenceId: The highest sequence number of any message in
the mailbox being searched.
@param lastMessageId: The highest UID of any message in the mailbox
being searched.
@return: Boolean indicating whether the query term matched the message.
"""
q = query.pop(0)
if isinstance(q, list):
if not self._searchFilter(q, msgId, msg,
lastSequenceId, lastMessageId):
return False
else:
c = q.upper()
if not c[:1].isalpha():
# A search term may be a word like ALL, ANSWERED, BCC, etc (see
# below) or it may be a message sequence set. Here we
# recognize a message sequence set "N:M".
messageSet = parseIdList(c, lastSequenceId)
return msgId in messageSet
else:
f = getattr(self, 'search_' + nativeString(c), None)
if f is None:
raise IllegalQueryError("Invalid search command %s" % nativeString(c))
if c in self._requiresLastMessageInfo:
result = f(query, msgId, msg, (lastSequenceId,
lastMessageId))
else:
result = f(query, msgId, msg)
if not result:
return False
return True
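# Sketch of how a parsed query is evaluated (illustrative): for
# [b'FLAGGED', b'SINCE', b'1-Feb-1994'], _searchFilter repeatedly calls
# _singleSearchStep, which pops b'FLAGGED' (dispatching to search_FLAGGED),
# then b'SINCE', whose handler pops the date argument itself; the message
# matches only if every step returns a truthy value. A leading numeric term
# such as b'2:4' is instead treated as a sequence set and tested via
# parseIdList.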
def search_ALL(self, query, id, msg):
"""
Returns C{True} if the message matches the ALL search key (always).
@type query: A L{list} of L{str}
@param query: A list representing the parsed query string.
@type id: L{int}
@param id: The sequence number of the message being checked.
@type msg: Provider of L{imap4.IMessage}
"""
return True
def search_ANSWERED(self, query, id, msg):
"""
Returns C{True} if the message has been answered.
@type query: A L{list} of L{str}
@param query: A list representing the parsed query string.
@type id: L{int}
@param id: The sequence number of the message being checked.
@type msg: Provider of L{imap4.IMessage}
"""
return '\\Answered' in msg.getFlags()
def search_BCC(self, query, id, msg):
"""
Returns C{True} if the message has a BCC address matching the query.
@type query: A L{list} of L{str}
@param query: A list whose first element is a BCC L{str}
@type id: L{int}
@param id: The sequence number of the message being checked.
@type msg: Provider of L{imap4.IMessage}
"""
bcc = msg.getHeaders(False, 'bcc').get('bcc', '')
return bcc.lower().find(query.pop(0).lower()) != -1
def search_BEFORE(self, query, id, msg):
date = parseTime(query.pop(0))
return email.utils.parsedate(nativeString(msg.getInternalDate())) < date
def search_BODY(self, query, id, msg):
body = query.pop(0).lower()
return text.strFile(body, msg.getBodyFile(), False)
def search_CC(self, query, id, msg):
cc = msg.getHeaders(False, 'cc').get('cc', '')
return cc.lower().find(query.pop(0).lower()) != -1
def search_DELETED(self, query, id, msg):
return '\\Deleted' in msg.getFlags()
def search_DRAFT(self, query, id, msg):
return '\\Draft' in msg.getFlags()
def search_FLAGGED(self, query, id, msg):
return '\\Flagged' in msg.getFlags()
def search_FROM(self, query, id, msg):
fm = msg.getHeaders(False, 'from').get('from', '')
return fm.lower().find(query.pop(0).lower()) != -1
def search_HEADER(self, query, id, msg):
hdr = query.pop(0).lower()
hdr = msg.getHeaders(False, hdr).get(hdr, '')
return hdr.lower().find(query.pop(0).lower()) != -1
def search_KEYWORD(self, query, id, msg):
query.pop(0)
return False
def search_LARGER(self, query, id, msg):
return int(query.pop(0)) < msg.getSize()
def search_NEW(self, query, id, msg):
return '\\Recent' in msg.getFlags() and '\\Seen' not in msg.getFlags()
def search_NOT(self, query, id, msg, lastIDs):
"""
Returns C{True} if the message does not match the query.
@type query: A L{list} of L{str}
@param query: A list representing the parsed form of the search query.
@type id: L{int}
@param id: The sequence number of the message being checked.
@type msg: Provider of L{imap4.IMessage}
@param msg: The message being checked.
@type lastIDs: L{tuple}
@param lastIDs: A tuple of (last sequence id, last message id).
The I{last sequence id} is an L{int} containing the highest sequence
number of a message in the mailbox. The I{last message id} is an
L{int} containing the highest UID of a message in the mailbox.
"""
(lastSequenceId, lastMessageId) = lastIDs
return not self._singleSearchStep(query, id, msg,
lastSequenceId, lastMessageId)
def search_OLD(self, query, id, msg):
return '\\Recent' not in msg.getFlags()
def search_ON(self, query, id, msg):
date = parseTime(query.pop(0))
return email.utils.parsedate(msg.getInternalDate()) == date
def search_OR(self, query, id, msg, lastIDs):
"""
Returns C{True} if the message matches any of the first two query
items.
@type query: A L{list} of L{str}
@param query: A list representing the parsed form of the search query.
@type id: L{int}
@param id: The sequence number of the message being checked.
@type msg: Provider of L{imap4.IMessage}
@param msg: The message being checked.
@type lastIDs: L{tuple}
@param lastIDs: A tuple of (last sequence id, last message id).
The I{last sequence id} is an L{int} containing the highest sequence
number of a message in the mailbox. The I{last message id} is an
L{int} containing the highest UID of a message in the mailbox.
"""
(lastSequenceId, lastMessageId) = lastIDs
a = self._singleSearchStep(query, id, msg,
lastSequenceId, lastMessageId)
b = self._singleSearchStep(query, id, msg,
lastSequenceId, lastMessageId)
return a or b
def search_RECENT(self, query, id, msg):
return '\\Recent' in msg.getFlags()
def search_SEEN(self, query, id, msg):
return '\\Seen' in msg.getFlags()
def search_SENTBEFORE(self, query, id, msg):
"""
Returns C{True} if the message date is earlier than the query date.
@type query: A L{list} of L{str}
@param query: A list whose first element starts with a stringified date
that is a fragment of an L{imap4.Query()}. The date must be in the
format 'DD-Mon-YYYY', for example '03-March-2003' or '03-Mar-2003'.
@type id: L{int}
@param id: The sequence number of the message being checked.
@type msg: Provider of L{imap4.IMessage}
"""
date = msg.getHeaders(False, 'date').get('date', '')
date = email.utils.parsedate(date)
return date < parseTime(query.pop(0))
def search_SENTON(self, query, id, msg):
"""
Returns C{True} if the message date is the same as the query date.
@type query: A L{list} of L{str}
@param query: A list whose first element starts with a stringified date
that is a fragment of an L{imap4.Query()}. The date must be in the
format 'DD-Mon-YYYY', for example '03-March-2003' or '03-Mar-2003'.
@type msg: Provider of L{imap4.IMessage}
"""
date = msg.getHeaders(False, 'date').get('date', '')
date = email.utils.parsedate(date)
return date[:3] == parseTime(query.pop(0))[:3]
def search_SENTSINCE(self, query, id, msg):
"""
Returns C{True} if the message date is later than the query date.
@type query: A L{list} of L{str}
@param query: A list whose first element starts with a stringified date
that is a fragment of an L{imap4.Query()}. The date must be in the
format 'DD-Mon-YYYY', for example '03-March-2003' or '03-Mar-2003'.
@type msg: Provider of L{imap4.IMessage}
"""
date = msg.getHeaders(False, 'date').get('date', '')
date = email.utils.parsedate(date)
return date > parseTime(query.pop(0))
def search_SINCE(self, query, id, msg):
date = parseTime(query.pop(0))
return email.utils.parsedate(msg.getInternalDate()) > date
def search_SMALLER(self, query, id, msg):
return int(query.pop(0)) > msg.getSize()
def search_SUBJECT(self, query, id, msg):
subj = msg.getHeaders(False, 'subject').get('subject', '')
return subj.lower().find(query.pop(0).lower()) != -1
def search_TEXT(self, query, id, msg):
# XXX - This must search headers too
body = query.pop(0).lower()
return text.strFile(body, msg.getBodyFile(), False)
def search_TO(self, query, id, msg):
to = msg.getHeaders(False, 'to').get('to', '')
return to.lower().find(query.pop(0).lower()) != -1
def search_UID(self, query, id, msg, lastIDs):
"""
Returns C{True} if the message UID is in the range defined by the
search query.
@type query: A L{list} of L{bytes}
@param query: A list representing the parsed form of the search
query. Its first element should be a L{str} that can be interpreted
as a sequence range, for example '2:4,5:*'.
@type id: L{int}
@param id: The sequence number of the message being checked.
@type msg: Provider of L{imap4.IMessage}
@param msg: The message being checked.
@type lastIDs: L{tuple}
@param lastIDs: A tuple of (last sequence id, last message id).
The I{last sequence id} is an L{int} containing the highest sequence
number of a message in the mailbox. The I{last message id} is an
L{int} containing the highest UID of a message in the mailbox.
"""
(lastSequenceId, lastMessageId) = lastIDs
c = query.pop(0)
m = parseIdList(c, lastMessageId)
return msg.getUID() in m
def search_UNANSWERED(self, query, id, msg):
return '\\Answered' not in msg.getFlags()
def search_UNDELETED(self, query, id, msg):
return '\\Deleted' not in msg.getFlags()
def search_UNDRAFT(self, query, id, msg):
return '\\Draft' not in msg.getFlags()
def search_UNFLAGGED(self, query, id, msg):
return '\\Flagged' not in msg.getFlags()
def search_UNKEYWORD(self, query, id, msg):
query.pop(0)
return False
def search_UNSEEN(self, query, id, msg):
return '\\Seen' not in msg.getFlags()
def __ebSearch(self, failure, tag):
self.sendBadResponse(tag, b'SEARCH failed: ' +
networkString(str(failure.value)))
log.err(failure)
def do_FETCH(self, tag, messages, query, uid=0):
if query:
self._oldTimeout = self.setTimeout(None)
maybeDeferred(self.mbox.fetch, messages, uid=uid
).addCallback(iter
).addCallback(self.__cbFetch, tag, query, uid
).addErrback(self.__ebFetch, tag
)
else:
self.sendPositiveResponse(tag, b'FETCH complete')
select_FETCH = (do_FETCH, arg_seqset, arg_fetchatt)
def __cbFetch(self, results, tag, query, uid):
if self.blocked is None:
self.blocked = []
try:
id, msg = next(results)
except StopIteration:
# The idle timeout was suspended while we delivered results,
# restore it now.
self.setTimeout(self._oldTimeout)
del self._oldTimeout
# All results have been processed, deliver completion notification.
# It's important to run this *after* resetting the timeout to "rig
# a race" in some test code. writing to the transport will
# synchronously call test code, which synchronously loses the
# connection, calling our connectionLost method, which cancels the
# timeout. We want to make sure that timeout is cancelled *after*
# we reset it above, so that the final state is no timed
# calls. This avoids reactor uncleanliness errors in the test
# suite.
# XXX: Perhaps loopback should be fixed to not call the user code
# synchronously in transport.write?
self.sendPositiveResponse(tag, b'FETCH completed')
# Instance state is now consistent again (ie, it is as though
# the fetch command never ran), so allow any pending blocked
# commands to execute.
self._unblock()
else:
self.spewMessage(id, msg, query, uid
).addCallback(lambda _: self.__cbFetch(results, tag, query, uid)
).addErrback(self.__ebSpewMessage
)
def __ebSpewMessage(self, failure):
# This indicates a programming error.
# There's no reliable way to indicate anything to the client, since we
# may have already written an arbitrary amount of data in response to
# the command.
log.err(failure)
self.transport.loseConnection()
def spew_envelope(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w(b'ENVELOPE ' + collapseNestedLists([getEnvelope(msg)]))
def spew_flags(self, id, msg, _w=None, _f=None):
if _w is None:
            _w = self.transport.write
encodedFlags = [networkString(flag) for flag in msg.getFlags()]
_w(b'FLAGS ' + b'(' + b' '.join(encodedFlags) + b')')
def spew_internaldate(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
idate = msg.getInternalDate()
ttup = email.utils.parsedate_tz(nativeString(idate))
if ttup is None:
log.msg("%d:%r: unpareseable internaldate: %r" % (id, msg, idate))
raise IMAP4Exception("Internal failure generating INTERNALDATE")
# need to specify the month manually, as strftime depends on locale
strdate = time.strftime("%d-%%s-%Y %H:%M:%S ", ttup[:9])
odate = networkString(strdate % (_MONTH_NAMES[ttup[1]],))
if ttup[9] is None:
odate = odate + b"+0000"
else:
if ttup[9] >= 0:
sign = b"+"
else:
sign = b"-"
odate = odate + sign + intToBytes(
((abs(ttup[9]) // 3600) * 100 +
(abs(ttup[9]) % 3600) // 60)
).zfill(4)
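        # For example, an offset of -18000 seconds (US Eastern Standard Time)
        # renders as b"-0500".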
_w(b'INTERNALDATE ' + _quote(odate))
def spew_rfc822header(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
hdrs = _formatHeaders(msg.getHeaders(True))
_w(b'RFC822.HEADER ' + _literal(hdrs))
def spew_rfc822text(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w(b'RFC822.TEXT ')
_f()
return FileProducer(msg.getBodyFile()
).beginProducing(self.transport
)
def spew_rfc822size(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w(b'RFC822.SIZE ' + intToBytes(msg.getSize()))
def spew_rfc822(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w(b'RFC822 ')
_f()
mf = IMessageFile(msg, None)
if mf is not None:
return FileProducer(mf.open()
).beginProducing(self.transport
)
return MessageProducer(msg, None, self._scheduler
).beginProducing(self.transport
)
def spew_uid(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w(b'UID ' + intToBytes(msg.getUID()))
def spew_bodystructure(self, id, msg, _w=None, _f=None):
        if _w is None:
            _w = self.transport.write
        _w(b'BODYSTRUCTURE ' + collapseNestedLists([getBodyStructure(msg, True)]))
def spew_body(self, part, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
for p in part.part:
if msg.isMultipart():
msg = msg.getSubPart(p)
elif p > 0:
# Non-multipart messages have an implicit first part but no
# other parts - reject any request for any other part.
raise TypeError("Requested subpart of non-multipart message")
if part.header:
hdrs = msg.getHeaders(part.header.negate, *part.header.fields)
hdrs = _formatHeaders(hdrs)
_w(part.__bytes__() + b' ' + _literal(hdrs))
elif part.text:
_w(part.__bytes__() + b' ')
_f()
return FileProducer(msg.getBodyFile()
).beginProducing(self.transport
)
elif part.mime:
hdrs = _formatHeaders(msg.getHeaders(True))
_w(part.__bytes__() + b' ' + _literal(hdrs))
elif part.empty:
_w(part.__bytes__() + b' ')
_f()
if part.part:
return FileProducer(msg.getBodyFile()
).beginProducing(self.transport
)
else:
mf = IMessageFile(msg, None)
if mf is not None:
return FileProducer(mf.open()).beginProducing(self.transport)
return MessageProducer(msg, None, self._scheduler).beginProducing(self.transport)
else:
_w(b'BODY ' + collapseNestedLists([getBodyStructure(msg)]))
def spewMessage(self, id, msg, query, uid):
wbuf = WriteBuffer(self.transport)
write = wbuf.write
flush = wbuf.flush
def start():
write(b'* ' + intToBytes(id) + b' FETCH (')
def finish():
write(b')\r\n')
def space():
write(b' ')
def spew():
seenUID = False
start()
for part in query:
if part.type == 'uid':
seenUID = True
if part.type == 'body':
yield self.spew_body(part, id, msg, write, flush)
else:
f = getattr(self, 'spew_' + part.type)
yield f(id, msg, write, flush)
if part is not query[-1]:
space()
if uid and not seenUID:
space()
yield self.spew_uid(id, msg, write, flush)
finish()
flush()
return self._scheduler(spew())
def __ebFetch(self, failure, tag):
self.setTimeout(self._oldTimeout)
del self._oldTimeout
log.err(failure)
self.sendBadResponse(tag, b'FETCH failed: ' +
networkString(str(failure.value)))
def do_STORE(self, tag, messages, mode, flags, uid=0):
mode = mode.upper()
silent = mode.endswith(b'SILENT')
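        # Per RFC 3501: a b'+FLAGS' (or b'+FLAGS.SILENT') item adds flags, a
        # b'-FLAGS' item removes them, and a bare b'FLAGS' item replaces the
        # flag list outright; the mode values below encode those three cases.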
if mode.startswith(b'+'):
mode = 1
elif mode.startswith(b'-'):
mode = -1
else:
mode = 0
flags = [nativeString(flag) for flag in flags]
maybeDeferred(self.mbox.store, messages, flags, mode, uid=uid).addCallbacks(
self.__cbStore, self.__ebStore, (tag, self.mbox, uid, silent), None, (tag,), None
)
select_STORE = (do_STORE, arg_seqset, arg_atom, arg_flaglist)
def __cbStore(self, result, tag, mbox, uid, silent):
if result and not silent:
for (k, v) in result.items():
if uid:
uidstr = b' UID ' + intToBytes(mbox.getUID(k))
else:
uidstr = b''
flags = [networkString(flag) for flag in v]
self.sendUntaggedResponse(
intToBytes(k) +
b' FETCH (FLAGS ('+ b' '.join(flags) + b')' +
uidstr + b')')
self.sendPositiveResponse(tag, b'STORE completed')
def __ebStore(self, failure, tag):
self.sendBadResponse(tag, b'Server error: ' +
networkString(str(failure.value)))
def do_COPY(self, tag, messages, mailbox, uid=0):
mailbox = self._parseMbox(mailbox)
maybeDeferred(self.account.select, mailbox
).addCallback(self._cbCopySelectedMailbox, tag, messages, mailbox, uid
).addErrback(self._ebCopySelectedMailbox, tag
)
select_COPY = (do_COPY, arg_seqset, arg_finalastring)
def _cbCopySelectedMailbox(self, mbox, tag, messages, mailbox, uid):
if not mbox:
self.sendNegativeResponse(tag, 'No such mailbox: ' + mailbox)
else:
maybeDeferred(self.mbox.fetch, messages, uid
).addCallback(self.__cbCopy, tag, mbox
).addCallback(self.__cbCopied, tag, mbox
).addErrback(self.__ebCopy, tag
)
def _ebCopySelectedMailbox(self, failure, tag):
self.sendBadResponse(tag, b'Server error: ' +
networkString(str(failure.value)))
def __cbCopy(self, messages, tag, mbox):
# XXX - This should handle failures with a rollback or something
addedDeferreds = []
fastCopyMbox = IMessageCopier(mbox, None)
for (id, msg) in messages:
if fastCopyMbox is not None:
d = maybeDeferred(fastCopyMbox.copy, msg)
addedDeferreds.append(d)
continue
# XXX - The following should be an implementation of IMessageCopier.copy
# on an IMailbox->IMessageCopier adapter.
flags = msg.getFlags()
date = msg.getInternalDate()
body = IMessageFile(msg, None)
if body is not None:
bodyFile = body.open()
d = maybeDeferred(mbox.addMessage, bodyFile, flags, date)
else:
def rewind(f):
f.seek(0)
return f
buffer = tempfile.TemporaryFile()
d = MessageProducer(msg, buffer, self._scheduler
).beginProducing(None
).addCallback(lambda _, b=buffer, f=flags, d=date: mbox.addMessage(rewind(b), f, d)
)
addedDeferreds.append(d)
return defer.DeferredList(addedDeferreds)
def __cbCopied(self, deferredIds, tag, mbox):
ids = []
failures = []
for (status, result) in deferredIds:
if status:
ids.append(result)
else:
failures.append(result.value)
if failures:
self.sendNegativeResponse(tag, '[ALERT] Some messages were not copied')
else:
self.sendPositiveResponse(tag, b'COPY completed')
def __ebCopy(self, failure, tag):
        self.sendBadResponse(tag, b'COPY failed: ' +
networkString(str(failure.value)))
log.err(failure)
def do_UID(self, tag, command, line):
command = command.upper()
if command not in (b'COPY', b'FETCH', b'STORE', b'SEARCH'):
raise IllegalClientResponse(command)
self.dispatchCommand(tag, command, line, uid=1)
select_UID = (do_UID, arg_atom, arg_line)
#
# IMailboxListener implementation
#
def modeChanged(self, writeable):
if writeable:
self.sendUntaggedResponse(message=b'[READ-WRITE]', isAsync=True)
else:
self.sendUntaggedResponse(message=b'[READ-ONLY]', isAsync=True)
def flagsChanged(self, newFlags):
for (mId, flags) in newFlags.items():
encodedFlags = [networkString(flag) for flag in flags]
msg = intToBytes(mId) + (
b' FETCH (FLAGS (' + b' '.join(encodedFlags) + b'))'
)
self.sendUntaggedResponse(msg, isAsync=True)
def newMessages(self, exists, recent):
if exists is not None:
self.sendUntaggedResponse(
intToBytes(exists) + b' EXISTS', isAsync=True)
if recent is not None:
self.sendUntaggedResponse(
intToBytes(recent) + b' RECENT', isAsync=True)
TIMEOUT_ERROR = error.TimeoutError()
@implementer(IMailboxListener)
class IMAP4Client(basic.LineReceiver, policies.TimeoutMixin):
"""IMAP4 client protocol implementation
@ivar state: A string representing the state the connection is currently
in.
"""
tags = None
waiting = None
queued = None
tagID = 1
state = None
startedTLS = False
# Number of seconds to wait before timing out a connection.
# If the number is <= 0 no timeout checking will be performed.
timeout = 0
# Capabilities are not allowed to change during the session
# So cache the first response and use that for all later
# lookups
_capCache = None
_memoryFileLimit = 1024 * 1024 * 10
# Authentication is pluggable. This maps names to IClientAuthentication
# objects.
authenticators = None
STATUS_CODES = ('OK', 'NO', 'BAD', 'PREAUTH', 'BYE')
STATUS_TRANSFORMATIONS = {
'MESSAGES': int, 'RECENT': int, 'UNSEEN': int
}
context = None
def __init__(self, contextFactory = None):
self.tags = {}
self.queued = []
self.authenticators = {}
self.context = contextFactory
self._tag = None
self._parts = None
self._lastCmd = None
def registerAuthenticator(self, auth):
"""
Register a new form of authentication
When invoking the authenticate() method of IMAP4Client, the first
matching authentication scheme found will be used. The ordering is
        that in which the server lists supported authentication schemes.
@type auth: Implementor of C{IClientAuthentication}
@param auth: The object to use to perform the client
side of this authentication scheme.
"""
self.authenticators[auth.getName().upper()] = auth
def rawDataReceived(self, data):
if self.timeout > 0:
self.resetTimeout()
self._pendingSize -= len(data)
if self._pendingSize > 0:
self._pendingBuffer.write(data)
else:
passon = b''
if self._pendingSize < 0:
data, passon = data[:self._pendingSize], data[self._pendingSize:]
self._pendingBuffer.write(data)
rest = self._pendingBuffer
self._pendingBuffer = None
self._pendingSize = None
rest.seek(0, 0)
self._parts.append(rest.read())
self.setLineMode(passon.lstrip(b'\r\n'))
    # def sendLine(self, line):
    #     print('S: ' + repr(line))
    #     return basic.LineReceiver.sendLine(self, line)
def _setupForLiteral(self, rest, octets):
self._pendingBuffer = self.messageFile(octets)
self._pendingSize = octets
if self._parts is None:
self._parts = [rest, b'\r\n']
else:
self._parts.extend([rest, b'\r\n'])
self.setRawMode()
def connectionMade(self):
if self.timeout > 0:
self.setTimeout(self.timeout)
def connectionLost(self, reason):
"""
We are no longer connected
"""
if self.timeout > 0:
self.setTimeout(None)
if self.queued is not None:
queued = self.queued
self.queued = None
for cmd in queued:
cmd.defer.errback(reason)
if self.tags is not None:
tags = self.tags
self.tags = None
for cmd in tags.values():
if cmd is not None and cmd.defer is not None:
cmd.defer.errback(reason)
def lineReceived(self, line):
"""
Attempt to parse a single line from the server.
@type line: L{bytes}
@param line: The line from the server, without the line delimiter.
@raise IllegalServerResponse: If the line or some part of the line
does not represent an allowed message from the server at this time.
"""
# print('C: ' + repr(line))
if self.timeout > 0:
self.resetTimeout()
lastPart = line.rfind(b'{')
if lastPart != -1:
lastPart = line[lastPart + 1:]
if lastPart.endswith(b'}'):
# It's a literal a-comin' in
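                # For example, a line such as b'* 1 FETCH (RFC822 {342}'
                # announces that 342 octets of literal data follow.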
try:
octets = int(lastPart[:-1])
except ValueError:
raise IllegalServerResponse(line)
if self._parts is None:
self._tag, parts = line.split(None, 1)
else:
parts = line
self._setupForLiteral(parts, octets)
return
if self._parts is None:
# It isn't a literal at all
self._regularDispatch(line)
else:
# If an expression is in progress, no tag is required here
# Since we didn't find a literal indicator, this expression
# is done.
self._parts.append(line)
tag, rest = self._tag, b''.join(self._parts)
self._tag = self._parts = None
self.dispatchCommand(tag, rest)
def timeoutConnection(self):
if self._lastCmd and self._lastCmd.defer is not None:
d, self._lastCmd.defer = self._lastCmd.defer, None
d.errback(TIMEOUT_ERROR)
if self.queued:
for cmd in self.queued:
if cmd.defer is not None:
                    d, cmd.defer = cmd.defer, None
d.errback(TIMEOUT_ERROR)
self.transport.loseConnection()
def _regularDispatch(self, line):
parts = line.split(None, 1)
if len(parts) != 2:
parts.append(b'')
tag, rest = parts
self.dispatchCommand(tag, rest)
def messageFile(self, octets):
"""
Create a file to which an incoming message may be written.
@type octets: L{int}
@param octets: The number of octets which will be written to the file
@rtype: Any object which implements C{write(string)} and
C{seek(int, int)}
@return: A file-like object
"""
if octets > self._memoryFileLimit:
return tempfile.TemporaryFile()
else:
return BytesIO()
def makeTag(self):
tag = (u'%0.4X' % self.tagID).encode("ascii")
self.tagID += 1
return tag
def dispatchCommand(self, tag, rest):
if self.state is None:
f = self.response_UNAUTH
else:
f = getattr(self, 'response_' + self.state.upper(), None)
if f:
try:
f(tag, rest)
except:
log.err()
self.transport.loseConnection()
else:
log.err("Cannot dispatch: %s, %r, %r" % (self.state, tag, rest))
self.transport.loseConnection()
def response_UNAUTH(self, tag, rest):
if self.state is None:
            # This is the server greeting.
status, rest = rest.split(None, 1)
if status.upper() == b'OK':
self.state = 'unauth'
elif status.upper() == b'PREAUTH':
self.state = 'auth'
else:
# XXX - This is rude.
self.transport.loseConnection()
raise IllegalServerResponse(tag + b' ' + rest)
b, e = rest.find(b'['), rest.find(b']')
if b != -1 and e != -1:
self.serverGreeting(
self.__cbCapabilities(
([parseNestedParens(rest[b + 1:e])], None)))
else:
self.serverGreeting(None)
else:
self._defaultHandler(tag, rest)
def response_AUTH(self, tag, rest):
self._defaultHandler(tag, rest)
def _defaultHandler(self, tag, rest):
if tag == b'*' or tag == b'+':
if not self.waiting:
self._extraInfo([parseNestedParens(rest)])
else:
cmd = self.tags[self.waiting]
if tag == b'+':
cmd.continuation(rest)
else:
cmd.lines.append(rest)
else:
try:
cmd = self.tags[tag]
except KeyError:
# XXX - This is rude.
self.transport.loseConnection()
raise IllegalServerResponse(tag + b' ' + rest)
else:
status, line = rest.split(None, 1)
if status == b'OK':
# Give them this last line, too
cmd.finish(rest, self._extraInfo)
else:
cmd.defer.errback(IMAP4Exception(line))
del self.tags[tag]
self.waiting = None
self._flushQueue()
def _flushQueue(self):
if self.queued:
cmd = self.queued.pop(0)
t = self.makeTag()
self.tags[t] = cmd
self.sendLine(cmd.format(t))
self.waiting = t
def _extraInfo(self, lines):
# XXX - This is terrible.
# XXX - Also, this should collapse temporally proximate calls into single
# invocations of IMailboxListener methods, where possible.
flags = {}
recent = exists = None
for response in lines:
elements = len(response)
if elements == 1 and response[0] == [b'READ-ONLY']:
self.modeChanged(False)
elif elements == 1 and response[0] == [b'READ-WRITE']:
self.modeChanged(True)
elif elements == 2 and response[1] == b'EXISTS':
exists = int(response[0])
elif elements == 2 and response[1] == b'RECENT':
recent = int(response[0])
elif elements == 3 and response[1] == b'FETCH':
mId = int(response[0])
values, _ = self._parseFetchPairs(response[2])
flags.setdefault(mId, []).extend(values.get('FLAGS', ()))
else:
log.msg('Unhandled unsolicited response: %s' % (response,))
if flags:
self.flagsChanged(flags)
if recent is not None or exists is not None:
self.newMessages(exists, recent)
def sendCommand(self, cmd):
cmd.defer = defer.Deferred()
if self.waiting:
self.queued.append(cmd)
return cmd.defer
t = self.makeTag()
self.tags[t] = cmd
self.sendLine(cmd.format(t))
self.waiting = t
self._lastCmd = cmd
return cmd.defer
def getCapabilities(self, useCache=1):
"""
Request the capabilities available on this server.
This command is allowed in any state of connection.
@type useCache: C{bool}
@param useCache: Specify whether to use the capability-cache or to
re-retrieve the capabilities from the server. Server capabilities
should never change, so for normal use, this flag should never be
false.
@rtype: L{Deferred}
@return: A deferred whose callback will be invoked with a
dictionary mapping capability types to lists of supported
mechanisms, or to None if a support list is not applicable.
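            An illustrative (not definitive) example of such a result, for
            a server advertising IMAP4rev1 and two SASL mechanisms, is::
                {b'IMAP4rev1': None, b'AUTH': [b'PLAIN', b'LOGIN']}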
"""
if useCache and self._capCache is not None:
return defer.succeed(self._capCache)
cmd = b'CAPABILITY'
resp = (b'CAPABILITY',)
d = self.sendCommand(Command(cmd, wantResponse=resp))
d.addCallback(self.__cbCapabilities)
return d
def __cbCapabilities(self, result):
(lines, tagline) = result
caps = {}
for rest in lines:
for cap in rest[1:]:
parts = cap.split(b'=', 1)
if len(parts) == 1:
category, value = parts[0], None
else:
category, value = parts
caps.setdefault(category, []).append(value)
# Preserve a non-ideal API for backwards compatibility. It would
# probably be entirely sensible to have an object with a wider API than
# dict here so this could be presented less insanely.
for category in caps:
if caps[category] == [None]:
caps[category] = None
self._capCache = caps
return caps
def logout(self):
"""
Inform the server that we are done with the connection.
This command is allowed in any state of connection.
@rtype: L{Deferred}
@return: A deferred whose callback will be invoked with None
when the proper server acknowledgement has been received.
"""
d = self.sendCommand(Command(b'LOGOUT', wantResponse=(b'BYE',)))
d.addCallback(self.__cbLogout)
return d
def __cbLogout(self, result):
(lines, tagline) = result
self.transport.loseConnection()
# We don't particularly care what the server said
return None
def noop(self):
"""
Perform no operation.
This command is allowed in any state of connection.
@rtype: L{Deferred}
@return: A deferred whose callback will be invoked with a list
of untagged status updates the server responds with.
"""
d = self.sendCommand(Command(b'NOOP'))
d.addCallback(self.__cbNoop)
return d
def __cbNoop(self, result):
        # Conceivably, this is elidable.
        # It is, after all, a no-op.
(lines, tagline) = result
return lines
def startTLS(self, contextFactory=None):
"""
Initiates a 'STARTTLS' request and negotiates the TLS / SSL
Handshake.
@param contextFactory: The TLS / SSL Context Factory to
leverage. If the contextFactory is None the IMAP4Client will
either use the current TLS / SSL Context Factory or attempt to
create a new one.
@type contextFactory: C{ssl.ClientContextFactory}
@return: A Deferred which fires when the transport has been
secured according to the given contextFactory, or which fails
if the transport cannot be secured.
"""
assert not self.startedTLS, "Client and Server are currently communicating via TLS"
if contextFactory is None:
contextFactory = self._getContextFactory()
if contextFactory is None:
return defer.fail(IMAP4Exception(
"IMAP4Client requires a TLS context to "
"initiate the STARTTLS handshake"))
if b'STARTTLS' not in self._capCache:
return defer.fail(IMAP4Exception(
"Server does not support secure communication "
"via TLS / SSL"))
tls = interfaces.ITLSTransport(self.transport, None)
if tls is None:
return defer.fail(IMAP4Exception(
"IMAP4Client transport does not implement "
"interfaces.ITLSTransport"))
d = self.sendCommand(Command(b'STARTTLS'))
d.addCallback(self._startedTLS, contextFactory)
d.addCallback(lambda _: self.getCapabilities())
return d
def authenticate(self, secret):
"""
Attempt to enter the authenticated state with the server
This command is allowed in the Non-Authenticated state.
@rtype: L{Deferred}
@return: A deferred whose callback is invoked if the authentication
succeeds and whose errback will be invoked otherwise.
"""
if self._capCache is None:
d = self.getCapabilities()
else:
d = defer.succeed(self._capCache)
d.addCallback(self.__cbAuthenticate, secret)
return d
def __cbAuthenticate(self, caps, secret):
auths = caps.get(b'AUTH', ())
for scheme in auths:
if scheme.upper() in self.authenticators:
cmd = Command(b'AUTHENTICATE', scheme, (),
self.__cbContinueAuth, scheme,
secret)
return self.sendCommand(cmd)
if self.startedTLS:
return defer.fail(NoSupportedAuthentication(
auths, self.authenticators.keys()))
else:
def ebStartTLS(err):
err.trap(IMAP4Exception)
# We couldn't negotiate TLS for some reason
return defer.fail(NoSupportedAuthentication(
auths, self.authenticators.keys()))
d = self.startTLS()
d.addErrback(ebStartTLS)
d.addCallback(lambda _: self.getCapabilities())
d.addCallback(self.__cbAuthTLS, secret)
return d
def __cbContinueAuth(self, rest, scheme, secret):
try:
chal = decodebytes(rest + b'\n')
except binascii.Error:
self.sendLine(b'*')
raise IllegalServerResponse(rest)
else:
auth = self.authenticators[scheme]
chal = auth.challengeResponse(secret, chal)
self.sendLine(encodebytes(chal).strip())
def __cbAuthTLS(self, caps, secret):
auths = caps.get(b'AUTH', ())
for scheme in auths:
if scheme.upper() in self.authenticators:
cmd = Command(b'AUTHENTICATE', scheme, (),
self.__cbContinueAuth, scheme,
secret)
return self.sendCommand(cmd)
raise NoSupportedAuthentication(auths, self.authenticators.keys())
def login(self, username, password):
"""
Authenticate with the server using a username and password
This command is allowed in the Non-Authenticated state. If the
server supports the STARTTLS capability and our transport supports
TLS, TLS is negotiated before the login command is issued.
A more secure way to log in is to use C{startTLS} or
C{authenticate} or both.
@type username: L{str}
@param username: The username to log in with
@type password: L{str}
@param password: The password to log in with
@rtype: L{Deferred}
@return: A deferred whose callback is invoked if login is successful
and whose errback is invoked otherwise.
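        A minimal usage sketch (C{client} stands for an already-connected
        instance of this class; the credentials and mailbox name are purely
        illustrative)::
            d = client.login('alice', 'sekrit')
            d.addCallback(lambda _: client.select('INBOX'))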
"""
d = maybeDeferred(self.getCapabilities)
d.addCallback(self.__cbLoginCaps, username, password)
return d
def serverGreeting(self, caps):
"""
Called when the server has sent us a greeting.
@type caps: C{dict}
@param caps: Capabilities the server advertised in its greeting.
"""
def _getContextFactory(self):
if self.context is not None:
return self.context
try:
from twisted.internet import ssl
except ImportError:
return None
else:
context = ssl.ClientContextFactory()
context.method = ssl.SSL.TLSv1_METHOD
return context
def __cbLoginCaps(self, capabilities, username, password):
# If the server advertises STARTTLS, we might want to try to switch to TLS
tryTLS = b'STARTTLS' in capabilities
# If our transport supports switching to TLS, we might want to try to switch to TLS.
tlsableTransport = interfaces.ITLSTransport(self.transport, None) is not None
# If our transport is not already using TLS, we might want to try to switch to TLS.
nontlsTransport = interfaces.ISSLTransport(self.transport, None) is None
if not self.startedTLS and tryTLS and tlsableTransport and nontlsTransport:
d = self.startTLS()
d.addCallbacks(
self.__cbLoginTLS,
self.__ebLoginTLS,
callbackArgs=(username, password),
)
return d
else:
if nontlsTransport:
log.msg("Server has no TLS support. logging in over cleartext!")
args = b' '.join((_quote(username), _quote(password)))
return self.sendCommand(Command(b'LOGIN', args))
def _startedTLS(self, result, context):
self.transport.startTLS(context)
self._capCache = None
self.startedTLS = True
return result
def __cbLoginTLS(self, result, username, password):
args = b' '.join((_quote(username), _quote(password)))
return self.sendCommand(Command(b'LOGIN', args))
def __ebLoginTLS(self, failure):
log.err(failure)
return failure
def namespace(self):
"""
Retrieve information about the namespaces available to this account
This command is allowed in the Authenticated and Selected states.
@rtype: L{Deferred}
@return: A deferred whose callback is invoked with namespace
information. An example of this information is::
[[['', '/']], [], []]
which indicates a single personal namespace called '' with '/'
as its hierarchical delimiter, and no shared or user namespaces.
"""
cmd = b'NAMESPACE'
resp = (b'NAMESPACE',)
d = self.sendCommand(Command(cmd, wantResponse=resp))
d.addCallback(self.__cbNamespace)
return d
def __cbNamespace(self, result):
(lines, last) = result
# Namespaces and their delimiters qualify and delimit
# mailboxes, so they should be native strings
#
# On Python 2, no decoding is necessary to maintain
# the API contract.
#
# On Python 3, users specify mailboxes with native strings, so
# they should receive namespaces and delimiters as native
# strings. Both cases are possible because of the imap4-utf-7
# encoding.
if _PY3:
def _prepareNamespaceOrDelimiter(namespaceList):
return [
element.decode('imap4-utf-7') for element in namespaceList
]
else:
def _prepareNamespaceOrDelimiter(element):
return element
for parts in lines:
if len(parts) == 4 and parts[0] == b'NAMESPACE':
return [
[]
if pairOrNone is None else
[
_prepareNamespaceOrDelimiter(value)
for value in pairOrNone
]
for pairOrNone in parts[1:]
]
log.err("No NAMESPACE response to NAMESPACE command")
return [[], [], []]
def select(self, mailbox):
"""
Select a mailbox
This command is allowed in the Authenticated and Selected states.
@type mailbox: L{str}
@param mailbox: The name of the mailbox to select
@rtype: L{Deferred}
@return: A deferred whose callback is invoked with mailbox
information if the select is successful and whose errback is
invoked otherwise. Mailbox information consists of a dictionary
with the following L{str} keys and values::
FLAGS: A list of strings containing the flags settable on
messages in this mailbox.
EXISTS: An integer indicating the number of messages in this
mailbox.
RECENT: An integer indicating the number of "recent"
messages in this mailbox.
UNSEEN: The message sequence number (an integer) of the
first unseen message in the mailbox.
PERMANENTFLAGS: A list of strings containing the flags that
can be permanently set on messages in this mailbox.
UIDVALIDITY: An integer uniquely identifying this mailbox.
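        An illustrative (not definitive) example of such a dictionary is::
            {'READ-WRITE': True, 'EXISTS': 3, 'RECENT': 1,
             'FLAGS': ('\\Seen', '\\Answered', '\\Deleted'),
             'UIDVALIDITY': 42}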
"""
cmd = b'SELECT'
args = _prepareMailboxName(mailbox)
# This appears not to be used, so we can use native strings to
# indicate that the return type is native strings.
resp = ('FLAGS', 'EXISTS', 'RECENT',
'UNSEEN', 'PERMANENTFLAGS', 'UIDVALIDITY')
d = self.sendCommand(Command(cmd, args, wantResponse=resp))
d.addCallback(self.__cbSelect, 1)
return d
def examine(self, mailbox):
"""
Select a mailbox in read-only mode
This command is allowed in the Authenticated and Selected states.
@type mailbox: L{str}
@param mailbox: The name of the mailbox to examine
@rtype: L{Deferred}
@return: A deferred whose callback is invoked with mailbox
information if the examine is successful and whose errback
is invoked otherwise. Mailbox information consists of a dictionary
with the following keys and values::
'FLAGS': A list of strings containing the flags settable on
messages in this mailbox.
'EXISTS': An integer indicating the number of messages in this
mailbox.
'RECENT': An integer indicating the number of \"recent\"
messages in this mailbox.
'UNSEEN': An integer indicating the number of messages not
flagged \\Seen in this mailbox.
'PERMANENTFLAGS': A list of strings containing the flags that
can be permanently set on messages in this mailbox.
'UIDVALIDITY': An integer uniquely identifying this mailbox.
"""
cmd = b'EXAMINE'
args = _prepareMailboxName(mailbox)
resp = (b'FLAGS', b'EXISTS', b'RECENT', b'UNSEEN', b'PERMANENTFLAGS', b'UIDVALIDITY')
d = self.sendCommand(Command(cmd, args, wantResponse=resp))
d.addCallback(self.__cbSelect, 0)
return d
def _intOrRaise(self, value, phrase):
"""
Parse C{value} as an integer and return the result or raise
L{IllegalServerResponse} with C{phrase} as an argument if C{value}
cannot be parsed as an integer.
"""
try:
return int(value)
except ValueError:
raise IllegalServerResponse(phrase)
def __cbSelect(self, result, rw):
"""
Handle lines received in response to a SELECT or EXAMINE command.
See RFC 3501, section 6.3.1.
"""
(lines, tagline) = result
# In the absence of specification, we are free to assume:
# READ-WRITE access
datum = {'READ-WRITE': rw}
lines.append(parseNestedParens(tagline))
for split in lines:
if len(split) > 0 and split[0].upper() == b'OK':
# Handle all the kinds of OK response.
content = split[1]
if isinstance(content, list):
key = content[0]
else:
# not multi-valued, like OK LOGIN
key = content
key = key.upper()
if key == b'READ-ONLY':
datum['READ-WRITE'] = False
elif key == b'READ-WRITE':
datum['READ-WRITE'] = True
elif key == b'UIDVALIDITY':
datum['UIDVALIDITY'] = self._intOrRaise(content[1], split)
elif key == b'UNSEEN':
datum['UNSEEN'] = self._intOrRaise(content[1], split)
elif key == b'UIDNEXT':
datum['UIDNEXT'] = self._intOrRaise(content[1], split)
elif key == b'PERMANENTFLAGS':
datum['PERMANENTFLAGS'] = tuple(
nativeString(flag) for flag in content[1])
else:
log.err('Unhandled SELECT response (2): %s' % (split,))
elif len(split) == 2:
# Handle FLAGS, EXISTS, and RECENT
if split[0].upper() == b'FLAGS':
datum['FLAGS'] = tuple(
nativeString(flag) for flag in split[1])
elif isinstance(split[1], bytes):
# Must make sure things are strings before treating them as
# strings since some other forms of response have nesting in
# places which results in lists instead.
if split[1].upper() == b'EXISTS':
datum['EXISTS'] = self._intOrRaise(split[0], split)
elif split[1].upper() == b'RECENT':
datum['RECENT'] = self._intOrRaise(split[0], split)
else:
log.err('Unhandled SELECT response (0): %s' % (split,))
else:
log.err('Unhandled SELECT response (1): %s' % (split,))
else:
log.err('Unhandled SELECT response (4): %s' % (split,))
return datum
def create(self, name):
"""
Create a new mailbox on the server
This command is allowed in the Authenticated and Selected states.
@type name: L{str}
@param name: The name of the mailbox to create.
@rtype: L{Deferred}
@return: A deferred whose callback is invoked if the mailbox creation
is successful and whose errback is invoked otherwise.
"""
return self.sendCommand(Command(b'CREATE', _prepareMailboxName(name)))
def delete(self, name):
"""
Delete a mailbox
This command is allowed in the Authenticated and Selected states.
@type name: L{str}
@param name: The name of the mailbox to delete.
@rtype: L{Deferred}
        @return: A deferred whose callback is invoked if the mailbox is
deleted successfully and whose errback is invoked otherwise.
"""
return self.sendCommand(Command(b'DELETE', _prepareMailboxName(name)))
def rename(self, oldname, newname):
"""
Rename a mailbox
This command is allowed in the Authenticated and Selected states.
@type oldname: L{str}
@param oldname: The current name of the mailbox to rename.
@type newname: L{str}
@param newname: The new name to give the mailbox.
@rtype: L{Deferred}
@return: A deferred whose callback is invoked if the rename is
successful and whose errback is invoked otherwise.
"""
oldname = _prepareMailboxName(oldname)
newname = _prepareMailboxName(newname)
return self.sendCommand(Command(b'RENAME', b' '.join((oldname, newname))))
def subscribe(self, name):
"""
Add a mailbox to the subscription list
This command is allowed in the Authenticated and Selected states.
@type name: L{str}
@param name: The mailbox to mark as 'active' or 'subscribed'
@rtype: L{Deferred}
@return: A deferred whose callback is invoked if the subscription
is successful and whose errback is invoked otherwise.
"""
return self.sendCommand(Command(b'SUBSCRIBE', _prepareMailboxName(name)))
def unsubscribe(self, name):
"""
Remove a mailbox from the subscription list
This command is allowed in the Authenticated and Selected states.
@type name: L{str}
@param name: The mailbox to unsubscribe
@rtype: L{Deferred}
@return: A deferred whose callback is invoked if the unsubscription
is successful and whose errback is invoked otherwise.
"""
return self.sendCommand(Command(b'UNSUBSCRIBE', _prepareMailboxName(name)))
def list(self, reference, wildcard):
"""
List a subset of the available mailboxes
This command is allowed in the Authenticated and Selected
states.
@type reference: L{str}
@param reference: The context in which to interpret
C{wildcard}
@type wildcard: L{str}
@param wildcard: The pattern of mailbox names to match,
optionally including either or both of the '*' and '%'
wildcards. '*' will match zero or more characters and
cross hierarchical boundaries. '%' will also match zero
or more characters, but is limited to a single
hierarchical level.
@rtype: L{Deferred}
@return: A deferred whose callback is invoked with a list of
L{tuple}s, the first element of which is a L{tuple} of
mailbox flags, the second element of which is the
hierarchy delimiter for this mailbox, and the third of
which is the mailbox name; if the command is unsuccessful,
the deferred's errback is invoked instead. B{NB}: the
delimiter and the mailbox name are L{str}s.
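        For example, a single element of the result might look like
        (values are illustrative only)::
            (('\\Marked',), '/', 'INBOX/Sent')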
"""
cmd = b'LIST'
args = ('"%s" "%s"' % (reference, wildcard)).encode("imap4-utf-7")
resp = (b'LIST',)
d = self.sendCommand(Command(cmd, args, wantResponse=resp))
d.addCallback(self.__cbList, b'LIST')
return d
def lsub(self, reference, wildcard):
"""
List a subset of the subscribed available mailboxes
This command is allowed in the Authenticated and Selected states.
The parameters and returned object are the same as for the L{list}
method, with one slight difference: Only mailboxes which have been
subscribed can be included in the resulting list.
"""
cmd = b'LSUB'
encodedReference = reference.encode('ascii')
encodedWildcard = wildcard.encode('imap4-utf-7')
args = b"".join([
b'"', encodedReference, b'"'
b' "', encodedWildcard, b'"',
])
resp = (b'LSUB',)
d = self.sendCommand(Command(cmd, args, wantResponse=resp))
d.addCallback(self.__cbList, b'LSUB')
return d
def __cbList(self, result, command):
(lines, last) = result
results = []
for parts in lines:
if len(parts) == 4 and parts[0] == command:
# flags
parts[1] = tuple(nativeString(flag) for flag in parts[1])
# The mailbox should be a native string.
# On Python 2, this maintains the API's contract.
#
# On Python 3, users specify mailboxes with native
# strings, so they should receive mailboxes as native
# strings. Both cases are possible because of the
# imap4-utf-7 encoding.
#
# Mailbox names contain the hierarchical delimiter, so
# it too should be a native string.
if _PY3:
# delimiter
parts[2] = parts[2].decode('imap4-utf-7')
# mailbox
parts[3] = parts[3].decode('imap4-utf-7')
results.append(tuple(parts[1:]))
return results
_statusNames = {
name: name.encode('ascii') for name in (
'MESSAGES',
'RECENT',
'UIDNEXT',
'UIDVALIDITY',
'UNSEEN',
)
}
def status(self, mailbox, *names):
"""
Retrieve the status of the given mailbox
This command is allowed in the Authenticated and Selected states.
@type mailbox: L{str}
@param mailbox: The name of the mailbox to query
        @type *names: L{str}
@param *names: The status names to query. These may be any number of:
C{'MESSAGES'}, C{'RECENT'}, C{'UIDNEXT'}, C{'UIDVALIDITY'}, and
C{'UNSEEN'}.
@rtype: L{Deferred}
@return: A deferred which fires with the status information if the
command is successful and whose errback is invoked otherwise. The
status information is in the form of a C{dict}. Each element of
C{names} is a key in the dictionary. The value for each key is the
corresponding response from the server.
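        For example, a call such as C{status('INBOX', 'MESSAGES', 'UNSEEN')}
        might fire with a result along the lines of::
            {'MESSAGES': 10, 'UNSEEN': 2}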
"""
cmd = b'STATUS'
preparedMailbox = _prepareMailboxName(mailbox)
try:
names = b' '.join(self._statusNames[name] for name in names)
except KeyError:
raise ValueError("Unknown names: {!r}".format(
set(names) - set(self._statusNames)
))
args = b''.join([preparedMailbox,
b" (", names, b")"])
resp = (b'STATUS',)
d = self.sendCommand(Command(cmd, args, wantResponse=resp))
d.addCallback(self.__cbStatus)
return d
def __cbStatus(self, result):
(lines, last) = result
status = {}
for parts in lines:
if parts[0] == b'STATUS':
items = parts[2]
items = [items[i:i+2] for i in range(0, len(items), 2)]
for k, v in items:
try:
status[nativeString(k)] = v
except UnicodeDecodeError:
raise IllegalServerResponse(repr(items))
for k in status.keys():
t = self.STATUS_TRANSFORMATIONS.get(k)
if t:
try:
status[k] = t(status[k])
except Exception as e:
raise IllegalServerResponse('(' + k + ' '+ status[k] + '): ' + str(e))
return status
def append(self, mailbox, message, flags = (), date = None):
"""
Add the given message to the given mailbox.
This command is allowed in the Authenticated and Selected states.
@type mailbox: L{str}
@param mailbox: The mailbox to which to add this message.
@type message: Any file-like object opened in B{binary mode}.
@param message: The message to add, in RFC822 format. Newlines
in this file should be \\r\\n-style.
@type flags: Any iterable of L{str}
@param flags: The flags to associated with this message.
@type date: L{str}
@param date: The date to associate with this message. This should
be of the format DD-MM-YYYY HH:MM:SS +/-HHMM. For example, in
Eastern Standard Time, on July 1st 2004 at half past 1 PM,
\"01-07-2004 13:30:00 -0500\".
@rtype: L{Deferred}
@return: A deferred whose callback is invoked when this command
succeeds or whose errback is invoked if it fails.
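        A usage sketch (the file name is a stand-in; the file must remain
        open until the command's Deferred fires, since the upload happens
        asynchronously)::
            msg = open('message.eml', 'rb')
            d = client.append('INBOX', msg, date='01-07-2004 13:30:00 -0500')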
"""
message.seek(0, 2)
L = message.tell()
message.seek(0, 0)
if date:
date = networkString(' "%s"' % nativeString(date))
else:
date = b''
encodedFlags = [networkString(flag) for flag in flags]
cmd = b''.join([
_prepareMailboxName(mailbox),
b" (", b" ".join(encodedFlags), b")",
date,
b" {", intToBytes(L), b"}",
])
d = self.sendCommand(Command(b'APPEND', cmd, (), self.__cbContinueAppend, message))
return d
def __cbContinueAppend(self, lines, message):
s = basic.FileSender()
return s.beginFileTransfer(message, self.transport, None
).addCallback(self.__cbFinishAppend)
def __cbFinishAppend(self, foo):
self.sendLine(b'')
def check(self):
"""
Tell the server to perform a checkpoint
This command is allowed in the Selected state.
@rtype: L{Deferred}
@return: A deferred whose callback is invoked when this command
succeeds or whose errback is invoked if it fails.
"""
return self.sendCommand(Command(b'CHECK'))
def close(self):
"""
Return the connection to the Authenticated state.
This command is allowed in the Selected state.
Issuing this command will also remove all messages flagged \\Deleted
from the selected mailbox if it is opened in read-write mode,
        otherwise it indicates success but no messages are removed.
@rtype: L{Deferred}
@return: A deferred whose callback is invoked when the command
completes successfully or whose errback is invoked if it fails.
"""
return self.sendCommand(Command(b'CLOSE'))
def expunge(self):
"""
        Return the connection to the Authenticated state.
This command is allowed in the Selected state.
Issuing this command will perform the same actions as issuing the
close command, but will also generate an 'expunge' response for
every message deleted.
@rtype: L{Deferred}
@return: A deferred whose callback is invoked with a list of the
'expunge' responses when this command is successful or whose errback
is invoked otherwise.
"""
cmd = b'EXPUNGE'
resp = (b'EXPUNGE',)
d = self.sendCommand(Command(cmd, wantResponse=resp))
d.addCallback(self.__cbExpunge)
return d
def __cbExpunge(self, result):
(lines, last) = result
ids = []
for parts in lines:
if len(parts) == 2 and parts[1] == b'EXPUNGE':
ids.append(self._intOrRaise(parts[0], parts))
return ids
def search(self, *queries, **kwarg):
"""
Search messages in the currently selected mailbox
This command is allowed in the Selected state.
Any non-zero number of queries are accepted by this method, as returned
by the C{Query}, C{Or}, and C{Not} functions.
@param uid: if true, the server is asked to return message UIDs instead
of message sequence numbers. (This is a keyword-only argument.)
@type uid: L{bool}
@rtype: L{Deferred}
@return: A deferred whose callback will be invoked with a list of all
            the message sequence numbers returned by the search, or whose errback
will be invoked if there is an error.
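        A usage sketch, building the criteria with the module-level C{Query}
        helper (the UID values mentioned are illustrative)::
            d = client.search(Query(unseen=1), uid=True)
            # The callback receives a list of UIDs, e.g. [105, 107].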
"""
# Queries should be encoded as ASCII unless a charset
# identifier is provided. See #9201.
if _PY3:
queries = [query.encode('charmap') for query in queries]
if kwarg.get('uid'):
cmd = b'UID SEARCH'
else:
cmd = b'SEARCH'
args = b' '.join(queries)
d = self.sendCommand(Command(cmd, args, wantResponse=(cmd,)))
d.addCallback(self.__cbSearch)
return d
def __cbSearch(self, result):
(lines, end) = result
ids = []
for parts in lines:
if len(parts) > 0 and parts[0] == b'SEARCH':
ids.extend([self._intOrRaise(p, parts) for p in parts[1:]])
return ids
def fetchUID(self, messages, uid=0):
"""
Retrieve the unique identifier for one or more messages
This command is allowed in the Selected state.
@type messages: L{MessageSet} or L{str}
@param messages: A message sequence set
@type uid: L{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: L{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message sequence numbers to unique message identifiers, or whose
errback is invoked if there is an error.
"""
return self._fetch(messages, useUID=uid, uid=1)
def fetchFlags(self, messages, uid=0):
"""
Retrieve the flags for one or more messages
This command is allowed in the Selected state.
@type messages: L{MessageSet} or L{str}
@param messages: The messages for which to retrieve flags.
@type uid: L{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: L{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to lists of flags, or whose errback is invoked if
there is an error.
"""
return self._fetch(messages, useUID=uid, flags=1)
def fetchInternalDate(self, messages, uid=0):
"""
Retrieve the internal date associated with one or more messages
This command is allowed in the Selected state.
@type messages: L{MessageSet} or L{str}
@param messages: The messages for which to retrieve the internal date.
@type uid: L{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: L{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to date strings, or whose errback is invoked
if there is an error. Date strings take the format of
\"day-month-year time timezone\".
"""
return self._fetch(messages, useUID=uid, internaldate=1)
def fetchEnvelope(self, messages, uid=0):
"""
Retrieve the envelope data for one or more messages
This command is allowed in the Selected state.
@type messages: L{MessageSet} or L{str}
@param messages: The messages for which to retrieve envelope
data.
@type uid: L{bool}
@param uid: Indicates whether the message sequence set is of
message numbers or of unique message IDs.
@rtype: L{Deferred}
@return: A deferred whose callback is invoked with a dict
mapping message numbers to envelope data, or whose errback
is invoked if there is an error. Envelope data consists
of a sequence of the date, subject, from, sender,
reply-to, to, cc, bcc, in-reply-to, and message-id header
fields. The date, subject, in-reply-to, and message-id
fields are L{str}, while the from, sender, reply-to, to,
cc, and bcc fields contain address data as L{str}s.
Address data consists of a sequence of name, source route,
mailbox name, and hostname. Fields which are not present
for a particular address may be L{None}.
"""
return self._fetch(messages, useUID=uid, envelope=1)
def fetchBodyStructure(self, messages, uid=0):
"""
Retrieve the structure of the body of one or more messages
This command is allowed in the Selected state.
@type messages: L{MessageSet} or L{str}
@param messages: The messages for which to retrieve body structure
data.
@type uid: L{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: L{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to body structure data, or whose errback is invoked
if there is an error. Body structure data describes the MIME-IMB
format of a message and consists of a sequence of mime type, mime
subtype, parameters, content id, description, encoding, and size.
The fields following the size field are variable: if the mime
type/subtype is message/rfc822, the contained message's envelope
information, body structure data, and number of lines of text; if
the mime type is text, the number of lines of text. Extension fields
may also be included; if present, they are: the MD5 hash of the body,
body disposition, body language.
"""
return self._fetch(messages, useUID=uid, bodystructure=1)
def fetchSimplifiedBody(self, messages, uid=0):
"""
Retrieve the simplified body structure of one or more messages
This command is allowed in the Selected state.
@type messages: L{MessageSet} or L{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: L{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to body data, or whose errback is invoked
if there is an error. The simplified body structure is the same
as the body structure, except that extension fields will never be
present.
"""
return self._fetch(messages, useUID=uid, body=1)
def fetchMessage(self, messages, uid=0):
"""
Retrieve one or more entire messages
This command is allowed in the Selected state.
@type messages: L{MessageSet} or L{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: L{Deferred}
@return: A L{Deferred} which will fire with a C{dict} mapping message
sequence numbers to C{dict}s giving message data for the
corresponding message. If C{uid} is true, the inner dictionaries
have a C{'UID'} key mapped to a L{str} giving the UID for the
message. The text of the message is a L{str} associated with the
C{'RFC822'} key in each dictionary.
"""
return self._fetch(messages, useUID=uid, rfc822=1)
def fetchHeaders(self, messages, uid=0):
"""
Retrieve headers of one or more messages
This command is allowed in the Selected state.
@type messages: L{MessageSet} or L{str}
@param messages: A message sequence set
@type uid: L{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: L{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to dicts of message headers, or whose errback is
invoked if there is an error.
"""
return self._fetch(messages, useUID=uid, rfc822header=1)
def fetchBody(self, messages, uid=0):
"""
Retrieve body text of one or more messages
This command is allowed in the Selected state.
@type messages: L{MessageSet} or L{str}
@param messages: A message sequence set
@type uid: L{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: L{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to file-like objects containing body text, or whose
errback is invoked if there is an error.
"""
return self._fetch(messages, useUID=uid, rfc822text=1)
def fetchSize(self, messages, uid=0):
"""
Retrieve the size, in octets, of one or more messages
This command is allowed in the Selected state.
@type messages: L{MessageSet} or L{str}
@param messages: A message sequence set
@type uid: L{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: L{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to sizes, or whose errback is invoked if there is
an error.
"""
return self._fetch(messages, useUID=uid, rfc822size=1)
def fetchFull(self, messages, uid=0):
"""
Retrieve several different fields of one or more messages
This command is allowed in the Selected state. This is equivalent
to issuing all of the C{fetchFlags}, C{fetchInternalDate},
C{fetchSize}, C{fetchEnvelope}, and C{fetchSimplifiedBody}
functions.
@type messages: L{MessageSet} or L{str}
@param messages: A message sequence set
@type uid: L{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: L{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to dict of the retrieved data values, or whose
            errback is invoked if there is an error. The dictionary keys
are "flags", "date", "size", "envelope", and "body".
"""
return self._fetch(
messages, useUID=uid, flags=1, internaldate=1,
rfc822size=1, envelope=1, body=1)
def fetchAll(self, messages, uid=0):
"""
Retrieve several different fields of one or more messages
This command is allowed in the Selected state. This is equivalent
to issuing all of the C{fetchFlags}, C{fetchInternalDate},
C{fetchSize}, and C{fetchEnvelope} functions.
@type messages: L{MessageSet} or L{str}
@param messages: A message sequence set
@type uid: L{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: L{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to dict of the retrieved data values, or whose
            errback is invoked if there is an error. The dictionary keys
are "flags", "date", "size", and "envelope".
"""
return self._fetch(
messages, useUID=uid, flags=1, internaldate=1,
rfc822size=1, envelope=1)
def fetchFast(self, messages, uid=0):
"""
Retrieve several different fields of one or more messages
This command is allowed in the Selected state. This is equivalent
to issuing all of the C{fetchFlags}, C{fetchInternalDate}, and
C{fetchSize} functions.
@type messages: L{MessageSet} or L{str}
@param messages: A message sequence set
@type uid: L{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: L{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to dict of the retrieved data values, or whose
            errback is invoked if there is an error. The dictionary keys are
"flags", "date", and "size".
"""
return self._fetch(
messages, useUID=uid, flags=1, internaldate=1, rfc822size=1)
def _parseFetchPairs(self, fetchResponseList):
"""
Given the result of parsing a single I{FETCH} response, construct a
L{dict} mapping response keys to response values.
@param fetchResponseList: The result of parsing a I{FETCH} response
with L{parseNestedParens} and extracting just the response data
(that is, just the part that comes after C{"FETCH"}). The form
of this input (and therefore the output of this method) is very
disagreeable. A valuable improvement would be to enumerate the
possible keys (representing them as structured objects of some
sort) rather than using strings and tuples of tuples of strings
and so forth. This would allow the keys to be documented more
easily and would allow for a much simpler application-facing API
(one not based on looking up somewhat hard to predict keys in a
dict). Since C{fetchResponseList} notionally represents a
flattened sequence of pairs (identifying keys followed by their
associated values), collapsing such complex elements of this
list as C{["BODY", ["HEADER.FIELDS", ["SUBJECT"]]]} into a
single object would also greatly simplify the implementation of
this method.
        @return: A C{dict} of the response data represented by
            C{fetchResponseList}. Keys
in this dictionary are things like C{"RFC822.TEXT"}, C{"FLAGS"}, or
C{("BODY", ("HEADER.FIELDS", ("SUBJECT",)))}. Values are entirely
dependent on the key with which they are associated, but retain the
            same structure as produced by L{parseNestedParens}.
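            For example, on Python 3 a parsed response fragment such as
            C{[b'FLAGS', [b'\\Seen'], b'RFC822.SIZE', b'44827']} produces,
            roughly, a structured mapping containing::
                {'FLAGS': ['\\Seen'], 'RFC822.SIZE': '44827'}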
"""
# TODO: RFC 3501 Section 7.4.2, "FETCH Response", says for
# BODY responses that "8-bit textual data is permitted if a
# charset identifier is part of the body parameter
# parenthesized list". Every other component is 7-bit. This
# should parse out the charset identifier and use it to decode
# 8-bit bodies. Until then, on Python 2 it should continue to
# return native (byte) strings, while on Python 3 it should
# decode bytes to native strings via charmap, ensuring data
# fidelity at the cost of mojibake.
if _PY3:
def nativeStringResponse(thing):
if isinstance(thing, bytes):
return thing.decode('charmap')
elif isinstance(thing, list):
return [nativeStringResponse(subthing)
for subthing in thing]
else:
def nativeStringResponse(thing):
return thing
values = {}
unstructured = []
responseParts = iter(fetchResponseList)
while True:
try:
key = next(responseParts)
except StopIteration:
break
try:
value = next(responseParts)
except StopIteration:
raise IllegalServerResponse(
b"Not enough arguments", fetchResponseList)
# The parsed forms of responses like:
#
# BODY[] VALUE
# BODY[TEXT] VALUE
# BODY[HEADER.FIELDS (SUBJECT)] VALUE
# BODY[HEADER.FIELDS (SUBJECT)]<N.M> VALUE
#
# are:
#
# ["BODY", [], VALUE]
# ["BODY", ["TEXT"], VALUE]
# ["BODY", ["HEADER.FIELDS", ["SUBJECT"]], VALUE]
# ["BODY", ["HEADER.FIELDS", ["SUBJECT"]], "<N.M>", VALUE]
#
# Additionally, BODY responses for multipart messages are
# represented as:
#
# ["BODY", VALUE]
#
# with list as the type of VALUE and the type of VALUE[0].
#
# See #6281 for ideas on how this might be improved.
if key not in (b"BODY", b"BODY.PEEK"):
# Only BODY (and by extension, BODY.PEEK) responses can have
# body sections.
hasSection = False
elif not isinstance(value, list):
# A BODY section is always represented as a list. Any non-list
# is not a BODY section.
hasSection = False
elif len(value) > 2:
# The list representing a BODY section has at most two elements.
hasSection = False
elif value and isinstance(value[0], list):
# A list containing a list represents the body structure of a
# multipart message, instead.
hasSection = False
else:
# Otherwise it must have a BODY section to examine.
hasSection = True
# If it has a BODY section, grab some extra elements and shuffle
# around the shape of the key a little bit.
key = nativeString(key)
unstructured.append(key)
if hasSection:
if len(value) < 2:
value = [nativeString(v) for v in value]
unstructured.append(value)
key = (key, tuple(value))
else:
valueHead = nativeString(value[0])
valueTail = [nativeString(v) for v in value[1]]
unstructured.append([valueHead, valueTail])
key = (key, (valueHead, tuple(valueTail)))
try:
value = next(responseParts)
except StopIteration:
raise IllegalServerResponse(
b"Not enough arguments", fetchResponseList)
# Handle partial ranges
if value.startswith(b'<') and value.endswith(b'>'):
try:
int(value[1:-1])
except ValueError:
# This isn't really a range, it's some content.
pass
else:
value = nativeString(value)
unstructured.append(value)
key = key + (value,)
try:
value = next(responseParts)
except StopIteration:
raise IllegalServerResponse(
b"Not enough arguments", fetchResponseList)
value = nativeStringResponse(value)
unstructured.append(value)
values[key] = value
return values, unstructured
def _cbFetch(self, result, requestedParts, structured):
(lines, last) = result
info = {}
for parts in lines:
if len(parts) == 3 and parts[1] == b'FETCH':
id = self._intOrRaise(parts[0], parts)
if id not in info:
info[id] = [parts[2]]
else:
info[id][0].extend(parts[2])
results = {}
decodedInfo = {}
for (messageId, values) in info.items():
structuredMap, unstructuredList = self._parseFetchPairs(values[0])
decodedInfo.setdefault(messageId, [[]])[0].extend(unstructuredList)
results.setdefault(messageId, {}).update(structuredMap)
info = decodedInfo
flagChanges = {}
for messageId in list(results.keys()):
values = results[messageId]
for part in list(values.keys()):
if part not in requestedParts and part == 'FLAGS':
flagChanges[messageId] = values['FLAGS']
# Find flags in the result and get rid of them.
for i in range(len(info[messageId][0])):
if info[messageId][0][i] == 'FLAGS':
del info[messageId][0][i:i+2]
break
del values['FLAGS']
if not values:
del results[messageId]
if flagChanges:
self.flagsChanged(flagChanges)
if structured:
return results
else:
return info
def fetchSpecific(self, messages, uid=0, headerType=None,
headerNumber=None, headerArgs=None, peek=None,
offset=None, length=None):
"""
Retrieve a specific section of one or more messages
@type messages: L{MessageSet} or L{str}
@param messages: A message sequence set
@type uid: L{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@type headerType: L{str}
@param headerType: If specified, must be one of HEADER, HEADER.FIELDS,
HEADER.FIELDS.NOT, MIME, or TEXT, and will determine which part of
the message is retrieved. For HEADER.FIELDS and HEADER.FIELDS.NOT,
C{headerArgs} must be a sequence of header names. For MIME,
C{headerNumber} must be specified.
@type headerNumber: L{int} or L{int} sequence
@param headerNumber: The nested rfc822 index specifying the entity to
retrieve. For example, C{1} retrieves the first entity of the
            message, and C{(2, 1, 3)} retrieves the 3rd entity inside the first
entity inside the second entity of the message.
@type headerArgs: A sequence of L{str}
@param headerArgs: If C{headerType} is HEADER.FIELDS, these are the
headers to retrieve. If it is HEADER.FIELDS.NOT, these are the
headers to exclude from retrieval.
@type peek: C{bool}
@param peek: If true, cause the server to not set the \\Seen flag on
this message as a result of this command.
@type offset: L{int}
@param offset: The number of octets at the beginning of the result to
skip.
@type length: L{int}
@param length: The number of octets to retrieve.
@rtype: L{Deferred}
@return: A deferred whose callback is invoked with a mapping of message
numbers to retrieved data, or whose errback is invoked if there is
an error.
"""
fmt = '%s BODY%s[%s%s%s]%s'
if headerNumber is None:
number = ''
elif isinstance(headerNumber, int):
number = str(headerNumber)
else:
number = '.'.join(map(str, headerNumber))
if headerType is None:
header = ''
elif number:
header = '.' + headerType
else:
header = headerType
if header and headerType in ('HEADER.FIELDS', 'HEADER.FIELDS.NOT'):
if headerArgs is not None:
payload = ' (%s)' % ' '.join(headerArgs)
else:
payload = ' ()'
else:
payload = ''
if offset is None:
extra = ''
else:
extra = '<%d.%d>' % (offset, length)
fetch = uid and b'UID FETCH' or b'FETCH'
cmd = fmt % (messages, peek and '.PEEK' or '', number, header, payload, extra)
        # The pieces of the FETCH command string are ASCII-congruent, so
        # encode them byte-for-byte with charmap. See #9201.
if _PY3:
cmd = cmd.encode('charmap')
d = self.sendCommand(Command(fetch, cmd, wantResponse=(b'FETCH',)))
d.addCallback(self._cbFetch, (), False)
return d
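    # Illustrative note, not from the original source: for example,
    # fetchSpecific('3', headerType='HEADER.FIELDS', headerArgs=['SUBJECT'],
    # peek=True, offset=0, length=100) builds the command string
    # "3 BODY.PEEK[HEADER.FIELDS (SUBJECT)]<0.100>" and sends it as a FETCH
    # command.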
def _fetch(self, messages, useUID=0, **terms):
messages = str(messages).encode('ascii')
fetch = useUID and b'UID FETCH' or b'FETCH'
if 'rfc822text' in terms:
del terms['rfc822text']
terms['rfc822.text'] = True
if 'rfc822size' in terms:
del terms['rfc822size']
terms['rfc822.size'] = True
if 'rfc822header' in terms:
del terms['rfc822header']
terms['rfc822.header'] = True
# The terms in 6.4.5 are all ASCII congruent, so wing it.
# Note that this isn't a public API, so terms in responses
# should not be decoded to native strings.
encodedTerms = [networkString(s) for s in terms]
cmd = messages + b' (' + b' '.join(
[s.upper() for s in encodedTerms]
) + b')'
d = self.sendCommand(Command(fetch, cmd, wantResponse=(b'FETCH',)))
d.addCallback(self._cbFetch, [t.upper() for t in terms.keys()], True)
return d
def setFlags(self, messages, flags, silent=1, uid=0):
"""
Set the flags for one or more messages.
This command is allowed in the Selected state.
@type messages: L{MessageSet} or L{str}
@param messages: A message sequence set
@type flags: Any iterable of L{str}
@param flags: The flags to set
@type silent: L{bool}
@param silent: If true, cause the server to suppress its verbose
response.
@type uid: L{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: L{Deferred}
@return: A deferred whose callback is invoked with a list of the
server's responses (C{[]} if C{silent} is true) or whose
errback is invoked if there is an error.
"""
return self._store(messages, b'FLAGS', silent, flags, uid)
def addFlags(self, messages, flags, silent=1, uid=0):
"""
        Add to the set of flags for one or more messages.
This command is allowed in the Selected state.
@type messages: C{MessageSet} or L{str}
@param messages: A message sequence set
@type flags: Any iterable of L{str}
@param flags: The flags to set
@type silent: C{bool}
@param silent: If true, cause the server to suppress its verbose
response.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: L{Deferred}
@return: A deferred whose callback is invoked with a list of the
server's responses (C{[]} if C{silent} is true) or whose
errback is invoked if there is an error.
"""
return self._store(messages, b'+FLAGS', silent, flags, uid)
def removeFlags(self, messages, flags, silent=1, uid=0):
"""
        Remove from the set of flags for one or more messages.
This command is allowed in the Selected state.
@type messages: L{MessageSet} or L{str}
@param messages: A message sequence set
@type flags: Any iterable of L{str}
@param flags: The flags to set
@type silent: L{bool}
@param silent: If true, cause the server to suppress its verbose
response.
@type uid: L{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: L{Deferred}
@return: A deferred whose callback is invoked with a list of the
server's responses (C{[]} if C{silent} is true) or whose
errback is invoked if there is an error.
"""
return self._store(messages, b'-FLAGS', silent, flags, uid)
def _store(self, messages, cmd, silent, flags, uid):
messages = str(messages).encode('ascii')
encodedFlags = [networkString(flag) for flag in flags]
if silent:
cmd = cmd + b'.SILENT'
store = uid and b'UID STORE' or b'STORE'
args = b' '.join((messages, cmd, b'('+ b' '.join(encodedFlags) + b')'))
d = self.sendCommand(Command(store, args, wantResponse=(b'FETCH',)))
expected = ()
if not silent:
expected = ('FLAGS',)
d.addCallback(self._cbFetch, expected, True)
return d
def copy(self, messages, mailbox, uid):
"""
Copy the specified messages to the specified mailbox.
This command is allowed in the Selected state.
@type messages: L{MessageSet} or L{str}
@param messages: A message sequence set
@type mailbox: L{str}
@param mailbox: The mailbox to which to copy the messages
@type uid: C{bool}
@param uid: If true, the C{messages} refers to message UIDs, rather
than message sequence numbers.
@rtype: L{Deferred}
@return: A deferred whose callback is invoked with a true value
when the copy is successful, or whose errback is invoked if there
is an error.
"""
messages = str(messages).encode('ascii')
if uid:
cmd = b'UID COPY'
else:
cmd = b'COPY'
args = b' '.join([messages, _prepareMailboxName(mailbox)])
return self.sendCommand(Command(cmd, args))
#
# IMailboxListener methods
#
def modeChanged(self, writeable):
"""Override me"""
def flagsChanged(self, newFlags):
"""Override me"""
def newMessages(self, exists, recent):
"""Override me"""
def parseIdList(s, lastMessageId=None):
"""
Parse a message set search key into a C{MessageSet}.
@type s: L{bytes}
@param s: A string description of an id list, for example "1:3, 4:*"
@type lastMessageId: L{int}
@param lastMessageId: The last message sequence id or UID, depending on
whether we are parsing the list in UID or sequence id context. The
caller should pass in the correct value.
@rtype: C{MessageSet}
@return: A C{MessageSet} that contains the ids defined in the list
"""
res = MessageSet()
parts = s.split(b',')
for p in parts:
if b':' in p:
low, high = p.split(b':', 1)
try:
if low == b'*':
low = None
else:
low = int(low)
if high == b'*':
high = None
else:
high = int(high)
if low is high is None:
# *:* does not make sense
raise IllegalIdentifierError(p)
# non-positive values are illegal according to RFC 3501
if ((low is not None and low <= 0) or
(high is not None and high <= 0)):
raise IllegalIdentifierError(p)
# star means "highest value of an id in the mailbox"
high = high or lastMessageId
low = low or lastMessageId
res.add(low, high)
except ValueError:
raise IllegalIdentifierError(p)
else:
try:
if p == b'*':
p = None
else:
p = int(p)
if p is not None and p <= 0:
raise IllegalIdentifierError(p)
except ValueError:
raise IllegalIdentifierError(p)
else:
res.extend(p or lastMessageId)
return res
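# Illustrative sketch, not part of the original module: how parseIdList
# resolves "*" against a caller-supplied last message id. The values are made
# up for demonstration and the helper is never called at import time.
def _exampleParseIdList():
    ids = parseIdList(b'1:3,5,7:*', lastMessageId=9)
    # "7:*" expands to 7 through 9 because lastMessageId is 9.
    return list(ids)  # [1, 2, 3, 5, 7, 8, 9]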
_SIMPLE_BOOL = (
'ALL', 'ANSWERED', 'DELETED', 'DRAFT', 'FLAGGED', 'NEW', 'OLD',
'RECENT', 'SEEN', 'UNANSWERED', 'UNDELETED', 'UNDRAFT', 'UNFLAGGED',
'UNSEEN'
)
_NO_QUOTES = (
'LARGER', 'SMALLER', 'UID'
)
_sorted = sorted
def Query(sorted=0, **kwarg):
"""
Create a query string
Among the accepted keywords are::
all : If set to a true value, search all messages in the
current mailbox
answered : If set to a true value, search messages flagged with
\\Answered
bcc : A substring to search the BCC header field for
before : Search messages with an internal date before this
value. The given date should be a string in the format
of 'DD-Mon-YYYY'. For example, '03-Mar-2003'.
body : A substring to search the body of the messages for
cc : A substring to search the CC header field for
deleted : If set to a true value, search messages flagged with
\\Deleted
draft : If set to a true value, search messages flagged with
\\Draft
flagged : If set to a true value, search messages flagged with
\\Flagged
from : A substring to search the From header field for
header : A two-tuple of a header name and substring to search
for in that header
keyword : Search for messages with the given keyword set
larger : Search for messages larger than this number of octets
messages : Search only the given message sequence set.
new : If set to a true value, search messages flagged with
\\Recent but not \\Seen
old : If set to a true value, search messages not flagged with
\\Recent
on : Search messages with an internal date which is on this
date. The given date should be a string in the format
of 'DD-Mon-YYYY'. For example, '03-Mar-2003'.
recent : If set to a true value, search for messages flagged with
\\Recent
seen : If set to a true value, search for messages flagged with
\\Seen
sentbefore : Search for messages with an RFC822 'Date' header before
this date. The given date should be a string in the format
of 'DD-Mon-YYYY'. For example, '03-Mar-2003'.
senton : Search for messages with an RFC822 'Date' header which is
            on this date. The given date should be a string in the format
of 'DD-Mon-YYYY'. For example, '03-Mar-2003'.
sentsince : Search for messages with an RFC822 'Date' header which is
after this date. The given date should be a string in the format
of 'DD-Mon-YYYY'. For example, '03-Mar-2003'.
since : Search for messages with an internal date that is after
            this date. The given date should be a string in the format
of 'DD-Mon-YYYY'. For example, '03-Mar-2003'.
smaller : Search for messages smaller than this number of octets
subject : A substring to search the 'subject' header for
text : A substring to search the entire message for
to : A substring to search the 'to' header for
uid : Search only the messages in the given message set
unanswered : If set to a true value, search for messages not
flagged with \\Answered
undeleted : If set to a true value, search for messages not
flagged with \\Deleted
undraft : If set to a true value, search for messages not
flagged with \\Draft
unflagged : If set to a true value, search for messages not
flagged with \\Flagged
unkeyword : Search for messages without the given keyword set
unseen : If set to a true value, search for messages not
flagged with \\Seen
@type sorted: C{bool}
    @param sorted: If true, the output will be sorted alphabetically.
The standard does not require it, but it makes testing this function
easier. The default is zero, and this should be acceptable for any
application.
@rtype: L{str}
@return: The formatted query string
"""
cmd = []
keys = kwarg.keys()
if sorted:
keys = _sorted(keys)
for k in keys:
v = kwarg[k]
k = k.upper()
if k in _SIMPLE_BOOL and v:
cmd.append(k)
elif k == 'HEADER':
cmd.extend([k, str(v[0]), str(v[1])])
elif k == 'KEYWORD' or k == 'UNKEYWORD':
# Discard anything that does not fit into an "atom". Perhaps turn
# the case where this actually removes bytes from the value into a
# warning and then an error, eventually. See #6277.
v = _nonAtomRE.sub("", v)
cmd.extend([k, v])
elif k not in _NO_QUOTES:
if isinstance(v, MessageSet):
fmt = '"%s"'
elif isinstance(v, str):
fmt = '"%s"'
else:
fmt = '"%d"'
cmd.extend([k, fmt % (v,)])
elif isinstance(v, int):
cmd.extend([k, '%d' % (v,)])
else:
cmd.extend([k, '%s' % (v,)])
if len(cmd) > 1:
return '(' + ' '.join(cmd) + ')'
else:
return ' '.join(cmd)
def Or(*args):
"""
The disjunction of two or more queries
"""
if len(args) < 2:
raise IllegalQueryError(args)
elif len(args) == 2:
return '(OR %s %s)' % args
else:
return '(OR %s %s)' % (args[0], Or(*args[1:]))
def Not(query):
"""The negation of a query"""
return '(NOT %s)' % (query,)
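# Illustrative sketch, not part of the original module: composing a SEARCH
# string from the simple query helpers. The keywords are standard RFC 3501
# search keys; nothing here runs at import time.
def _exampleSearchQuery():
    recent = Query(sorted=1, unseen=1, subject='invoice')
    # recent == '(SUBJECT "invoice" UNSEEN)'
    return Not(Or(recent, Query(deleted=1)))
    # '(NOT (OR (SUBJECT "invoice" UNSEEN) DELETED))'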
def wildcardToRegexp(wildcard, delim=None):
wildcard = wildcard.replace('*', '(?:.*?)')
if delim is None:
wildcard = wildcard.replace('%', '(?:.*?)')
else:
wildcard = wildcard.replace('%', '(?:(?:[^%s])*?)' % re.escape(delim))
return re.compile(wildcard, re.I)
def splitQuoted(s):
"""
Split a string into whitespace delimited tokens
Tokens that would otherwise be separated but are surrounded by \"
remain as a single token. Any token that is not quoted and is
equal to \"NIL\" is tokenized as L{None}.
@type s: L{bytes}
@param s: The string to be split
@rtype: L{list} of L{bytes}
@return: A list of the resulting tokens
@raise MismatchedQuoting: Raised if an odd number of quotes are present
"""
s = s.strip()
result = []
word = []
inQuote = inWord = False
qu = _matchingString('"', s)
esc = _matchingString('\x5c', s)
empty = _matchingString('', s)
nil = _matchingString('NIL', s)
for i, c in enumerate(iterbytes(s)):
if c == qu:
if i and s[i-1:i] == esc:
word.pop()
word.append(qu)
elif not inQuote:
inQuote = True
else:
inQuote = False
result.append(empty.join(word))
word = []
elif (
not inWord and not inQuote and
c not in (qu + (string.whitespace.encode("ascii")))
):
inWord = True
word.append(c)
elif inWord and not inQuote and c in string.whitespace.encode("ascii"):
w = empty.join(word)
if w == nil:
result.append(None)
else:
result.append(w)
word = []
inWord = False
elif inWord or inQuote:
word.append(c)
if inQuote:
raise MismatchedQuoting(s)
if inWord:
w = empty.join(word)
if w == nil:
result.append(None)
else:
result.append(w)
return result
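# Illustrative sketch, not part of the original module: tokenizing a server
# line. Quoted tokens keep their content verbatim and the unquoted atom NIL
# becomes None.
def _exampleSplitQuoted():
    return splitQuoted(b'LIST (\\Noselect) "/" NIL')
    # [b'LIST', b'(\\Noselect)', b'/', None]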
def splitOn(sequence, predicate, transformers):
result = []
mode = predicate(sequence[0])
tmp = [sequence[0]]
for e in sequence[1:]:
p = predicate(e)
if p != mode:
result.extend(transformers[mode](tmp))
tmp = [e]
mode = p
else:
tmp.append(e)
result.extend(transformers[mode](tmp))
return result
def collapseStrings(results):
"""
Turns a list of length-one strings and lists into a list of longer
strings and lists. For example,
['a', 'b', ['c', 'd']] is returned as ['ab', ['cd']]
@type results: L{list} of L{bytes} and L{list}
@param results: The list to be collapsed
@rtype: L{list} of L{bytes} and L{list}
@return: A new list which is the collapsed form of C{results}
"""
copy = []
begun = None
pred = lambda e: isinstance(e, tuple)
tran = {
0: lambda e: splitQuoted(b''.join(e)),
1: lambda e: [b''.join([i[0] for i in e])]
}
for i, c in enumerate(results):
if isinstance(c, list):
if begun is not None:
copy.extend(splitOn(results[begun:i], pred, tran))
begun = None
copy.append(collapseStrings(c))
elif begun is None:
begun = i
if begun is not None:
copy.extend(splitOn(results[begun:], pred, tran))
return copy
def parseNestedParens(s, handleLiteral = 1):
"""
Parse an s-exp-like string into a more useful data structure.
@type s: L{bytes}
@param s: The s-exp-like string to parse
@rtype: L{list} of L{bytes} and L{list}
@return: A list containing the tokens present in the input.
@raise MismatchedNesting: Raised if the number or placement
of opening or closing parenthesis is invalid.
"""
s = s.strip()
inQuote = 0
contentStack = [[]]
try:
i = 0
L = len(s)
while i < L:
c = s[i:i+1]
if inQuote:
if c == b'\\':
contentStack[-1].append(s[i:i+2])
i += 2
continue
elif c == b'"':
inQuote = not inQuote
contentStack[-1].append(c)
i += 1
else:
if c == b'"':
contentStack[-1].append(c)
inQuote = not inQuote
i += 1
elif handleLiteral and c == b'{':
end = s.find(b'}', i)
if end == -1:
raise ValueError("Malformed literal")
literalSize = int(s[i+1:end])
contentStack[-1].append((s[end+3:end+3+literalSize],))
i = end + 3 + literalSize
elif c == b'(' or c == b'[':
contentStack.append([])
i += 1
elif c == b')' or c == b']':
contentStack[-2].append(contentStack.pop())
i += 1
else:
contentStack[-1].append(c)
i += 1
except IndexError:
raise MismatchedNesting(s)
if len(contentStack) != 1:
raise MismatchedNesting(s)
return collapseStrings(contentStack[0])
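# Illustrative sketch, not part of the original module: parsing an untagged
# FETCH response line into nested lists.
def _exampleParseNestedParens():
    return parseNestedParens(b'2 FETCH (FLAGS (\\Seen \\Answered))')
    # [b'2', b'FETCH', [b'FLAGS', [b'\\Seen', b'\\Answered']]]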
def _quote(s):
qu = _matchingString('"', s)
esc = _matchingString('\x5c', s)
return qu + s.replace(esc, esc + esc).replace(qu, esc + qu) + qu
def _literal(s):
return b'{' + intToBytes(len(s)) + b'}\r\n' + s
class DontQuoteMe:
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
_ATOM_SPECIALS = b'(){ %*"'
def _needsQuote(s):
if s == b'':
return 1
for c in iterbytes(s):
if c < b'\x20' or c > b'\x7f':
return 1
if c in _ATOM_SPECIALS:
return 1
return 0
def _parseMbox(name):
if isinstance(name, unicode):
return name
try:
return name.decode('imap4-utf-7')
except:
log.err()
raise IllegalMailboxEncoding(name)
def _prepareMailboxName(name):
if not isinstance(name, unicode):
name = name.decode("charmap")
name = name.encode('imap4-utf-7')
if _needsQuote(name):
return _quote(name)
return name
def _needsLiteral(s):
# change this to "return 1" to wig out stupid clients
    cr = _matchingString("\r", s)
    lf = _matchingString("\n", s)
return cr in s or lf in s or len(s) > 1000
def collapseNestedLists(items):
"""
Turn a nested list structure into an s-exp-like string.
Strings in C{items} will be sent as literals if they contain CR or LF,
otherwise they will be quoted. References to None in C{items} will be
translated to the atom NIL. Objects with a 'read' attribute will have
it called on them with no arguments and the returned string will be
inserted into the output as a literal. Integers will be converted to
strings and inserted into the output unquoted. Instances of
C{DontQuoteMe} will be converted to strings and inserted into the output
unquoted.
This function used to be much nicer, and only quote things that really
needed to be quoted (and C{DontQuoteMe} did not exist), however, many
broken IMAP4 clients were unable to deal with this level of sophistication,
forcing the current behavior to be adopted for practical reasons.
@type items: Any iterable
@rtype: L{str}
"""
pieces = []
for i in items:
if isinstance(i, unicode):
# anything besides ASCII will have to wait for an RFC 5738
# implementation. See
# https://twistedmatrix.com/trac/ticket/9258
i = i.encode("ascii")
if i is None:
pieces.extend([b' ', b'NIL'])
elif isinstance(i, (int, long)):
pieces.extend([b' ', networkString(str(i))])
elif isinstance(i, DontQuoteMe):
pieces.extend([b' ', i.value])
elif isinstance(i, bytes):
# XXX warning
if _needsLiteral(i):
pieces.extend([b' ', b'{', intToBytes(len(i)), b'}',
IMAP4Server.delimiter, i])
else:
pieces.extend([b' ', _quote(i)])
elif hasattr(i, 'read'):
d = i.read()
pieces.extend([b' ', b'{', intToBytes(len(d)), b'}',
IMAP4Server.delimiter, d])
else:
pieces.extend([b' ', b'(' + collapseNestedLists(i) + b')'])
return b''.join(pieces[1:])
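# Illustrative sketch, not part of the original module: collapseNestedLists is
# roughly the inverse of parseNestedParens; None becomes the NIL atom, nested
# sequences become parenthesized lists, and plain byte strings are quoted.
def _exampleCollapseNestedLists():
    return collapseNestedLists([b'FLAGS', [b'Seen'], None, 42])
    # b'"FLAGS" ("Seen") NIL 42'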
@implementer(IAccount)
class MemoryAccountWithoutNamespaces(object):
mailboxes = None
subscriptions = None
top_id = 0
def __init__(self, name):
self.name = name
self.mailboxes = {}
self.subscriptions = []
def allocateID(self):
id = self.top_id
self.top_id += 1
return id
##
## IAccount
##
def addMailbox(self, name, mbox = None):
name = _parseMbox(name.upper())
if name in self.mailboxes:
raise MailboxCollision(name)
if mbox is None:
mbox = self._emptyMailbox(name, self.allocateID())
self.mailboxes[name] = mbox
return 1
def create(self, pathspec):
paths = [path for path in pathspec.split('/') if path]
for accum in range(1, len(paths)):
try:
self.addMailbox('/'.join(paths[:accum]))
except MailboxCollision:
pass
try:
self.addMailbox('/'.join(paths))
except MailboxCollision:
if not pathspec.endswith('/'):
return False
return True
def _emptyMailbox(self, name, id):
raise NotImplementedError
def select(self, name, readwrite=1):
return self.mailboxes.get(_parseMbox(name.upper()))
def delete(self, name):
name = _parseMbox(name.upper())
# See if this mailbox exists at all
mbox = self.mailboxes.get(name)
if not mbox:
raise MailboxException("No such mailbox")
# See if this box is flagged \Noselect
if r'\Noselect' in mbox.getFlags():
# Check for hierarchically inferior mailboxes with this one
# as part of their root.
for others in self.mailboxes.keys():
if others != name and others.startswith(name):
raise MailboxException("Hierarchically inferior mailboxes exist and \\Noselect is set")
mbox.destroy()
# iff there are no hierarchically inferior names, we will
# delete it from our ken.
if len(self._inferiorNames(name)) > 1:
raise MailboxException(
'Name "%s" has inferior hierarchical names' % (name,))
del self.mailboxes[name]
def rename(self, oldname, newname):
oldname = _parseMbox(oldname.upper())
newname = _parseMbox(newname.upper())
if oldname not in self.mailboxes:
raise NoSuchMailbox(oldname)
inferiors = self._inferiorNames(oldname)
inferiors = [(o, o.replace(oldname, newname, 1)) for o in inferiors]
for (old, new) in inferiors:
if new in self.mailboxes:
raise MailboxCollision(new)
for (old, new) in inferiors:
self.mailboxes[new] = self.mailboxes[old]
del self.mailboxes[old]
def _inferiorNames(self, name):
inferiors = []
for infname in self.mailboxes.keys():
if infname.startswith(name):
inferiors.append(infname)
return inferiors
def isSubscribed(self, name):
return _parseMbox(name.upper()) in self.subscriptions
def subscribe(self, name):
name = _parseMbox(name.upper())
if name not in self.subscriptions:
self.subscriptions.append(name)
def unsubscribe(self, name):
name = _parseMbox(name.upper())
if name not in self.subscriptions:
raise MailboxException("Not currently subscribed to %s" % (name,))
self.subscriptions.remove(name)
def listMailboxes(self, ref, wildcard):
ref = self._inferiorNames(_parseMbox(ref.upper()))
wildcard = wildcardToRegexp(wildcard, '/')
return [(i, self.mailboxes[i]) for i in ref if wildcard.match(i)]
@implementer(INamespacePresenter)
class MemoryAccount(MemoryAccountWithoutNamespaces):
##
## INamespacePresenter
##
def getPersonalNamespaces(self):
return [[b"", b"/"]]
def getSharedNamespaces(self):
return None
def getOtherNamespaces(self):
return None
_statusRequestDict = {
'MESSAGES': 'getMessageCount',
'RECENT': 'getRecentCount',
'UIDNEXT': 'getUIDNext',
'UIDVALIDITY': 'getUIDValidity',
'UNSEEN': 'getUnseenCount'
}
def statusRequestHelper(mbox, names):
r = {}
for n in names:
r[n] = getattr(mbox, _statusRequestDict[n.upper()])()
return r
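# Illustrative sketch, not part of the original module: statusRequestHelper
# maps STATUS item names onto the corresponding IMailbox accessors. The
# mailbox below is a minimal stand-in, not a real IMailbox implementation.
class _ExampleStatusMailbox(object):
    def getMessageCount(self):
        return 3
    def getUnseenCount(self):
        return 1
def _exampleStatusRequest():
    return statusRequestHelper(_ExampleStatusMailbox(), ['MESSAGES', 'UNSEEN'])
    # {'MESSAGES': 3, 'UNSEEN': 1}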
def parseAddr(addr):
if addr is None:
return [(None, None, None),]
addr = email.utils.getaddresses([addr])
return [[fn or None, None] + address.split('@') for fn, address in addr]
def getEnvelope(msg):
headers = msg.getHeaders(True)
date = headers.get('date')
subject = headers.get('subject')
from_ = headers.get('from')
sender = headers.get('sender', from_)
reply_to = headers.get('reply-to', from_)
to = headers.get('to')
cc = headers.get('cc')
bcc = headers.get('bcc')
in_reply_to = headers.get('in-reply-to')
mid = headers.get('message-id')
return (date, subject, parseAddr(from_), parseAddr(sender),
reply_to and parseAddr(reply_to), to and parseAddr(to),
cc and parseAddr(cc), bcc and parseAddr(bcc), in_reply_to, mid)
def getLineCount(msg):
# XXX - Super expensive, CACHE THIS VALUE FOR LATER RE-USE
# XXX - This must be the number of lines in the ENCODED version
lines = 0
for _ in msg.getBodyFile():
lines += 1
return lines
def unquote(s):
if s[0] == s[-1] == '"':
return s[1:-1]
return s
def _getContentType(msg):
"""
Return a two-tuple of the main and subtype of the given message.
"""
attrs = None
mm = msg.getHeaders(False, 'content-type').get('content-type', '')
mm = ''.join(mm.splitlines())
if mm:
mimetype = mm.split(';')
type = mimetype[0].split('/', 1)
if len(type) == 1:
major = type[0]
minor = None
else:
# length must be 2, because of split('/', 1)
major, minor = type
attrs = dict(x.strip().lower().split('=', 1) for x in mimetype[1:])
else:
major = minor = None
return major, minor, attrs
def _getMessageStructure(message):
"""
Construct an appropriate type of message structure object for the given
message object.
@param message: A L{IMessagePart} provider
@return: A L{_MessageStructure} instance of the most specific type available
for the given message, determined by inspecting the MIME type of the
message.
"""
main, subtype, attrs = _getContentType(message)
if main is not None:
main = main.lower()
if subtype is not None:
subtype = subtype.lower()
if main == 'multipart':
return _MultipartMessageStructure(message, subtype, attrs)
elif (main, subtype) == ('message', 'rfc822'):
return _RFC822MessageStructure(message, main, subtype, attrs)
elif main == 'text':
return _TextMessageStructure(message, main, subtype, attrs)
else:
return _SinglepartMessageStructure(message, main, subtype, attrs)
class _MessageStructure(object):
"""
L{_MessageStructure} is a helper base class for message structure classes
representing the structure of particular kinds of messages, as defined by
their MIME type.
"""
def __init__(self, message, attrs):
"""
@param message: An L{IMessagePart} provider which this structure object
reports on.
@param attrs: A C{dict} giving the parameters of the I{Content-Type}
header of the message.
"""
self.message = message
self.attrs = attrs
def _disposition(self, disp):
"""
Parse a I{Content-Disposition} header into a two-sequence of the
disposition and a flattened list of its parameters.
@return: L{None} if there is no disposition header value, a L{list} with
two elements otherwise.
"""
if disp:
disp = disp.split('; ')
if len(disp) == 1:
disp = (disp[0].lower(), None)
elif len(disp) > 1:
# XXX Poorly tested parser
params = [x for param in disp[1:] for x in param.split('=', 1)]
disp = [disp[0].lower(), params]
return disp
else:
return None
def _unquotedAttrs(self):
"""
@return: The I{Content-Type} parameters, unquoted, as a flat list with
each Nth element giving a parameter name and N+1th element giving
the corresponding parameter value.
"""
if self.attrs:
unquoted = [(k, unquote(v)) for (k, v) in self.attrs.items()]
return [y for x in sorted(unquoted) for y in x]
return None
class _SinglepartMessageStructure(_MessageStructure):
"""
L{_SinglepartMessageStructure} represents the message structure of a
non-I{multipart/*} message.
"""
_HEADERS = [
'content-id', 'content-description',
'content-transfer-encoding']
def __init__(self, message, main, subtype, attrs):
"""
@param message: An L{IMessagePart} provider which this structure object
reports on.
@param main: A L{str} giving the main MIME type of the message (for
example, C{"text"}).
@param subtype: A L{str} giving the MIME subtype of the message (for
example, C{"plain"}).
@param attrs: A C{dict} giving the parameters of the I{Content-Type}
header of the message.
"""
_MessageStructure.__init__(self, message, attrs)
self.main = main
self.subtype = subtype
self.attrs = attrs
def _basicFields(self):
"""
Return a list of the basic fields for a single-part message.
"""
headers = self.message.getHeaders(False, *self._HEADERS)
# Number of octets total
size = self.message.getSize()
major, minor = self.main, self.subtype
# content-type parameter list
unquotedAttrs = self._unquotedAttrs()
return [
major, minor, unquotedAttrs,
headers.get('content-id'),
headers.get('content-description'),
headers.get('content-transfer-encoding'),
size,
]
def encode(self, extended):
"""
Construct and return a list of the basic and extended fields for a
single-part message. The list suitable to be encoded into a BODY or
BODYSTRUCTURE response.
"""
result = self._basicFields()
if extended:
result.extend(self._extended())
return result
def _extended(self):
"""
The extension data of a non-multipart body part are in the
following order:
1. body MD5
A string giving the body MD5 value as defined in [MD5].
2. body disposition
A parenthesized list with the same content and function as
the body disposition for a multipart body part.
3. body language
A string or parenthesized list giving the body language
value as defined in [LANGUAGE-TAGS].
4. body location
A string list giving the body content URI as defined in
[LOCATION].
"""
result = []
headers = self.message.getHeaders(
False, 'content-md5', 'content-disposition',
            'content-language', 'content-location')
result.append(headers.get('content-md5'))
result.append(self._disposition(headers.get('content-disposition')))
result.append(headers.get('content-language'))
result.append(headers.get('content-location'))
return result
class _TextMessageStructure(_SinglepartMessageStructure):
"""
L{_TextMessageStructure} represents the message structure of a I{text/*}
message.
"""
def encode(self, extended):
"""
A body type of type TEXT contains, immediately after the basic
fields, the size of the body in text lines. Note that this
size is the size in its content transfer encoding and not the
resulting size after any decoding.
"""
result = _SinglepartMessageStructure._basicFields(self)
result.append(getLineCount(self.message))
if extended:
result.extend(self._extended())
return result
class _RFC822MessageStructure(_SinglepartMessageStructure):
"""
L{_RFC822MessageStructure} represents the message structure of a
I{message/rfc822} message.
"""
def encode(self, extended):
"""
A body type of type MESSAGE and subtype RFC822 contains,
immediately after the basic fields, the envelope structure,
body structure, and size in text lines of the encapsulated
message.
"""
result = _SinglepartMessageStructure.encode(self, extended)
contained = self.message.getSubPart(0)
result.append(getEnvelope(contained))
result.append(getBodyStructure(contained, False))
result.append(getLineCount(contained))
return result
class _MultipartMessageStructure(_MessageStructure):
"""
L{_MultipartMessageStructure} represents the message structure of a
I{multipart/*} message.
"""
def __init__(self, message, subtype, attrs):
"""
@param message: An L{IMessagePart} provider which this structure object
reports on.
@param subtype: A L{str} giving the MIME subtype of the message (for
example, C{"plain"}).
@param attrs: A C{dict} giving the parameters of the I{Content-Type}
header of the message.
"""
_MessageStructure.__init__(self, message, attrs)
self.subtype = subtype
def _getParts(self):
"""
Return an iterator over all of the sub-messages of this message.
"""
i = 0
while True:
try:
part = self.message.getSubPart(i)
except IndexError:
break
else:
yield part
i += 1
def encode(self, extended):
"""
        Encode each sub-message and add the additional I{multipart} fields.
"""
result = [_getMessageStructure(p).encode(extended) for p in self._getParts()]
result.append(self.subtype)
if extended:
result.extend(self._extended())
return result
def _extended(self):
"""
The extension data of a multipart body part are in the following order:
1. body parameter parenthesized list
A parenthesized list of attribute/value pairs [e.g., ("foo"
"bar" "baz" "rag") where "bar" is the value of "foo", and
"rag" is the value of "baz"] as defined in [MIME-IMB].
2. body disposition
A parenthesized list, consisting of a disposition type
string, followed by a parenthesized list of disposition
attribute/value pairs as defined in [DISPOSITION].
3. body language
A string or parenthesized list giving the body language
value as defined in [LANGUAGE-TAGS].
4. body location
A string list giving the body content URI as defined in
[LOCATION].
"""
result = []
headers = self.message.getHeaders(
False, 'content-language', 'content-location',
'content-disposition')
result.append(self._unquotedAttrs())
result.append(self._disposition(headers.get('content-disposition')))
result.append(headers.get('content-language', None))
result.append(headers.get('content-location', None))
return result
def getBodyStructure(msg, extended=False):
"""
RFC 3501, 7.4.2, BODYSTRUCTURE::
A parenthesized list that describes the [MIME-IMB] body structure of a
message. This is computed by the server by parsing the [MIME-IMB] header
fields, defaulting various fields as necessary.
For example, a simple text message of 48 lines and 2279 octets can have
a body structure of: ("TEXT" "PLAIN" ("CHARSET" "US-ASCII") NIL NIL
"7BIT" 2279 48)
This is represented as::
["TEXT", "PLAIN", ["CHARSET", "US-ASCII"], None, None, "7BIT", 2279, 48]
These basic fields are documented in the RFC as:
1. body type
A string giving the content media type name as defined in
[MIME-IMB].
2. body subtype
A string giving the content subtype name as defined in
[MIME-IMB].
3. body parameter parenthesized list
A parenthesized list of attribute/value pairs [e.g., ("foo"
"bar" "baz" "rag") where "bar" is the value of "foo" and
"rag" is the value of "baz"] as defined in [MIME-IMB].
4. body id
A string giving the content id as defined in [MIME-IMB].
5. body description
A string giving the content description as defined in
[MIME-IMB].
6. body encoding
A string giving the content transfer encoding as defined in
[MIME-IMB].
7. body size
A number giving the size of the body in octets. Note that this size is
the size in its transfer encoding and not the resulting size after any
decoding.
Put another way, the body structure is a list of seven elements. The
semantics of the elements of this list are:
1. Byte string giving the major MIME type
2. Byte string giving the minor MIME type
3. A list giving the Content-Type parameters of the message
4. A byte string giving the content identifier for the message part, or
None if it has no content identifier.
5. A byte string giving the content description for the message part, or
None if it has no content description.
6. A byte string giving the Content-Encoding of the message body
7. An integer giving the number of octets in the message body
The RFC goes on::
Multiple parts are indicated by parenthesis nesting. Instead of a body
type as the first element of the parenthesized list, there is a sequence
of one or more nested body structures. The second element of the
parenthesized list is the multipart subtype (mixed, digest, parallel,
alternative, etc.).
For example, a two part message consisting of a text and a
BASE64-encoded text attachment can have a body structure of: (("TEXT"
"PLAIN" ("CHARSET" "US-ASCII") NIL NIL "7BIT" 1152 23)("TEXT" "PLAIN"
("CHARSET" "US-ASCII" "NAME" "cc.diff")
"<960723163407.20117h@cac.washington.edu>" "Compiler diff" "BASE64" 4554
73) "MIXED")
This is represented as::
[["TEXT", "PLAIN", ["CHARSET", "US-ASCII"], None, None, "7BIT", 1152,
23],
["TEXT", "PLAIN", ["CHARSET", "US-ASCII", "NAME", "cc.diff"],
"<960723163407.20117h@cac.washington.edu>", "Compiler diff",
"BASE64", 4554, 73],
"MIXED"]
In other words, a list of N + 1 elements, where N is the number of parts in
the message. The first N elements are structures as defined by the previous
section. The last element is the minor MIME subtype of the multipart
message.
Additionally, the RFC describes extension data::
Extension data follows the multipart subtype. Extension data is never
returned with the BODY fetch, but can be returned with a BODYSTRUCTURE
fetch. Extension data, if present, MUST be in the defined order.
The C{extended} flag controls whether extension data might be returned with
the normal data.
"""
return _getMessageStructure(msg).encode(extended)
def _formatHeaders(headers):
# TODO: This should use email.header.Header, which handles encoding
hdrs = [': '.join((k.title(), '\r\n'.join(v.splitlines()))) for (k, v)
in headers.items()]
hdrs = '\r\n'.join(hdrs) + '\r\n'
return networkString(hdrs)
def subparts(m):
i = 0
try:
while True:
yield m.getSubPart(i)
i += 1
except IndexError:
pass
def iterateInReactor(i):
"""
    Consume an iterator, at most a single iteration per reactor iteration.
If the iterator produces a Deferred, the next iteration will not occur
until the Deferred fires, otherwise the next iteration will be taken
in the next reactor iteration.
@rtype: C{Deferred}
@return: A deferred which fires (with None) when the iterator is
exhausted or whose errback is called if there is an exception.
"""
from twisted.internet import reactor
d = defer.Deferred()
def go(last):
try:
r = next(i)
except StopIteration:
d.callback(last)
except:
d.errback()
else:
if isinstance(r, defer.Deferred):
r.addCallback(go)
else:
reactor.callLater(0, go, r)
go(None)
return d
class MessageProducer:
CHUNK_SIZE = 2 ** 2 ** 2 ** 2
_uuid4 = staticmethod(uuid.uuid4)
def __init__(self, msg, buffer = None, scheduler = None):
"""
Produce this message.
@param msg: The message I am to produce.
@type msg: L{IMessage}
@param buffer: A buffer to hold the message in. If None, I will
use a L{tempfile.TemporaryFile}.
@type buffer: file-like
"""
self.msg = msg
if buffer is None:
buffer = tempfile.TemporaryFile()
self.buffer = buffer
if scheduler is None:
scheduler = iterateInReactor
self.scheduler = scheduler
self.write = self.buffer.write
def beginProducing(self, consumer):
self.consumer = consumer
return self.scheduler(self._produce())
def _produce(self):
headers = self.msg.getHeaders(True)
boundary = None
if self.msg.isMultipart():
content = headers.get('content-type')
parts = [x.split('=', 1) for x in content.split(';')[1:]]
parts = dict([(k.lower().strip(), v) for (k, v) in parts])
boundary = parts.get('boundary')
if boundary is None:
# Bastards
boundary = '----=%s' % (self._uuid4().hex,)
headers['content-type'] += '; boundary="%s"' % (boundary,)
else:
if boundary.startswith('"') and boundary.endswith('"'):
boundary = boundary[1:-1]
boundary = networkString(boundary)
self.write(_formatHeaders(headers))
self.write(b'\r\n')
if self.msg.isMultipart():
for p in subparts(self.msg):
self.write(b'\r\n--' + boundary + b'\r\n')
yield MessageProducer(p, self.buffer, self.scheduler
).beginProducing(None
)
self.write(b'\r\n--' + boundary + b'--\r\n' )
else:
f = self.msg.getBodyFile()
while True:
b = f.read(self.CHUNK_SIZE)
if b:
self.buffer.write(b)
yield None
else:
break
if self.consumer:
self.buffer.seek(0, 0)
yield FileProducer(self.buffer
).beginProducing(self.consumer
).addCallback(lambda _: self
)
class _FetchParser:
class Envelope:
# Response should be a list of fields from the message:
# date, subject, from, sender, reply-to, to, cc, bcc, in-reply-to,
# and message-id.
#
# from, sender, reply-to, to, cc, and bcc are themselves lists of
# address information:
# personal name, source route, mailbox name, host name
#
# reply-to and sender must not be None. If not present in a message
# they should be defaulted to the value of the from field.
type = 'envelope'
__str__ = lambda self: 'envelope'
class Flags:
type = 'flags'
__str__ = lambda self: 'flags'
class InternalDate:
type = 'internaldate'
__str__ = lambda self: 'internaldate'
class RFC822Header:
type = 'rfc822header'
__str__ = lambda self: 'rfc822.header'
class RFC822Text:
type = 'rfc822text'
__str__ = lambda self: 'rfc822.text'
class RFC822Size:
type = 'rfc822size'
__str__ = lambda self: 'rfc822.size'
class RFC822:
type = 'rfc822'
__str__ = lambda self: 'rfc822'
class UID:
type = 'uid'
__str__ = lambda self: 'uid'
class Body:
type = 'body'
peek = False
header = None
mime = None
text = None
part = ()
empty = False
partialBegin = None
partialLength = None
def __str__(self):
return nativeString(self.__bytes__())
def __bytes__(self):
base = b'BODY'
part = b''
separator = b''
if self.part:
part = b'.'.join([unicode(x + 1).encode("ascii")
for x in self.part])
separator = b'.'
# if self.peek:
# base += '.PEEK'
if self.header:
base += (b'[' + part + separator +
str(self.header).encode("ascii") + b']')
elif self.text:
base += b'[' + part + separator + b'TEXT]'
elif self.mime:
base += b'[' + part + separator + b'MIME]'
elif self.empty:
base += b'[' + part + b']'
if self.partialBegin is not None:
base += b'<' + intToBytes(self.partialBegin) + b'.' + intToBytes(self.partialLength) + b'>'
return base
class BodyStructure:
type = 'bodystructure'
__str__ = lambda self: 'bodystructure'
# These three aren't top-level, they don't need type indicators
class Header:
negate = False
fields = None
part = None
def __str__(self):
return nativeString(self.__bytes__())
def __bytes__(self):
base = b'HEADER'
if self.fields:
base += b'.FIELDS'
if self.negate:
base += b'.NOT'
fields = []
for f in self.fields:
f = f.title()
if _needsQuote(f):
f = _quote(f)
fields.append(f)
base += b' (' + b' '.join(fields) + b')'
if self.part:
# TODO: _FetchParser never assigns Header.part - dead
# code?
                base = b'.'.join([intToBytes(x + 1) for x in self.part]) + b'.' + base
return base
class Text:
pass
class MIME:
pass
parts = None
_simple_fetch_att = [
(b'envelope', Envelope),
(b'flags', Flags),
(b'internaldate', InternalDate),
(b'rfc822.header', RFC822Header),
(b'rfc822.text', RFC822Text),
(b'rfc822.size', RFC822Size),
(b'rfc822', RFC822),
(b'uid', UID),
(b'bodystructure', BodyStructure),
]
def __init__(self):
self.state = ['initial']
self.result = []
self.remaining = b''
def parseString(self, s):
s = self.remaining + s
try:
while s or self.state:
if not self.state:
raise IllegalClientResponse("Invalid Argument")
# print 'Entering state_' + self.state[-1] + ' with', repr(s)
state = self.state.pop()
try:
used = getattr(self, 'state_' + state)(s)
except:
self.state.append(state)
raise
else:
# print state, 'consumed', repr(s[:used])
s = s[used:]
finally:
self.remaining = s
def state_initial(self, s):
        # In the initial state, the literals "ALL", "FULL", and "FAST"
        # are accepted, as is a "(" marking the start of a parenthesized
        # list of fetch_att tokens, as is the start of a single fetch_att
        # token.
if s == b'':
return 0
l = s.lower()
if l.startswith(b'all'):
self.result.extend((
self.Flags(), self.InternalDate(),
self.RFC822Size(), self.Envelope()
))
return 3
if l.startswith(b'full'):
self.result.extend((
self.Flags(), self.InternalDate(),
self.RFC822Size(), self.Envelope(),
self.Body()
))
return 4
if l.startswith(b'fast'):
self.result.extend((
self.Flags(), self.InternalDate(), self.RFC822Size(),
))
return 4
if l.startswith(b'('):
self.state.extend(('close_paren', 'maybe_fetch_att', 'fetch_att'))
return 1
self.state.append('fetch_att')
return 0
def state_close_paren(self, s):
if s.startswith(b')'):
return 1
# TODO: does maybe_fetch_att's startswith(b')') make this dead
# code?
raise Exception("Missing )")
def state_whitespace(self, s):
# Eat up all the leading whitespace
if not s or not s[0:1].isspace():
raise Exception("Whitespace expected, none found")
i = 0
for i in range(len(s)):
if not s[i:i + 1].isspace():
break
return i
def state_maybe_fetch_att(self, s):
if not s.startswith(b')'):
self.state.extend(('maybe_fetch_att', 'fetch_att', 'whitespace'))
return 0
def state_fetch_att(self, s):
# Allowed fetch_att tokens are "ENVELOPE", "FLAGS", "INTERNALDATE",
# "RFC822", "RFC822.HEADER", "RFC822.SIZE", "RFC822.TEXT", "BODY",
# "BODYSTRUCTURE", "UID",
# "BODY [".PEEK"] [<section>] ["<" <number> "." <nz_number> ">"]
l = s.lower()
for (name, cls) in self._simple_fetch_att:
if l.startswith(name):
self.result.append(cls())
return len(name)
b = self.Body()
if l.startswith(b'body.peek'):
b.peek = True
used = 9
elif l.startswith(b'body'):
used = 4
else:
raise Exception("Nothing recognized in fetch_att: %s" % (l,))
self.pending_body = b
self.state.extend(('got_body', 'maybe_partial', 'maybe_section'))
return used
def state_got_body(self, s):
self.result.append(self.pending_body)
del self.pending_body
return 0
def state_maybe_section(self, s):
if not s.startswith(b"["):
return 0
self.state.extend(('section', 'part_number'))
return 1
    _partExpr = re.compile(br'(\d+(?:\.\d+)*)\.?')
def state_part_number(self, s):
m = self._partExpr.match(s)
if m is not None:
self.parts = [int(p) - 1 for p in m.groups()[0].split(b'.')]
return m.end()
else:
self.parts = []
return 0
def state_section(self, s):
# Grab "HEADER]" or "HEADER.FIELDS (Header list)]" or
# "HEADER.FIELDS.NOT (Header list)]" or "TEXT]" or "MIME]" or
# just "]".
l = s.lower()
used = 0
if l.startswith(b']'):
self.pending_body.empty = True
used += 1
elif l.startswith(b'header]'):
h = self.pending_body.header = self.Header()
h.negate = True
h.fields = ()
used += 7
elif l.startswith(b'text]'):
self.pending_body.text = self.Text()
used += 5
elif l.startswith(b'mime]'):
self.pending_body.mime = self.MIME()
used += 5
else:
h = self.Header()
if l.startswith(b'header.fields.not'):
h.negate = True
used += 17
elif l.startswith(b'header.fields'):
used += 13
else:
raise Exception("Unhandled section contents: %r" % (l,))
self.pending_body.header = h
self.state.extend(('finish_section', 'header_list', 'whitespace'))
self.pending_body.part = tuple(self.parts)
self.parts = None
return used
def state_finish_section(self, s):
if not s.startswith(b']'):
raise Exception("section must end with ]")
return 1
def state_header_list(self, s):
if not s.startswith(b'('):
raise Exception("Header list must begin with (")
end = s.find(b')')
if end == -1:
raise Exception("Header list must end with )")
headers = s[1:end].split()
self.pending_body.header.fields = [h.upper() for h in headers]
return end + 1
def state_maybe_partial(self, s):
# Grab <number.number> or nothing at all
if not s.startswith(b'<'):
return 0
end = s.find(b'>')
if end == -1:
raise Exception("Found < but not >")
partial = s[1:end]
parts = partial.split(b'.', 1)
if len(parts) != 2:
raise Exception("Partial specification did not include two .-delimited integers")
begin, length = map(int, parts)
self.pending_body.partialBegin = begin
self.pending_body.partialLength = length
return end + 1
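# Illustrative sketch, not part of the original module: feeding a client's
# FETCH attribute string through _FetchParser and rendering the result back
# to a string.
def _exampleFetchParser():
    p = _FetchParser()
    p.parseString(b'BODY.PEEK[HEADER.FIELDS (SUBJECT)]<0.100>')
    return [str(r) for r in p.result]
    # ['BODY[HEADER.FIELDS (Subject)]<0.100>'] (note: .PEEK is not echoed back)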
class FileProducer:
CHUNK_SIZE = 2 ** 2 ** 2 ** 2
firstWrite = True
def __init__(self, f):
self.f = f
def beginProducing(self, consumer):
self.consumer = consumer
self.produce = consumer.write
d = self._onDone = defer.Deferred()
self.consumer.registerProducer(self, False)
return d
def resumeProducing(self):
b = b''
if self.firstWrite:
b = b'{' + intToBytes(self._size()) + b'}\r\n'
self.firstWrite = False
if not self.f:
return
b = b + self.f.read(self.CHUNK_SIZE)
if not b:
self.consumer.unregisterProducer()
self._onDone.callback(self)
self._onDone = self.f = self.consumer = None
else:
self.produce(b)
def pauseProducing(self):
"""
Pause the producer. This does nothing.
"""
def stopProducing(self):
"""
Stop the producer. This does nothing.
"""
def _size(self):
b = self.f.tell()
self.f.seek(0, 2)
e = self.f.tell()
self.f.seek(b, 0)
return e - b
def parseTime(s):
# XXX - This may require localization :(
months = [
'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct',
'nov', 'dec', 'january', 'february', 'march', 'april', 'may', 'june',
'july', 'august', 'september', 'october', 'november', 'december'
]
expr = {
'day': r"(?P<day>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
'mon': r"(?P<mon>\w+)",
'year': r"(?P<year>\d\d\d\d)"
}
m = re.match('%(day)s-%(mon)s-%(year)s' % expr, s)
if not m:
raise ValueError("Cannot parse time string %r" % (s,))
d = m.groupdict()
try:
d['mon'] = 1 + (months.index(d['mon'].lower()) % 12)
d['year'] = int(d['year'])
d['day'] = int(d['day'])
except ValueError:
raise ValueError("Cannot parse time string %r" % (s,))
else:
return time.struct_time(
(d['year'], d['mon'], d['day'], 0, 0, 0, -1, -1, -1)
)
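# Illustrative sketch, not part of the original module: parseTime accepts the
# DD-Mon-YYYY form used by the date-based SEARCH keys and zeroes the time
# fields of the returned struct_time.
def _exampleParseTime():
    parsed = parseTime('03-Mar-2003')
    return (parsed.tm_year, parsed.tm_mon, parsed.tm_mday)  # (2003, 3, 3)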
# we need to cast Python >=3.3 memoryview to chars (from unsigned bytes), but
# cast is absent in previous versions: thus, the lambda returns the
# memoryview instance while ignoring the format
memory_cast = getattr(memoryview, "cast", lambda *x: x[0])
def modified_base64(s):
s_utf7 = s.encode('utf-7')
return s_utf7[1:-1].replace(b'/', b',')
def modified_unbase64(s):
s_utf7 = b'+' + s.replace(b',', b'/') + b'-'
return s_utf7.decode('utf-7')
def encoder(s, errors=None):
"""
Encode the given C{unicode} string using the IMAP4 specific variation of
UTF-7.
@type s: C{unicode}
@param s: The text to encode.
@param errors: Policy for handling encoding errors. Currently ignored.
    @return: L{tuple} of L{bytes} giving the encoded text and an L{int}
giving the number of code units consumed from the input.
"""
r = bytearray()
_in = []
valid_chars = set(map(chr, range(0x20,0x7f))) - {u"&"}
for c in s:
if c in valid_chars:
if _in:
r += b'&' + modified_base64(''.join(_in)) + b'-'
del _in[:]
r.append(ord(c))
elif c == u'&':
if _in:
r += b'&' + modified_base64(''.join(_in)) + b'-'
del _in[:]
r += b'&-'
else:
_in.append(c)
if _in:
r.extend(b'&' + modified_base64(''.join(_in)) + b'-')
return (bytes(r), len(s))
def decoder(s, errors=None):
"""
    Decode the given L{bytes} using the IMAP4 specific variation of UTF-7.
    @type s: L{bytes}
    @param s: The bytes to decode.
@param errors: Policy for handling decoding errors. Currently ignored.
@return: a L{tuple} of a C{unicode} string giving the text which was
decoded and an L{int} giving the number of bytes consumed from the
input.
"""
r = []
decode = []
s = memory_cast(memoryview(s), 'c')
for c in s:
if c == b'&' and not decode:
decode.append(b'&')
elif c == b'-' and decode:
if len(decode) == 1:
r.append(u'&')
else:
r.append(modified_unbase64(b''.join(decode[1:])))
decode = []
elif decode:
decode.append(c)
else:
r.append(c.decode())
if decode:
r.append(modified_unbase64(b''.join(decode[1:])))
return (u''.join(r), len(s))
class StreamReader(codecs.StreamReader):
def decode(self, s, errors='strict'):
return decoder(s)
class StreamWriter(codecs.StreamWriter):
def encode(self, s, errors='strict'):
return encoder(s)
_codecInfo = codecs.CodecInfo(encoder, decoder, StreamReader, StreamWriter)
def imap4_utf_7(name):
if name == 'imap4-utf-7':
return _codecInfo
codecs.register(imap4_utf_7)
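# Illustrative sketch, not part of the original module: once the codec is
# registered, mailbox names round-trip through the RFC 3501 modified UTF-7
# encoding by name.
def _exampleImap4Utf7():
    encoded = u'R\xe9sum\xe9'.encode('imap4-utf-7')  # b'R&AOk-sum&AOk-'
    return encoded.decode('imap4-utf-7')             # u'R\xe9sum\xe9'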
__all__ = [
# Protocol classes
'IMAP4Server', 'IMAP4Client',
# Interfaces
'IMailboxListener', 'IClientAuthentication', 'IAccount', 'IMailbox',
'INamespacePresenter', 'ICloseableMailbox', 'IMailboxInfo',
'IMessage', 'IMessageCopier', 'IMessageFile', 'ISearchableMailbox',
'IMessagePart',
# Exceptions
'IMAP4Exception', 'IllegalClientResponse', 'IllegalOperation',
'IllegalMailboxEncoding', 'UnhandledResponse', 'NegativeResponse',
'NoSupportedAuthentication', 'IllegalServerResponse',
'IllegalIdentifierError', 'IllegalQueryError', 'MismatchedNesting',
'MismatchedQuoting', 'MailboxException', 'MailboxCollision',
'NoSuchMailbox', 'ReadOnlyMailbox',
# Auth objects
'CramMD5ClientAuthenticator', 'PLAINAuthenticator', 'LOGINAuthenticator',
'PLAINCredentials', 'LOGINCredentials',
# Simple query interface
'Query', 'Not', 'Or',
# Miscellaneous
'MemoryAccount',
'statusRequestHelper',
]
| 32.983763
| 111
| 0.574025
|
7949efad6b6e2c4a01f588e3be883d963235f4c0
| 3,561
|
py
|
Python
|
currencies/management/commands/_exchangeratesapi_client.py
|
wardsi/django-currencies
|
175cba42835591d9b544f14dbbf96345d46cdb8a
|
[
"BSD-3-Clause"
] | null | null | null |
currencies/management/commands/_exchangeratesapi_client.py
|
wardsi/django-currencies
|
175cba42835591d9b544f14dbbf96345d46cdb8a
|
[
"BSD-3-Clause"
] | null | null | null |
currencies/management/commands/_exchangeratesapi_client.py
|
wardsi/django-currencies
|
175cba42835591d9b544f14dbbf96345d46cdb8a
|
[
"BSD-3-Clause"
] | null | null | null |
import decimal
import requests
__version__ = '0.1.0'
__author__ = 'Ward and Partners'
__license__ = 'MIT'
__copyright__ = 'Copyright 2021 Ward and Partners'
# https://exchangeratesapi.io/documentation/
class ExchangeRatesApiClientException(requests.exceptions.RequestException):
"""Base client exception wraps all kinds of ``requests`` lib exceptions"""
pass
class ExchangeRatesApiClient(object):
"""This class is a client implementation for openexchangerate.org service
"""
BASE_URL = 'http://api.exchangeratesapi.io/v1'
ENDPOINT_LATEST = BASE_URL + '/latest'
ENDPOINT_CURRENCIES = BASE_URL + '/symbols'
ENDPOINT_HISTORICAL = BASE_URL + '/%s'
def __init__(self, api_key):
"""Convenient constructor"""
self.client = requests.Session()
self.client.params.update({'access_key': api_key})
def latest(self, base='USD'):
"""Fetches latest exchange rate data from service
https://api.exchangeratesapi.io/v1/latest
? access_key = API_KEY
& base = USD
& symbols = GBP,JPY,EUR
:Example Data:
{
"success": true,
"timestamp": 1519296206,
"base": "USD",
"date": "2021-03-17",
"rates": {
"GBP": 0.72007,
"JPY": 107.346001,
"EUR": 0.813399,
}
}
"""
try:
resp = self.client.get(self.ENDPOINT_LATEST, params={'base': base})
resp.raise_for_status()
except requests.exceptions.RequestException as e:
raise ExchangeRatesApiClientException(e)
return resp.json(parse_int=decimal.Decimal,
parse_float=decimal.Decimal)
def currencies(self):
"""Fetches current currency data of the service
:Example Data:
{
"success": true,
"symbols": {
"AED": "United Arab Emirates Dirham",
"AFN": "Afghan Afghani",
"ALL": "Albanian Lek",
"AMD": "Armenian Dram",
[...]
}
}
"""
try:
resp = self.client.get(self.ENDPOINT_CURRENCIES)
except requests.exceptions.RequestException as e:
raise ExchangeRatesApiClientException(e)
d = resp.json()
return d['symbols']
def historical(self, date, base='USD'):
"""Fetches historical exchange rate data from service
:Example Data:
{
disclaimer: "<Disclaimer data>",
license: "<License data>",
timestamp: 1358150409,
base: "USD",
rates: {
AED: 3.666311,
AFN: 51.2281,
ALL: 104.748751,
AMD: 406.919999,
ANG: 1.7831,
...
}
}
"""
try:
resp = self.client.get(self.ENDPOINT_HISTORICAL %
date.strftime("%Y-%m-%d"),
params={'base': base})
resp.raise_for_status()
except requests.exceptions.RequestException as e:
raise ExchangeRatesApiClientException(e)
return resp.json(parse_int=decimal.Decimal,
parse_float=decimal.Decimal)
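# Illustrative sketch, not part of the original module: typical use of the
# client. The API key is a placeholder and each call performs a network
# request when invoked; nothing here runs at import time.
def _example_usage():
    import datetime
    client = ExchangeRatesApiClient('YOUR-API-KEY')
    latest = client.latest(base='EUR')
    symbols = client.currencies()
    history = client.historical(datetime.date(2021, 3, 17), base='EUR')
    return latest, symbols, history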
| 32.669725
| 80
| 0.505757
|
7949f0e4ee9c10b0a0a3900abf472fcd7e987dda
| 16,110
|
py
|
Python
|
osprofiler/profiler.py
|
kklimonda/osprofiler
|
d0d65fd4f9fc762ec09b10f24f9f59c840af0198
|
[
"Apache-2.0"
] | null | null | null |
osprofiler/profiler.py
|
kklimonda/osprofiler
|
d0d65fd4f9fc762ec09b10f24f9f59c840af0198
|
[
"Apache-2.0"
] | null | null | null |
osprofiler/profiler.py
|
kklimonda/osprofiler
|
d0d65fd4f9fc762ec09b10f24f9f59c840af0198
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import datetime
import functools
import inspect
import socket
import threading
from oslo_utils import reflection
from oslo_utils import uuidutils
import six
from osprofiler import _utils as utils
from osprofiler import notifier
# NOTE(boris-42): Thread safe storage for profiler instances.
__local_ctx = threading.local()
def clean():
__local_ctx.profiler = None
def _ensure_no_multiple_traced(traceable_attrs):
for attr_name, attr in traceable_attrs:
traced_times = getattr(attr, "__traced__", 0)
if traced_times:
raise ValueError("Can not apply new trace on top of"
" previously traced attribute '%s' since"
" it has been traced %s times previously"
% (attr_name, traced_times))
def init(hmac_key, base_id=None, parent_id=None):
"""Init profiler instance for current thread.
You should call profiler.init() before using osprofiler.
Otherwise profiler.start() and profiler.stop() methods won't do anything.
:param hmac_key: secret key to sign trace information.
:param base_id: Used to bind all related traces.
:param parent_id: Used to build tree of traces.
:returns: Profiler instance
"""
if get() is None:
__local_ctx.profiler = _Profiler(hmac_key, base_id=base_id,
parent_id=parent_id)
return __local_ctx.profiler
def get():
"""Get profiler instance.
:returns: Profiler instance or None if profiler wasn't inited.
"""
return getattr(__local_ctx, "profiler", None)
def start(name, info=None):
"""Send new start notification if profiler instance is presented.
:param name: The name of action. E.g. wsgi, rpc, db, etc..
:param info: Dictionary with extra trace information. For example in wsgi
it can be url, in rpc - message or in db sql - request.
"""
profiler = get()
if profiler:
profiler.start(name, info=info)
def stop(info=None):
"""Send new stop notification if profiler instance is presented."""
profiler = get()
if profiler:
profiler.stop(info=info)
def trace(name, info=None, hide_args=False, hide_result=True,
allow_multiple_trace=True):
"""Trace decorator for functions.
    Very useful if you would like to add a trace point to an existing function:
>> @profiler.trace("my_point")
>> def my_func(self, some_args):
>> #code
:param name: The name of action. E.g. wsgi, rpc, db, etc..
:param info: Dictionary with extra trace information. For example in wsgi
it can be url, in rpc - message or in db sql - request.
    :param hide_args: Don't push args and kwargs to the trace info. Quite useful
                      if args contain information that you don't want to share,
                      e.g. passwords.
:param hide_result: Boolean value to hide/show function result in trace.
True - hide function result (default).
False - show function result in trace.
:param allow_multiple_trace: If the wrapped function has already been
traced either allow the new trace to occur
or raise a value error denoting that multiple
tracing is not allowed (by default allow).
"""
if not info:
info = {}
else:
info = info.copy()
info["function"] = {}
def decorator(f):
trace_times = getattr(f, "__traced__", 0)
if not allow_multiple_trace and trace_times:
raise ValueError("Function '%s' has already"
" been traced %s times" % (f, trace_times))
try:
f.__traced__ = trace_times + 1
except AttributeError:
# Tries to work around the following:
#
# AttributeError: 'instancemethod' object has no
# attribute '__traced__'
try:
f.im_func.__traced__ = trace_times + 1
except AttributeError: # nosec
pass
@functools.wraps(f)
def wrapper(*args, **kwargs):
# NOTE(tovin07): Workaround for this issue
# F823 local variable 'info'
# (defined in enclosing scope on line xxx)
# referenced before assignment
info_ = info
if "name" not in info_["function"]:
# Get this once (as it should **not** be changing in
# subsequent calls).
info_["function"]["name"] = reflection.get_callable_name(f)
if not hide_args:
info_["function"]["args"] = str(args)
info_["function"]["kwargs"] = str(kwargs)
stop_info = None
try:
start(name, info=info_)
result = f(*args, **kwargs)
except Exception as ex:
stop_info = {
"etype": reflection.get_class_name(ex),
"message": six.text_type(ex)
}
raise
else:
if not hide_result:
stop_info = {"function": {"result": repr(result)}}
return result
finally:
if stop_info:
stop(info=stop_info)
else:
stop()
return wrapper
return decorator
def trace_cls(name, info=None, hide_args=False, hide_result=True,
trace_private=False, allow_multiple_trace=True,
trace_class_methods=False, trace_static_methods=False):
"""Trace decorator for instances of class .
Very useful if you would like to add trace point on existing method:
    >> @profiler.trace_cls("rpc")
    >> class RpcManagerClass(object):
    >>
    >>     def my_method(self, some_args):
    >>         pass
    >>
    >>     def my_method2(self, some_arg1, some_arg2, kw=None, kw2=None):
    >>         pass
    >>
:param name: The name of action. E.g. wsgi, rpc, db, etc..
:param info: Dictionary with extra trace information. For example in wsgi
it can be url, in rpc - message or in db sql - request.
    :param hide_args: Don't push args and kwargs to the trace info. Quite useful
                      if args contain information that you don't want to share,
                      e.g. passwords.
:param hide_result: Boolean value to hide/show function result in trace.
True - hide function result (default).
False - show function result in trace.
    :param trace_private: Trace methods that start with "_". It won't trace
                          methods that start with "__" even if this is turned on.
:param trace_static_methods: Trace staticmethods. This may be prone to
issues so careful usage is recommended (this
is also why this defaults to false).
:param trace_class_methods: Trace classmethods. This may be prone to
issues so careful usage is recommended (this
is also why this defaults to false).
:param allow_multiple_trace: If wrapped attributes have already been
traced either allow the new trace to occur
or raise a value error denoting that multiple
tracing is not allowed (by default allow).
"""
def trace_checker(attr_name, to_be_wrapped):
if attr_name.startswith("__"):
# Never trace really private methods.
return (False, None)
if not trace_private and attr_name.startswith("_"):
return (False, None)
if isinstance(to_be_wrapped, staticmethod):
if not trace_static_methods:
return (False, None)
return (True, staticmethod)
if isinstance(to_be_wrapped, classmethod):
if not trace_class_methods:
return (False, None)
return (True, classmethod)
return (True, None)
def decorator(cls):
clss = cls if inspect.isclass(cls) else cls.__class__
mro_dicts = [c.__dict__ for c in inspect.getmro(clss)]
traceable_attrs = []
traceable_wrappers = []
for attr_name, attr in inspect.getmembers(cls):
if not (inspect.ismethod(attr) or inspect.isfunction(attr)):
continue
wrapped_obj = None
for cls_dict in mro_dicts:
if attr_name in cls_dict:
wrapped_obj = cls_dict[attr_name]
break
should_wrap, wrapper = trace_checker(attr_name, wrapped_obj)
if not should_wrap:
continue
traceable_attrs.append((attr_name, attr))
traceable_wrappers.append(wrapper)
if not allow_multiple_trace:
# Check before doing any other further work (so we don't
# halfway trace this class).
_ensure_no_multiple_traced(traceable_attrs)
for i, (attr_name, attr) in enumerate(traceable_attrs):
wrapped_method = trace(name, info=info, hide_args=hide_args,
hide_result=hide_result)(attr)
wrapper = traceable_wrappers[i]
if wrapper is not None:
wrapped_method = wrapper(wrapped_method)
setattr(cls, attr_name, wrapped_method)
return cls
return decorator
class TracedMeta(type):
"""Metaclass to comfortably trace all children of a specific class.
Possible usage:
>>> @six.add_metaclass(profiler.TracedMeta)
>>> class RpcManagerClass(object):
>>> __trace_args__ = {'name': 'rpc',
>>> 'info': None,
>>> 'hide_args': False,
>>> 'hide_result': True,
>>> 'trace_private': False}
    >>>
    >>>     def my_method(self, some_args):
    >>>         pass
    >>>
    >>>     def my_method2(self, some_arg1, some_arg2, kw=None, kw2=None):
    >>>         pass
    Using this metaclass requires setting the __trace_args__ attribute on the
    class you want to trace. __trace_args__ is a dictionary with one
    mandatory key, "name", which defines the name of the action to be
    traced, e.g. wsgi, rpc, db, etc.
"""
def __init__(cls, cls_name, bases, attrs):
super(TracedMeta, cls).__init__(cls_name, bases, attrs)
trace_args = dict(getattr(cls, "__trace_args__", {}))
trace_private = trace_args.pop("trace_private", False)
allow_multiple_trace = trace_args.pop("allow_multiple_trace", True)
if "name" not in trace_args:
raise TypeError("Please specify __trace_args__ class level "
"dictionary attribute with mandatory 'name' key - "
"e.g. __trace_args__ = {'name': 'rpc'}")
traceable_attrs = []
for attr_name, attr_value in attrs.items():
if not (inspect.ismethod(attr_value)
or inspect.isfunction(attr_value)):
continue
if attr_name.startswith("__"):
continue
if not trace_private and attr_name.startswith("_"):
continue
traceable_attrs.append((attr_name, attr_value))
if not allow_multiple_trace:
# Check before doing any other further work (so we don't
# halfway trace this class).
_ensure_no_multiple_traced(traceable_attrs)
for attr_name, attr_value in traceable_attrs:
setattr(cls, attr_name, trace(**trace_args)(getattr(cls,
attr_name)))
class Trace(object):
def __init__(self, name, info=None):
"""With statement way to use profiler start()/stop().
        >> with profiler.Trace("rpc", info={"any": "values"}):
        >>     some code
        instead of
        >> profiler.start()
        >> try:
        >>     your code
        >> finally:
        >>     profiler.stop()
"""
self._name = name
self._info = info
def __enter__(self):
start(self._name, info=self._info)
def __exit__(self, etype, value, traceback):
if etype:
info = {
"etype": reflection.get_class_name(etype),
"message": value.args[0] if value.args else None
}
stop(info=info)
else:
stop()
class _Profiler(object):
def __init__(self, hmac_key, base_id=None, parent_id=None):
self.hmac_key = hmac_key
if not base_id:
base_id = str(uuidutils.generate_uuid())
self._trace_stack = collections.deque([base_id, parent_id or base_id])
self._name = collections.deque()
self._host = socket.gethostname()
def get_shorten_id(self, uuid_id):
"""Return shorten id of a uuid that will be used in OpenTracing drivers
:param uuid_id: A string of uuid that was generated by uuidutils
:returns: A shorter 64-bit long id
"""
return format(utils.shorten_id(uuid_id), "x")
def get_base_id(self):
"""Return base id of a trace.
        Base id is the same for all elements in one trace. Its main goal is
to be able to retrieve by one request all trace elements from storage.
"""
return self._trace_stack[0]
def get_parent_id(self):
"""Returns parent trace element id."""
return self._trace_stack[-2]
def get_id(self):
"""Returns current trace element id."""
return self._trace_stack[-1]
def start(self, name, info=None):
"""Start new event.
Adds new trace_id to trace stack and sends notification
to collector. With "info" and 3 ids:
base_id - to be able to retrieve all trace elements by one query
parent_id - to build tree of events (not just a list)
trace_id - current event id.
:param name: name of trace element (db, wsgi, rpc, etc..)
:param info: Dictionary with any useful information related to this
trace element. (sql request, rpc message or url...)
"""
info = info or {}
info["host"] = self._host
self._name.append(name)
self._trace_stack.append(str(uuidutils.generate_uuid()))
self._notify("%s-start" % name, info)
def stop(self, info=None):
"""Finish latest event.
Same as a start, but instead of pushing trace_id to stack it pops it.
        :param info: Dict with useful info. It will be sent in the notification.
"""
info = info or {}
info["host"] = self._host
self._notify("%s-stop" % self._name.pop(), info)
self._trace_stack.pop()
def _notify(self, name, info):
payload = {
"name": name,
"base_id": self.get_base_id(),
"trace_id": self.get_id(),
"parent_id": self.get_parent_id(),
"timestamp": datetime.datetime.utcnow().strftime(
"%Y-%m-%dT%H:%M:%S.%f"),
}
if info:
payload["info"] = info
notifier.notify(payload)
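# ---------------------------------------------------------------------------
# Editor's note: hedged usage sketch, not part of the original module. It only
# exercises the helpers defined above; with no explicit notifier configured,
# osprofiler's default notifier is a no-op, so the example should run without a
# collector backend.
# ---------------------------------------------------------------------------
if __name__ == "__main__":  # pragma: no cover
    init(hmac_key="secret-key")  # enable profiling for the current thread

    @trace("example", info={"kind": "demo"})
    def add(a, b):
        return a + b

    with Trace("outer", info={"step": "demo"}):
        print(add(1, 2))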
avg_line_length: 36.697039 | max_line_length: 79 | alphanum_fraction: 0.582619

hexsha: 7949f2bda4a53e7a6a1b7087467ca88ff49ec83f | size: 1,388 | ext: py | lang: Python
max_stars_repo_path: modules/utils.py | max_stars_repo_name: GeoffreyWesthoff/AuximCore | max_stars_repo_head_hexsha: 5dcfa9ca51284b1c8f13d081d67ec87232b56c80 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 5 | max_stars_repo_stars_event_min_datetime: 2018-05-15T17:40:27.000Z | max_stars_repo_stars_event_max_datetime: 2021-05-03T23:45:21.000Z
max_issues_repo_path: modules/utils.py | max_issues_repo_name: GeoffreyWesthoff/AuximCore | max_issues_repo_head_hexsha: 5dcfa9ca51284b1c8f13d081d67ec87232b56c80 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 3 | max_issues_repo_issues_event_min_datetime: 2018-05-11T22:56:03.000Z | max_issues_repo_issues_event_max_datetime: 2021-12-24T04:11:30.000Z
max_forks_repo_path: modules/utils.py | max_forks_repo_name: GeoffreyWesthoff/AuximCore | max_forks_repo_head_hexsha: 5dcfa9ca51284b1c8f13d081d67ec87232b56c80 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 4 | max_forks_repo_forks_event_min_datetime: 2020-01-22T21:47:44.000Z | max_forks_repo_forks_event_max_datetime: 2021-12-23T16:03:11.000Z
import redis
import aiohttp
import json
import asyncio
import os
class SettingsLoader:
config = json.load(open('settings.json'))
class LanguageHandler:
def get_language(self, guild_id):
try:
lang = DatabaseHandler.db.get(str(guild_id) + ":language").decode('utf-8')
file = json.load(open('modules/languages/{}.json'.format(lang), encoding='utf-8'))
return file
        except Exception:
            # Fall back to English if no language is stored for the guild or
            # the language file cannot be read.
            return json.load(open('modules/languages/en.json', encoding='utf-8'))
def list_languages(self):
languages = []
for file in os.listdir('modules/languages'):
if file.endswith('.json'):
languages.append(file.replace('.json',''))
return languages
def guild_lang(self, guild_id):
try:
lang = DatabaseHandler.db.get(str(guild_id) + ":language").decode('utf-8')
return lang
        except Exception:
            # No stored language (or Redis unavailable): default to English.
            return 'en'
class PrefixHelper:
def get_prefix(self, message):
guild_id = message.guild.id
try:
prefix = DatabaseHandler.db.get(str(guild_id) + ':prefix').decode('utf-8')
except AttributeError:
prefix = SettingsLoader.config['prefix']
return prefix
class DatabaseHandler:
db = redis.Redis(db=10)
class WebHelper:
session = aiohttp.ClientSession(loop=asyncio.get_event_loop())
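# ---------------------------------------------------------------------------
# Editor's note: hedged usage sketch, not part of the original module. Assumes
# settings.json and the modules/languages/ directory exist (they are read
# above); a Redis server on localhost (db=10) is optional because the helpers
# fall back to defaults when the lookup fails.
# ---------------------------------------------------------------------------
if __name__ == "__main__":  # pragma: no cover
    handler = LanguageHandler()
    print(handler.list_languages())        # e.g. ['en', ...]
    print(handler.guild_lang(123456789))   # 'en' unless a language was stored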
avg_line_length: 24.785714 | max_line_length: 94 | alphanum_fraction: 0.612392

hexsha: 7949f3c9bf5c8b9765a3573fe3b12bc2ee0e162f | size: 1,619 | ext: py | lang: Python
max_stars_repo_path: lib/rucio/db/sqla/migrate_repo/versions/1d96f484df21_asynchronous_rules_and_rule_approval.py | max_stars_repo_name: balrampariyarath/rucio | max_stars_repo_head_hexsha: 8a68017af6b44485a9620566f1afc013838413c1 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2017-08-07T13:34:55.000Z | max_stars_repo_stars_event_max_datetime: 2017-08-07T13:34:55.000Z
max_issues_repo_path: lib/rucio/db/sqla/migrate_repo/versions/1d96f484df21_asynchronous_rules_and_rule_approval.py | max_issues_repo_name: pujanm/rucio | max_issues_repo_head_hexsha: 355a997a5ea213c427a5d841ab151ceb01073eb4 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: lib/rucio/db/sqla/migrate_repo/versions/1d96f484df21_asynchronous_rules_and_rule_approval.py | max_forks_repo_name: pujanm/rucio | max_forks_repo_head_hexsha: 355a997a5ea213c427a5d841ab151ceb01073eb4 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2021-06-17T14:15:15.000Z | max_forks_repo_forks_event_max_datetime: 2021-06-17T14:15:15.000Z
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Vincent Garonne, <vincent.garonne@cern.ch>, 2015-2017
"""asynchronous rules and rule approval
Revision ID: 1d96f484df21
Revises: 1fc15ab60d43
Create Date: 2015-07-08 16:59:23.710208
"""
from alembic.op import (add_column, create_check_constraint,
drop_constraint, drop_column)
from alembic import context
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1d96f484df21' # pylint: disable=invalid-name
down_revision = '3d9813fab443' # pylint: disable=invalid-name
def upgrade():
'''
upgrade method
'''
    if context.get_context().dialect.name not in ('sqlite',):
add_column('rules', sa.Column('ignore_account_limit', sa.Boolean(name='RULES_IGNORE_ACCOUNT_LIMIT_CHK'), default=False))
    if context.get_context().dialect.name not in ('mysql',):
drop_constraint('RULES_STATE_CHK', 'rules')
create_check_constraint('RULES_STATE_CHK', 'rules', 'state IN (\'S\', \'R\', \'U\', \'O\', \'A\', \'I\')')
def downgrade():
'''
downgrade method
'''
    if context.get_context().dialect.name not in ('sqlite',):
drop_column('rules', 'ignore_account_limit')
drop_constraint('RULES_STATE_CHK', 'rules')
create_check_constraint('RULES_STATE_CHK', 'rules', 'state IN (\'S\', \'R\', \'U\', \'O\')')
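# ---------------------------------------------------------------------------
# Editor's note (hedged): migration modules like this one are not imported
# directly; Alembic discovers them and calls upgrade()/downgrade() through its
# CLI against the configured database, for example:
#
#   alembic upgrade 1d96f484df21   # apply this revision
#   alembic downgrade -1           # step back one revision
# ---------------------------------------------------------------------------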
avg_line_length: 34.446809 | max_line_length: 128 | alphanum_fraction: 0.681285

hexsha: 7949f440f70640b56f74d6d4fcc8d64ee29d7e4a | size: 168,495 | ext: py | lang: Python
max_stars_repo_path: py-polars/polars/internals/frame.py | max_stars_repo_name: paulstey/polars | max_stars_repo_head_hexsha: 5ed1c2de8224311bc4df6c1e8fa5b371a6bc99f6 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: py-polars/polars/internals/frame.py | max_issues_repo_name: paulstey/polars | max_issues_repo_head_hexsha: 5ed1c2de8224311bc4df6c1e8fa5b371a6bc99f6 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: py-polars/polars/internals/frame.py | max_forks_repo_name: paulstey/polars | max_forks_repo_head_hexsha: 5ed1c2de8224311bc4df6c1e8fa5b371a6bc99f6 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
"""
Module containing logic related to eager DataFrames
"""
import os
import sys
import warnings
from io import BytesIO, IOBase, StringIO
from pathlib import Path
from typing import (
Any,
BinaryIO,
Callable,
Dict,
Iterable,
Iterator,
List,
Mapping,
Optional,
Sequence,
TextIO,
Tuple,
Type,
TypeVar,
Union,
overload,
)
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal # pragma: no cover
import numpy as np
try:
import pyarrow as pa
import pyarrow.compute
import pyarrow.parquet
_PYARROW_AVAILABLE = True
except ImportError: # pragma: no cover
_PYARROW_AVAILABLE = False
from polars import internals as pli
from polars.internals.construction import (
arrow_to_pydf,
dict_to_pydf,
numpy_to_pydf,
pandas_to_pydf,
sequence_to_pydf,
series_to_pydf,
)
try:
from polars.polars import PyDataFrame, PySeries
_DOCUMENTING = False
except ImportError: # pragma: no cover
_DOCUMENTING = True
from polars._html import NotebookFormatter
from polars.datatypes import Boolean, DataType, UInt32, Utf8, py_type_to_dtype
from polars.utils import (
_prepare_row_count_args,
_process_null_values,
handle_projection_columns,
is_int_sequence,
is_str_sequence,
range_to_slice,
)
try:
import pandas as pd
_PANDAS_AVAILABLE = True
except ImportError: # pragma: no cover
_PANDAS_AVAILABLE = False
# A type variable used to refer to a polars.DataFrame or any subclass of it.
# Used to annotate DataFrame methods which returns the same type as self.
DF = TypeVar("DF", bound="DataFrame")
def wrap_df(df: "PyDataFrame") -> "DataFrame":
return DataFrame._from_pydf(df)
def _prepare_other_arg(other: Any) -> "pli.Series":
# if not a series create singleton series such that it will broadcast
if not isinstance(other, pli.Series):
if isinstance(other, str):
pass
elif isinstance(other, Sequence):
raise ValueError("Operation not supported.")
other = pli.Series("", [other])
return other
class DataFrame:
"""
A DataFrame is a two-dimensional data structure that represents data as a table
with rows and columns.
Parameters
----------
data : dict, Sequence, ndarray, Series, or pandas.DataFrame
Two-dimensional data in various forms. dict must contain Sequences.
Sequence may contain Series or other Sequences.
columns : Sequence of str, default None
Column labels to use for resulting DataFrame. If specified, overrides any
labels already present in the data. Must match data dimensions.
orient : {'col', 'row'}, default None
Whether to interpret two-dimensional data as columns or as rows. If None,
the orientation is inferred by matching the columns and data dimensions. If
this does not yield conclusive results, column orientation is used.
Examples
--------
Constructing a DataFrame from a dictionary:
>>> data = {"a": [1, 2], "b": [3, 4]}
>>> df = pl.DataFrame(data)
>>> df
shape: (2, 2)
┌─────┬─────┐
│ a ┆ b │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1 ┆ 3 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 4 │
└─────┴─────┘
Notice that the dtype is automatically inferred as a polars Int64:
>>> df.dtypes
[<class 'polars.datatypes.Int64'>, <class 'polars.datatypes.Int64'>]
In order to specify dtypes for your columns, initialize the DataFrame with a list
of Series instead:
>>> data = [
... pl.Series("col1", [1, 2], dtype=pl.Float32),
... pl.Series("col2", [3, 4], dtype=pl.Int64),
... ]
>>> df2 = pl.DataFrame(data)
>>> df2
shape: (2, 2)
┌──────┬──────┐
│ col1 ┆ col2 │
│ --- ┆ --- │
│ f32 ┆ i64 │
╞══════╪══════╡
│ 1 ┆ 3 │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 2 ┆ 4 │
└──────┴──────┘
Constructing a DataFrame from a numpy ndarray, specifying column names:
>>> import numpy as np
>>> data = np.array([(1, 2), (3, 4)], dtype=np.int64)
>>> df3 = pl.DataFrame(data, columns=["a", "b"], orient="col")
>>> df3
shape: (2, 2)
┌─────┬─────┐
│ a ┆ b │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1 ┆ 3 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 4 │
└─────┴─────┘
Constructing a DataFrame from a list of lists, row orientation inferred:
>>> data = [[1, 2, 3], [4, 5, 6]]
>>> df4 = pl.DataFrame(data, columns=["a", "b", "c"])
>>> df4
shape: (2, 3)
┌─────┬─────┬─────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ 1 ┆ 2 ┆ 3 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 4 ┆ 5 ┆ 6 │
└─────┴─────┴─────┘
"""
def __init__(
self,
data: Optional[
Union[
Dict[str, Sequence[Any]],
Sequence[Any],
np.ndarray,
"pa.Table",
"pd.DataFrame",
"pli.Series",
]
] = None,
columns: Optional[Sequence[str]] = None,
orient: Optional[str] = None,
):
if data is None:
self._df = dict_to_pydf({}, columns=columns)
elif isinstance(data, dict):
self._df = dict_to_pydf(data, columns=columns)
elif isinstance(data, np.ndarray):
self._df = numpy_to_pydf(data, columns=columns, orient=orient)
elif _PYARROW_AVAILABLE and isinstance(data, pa.Table):
self._df = arrow_to_pydf(data, columns=columns)
elif isinstance(data, Sequence) and not isinstance(data, str):
self._df = sequence_to_pydf(data, columns=columns, orient=orient)
elif isinstance(data, pli.Series):
self._df = series_to_pydf(data, columns=columns)
elif _PANDAS_AVAILABLE and isinstance(data, pd.DataFrame):
if not _PYARROW_AVAILABLE:
raise ImportError( # pragma: no cover
"'pyarrow' is required for converting a pandas DataFrame to a polars DataFrame."
)
self._df = pandas_to_pydf(data, columns=columns)
else:
raise ValueError("DataFrame constructor not called properly.")
@classmethod
def _from_pydf(cls: Type[DF], py_df: "PyDataFrame") -> DF:
"""
Construct Polars DataFrame from FFI PyDataFrame object.
"""
df = cls.__new__(cls)
df._df = py_df
return df
@classmethod
def _from_dicts(
cls: Type[DF],
data: Sequence[Dict[str, Any]],
) -> DF:
pydf = PyDataFrame.read_dicts(data)
return cls._from_pydf(pydf)
@classmethod
def _from_dict(
cls: Type[DF],
data: Dict[str, Sequence[Any]],
columns: Optional[Sequence[str]] = None,
) -> DF:
"""
Construct a DataFrame from a dictionary of sequences.
Parameters
----------
data : dict of sequences
Two-dimensional data represented as a dictionary. dict must contain
Sequences.
columns : Sequence of str, default None
Column labels to use for resulting DataFrame. If specified, overrides any
labels already present in the data. Must match data dimensions.
Returns
-------
DataFrame
"""
return cls._from_pydf(dict_to_pydf(data, columns=columns))
@classmethod
def _from_records(
cls: Type[DF],
data: Union[np.ndarray, Sequence[Sequence[Any]]],
columns: Optional[Sequence[str]] = None,
orient: Optional[str] = None,
) -> DF:
"""
Construct a DataFrame from a numpy ndarray or sequence of sequences.
Parameters
----------
data : numpy ndarray or Sequence of sequences
Two-dimensional data represented as numpy ndarray or sequence of sequences.
columns : Sequence of str, default None
Column labels to use for resulting DataFrame. Must match data dimensions.
If not specified, columns will be named `column_0`, `column_1`, etc.
orient : {'col', 'row'}, default None
Whether to interpret two-dimensional data as columns or as rows. If None,
the orientation is inferred by matching the columns and data dimensions. If
this does not yield conclusive results, column orientation is used.
Returns
-------
DataFrame
"""
if isinstance(data, np.ndarray):
pydf = numpy_to_pydf(data, columns=columns, orient=orient)
else:
pydf = sequence_to_pydf(data, columns=columns, orient=orient)
return cls._from_pydf(pydf)
@classmethod
def _from_arrow(
cls: Type[DF],
data: "pa.Table",
columns: Optional[Sequence[str]] = None,
rechunk: bool = True,
) -> DF:
"""
Construct a DataFrame from an Arrow table.
This operation will be zero copy for the most part. Types that are not
supported by Polars may be cast to the closest supported type.
Parameters
----------
data : numpy ndarray or Sequence of sequences
Two-dimensional data represented as Arrow table.
columns : Sequence of str, default None
Column labels to use for resulting DataFrame. Must match data dimensions.
            If not specified, existing Arrow table column names are used, with missing names
named as `column_0`, `column_1`, etc.
rechunk : bool, default True
Make sure that all data is contiguous.
Returns
-------
DataFrame
"""
return cls._from_pydf(arrow_to_pydf(data, columns=columns, rechunk=rechunk))
@classmethod
def _from_pandas(
cls: Type[DF],
data: "pd.DataFrame",
columns: Optional[Sequence[str]] = None,
rechunk: bool = True,
nan_to_none: bool = True,
) -> DF:
"""
Construct a Polars DataFrame from a pandas DataFrame.
Parameters
----------
data : pandas DataFrame
Two-dimensional data represented as a pandas DataFrame.
columns : Sequence of str, default None
Column labels to use for resulting DataFrame. If specified, overrides any
labels already present in the data. Must match data dimensions.
rechunk : bool, default True
Make sure that all data is contiguous.
nan_to_none : bool, default True
If data contains NaN values PyArrow will convert the NaN to None
Returns
-------
DataFrame
"""
# path for table without rows that keeps datatype
if data.shape[0] == 0:
series = []
for name in data.columns:
pd_series = data[name]
if pd_series.dtype == np.dtype("O"):
series.append(pli.Series(name, [], dtype=Utf8))
else:
col = pli.Series(name, pd_series)
series.append(pli.Series(name, col))
return cls(series)
return cls._from_pydf(
pandas_to_pydf(
data, columns=columns, rechunk=rechunk, nan_to_none=nan_to_none
)
)
@classmethod
def _read_csv(
cls: Type[DF],
file: Union[str, BinaryIO, bytes],
has_header: bool = True,
columns: Optional[Union[List[int], List[str]]] = None,
sep: str = ",",
comment_char: Optional[str] = None,
quote_char: Optional[str] = r'"',
skip_rows: int = 0,
dtypes: Optional[
Union[Mapping[str, Type[DataType]], List[Type[DataType]]]
] = None,
null_values: Optional[Union[str, List[str], Dict[str, str]]] = None,
ignore_errors: bool = False,
parse_dates: bool = False,
n_threads: Optional[int] = None,
infer_schema_length: Optional[int] = 100,
batch_size: int = 8192,
n_rows: Optional[int] = None,
encoding: str = "utf8",
low_memory: bool = False,
rechunk: bool = True,
skip_rows_after_header: int = 0,
row_count_name: Optional[str] = None,
row_count_offset: int = 0,
) -> DF:
"""
see pl.read_csv
"""
self = cls.__new__(cls)
path: Optional[str]
if isinstance(file, str):
path = file
else:
path = None
if isinstance(file, BytesIO):
file = file.getvalue()
if isinstance(file, StringIO):
file = file.getvalue().encode()
dtype_list: Optional[List[Tuple[str, Type[DataType]]]] = None
dtype_slice: Optional[List[Type[DataType]]] = None
if dtypes is not None:
if isinstance(dtypes, dict):
dtype_list = []
for k, v in dtypes.items():
dtype_list.append((k, py_type_to_dtype(v)))
elif isinstance(dtypes, list):
dtype_slice = dtypes
else:
raise ValueError("dtype arg should be list or dict")
processed_null_values = _process_null_values(null_values)
if isinstance(file, str) and "*" in file:
dtypes_dict = None
if dtype_list is not None:
dtypes_dict = {name: dt for (name, dt) in dtype_list}
if dtype_slice is not None:
raise ValueError(
"cannot use glob patterns and unamed dtypes as `dtypes` argument; Use dtypes: Mapping[str, Type[DataType]"
)
from polars import scan_csv
scan = scan_csv(
file,
has_header=has_header,
sep=sep,
comment_char=comment_char,
quote_char=quote_char,
skip_rows=skip_rows,
dtypes=dtypes_dict,
null_values=null_values,
ignore_errors=ignore_errors,
infer_schema_length=infer_schema_length,
n_rows=n_rows,
low_memory=low_memory,
rechunk=rechunk,
skip_rows_after_header=skip_rows_after_header,
row_count_name=row_count_name,
row_count_offset=row_count_offset,
)
if columns is None:
return self._from_pydf(scan.collect()._df)
elif is_str_sequence(columns, False):
return self._from_pydf(scan.select(columns).collect()._df)
else:
raise ValueError(
"cannot use glob patterns and integer based projection as `columns` argument; Use columns: List[str]"
)
projection, columns = handle_projection_columns(columns)
self._df = PyDataFrame.read_csv(
file,
infer_schema_length,
batch_size,
has_header,
ignore_errors,
n_rows,
skip_rows,
projection,
sep,
rechunk,
columns,
encoding,
n_threads,
path,
dtype_list,
dtype_slice,
low_memory,
comment_char,
quote_char,
processed_null_values,
parse_dates,
skip_rows_after_header,
_prepare_row_count_args(row_count_name, row_count_offset),
)
return self
@classmethod
def _read_parquet(
cls: Type[DF],
file: Union[str, BinaryIO],
columns: Optional[Union[List[int], List[str]]] = None,
n_rows: Optional[int] = None,
parallel: bool = True,
row_count_name: Optional[str] = None,
row_count_offset: int = 0,
) -> DF:
"""
Read into a DataFrame from a parquet file.
Parameters
----------
file
Path to a file or a file-like object. Any valid filepath can be used.
columns
Columns to select. Accepts a list of column indices (starting at zero) or a list of column names.
n_rows
Stop reading from parquet file after reading ``n_rows``.
parallel
Read the parquet file in parallel. The single threaded reader consumes less memory.
"""
if isinstance(file, str) and "*" in file:
from polars import scan_parquet
scan = scan_parquet(
file,
n_rows=n_rows,
rechunk=True,
parallel=parallel,
row_count_name=row_count_name,
row_count_offset=row_count_offset,
)
if columns is None:
return cls._from_pydf(scan.collect()._df)
elif is_str_sequence(columns, False):
return cls._from_pydf(scan.select(columns).collect()._df)
else:
raise ValueError(
"cannot use glob patterns and integer based projection as `columns` argument; Use columns: List[str]"
)
projection, columns = handle_projection_columns(columns)
self = cls.__new__(cls)
self._df = PyDataFrame.read_parquet(
file,
columns,
projection,
n_rows,
parallel,
_prepare_row_count_args(row_count_name, row_count_offset),
)
return self
@classmethod
def _read_avro(
cls: Type[DF],
file: Union[str, BinaryIO],
n_rows: Optional[int] = None,
) -> DF:
"""
Read into a DataFrame from Apache Avro format.
Parameters
----------
file
Path to a file or a file-like object.
n_rows
Stop reading from Apache Avro file after reading ``n_rows``.
Returns
-------
DataFrame
"""
self = cls.__new__(cls)
self._df = PyDataFrame.read_avro(file, n_rows)
return self
@classmethod
def _read_ipc(
cls: Type[DF],
file: Union[str, BinaryIO],
columns: Optional[Union[List[int], List[str]]] = None,
n_rows: Optional[int] = None,
row_count_name: Optional[str] = None,
row_count_offset: int = 0,
) -> DF:
"""
Read into a DataFrame from Arrow IPC stream format. This is also called the Feather (v2) format.
Parameters
----------
file
Path to a file or a file-like object.
columns
Columns to select. Accepts a list of column indices (starting at zero) or a list of column names.
n_rows
Stop reading from IPC file after reading ``n_rows``.
Returns
-------
DataFrame
"""
if isinstance(file, str) and "*" in file:
from polars import scan_ipc
scan = scan_ipc(
file,
n_rows=n_rows,
rechunk=True,
row_count_name=row_count_name,
row_count_offset=row_count_offset,
)
            if columns is None:
                return cls._from_pydf(scan.collect()._df)
            elif is_str_sequence(columns, False):
                return cls._from_pydf(scan.select(columns).collect()._df)
else:
raise ValueError(
"cannot use glob patterns and integer based projection as `columns` argument; Use columns: List[str]"
)
projection, columns = handle_projection_columns(columns)
self = cls.__new__(cls)
self._df = PyDataFrame.read_ipc(
file,
columns,
projection,
n_rows,
_prepare_row_count_args(row_count_name, row_count_offset),
)
return self
@classmethod
def _read_json(
cls: Type[DF],
file: Union[str, IOBase],
json_lines: bool = False,
) -> DF:
"""
See Also pl.read_json
"""
if isinstance(file, StringIO):
file = BytesIO(file.getvalue().encode())
self = cls.__new__(cls)
self._df = PyDataFrame.read_json(file, json_lines)
return self
def to_arrow(self) -> "pa.Table":
"""
Collect the underlying arrow arrays in an Arrow Table.
This operation is mostly zero copy.
Data types that do copy:
- CategoricalType
"""
if not _PYARROW_AVAILABLE:
raise ImportError( # pragma: no cover
"'pyarrow' is required for converting a polars DataFrame to an Arrow Table."
)
record_batches = self._df.to_arrow()
return pa.Table.from_batches(record_batches)
@overload
def to_dict(self, as_series: Literal[True] = ...) -> Dict[str, "pli.Series"]:
...
@overload
def to_dict(self, as_series: Literal[False]) -> Dict[str, List[Any]]:
...
@overload
def to_dict(
self, as_series: bool = True
) -> Union[Dict[str, "pli.Series"], Dict[str, List[Any]]]:
...
def to_dict(
self, as_series: bool = True
) -> Union[Dict[str, "pli.Series"], Dict[str, List[Any]]]:
"""
Convert DataFrame to a dictionary mapping column name to values.
Parameters
----------
as_series
True -> Values are series
False -> Values are List[Any]
Examples
--------
>>> df = pl.DataFrame(
... {
... "A": [1, 2, 3, 4, 5],
... "fruits": ["banana", "banana", "apple", "apple", "banana"],
... "B": [5, 4, 3, 2, 1],
... "cars": ["beetle", "audi", "beetle", "beetle", "beetle"],
... "optional": [28, 300, None, 2, -30],
... }
... )
>>> df
shape: (5, 5)
┌─────┬────────┬─────┬────────┬──────────┐
│ A ┆ fruits ┆ B ┆ cars ┆ optional │
│ --- ┆ --- ┆ --- ┆ --- ┆ --- │
│ i64 ┆ str ┆ i64 ┆ str ┆ i64 │
╞═════╪════════╪═════╪════════╪══════════╡
│ 1 ┆ banana ┆ 5 ┆ beetle ┆ 28 │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤
│ 2 ┆ banana ┆ 4 ┆ audi ┆ 300 │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤
│ 3 ┆ apple ┆ 3 ┆ beetle ┆ null │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤
│ 4 ┆ apple ┆ 2 ┆ beetle ┆ 2 │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤
│ 5 ┆ banana ┆ 1 ┆ beetle ┆ -30 │
└─────┴────────┴─────┴────────┴──────────┘
>>> df.to_dict(as_series=False)
{'A': [1, 2, 3, 4, 5],
'fruits': ['banana', 'banana', 'apple', 'apple', 'banana'],
'B': [5, 4, 3, 2, 1],
'cars': ['beetle', 'audi', 'beetle', 'beetle', 'beetle'],
'optional': [28, 300, None, 2, -30]}
>>> df.to_dict(as_series=True)
{'A': shape: (5,)
Series: 'A' [i64]
[
1
2
3
4
5
], 'fruits': shape: (5,)
Series: 'fruits' [str]
[
"banana"
"banana"
"apple"
"apple"
"banana"
], 'B': shape: (5,)
Series: 'B' [i64]
[
5
4
3
2
1
], 'cars': shape: (5,)
Series: 'cars' [str]
[
"beetle"
"audi"
"beetle"
"beetle"
"beetle"
], 'optional': shape: (5,)
Series: 'optional' [i64]
[
28
300
null
2
-30
]}
"""
if as_series:
return {s.name: s for s in self}
else:
return {s.name: s.to_list() for s in self}
@overload
def to_json(
self,
file: Optional[Union[IOBase, str, Path]] = ...,
pretty: bool = ...,
row_oriented: bool = ...,
json_lines: bool = ...,
*,
to_string: Literal[True],
) -> str:
...
@overload
def to_json(
self,
file: Optional[Union[IOBase, str, Path]] = ...,
pretty: bool = ...,
row_oriented: bool = ...,
json_lines: bool = ...,
*,
to_string: Literal[False] = ...,
) -> None:
...
@overload
def to_json(
self,
file: Optional[Union[IOBase, str, Path]] = ...,
pretty: bool = ...,
row_oriented: bool = ...,
json_lines: bool = ...,
*,
to_string: bool = ...,
) -> Optional[str]:
...
def to_json(
self,
file: Optional[Union[IOBase, str, Path]] = None,
pretty: bool = False,
row_oriented: bool = False,
json_lines: bool = False,
*,
to_string: bool = False,
) -> Optional[str]:
"""
Serialize to JSON representation.
Parameters
----------
file
            Write to this file instead of returning a string.
pretty
Pretty serialize json.
row_oriented
Write to row oriented json. This is slower, but more common.
json_lines
Write to Json Lines format
to_string
Ignore file argument and return a string.
"""
to_string_io = (file is not None) and isinstance(file, StringIO)
if to_string or file is None or to_string_io:
with BytesIO() as buf:
self._df.to_json(buf, pretty, row_oriented, json_lines)
json_bytes = buf.getvalue()
json_str = json_bytes.decode("utf8")
if to_string_io:
file.write(json_str) # type: ignore[union-attr]
else:
return json_str
else:
self._df.to_json(file, pretty, row_oriented, json_lines)
return None
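    # Editor's note (hedged sketch): typical calls to the method above are
    #   json_str = df.to_json(to_string=True)      # return an in-memory string
    #   df.to_json("frame.json", json_lines=True)  # write JSON Lines to a path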
def to_pandas(
self, *args: Any, date_as_object: bool = False, **kwargs: Any
) -> "pd.DataFrame": # noqa: F821
"""
Cast to a Pandas DataFrame. This requires that Pandas is installed.
This operation clones data.
Parameters
----------
args
Arguments will be sent to pyarrow.Table.to_pandas.
date_as_object
Cast dates to objects. If False, convert to datetime64[ns] dtype.
kwargs
Arguments will be sent to pyarrow.Table.to_pandas.
Examples
--------
>>> import pandas
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> pandas_df = df.to_pandas()
>>> type(pandas_df)
<class 'pandas.core.frame.DataFrame'>
"""
record_batches = self._df.to_pandas()
tbl = pa.Table.from_batches(record_batches)
return tbl.to_pandas(*args, date_as_object=date_as_object, **kwargs)
def to_csv(
self,
file: Optional[Union[TextIO, BytesIO, str, Path]] = None,
has_header: bool = True,
sep: str = ",",
) -> Optional[str]:
"""
Write Dataframe to comma-separated values file (csv).
Parameters
----------
file
File path to which the file should be written.
has_header
Whether or not to include header in the CSV output.
sep
Separate CSV fields with this symbol.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3, 4, 5],
... "bar": [6, 7, 8, 9, 10],
... "ham": ["a", "b", "c", "d", "e"],
... }
... )
>>> df.to_csv("new_file.csv", sep=",")
"""
if file is None:
buffer = BytesIO()
self._df.to_csv(buffer, has_header, ord(sep))
return str(buffer.getvalue(), encoding="utf-8")
if isinstance(file, Path):
file = str(file)
self._df.to_csv(file, has_header, ord(sep))
return None
def to_avro(
self,
file: Union[BinaryIO, BytesIO, str, Path],
compression: Literal["uncompressed", "snappy", "deflate"] = "uncompressed",
) -> None:
"""
Write to Apache Avro file.
Parameters
----------
file
File path to which the file should be written.
compression
Compression method. Choose one of:
- "uncompressed"
- "snappy"
- "deflate"
"""
if isinstance(file, Path):
file = str(file)
self._df.to_avro(file, compression)
def to_ipc(
self,
file: Union[BinaryIO, BytesIO, str, Path],
compression: Optional[Literal["uncompressed", "lz4", "zstd"]] = "uncompressed",
) -> None:
"""
Write to Arrow IPC binary stream, or a feather file.
Parameters
----------
file
File path to which the file should be written.
compression
Compression method. Choose one of:
- "uncompressed"
- "lz4"
- "zstd"
"""
if compression is None:
compression = "uncompressed"
if isinstance(file, Path):
file = str(file)
self._df.to_ipc(file, compression)
def to_dicts(self) -> List[Dict[str, Any]]:
pydf = self._df
names = self.columns
return [
{k: v for k, v in zip(names, pydf.row_tuple(i))}
for i in range(0, self.height)
]
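    # Editor's note (hedged sketch): to_dicts() returns one dict per row, e.g.
    #   pl.DataFrame({"a": [1, 2], "b": ["x", "y"]}).to_dicts()
    #   == [{'a': 1, 'b': 'x'}, {'a': 2, 'b': 'y'}]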
def transpose(
self: DF,
include_header: bool = False,
header_name: str = "column",
column_names: Optional[Union[Iterator[str], Sequence[str]]] = None,
) -> DF:
"""
Transpose a DataFrame over the diagonal.
Parameters
----------
include_header:
If set, the column names will be added as first column.
header_name:
If `include_header` is set, this determines the name of the column that will be inserted
column_names:
Optional generator/iterator that yields column names. Will be used to replace the columns in the DataFrame.
Notes
-----
This is a very expensive operation. Perhaps you can do it differently.
Returns
-------
DataFrame
Examples
--------
>>> df = pl.DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]})
>>> df.transpose(include_header=True)
shape: (2, 4)
┌────────┬──────────┬──────────┬──────────┐
│ column ┆ column_0 ┆ column_1 ┆ column_2 │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 ┆ i64 │
╞════════╪══════════╪══════════╪══════════╡
│ a ┆ 1 ┆ 2 ┆ 3 │
├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤
│ b ┆ 1 ┆ 2 ┆ 3 │
└────────┴──────────┴──────────┴──────────┘
# replace the auto generated column names with a list
>>> df.transpose(include_header=False, column_names=["a", "b", "c"])
shape: (2, 3)
┌─────┬─────┬─────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ 1 ┆ 2 ┆ 3 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 1 ┆ 2 ┆ 3 │
└─────┴─────┴─────┘
Include the header as a separate column
>>> df.transpose(
... include_header=True, header_name="foo", column_names=["a", "b", "c"]
... )
shape: (2, 4)
┌─────┬─────┬─────┬─────┐
│ foo ┆ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 ┆ i64 │
╞═════╪═════╪═════╪═════╡
│ a ┆ 1 ┆ 2 ┆ 3 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ b ┆ 1 ┆ 2 ┆ 3 │
└─────┴─────┴─────┴─────┘
Replace the auto generated column with column names from a generator function
>>> def name_generator():
... base_name = "my_column_"
... count = 0
... while True:
... yield f"{base_name}{count}"
... count += 1
...
>>> df.transpose(include_header=False, column_names=name_generator())
shape: (2, 3)
┌─────────────┬─────────────┬─────────────┐
│ my_column_0 ┆ my_column_1 ┆ my_column_2 │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ i64 │
╞═════════════╪═════════════╪═════════════╡
│ 1 ┆ 2 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 1 ┆ 2 ┆ 3 │
└─────────────┴─────────────┴─────────────┘
"""
df = self._from_pydf(self._df.transpose(include_header, header_name))
if column_names is not None:
names = []
n = df.width
if include_header:
names.append(header_name)
n -= 1
column_names = iter(column_names)
for _ in range(n):
names.append(next(column_names))
df.columns = names
return df
def to_parquet(
self,
file: Union[str, Path, BytesIO],
compression: Optional[
Union[
Literal[
"uncompressed", "snappy", "gzip", "lzo", "brotli", "lz4", "zstd"
],
str,
]
] = "snappy",
statistics: bool = False,
use_pyarrow: bool = False,
**kwargs: Any,
) -> None:
"""
        Write the DataFrame to disk in parquet format.
Parameters
----------
file
File path to which the file should be written.
compression
Compression method. Choose one of:
- "uncompressed" (not supported by pyarrow)
- "snappy"
- "gzip"
- "lzo"
- "brotli"
- "lz4"
- "zstd"
statistics
Write statistics to the parquet headers. This requires extra compute.
use_pyarrow
Use C++ parquet implementation vs rust parquet implementation.
At the moment C++ supports more features.
**kwargs are passed to pyarrow.parquet.write_table
"""
if compression is None:
compression = "uncompressed"
if isinstance(file, Path):
file = str(file)
if use_pyarrow:
if not _PYARROW_AVAILABLE:
raise ImportError( # pragma: no cover
"'pyarrow' is required when using 'to_parquet(..., use_pyarrow=True)'."
)
tbl = self.to_arrow()
data = {}
for i, column in enumerate(tbl):
# extract the name before casting
if column._name is None:
name = f"column_{i}"
else:
name = column._name
data[name] = column
tbl = pa.table(data)
pa.parquet.write_table(
table=tbl,
where=file,
compression=compression,
write_statistics=statistics,
**kwargs,
)
else:
self._df.to_parquet(file, compression, statistics)
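    # Editor's note (hedged sketch): e.g. df.to_parquet("frame.parquet",
    # compression="zstd", statistics=True) uses the native writer above, while
    # use_pyarrow=True routes the write through pyarrow.parquet.write_table.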
def to_numpy(self) -> np.ndarray:
"""
Convert DataFrame to a 2d numpy array.
This operation clones data.
Examples
--------
>>> df = pl.DataFrame(
... {"foo": [1, 2, 3], "bar": [6, 7, 8], "ham": ["a", "b", "c"]}
... )
>>> numpy_array = df.to_numpy()
>>> type(numpy_array)
<class 'numpy.ndarray'>
"""
out = self._df.to_numpy()
if out is None:
return np.vstack(
[self.to_series(i).to_numpy() for i in range(self.width)]
).T
else:
return out
def __getstate__(self): # type: ignore
return self.get_columns()
def __setstate__(self, state): # type: ignore
self._df = DataFrame(state)._df
def __mul__(
self: DF, other: Union["DataFrame", "pli.Series", int, float, bool]
) -> DF:
if isinstance(other, DataFrame):
return self._from_pydf(self._df.mul_df(other._df))
other = _prepare_other_arg(other)
return self._from_pydf(self._df.mul(other._s))
def __truediv__(
self: DF, other: Union["DataFrame", "pli.Series", int, float, bool]
) -> DF:
if isinstance(other, DataFrame):
return self._from_pydf(self._df.div_df(other._df))
other = _prepare_other_arg(other)
return self._from_pydf(self._df.div(other._s))
def __add__(
self: DF,
other: Union["DataFrame", "pli.Series", int, float, bool, str],
) -> DF:
if isinstance(other, DataFrame):
return self._from_pydf(self._df.add_df(other._df))
other = _prepare_other_arg(other)
return self._from_pydf(self._df.add(other._s))
def __sub__(
self: DF, other: Union["DataFrame", "pli.Series", int, float, bool]
) -> DF:
if isinstance(other, DataFrame):
return self._from_pydf(self._df.sub_df(other._df))
other = _prepare_other_arg(other)
return self._from_pydf(self._df.sub(other._s))
def __mod__(
self: DF, other: Union["DataFrame", "pli.Series", int, float, bool]
) -> DF:
if isinstance(other, DataFrame):
return self._from_pydf(self._df.rem_df(other._df))
other = _prepare_other_arg(other)
return self._from_pydf(self._df.rem(other._s))
def __str__(self) -> str:
return self._df.as_str()
def __repr__(self) -> str:
return self.__str__()
def __getattr__(self, item: Any) -> "PySeries":
"""
Access columns as attribute.
"""
# it is important that we return an AttributeError here
# this is used by ipython to check some private
# `_ipython_canary_method_should_not_exist_`
        # if we return any error other than AttributeError, pretty printing
        # will not work in notebooks.
# See: https://github.com/jupyter/notebook/issues/2014
if item.startswith("_"):
raise AttributeError(item)
try:
warnings.warn("accessing series as Attribute of a DataFrame is deprecated")
return pli.wrap_s(self._df.column(item))
except Exception:
raise AttributeError(item)
def __iter__(self) -> Iterator[Any]:
return self.get_columns().__iter__()
def find_idx_by_name(self, name: str) -> int:
"""
Find the index of a column by name.
Parameters
----------
name
Name of the column to find.
Examples
--------
>>> df = pl.DataFrame(
... {"foo": [1, 2, 3], "bar": [6, 7, 8], "ham": ["a", "b", "c"]}
... )
>>> df.find_idx_by_name("ham")
2
"""
return self._df.find_idx_by_name(name)
def _pos_idx(self, idx: int, dim: int) -> int:
if idx >= 0:
return idx
else:
return self.shape[dim] + idx
# __getitem__() mostly returns a dataframe. The major exception is when a string is passed in. Note that there are
# more subtle cases possible where a non-string value leads to a Series.
@overload
def __getitem__(self, item: str) -> "pli.Series":
...
@overload
def __getitem__(
self: DF,
item: Union[
int, range, slice, np.ndarray, "pli.Expr", "pli.Series", List, tuple
],
) -> DF:
...
def __getitem__(
self: DF,
item: Union[
str, int, range, slice, np.ndarray, "pli.Expr", "pli.Series", List, tuple
],
) -> Union[DF, "pli.Series"]:
"""
Does quite a lot. Read the comments.
"""
if isinstance(item, pli.Expr):
return self.select(item)
# select rows and columns at once
# every 2d selection, i.e. tuple is row column order, just like numpy
if isinstance(item, tuple) and len(item) == 2:
row_selection, col_selection = item
# df[:, unknown]
if isinstance(row_selection, slice):
# multiple slices
# df[:, :]
if isinstance(col_selection, slice):
# slice can be
# by index
# [1:8]
# or by column name
# ["foo":"bar"]
# first we make sure that the slice is by index
start = col_selection.start
stop = col_selection.stop
if isinstance(col_selection.start, str):
start = self.find_idx_by_name(col_selection.start)
if isinstance(col_selection.stop, str):
stop = self.find_idx_by_name(col_selection.stop) + 1
col_selection = slice(start, stop, col_selection.step)
df = self.__getitem__(self.columns[col_selection])
return df[row_selection]
# slice and boolean mask
# df[:2, [True, False, True]]
if isinstance(col_selection, (Sequence, pli.Series)):
if (
isinstance(col_selection[0], bool)
or isinstance(col_selection, pli.Series)
and col_selection.dtype() == Boolean
):
df = self.__getitem__(row_selection)
select = []
for col, valid in zip(df.columns, col_selection):
if valid:
select.append(col)
return df.select(select)
# single slice
# df[:, unknown]
series = self.__getitem__(col_selection)
# s[:]
pli.wrap_s(series[row_selection])
# df[2, :] (select row as df)
if isinstance(row_selection, int):
if isinstance(col_selection, (slice, list, np.ndarray)):
df = self[:, col_selection]
return df.slice(row_selection, 1)
# df[2, "a"]
if isinstance(col_selection, str):
return self[col_selection][row_selection]
# column selection can be "a" and ["a", "b"]
if isinstance(col_selection, str):
col_selection = [col_selection]
# df[:, 1]
if isinstance(col_selection, int):
series = self.to_series(col_selection)
return series[row_selection]
if isinstance(col_selection, list):
# df[:, [1, 2]]
# select by column indexes
if isinstance(col_selection[0], int):
series_list = [self.to_series(i) for i in col_selection]
df = self.__class__(series_list)
return df[row_selection]
df = self.__getitem__(col_selection)
return df.__getitem__(row_selection)
# select single column
# df["foo"]
if isinstance(item, str):
return pli.wrap_s(self._df.column(item))
# df[idx]
if isinstance(item, int):
return self.slice(self._pos_idx(item, dim=0), 1)
# df[range(n)]
if isinstance(item, range):
return self[range_to_slice(item)]
# df[:]
if isinstance(item, slice):
# special case df[::-1]
if item.start is None and item.stop is None and item.step == -1:
return self.select(pli.col("*").reverse())
if getattr(item, "end", False):
raise ValueError("A slice with steps larger than 1 is not supported.")
if item.start is None:
start = 0
else:
start = item.start
if item.stop is None:
stop = self.height
else:
stop = item.stop
length = stop - start
if item.step is None:
# df[start:stop]
return self.slice(start, length)
else:
# df[start:stop:step]
return self.select(
pli.col("*").slice(start, length).take_every(item.step)
)
# select rows by numpy mask or index
# df[[1, 2, 3]]
# df[[true, false, true]]
if isinstance(item, np.ndarray):
if item.dtype == int:
return self._from_pydf(self._df.take(item))
if isinstance(item[0], str):
return self._from_pydf(self._df.select(item))
if item.dtype == bool:
return self._from_pydf(self._df.filter(pli.Series("", item).inner()))
if isinstance(item, Sequence):
if isinstance(item[0], str):
# select multiple columns
# df[["foo", "bar"]]
return self._from_pydf(self._df.select(item))
elif isinstance(item[0], pli.Expr):
return self.select(item)
elif type(item[0]) == bool:
item = pli.Series("", item) # fall through to next if isinstance
elif is_int_sequence(item):
return self._from_pydf(
self._df.take([self._pos_idx(i, dim=0) for i in item])
)
if isinstance(item, pli.Series):
dtype = item.dtype
if dtype == Boolean:
return self._from_pydf(self._df.filter(item.inner()))
if dtype == UInt32:
return self._from_pydf(self._df.take_with_series(item.inner()))
# if no data has been returned, the operation is not supported
raise NotImplementedError
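    # Editor's note (hedged summary of the branches above): supported patterns
    # include df["foo"] (column as Series), df[["foo", "bar"]] (column subset),
    # df[2] and df[1:3] (row selection), boolean masks such as
    # df[[True, False, ...]], and 2D forms like df[1, "foo"], df[:, 0] or
    # df[:2, ["foo"]].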
def __setitem__(
self, key: Union[str, List, Tuple[Any, Union[str, int]]], value: Any
) -> None:
warnings.warn(
"setting a DataFrame by indexing is deprecated; Consider using DataFrame.with_column"
)
# df["foo"] = series
if isinstance(key, str):
try:
self.replace(key, pli.Series(key, value))
except Exception:
self.hstack([pli.Series(key, value)], in_place=True)
# df[["C", "D"]]
elif isinstance(key, list):
value = np.array(value)
if len(value.shape) != 2:
raise ValueError("can only set multiple columns with 2D matrix")
if value.shape[1] != len(key):
raise ValueError(
"matrix columns should be equal to list use to determine column names"
)
for (i, name) in enumerate(key):
self[name] = value[:, i]
# df[a, b]
elif isinstance(key, tuple):
row_selection, col_selection = key
# get series column selection
if isinstance(col_selection, str):
s = self.__getitem__(col_selection)
elif isinstance(col_selection, int):
s = self[:, col_selection] # type: ignore
else:
raise ValueError(f"column selection not understood: {col_selection}")
# dispatch to __setitem__ of Series to do modification
s[row_selection] = value
# now find the location to place series
# df[idx]
if isinstance(col_selection, int):
self.replace_at_idx(col_selection, s)
# df["foo"]
elif isinstance(col_selection, str):
self.replace(col_selection, s)
else:
raise NotImplementedError
def __len__(self) -> int:
return self.height
def _repr_html_(self) -> str:
"""
Used by jupyter notebooks to get a html table.
Output rows and columns can be modified by setting the following ENVIRONMENT variables:
* POLARS_FMT_MAX_COLS: set the number of columns
* POLARS_FMT_MAX_ROWS: set the number of rows
"""
max_cols = int(os.environ.get("POLARS_FMT_MAX_COLS", default=75))
max_rows = int(os.environ.get("POLARS_FMT_MAX_ROWS", default=25))
return "\n".join(NotebookFormatter(self, max_cols, max_rows).render())
def to_series(self, index: int = 0) -> "pli.Series":
"""
Select column as Series at index location.
Parameters
----------
index
Location of selection.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.to_series(1)
shape: (3,)
Series: 'bar' [i64]
[
6
7
8
]
"""
return pli.wrap_s(self._df.select_at_idx(index))
def rename(self: DF, mapping: Dict[str, str]) -> DF:
"""
Rename column names.
Parameters
----------
mapping
Key value pairs that map from old name to new name.
Examples
--------
>>> df = pl.DataFrame(
... {"foo": [1, 2, 3], "bar": [6, 7, 8], "ham": ["a", "b", "c"]}
... )
>>> df.rename({"foo": "apple"})
shape: (3, 3)
┌───────┬─────┬─────┐
│ apple ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═══════╪═════╪═════╡
│ 1 ┆ 6 ┆ a │
├╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b │
├╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 ┆ c │
└───────┴─────┴─────┘
"""
return self._from_pydf(
self.lazy().rename(mapping).collect(no_optimization=True)._df
)
def insert_at_idx(self, index: int, series: "pli.Series") -> None:
"""
Insert a Series at a certain column index. This operation is in place.
Parameters
----------
index
Column to insert the new `Series` column.
series
`Series` to insert.
"""
self._df.insert_at_idx(index, series._s)
def filter(self: DF, predicate: "pli.Expr") -> DF:
"""
Filter the rows in the DataFrame based on a predicate expression.
Parameters
----------
predicate
Expression that evaluates to a boolean Series.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
Filter on one condition:
>>> df.filter(pl.col("foo") < 3)
shape: (2, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1 ┆ 6 ┆ a │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b │
└─────┴─────┴─────┘
Filter on multiple conditions:
>>> df.filter((pl.col("foo") < 3) & (pl.col("ham") == "a"))
shape: (1, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1 ┆ 6 ┆ a │
└─────┴─────┴─────┘
"""
return self._from_pydf(
self.lazy()
.filter(predicate)
.collect(no_optimization=True, string_cache=False)
._df
)
@property
def shape(self) -> Tuple[int, int]:
"""
Get the shape of the DataFrame.
Examples
--------
>>> df = pl.DataFrame({"foo": [1, 2, 3, 4, 5]})
>>> df.shape
(5, 1)
"""
return self._df.shape()
@property
def height(self) -> int:
"""
Get the height of the DataFrame.
Examples
--------
>>> df = pl.DataFrame({"foo": [1, 2, 3, 4, 5]})
>>> df.height
5
"""
return self._df.height()
@property
def width(self) -> int:
"""
Get the width of the DataFrame.
Examples
--------
>>> df = pl.DataFrame({"foo": [1, 2, 3, 4, 5]})
>>> df.width
1
"""
return self._df.width()
@property
def columns(self) -> List[str]:
"""
Get or set column names.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.columns
['foo', 'bar', 'ham']
Set column names:
>>> df.columns = ["apple", "banana", "orange"]
>>> df
shape: (3, 3)
┌───────┬────────┬────────┐
│ apple ┆ banana ┆ orange │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═══════╪════════╪════════╡
│ 1 ┆ 6 ┆ a │
├╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b │
├╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 3 ┆ 8 ┆ c │
└───────┴────────┴────────┘
"""
return self._df.columns()
@columns.setter
def columns(self, columns: Sequence[str]) -> None:
"""
Change the column names of the `DataFrame`.
Parameters
----------
columns
A list with new names for the `DataFrame`.
The length of the list should be equal to the width of the `DataFrame`.
"""
self._df.set_column_names(columns)
@property
def dtypes(self) -> List[Type[DataType]]:
"""
Get dtypes of columns in DataFrame. Dtypes can also be found in column headers when printing the DataFrame.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6.0, 7.0, 8.0],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.dtypes
[<class 'polars.datatypes.Int64'>, <class 'polars.datatypes.Float64'>, <class 'polars.datatypes.Utf8'>]
>>> df
shape: (3, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ f64 ┆ str │
╞═════╪═════╪═════╡
│ 1 ┆ 6 ┆ a │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 ┆ c │
└─────┴─────┴─────┘
See Also
--------
schema : Return a dict of [column name, dtype]
"""
return self._df.dtypes()
@property
def schema(self) -> Dict[str, Type[DataType]]:
"""
Get a dict[column name, DataType]
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6.0, 7.0, 8.0],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.schema
{'foo': <class 'polars.datatypes.Int64'>, 'bar': <class 'polars.datatypes.Float64'>, 'ham': <class 'polars.datatypes.Utf8'>}
"""
return dict(zip(self.columns, self.dtypes))
def describe(self: DF) -> DF:
"""
        Summary statistics for a DataFrame. Only summarizes numeric datatypes at the moment and returns nulls for non-numeric datatypes.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1.0, 2.8, 3.0],
... "b": [4, 5, 6],
... "c": [True, False, True],
... }
... )
>>> df.describe()
shape: (5, 4)
┌──────────┬────────────────────┬─────┬──────┐
│ describe ┆ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ f64 ┆ f64 ┆ f64 │
╞══════════╪════════════════════╪═════╪══════╡
│ mean ┆ 2.2666666666666666 ┆ 5 ┆ null │
├╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤
│ std ┆ 1.1015141094572205 ┆ 1 ┆ null │
├╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤
│ min ┆ 1 ┆ 4 ┆ 0.0 │
├╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤
│ max ┆ 3 ┆ 6 ┆ 1 │
├╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤
│ median ┆ 2.8 ┆ 5 ┆ null │
└──────────┴────────────────────┴─────┴──────┘
"""
def describe_cast(self: DF) -> DF:
columns = []
for s in self:
if s.is_numeric() or s.is_boolean():
columns.append(s.cast(float))
else:
columns.append(s)
return self.__class__(columns)
summary = self._from_pydf(
pli.concat(
[
describe_cast(self.mean()),
describe_cast(self.std()),
describe_cast(self.min()),
describe_cast(self.max()),
describe_cast(self.median()),
]
)._df
)
summary.insert_at_idx(
0, pli.Series("describe", ["mean", "std", "min", "max", "median"])
)
return summary
def replace_at_idx(self, index: int, series: "pli.Series") -> None:
"""
Replace a column at an index location.
Parameters
----------
index
Column index.
series
Series that will replace the column.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> x = pl.Series("apple", [10, 20, 30])
>>> df.replace_at_idx(0, x)
>>> df
shape: (3, 3)
┌───────┬─────┬─────┐
│ apple ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═══════╪═════╪═════╡
│ 10 ┆ 6 ┆ a │
├╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 20 ┆ 7 ┆ b │
├╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 30 ┆ 8 ┆ c │
└───────┴─────┴─────┘
"""
self._df.replace_at_idx(index, series._s)
@overload
def sort(
self: DF,
by: Union[str, "pli.Expr", List[str], List["pli.Expr"]],
reverse: Union[bool, List[bool]] = ...,
*,
in_place: Literal[False] = ...,
) -> DF:
...
@overload
def sort(
self,
by: Union[str, "pli.Expr", List[str], List["pli.Expr"]],
reverse: Union[bool, List[bool]] = ...,
*,
in_place: Literal[True],
) -> None:
...
@overload
def sort(
self: DF,
by: Union[str, "pli.Expr", List[str], List["pli.Expr"]],
reverse: Union[bool, List[bool]] = ...,
*,
in_place: bool,
) -> Optional[DF]:
...
def sort(
self: DF,
by: Union[str, "pli.Expr", List[str], List["pli.Expr"]],
reverse: Union[bool, List[bool]] = False,
*,
in_place: bool = False,
) -> Optional[DF]:
"""
Sort the DataFrame by column.
Parameters
----------
by
Column(s) or expression(s) to sort by.
reverse
Reverse/descending sort.
in_place
Perform operation in-place.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6.0, 7.0, 8.0],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.sort("foo", reverse=True)
shape: (3, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ f64 ┆ str │
╞═════╪═════╪═════╡
│ 3 ┆ 8 ┆ c │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 1 ┆ 6 ┆ a │
└─────┴─────┴─────┘
**Sort by multiple columns.**
For multiple columns we can also use expression syntax.
>>> df.sort(
... [pl.col("foo"), pl.col("bar") ** 2],
... reverse=[True, False],
... )
shape: (3, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ f64 ┆ str │
╞═════╪═════╪═════╡
│ 3 ┆ 64 ┆ c │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 49 ┆ b │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 1 ┆ 36 ┆ a │
└─────┴─────┴─────┘
"""
if type(by) is list or isinstance(by, pli.Expr):
df = (
self.lazy()
.sort(by, reverse)
.collect(no_optimization=True, string_cache=False)
._df
)
if in_place:
self._df = df
return self
return self._from_pydf(df)
if in_place:
self._df.sort_in_place(by, reverse)
return None
else:
return self._from_pydf(self._df.sort(by, reverse))
def frame_equal(self, other: "DataFrame", null_equal: bool = True) -> bool:
"""
Check if DataFrame is equal to other.
Parameters
----------
other
DataFrame to compare with.
null_equal
Consider null values as equal.
Examples
--------
>>> df1 = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6.0, 7.0, 8.0],
... "ham": ["a", "b", "c"],
... }
... )
>>> df2 = pl.DataFrame(
... {
... "foo": [3, 2, 1],
... "bar": [8.0, 7.0, 6.0],
... "ham": ["c", "b", "a"],
... }
... )
>>> df1.frame_equal(df1)
True
>>> df1.frame_equal(df2)
False
"""
return self._df.frame_equal(other._df, null_equal)
def replace(self, column: str, new_col: "pli.Series") -> None:
"""
Replace a column by a new Series.
Parameters
----------
column
Column to replace.
new_col
New column to insert.
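Examples
--------
A minimal sketch with illustrative values; note that the frame is modified in place:
>>> df = pl.DataFrame({"foo": [1, 2, 3], "bar": [6, 7, 8]})
>>> df.replace("foo", pl.Series("foo", [10, 20, 30]))
>>> df["foo"].to_list()
[10, 20, 30]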
"""
self._df.replace(column, new_col.inner())
def slice(self: DF, offset: int, length: int) -> DF:
"""
Slice this DataFrame over the rows direction.
Parameters
----------
offset
Offset index.
length
Length of the slice.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6.0, 7.0, 8.0],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.slice(1, 2)
shape: (2, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ f64 ┆ str │
╞═════╪═════╪═════╡
│ 2 ┆ 7 ┆ b │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 ┆ c │
└─────┴─────┴─────┘
"""
if length < 0:
length = self.height - offset + length
return self._from_pydf(self._df.slice(offset, length))
def limit(self: DF, length: int = 5) -> DF:
"""
Get first N rows as DataFrame.
See Also
--------
head : Get the first N rows as a DataFrame.
Parameters
----------
length
Amount of rows to take.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.limit(2)
shape: (2, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1 ┆ 6 ┆ a │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b │
└─────┴─────┴─────┘
"""
return self.head(length)
def head(self: DF, length: int = 5) -> DF:
"""
Get first N rows as DataFrame.
Parameters
----------
length
Length of the head.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3, 4, 5],
... "bar": [6, 7, 8, 9, 10],
... "ham": ["a", "b", "c", "d", "e"],
... }
... )
>>> df.head(3)
shape: (3, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1 ┆ 6 ┆ a │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 ┆ c │
└─────┴─────┴─────┘
"""
return self._from_pydf(self._df.head(length))
def tail(self: DF, length: int = 5) -> DF:
"""
Get last N rows as DataFrame.
Parameters
----------
length
Length of the tail.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3, 4, 5],
... "bar": [6, 7, 8, 9, 10],
... "ham": ["a", "b", "c", "d", "e"],
... }
... )
>>> df.tail(3)
shape: (3, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 3 ┆ 8 ┆ c │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 4 ┆ 9 ┆ d │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 5 ┆ 10 ┆ e │
└─────┴─────┴─────┘
"""
return self._from_pydf(self._df.tail(length))
def drop_nulls(self: DF, subset: Optional[Union[str, List[str]]] = None) -> DF:
"""
Return a new DataFrame where rows containing null values are dropped.
Parameters
----------
subset
One or more column names to consider when looking for null values; if None, all columns are used.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, None, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.drop_nulls()
shape: (2, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1 ┆ 6 ┆ a │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 ┆ c │
└─────┴─────┴─────┘
This method drops a row whenever any single value of the row is null.
Below are some example snippets that show how you could drop null values based on other
conditions:
>>> df = pl.DataFrame(
... {
... "a": [None, None, None, None],
... "b": [1, 2, None, 1],
... "c": [1, None, None, 1],
... }
... )
>>> df
shape: (4, 3)
┌──────┬──────┬──────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ f64 ┆ i64 ┆ i64 │
╞══════╪══════╪══════╡
│ null ┆ 1 ┆ 1 │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ 2 ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ null ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ 1 ┆ 1 │
└──────┴──────┴──────┘
Drop a row only if all values are null:
>>> df.filter(
... ~pl.fold(
... acc=True,
... f=lambda acc, s: acc & s.is_null(),
... exprs=pl.all(),
... )
... )
shape: (3, 3)
┌──────┬─────┬──────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ f64 ┆ i64 ┆ i64 │
╞══════╪═════╪══════╡
│ null ┆ 1 ┆ 1 │
├╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ 2 ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ 1 ┆ 1 │
└──────┴─────┴──────┘
Drop a column if all values are null:
>>> df[:, [not (s.null_count() == df.height) for s in df]]
shape: (4, 2)
┌──────┬──────┐
│ b ┆ c │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞══════╪══════╡
│ 1 ┆ 1 │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 2 ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 1 ┆ 1 │
└──────┴──────┘
"""
if isinstance(subset, str):
subset = [subset]
return self._from_pydf(self._df.drop_nulls(subset))
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
"""
Apply a function on Self.
Parameters
----------
func
Callable.
args
Arguments.
kwargs
Keyword arguments.
Examples
--------
>>> def cast_str_to_int(data, col_name):
... return data.with_column(pl.col(col_name).cast(pl.Int64))
...
>>> df = pl.DataFrame({"a": [1, 2, 3, 4], "b": ["10", "20", "30", "40"]})
>>> df.pipe(cast_str_to_int, col_name="b")
shape: (4, 2)
┌─────┬─────┐
│ a ┆ b │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1 ┆ 10 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 20 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 30 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 4 ┆ 40 │
└─────┴─────┘
"""
return func(self, *args, **kwargs)
def with_row_count(self: DF, name: str = "row_nr", offset: int = 0) -> DF:
"""
Add a column at index 0 that counts the rows.
Parameters
----------
name
Name of the column to add.
offset
Start the row count at this offset. Default = 0
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 3, 5],
... "b": [2, 4, 6],
... }
... )
>>> df.with_row_count()
shape: (3, 3)
┌────────┬─────┬─────┐
│ row_nr ┆ a ┆ b │
│ --- ┆ --- ┆ --- │
│ u32 ┆ i64 ┆ i64 │
╞════════╪═════╪═════╡
│ 0 ┆ 1 ┆ 2 │
├╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 1 ┆ 3 ┆ 4 │
├╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 5 ┆ 6 │
└────────┴─────┴─────┘
"""
return self._from_pydf(self._df.with_row_count(name, offset))
def groupby(
self,
by: Union[str, "pli.Expr", Sequence[str], Sequence["pli.Expr"]],
maintain_order: bool = False,
) -> "GroupBy":
"""
Start a groupby operation.
Parameters
----------
by
Column(s) to group by.
maintain_order
Make sure that the order of the groups remains consistent. This is more expensive than a default groupby.
Note that this only works in expression aggregations.
Examples
--------
Below we group by column `"a"`, and we sum column `"b"`.
>>> df = pl.DataFrame(
... {
... "a": ["a", "b", "a", "b", "b", "c"],
... "b": [1, 2, 3, 4, 5, 6],
... "c": [6, 5, 4, 3, 2, 1],
... }
... )
>>> df.groupby("a")["b"].sum().sort(by="a")
shape: (3, 2)
┌─────┬───────┐
│ a ┆ b_sum │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════╪═══════╡
│ a ┆ 4 │
├╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ b ┆ 11 │
├╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ c ┆ 6 │
└─────┴───────┘
We can also loop over the grouped `DataFrame`
>>> for sub_df in df.groupby("a"):
... print(sub_df) # doctest: +IGNORE_RESULT
...
shape: (3, 3)
┌─────┬─────┬─────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ b ┆ 2 ┆ 5 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ b ┆ 4 ┆ 3 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ b ┆ 5 ┆ 2 │
└─────┴─────┴─────┘
shape: (1, 3)
┌─────┬─────┬─────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ c ┆ 6 ┆ 1 │
└─────┴─────┴─────┘
"""
if isinstance(by, str):
by = [by]
return GroupBy(self._df, by, maintain_order=maintain_order) # type: ignore
def groupby_rolling(
self,
index_column: str,
period: str,
offset: Optional[str] = None,
closed: str = "right",
) -> "RollingGroupBy":
"""
Create rolling groups based on a time column (or index value of type Int32, Int64).
Unlike *groupby_dynamic*, the windows are determined by the individual row values and are not of constant
intervals. For constant intervals use *groupby_dynamic*.
.. seealso::
groupby_dynamic
The `period` and `offset` arguments are created with
the following string language:
- 1ns (1 nanosecond)
- 1us (1 microsecond)
- 1ms (1 millisecond)
- 1s (1 second)
- 1m (1 minute)
- 1h (1 hour)
- 1d (1 day)
- 1w (1 week)
- 1mo (1 calendar month)
- 1y (1 calendar year)
- 1i (1 index count)
Or combine them:
"3d12h4m25s" # 3 days, 12 hours, 4 minutes, and 25 seconds
In case of a groupby_rolling on an integer column, the windows are defined by:
- **"1i" # length 1**
- **"10i" # length 10**
Parameters
----------
index_column
Column used to group based on the time window.
Often of type Date/Datetime.
This column must be sorted in ascending order; if it is not, the output will not make sense.
In case of a rolling groupby on indices, dtype needs to be one of {Int32, Int64}. Note that
Int32 gets temporarily cast to Int64, so if performance matters use an Int64 column.
period
length of the window
offset
offset of the window. Default is -period
closed
Defines if the window interval is closed or not.
Any of {"left", "right", "both" "none"}
Examples
--------
>>> dates = [
... "2020-01-01 13:45:48",
... "2020-01-01 16:42:13",
... "2020-01-01 16:45:09",
... "2020-01-02 18:12:48",
... "2020-01-03 19:45:32",
... "2020-01-08 23:16:43",
... ]
>>> df = pl.DataFrame({"dt": dates, "a": [3, 7, 5, 9, 2, 1]}).with_column(
... pl.col("dt").str.strptime(pl.Datetime)
... )
>>> out = df.groupby_rolling(index_column="dt", period="2d").agg(
... [
... pl.sum("a").alias("sum_a"),
...         pl.max("a").alias("max_a"),
...         pl.min("a").alias("min_a"),
... ]
... )
>>> assert out["sum_a"].to_list() == [3, 10, 15, 24, 11, 1]
>>> assert out["max_a"].to_list() == [3, 7, 7, 9, 9, 1]
>>> assert out["min_a"].to_list() == [3, 3, 3, 3, 2, 1]
>>> out
shape: (6, 4)
┌─────────────────────┬───────┬───────┬───────┐
│ dt                  ┆ sum_a ┆ max_a ┆ min_a │
│ --- ┆ --- ┆ --- ┆ --- │
│ datetime[ms] ┆ i64 ┆ i64 ┆ i64 │
╞═════════════════════╪═══════╪═══════╪═══════╡
│ 2020-01-01 13:45:48 ┆ 3 ┆ 3 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 2020-01-01 16:42:13 ┆ 10 ┆ 7 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 2020-01-01 16:45:09 ┆ 15 ┆ 7 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 2020-01-02 18:12:48 ┆ 24 ┆ 9 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 2020-01-03 19:45:32 ┆ 11 ┆ 9 ┆ 2 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 2020-01-08 23:16:43 ┆ 1 ┆ 1 ┆ 1 │
└─────────────────────┴───────┴───────┴───────┘
"""
return RollingGroupBy(
self,
index_column,
period,
offset,
closed,
)
def groupby_dynamic(
self,
index_column: str,
every: str,
period: Optional[str] = None,
offset: Optional[str] = None,
truncate: bool = True,
include_boundaries: bool = False,
closed: str = "right",
by: Optional[Union[str, List[str], "pli.Expr", List["pli.Expr"]]] = None,
) -> "DynamicGroupBy":
"""
Groups based on a time value (or index value of type Int32, Int64). Time windows are calculated and rows are assigned to windows.
Different from a normal groupby is that a row can be a member of multiple groups. The time/index window could
be seen as a rolling window, with a window size determined by dates/times/values instead of slots in the DataFrame.
A window is defined by:
- every: interval of the window
- period: length of the window
- offset: offset of the window
The `every`, `period` and `offset` arguments are created with
the following string language:
- 1ns (1 nanosecond)
- 1us (1 microsecond)
- 1ms (1 millisecond)
- 1s (1 second)
- 1m (1 minute)
- 1h (1 hour)
- 1d (1 day)
- 1w (1 week)
- 1mo (1 calendar month)
- 1y (1 calendar year)
- 1i (1 index count)
Or combine them:
"3d12h4m25s" # 3 days, 12 hours, 4 minutes, and 25 seconds
In case of a groupby_dynamic on an integer column, the windows are defined by:
- "1i" # length 1
- "10i" # length 10
Parameters
----------
index_column
Column used to group based on the time window.
Often of type Date/Datetime.
This column must be sorted in ascending order; if it is not, the output will not make sense.
In case of a dynamic groupby on indices, dtype needs to be one of {Int32, Int64}. Note that
Int32 gets temporarily cast to Int64, so if performance matters use an Int64 column.
every
interval of the window
period
length of the window; if None it is equal to 'every'
offset
offset of the window; if None (and `period` is None) it will be equal to negative `every`
truncate
truncate the time value to the window lower bound
include_boundaries
add the lower and upper bound of the window to the "_lower_boundary" and "_upper_boundary" columns.
This will impact performance because it is harder to parallelize.
closed
Defines if the window interval is closed or not.
Any of {"left", "right", "both" "none"}
by
Also group by this column/these columns
Examples
--------
>>> from datetime import datetime
>>> # create an example dataframe
>>> df = pl.DataFrame(
... {
... "time": pl.date_range(
... low=datetime(2021, 12, 16),
... high=datetime(2021, 12, 16, 3),
... interval="30m",
... ),
... "n": range(7),
... }
... )
>>> df
shape: (7, 2)
┌─────────────────────┬─────┐
│ time ┆ n │
│ --- ┆ --- │
│ datetime[ns] ┆ i64 │
╞═════════════════════╪═════╡
│ 2021-12-16 00:00:00 ┆ 0 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2021-12-16 00:30:00 ┆ 1 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2021-12-16 01:00:00 ┆ 2 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2021-12-16 01:30:00 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2021-12-16 02:00:00 ┆ 4 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2021-12-16 02:30:00 ┆ 5 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2021-12-16 03:00:00 ┆ 6 │
└─────────────────────┴─────┘
Group by windows of 1 hour starting at 2021-12-16 00:00:00.
>>> (
... df.groupby_dynamic("time", every="1h").agg(
... [
... pl.col("time").min().alias("time_min"),
... pl.col("time").max().alias("time_max"),
... ]
... )
... )
shape: (3, 3)
┌─────────────────────┬─────────────────────┬─────────────────────┐
│ time ┆ time_min ┆ time_max │
│ --- ┆ --- ┆ --- │
│ datetime[ns] ┆ datetime[ns] ┆ datetime[ns] │
╞═════════════════════╪═════════════════════╪═════════════════════╡
│ 2021-12-16 00:00:00 ┆ 2021-12-16 00:30:00 ┆ 2021-12-16 01:00:00 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 01:00:00 ┆ 2021-12-16 01:30:00 ┆ 2021-12-16 02:00:00 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 02:00:00 ┆ 2021-12-16 02:30:00 ┆ 2021-12-16 03:00:00 │
└─────────────────────┴─────────────────────┴─────────────────────┘
The window boundaries can also be added to the aggregation result
>>> (
... df.groupby_dynamic("time", every="1h", include_boundaries=True).agg(
... [pl.col("time").count().alias("time_count")]
... )
... )
shape: (3, 4)
┌─────────────────────┬─────────────────────┬─────────────────────┬────────────┐
│ _lower_boundary ┆ _upper_boundary ┆ time ┆ time_count │
│ --- ┆ --- ┆ --- ┆ --- │
│ datetime[ns] ┆ datetime[ns] ┆ datetime[ns] ┆ u32 │
╞═════════════════════╪═════════════════════╪═════════════════════╪════════════╡
│ 2021-12-16 00:00:00 ┆ 2021-12-16 01:00:00 ┆ 2021-12-16 00:00:00 ┆ 2 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 01:00:00 ┆ 2021-12-16 02:00:00 ┆ 2021-12-16 01:00:00 ┆ 2 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 02:00:00 ┆ 2021-12-16 03:00:00 ┆ 2021-12-16 02:00:00 ┆ 2 │
└─────────────────────┴─────────────────────┴─────────────────────┴────────────┘
When closed="left", should not include right end of interval [lower_bound, upper_bound)
>>> (
... df.groupby_dynamic("time", every="1h", closed="left").agg(
... [
... pl.col("time").count().alias("time_count"),
... pl.col("time").list().alias("time_agg_list"),
... ]
... )
... )
shape: (4, 3)
┌─────────────────────┬────────────┬─────────────────────────────────────┐
│ time ┆ time_count ┆ time_agg_list │
│ --- ┆ --- ┆ --- │
│ datetime[ns] ┆ u32 ┆ list [datetime[ns]] │
╞═════════════════════╪════════════╪═════════════════════════════════════╡
│ 2021-12-16 00:00:00 ┆ 2 ┆ [2021-12-16 00:00:00, 2021-12-16... │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 01:00:00 ┆ 2 ┆ [2021-12-16 01:00:00, 2021-12-16... │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 02:00:00 ┆ 2 ┆ [2021-12-16 02:00:00, 2021-12-16... │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 03:00:00 ┆ 1 ┆ [2021-12-16 03:00:00] │
└─────────────────────┴────────────┴─────────────────────────────────────┘
When closed="both" the time values at the window boundaries belong to 2 groups.
>>> (
... df.groupby_dynamic("time", every="1h", closed="both").agg(
... [pl.col("time").count().alias("time_count")]
... )
... )
shape: (4, 2)
┌─────────────────────┬────────────┐
│ time ┆ time_count │
│ --- ┆ --- │
│ datetime[ns] ┆ u32 │
╞═════════════════════╪════════════╡
│ 2021-12-16 00:00:00 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 01:00:00 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 02:00:00 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 03:00:00 ┆ 1 │
└─────────────────────┴────────────┘
Dynamic groupbys can also be combined with grouping on normal keys
>>> df = pl.DataFrame(
... {
... "time": pl.date_range(
... low=datetime(2021, 12, 16),
... high=datetime(2021, 12, 16, 3),
... interval="30m",
... ),
... "groups": ["a", "a", "a", "b", "b", "a", "a"],
... }
... )
>>> df
shape: (7, 2)
┌─────────────────────┬────────┐
│ time ┆ groups │
│ --- ┆ --- │
│ datetime[ns] ┆ str │
╞═════════════════════╪════════╡
│ 2021-12-16 00:00:00 ┆ a │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-12-16 00:30:00 ┆ a │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-12-16 01:00:00 ┆ a │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-12-16 01:30:00 ┆ b │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-12-16 02:00:00 ┆ b │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-12-16 02:30:00 ┆ a │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-12-16 03:00:00 ┆ a │
└─────────────────────┴────────┘
>>> (
... df.groupby_dynamic(
... "time",
... every="1h",
... closed="both",
... by="groups",
... include_boundaries=True,
... ).agg([pl.col("time").count().alias("time_count")])
... )
shape: (6, 5)
┌────────┬─────────────────────┬─────────────────────┬─────────────────────┬────────────┐
│ groups ┆ _lower_boundary ┆ _upper_boundary ┆ time ┆ time_count │
│ --- ┆ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ datetime[ns] ┆ datetime[ns] ┆ datetime[ns] ┆ u32 │
╞════════╪═════════════════════╪═════════════════════╪═════════════════════╪════════════╡
│ a ┆ 2021-12-16 00:00:00 ┆ 2021-12-16 01:00:00 ┆ 2021-12-16 00:00:00 ┆ 3 │
├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ a ┆ 2021-12-16 01:00:00 ┆ 2021-12-16 02:00:00 ┆ 2021-12-16 01:00:00 ┆ 1 │
├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ a ┆ 2021-12-16 02:00:00 ┆ 2021-12-16 03:00:00 ┆ 2021-12-16 02:00:00 ┆ 2 │
├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ a ┆ 2021-12-16 03:00:00 ┆ 2021-12-16 04:00:00 ┆ 2021-12-16 03:00:00 ┆ 1 │
├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ b ┆ 2021-12-16 01:00:00 ┆ 2021-12-16 02:00:00 ┆ 2021-12-16 01:00:00 ┆ 2 │
├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ b ┆ 2021-12-16 02:00:00 ┆ 2021-12-16 03:00:00 ┆ 2021-12-16 02:00:00 ┆ 1 │
└────────┴─────────────────────┴─────────────────────┴─────────────────────┴────────────┘
Dynamic groupby on an index column
>>> df = pl.DataFrame(
... {
... "idx": np.arange(6),
... "A": ["A", "A", "B", "B", "B", "C"],
... }
... )
>>> (
... df.groupby_dynamic(
... "idx",
... every="2i",
... period="3i",
... include_boundaries=True,
... ).agg(pl.col("A").list().alias("A_agg_list"))
... )
shape: (3, 4)
┌─────────────────┬─────────────────┬─────┬─────────────────┐
│ _lower_boundary ┆ _upper_boundary ┆ idx ┆ A_agg_list │
│ --- ┆ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ i64 ┆ list [str] │
╞═════════════════╪═════════════════╪═════╪═════════════════╡
│ 0 ┆ 3 ┆ 0 ┆ ["A", "B", "B"] │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2 ┆ 5 ┆ 2 ┆ ["B", "B", "C"] │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 4 ┆ 7 ┆ 4 ┆ ["C"] │
└─────────────────┴─────────────────┴─────┴─────────────────┘
"""
return DynamicGroupBy(
self,
index_column,
every,
period,
offset,
truncate,
include_boundaries,
closed,
by,
)
def upsample(
self: DF,
time_column: str,
every: str,
offset: Optional[str] = None,
by: Optional[Union[str, Sequence[str]]] = None,
maintain_order: bool = False,
) -> DF:
"""
Upsample a DataFrame at a regular frequency.
Parameters
----------
time_column
time column will be used to determine a date_range.
Note that this column has to be sorted for the output to make sense.
every
interval will start 'every' duration
offset
change the start of the date_range by this offset.
by
First group by these columns and then upsample for every group
maintain_order
Keep the ordering predictable. This is slower.
The `every` and `offset` arguments are created with
the following string language:
- 1ns (1 nanosecond)
- 1us (1 microsecond)
- 1ms (1 millisecond)
- 1s (1 second)
- 1m (1 minute)
- 1h (1 hour)
- 1d (1 day)
- 1w (1 week)
- 1mo (1 calendar month)
- 1y (1 calendar year)
- 1i (1 index count)
Or combine them:
"3d12h4m25s" # 3 days, 12 hours, 4 minutes, and 25 seconds
Examples
--------
Upsample a DataFrame by a certain interval.
>>> from datetime import datetime
>>> df = pl.DataFrame(
... {
... "time": [
... datetime(2021, 2, 1),
... datetime(2021, 4, 1),
... datetime(2021, 5, 1),
... datetime(2021, 6, 1),
... ],
... "groups": ["A", "B", "A", "B"],
... "values": [0, 1, 2, 3],
... }
... )
>>> (
... df.upsample(
... time_column="time", every="1mo", by="groups", maintain_order=True
... ).select(pl.all().forward_fill())
... )
shape: (7, 3)
┌─────────────────────┬────────┬────────┐
│ time ┆ groups ┆ values │
│ --- ┆ --- ┆ --- │
│ datetime[ns] ┆ str ┆ i64 │
╞═════════════════════╪════════╪════════╡
│ 2021-02-01 00:00:00 ┆ A ┆ 0 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-03-01 00:00:00 ┆ A ┆ 0 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-04-01 00:00:00 ┆ A ┆ 0 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-05-01 00:00:00 ┆ A ┆ 2 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-04-01 00:00:00 ┆ B ┆ 1 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-05-01 00:00:00 ┆ B ┆ 1 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-06-01 00:00:00 ┆ B ┆ 3 │
└─────────────────────┴────────┴────────┘
"""
if by is None:
by = []
if isinstance(by, str):
by = [by]
if offset is None:
offset = "0ns"
return self._from_pydf(
self._df.upsample(by, time_column, every, offset, maintain_order)
)
def join_asof(
self: DF,
df: "DataFrame",
left_on: Optional[str] = None,
right_on: Optional[str] = None,
on: Optional[str] = None,
by_left: Optional[Union[str, List[str]]] = None,
by_right: Optional[Union[str, List[str]]] = None,
by: Optional[Union[str, List[str]]] = None,
strategy: str = "backward",
suffix: str = "_right",
tolerance: Optional[Union[str, int, float]] = None,
allow_parallel: bool = True,
force_parallel: bool = False,
) -> DF:
"""
Perform an asof join. This is similar to a left-join except that we
match on nearest key rather than equal keys.
Both DataFrames must be sorted by the key.
For each row in the left DataFrame:
- A "backward" search selects the last row in the right DataFrame whose
'on' key is less than or equal to the left's key.
- A "forward" search selects the first row in the right DataFrame whose
'on' key is greater than or equal to the left's key.
The default is "backward".
Parameters
----------
df
DataFrame to join with.
left_on
Join column of the left DataFrame.
right_on
Join column of the right DataFrame.
on
Join column of both DataFrames. If set, `left_on` and `right_on` should be None.
by
join on these columns before doing asof join
by_left
join on these columns before doing asof join
by_right
join on these columns before doing asof join
strategy
One of {'forward', 'backward'}
suffix
Suffix to append to columns with a duplicate name.
tolerance
Numeric tolerance. By setting this the join will only be done if the near keys are within this distance.
If an asof join is done on columns of dtype "Date", "Datetime", "Duration" or "Time" you
can use the following string language:
- 1ns (1 nanosecond)
- 1us (1 microsecond)
- 1ms (1 millisecond)
- 1s (1 second)
- 1m (1 minute)
- 1h (1 hour)
- 1d (1 day)
- 1w (1 week)
- 1mo (1 calendar month)
- 1y (1 calendar year)
- 1i (1 index count)
Or combine them:
"3d12h4m25s" # 3 days, 12 hours, 4 minutes, and 25 seconds
allow_parallel
Allow the physical plan to optionally evaluate the computation of both DataFrames up to the join in parallel.
force_parallel
Force the physical plan to evaluate the computation of both DataFrames up to the join in parallel.
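Examples
--------
A minimal sketch of a backward (default) asof join on an integer key; both frames are sorted on "time" and all values are illustrative:
>>> trades = pl.DataFrame({"time": [2, 4, 6], "trade": ["a", "b", "c"]})
>>> quotes = pl.DataFrame({"time": [1, 3, 5], "quote": [10, 30, 50]})
>>> trades.join_asof(quotes, on="time")["quote"].to_list()
[10, 30, 50]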
"""
return self._from_pydf(
self.lazy()
.join_asof(
df.lazy(),
left_on=left_on,
right_on=right_on,
on=on,
by_left=by_left,
by_right=by_right,
by=by,
strategy=strategy,
suffix=suffix,
tolerance=tolerance,
allow_parallel=allow_parallel,
force_parallel=force_parallel,
)
.collect(no_optimization=True)
._df
)
def join(
self: DF,
df: "DataFrame",
left_on: Optional[Union[str, "pli.Expr", List[Union[str, "pli.Expr"]]]] = None,
right_on: Optional[Union[str, "pli.Expr", List[Union[str, "pli.Expr"]]]] = None,
on: Optional[Union[str, "pli.Expr", List[Union[str, "pli.Expr"]]]] = None,
how: str = "inner",
suffix: str = "_right",
asof_by: Optional[Union[str, List[str]]] = None,
asof_by_left: Optional[Union[str, List[str]]] = None,
asof_by_right: Optional[Union[str, List[str]]] = None,
) -> DF:
"""
SQL-like joins.
Parameters
----------
df
DataFrame to join with.
left_on
Name(s) of the left join column(s).
right_on
Name(s) of the right join column(s).
on
Name(s) of the join columns in both DataFrames.
how
Join strategy
- "inner"
- "left"
- "outer"
- "asof"
- "cross"
suffix
Suffix to append to columns with a duplicate name.
asof_by
join on these columns before doing asof join
asof_by_left
join on these columns before doing asof join
asof_by_right
join on these columns before doing asof join
Returns
-------
Joined DataFrame
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6.0, 7.0, 8.0],
... "ham": ["a", "b", "c"],
... }
... )
>>> other_df = pl.DataFrame(
... {
... "apple": ["x", "y", "z"],
... "ham": ["a", "b", "d"],
... }
... )
>>> df.join(other_df, on="ham")
shape: (2, 4)
┌─────┬─────┬─────┬───────┐
│ foo ┆ bar ┆ ham ┆ apple │
│ --- ┆ --- ┆ --- ┆ --- │
│ i64 ┆ f64 ┆ str ┆ str │
╞═════╪═════╪═════╪═══════╡
│ 1 ┆ 6 ┆ a ┆ x │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b ┆ y │
└─────┴─────┴─────┴───────┘
>>> df.join(other_df, on="ham", how="outer")
shape: (4, 4)
┌──────┬──────┬─────┬───────┐
│ foo ┆ bar ┆ ham ┆ apple │
│ --- ┆ --- ┆ --- ┆ --- │
│ i64 ┆ f64 ┆ str ┆ str │
╞══════╪══════╪═════╪═══════╡
│ 1 ┆ 6 ┆ a ┆ x │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b ┆ y │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ null ┆ null ┆ d ┆ z │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 3 ┆ 8 ┆ c ┆ null │
└──────┴──────┴─────┴───────┘
**Asof join**
This is similar to a left-join except that we match on nearest keys rather than equal keys.
The direction is backward.
The keys must be sorted to perform an asof join.
"""
if how == "asof":
warnings.warn(
"using asof join via DataFrame.join is deprecated, please use DataFrame.join_asof"
)
if how == "cross":
return self._from_pydf(self._df.join(df._df, [], [], how, suffix))
left_on_: Optional[List[Union[str, pli.Expr]]]
if isinstance(left_on, (str, pli.Expr)):
left_on_ = [left_on]
else:
left_on_ = left_on
right_on_: Optional[List[Union[str, pli.Expr]]]
if isinstance(right_on, (str, pli.Expr)):
right_on_ = [right_on]
else:
right_on_ = right_on
if isinstance(on, str):
left_on_ = [on]
right_on_ = [on]
elif isinstance(on, list):
left_on_ = on
right_on_ = on
if left_on_ is None or right_on_ is None:
raise ValueError("You should pass the column to join on as an argument.")
if (
isinstance(left_on_[0], pli.Expr)
or isinstance(right_on_[0], pli.Expr)
or asof_by_left is not None
or asof_by_right is not None
or asof_by is not None
):
return self._from_pydf(
self.lazy()
.join(
df.lazy(),
left_on,
right_on,
on=on,
how=how,
suffix=suffix,
asof_by_right=asof_by_right,
asof_by_left=asof_by_left,
asof_by=asof_by,
)
.collect(no_optimization=True)
._df
)
else:
return self._from_pydf(
self._df.join(df._df, left_on_, right_on_, how, suffix)
)
def apply(
self: DF,
f: Callable[[Tuple[Any, ...]], Any],
return_dtype: Optional[Type[DataType]] = None,
inference_size: int = 256,
) -> DF:
"""
Apply a custom function over the rows of the DataFrame. The rows are passed as tuples.
Beware, this is slow.
Parameters
----------
f
Custom function or lambda function.
return_dtype
Output type of the operation. If none given, Polars tries to infer the type.
inference_size
Only used in the case when the custom function returns rows.
This uses the first `n` rows to determine the output schema.
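Examples
--------
A minimal sketch with illustrative values; each row is passed to the function as a tuple:
>>> df = pl.DataFrame({"a": [1, 2, 3], "b": [10, 20, 30]})
>>> df.apply(lambda row: row[0] + row[1]).to_series(0).to_list()  # doctest: +SKIP
[11, 22, 33]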
"""
out, is_df = self._df.apply(f, return_dtype, inference_size)
if is_df:
return self._from_pydf(out)
else:
return self._from_pydf(pli.wrap_s(out).to_frame()._df)
def with_column(self: DF, column: Union["pli.Series", "pli.Expr"]) -> DF:
"""
Return a new DataFrame with the column added or replaced.
Parameters
----------
column
Series, where the name of the Series refers to the column in the DataFrame.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 3, 5],
... "b": [2, 4, 6],
... }
... )
>>> df.with_column((pl.col("b") ** 2).alias("b_squared")) # added
shape: (3, 3)
┌─────┬─────┬───────────┐
│ a ┆ b ┆ b_squared │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ f64 │
╞═════╪═════╪═══════════╡
│ 1 ┆ 2 ┆ 4.0 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┤
│ 3 ┆ 4 ┆ 16.0 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┤
│ 5 ┆ 6 ┆ 36.0 │
└─────┴─────┴───────────┘
>>> df.with_column(pl.col("a") ** 2) # replaced
shape: (3, 2)
┌──────┬─────┐
│ a ┆ b │
│ --- ┆ --- │
│ f64 ┆ i64 │
╞══════╪═════╡
│ 1.0 ┆ 2 │
├╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 9.0 ┆ 4 │
├╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 25.0 ┆ 6 │
└──────┴─────┘
"""
if isinstance(column, pli.Expr):
return self.with_columns([column])
else:
return self._from_pydf(self._df.with_column(column._s))
def hstack(
self: DF,
columns: Union[List["pli.Series"], "DataFrame"],
in_place: bool = False,
) -> Optional[DF]:
"""
Return a new DataFrame grown horizontally by stacking multiple Series to it.
Parameters
----------
columns
Series to stack.
in_place
Modify in place.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> x = pl.Series("apple", [10, 20, 30])
>>> df.hstack([x])
shape: (3, 4)
┌─────┬─────┬─────┬───────┐
│ foo ┆ bar ┆ ham ┆ apple │
│ --- ┆ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str ┆ i64 │
╞═════╪═════╪═════╪═══════╡
│ 1 ┆ 6 ┆ a ┆ 10 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b ┆ 20 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 3 ┆ 8 ┆ c ┆ 30 │
└─────┴─────┴─────┴───────┘
"""
if not isinstance(columns, list):
columns = columns.get_columns()
if in_place:
self._df.hstack_mut([s.inner() for s in columns])
return None
else:
return self._from_pydf(self._df.hstack([s.inner() for s in columns]))
@overload
def vstack(self, df: "DataFrame", in_place: Literal[True]) -> None:
...
@overload
def vstack(self: DF, df: "DataFrame", in_place: Literal[False] = ...) -> DF:
...
@overload
def vstack(self: DF, df: "DataFrame", in_place: bool) -> Optional[DF]:
...
def vstack(self: DF, df: "DataFrame", in_place: bool = False) -> Optional[DF]:
"""
Grow this DataFrame vertically by stacking a DataFrame to it.
Parameters
----------
df
DataFrame to stack.
in_place
Modify in place
Examples
--------
>>> df1 = pl.DataFrame(
... {
... "foo": [1, 2],
... "bar": [6, 7],
... "ham": ["a", "b"],
... }
... )
>>> df2 = pl.DataFrame(
... {
... "foo": [3, 4],
... "bar": [8, 9],
... "ham": ["c", "d"],
... }
... )
>>> df1.vstack(df2)
shape: (4, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1 ┆ 6 ┆ a │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 ┆ c │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 4 ┆ 9 ┆ d │
└─────┴─────┴─────┘
"""
if in_place:
self._df.vstack_mut(df._df)
return None
else:
return self._from_pydf(self._df.vstack(df._df))
def extend(self, other: "DataFrame") -> None:
"""
Extend the memory backed by this `DataFrame` with the values from `other`.
Different from `vstack` which adds the chunks from `other` to the chunks of this `DataFrame`
`extend` appends the data from `other` to the underlying memory locations and thus may cause a reallocation.
If this does not cause a reallocation, the resulting data structure will not have any extra chunks
and thus will yield faster queries.
Prefer `extend` over `vstack` when you want to do a query after a single append. For instance during
online operations where you add `n` rows and rerun a query.
Prefer `vstack` over `extend` when you want to append many times before doing a query. For instance
when you read in multiple files and want to store them in a single `DataFrame`.
In the latter case, finish the sequence of `vstack` operations with a `rechunk`.
Parameters
----------
other
DataFrame to vertically add.
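Examples
--------
A minimal sketch with illustrative values; ``df1`` is extended in place:
>>> df1 = pl.DataFrame({"foo": [1, 2], "bar": [6, 7]})
>>> df2 = pl.DataFrame({"foo": [3, 4], "bar": [8, 9]})
>>> df1.extend(df2)
>>> df1.shape
(4, 2)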
"""
self._df.extend(other._df)
def drop(self: DF, name: Union[str, List[str]]) -> DF:
"""
Remove column from DataFrame and return as new.
Parameters
----------
name
Column(s) to drop.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6.0, 7.0, 8.0],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.drop("ham")
shape: (3, 2)
┌─────┬─────┐
│ foo ┆ bar │
│ --- ┆ --- │
│ i64 ┆ f64 │
╞═════╪═════╡
│ 1 ┆ 6 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 │
└─────┴─────┘
"""
if isinstance(name, list):
df = self.clone()
for n in name:
df._df.drop_in_place(n)
return df
return self._from_pydf(self._df.drop(name))
def drop_in_place(self, name: str) -> "pli.Series":
"""
Drop a single column in place and return the dropped column.
Parameters
----------
name
Column to drop.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.drop_in_place("ham")
shape: (3,)
Series: 'ham' [str]
[
"a"
"b"
"c"
]
"""
return pli.wrap_s(self._df.drop_in_place(name))
def select_at_idx(self, idx: int) -> "pli.Series":
"""
Select column at index location.
Parameters
----------
idx
Location of selection.
.. deprecated:: 0.10.20
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.select_at_idx(1)
shape: (3,)
Series: 'bar' [i64]
[
6
7
8
]
"""
return pli.wrap_s(self._df.select_at_idx(idx))
def clone(self: DF) -> DF:
"""
Very cheap deep clone.
"""
return self._from_pydf(self._df.clone())
def __copy__(self: DF) -> DF:
return self.clone()
def __deepcopy__(self: DF, memodict={}) -> DF: # type: ignore
return self.clone()
def get_columns(self) -> List["pli.Series"]:
"""
Get the DataFrame as a List of Series.
"""
return list(map(lambda s: pli.wrap_s(s), self._df.get_columns()))
def get_column(self, name: str) -> "pli.Series":
"""
Get a single column as Series by name.
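Examples
--------
A minimal sketch with illustrative values:
>>> df = pl.DataFrame({"foo": [1, 2, 3], "bar": [4, 5, 6]})
>>> df.get_column("foo").to_list()
[1, 2, 3]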
"""
return self[name]
def fill_null(self: DF, strategy: Union[str, "pli.Expr", Any]) -> DF:
"""
Fill null values using a filling strategy, literal, or Expr.
Parameters
----------
strategy
One of:
- "backward"
- "forward"
- "mean"
- "min'
- "max"
- "zero"
- "one"
Or an expression.
Returns
-------
DataFrame with None values replaced by the filling strategy.
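Examples
--------
A minimal sketch with illustrative values, using a strategy and a literal:
>>> df = pl.DataFrame({"a": [1, None, 3]})
>>> df.fill_null("forward")["a"].to_list()
[1, 1, 3]
>>> df.fill_null(99)["a"].to_list()
[1, 99, 3]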
"""
if isinstance(strategy, pli.Expr):
return self._from_pydf(
self.lazy().fill_null(strategy).collect(no_optimization=True)._df
)
if not isinstance(strategy, str):
return self.fill_null(pli.lit(strategy))
return self._from_pydf(self._df.fill_null(strategy))
def fill_nan(self: DF, fill_value: Union["pli.Expr", int, float]) -> DF:
"""
Fill floating point NaN values by an Expression evaluation.
Warnings
--------
Note that floating point NaN (Not a Number) values are not missing values!
To replace missing values, use `fill_null`.
Parameters
----------
fill_value
value to fill NaN with
Returns
-------
DataFrame with NaN replaced with fill_value
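Examples
--------
A minimal sketch with illustrative values:
>>> df = pl.DataFrame({"a": [1.5, float("nan"), 3.0]})
>>> df.fill_nan(0.0)["a"].to_list()
[1.5, 0.0, 3.0]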
"""
return self._from_pydf(
self.lazy().fill_nan(fill_value).collect(no_optimization=True)._df
)
def explode(
self: DF,
columns: Union[str, List[str], "pli.Expr", List["pli.Expr"]],
) -> DF:
"""
Explode `DataFrame` to long format by exploding a column with Lists.
Parameters
----------
columns
Column(s) of List type.
Returns
-------
DataFrame
Examples
--------
>>> df = pl.DataFrame(
... {
... "letters": ["c", "c", "a", "c", "a", "b"],
... "nrs": [[1, 2], [1, 3], [4, 3], [5, 5, 5], [6], [2, 1, 2]],
... }
... )
>>> df
shape: (6, 2)
┌─────────┬────────────┐
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ list [i64] │
╞═════════╪════════════╡
│ c ┆ [1, 2] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ c ┆ [1, 3] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ a ┆ [4, 3] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ c ┆ [5, 5, 5] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ a ┆ [6] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ b ┆ [2, 1, 2] │
└─────────┴────────────┘
>>> df.explode("nrs")
shape: (13, 2)
┌─────────┬─────┐
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ c ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ c ┆ 2 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ c ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ c ┆ 3 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ ... ┆ ... │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ a ┆ 6 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ b ┆ 2 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ b ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ b ┆ 2 │
└─────────┴─────┘
"""
return self._from_pydf(
self.lazy().explode(columns).collect(no_optimization=True)._df
)
def pivot(
self: DF,
values: Union[List[str], str],
index: Union[List[str], str],
columns: Union[List[str], str],
aggregate_fn: str = "first",
maintain_order: bool = False,
) -> DF:
"""
Create a spreadsheet-style pivot table as a DataFrame.
Parameters
----------
values
Column values to aggregate. Can be multiple columns if the *columns* argument contains multiple columns as well.
index
One or multiple keys to group by
columns
Columns whose values will be used as the header of the output DataFrame
aggregate_fn
Any of:
- "sum"
- "max"
- "min"
- "mean"
- "median"
- "first"
- "last"
- "count"
maintain_order
Sort the grouped keys so that the output order is predictable.
Returns
-------
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": ["one", "one", "one", "two", "two", "two"],
... "bar": ["A", "B", "C", "A", "B", "C"],
... "baz": [1, 2, 3, 4, 5, 6],
... }
... )
>>> df.pivot(values="baz", index="foo", columns="bar")
shape: (2, 4)
┌─────┬─────┬─────┬─────┐
│ foo ┆ A ┆ B ┆ C │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 ┆ i64 │
╞═════╪═════╪═════╪═════╡
│ one ┆ 1 ┆ 2 ┆ 3 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ two ┆ 4 ┆ 5 ┆ 6 │
└─────┴─────┴─────┴─────┘
"""
if isinstance(values, str):
values = [values]
if isinstance(index, str):
index = [index]
if isinstance(columns, str):
columns = [columns]
return self._from_pydf(
self._df.pivot2(values, index, columns, aggregate_fn, maintain_order)
)
def melt(
self: DF,
id_vars: Optional[Union[List[str], str]] = None,
value_vars: Optional[Union[List[str], str]] = None,
) -> DF:
"""
Unpivot a DataFrame from wide to long format, optionally leaving identifiers set.
This function is useful to massage a DataFrame into a format where one or more columns are identifier variables
(id_vars), while all other columns, considered measured variables (value_vars), are “unpivoted” to the row axis,
leaving just two non-identifier columns, ‘variable’ and ‘value’.
Parameters
----------
id_vars
Columns to use as identifier variables.
value_vars
Columns to use as value variables.
If `value_vars` is empty all columns that are not in `id_vars` will be used.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": ["x", "y", "z"],
... "b": [1, 3, 5],
... "c": [2, 4, 6],
... }
... )
>>> df.melt(id_vars="a", value_vars=["b", "c"])
shape: (6, 3)
┌─────┬──────────┬───────┐
│ a ┆ variable ┆ value │
│ --- ┆ --- ┆ --- │
│ str ┆ str ┆ i64 │
╞═════╪══════════╪═══════╡
│ x ┆ b ┆ 1 │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ y ┆ b ┆ 3 │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ z ┆ b ┆ 5 │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ x ┆ c ┆ 2 │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ y ┆ c ┆ 4 │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ z ┆ c ┆ 6 │
└─────┴──────────┴───────┘
"""
if isinstance(value_vars, str):
value_vars = [value_vars]
if isinstance(id_vars, str):
id_vars = [id_vars]
if value_vars is None:
value_vars = []
if id_vars is None:
id_vars = []
return self._from_pydf(self._df.melt(id_vars, value_vars))
def shift(self: DF, periods: int) -> DF:
"""
Shift the values by a given period and fill the parts that will be empty due to this operation
with `Nones`.
Parameters
----------
periods
Number of places to shift (may be negative).
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.shift(periods=1)
shape: (3, 3)
┌──────┬──────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞══════╪══════╪══════╡
│ null ┆ null ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 1 ┆ 6 ┆ a │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b │
└──────┴──────┴──────┘
>>> df.shift(periods=-1)
shape: (3, 3)
┌──────┬──────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞══════╪══════╪══════╡
│ 2 ┆ 7 ┆ b │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 3 ┆ 8 ┆ c │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ null ┆ null │
└──────┴──────┴──────┘
"""
return self._from_pydf(self._df.shift(periods))
def shift_and_fill(
self: DF, periods: int, fill_value: Union[int, str, float]
) -> DF:
"""
Shift the values by a given period and fill the parts that will be empty due to this operation
with the result of the `fill_value` expression.
Parameters
----------
periods
Number of places to shift (may be negative).
fill_value
fill None values with this value.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.shift_and_fill(periods=1, fill_value=0)
shape: (3, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 0 ┆ 0 ┆ 0 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 1 ┆ 6 ┆ a │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b │
└─────┴─────┴─────┘
"""
return self._from_pydf(
self.lazy()
.shift_and_fill(periods, fill_value)
.collect(no_optimization=True, string_cache=False)
._df
)
def is_duplicated(self) -> "pli.Series":
"""
Get a mask of all duplicated rows in this DataFrame.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 3, 1],
... "b": ["x", "y", "z", "x"],
... }
... )
>>> df.is_duplicated()
shape: (4,)
Series: '' [bool]
[
true
false
false
true
]
"""
return pli.wrap_s(self._df.is_duplicated())
def is_unique(self) -> "pli.Series":
"""
Get a mask of all unique rows in this DataFrame.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 3, 1],
... "b": ["x", "y", "z", "x"],
... }
... )
>>> df.is_unique()
shape: (4,)
Series: '' [bool]
[
false
true
true
false
]
"""
return pli.wrap_s(self._df.is_unique())
def lazy(self) -> "pli.LazyFrame":
"""
Start a lazy query from this point. This returns a `LazyFrame` object.
Operations on a `LazyFrame` are not executed until this is requested by either calling:
* `.fetch()` (run on a small number of rows)
* `.collect()` (run on all data)
* `.describe_plan()` (print unoptimized query plan)
* `.describe_optimized_plan()` (print optimized query plan)
* `.show_graph()` (show the (un)optimized query plan as a graphviz graph)
Lazy operations are advised because they allow for query optimization and more parallelization.
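Examples
--------
A minimal sketch of a lazy query with illustrative values:
>>> df = pl.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> out = df.lazy().filter(pl.col("a") > 1).select(pl.col("b").sum()).collect()
>>> out.to_series(0).to_list()
[11]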
"""
return pli.wrap_ldf(self._df.lazy())
def select(
self: DF,
exprs: Union[
str,
"pli.Expr",
Sequence[Union[str, "pli.Expr", bool, int, float, "pli.Series"]],
"pli.Series",
],
) -> DF:
"""
Select columns from this DataFrame.
Parameters
----------
exprs
Column or columns to select.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.select("foo")
shape: (3, 1)
┌─────┐
│ foo │
│ --- │
│ i64 │
╞═════╡
│ 1 │
├╌╌╌╌╌┤
│ 2 │
├╌╌╌╌╌┤
│ 3 │
└─────┘
"""
return self._from_pydf(
self.lazy()
.select(exprs) # type: ignore
.collect(no_optimization=True, string_cache=False)
._df
)
def with_columns(self: DF, exprs: Union["pli.Expr", List["pli.Expr"]]) -> DF:
"""
Add or overwrite multiple columns in a DataFrame.
Parameters
----------
exprs
List of Expressions that evaluate to columns.
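Examples
--------
A minimal sketch with illustrative values, adding two derived columns:
>>> df = pl.DataFrame({"a": [1, 2], "b": [3, 4]})
>>> df.with_columns(
...     [
...         (pl.col("a") * 2).alias("a2"),
...         (pl.col("b") + 1).alias("b1"),
...     ]
... ).columns
['a', 'b', 'a2', 'b1']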
"""
if not isinstance(exprs, list):
exprs = [exprs]
return self._from_pydf(
self.lazy()
.with_columns(exprs)
.collect(no_optimization=True, string_cache=False)
._df
)
def n_chunks(self) -> int:
"""
Get number of chunks used by the ChunkedArrays of this DataFrame.
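Examples
--------
A minimal sketch; a freshly created DataFrame consists of a single chunk:
>>> pl.DataFrame({"a": [1, 2, 3]}).n_chunks()
1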
"""
return self._df.n_chunks()
@overload
def max(self: DF, axis: Literal[0] = ...) -> DF:
...
@overload
def max(self, axis: Literal[1]) -> "pli.Series":
...
@overload
def max(self: DF, axis: int = 0) -> Union[DF, "pli.Series"]:
...
def max(self: DF, axis: int = 0) -> Union[DF, "pli.Series"]:
"""
Aggregate the columns of this DataFrame to their maximum value.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.max()
shape: (1, 3)
┌─────┬─────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪══════╡
│ 3 ┆ 8 ┆ null │
└─────┴─────┴──────┘
"""
if axis == 0:
return self._from_pydf(self._df.max())
if axis == 1:
return pli.wrap_s(self._df.hmax())
raise ValueError("Axis should be 0 or 1.") # pragma: no cover
@overload
def min(self: DF, axis: Literal[0] = ...) -> DF:
...
@overload
def min(self, axis: Literal[1]) -> "pli.Series":
...
@overload
def min(self: DF, axis: int = 0) -> Union[DF, "pli.Series"]:
...
def min(self: DF, axis: int = 0) -> Union[DF, "pli.Series"]:
"""
Aggregate the columns of this DataFrame to their minimum value.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.min()
shape: (1, 3)
┌─────┬─────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪══════╡
│ 1 ┆ 6 ┆ null │
└─────┴─────┴──────┘
"""
if axis == 0:
return self._from_pydf(self._df.min())
if axis == 1:
return pli.wrap_s(self._df.hmin())
raise ValueError("Axis should be 0 or 1.") # pragma: no cover
@overload
def sum(self: DF, *, axis: Literal[0] = ..., null_strategy: str = "ignore") -> DF:
...
@overload
def sum(self, *, axis: Literal[1], null_strategy: str = "ignore") -> "pli.Series":
...
@overload
def sum(
self: DF, *, axis: int = 0, null_strategy: str = "ignore"
) -> Union[DF, "pli.Series"]:
...
def sum(
self: DF, *, axis: int = 0, null_strategy: str = "ignore"
) -> Union[DF, "pli.Series"]:
"""
Aggregate the columns of this DataFrame to their sum value.
Parameters
----------
axis
either 0 or 1
null_strategy
{'ignore', 'propagate'}
this argument is only used if axis == 1
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.sum()
shape: (1, 3)
┌─────┬─────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪══════╡
│ 6 ┆ 21 ┆ null │
└─────┴─────┴──────┘
"""
if axis == 0:
return self._from_pydf(self._df.sum())
if axis == 1:
return pli.wrap_s(self._df.hsum(null_strategy))
raise ValueError("Axis should be 0 or 1.") # pragma: no cover
@overload
def mean(self: DF, *, axis: Literal[0] = ..., null_strategy: str = "ignore") -> DF:
...
@overload
def mean(self, *, axis: Literal[1], null_strategy: str = "ignore") -> "pli.Series":
...
@overload
def mean(
self: DF, *, axis: int = 0, null_strategy: str = "ignore"
) -> Union[DF, "pli.Series"]:
...
def mean(
self: DF, *, axis: int = 0, null_strategy: str = "ignore"
) -> Union[DF, "pli.Series"]:
"""
Aggregate the columns of this DataFrame to their mean value.
Parameters
----------
axis
either 0 or 1
null_strategy
{'ignore', 'propagate'}
this argument is only used if axis == 1
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.mean()
shape: (1, 3)
┌─────┬─────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ f64 ┆ f64 ┆ str │
╞═════╪═════╪══════╡
│ 2 ┆ 7 ┆ null │
└─────┴─────┴──────┘
Note: the mean of booleans evaluates to null.
>>> df = pl.DataFrame(
... {
... "a": [True, True, False],
... "b": [True, True, True],
... }
... )
>>> df.mean()
shape: (1, 2)
┌──────┬──────┐
│ a ┆ b │
│ --- ┆ --- │
│ bool ┆ bool │
╞══════╪══════╡
│ null ┆ null │
└──────┴──────┘
Instead, cast to a numeric type first:
>>> df.select(pl.all().cast(pl.UInt8)).mean()
shape: (1, 2)
┌──────────┬─────┐
│ a ┆ b │
│ --- ┆ --- │
│ f64 ┆ f64 │
╞══════════╪═════╡
│ 0.666667 ┆ 1.0 │
└──────────┴─────┘
"""
if axis == 0:
return self._from_pydf(self._df.mean())
if axis == 1:
return pli.wrap_s(self._df.hmean(null_strategy))
raise ValueError("Axis should be 0 or 1.") # pragma: no cover
def std(self: DF) -> DF:
"""
Aggregate the columns of this DataFrame to their standard deviation value.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.std()
shape: (1, 3)
┌─────┬─────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ f64 ┆ f64 ┆ str │
╞═════╪═════╪══════╡
│ 1 ┆ 1 ┆ null │
└─────┴─────┴──────┘
"""
return self._from_pydf(self._df.std())
def var(self: DF) -> DF:
"""
Aggregate the columns of this DataFrame to their variance value.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.var()
shape: (1, 3)
┌─────┬─────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ f64 ┆ f64 ┆ str │
╞═════╪═════╪══════╡
│ 1 ┆ 1 ┆ null │
└─────┴─────┴──────┘
"""
return self._from_pydf(self._df.var())
def median(self: DF) -> DF:
"""
Aggregate the columns of this DataFrame to their median value.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.median()
shape: (1, 3)
┌─────┬─────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ f64 ┆ f64 ┆ str │
╞═════╪═════╪══════╡
│ 2 ┆ 7 ┆ null │
└─────┴─────┴──────┘
"""
return self._from_pydf(self._df.median())
def product(self: DF) -> DF:
"""
Aggregate the columns of this DataFrame to their product values.
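Examples
--------
A minimal sketch with illustrative values:
>>> df = pl.DataFrame({"a": [1, 2, 3], "b": [2.0, 4.0, 6.0]})
>>> df.product().to_series(0).to_list()
[6]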
"""
return self.select(pli.all().product())
def quantile(self: DF, quantile: float, interpolation: str = "nearest") -> DF:
"""
Aggregate the columns of this DataFrame to their quantile value.
Parameters
----------
quantile
quantile between 0.0 and 1.0
interpolation
interpolation type, options: ['nearest', 'higher', 'lower', 'midpoint', 'linear']
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.quantile(0.5, "nearest")
shape: (1, 3)
┌─────┬─────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪══════╡
│ 2 ┆ 7 ┆ null │
└─────┴─────┴──────┘
"""
return self._from_pydf(self._df.quantile(quantile, interpolation))
def to_dummies(self: DF) -> DF:
"""
Get one hot encoded dummy variables.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.to_dummies()
shape: (3, 9)
┌───────┬───────┬───────┬───────┬─────┬───────┬───────┬───────┬───────┐
│ foo_1 ┆ foo_2 ┆ foo_3 ┆ bar_6 ┆ ... ┆ bar_8 ┆ ham_a ┆ ham_b ┆ ham_c │
│ --- ┆ --- ┆ --- ┆ --- ┆ ┆ --- ┆ --- ┆ --- ┆ --- │
│ u8 ┆ u8 ┆ u8 ┆ u8 ┆ ┆ u8 ┆ u8 ┆ u8 ┆ u8 │
╞═══════╪═══════╪═══════╪═══════╪═════╪═══════╪═══════╪═══════╪═══════╡
│ 1 ┆ 0 ┆ 0 ┆ 1 ┆ ... ┆ 0 ┆ 1 ┆ 0 ┆ 0 │
├╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 0 ┆ 1 ┆ 0 ┆ 0 ┆ ... ┆ 0 ┆ 0 ┆ 1 ┆ 0 │
├╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 0 ┆ 0 ┆ 1 ┆ 0 ┆ ... ┆ 1 ┆ 0 ┆ 0 ┆ 1 │
└───────┴───────┴───────┴───────┴─────┴───────┴───────┴───────┴───────┘
"""
return self._from_pydf(self._df.to_dummies())
def distinct(
self: DF,
maintain_order: bool = True,
subset: Optional[Union[str, List[str]]] = None,
keep: str = "first",
) -> DF:
"""
Drop duplicate rows from this DataFrame.
Note that this fails if there is a column of type `List` in the DataFrame or subset.
Parameters
----------
maintain_order
Keep the same order as the original DataFrame. This requires more work to compute.
subset
Subset to use to compare rows
keep
any of {"first", "last"}
Returns
-------
DataFrame with unique rows
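Examples
--------
A minimal sketch with illustrative values, dropping a duplicated row:
>>> df = pl.DataFrame({"a": [1, 1, 2], "b": ["x", "x", "y"]})
>>> df.distinct().shape
(2, 2)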
"""
if subset is not None and not isinstance(subset, list):
subset = [subset]
return self._from_pydf(self._df.distinct(maintain_order, subset, keep))
def rechunk(self: DF) -> DF:
"""
Rechunk the data in this DataFrame to a contiguous allocation.
This will make sure all subsequent operations have optimal and predictable performance.
"""
return self._from_pydf(self._df.rechunk())
def null_count(self: DF) -> DF:
"""
Create a new DataFrame that shows the null counts per column.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, None, 3],
... "bar": [6, 7, None],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.null_count()
shape: (1, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ u32 ┆ u32 ┆ u32 │
╞═════╪═════╪═════╡
│ 1 ┆ 1 ┆ 0 │
└─────┴─────┴─────┘
"""
return self._from_pydf(self._df.null_count())
def sample(
self: DF,
n: Optional[int] = None,
frac: Optional[float] = None,
with_replacement: bool = False,
seed: int = 0,
) -> DF:
"""
Sample from this DataFrame by setting either `n` or `frac`.
Parameters
----------
n
Number of samples; should be smaller than `self.len()`.
frac
Fraction between 0.0 and 1.0.
with_replacement
Sample with replacement.
seed
Initialization seed
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.sample(n=2) # doctest: +IGNORE_RESULT
shape: (2, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 3 ┆ 8 ┆ c │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b │
└─────┴─────┴─────┘
"""
if n is not None:
return self._from_pydf(self._df.sample_n(n, with_replacement, seed))
return self._from_pydf(self._df.sample_frac(frac, with_replacement, seed))
def fold(
self, operation: Callable[["pli.Series", "pli.Series"], "pli.Series"]
) -> "pli.Series":
"""
Apply a horizontal reduction on a DataFrame. This can be used to effectively
determine aggregations on a row level, and can be applied to any DataType that
can be supercast (cast to a similar parent type).
Examples of the supercast rules when applying an arithmetic operation on two DataTypes:
Int8 + Utf8 = Utf8
Float32 + Int64 = Float32
Float32 + Float64 = Float64
Examples
--------
A horizontal sum operation:
>>> df = pl.DataFrame(
... {
... "a": [2, 1, 3],
... "b": [1, 2, 3],
... "c": [1.0, 2.0, 3.0],
... }
... )
>>> df.fold(lambda s1, s2: s1 + s2)
shape: (3,)
Series: 'a' [f64]
[
4
5
9
]
A horizontal minimum operation:
>>> df = pl.DataFrame({"a": [2, 1, 3], "b": [1, 2, 3], "c": [1.0, 2.0, 3.0]})
>>> df.fold(lambda s1, s2: s1.zip_with(s1 < s2, s2))
shape: (3,)
Series: 'a' [f64]
[
1
1
3
]
A horizontal string concatenation:
>>> df = pl.DataFrame(
... {
... "a": ["foo", "bar", 2],
... "b": [1, 2, 3],
... "c": [1.0, 2.0, 3.0],
... }
... )
>>> df.fold(lambda s1, s2: s1 + s2)
shape: (3,)
Series: 'a' [str]
[
"foo11.0"
"bar22.0"
null
]
A horizontal boolean or, similar to a row-wise .any():
>>> df = pl.DataFrame(
... {
... "a": [False, False, True],
... "b": [False, True, False],
... }
... )
>>> df.fold(lambda s1, s2: s1 | s2)
shape: (3,)
Series: 'a' [bool]
[
false
true
true
]
Parameters
----------
operation
function that takes two `Series` and returns a `Series`.
"""
acc = self.to_series(0)
for i in range(1, self.width):
acc = operation(acc, self.to_series(i))
return acc
def row(self, index: int) -> Tuple[Any]:
"""
Get a row as tuple.
Parameters
----------
index
Row index.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.row(2)
(3, 8, 'c')
"""
return self._df.row_tuple(index)
def rows(self) -> List[Tuple]:
"""
Convert columnar data to rows as python tuples.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 3, 5],
... "b": [2, 4, 6],
... }
... )
>>> df.rows()
[(1, 2), (3, 4), (5, 6)]
"""
return self._df.row_tuples()
@overload
def shrink_to_fit(self: DF, in_place: Literal[False] = ...) -> DF:
...
@overload
def shrink_to_fit(self, in_place: Literal[True]) -> None:
...
@overload
def shrink_to_fit(self: DF, in_place: bool) -> Optional[DF]:
...
def shrink_to_fit(self: DF, in_place: bool = False) -> Optional[DF]:
"""
Shrink memory usage of this DataFrame to fit the exact capacity needed to hold the data.
"""
if in_place:
self._df.shrink_to_fit()
return None
else:
df = self.clone()
df._df.shrink_to_fit()
return df
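# Added illustrative note (not part of the original polars source): the two call styles of
# `shrink_to_fit`, as implemented above.
# >>> df.shrink_to_fit(in_place=True)  # shrinks `df` itself and returns None
# >>> small = df.shrink_to_fit()       # `df` is untouched; `small` is the shrunken clone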
def hash_rows(
self, k0: int = 0, k1: int = 1, k2: int = 2, k3: int = 3
) -> "pli.Series":
"""
Hash and combine the rows in this DataFrame.
Hash value is UInt64
Parameters
----------
k0
seed parameter
k1
seed parameter
k2
seed parameter
k3
seed parameter
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.hash_rows(k0=42)  # doctest: +SKIP
shape: (3,)
Series: '' [u64]
[
1208206736888326229
8040480609798856146
18282897888575762835
]
"""
return pli.wrap_s(self._df.hash_rows(k0, k1, k2, k3))
def interpolate(self: DF) -> DF:
"""
Interpolate intermediate values. The interpolation method is linear.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, None, 9, 10],
... "bar": [6, 7, 9, None],
... "baz": [1, None, None, 9],
... }
... )
>>> df.interpolate()
shape: (4, 3)
┌─────┬──────┬─────┐
│ foo ┆ bar ┆ baz │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ i64 │
╞═════╪══════╪═════╡
│ 1 ┆ 6 ┆ 1 │
├╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 5 ┆ 7 ┆ 3 │
├╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 9 ┆ 9 ┆ 6 │
├╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 10 ┆ null ┆ 9 │
└─────┴──────┴─────┘
"""
return self.select(pli.col("*").interpolate())
def is_empty(self) -> bool:
"""
Check if the dataframe is empty
"""
return self.height == 0
def to_struct(self, name: str) -> "pli.Series":
"""
Convert a ``DataFrame`` to a ``Series`` of type ``Struct``
Parameters
----------
name
Name for the struct Series
"""
return pli.wrap_s(self._df.to_struct(name))
def unnest(self, names: Union[str, List[str]]) -> "DataFrame":
"""
Decompose a struct into its fields. The fields will be inserted into the `DataFrame` at the
location of the `struct` column.
Parameters
----------
names
Names of the struct columns that will be decomposed by its fields
"""
if isinstance(names, str):
names = [names]
return wrap_df(self._df.unnest(names))
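# Added illustrative sketch (not part of the original polars source): `to_struct` and
# `unnest` are inverses of one another (column names here are hypothetical).
# >>> df = pl.DataFrame({"a": [1, 2], "b": ["x", "y"]})
# >>> s = df.to_struct("fields")        # Series of dtype Struct, one entry per row
# >>> s.to_frame().unnest("fields")     # back to a DataFrame with columns "a" and "b"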
class RollingGroupBy:
"""
A rolling grouper. This has an `.agg` method which will allow you to run all polars expressions
in a groupby context.
"""
def __init__(
self,
df: "DataFrame",
index_column: str,
period: str,
offset: Optional[str],
closed: str = "none",
):
self.df = df
self.time_column = index_column
self.period = period
self.offset = offset
self.closed = closed
def agg(
self,
column_to_agg: Union[
List[Tuple[str, List[str]]],
Dict[str, Union[str, List[str]]],
List["pli.Expr"],
"pli.Expr",
],
) -> DataFrame:
return (
self.df.lazy()
.groupby_rolling(
self.time_column,
self.period,
self.offset,
self.closed,
)
.agg(column_to_agg) # type: ignore[arg-type]
.collect(no_optimization=True, string_cache=False)
)
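# Added illustrative sketch (not part of the original polars source): a RollingGroupBy is
# normally obtained via ``DataFrame.groupby_rolling`` rather than constructed directly.
# Assuming a datetime column "dt" and a value column "a" (hypothetical names):
# >>> df.groupby_rolling(index_column="dt", period="2d").agg(
# ...     pl.col("a").sum().alias("sum_a")
# ... )  # for each row, aggregates a trailing 2-day window ending at that row's "dt"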
class DynamicGroupBy:
"""
A dynamic grouper. This has an `.agg` method which will allow you to run all polars expressions
in a groupby context.
"""
def __init__(
self,
df: "DataFrame",
index_column: str,
every: str,
period: Optional[str],
offset: Optional[str],
truncate: bool = True,
include_boundaries: bool = True,
closed: str = "none",
by: Optional[Union[str, List[str], "pli.Expr", List["pli.Expr"]]] = None,
):
self.df = df
self.time_column = index_column
self.every = every
self.period = period
self.offset = offset
self.truncate = truncate
self.include_boundaries = include_boundaries
self.closed = closed
self.by = by
def agg(
self,
column_to_agg: Union[
List[Tuple[str, List[str]]],
Dict[str, Union[str, List[str]]],
List["pli.Expr"],
"pli.Expr",
],
) -> DataFrame:
return (
self.df.lazy()
.groupby_dynamic(
self.time_column,
self.every,
self.period,
self.offset,
self.truncate,
self.include_boundaries,
self.closed,
self.by,
)
.agg(column_to_agg) # type: ignore[arg-type]
.collect(no_optimization=True, string_cache=False)
)
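# Added illustrative sketch (not part of the original polars source): a DynamicGroupBy is
# normally obtained via ``DataFrame.groupby_dynamic``. Assuming a datetime column "dt"
# (hypothetical name), this buckets rows into non-overlapping 1-hour windows:
# >>> df.groupby_dynamic(index_column="dt", every="1h").agg(pl.col("a").mean())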
class GroupBy:
"""
Starts a new GroupBy operation.
You can also loop over this Object to loop over `DataFrames` with unique groups.
Examples
--------
>>> df = pl.DataFrame({"foo": ["a", "a", "b"], "bar": [1, 2, 3]})
>>> for group in df.groupby("foo"):
... print(group)
... # doctest: +IGNORE_RESULT
...
shape: (2, 2)
┌─────┬─────┐
│ foo ┆ bar │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════╪═════╡
│ a ┆ 1 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ a ┆ 2 │
└─────┴─────┘
shape: (1, 2)
┌─────┬─────┐
│ foo ┆ bar │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════╪═════╡
│ b ┆ 3 │
└─────┴─────┘
"""
def __init__(
self,
df: "PyDataFrame",
by: Union[str, List[str]],
maintain_order: bool = False,
):
self._df = df
self.by = by
self.maintain_order = maintain_order
def __getitem__(self, item: Any) -> "GBSelection":
print(
"accessing GroupBy by index is deprecated, consider using the `.agg` method"
)
return self._select(item)
def _select(self, columns: Union[str, List[str]]) -> "GBSelection":
"""
Select the columns that will be aggregated.
Parameters
----------
columns
One or multiple columns.
"""
warnings.warn(
"accessing GroupBy by index is deprecated, consider using the `.agg` method"
)
if isinstance(columns, str):
columns = [columns]
return GBSelection(self._df, self.by, columns)
def __iter__(self) -> Iterable[Any]:
groups_df = self.groups()
groups = groups_df["groups"]
df = wrap_df(self._df)
for i in range(groups_df.height):
yield df[groups[i]]
def get_group(self, group_value: Union[Any, Tuple[Any]]) -> DataFrame:
"""
Select a single group as a new DataFrame.
Parameters
----------
group_value
Group to select.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": ["one", "one", "one", "two", "two", "two"],
... "bar": ["A", "B", "C", "A", "B", "C"],
... "baz": [1, 2, 3, 4, 5, 6],
... }
... )
>>> df.groupby("foo").get_group("one")
shape: (3, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ baz │
│ --- ┆ --- ┆ --- │
│ str ┆ str ┆ i64 │
╞═════╪═════╪═════╡
│ one ┆ A ┆ 1 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ one ┆ B ┆ 2 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ one ┆ C ┆ 3 │
└─────┴─────┴─────┘
"""
groups_df = self.groups()
groups = groups_df["groups"]
if not isinstance(group_value, list):
group_value = [group_value]
by = self.by
if not isinstance(by, list):
by = [by]
mask = None
for column, group_val in zip(by, group_value):
local_mask = groups_df[column] == group_val
if mask is None:
mask = local_mask
else:
mask = mask & local_mask
# should be only one match
try:
groups_idx = groups[mask][0] # type: ignore
except IndexError:
raise ValueError(f"no group: {group_value} found")
df = wrap_df(self._df)
return df[groups_idx]
def groups(self) -> DataFrame:
"""
Return a `DataFrame` with:
* the groupby keys
* the group indexes aggregated as lists
"""
warnings.warn(
"accessing GroupBy by index is deprecated, consider using the `.agg` method"
)
return wrap_df(self._df.groupby(self.by, None, "groups"))
def apply(self, f: Callable[[DataFrame], DataFrame]) -> DataFrame:
"""
Apply a function over the groups as a sub-DataFrame.
Beware, this is slow.
Parameters
----------
f
Custom function.
Returns
-------
DataFrame
"""
return wrap_df(self._df.groupby_apply(self.by, f))
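# Added illustrative sketch (not part of the original polars source): with `apply`, the
# custom function receives each group as its own DataFrame and must return a DataFrame.
# >>> df = pl.DataFrame({"k": ["a", "a", "b"], "v": [1, 2, 3]})
# >>> df.groupby("k").apply(lambda group_df: group_df.head(1))  # first row of each group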
def agg(
self,
column_to_agg: Union[
List[Tuple[str, List[str]]],
Dict[str, Union[str, List[str]]],
List["pli.Expr"],
"pli.Expr",
],
) -> DataFrame:
"""
Use multiple aggregations on columns. This can be combined with the complete lazy API
and is considered idiomatic polars.
Parameters
----------
column_to_agg
map column to aggregation functions.
Returns
-------
Result of groupby split apply operations.
Examples
--------
>>> df.groupby(["foo", "bar"]).agg(
... [
... pl.sum("ham"),
... pl.col("spam").tail(4).sum(),
... ]
... ) # doctest: +SKIP
"""
# a single list comprehension would be cleaner, but mypy complains on different
# lines for py3.7 vs py3.10 about typing errors, so this is the same logic,
# but broken down into two small functions
def _str_to_list(y: Any) -> Any:
return [y] if isinstance(y, str) else y
def _wrangle(x: Any) -> list:
return [(xi[0], _str_to_list(xi[1])) for xi in x]
if isinstance(column_to_agg, pli.Expr):
column_to_agg = [column_to_agg]
if isinstance(column_to_agg, dict):
column_to_agg = _wrangle(column_to_agg.items())
elif isinstance(column_to_agg, list):
if isinstance(column_to_agg[0], tuple):
column_to_agg = _wrangle(column_to_agg)
elif isinstance(column_to_agg[0], pli.Expr):
return (
wrap_df(self._df)
.lazy()
.groupby(self.by, maintain_order=self.maintain_order)
.agg(column_to_agg) # type: ignore[arg-type]
.collect(no_optimization=True, string_cache=False)
)
pass
else:
raise ValueError(
f"argument: {column_to_agg} not understood, have you passed a list of expressions?"
)
else:
raise ValueError(
f"argument: {column_to_agg} not understood, have you passed a list of expressions?"
)
return wrap_df(self._df.groupby_agg(self.by, column_to_agg))
def head(self, n: int = 5) -> DataFrame:
"""
Return first n rows of each group.
Parameters
----------
n
Number of values of the group to select
Examples
--------
>>> df = pl.DataFrame(
... {
... "letters": ["c", "c", "a", "c", "a", "b"],
... "nrs": [1, 2, 3, 4, 5, 6],
... }
... )
>>> df
shape: (6, 2)
┌─────────┬─────┐
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ c ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ c ┆ 2 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ a ┆ 3 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ c ┆ 4 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ a ┆ 5 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ b ┆ 6 │
└─────────┴─────┘
>>> df.groupby("letters").head(2).sort("letters")
shape: (5, 2)
┌─────────┬─────┐
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ a ┆ 3 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ a ┆ 5 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ b ┆ 6 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ c ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ c ┆ 2 │
└─────────┴─────┘
"""
return (
wrap_df(self._df)
.lazy()
.groupby(self.by, self.maintain_order)
.head(n)
.collect(no_optimization=True, string_cache=False)
)
def tail(self, n: int = 5) -> DataFrame:
"""
Return last n rows of each group.
Parameters
----------
n
Number of values of the group to select
Examples
--------
>>> df = pl.DataFrame(
... {
... "letters": ["c", "c", "a", "c", "a", "b"],
... "nrs": [1, 2, 3, 4, 5, 6],
... }
... )
>>> df
shape: (6, 2)
┌─────────┬─────┐
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ c ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ c ┆ 2 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ a ┆ 3 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ c ┆ 4 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ a ┆ 5 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ b ┆ 6 │
└─────────┴─────┘
>>> (df.groupby("letters").tail(2).sort("letters"))
shape: (5, 2)
┌─────────┬─────┐
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ a ┆ 3 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ a ┆ 5 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ b ┆ 6 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ c ┆ 2 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ c ┆ 4 │
└─────────┴─────┘
"""
return (
wrap_df(self._df)
.lazy()
.groupby(self.by, self.maintain_order)
.tail(n)
.collect(no_optimization=True, string_cache=False)
)
def _select_all(self) -> "GBSelection":
"""
Select all columns for aggregation.
"""
return GBSelection(self._df, self.by, None)
def pivot(
self, pivot_column: Union[str, List[str]], values_column: Union[str, List[str]]
) -> "PivotOps":
"""
Do a pivot operation based on the group key, a pivot column and an aggregation function on the values column.
Parameters
----------
pivot_column
Column to pivot.
values_column
Column that will be aggregated.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": ["one", "one", "one", "two", "two", "two"],
... "bar": ["A", "B", "C", "A", "B", "C"],
... "baz": [1, 2, 3, 4, 5, 6],
... }
... )
>>> df.groupby("foo").pivot(pivot_column="bar", values_column="baz").first()
shape: (2, 4)
┌─────┬─────┬─────┬─────┐
│ foo ┆ A ┆ B ┆ C │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 ┆ i64 │
╞═════╪═════╪═════╪═════╡
│ one ┆ 1 ┆ 2 ┆ 3 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ two ┆ 4 ┆ 5 ┆ 6 │
└─────┴─────┴─────┴─────┘
"""
if isinstance(pivot_column, str):
pivot_column = [pivot_column]
if isinstance(values_column, str):
values_column = [values_column]
return PivotOps(self._df, self.by, pivot_column, values_column)
def first(self) -> DataFrame:
"""
Aggregate the first values in the group.
"""
return self.agg(pli.all().first())
def last(self) -> DataFrame:
"""
Aggregate the last values in the group.
"""
return self.agg(pli.all().last())
def sum(self) -> DataFrame:
"""
Reduce the groups to the sum.
"""
return self.agg(pli.all().sum())
def min(self) -> DataFrame:
"""
Reduce the groups to the minimal value.
"""
return self.agg(pli.all().min())
def max(self) -> DataFrame:
"""
Reduce the groups to the maximal value.
"""
return self.agg(pli.all().max())
def count(self) -> DataFrame:
"""
Count the number of values in each group.
"""
return self.agg(pli.lazy_functions.count())
def mean(self) -> DataFrame:
"""
Reduce the groups to the mean values.
"""
return self.agg(pli.all().mean())
def n_unique(self) -> DataFrame:
"""
Count the unique values per group.
"""
return self.agg(pli.all().n_unique())
def quantile(self, quantile: float, interpolation: str = "nearest") -> DataFrame:
"""
Compute the quantile per group.
Parameters
----------
quantile
quantile between 0.0 and 1.0
interpolation
interpolation type, options: ['nearest', 'higher', 'lower', 'midpoint', 'linear']
"""
return self.agg(pli.all().quantile(quantile, interpolation))
def median(self) -> DataFrame:
"""
Return the median per group.
"""
return self.agg(pli.all().median())
def agg_list(self) -> DataFrame:
"""
Aggregate the groups into Series.
Examples
--------
>>> df = pl.DataFrame({"a": ["one", "two", "one", "two"], "b": [1, 2, 3, 4]})
>>> df.groupby("a").agg_list()
shape: (2, 2)
┌─────┬────────────┐
│ a ┆ b │
│ --- ┆ --- │
│ str ┆ list [i64] │
╞═════╪════════════╡
│ one ┆ [1, 3] │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ two ┆ [2, 4] │
└─────┴────────────┘
"""
return self.agg(pli.all().list())
class PivotOps:
"""
Utility class returned in a pivot operation.
"""
def __init__(
self,
df: DataFrame,
by: Union[str, List[str]],
pivot_column: Union[str, List[str]],
values_column: Union[str, List[str]],
):
self._df = df
self.by = by
self.pivot_column = pivot_column
self.values_column = values_column
def first(self) -> DataFrame:
"""
Get the first value per group.
"""
return wrap_df(
self._df.pivot(self.by, self.pivot_column, self.values_column, "first")
)
def sum(self) -> DataFrame:
"""
Get the sum per group.
"""
return wrap_df(
self._df.pivot(self.by, self.pivot_column, self.values_column, "sum")
)
def min(self) -> DataFrame:
"""
Get the minimal value per group.
"""
return wrap_df(
self._df.pivot(self.by, self.pivot_column, self.values_column, "min")
)
def max(self) -> DataFrame:
"""
Get the maximal value per group.
"""
return wrap_df(
self._df.pivot(self.by, self.pivot_column, self.values_column, "max")
)
def mean(self) -> DataFrame:
"""
Get the mean value per group.
"""
return wrap_df(
self._df.pivot(self.by, self.pivot_column, self.values_column, "mean")
)
def count(self) -> DataFrame:
"""
Count the values per group.
"""
return wrap_df(
self._df.pivot(self.by, self.pivot_column, self.values_column, "count")
)
def median(self) -> DataFrame:
"""
Get the median value per group.
"""
return wrap_df(
self._df.pivot(self.by, self.pivot_column, self.values_column, "median")
)
def last(self) -> DataFrame:
"""
Get the last value per group.
"""
return wrap_df(
self._df.pivot(self.by, self.pivot_column, self.values_column, "last")
)
class GBSelection:
"""
Utility class returned in a groupby operation.
"""
def __init__(
self,
df: "PyDataFrame",
by: Union[str, List[str]],
selection: Optional[List[str]],
):
self._df = df
self.by = by
self.selection = selection
def first(self) -> DataFrame:
"""
Aggregate the first values in the group.
"""
return wrap_df(self._df.groupby(self.by, self.selection, "first"))
def last(self) -> DataFrame:
"""
Aggregate the last values in the group.
"""
return wrap_df(self._df.groupby(self.by, self.selection, "last"))
def sum(self) -> DataFrame:
"""
Reduce the groups to the sum.
"""
return wrap_df(self._df.groupby(self.by, self.selection, "sum"))
def min(self) -> DataFrame:
"""
Reduce the groups to the minimal value.
"""
return wrap_df(self._df.groupby(self.by, self.selection, "min"))
def max(self) -> DataFrame:
"""
Reduce the groups to the maximal value.
"""
return wrap_df(self._df.groupby(self.by, self.selection, "max"))
def count(self) -> DataFrame:
"""
Count the number of values in each group.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, None, 3, 4],
... "bar": ["a", "b", "c", "a"],
... }
... )
>>> df.groupby("bar").count() # counts nulls
shape: (3, 2)
┌─────┬───────┐
│ bar ┆ count │
│ --- ┆ --- │
│ str ┆ u32 │
╞═════╪═══════╡
│ c ┆ 1 │
├╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ a ┆ 2 │
├╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ b ┆ 1 │
└─────┴───────┘
"""
return wrap_df(self._df.groupby(self.by, self.selection, "count"))
def mean(self) -> DataFrame:
"""
Reduce the groups to the mean values.
"""
return wrap_df(self._df.groupby(self.by, self.selection, "mean"))
def n_unique(self) -> DataFrame:
"""
Count the unique values per group.
"""
return wrap_df(self._df.groupby(self.by, self.selection, "n_unique"))
def quantile(self, quantile: float, interpolation: str = "nearest") -> DataFrame:
"""
Compute the quantile per group.
Parameters
----------
quantile
quantile between 0.0 and 1.0
interpolation
interpolation type, options: ['nearest', 'higher', 'lower', 'midpoint', 'linear']
"""
return wrap_df(
self._df.groupby_quantile(self.by, self.selection, quantile, interpolation)
)
def median(self) -> DataFrame:
"""
Return the median per group.
"""
return wrap_df(self._df.groupby(self.by, self.selection, "median"))
def agg_list(self) -> DataFrame:
"""
Aggregate the groups into Series.
"""
return wrap_df(self._df.groupby(self.by, self.selection, "agg_list"))
def apply(
self,
func: Callable[[Any], Any],
return_dtype: Optional[Type[DataType]] = None,
) -> DataFrame:
"""
Apply a function over the groups.
"""
df = self.agg_list()
if self.selection is None:
raise TypeError(
"apply not available for Groupby.select_all(). Use select() instead."
)
for name in self.selection:
s = df.drop_in_place(name + "_agg_list").apply(func, return_dtype)
s.rename(name, in_place=True)
df[name] = s
return df
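# Added illustrative sketch (not part of the original polars source): in GBSelection.apply
# the function receives each group's aggregated values for the selected columns. The
# selection API used below is the deprecated one mentioned above; `.agg` is preferred.
# >>> df.groupby("k")._select("v").apply(lambda vals: vals.sum())  # hypothetical column names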
| 29.896203
| 137
| 0.411763
|
7949f44d09b56a1711c7722c0ab5858c20364337
| 10,895
|
py
|
Python
|
python_modules/libraries/dagster-airflow/dagster_airflow/operators/docker_operator.py
|
asamoal/dagster
|
08fad28e4b608608ce090ce2e8a52c2cf9dd1b64
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-airflow/dagster_airflow/operators/docker_operator.py
|
asamoal/dagster
|
08fad28e4b608608ce090ce2e8a52c2cf9dd1b64
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-airflow/dagster_airflow/operators/docker_operator.py
|
asamoal/dagster
|
08fad28e4b608608ce090ce2e8a52c2cf9dd1b64
|
[
"Apache-2.0"
] | null | null | null |
import ast
import json
from contextlib import contextmanager
from airflow.exceptions import AirflowException, AirflowSkipException
from dagster_airflow.vendor.docker_operator import DockerOperator
from docker import APIClient, from_env
import dagster._check as check
import dagster.seven as seven
from dagster.core.execution.api import create_execution_plan
from dagster.core.execution.plan.plan import should_skip_step
from dagster.core.instance import AIRFLOW_EXECUTION_DATE_STR, DagsterInstance
from dagster.grpc.types import ExecuteStepArgs
from dagster.serdes import deserialize_json_to_dagster_namedtuple, serialize_dagster_namedtuple
from .util import check_events_for_failures, check_events_for_skips, get_aws_environment
DOCKER_TEMPDIR = "/tmp"
class DagsterDockerOperator(DockerOperator):
"""Dagster operator for Apache Airflow.
Wraps a modified DockerOperator incorporating https://github.com/apache/airflow/pull/4315.
Additionally, unlike the standard DockerOperator, this operator can configure its Docker
client using docker.from_env, so it isn't necessary to explicitly set docker_url,
tls_config, or api_version.
Incorporates https://github.com/apache/airflow/pull/4315/ and an implementation of
https://issues.apache.org/jira/browse/AIRFLOW-3825.
Parameters:
host_tmp_dir (str): Specify the location of the temporary directory on the host which will
be mapped to tmp_dir. If not provided, defaults to the standard system temp directory.
"""
def __init__(self, dagster_operator_parameters, *args):
kwargs = dagster_operator_parameters.op_kwargs
tmp_dir = kwargs.pop("tmp_dir", DOCKER_TEMPDIR)
host_tmp_dir = kwargs.pop("host_tmp_dir", seven.get_system_temp_directory())
self.host_tmp_dir = host_tmp_dir
self.docker_conn_id_set = kwargs.get("docker_conn_id") is not None
self.run_config = dagster_operator_parameters.run_config
self.pipeline_name = dagster_operator_parameters.pipeline_name
self.pipeline_snapshot = dagster_operator_parameters.pipeline_snapshot
self.execution_plan_snapshot = dagster_operator_parameters.execution_plan_snapshot
self.parent_pipeline_snapshot = dagster_operator_parameters.parent_pipeline_snapshot
self.mode = dagster_operator_parameters.mode
self.step_keys = dagster_operator_parameters.step_keys
self.recon_repo = dagster_operator_parameters.recon_repo
self._run_id = None
self.instance_ref = dagster_operator_parameters.instance_ref
check.invariant(self.instance_ref)
self.instance = DagsterInstance.from_ref(self.instance_ref)
# These shenanigans are so we can override DockerOperator.get_hook in order to configure
# a docker client using docker.from_env, rather than messing with the logic of
# DockerOperator.execute
if not self.docker_conn_id_set:
try:
from_env().version()
except Exception:
pass
else:
kwargs["docker_conn_id"] = True
if "environment" not in kwargs:
kwargs["environment"] = get_aws_environment()
super(DagsterDockerOperator, self).__init__(
task_id=dagster_operator_parameters.task_id,
dag=dagster_operator_parameters.dag,
tmp_dir=tmp_dir,
host_tmp_dir=host_tmp_dir,
xcom_push=True,
# We do this because log lines won't necessarily be emitted in order (!) -- so we can't
# just check the last log line to see if it's JSON.
xcom_all=True,
*args,
**kwargs,
)
@contextmanager
def get_host_tmp_dir(self):
yield self.host_tmp_dir
def execute_raw(self, context):
"""Modified only to use the get_host_tmp_dir helper."""
self.log.info("Starting docker container from image %s", self.image)
tls_config = self.__get_tls_config()
if self.docker_conn_id:
self.cli = self.get_hook().get_conn()
else:
self.cli = APIClient(base_url=self.docker_url, version=self.api_version, tls=tls_config)
if self.force_pull or len(self.cli.images(name=self.image)) == 0:
self.log.info("Pulling docker image %s", self.image)
for l in self.cli.pull(self.image, stream=True):
output = seven.json.loads(l.decode("utf-8").strip())
if "status" in output:
self.log.info("%s", output["status"])
with self.get_host_tmp_dir() as host_tmp_dir:
self.environment["AIRFLOW_TMP_DIR"] = self.tmp_dir
self.volumes.append("{0}:{1}".format(host_tmp_dir, self.tmp_dir))
self.container = self.cli.create_container(
command=self.get_docker_command(context.get("ts")),
environment=self.environment,
host_config=self.cli.create_host_config(
auto_remove=self.auto_remove,
binds=self.volumes,
network_mode=self.network_mode,
shm_size=self.shm_size,
dns=self.dns,
dns_search=self.dns_search,
cpu_shares=int(round(self.cpus * 1024)),
mem_limit=self.mem_limit,
),
image=self.image,
user=self.user,
working_dir=self.working_dir,
)
self.cli.start(self.container["Id"])
res = []
line = ""
for new_line in self.cli.logs(
container=self.container["Id"], stream=True, stdout=True, stderr=False
):
line = new_line.strip()
if hasattr(line, "decode"):
line = line.decode("utf-8")
self.log.info(line)
res.append(line)
result = self.cli.wait(self.container["Id"])
if result["StatusCode"] != 0:
full_logs = self.cli.logs(container=self.container["Id"], stdout=True, stderr=True)
raise AirflowException(
"docker container failed with result: {result} and logs: {logs}".format(
result=repr(result),
logs=full_logs,
)
)
if self.xcom_push_flag:
# Try to avoid any kind of race condition?
return res if self.xcom_all else str(line)
# This is a class-private name on DockerOperator for no good reason --
# all that the status quo does is inhibit extension of the class.
# See https://issues.apache.org/jira/browse/AIRFLOW-3880
def __get_tls_config(self):
# pylint: disable=no-member
return super(DagsterDockerOperator, self)._DockerOperator__get_tls_config()
@property
def run_id(self):
if self._run_id is None:
return ""
else:
return self._run_id
def query(self, airflow_ts):
check.opt_str_param(airflow_ts, "airflow_ts")
recon_pipeline = self.recon_repo.get_reconstructable_pipeline(self.pipeline_name)
input_json = serialize_dagster_namedtuple(
ExecuteStepArgs(
pipeline_origin=recon_pipeline.get_python_origin(),
pipeline_run_id=self.run_id,
instance_ref=self.instance_ref,
step_keys_to_execute=self.step_keys,
)
)
command = "dagster api execute_step {}".format(json.dumps(input_json))
self.log.info("Executing: {command}\n".format(command=command))
return command
def get_docker_command(self, airflow_ts):
"""Deliberately renamed from get_command to avoid shadoowing the method of the base class"""
check.opt_str_param(airflow_ts, "airflow_ts")
if self.command is not None and self.command.strip().find("[") == 0:
commands = ast.literal_eval(self.command)
elif self.command is not None:
commands = self.command
else:
commands = self.query(airflow_ts)
return commands
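# Added illustrative note (not part of the original dagster source): the branch above accepts
# either a Python list literal rendered as a string or a plain command string. For example:
# >>> ast.literal_eval("['dagster', 'api', 'execute_step']")
# ['dagster', 'api', 'execute_step']
# while a non-list string such as "dagster api execute_step ..." is passed through unchanged.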
def get_hook(self):
if self.docker_conn_id_set:
return super(DagsterDockerOperator, self).get_hook()
class _DummyHook:
def get_conn(self):
return from_env().api
return _DummyHook()
def _should_skip(self, pipeline_run):
recon_pipeline = self.recon_repo.get_reconstructable_pipeline(self.pipeline_name)
execution_plan = create_execution_plan(
recon_pipeline.subset_for_execution_from_existing_pipeline(
pipeline_run.solids_to_execute
),
run_config=self.run_config,
step_keys_to_execute=self.step_keys,
mode=self.mode,
)
return should_skip_step(execution_plan, instance=self.instance, run_id=pipeline_run.run_id)
def execute(self, context):
if "run_id" in self.params:
self._run_id = self.params["run_id"]
elif "dag_run" in context and context["dag_run"] is not None:
self._run_id = context["dag_run"].run_id
try:
tags = {AIRFLOW_EXECUTION_DATE_STR: context.get("ts")} if "ts" in context else {}
pipeline_run = self.instance.register_managed_run(
pipeline_name=self.pipeline_name,
run_id=self.run_id,
run_config=self.run_config,
mode=self.mode,
solids_to_execute=None,
step_keys_to_execute=None,
tags=tags,
root_run_id=None,
parent_run_id=None,
pipeline_snapshot=self.pipeline_snapshot,
execution_plan_snapshot=self.execution_plan_snapshot,
parent_pipeline_snapshot=self.parent_pipeline_snapshot,
)
if self._should_skip(pipeline_run):
raise AirflowSkipException(
"Dagster emitted skip event, skipping execution in Airflow"
)
res = self.execute_raw(context)
self.log.info("Finished executing container.")
if not res:
raise AirflowException("Missing query response")
try:
events = [deserialize_json_to_dagster_namedtuple(line) for line in res if line]
except Exception:
raise AirflowException(
"Could not parse response {response}".format(response=repr(res))
)
check_events_for_failures(events)
check_events_for_skips(events)
return events
finally:
self._run_id = None
| 40.055147
| 100
| 0.634328
|
7949f5bb169874551aa2000bc092d6ee34253c01
| 7,829
|
py
|
Python
|
nbviewer/providers/gist/handlers.py
|
krishnatg/nbviewer
|
e07f8651c6b71c0482de26bed77352b965f9ddc1
|
[
"BSD-3-Clause-Clear"
] | 1
|
2020-04-01T07:17:27.000Z
|
2020-04-01T07:17:27.000Z
|
nbviewer/providers/gist/handlers.py
|
krishnatg/nbviewer
|
e07f8651c6b71c0482de26bed77352b965f9ddc1
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
nbviewer/providers/gist/handlers.py
|
krishnatg/nbviewer
|
e07f8651c6b71c0482de26bed77352b965f9ddc1
|
[
"BSD-3-Clause-Clear"
] | 1
|
2021-09-10T17:18:44.000Z
|
2021-09-10T17:18:44.000Z
|
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import json
from tornado import web, gen
from tornado.log import app_log
from ..base import (
BaseHandler,
cached,
RenderingHandler,
)
from ...utils import (
clean_filename,
quote,
response_text,
)
from ..github.handlers import GithubClientMixin
PROVIDER_CTX = {
'provider_label': 'Gist',
'provider_icon': 'github-square',
'executor_label': 'Binder',
'executor_icon': 'icon-binder',
}
BINDER_TMPL = '{binder_base_url}/gist/{user}/{gist_id}/master'
BINDER_PATH_TMPL = BINDER_TMPL+'?filepath={path}'
class GistClientMixin(GithubClientMixin):
def client_error_message(self, exc, url, body, msg=None):
if exc.code == 403 and 'too big' in body.lower():
return 400, "GitHub will not serve raw gists larger than 10MB"
return super(GistClientMixin, self).client_error_message(
exc, url, body, msg
)
class UserGistsHandler(GistClientMixin, BaseHandler):
"""list a user's gists containing notebooks
.ipynb file extension is required for listing (not for rendering).
"""
@cached
@gen.coroutine
def get(self, user):
page = self.get_argument("page", None)
params = {}
if page:
params['page'] = page
with self.catch_client_error():
response = yield self.github_client.get_gists(user, params=params)
prev_url, next_url = self.get_page_links(response)
gists = json.loads(response_text(response))
entries = []
for gist in gists:
notebooks = [f for f in gist['files'] if f.endswith('.ipynb')]
if notebooks:
entries.append(dict(
id=gist['id'],
notebooks=notebooks,
description=gist['description'] or '',
))
provider_url = u"https://gist.github.com/{user}".format(user=user)
html = self.render_template("usergists.html",
entries=entries, user=user, provider_url=provider_url, prev_url=prev_url, next_url=next_url, **PROVIDER_CTX
)
yield self.cache_and_finish(html)
class GistHandler(GistClientMixin, RenderingHandler):
"""render a gist notebook, or list files if a multifile gist"""
@cached
@gen.coroutine
def get(self, user, gist_id, filename=''):
with self.catch_client_error():
response = yield self.github_client.get_gist(gist_id)
gist = json.loads(response_text(response))
gist_id=gist['id']
if user is None:
# redirect to /gist/user/gist_id if no user given
owner_dict = gist.get('owner', {})
if owner_dict:
user = owner_dict['login']
else:
user = 'anonymous'
new_url = u"{format}/gist/{user}/{gist_id}".format(
format=self.format_prefix, user=user, gist_id=gist_id)
if filename:
new_url = new_url + "/" + filename
self.redirect(self.from_base(new_url))
return
files = gist['files']
many_files_gist = (len(files) > 1)
if not many_files_gist and not filename:
filename = list(files.keys())[0]
if filename and filename in files:
file = files[filename]
if (file['type'] or '').startswith('image/'):
app_log.debug("Fetching raw image (%s) %s/%s: %s", file['type'], gist_id, filename, file['raw_url'])
response = yield self.fetch(file['raw_url'])
# use raw bytes for images:
content = response.body
elif file['truncated']:
app_log.debug("Gist %s/%s truncated, fetching %s", gist_id, filename, file['raw_url'])
response = yield self.fetch(file['raw_url'])
content = response_text(response, encoding='utf-8')
else:
content = file['content']
# Enable a binder navbar icon if a binder base URL is configured
executor_url = BINDER_PATH_TMPL.format(
binder_base_url=self.binder_base_url,
user=user.rstrip('/'),
gist_id=gist_id,
path=quote(filename)
) if self.binder_base_url else None
if not many_files_gist or filename.endswith('.ipynb'):
yield self.finish_notebook(
content,
file['raw_url'],
provider_url=gist['html_url'],
executor_url=executor_url,
msg="gist: %s" % gist_id,
public=gist['public'],
format=self.format,
request=self.request,
**PROVIDER_CTX
)
else:
self.set_header('Content-Type', file.get('type') or 'text/plain')
# cannot redirect because of X-Frame-Content
self.finish(content)
return
elif filename:
raise web.HTTPError(404, "No such file in gist: %s (%s)", filename, list(files.keys()))
else:
entries = []
ipynbs = []
others = []
for file in files.values():
e = {}
e['name'] = file['filename']
if file['filename'].endswith('.ipynb'):
e['url'] = quote('/%s/%s' % (gist_id, file['filename']))
e['class'] = 'fa-book'
ipynbs.append(e)
else:
provider_url = u"https://gist.github.com/{user}/{gist_id}#file-{clean_name}".format(
user=user,
gist_id=gist_id,
clean_name=clean_filename(file['filename']),
)
e['url'] = provider_url
e['class'] = 'fa-share'
others.append(e)
entries.extend(ipynbs)
entries.extend(others)
# Enable a binder navbar icon if a binder base URL is configured
executor_url = BINDER_TMPL.format(
binder_base_url=self.binder_base_url,
user=user.rstrip('/'),
gist_id=gist_id
) if self.binder_base_url else None
html = self.render_template(
'treelist.html',
entries=entries,
tree_type='gist',
tree_label='gists',
user=user.rstrip('/'),
provider_url=gist['html_url'],
executor_url=executor_url,
**PROVIDER_CTX
)
yield self.cache_and_finish(html)
class GistRedirectHandler(BaseHandler):
"""redirect old /<gist-id> to new /gist/<gist-id>"""
def get(self, gist_id, file=''):
new_url = '%s/gist/%s' % (self.format_prefix, gist_id)
if file:
new_url = "%s/%s" % (new_url, file)
app_log.info("Redirecting %s to %s", self.request.uri, new_url)
self.redirect(self.from_base(new_url))
def default_handlers(handlers=[]):
"""Tornado handlers"""
return handlers + [
(r'/gist/([^\/]+/)?([0-9]+|[0-9a-f]{20,})', GistHandler),
(r'/gist/([^\/]+/)?([0-9]+|[0-9a-f]{20,})/(?:files/)?(.*)', GistHandler),
(r'/([0-9]+|[0-9a-f]{20,})', GistRedirectHandler),
(r'/([0-9]+|[0-9a-f]{20,})/(.*)', GistRedirectHandler),
(r'/gist/([^\/]+)/?', UserGistsHandler),
]
def uri_rewrites(rewrites=[]):
return [
(r'^([a-f0-9]+)/?$',
u'/{0}'),
('^https?://gist.github.com/([^\/]+/)?([a-f0-9]+)/?$',
u'/{1}'),
] + rewrites
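def _example_apply_rewrites(uri):
    """Illustrative sketch, not part of the original nbviewer module: shows how the
    (pattern, template) pairs returned by uri_rewrites() could map a user-entered gist
    reference to an nbviewer path. nbviewer's real rewrite machinery lives elsewhere."""
    import re
    for pattern, template in uri_rewrites():
        match = re.match(pattern, uri)
        if match:
            # e.g. "https://gist.github.com/someuser/0123456789abcdef" -> "/0123456789abcdef"
            return template.format(*match.groups())
    return uri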
| 34.187773
| 119
| 0.53404
|
7949f848dde62ff3495185ec9fc75138949e335b
| 12,722
|
py
|
Python
|
flow/benchmarks/1merge_scratch_FLOW.py
|
cuijiaxun/MITC
|
a226308424237a69b5e938baf72949de9b1b4bf2
|
[
"MIT"
] | 1
|
2021-06-17T03:25:13.000Z
|
2021-06-17T03:25:13.000Z
|
flow/benchmarks/1merge_scratch_FLOW.py
|
cuijiaxun/MITC
|
a226308424237a69b5e938baf72949de9b1b4bf2
|
[
"MIT"
] | null | null | null |
flow/benchmarks/1merge_scratch_FLOW.py
|
cuijiaxun/MITC
|
a226308424237a69b5e938baf72949de9b1b4bf2
|
[
"MIT"
] | 1
|
2021-03-18T16:20:57.000Z
|
2021-03-18T16:20:57.000Z
|
"""Open merge example.
Trains a small percentage of RL vehicles to dissipate shockwaves caused by
merges in an open network.
"""
import json
import os
import random
from copy import deepcopy
import numpy as np
import pickle
from argparse import ArgumentParser
import ray
try:
from ray.rllib.agents.agent import get_agent_class
except ImportError:
from ray.rllib.agents.registry import get_agent_class
from ray.tune import run_experiments
from ray.tune.registry import register_env
from flow.envs import MergePOEnv,MergePOEnv_noheadway, TestEnv,Env
from flow.networks import Network
from flow.utils.registry import make_create_env
from flow.utils.rllib import FlowParamsEncoder
from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams, \
InFlows, SumoCarFollowingParams
from flow.scenarios.merge import ADDITIONAL_NET_PARAMS
from flow.core.params import VehicleParams, SumoLaneChangeParams
from flow.controllers import SimCarFollowingController,IDMController, RLController, SimLaneChangeController, ContinuousRouter
# TODO hard coded
#scenarios_dir = os.path.join(os.path.expanduser("~/"), 'local', 'flow_2019_07', 'flow', 'scenarios')
scenarios_dir = os.path.join(os.path.expanduser("~/"), 'Documents', 'MITC', 'flow', 'scenarios')
# UNCOMMENT ONE OF THE FOLLOWING 3 VARIATIONS OF I696 SCENARIO
#
#one-lane (no lane-changes), smaller
####################################
#scenario_road_data = {"name" : "I696_ONE_LANE_CROPPED",
# "net" : os.path.join(scenarios_dir, 'i696', 'osm.net.i696_onelane_cropped.xml'),
# "rou" : [os.path.join(scenarios_dir, 'i696', 'i696.rou.i696_onelane_cropped.xml')],
# "edges_distribution" : ["8666737", "124433709", "491266613", "404969345#1"]
# }
#
#one-lane (no lane-changes)
###########################
scenario_road_data = {"name" : "I696_ONE_LANE",
"net" : os.path.join(scenarios_dir, 'i696', 'osm.net.i696_onelane.xml'),
"rou" : [os.path.join(scenarios_dir, 'i696', 'i696.rou.xml')],
#"rou" : [os.path.join(scenarios_dir, 'i696', 'i696.rou.i696_onelane_Evenshorter.xml')],
"edges_distribution" : ["404969345#0", "59440544#0", "124433709", "38726647"]
}
#
#the full I696 test
###################
#scenario_road_data = {"name" : "I696_FULL",
# "net" : os.path.join(scenarios_dir, 'i696', 'osm.net.xml'),
# "rou" : [os.path.join(scenarios_dir, 'i696', 'i696.rou.xml')],
# "edges_distribution" : ["404969345#0", "59440544#0", "124433709", "38726647"]
# }
# experiment number
# - 0: 10% RL penetration, 5 max controllable vehicles
# - 1: 25% RL penetration, 13 max controllable vehicles
# - 2: 33% RL penetration, 17 max controllable vehicles
EXP_NUM = 0
# time horizon of a single rollout
HORIZON = 2000 #128#600
# number of rollouts per training iteration
N_ROLLOUTS = 15#1#20
# number of parallel workers
N_CPUS = 4#8#2
# inflow rate at the highway
FLOW_RATE = 2000
MERGE_RATE = 200
# percent of autonomous vehicles
RL_PENETRATION = [0.1, 0.25, 0.33][EXP_NUM]
# num_rl term (see ADDITIONAL_ENV_PARAMs)
#NUM_RL = [5, 13, 17][EXP_NUM]
NUM_RL = [30, 250, 333][EXP_NUM]
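# Added illustrative note (not part of the original script): with EXP_NUM = 0 the inflows
# defined below split FLOW_RATE = 2000 veh/h on edge "59440544#0" into
#   (1 - RL_PENETRATION) * FLOW_RATE = 0.9 * 2000 = 1800 veh/h of human-driven traffic and
#   RL_PENETRATION * FLOW_RATE = 0.1 * 2000 = 200 veh/h of RL-controlled traffic,
# while the merge edge "124433709" receives a fixed MERGE_RATE = 200 veh/h of human traffic.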
## We consider a highway network with an upstream merging lane producing
# shockwaves
additional_net_params = ADDITIONAL_NET_PARAMS.copy()
#additional_net_params["merge_lanes"] = 1
#additional_net_params["highway_lanes"] = 1
#additional_net_params["pre_merge_length"] = 500
# RL vehicles constitute 5% of the total number of vehicles
# Daniel: adding vehicles and flow from osm.passenger.trips.xml
vehicles = VehicleParams()
vehicles.add(
veh_id="human",
acceleration_controller=(SimCarFollowingController, {
#"noise": 0.2
}),
lane_change_controller=(SimLaneChangeController, {}),
#routing_controller=(ContinuousRouter, {}),
car_following_params=SumoCarFollowingParams(
# Define speed mode that will minimize collisions: https://sumo.dlr.de/wiki/TraCI/Change_Vehicle_State#speed_mode_.280xb3.29
speed_mode=9,#"all_checks", #"all_checks", #no_collide",
#decel=7.5, # avoid collisions at emergency stops
# desired time-gap from leader
#tau=2, #7,
#min_gap=2.5,
#speed_factor=1,
#speed_dev=0.1
),
lane_change_params=SumoLaneChangeParams(
model="SL2015",
# Define a lane changing mode that will allow lane changes
# See: https://sumo.dlr.de/wiki/TraCI/Change_Vehicle_State#lane_change_mode_.280xb6.29
# and: ~/local/flow_2019_07/flow/core/params.py, see LC_MODES = {"aggressive": 0 /*bug, 0 is no lane-changes*/, "no_lat_collide": 512, "strategic": 1621}, where "strategic" is the default behavior
lane_change_mode=1621,#0b011000000001, # (like default 1621 mode, but no lane changes other than strategic to follow route, # 512, #(collision avoidance and safety gap enforcement) # "strategic",
#lc_speed_gain=1000000,
lc_pushy=0, #0.5, #1,
lc_assertive=5, #20,
# the following two replace default values which are not read well by xml parser
lc_impatience=1e-8,
lc_time_to_impatience=1e12
),
num_vehicles=0)
vehicles.add(
veh_id="rl",
acceleration_controller=(RLController, {}),
lane_change_controller=(SimLaneChangeController, {}),
#routing_controller=(ContinuousRouter, {}),
car_following_params=SumoCarFollowingParams(
# Define speed mode that will minimize collisions: https://sumo.dlr.de/wiki/TraCI/Change_Vehicle_State#speed_mode_.280xb3.29
speed_mode=9,#"all_checks", #no_collide",
#decel=7.5, # avoid collisions at emergency stops
# desired time-gap from leader
#tau=2, #7,
#min_gap=2.5,
#speed_factor=1,
#speed_dev=0.1,
),
lane_change_params=SumoLaneChangeParams(
model="SL2015",
# Define a lane changing mode that will allow lane changes
# See: https://sumo.dlr.de/wiki/TraCI/Change_Vehicle_State#lane_change_mode_.280xb6.29
# and: ~/local/flow_2019_07/flow/core/params.py, see LC_MODES = {"aggressive": 0 /*bug, 0 is no lane-changes*/, "no_lat_collide": 512, "strategic": 1621}, where "strategic" is the default behavior
lane_change_mode=1621,#0b011000000001, # (like default 1621 mode, but no lane changes other than strategic to follow route, # 512, #(collision avoidance and safety gap enforcement) # "strategic",
#lc_speed_gain=1000000,
lc_pushy=0, #0.5, #1,
lc_assertive=5, #20,
# the following two replace default values which are not read well by xml parser
lc_impatience=1e-8,
lc_time_to_impatience=1e12
),
num_vehicles=0)
# Vehicles are introduced from both sides of merge, with RL vehicles entering
# from the highway portion as well
inflow = InFlows()
'''
inflow.add(
veh_type="human",
edge="404969345#0", # flow id sw2w1 from xml file
begin=10,#0,
end=90000,
#probability=(1 - RL_PENETRATION), #* FLOW_RATE,
vehs_per_hour = MERGE_RATE,#(1 - RL_PENETRATION)*FLOW_RATE,
departSpeed=20,
departLane="free",
)
'''
'''
inflow.add(
veh_type="rl",
edge="404969345#0", # flow id sw2w1 from xml file
begin=10,#0,
end=90000,
vehs_per_hour = RL_PENETRATION * FLOW_RATE,
depart_speed="max",
depart_lane="free",
)
'''
inflow.add(
veh_type="human",
edge="59440544#0", # flow id se2w1 from xml file
begin=10,#0,
end=90000,
vehs_per_hour = (1 - RL_PENETRATION)*FLOW_RATE,
departSpeed=10,
departLane="free",
)
inflow.add(
veh_type="rl",
edge="59440544#0", # flow id se2w1 from xml file
begin=10,#0,
end=90000,
#probability=RL_PENETRATION, # * 0.8, #* FLOW_RATE,
vehs_per_hour = RL_PENETRATION*FLOW_RATE,
depart_speed=10,
depart_lane="free",
)
inflow.add(
veh_type="human",
edge="124433709", # flow id e2w1 from xml file
begin=10,#0,
end=90000,
vehs_per_hour = MERGE_RATE, #(1 - RL_PENETRATION)*FLOW_RATE,
departSpeed=7.5,
departLane="free",
)
'''
inflow.add(
veh_type="rl",
edge="124433709", # flow id e2w1 from xml file
begin=10,#0,
end=90000,
probability=RL_PENETRATION, # * 0.8, # * FLOW_RATE,
depart_speed="max",
depart_lane="free",
)
'''
'''
inflow.add(
veh_type="human",
edge="38726647", # flow id n2w1 from xml file
begin=10,#0,
end=90000,
vehs_per_hour = MERGE_RATE,#(1 - RL_PENETRATION)*FLOW_RATE,
departSpeed=20,
departLane="free",
)
'''
'''
inflow.add(
veh_type="rl",
edge="38726647", # flow id n2w1 from xml file
begin=10,#0,
end=90000,
probability=RL_PENETRATION, # * 0.8, # * FLOW_RATE,
depart_speed="max",
depart_lane="free",
)
'''
flow_params = dict(
# name of the experiment
exp_tag="i696_1merge_scratch_Flow_target_20_old_params",
# name of the flow environment the experiment is running on
#env_name=MergePOEnv,
env_name=MergePOEnv,
# name of the scenario class the experiment is running on
network=Network,
# simulator that is used by the experiment
simulator='traci',
# sumo-related parameters (see flow.core.params.SumoParams)
sim=SumoParams(
no_step_log=False, # this disables log writing?
sim_step=0.5, # Daniel updated from osm.sumocfg
lateral_resolution=0.25, # determines lateral discretization of lanes
render=False,#True, # False for training, True for debugging
restart_instance=True,
),
# environment related parameters (see flow.core.params.EnvParams)
env=EnvParams(
horizon=HORIZON,
sims_per_step=1, #5,
warmup_steps=0,
additional_params={
"max_accel": 2.6,
"max_decel": 4.5,
"target_velocity": 20,
"num_rl": NUM_RL, # used by WaveAttenuationMergePOEnv e.g. to fix action dimension
#"ignore_edges":["59440544#0"],
},
),
# network-related parameters (see flow.core.params.NetParams and the
# scenario's documentation or ADDITIONAL_NET_PARAMS component)
net=NetParams(
inflows=inflow,
#no_internal_links=False,
additional_params=additional_net_params,
template={
"net" : scenario_road_data["net"],# see above
"rou" : scenario_road_data["rou"],# see above
}
),
# vehicles to be placed in the network at the start of a rollout (see
# flow.core.params.VehicleParams)
veh=vehicles,
# parameters specifying the positioning of vehicles upon initialization/
# reset (see flow.core.params.InitialConfig)
initial=InitialConfig(
# Distributing only at the beginning of routes
scenario_road_data["edges_distribution"]
),
)
'''
def setup_exps(seeds_file=None):
alg_run = "PPO"
agent_cls = get_agent_class(alg_run)
config = agent_cls._default_config.copy()
config["num_workers"] = N_CPUS
config["train_batch_size"] = HORIZON * N_ROLLOUTS
config["gamma"] = 0.999 # discount rate
config["model"].update({"fcnet_hiddens": [32, 32, 32]})
config["use_gae"] = True
config["lambda"] = 0.97
config["kl_target"] = 0.02
config["num_sgd_iter"] = 10
config['clip_actions'] = False # FIXME(ev) temporary ray bug
config["horizon"] = HORIZON
config["entropy_coeff"] = 0.001
# save the flow params for replay
flow_json = json.dumps(
flow_params, cls=FlowParamsEncoder, sort_keys=True, indent=4)
config['env_config']['flow_params'] = flow_json
config['env_config']['run'] = alg_run
create_env, gym_name = make_create_env(params=flow_params, version=0, seeds_file=seeds_file)
# Register as rllib env
register_env(gym_name, create_env)
return alg_run, gym_name, config
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-s", "--seeds_file", dest="seeds_file",
help="pickle file containing seeds", default=None)
parser.add_argument('--resume',help="continue training",type=bool,default=False)
args = parser.parse_args()
alg_run, gym_name, config = setup_exps(args.seeds_file)
ray.init(num_cpus=N_CPUS + 1)
trials = run_experiments({
flow_params["exp_tag"]: {
"run": alg_run,
"env": gym_name,
"config": {
**config
},
"checkpoint_freq": 1, #20,
"checkpoint_at_end": True,
"max_failures": 999,
"stop": {
"training_iteration": 1,
},
},
},
resume=False,
)
'''
| 34.570652
| 202
| 0.662003
|
7949f88a4100aadd89387a71af7559a72c5b7088
| 2,739
|
py
|
Python
|
examples/bulk_tagger/bulk_tagger.py
|
PaloAltoNetworks/cortex-xpanse-python-sdk
|
532d3fdb031a0e5943cc492222299f94ad93d030
|
[
"0BSD"
] | 3
|
2021-09-02T16:05:04.000Z
|
2021-09-10T01:10:48.000Z
|
examples/bulk_tagger/bulk_tagger.py
|
PaloAltoNetworks/cortex-xpanse-python-sdk
|
532d3fdb031a0e5943cc492222299f94ad93d030
|
[
"0BSD"
] | 7
|
2021-08-31T17:43:33.000Z
|
2021-12-01T00:36:48.000Z
|
examples/bulk_tagger/bulk_tagger.py
|
PaloAltoNetworks/cortex-xpanse-python-sdk
|
532d3fdb031a0e5943cc492222299f94ad93d030
|
[
"0BSD"
] | null | null | null |
from csv import reader
import click
from xpanse.client import ExClient
def fetch_tags(client):
return client.annotations.tags.list(disabled=False).dump()
def create_tag(client, tag):
print(f"Creating a new tag named {tag}")
return client.annotations.tags.create(name=tag)["id"]
def build_tag_id_list(client, tags, tag_map):
id_list = []
for tag in tags.split("|"):
tag = tag.strip()
if tag in tag_map:
id_list.append(tag_map[tag])
else:
new_tag = create_tag(client, tag)
tag_map[tag] = new_tag
id_list.append(new_tag)
return id_list, tag_map
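# Added illustrative note (not part of the original example): the CSV carries all tag names
# for an asset in a single pipe-separated cell, e.g. "prod | external | legacy".
# build_tag_id_list splits that cell, maps known names to ids via tag_map, and creates any
# missing tags on the fly (hypothetical values):
# >>> id_list, tag_map = build_tag_id_list(client, "prod|external", {"prod": "tag-id-1"})
# >>> id_list
# ['tag-id-1', '<id of newly created "external" tag>']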
def assign_tags(client, assetType, assetKey, tags, tag_map, operation):
"""
Create a bulk assignment request and send to the appropriate endpoint.
"""
assetID = None
if assetType == "domain":
assetID = client.assets.domains.get(name=assetKey).get("id")
elif assetType == "certificate":
assetID = client.assets.certificates.get(pemMd5Hash=assetKey).get("id")
elif assetType == "cloud-resource":
assetID = client.assets.cloud_resources.get(cloudResourceId=assetKey).get("id")
tag_list, tag_map = build_tag_id_list(client, tags, tag_map)
if assetID is not None:
print(
f"Applying {len(tag_list)} tags to {assetType} asset {assetKey} with id {assetID}"
)
if assetType == "domain":
client.assets.domains.bulk_tag(
operation=operation, asset_ids=[assetID], tag_ids=tag_list
)
if assetType == "certificate":
client.assets.certificates.bulk_tag(
operation=operation, asset_ids=[assetID], tag_ids=tag_list
)
if assetType == "cloud-resource":
client.assets.cloud_resources.bulk_tag(
operation=operation, asset_ids=[assetID], tag_ids=tag_list
)
return tag_map
@click.command()
@click.argument('file_name', type=click.File('r'))
def cli(file_name):
# Initialize the Xpanse Client
client = ExClient()
# Populate our tag map from name to id
tag_map = {tag["name"]: tag["id"] for tag in fetch_tags(client)}
# Wrap the input file in a csv reader
csvreader = reader(file_name)
# Skip the header column
next(csvreader)
# For row of input do a bulk assignment request
for row in csvreader:
assetType, assetKey, tagNames, operation = row
tag_map = assign_tags(
client=client,
assetType=assetType.strip(),
assetKey=assetKey.strip(),
tags=tagNames,
tag_map=tag_map,
operation=operation,
)
if __name__ == "__main__":
cli()
| 28.53125
| 94
| 0.626871
|
7949f8f6b60bf2bf15a08972e6bef153493d0aae
| 11,735
|
py
|
Python
|
cuegui/cuegui/ServiceDialog.py
|
mb0rt/OpenCue
|
7cce111306ccfa6c94f12806c8668866f2ee70ad
|
[
"Apache-2.0"
] | null | null | null |
cuegui/cuegui/ServiceDialog.py
|
mb0rt/OpenCue
|
7cce111306ccfa6c94f12806c8668866f2ee70ad
|
[
"Apache-2.0"
] | null | null | null |
cuegui/cuegui/ServiceDialog.py
|
mb0rt/OpenCue
|
7cce111306ccfa6c94f12806c8668866f2ee70ad
|
[
"Apache-2.0"
] | null | null | null |
# Copyright Contributors to the OpenCue Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dialog for displaying and editing services."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from builtins import str
from builtins import range
from PySide2 import QtCore
from PySide2 import QtWidgets
import opencue
import cuegui.Constants
import cuegui.TagsWidget
import cuegui.Utils
class ServiceForm(QtWidgets.QWidget):
"""Widget for displaying and editing a service."""
saved = QtCore.Signal(object)
def __init__(self, parent=None):
QtWidgets.QWidget.__init__(self, parent)
self.__service = None
self.gpu_max_mb = 2 * 1024
self.gpu_min_mb = 0
self.gpu_tick_mb = 256
self.name = QtWidgets.QLineEdit(self)
self.threadable = QtWidgets.QCheckBox(self)
self.min_cores = QtWidgets.QSpinBox(self)
self.min_cores.setRange(50, int(self._cfg().get('max_cores', 16)) * 100)
self.min_cores.setSingleStep(50)
self.min_cores.setValue(100)
self.max_cores = QtWidgets.QSpinBox(self)
self.max_cores.setRange(0, int(self._cfg().get('max_cores', 16)) * 100)
self.max_cores.setSingleStep(100)
self.max_cores.setValue(100)
self.min_memory = QtWidgets.QSpinBox(self)
self.min_memory.setRange(512, int(self._cfg().get('max_memory', 48)) * 1024)
self.min_memory.setValue(3276)
self.min_gpu = QtWidgets.QSpinBox(self)
self.min_gpu.setRange(self.gpu_min_mb, self.gpu_max_mb)
self.min_gpu.setValue(0)
self.min_gpu.setSingleStep(self.gpu_tick_mb)
self.min_gpu.setSuffix(" MB")
self.timeout = QtWidgets.QSpinBox(self)
self.timeout.setRange(0, 4320)
self.timeout.setValue(0)
self.timeout_llu = QtWidgets.QSpinBox(self)
self.timeout_llu.setRange(0, 4320)
self.timeout_llu.setValue(0)
layout = QtWidgets.QGridLayout(self)
layout.addWidget(QtWidgets.QLabel("Name:", self), 0, 0)
layout.addWidget(self.name, 0, 1)
layout.addWidget(QtWidgets.QLabel("Threadable:", self), 1, 0)
layout.addWidget(self.threadable, 1, 1)
layout.addWidget(QtWidgets.QLabel("Min Threads (100 = 1 thread):", self), 2, 0)
layout.addWidget(self.min_cores, 2, 1)
layout.addWidget(QtWidgets.QLabel("Max Threads (100 = 1 thread):", self), 3, 0)
layout.addWidget(self.max_cores, 3, 1)
layout.addWidget(QtWidgets.QLabel("Min Memory MB:", self), 4, 0)
layout.addWidget(self.min_memory, 4, 1)
layout.addWidget(QtWidgets.QLabel("Min Gpu Memory MB:", self), 5, 0)
layout.addWidget(self.min_gpu, 5, 1)
layout.addWidget(QtWidgets.QLabel("Timeout (in minutes):", self), 6, 0)
layout.addWidget(self.timeout, 6, 1)
layout.addWidget(QtWidgets.QLabel("Timeout LLU (in minutes):", self), 7, 0)
layout.addWidget(self.timeout_llu, 7, 1)
self._tags_w = cuegui.TagsWidget.TagsWidget(allowed_tags=cuegui.Constants.ALLOWED_TAGS)
layout.addWidget(self._tags_w, 8, 0, 1, 2)
self.__buttons = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Save,
QtCore.Qt.Horizontal,
self)
self.__buttons.setDisabled(True)
layout.addWidget(self.__buttons, 9, 1)
self.__buttons.accepted.connect(self.save)
def _cfg(self):
"""
Loads (if necessary) and returns the config values.
Warns and returns an empty dict if there's a problem reading the config
@return: The keys & values stored in the config file
@rtype: dict<str:str>
"""
if not hasattr(self, '__config'):
self.__config = cuegui.Utils.getResourceConfig()
return self.__config
def setService(self, service):
"""
Update the form with data from the given service.
"""
self.__service = service
self.__buttons.setDisabled(False)
self.name.setText(service.data.name)
self.threadable.setChecked(service.data.threadable)
self.min_cores.setValue(service.data.min_cores)
self.max_cores.setValue(service.data.max_cores)
self.min_memory.setValue(service.data.min_memory // 1024)
self.min_gpu.setValue(service.data.min_gpu // 1024)
self._tags_w.set_tags(service.data.tags)
self.timeout.setValue(service.data.timeout)
self.timeout_llu.setValue(service.data.timeout_llu)
def new(self):
"""
Clear the form for a new service.
"""
self.__buttons.setDisabled(False)
self.__service = None
self.name.setFocus()
self.name.setText("")
self.threadable.setChecked(False)
self.min_cores.setValue(100)
self.max_cores.setValue(100)
self.min_memory.setValue(3276)
self.min_gpu.setValue(0)
self.timeout.setValue(0)
self.timeout_llu.setValue(0)
self._tags_w.set_tags(['general'])
def save(self):
"""
Create and emit a ServiceData object based
on the contents of the form.
"""
if len(str(self.name.text())) < 3:
QtWidgets.QMessageBox.critical(self, "Error",
"The service name must be at least 3 characters.")
return
if not str(self.name.text()).isalnum():
QtWidgets.QMessageBox.critical(self, "Error", "The service name must alphanumeric.")
return
service = opencue.wrappers.service.Service()
if self.__service:
service.data.id = self.__service.data.id
service.setName(str(self.name.text()))
service.setThreadable(self.threadable.isChecked())
service.setMinCores(self.min_cores.value())
service.setMaxCores(self.max_cores.value())
service.setMinMemory(self.min_memory.value() * 1024)
service.setMinGpu(self.min_gpu.value() * 1024)
service.setTimeout(self.timeout.value())
service.setTimeoutLLU(self.timeout_llu.value())
service.setTags(self._tags_w.get_tags())
self.saved.emit(service)
class ServiceManager(QtWidgets.QWidget):
"""
Wraps the ServiceForm widget with the logic and controls needed
to add, update, and delete services.
"""
def __init__(self, show, parent=None):
QtWidgets.QWidget.__init__(self, parent)
self.__show = show
self.__services = []
self.__selected = None
self.__new_service = False
layout = QtWidgets.QVBoxLayout(self)
self.__splitter = QtWidgets.QSplitter(QtCore.Qt.Horizontal, self)
self.__service_list = QtWidgets.QListWidget(self)
self.__form = ServiceForm(self)
layout.addWidget(self.__splitter)
self.__splitter.addWidget(self.__service_list)
self.__splitter.addWidget(self.__form)
self.__btn_new = QtWidgets.QPushButton("New", self)
self.__btn_del = QtWidgets.QPushButton("Del", self)
self.__btn_layout = QtWidgets.QHBoxLayout()
self.__btn_layout.addWidget(self.__btn_new)
self.__btn_layout.addWidget(self.__btn_del)
self.__btn_layout.addStretch()
layout.addLayout(self.__btn_layout)
self.__btn_new.clicked.connect(self.newService)
self.__btn_del.clicked.connect(self.delService)
self.__form.saved.connect(self.saved)
self.__service_list.currentItemChanged.connect(self.selected)
self.refresh()
self.__service_list.setCurrentRow(0, QtCore.QItemSelectionModel.Select)
def selected(self, item, old_item):
"""
Executes if an item is selected
"""
del old_item
self.__new_service = False
if not item:
return
if self.__show:
self.__selected = self.__show.getServiceOverride(str(item.text()))
else:
self.__selected = opencue.api.getService(str(item.text()))
self.__form.setService(self.__selected)
def saved(self, service):
"""
Save a service to opencue.
"""
if not self.__show:
msg = QtWidgets.QMessageBox()
msg.setText("You are about to modify a facility wide service configuration. "
"Are you in PSR-Resources?")
msg.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
msg.setDefaultButton(QtWidgets.QMessageBox.No)
if msg.exec_() == QtWidgets.QMessageBox.No:
return
if self.__new_service:
if self.__show:
self.__show.createServiceOverride(service.data)
else:
opencue.api.createService(service.data)
else:
service.update()
self.refresh()
self.__new_service = False
for i in range(0, self.__service_list.count()):
item = self.__service_list.item(i)
if item:
if str(item.text()) == service.name():
self.__service_list.setCurrentRow(i, QtCore.QItemSelectionModel.Select)
break
def refresh(self):
"""
Refresh the service list.
"""
selected = []
if not self.__new_service:
selected = [str(t.text()) for t in
self.__service_list.selectedItems()]
self.__service_list.clear()
if not self.__show:
self.__services = opencue.api.getDefaultServices()
else:
self.__services = self.__show.getServiceOverrides()
for service in self.__services:
item = QtWidgets.QListWidgetItem(service.name())
self.__service_list.addItem(item)
if service.name() in selected:
item.setSelected(True)
self.__service_list.sortItems()
def newService(self):
"""
Setup the interface for creating a new service.
"""
for item in self.__service_list.selectedItems():
item.setSelected(False)
self.__form.new()
self.__new_service = True
def delService(self):
"""
Delete the selected service.
"""
self.__selected.delete()
row = self.currentRow()
if row >= 1:
self.__service_list.setCurrentRow(row - 1, QtCore.QItemSelectionModel.Select)
self.refresh()
def currentRow(self):
"""
Return the integer value of the current row.
"""
for item in self.__service_list.selectedItems():
return self.__service_list.row(item)
return -1
class ServiceDialog(QtWidgets.QDialog):
"""
Wraps the ServiceManager in a dialog window.
"""
def __init__(self, show, parent=None):
QtWidgets.QDialog.__init__(self, parent)
self.__srv_manager = ServiceManager(show, self)
self.setWindowTitle("Services")
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.setSizeGripEnabled(True)
self.resize(620, 420)
| 35.560606
| 96
| 0.634001
|
7949fa90b402d05771c5e9e9cfbd4d83bc44fa17
| 1,576
|
py
|
Python
|
textalyzer/texttools/word_freq.py
|
rubenpeters91/textalyzer
|
c56bd9d1bbc52e97b4d8ba034be99fa5777e5b41
|
[
"MIT"
] | null | null | null |
textalyzer/texttools/word_freq.py
|
rubenpeters91/textalyzer
|
c56bd9d1bbc52e97b4d8ba034be99fa5777e5b41
|
[
"MIT"
] | null | null | null |
textalyzer/texttools/word_freq.py
|
rubenpeters91/textalyzer
|
c56bd9d1bbc52e97b4d8ba034be99fa5777e5b41
|
[
"MIT"
] | null | null | null |
from textalyzer.texttools import TextTool
from wordcloud import WordCloud
from typing import List
class WordFreq(TextTool):
def __init__(self, language: str = "en"):
"""Word frequency
Preprocesses the text and then uses spacy filters to determine
the term frequency and plot the result.
Parameters
----------
language: str (default: "en")
            Which language model to use; only English ("en")
and Dutch ("nl") are supported at the moment
"""
super().__init__(language)
def plot_wordfreq(self, max_terms: int = 10) -> List:
"""Plot keyword frequency
Parameters
----------
max_terms: int (default: 10)
The maximum number of terms to plot
Returns
-------
A list of dictionaries with the wordfreq
(visualisation is done through d3)
"""
top_words = self.freq_word.most_common(max_terms)
return [{"term": word, "freq": freq} for word, freq in top_words]
    def plot_wordcloud(self, max_terms: int = 10):
"""Plot wordcloud
Parameters
----------
max_terms: int (default: 10)
The maximum number of terms to plot
Returns
-------
        An image array (via WordCloud.to_array) of a wordcloud built from the max frequency terms
"""
wc = WordCloud(
background_color="white", max_words=max_terms, width=800, height=600
)
# generate word cloud
wc.generate_from_frequencies(self.freq_word)
return wc.to_array()
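# Minimal usage sketch (assumption: the TextTool base class provides a way to ingest raw
# text and populate self.freq_word; the ingest call below is hypothetical):
#     wf = WordFreq(language="en")
#     wf.fit("the quick brown fox jumps over the lazy dog the end")  # hypothetical ingest step
#     print(wf.plot_wordfreq(max_terms=3))   # e.g. [{"term": "the", "freq": 3}, ...]
#     img = wf.plot_wordcloud(max_terms=10)  # image array suitable for plt.imshow(img)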
| 28.142857
| 80
| 0.581853
|
7949fafab04e551fa5e6420a55d4fee598fed0cb
| 10,777
|
py
|
Python
|
LearningAlgorithm/mnist.py
|
Mirotivo/biovid
|
4cc4b1d2afd3f37224c74fe982d67aee99b81dc0
|
[
"BSD-2-Clause"
] | null | null | null |
LearningAlgorithm/mnist.py
|
Mirotivo/biovid
|
4cc4b1d2afd3f37224c74fe982d67aee99b81dc0
|
[
"BSD-2-Clause"
] | null | null | null |
LearningAlgorithm/mnist.py
|
Mirotivo/biovid
|
4cc4b1d2afd3f37224c74fe982d67aee99b81dc0
|
[
"BSD-2-Clause"
] | null | null | null |
'''
2-D Convolutional Neural Networks using TensorFlow library for Kaggle competition.
Target competition on Kaggle: https://www.kaggle.com/c/digit-recognizer
Author: Taegyun Jeon
Project: https://github.com/tgjeon/cnnForMnist
Train instances: 42000 number images with vector format (1 number = 1 x 784)
Test instances: 20000 number images with vector format (1 number = 1 x 784)
'''
import numpy as np
import pandas as pd
import tensorflow as tf
# Parameters
LEARNING_RATE = 0.001
TRAINING_EPOCHS = 3000
BATCH_SIZE = 100
DISPLAY_STEP = 10
DROPOUT_CONV = 0.8
DROPOUT_HIDDEN = 0.6
VALIDATION_SIZE = 2000 # Set to 0 to train on all available data
# Weight initialization
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
# Weight initialization (Xavier's init)
def weight_xavier_init(n_inputs, n_outputs, uniform=True):
if uniform:
init_range = tf.sqrt(6.0 / (n_inputs + n_outputs))
return tf.random_uniform_initializer(-init_range, init_range)
else:
stddev = tf.sqrt(3.0 / (n_inputs + n_outputs))
return tf.truncated_normal_initializer(stddev=stddev)
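# For reference: the uniform bound sqrt(6 / (n_inputs + n_outputs)) and the normal stddev
# sqrt(3 / (n_inputs + n_outputs)) keep activation variance roughly constant across layers
# (Glorot/Xavier initialization). E.g. for the 5x5x1 -> 32 conv below, n_inputs = 25 and
# n_outputs = 32, giving a uniform range of about +/-0.32.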
# Bias initialization
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
# 2D convolution
def conv2d(X, W):
return tf.nn.conv2d(X, W, strides=[1, 1, 1, 1], padding='SAME')
# Max Pooling
def max_pool_2x2(X):
return tf.nn.max_pool(X, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Serve data by batches
def next_batch(batch_size):
global train_images
global train_labels
global index_in_epoch
global epochs_completed
start = index_in_epoch
index_in_epoch += batch_size
    # when all training data have been used once, they are reordered randomly
if index_in_epoch > num_examples:
# finished epoch
epochs_completed += 1
# shuffle the data
perm = np.arange(num_examples)
np.random.shuffle(perm)
train_images = train_images[perm]
train_labels = train_labels[perm]
# start next epoch
start = 0
index_in_epoch = batch_size
assert batch_size <= num_examples
end = index_in_epoch
return train_images[start:end], train_labels[start:end]
# Convert class labels from scalars to one-hot vectors
# 0 => [1 0 0 0 0 0 0 0 0 0]
# 1 => [0 1 0 0 0 0 0 0 0 0]
def dense_to_one_hot(labels_dense, num_classes):
label_input = np.array(labels_dense).astype(int)
labels_one_hot = np.zeros((labels_dense.shape[0], num_classes))
labels_one_hot[np.arange(labels_dense.shape[0]), label_input] = 1
return labels_one_hot
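# Worked example: dense_to_one_hot(np.array([2, 0]), 3) returns
#   [[0., 0., 1.],
#    [1., 0., 0.]]
# i.e. row i has a single 1 in the column given by labels_dense[i].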
'''
Preprocessing for MNIST dataset
# Read MNIST data set (Train data from CSV file)
from tensorflow.examples.tutorials.mnist import input_data
data = input_data.read_data_sets('MNIST_data', one_hot=False)
training_label = pd.DataFrame(data[0].labels.astype(np.float))
training = pd.DataFrame(data[0].images.astype(np.float))
training_combined = pd.concat([training_label,training], axis=1)
validation_label = pd.DataFrame(data[1].labels.astype(np.float))
validation = pd.DataFrame(data[1].images.astype(np.float))
validation_combined = pd.concat([validation_label,validation], axis=1)
testing_label = pd.DataFrame(data[2].labels.astype(np.float))
testing = pd.DataFrame(data[2].images.astype(np.float))
testing_combined = pd.concat([testing_label,testing], axis=1)
with open('train.csv', 'a') as f:
pd.concat([training_combined,validation_combined,testing_combined], axis=0).to_csv(f, header=False, index=False)
'''
data = pd.read_csv('./train.csv',header=None, engine='python')
# Extracting images and labels from given data
# For images
images = data.iloc[:,1:].values
images = images.astype(np.float)
# Normalize from [0:255] => [0.0:1.0]
images = np.multiply(images, 1.0 / 255.0)
image_size = images.shape[1]
image_width = image_height = np.ceil(np.sqrt(image_size)).astype(np.uint8)
# For labels
labels_flat = data[[0]].values.ravel()
labels_count = np.unique(labels_flat).shape[0]
labels = dense_to_one_hot(labels_flat, labels_count)
labels = labels.astype(np.uint8)
# Split data into training & validation
validation_images = images[:VALIDATION_SIZE]
validation_labels = labels[:VALIDATION_SIZE]
train_images = images[VALIDATION_SIZE:]
train_labels = labels[VALIDATION_SIZE:]
'''
Create model with 2D CNN
'''
# Create Input and Output
X = tf.placeholder('float', shape=[None, image_size]) # mnist data image of shape 28*28=784
Y_gt = tf.placeholder('float', shape=[None, labels_count]) # 0-9 digits recognition => 10 classes
drop_conv = tf.placeholder('float')
drop_hidden = tf.placeholder('float')
# Model Parameters
W1 = tf.get_variable("W1", shape=[5, 5, 1, 32], initializer=weight_xavier_init(5*5*1, 32))
W2 = tf.get_variable("W2", shape=[5, 5, 32, 64], initializer=weight_xavier_init(5*5*32, 64))
W3_FC1 = tf.get_variable("W3_FC1", shape=[64*7*7, 1024], initializer=weight_xavier_init(64*7*7, 1024))
W4_FC2 = tf.get_variable("W4_FC2", shape=[1024, labels_count], initializer=weight_xavier_init(1024, labels_count))
#W1 = weight_variable([5, 5, 1, 32]) # 5x5x1 conv, 32 outputs
#W2 = weight_variable([5, 5, 32, 64]) # 5x5x32 conv, 64 outputs
#W3_FC1 = weight_variable([64 * 7 * 7, 1024]) # FC: 64x7x7 inputs, 1024 outputs
#W4_FC2 = weight_variable([1024, labels_count]) # FC: 1024 inputs, 10 outputs (labels)
B1 = bias_variable([32])
B2 = bias_variable([64])
B3_FC1 = bias_variable([1024])
B4_FC2 = bias_variable([labels_count])
# CNN model
X1 = tf.reshape(X, [-1,image_width , image_height,1]) # shape=(?, 28, 28, 1)
# Layer 1
l1_conv = tf.nn.relu(conv2d(X1, W1) + B1) # shape=(?, 28, 28, 32)
l1_pool = max_pool_2x2(l1_conv) # shape=(?, 14, 14, 32)
l1_drop = tf.nn.dropout(l1_pool, drop_conv)
# Layer 2
l2_conv = tf.nn.relu(conv2d(l1_drop, W2)+ B2) # shape=(?, 14, 14, 64)
l2_pool = max_pool_2x2(l2_conv) # shape=(?, 7, 7, 64)
l2_drop = tf.nn.dropout(l2_pool, drop_conv)
# Layer 3 - FC1
l3_flat = tf.reshape(l2_drop, [-1, W3_FC1.get_shape().as_list()[0]]) # shape=(?, 1024)
l3_feed = tf.nn.relu(tf.matmul(l3_flat, W3_FC1)+ B3_FC1)
l3_drop = tf.nn.dropout(l3_feed, drop_hidden)
# Layer 4 - FC2
Y_pred = tf.nn.softmax(tf.matmul(l3_drop, W4_FC2)+ B4_FC2) # shape=(?, 10)
# Cost function and training
cost = -tf.reduce_sum(Y_gt*tf.log(Y_pred))
regularizer = (tf.nn.l2_loss(W3_FC1) + tf.nn.l2_loss(B3_FC1) + tf.nn.l2_loss(W4_FC2) + tf.nn.l2_loss(B4_FC2))
cost += 5e-4 * regularizer
#train_op = tf.train.AdamOptimizer(LEARNING_RATE).minimize(cost)
train_op = tf.train.RMSPropOptimizer(LEARNING_RATE, 0.9).minimize(cost)
correct_predict = tf.equal(tf.argmax(Y_pred, 1), tf.argmax(Y_gt, 1))
accuracy = tf.reduce_mean(tf.cast(correct_predict, 'float'))
predict = tf.argmax(Y_pred, 1)
'''
TensorFlow Session
'''
epochs_completed = 0
index_in_epoch = 0
num_examples = train_images.shape[0]
# start TensorFlow session
init = tf.global_variables_initializer()
sess = tf.InteractiveSession()
sess.run(init)
# visualisation variables
train_accuracies = []
validation_accuracies = []
DISPLAY_STEP=1
for i in range(TRAINING_EPOCHS):
#get new batch
batch_xs, batch_ys = next_batch(BATCH_SIZE)
# check progress on every 1st,2nd,...,10th,20th,...,100th... step
if i%DISPLAY_STEP == 0 or (i+1) == TRAINING_EPOCHS:
train_accuracy = accuracy.eval(feed_dict={X:batch_xs,
Y_gt: batch_ys,
drop_conv: DROPOUT_CONV,
drop_hidden: DROPOUT_HIDDEN})
if(VALIDATION_SIZE):
validation_accuracy = accuracy.eval(feed_dict={ X: validation_images[0:BATCH_SIZE],
Y_gt: validation_labels[0:BATCH_SIZE],
drop_conv: DROPOUT_CONV, drop_hidden: DROPOUT_HIDDEN})
print('training_accuracy / validation_accuracy => %.2f / %.2f for step %d'%(train_accuracy, validation_accuracy, i))
validation_accuracies.append(validation_accuracy)
else:
print('training_accuracy => %.4f for step %d'%(train_accuracy, i))
train_accuracies.append(train_accuracy)
# increase DISPLAY_STEP
if i%(DISPLAY_STEP*10) == 0 and i:
DISPLAY_STEP *= 10
# train on batch
sess.run(train_op, feed_dict={X: batch_xs, Y_gt: batch_ys, drop_conv: DROPOUT_CONV, drop_hidden: DROPOUT_HIDDEN})
# check final accuracy on validation set
if(VALIDATION_SIZE):
validation_accuracy = accuracy.eval(feed_dict={X: validation_images,
Y_gt: validation_labels,
drop_conv: DROPOUT_CONV, drop_hidden: DROPOUT_HIDDEN})
print('validation_accuracy => %.4f'%validation_accuracy)
'''
import tensorflow as tf
hello = tf.constant('Hello, TensorFlow!')
sess = tf.Session()
print(sess.run(hello))
#-------------------------------
import tensorflow as tf
with tf.device('/gpu:0'):
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
c = tf.matmul(a, b)
with tf.Session() as sess:
print (sess.run(c))
#------------------------------
'''
print ("VERSION", tf.__version__sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)))
# read test data from CSV file
test_images = pd.read_csv('./input/test.csv').values
test_images = test_images.astype(np.float)
# convert from [0:255] => [0.0:1.0]
test_images = np.multiply(test_images, 1.0 / 255.0)
print('test_images({0[0]},{0[1]})'.format(test_images.shape))
# predict test set
#predicted_lables = predict.eval(feed_dict={X: test_images, keep_prob: 1.0})
# using batches is more resource efficient
predicted_lables = np.zeros(test_images.shape[0])
for i in range(0,test_images.shape[0]//BATCH_SIZE):
predicted_lables[i*BATCH_SIZE : (i+1)*BATCH_SIZE] = predict.eval(feed_dict={X: test_images[i*BATCH_SIZE : (i+1)*BATCH_SIZE], drop_conv: 1.0, drop_hidden: 1.0})
# save results
np.savetxt('submission.csv',
np.c_[range(1,len(test_images)+1),predicted_lables],
delimiter=',',
header = 'ImageId,Label',
comments = '',
fmt='%d')
sess.close()
| 32.558912
| 163
| 0.657419
|
7949fb4c8dceda6fcffc589a892ba99b45aa563a
| 1,561
|
py
|
Python
|
wsgidav/server/server_sample.py
|
monash-merc/wsgidav
|
ba4aa2f64edfd3e3bb21958b56e18d5d92e3065c
|
[
"MIT"
] | null | null | null |
wsgidav/server/server_sample.py
|
monash-merc/wsgidav
|
ba4aa2f64edfd3e3bb21958b56e18d5d92e3065c
|
[
"MIT"
] | null | null | null |
wsgidav/server/server_sample.py
|
monash-merc/wsgidav
|
ba4aa2f64edfd3e3bb21958b56e18d5d92e3065c
|
[
"MIT"
] | null | null | null |
# (c) 2009-2011 Martin Wendt and contributors; see WsgiDAV http://wsgidav.googlecode.com/
# Original PyFileServer (c) 2005 Ho Chun Wei.
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Simple example of how to run WsgiDAV in a 3rd-party WSGI server.
"""
from tempfile import gettempdir
from wsgidav.fs_dav_provider import FilesystemProvider
from wsgidav.version import __version__
from wsgidav.wsgidav_app import DEFAULT_CONFIG, WsgiDAVApp
__docformat__ = "reStructuredText"
rootpath = gettempdir()
provider = FilesystemProvider(rootpath)
config = DEFAULT_CONFIG.copy()
config.update({
"provider_mapping": {"/": provider},
"user_mapping": {},
"verbose": 1,
"enable_loggers": [],
"propsmanager": True, # True: use property_manager.PropertyManager
"locksmanager": True, # True: use lock_manager.LockManager
"domaincontroller": None, # None: domain_controller.WsgiDAVDomainController(user_mapping)
})
app = WsgiDAVApp(config)
# For example, use paste.httpserver
# (See http://pythonpaste.org/modules/httpserver.html for more options)
from paste import httpserver
httpserver.serve(app,
host="localhost",
port=8080,
server_version="WsgiDAV/%s" % __version__,
)
# Or we could use the default server that is part of the WsgiDAV package:
#from wsgidav.server import ext_wsgiutils_server
#ext_wsgiutils_server.serve(config, app)
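# As a further illustration (not part of the original sample): any WSGI-compliant server
# can host `app`, e.g. the wsgiref reference server from the standard library:
#   from wsgiref.simple_server import make_server
#   make_server("localhost", 8080, app).serve_forever()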
| 37.166667
| 96
| 0.686739
|
7949fdb0c86444ae27d5e63a3f462ef8f84991dd
| 5,977
|
py
|
Python
|
examples/GAN/DCGAN.py
|
Pearl-UTexas/tensorpack
|
3ef33a341861769e66995e1630949113404cdd0c
|
[
"Apache-2.0"
] | 5
|
2018-05-04T02:04:15.000Z
|
2020-04-02T05:38:48.000Z
|
examples/GAN/DCGAN.py
|
Pearl-UTexas/tensorpack
|
3ef33a341861769e66995e1630949113404cdd0c
|
[
"Apache-2.0"
] | null | null | null |
examples/GAN/DCGAN.py
|
Pearl-UTexas/tensorpack
|
3ef33a341861769e66995e1630949113404cdd0c
|
[
"Apache-2.0"
] | 2
|
2018-04-23T13:43:10.000Z
|
2019-10-30T09:56:54.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: DCGAN.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import glob
import numpy as np
import os
import argparse
from tensorpack import *
from tensorpack.utils.viz import stack_patches
from tensorpack.tfutils.scope_utils import auto_reuse_variable_scope
import tensorflow as tf
from GAN import GANTrainer, RandomZData, GANModelDesc
"""
1. Download the 'aligned&cropped' version of CelebA dataset
from http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html
2. Start training:
./DCGAN-CelebA.py --data /path/to/img_align_celeba/ --crop-size 140
Generated samples will be available through tensorboard
3. Visualize samples with an existing model:
./DCGAN-CelebA.py --load path/to/model --sample
You can also train on other images (just use any directory of jpg files in
`--data`). But you may need to change the preprocessing.
A pretrained model on CelebA is at http://models.tensorpack.com/GAN/
"""
class Model(GANModelDesc):
def __init__(self, shape, batch, z_dim):
self.shape = shape
self.batch = batch
self.zdim = z_dim
def inputs(self):
return [tf.placeholder(tf.float32, (None, self.shape, self.shape, 3), 'input')]
def generator(self, z):
""" return an image generated from z"""
nf = 64
l = FullyConnected('fc0', z, nf * 8 * 4 * 4, activation=tf.identity)
l = tf.reshape(l, [-1, 4, 4, nf * 8])
l = BNReLU(l)
with argscope(Conv2DTranspose, activation=BNReLU, kernel_size=4, strides=2):
l = Conv2DTranspose('deconv1', l, nf * 4)
l = Conv2DTranspose('deconv2', l, nf * 2)
l = Conv2DTranspose('deconv3', l, nf)
l = Conv2DTranspose('deconv4', l, 3, activation=tf.identity)
l = tf.tanh(l, name='gen')
return l
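    # Shape progression in the generator above (batch dimension omitted, nf = 64):
    # z (zdim) -> fc0 -> 4x4x512 -> deconv1 8x8x256 -> deconv2 16x16x128
    # -> deconv3 32x32x64 -> deconv4 64x64x3 -> tanh output in [-1, 1].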
@auto_reuse_variable_scope
def discriminator(self, imgs):
""" return a (b, 1) logits"""
nf = 64
with argscope(Conv2D, kernel_size=4, strides=2):
l = (LinearWrap(imgs)
.Conv2D('conv0', nf, activation=tf.nn.leaky_relu)
.Conv2D('conv1', nf * 2)
.BatchNorm('bn1')
.tf.nn.leaky_relu()
.Conv2D('conv2', nf * 4)
.BatchNorm('bn2')
.tf.nn.leaky_relu()
.Conv2D('conv3', nf * 8)
.BatchNorm('bn3')
.tf.nn.leaky_relu()
.FullyConnected('fct', 1)())
return l
def build_graph(self, image_pos):
image_pos = image_pos / 128.0 - 1
z = tf.random_uniform([self.batch, self.zdim], -1, 1, name='z_train')
z = tf.placeholder_with_default(z, [None, self.zdim], name='z')
with argscope([Conv2D, Conv2DTranspose, FullyConnected],
kernel_initializer=tf.truncated_normal_initializer(stddev=0.02)):
with tf.variable_scope('gen'):
image_gen = self.generator(z)
tf.summary.image('generated-samples', image_gen, max_outputs=30)
with tf.variable_scope('discrim'):
vecpos = self.discriminator(image_pos)
vecneg = self.discriminator(image_gen)
self.build_losses(vecpos, vecneg)
self.collect_variables()
def optimizer(self):
lr = tf.get_variable('learning_rate', initializer=2e-4, trainable=False)
return tf.train.AdamOptimizer(lr, beta1=0.5, epsilon=1e-3)
def get_augmentors():
augs = []
if args.load_size:
augs.append(imgaug.Resize(args.load_size))
if args.crop_size:
augs.append(imgaug.CenterCrop(args.crop_size))
augs.append(imgaug.Resize(args.final_size))
return augs
def get_data():
assert args.data
imgs = glob.glob(args.data + '/*.jpg')
ds = ImageFromFile(imgs, channel=3, shuffle=True)
ds = AugmentImageComponent(ds, get_augmentors())
ds = BatchData(ds, args.batch)
ds = PrefetchDataZMQ(ds, 5)
return ds
def sample(model, model_path, output_name='gen/gen'):
pred = PredictConfig(
session_init=get_model_loader(model_path),
model=model,
input_names=['z'],
output_names=[output_name, 'z'])
pred = SimpleDatasetPredictor(pred, RandomZData((100, args.z_dim)))
for o in pred.get_result():
o = o[0] + 1
o = o * 128.0
o = np.clip(o, 0, 255)
o = o[:, :, :, ::-1]
stack_patches(o, nr_row=10, nr_col=10, viz=True)
def get_args(default_batch=128, default_z_dim=100):
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--load', help='load model')
parser.add_argument('--sample', action='store_true', help='view generated examples')
parser.add_argument('--data', help='a jpeg directory')
parser.add_argument('--load-size', help='size to load the original images', type=int)
parser.add_argument('--crop-size', help='crop the original images', type=int)
parser.add_argument(
'--final-size', default=64, type=int,
help='resize to this shape as inputs to network')
parser.add_argument('--z-dim', help='hidden dimension', type=int, default=default_z_dim)
parser.add_argument('--batch', help='batch size', type=int, default=default_batch)
global args
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
return args
if __name__ == '__main__':
args = get_args()
M = Model(shape=args.final_size, batch=args.batch, z_dim=args.z_dim)
if args.sample:
sample(M, args.load)
else:
logger.auto_set_dir()
GANTrainer(
input=QueueInput(get_data()),
model=M).train_with_defaults(
callbacks=[ModelSaver()],
steps_per_epoch=300,
max_epoch=200,
session_init=SaverRestore(args.load) if args.load else None
)
| 34.75
| 92
| 0.622386
|
7949fe1ab91513033227ea5936fcabea67a54f48
| 6,709
|
py
|
Python
|
examples/ResNet/imagenet-resnet.py
|
swift-n-brutal/tensorpack
|
521b0bb9653866ee2499b67fbfb2a8316b0cc225
|
[
"Apache-2.0"
] | null | null | null |
examples/ResNet/imagenet-resnet.py
|
swift-n-brutal/tensorpack
|
521b0bb9653866ee2499b67fbfb2a8316b0cc225
|
[
"Apache-2.0"
] | null | null | null |
examples/ResNet/imagenet-resnet.py
|
swift-n-brutal/tensorpack
|
521b0bb9653866ee2499b67fbfb2a8316b0cc225
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: imagenet-resnet.py
import argparse
import os
from tensorpack import QueueInput, TFDatasetInput, logger
from tensorpack.callbacks import *
from tensorpack.dataflow import FakeData
from tensorpack.models import *
from tensorpack.tfutils import argscope, get_model_loader
from tensorpack.train import SyncMultiGPUTrainerReplicated, TrainConfig, launch_train_with_config
from tensorpack.utils.gpu import get_num_gpu
from imagenet_utils import ImageNetModel, eval_on_ILSVRC12, get_imagenet_dataflow, get_imagenet_tfdata
from resnet_model import (
preresnet_basicblock, preresnet_bottleneck, preresnet_group, resnet_backbone, resnet_basicblock, resnet_bottleneck,
resnet_group, se_resnet_bottleneck)
class Model(ImageNetModel):
def __init__(self, depth, mode='resnet'):
if mode == 'se':
assert depth >= 50
self.mode = mode
basicblock = preresnet_basicblock if mode == 'preact' else resnet_basicblock
bottleneck = {
'resnet': resnet_bottleneck,
'preact': preresnet_bottleneck,
'se': se_resnet_bottleneck}[mode]
self.num_blocks, self.block_func = {
18: ([2, 2, 2, 2], basicblock),
34: ([3, 4, 6, 3], basicblock),
50: ([3, 4, 6, 3], bottleneck),
101: ([3, 4, 23, 3], bottleneck),
152: ([3, 8, 36, 3], bottleneck)
}[depth]
def get_logits(self, image):
with argscope([Conv2D, MaxPooling, GlobalAvgPooling, BatchNorm], data_format=self.data_format):
return resnet_backbone(
image, self.num_blocks,
preresnet_group if self.mode == 'preact' else resnet_group, self.block_func)
def get_config(model):
nr_tower = max(get_num_gpu(), 1)
assert args.batch % nr_tower == 0
batch = args.batch // nr_tower
logger.info("Running on {} towers. Batch size per tower: {}".format(nr_tower, batch))
if batch < 32 or batch > 64:
logger.warn("Batch size per tower not in [32, 64]. This probably will lead to worse accuracy than reported.")
if args.fake:
data = QueueInput(FakeData(
[[batch, 224, 224, 3], [batch]], 1000, random=False, dtype='uint8'))
callbacks = []
else:
if args.symbolic:
data = TFDatasetInput(get_imagenet_tfdata(args.data, 'train', batch))
else:
data = QueueInput(get_imagenet_dataflow(args.data, 'train', batch))
START_LR = 0.1
BASE_LR = START_LR * (args.batch / 256.0)
callbacks = [
ModelSaver(),
EstimatedTimeLeft(),
ScheduledHyperParamSetter(
'learning_rate', [
(0, min(START_LR, BASE_LR)), (30, BASE_LR * 1e-1), (60, BASE_LR * 1e-2),
(90, BASE_LR * 1e-3), (100, BASE_LR * 1e-4)]),
]
if BASE_LR > START_LR:
callbacks.append(
ScheduledHyperParamSetter(
'learning_rate', [(0, START_LR), (5, BASE_LR)], interp='linear'))
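        # Worked example of the linear-scaling rule above: with args.batch = 512,
        # BASE_LR = 0.1 * 512 / 256 = 0.2 > START_LR, so the rate warms up linearly
        # from 0.1 to 0.2 over the first 5 epochs, then decays 10x at epochs 30/60/90/100.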
infs = [ClassificationError('wrong-top1', 'val-error-top1'),
ClassificationError('wrong-top5', 'val-error-top5')]
dataset_val = get_imagenet_dataflow(args.data, 'val', batch)
if nr_tower == 1:
# single-GPU inference with queue prefetch
callbacks.append(InferenceRunner(QueueInput(dataset_val), infs))
else:
# multi-GPU inference (with mandatory queue prefetch)
callbacks.append(DataParallelInferenceRunner(
dataset_val, infs, list(range(nr_tower))))
if get_num_gpu() > 0:
callbacks.append(GPUUtilizationTracker())
return TrainConfig(
model=model,
data=data,
callbacks=callbacks,
steps_per_epoch=100 if args.fake else 1281167 // args.batch,
max_epoch=105,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# generic:
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use. Default to use all available ones')
parser.add_argument('--eval', action='store_true', help='run offline evaluation instead of training')
parser.add_argument('--load', help='load a model for training or evaluation')
# data:
parser.add_argument('--data', help='ILSVRC dataset dir')
parser.add_argument('--fake', help='use FakeData to debug or benchmark this model', action='store_true')
parser.add_argument('--symbolic', help='use symbolic data loader', action='store_true')
# model:
parser.add_argument('--data-format', help='the image data layout used by the model',
default='NCHW', choices=['NCHW', 'NHWC'])
parser.add_argument('-d', '--depth', help='ResNet depth',
type=int, default=50, choices=[18, 34, 50, 101, 152])
parser.add_argument('--weight-decay-norm', action='store_true',
help="apply weight decay on normalization layers (gamma & beta)."
"This is used in torch/pytorch, and slightly "
"improves validation accuracy of large models.")
parser.add_argument('--batch', default=256, type=int,
help="total batch size. "
"Note that it's best to keep per-GPU batch size in [32, 64] to obtain the best accuracy."
"Pretrained models listed in README were trained with batch=32x8.")
parser.add_argument('--mode', choices=['resnet', 'preact', 'se'],
help='variants of resnet to use', default='resnet')
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
model = Model(args.depth, args.mode)
model.data_format = args.data_format
    if args.weight_decay_norm:
model.weight_decay_pattern = ".*/W|.*/gamma|.*/beta"
if args.eval:
batch = 128 # something that can run on one gpu
ds = get_imagenet_dataflow(args.data, 'val', batch)
eval_on_ILSVRC12(model, get_model_loader(args.load), ds)
else:
if args.fake:
logger.set_logger_dir(os.path.join('train_log', 'tmp'), 'd')
else:
logger.set_logger_dir(
os.path.join('train_log',
'imagenet-{}-d{}-batch{}'.format(
args.mode, args.depth, args.batch)))
config = get_config(model)
if args.load:
config.session_init = get_model_loader(args.load)
trainer = SyncMultiGPUTrainerReplicated(max(get_num_gpu(), 1))
launch_train_with_config(config, trainer)
| 42.194969
| 119
| 0.616336
|
7949fe9003d803da081b7acb7d387900c7bcdd0a
| 688
|
bzl
|
Python
|
scala/scala_proto_library.bzl
|
justinwp/rules_proto
|
76e30bc0ad6c2f4150f40e593db83eedeb069f1e
|
[
"Apache-2.0"
] | null | null | null |
scala/scala_proto_library.bzl
|
justinwp/rules_proto
|
76e30bc0ad6c2f4150f40e593db83eedeb069f1e
|
[
"Apache-2.0"
] | null | null | null |
scala/scala_proto_library.bzl
|
justinwp/rules_proto
|
76e30bc0ad6c2f4150f40e593db83eedeb069f1e
|
[
"Apache-2.0"
] | null | null | null |
load("@build_stack_rules_proto//scala:scala_proto_compile.bzl", "scala_proto_compile")
load("@io_bazel_rules_scala//scala:scala.bzl", "scala_library")
def scala_proto_library(**kwargs):
name = kwargs.get("name")
deps = kwargs.get("deps")
visibility = kwargs.get("visibility")
name_pb = name + "_pb"
scala_proto_compile(
name = name_pb,
deps = deps,
transitive = True,
visibility = visibility,
)
scala_library(
name = name,
srcs = [name_pb],
deps = [str(Label("//scala:proto_deps"))],
exports = [
str(Label("//scala:proto_deps")),
],
visibility = visibility,
)
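# Example usage in a BUILD file (a sketch; the load label and target names are hypothetical):
#   load("@build_stack_rules_proto//scala:scala_proto_library.bzl", "scala_proto_library")
#   scala_proto_library(
#       name = "person_scala_proto",
#       deps = ["//proto:person_proto"],
#       visibility = ["//visibility:public"],
#   )
# This expands to a "<name>_pb" scala_proto_compile target plus a scala_library wrapping it.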
| 25.481481
| 86
| 0.594477
|
7949ff2898db0dc8881c2e65c93c152478f3d64f
| 1,556
|
py
|
Python
|
tests/test_functions.py
|
pjhartout/fastwlk
|
deb78923c9a8450099c26bac09da94ae87892d0d
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_functions.py
|
pjhartout/fastwlk
|
deb78923c9a8450099c26bac09da94ae87892d0d
|
[
"BSD-3-Clause"
] | 7
|
2022-03-21T08:46:44.000Z
|
2022-03-25T16:20:48.000Z
|
tests/test_functions.py
|
pjhartout/fastwlk
|
deb78923c9a8450099c26bac09da94ae87892d0d
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""test_filename.py
***file description***
"""
import json
import pickle
from typing import List
import pytest
from fastwlk.utils.functions import (
chunks,
distribute_function,
flatten_lists,
generate_random_strings,
)
from pyprojroot import here
# Load toy data
with open(here() / "data/graphs.pkl", "rb") as f:
graphs = pickle.load(f)
with open("data/test_encoding_graph_1.json", "r") as f:
encoding = json.load(f)
def test_generate_random_strings_passes():
gen = generate_random_strings(10, 10)
assert len(gen) == 10
def test_generate_random_strings_fails():
with pytest.raises(Exception) as e_info:
generate_random_strings(1, 70)
def test_distribute_function():
def test_func(x):
return x * x
X = list(range(100))
res = distribute_function(test_func, X, -1)
    assert isinstance(res, list)
test_lists_to_flatten = [
([[1, 2, 3], [1, 2, 3]], [1, 2, 3, 1, 2, 3]),
([[1, 2, 3], [[1, 2, 3], [1, 2, 3]]], [1, 2, 3, [1, 2, 3], [1, 2, 3]]),
]
@pytest.mark.parametrize("input, expected", test_lists_to_flatten)
def test_flatten_lists(input, expected):
assert list(flatten_lists(input)) == expected
test_chunks_data = [
(
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
[[1, 2, 3], [4, 5, 6], [7, 8], [9, 10]],
),
([1, 2, 3, 4], [[1], [2], [3], [4]],),
]
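# The expected outputs above suggest that chunks(seq, 4) splits seq into 4 roughly equal
# consecutive parts, with part sizes differing by at most one element.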
@pytest.mark.parametrize("input, expected", test_chunks_data)
def test_chunks(input, expected):
assert list(chunks(input, 4)) == expected
| 21.027027
| 75
| 0.618895
|
7949ff407aff4dcbc55e4563afee2ca80403c944
| 73
|
py
|
Python
|
source/fiblist/conf/settings/production.py
|
nicorellius/fiblist
|
269c73096cf6e67184966dadd3efc5225ce9b01e
|
[
"Unlicense"
] | null | null | null |
source/fiblist/conf/settings/production.py
|
nicorellius/fiblist
|
269c73096cf6e67184966dadd3efc5225ce9b01e
|
[
"Unlicense"
] | null | null | null |
source/fiblist/conf/settings/production.py
|
nicorellius/fiblist
|
269c73096cf6e67184966dadd3efc5225ce9b01e
|
[
"Unlicense"
] | null | null | null |
from .base import *
DEBUG = False
ALLOWED_HOSTS += ['fiblist.opsys.io']
| 14.6
| 37
| 0.69863
|
7949fffd7e7c97e320402073971299f938ca79af
| 2,763
|
py
|
Python
|
src/pose_moves_final (copy).py
|
yousszr/Dji-TelloPy-PoseEstimation-FaceTracking
|
40ba58c307ea081a6ddef470ca2557b48df33680
|
[
"Apache-2.0"
] | null | null | null |
src/pose_moves_final (copy).py
|
yousszr/Dji-TelloPy-PoseEstimation-FaceTracking
|
40ba58c307ea081a6ddef470ca2557b48df33680
|
[
"Apache-2.0"
] | null | null | null |
src/pose_moves_final (copy).py
|
yousszr/Dji-TelloPy-PoseEstimation-FaceTracking
|
40ba58c307ea081a6ddef470ca2557b48df33680
|
[
"Apache-2.0"
] | null | null | null |
# from tellopy import Tello
from djitellopy import tello
import cv2
from simple_pid import PID
import math
class PoseMover:
def __init__(self, drone: tello.Tello):
self.drone = drone
        self.target =[960/2,720*0.35,0]  # Set only for safety --> when no nose is detected, hold still at the centre
self.ref_x = int(960/2)
self.ref_y = int(720*0.35)
self.ref_z_up = 240
self.ref_z_down = 230
# self.pid_yaw = PID(0.1,0,0.04,setpoint=0,output_limits=(-100,100))
# self.pid_throttle = PID(0.11,0,0.05,setpoint=0,output_limits=(-80,100))
# self.pid_pitch = PID(0.25,0,0.07,setpoint=0,output_limits=(-80,100))
self.pid_yaw = PID(0.12,0,0.00,setpoint=0,output_limits=(-100,100))
self.pid_throttle = PID(0.12,0,0.00,setpoint=0,output_limits=(-80,100))
self.pid_pitch = PID(0.25,0,0.00,setpoint=0,output_limits=(-80,100))
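        # These controllers turn tracking errors into stick commands (see move() below):
        # yaw <- horizontal pixel offset of the nose from the frame centre,
        # throttle <- vertical pixel offset, pitch <- offset of the size value z from its
        # reference band (a rough distance proxy). Output limits bound the command range.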
def move(self, mv,axis_speed,x,y,z,imageft):
        # If the nose is not detected, x and y are NaN --> check for that
if(not math.isnan(x)):
self.target[0] = x
self.target[1] = y
self.target[2] = z
#FACE TRACKING MOTION DECISION
xoff = int(self.target[0]-self.ref_x)
yoff = int(self.ref_y-self.target[1])
zoff=0
if self.target[2]==0:
zoff=0
elif self.target[2]>self.ref_z_up:
zoff = int(self.target[2]-self.ref_z_up)
elif self.target[2]<self.ref_z_down:
zoff = -int(self.ref_z_down- self.target[2])
cv2.circle(imageft, (self.ref_x, self.ref_y), 15, (250,150,0), 1,cv2.LINE_AA)
cv2.arrowedLine(imageft, (self.ref_x, self.ref_y), (int(self.target[0]),int(self.target[1])), (250, 150, 0), 6)
axis_speed["yaw"] = int(-self.pid_yaw(xoff))
axis_speed["throttle"] = int(-self.pid_throttle(yoff))
axis_speed["pitch"] = -int(-self.pid_pitch(zoff))
#POSE MOTION DECISION
if mv == 0:
print('take_photo')
elif mv == 2:
axis_speed["roll"]=30 #Vado a destra
elif mv == 3:
axis_speed["roll"]=-30 #Vado a sinistra
elif mv == 4:
axis_speed["flip_left"]+=1 #flip sinistra
elif mv == 5:
axis_speed["flip_right"]+=1 #flip destra
elif mv == 6:
self.take_photo()
else :
axis_speed["roll"]=0
axis_speed["flip_left"]=0
axis_speed["flip_right"]=0
        return axis_speed,imageft  # return the axis speed values to pass to the drone, and the frame
def take_photo(self):
"""Take a photo from video"""
frame_read = self.drone.get_frame_read()
cv2.imwrite("picture.png", frame_read.frame)
| 38.915493
| 119
| 0.581614
|
794a00a141bc5aacd3a7224404ae82b63d15499e
| 7,767
|
py
|
Python
|
docs/conf.py
|
spcial/crypto-prediction
|
dd568c4de386c551e36bd9b698472de83a847ef4
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
spcial/crypto-prediction
|
dd568c4de386c551e36bd9b698472de83a847ef4
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
spcial/crypto-prediction
|
dd568c4de386c551e36bd9b698472de83a847ef4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# sample documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 16 21:22:43 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Crypto-Tensor Documentation'
copyright = u'2017, Dennis Thiessen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'v0.0.1'
# The full version, including alpha/beta/rc tags.
release = 'v0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sampledoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'sample.tex', u'Crypto-Tensor Documentation',
u'Dennis Thiessen', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sample', u'Crypto-Tensor Documentation',
[u'Dennis Thiessen'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'sample', u'Crypto-Tensor Documentation',
u'Dennis Thiessen', 'crypto-tensor', 'Predicts the future.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| 31.962963
| 80
| 0.71469
|
794a019b82f1f0b81472790f2864534060f68f95
| 14,216
|
py
|
Python
|
test/functional/wallet_bumpfee.py
|
Mirkolance1/altcoin
|
d77e25be0ce015ff03c3df79801ac208e68be963
|
[
"MIT"
] | null | null | null |
test/functional/wallet_bumpfee.py
|
Mirkolance1/altcoin
|
d77e25be0ce015ff03c3df79801ac208e68be963
|
[
"MIT"
] | null | null | null |
test/functional/wallet_bumpfee.py
|
Mirkolance1/altcoin
|
d77e25be0ce015ff03c3df79801ac208e68be963
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the bumpfee RPC.
Verifies that the bumpfee RPC creates replacement transactions successfully when
its preconditions are met, and returns appropriate errors in other cases.
This module consists of around a dozen individual test cases implemented in the
top-level functions named as test_<test_case_description>. The test functions
can be disabled or reordered if needed for debugging. If new test cases are
added in the future, they should try to follow the same convention and not
make assumptions about execution order.
"""
from decimal import Decimal
from test_framework.blocktools import add_witness_commitment, create_block, create_coinbase, send_to_witness
from test_framework.messages import BIP125_SEQUENCE_NUMBER, CTransaction
from test_framework.test_framework import BitcoinTestFramework, SkipTest
from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error, bytes_to_hex_str, connect_nodes_bi, hex_str_to_bytes, sync_mempools
import io
WALLET_PASSPHRASE = "test"
WALLET_PASSPHRASE_TIMEOUT = 3600
class BumpFeeTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [[
"-deprecatedrpc=addwitnessaddress",
"-walletrbf={}".format(i),
] for i in range(self.num_nodes)]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
raise SkipTest("Altcoin doesn't support RBF.")
# Encrypt wallet for test_locked_wallet_fails test
self.nodes[1].node_encrypt_wallet(WALLET_PASSPHRASE)
self.start_node(1)
self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
peer_node, rbf_node = self.nodes
rbf_node_address = rbf_node.getnewaddress()
        # fund rbf node with 25 coins of 0.001 btc (100,000 satoshis) each
self.log.info("Mining blocks...")
peer_node.generate(110)
self.sync_all()
for i in range(25):
peer_node.sendtoaddress(rbf_node_address, 0.001)
self.sync_all()
peer_node.generate(1)
self.sync_all()
assert_equal(rbf_node.getbalance(), Decimal("0.025"))
self.log.info("Running tests")
dest_address = peer_node.getnewaddress()
test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address)
test_segwit_bumpfee_succeeds(rbf_node, dest_address)
test_nonrbf_bumpfee_fails(peer_node, dest_address)
test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
test_small_output_fails(rbf_node, dest_address)
test_dust_to_fee(rbf_node, dest_address)
test_settxfee(rbf_node, dest_address)
test_rebumping(rbf_node, dest_address)
test_rebumping_not_replaceable(rbf_node, dest_address)
test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
test_bumpfee_metadata(rbf_node, dest_address)
test_locked_wallet_fails(rbf_node, dest_address)
self.log.info("Success")
def test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbftx = rbf_node.gettransaction(rbfid)
sync_mempools((rbf_node, peer_node))
assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert_equal(bumped_tx["errors"], [])
assert bumped_tx["fee"] - abs(rbftx["fee"]) > 0
# check that bumped_tx propagates, original tx was evicted and has a wallet conflict
sync_mempools((rbf_node, peer_node))
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert bumped_tx["txid"] in peer_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
assert rbfid not in peer_node.getrawmempool()
oldwtx = rbf_node.gettransaction(rbfid)
assert len(oldwtx["walletconflicts"]) > 0
# check wallet transaction replaces and replaced_by values
bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
assert_equal(bumpedwtx["replaces_txid"], rbfid)
def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
# Create a transaction with segwit output, then create an RBF transaction
# which spends it, and make sure bumpfee can be called on it.
segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.001"))
segwit_out = rbf_node.getaddressinfo(rbf_node.getnewaddress())
rbf_node.addwitnessaddress(segwit_out["address"])
segwitid = send_to_witness(
use_p2wsh=False,
node=rbf_node,
utxo=segwit_in,
pubkey=segwit_out["pubkey"],
encode_p2sh=False,
amount=Decimal("0.0009"),
sign=True)
rbfraw = rbf_node.createrawtransaction([{
'txid': segwitid,
'vout': 0,
"sequence": BIP125_SEQUENCE_NUMBER
}], {dest_address: Decimal("0.0005"),
rbf_node.getrawchangeaddress(): Decimal("0.0003")})
rbfsigned = rbf_node.signrawtransactionwithwallet(rbfraw)
rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
assert rbfid in rbf_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
def test_nonrbf_bumpfee_fails(peer_node, dest_address):
# cannot replace a non RBF transaction (from node which did not enable RBF)
not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000"))
assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
# cannot bump fee unless the tx has only inputs that we own.
# here, the rbftx has a peer_node coin and then adds a rbf_node input
# Note that this test depends upon the RPC code checking input ownership prior to change outputs
# (since it can't use fundrawtransaction, it lacks a proper change output)
utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)]
inputs = [{
"txid": utxo["txid"],
"vout": utxo["vout"],
"address": utxo["address"],
"sequence": BIP125_SEQUENCE_NUMBER
} for utxo in utxos]
output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.001")
rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
signedtx = rbf_node.signrawtransactionwithwallet(rawtx)
signedtx = peer_node.signrawtransactionwithwallet(signedtx["hex"])
rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
assert_raises_rpc_error(-4, "Transaction contains inputs that don't belong to this wallet",
rbf_node.bumpfee, rbfid)
def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address):
# cannot bump fee if the transaction has a descendant
# parent is send-to-self, so we don't have to check which output is change when creating the child tx
parent_id = spend_one_input(rbf_node, rbf_node_address)
tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000})
tx = rbf_node.signrawtransactionwithwallet(tx)
rbf_node.sendrawtransaction(tx["hex"])
assert_raises_rpc_error(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
def test_small_output_fails(rbf_node, dest_address):
# cannot bump fee with a too-small output
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.bumpfee(rbfid, {"totalFee": 50000})
rbfid = spend_one_input(rbf_node, dest_address)
assert_raises_rpc_error(-4, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 50001})
def test_dust_to_fee(rbf_node, dest_address):
# check that if output is reduced to dust, it will be converted to fee
    # the bumped tx sets fee=48,200, but it converts to 50,000
rbfid = spend_one_input(rbf_node, dest_address)
fulltx = rbf_node.getrawtransaction(rbfid, 1)
# (32-byte p2sh-pwpkh output size + 148 p2pkh spend estimate) * 10k(discard_rate) / 1000 = 1800
# P2SH outputs are slightly "over-discarding" due to the IsDust calculation assuming it will
# be spent as a P2PKH.
bumped_tx = rbf_node.bumpfee(rbfid, {"totalFee": 50000-1800})
full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
assert_equal(bumped_tx["fee"], Decimal("0.00050000"))
assert_equal(len(fulltx["vout"]), 2)
assert_equal(len(full_bumped_tx["vout"]), 1) #change output is eliminated
def test_settxfee(rbf_node, dest_address):
# check that bumpfee reacts correctly to the use of settxfee (paytxfee)
rbfid = spend_one_input(rbf_node, dest_address)
requested_feerate = Decimal("0.00025000")
rbf_node.settxfee(requested_feerate)
bumped_tx = rbf_node.bumpfee(rbfid)
actual_feerate = bumped_tx["fee"] * 1000 / rbf_node.getrawtransaction(bumped_tx["txid"], True)["vsize"]
# Assert that the difference between the requested feerate and the actual
# feerate of the bumped transaction is small.
assert_greater_than(Decimal("0.00001000"), abs(requested_feerate - actual_feerate))
rbf_node.settxfee(Decimal("0.00000000")) # unset paytxfee
def test_rebumping(rbf_node, dest_address):
# check that re-bumping the original tx fails, but bumping the bumper succeeds
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 2000})
assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 3000})
rbf_node.bumpfee(bumped["txid"], {"totalFee": 3000})
def test_rebumping_not_replaceable(rbf_node, dest_address):
# check that re-bumping a non-replaceable bump tx fails
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 10000, "replaceable": False})
assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
{"totalFee": 20000})
def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
# check that unconfirmed outputs from bumped transactions are not spendable
rbfid = spend_one_input(rbf_node, rbf_node_address)
rbftx = rbf_node.gettransaction(rbfid)["hex"]
assert rbfid in rbf_node.getrawmempool()
bumpid = rbf_node.bumpfee(rbfid)["txid"]
assert bumpid in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
# check that outputs from the bump transaction are not spendable
# due to the replaces_txid check in CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])
# submit a block with the rbf tx to clear the bump tx out of the mempool,
# then invalidate the block so the rbf tx will be put back in the mempool.
# This makes it possible to check whether the rbf tx outputs are
# spendable before the rbf tx is confirmed.
block = submit_block_with_tx(rbf_node, rbftx)
# Can not abandon conflicted tx
assert_raises_rpc_error(-5, 'Transaction not eligible for abandonment', lambda: rbf_node.abandontransaction(txid=bumpid))
rbf_node.invalidateblock(block.hash)
# Call abandon to make sure the wallet doesn't attempt to resubmit
# the bump tx and hope the wallet does not rebroadcast before we call.
rbf_node.abandontransaction(bumpid)
assert bumpid not in rbf_node.getrawmempool()
assert rbfid in rbf_node.getrawmempool()
# check that outputs from the rbf tx are not spendable before the
# transaction is confirmed, due to the replaced_by_txid check in
# CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])
# check that the main output from the rbf tx is spendable after confirmed
rbf_node.generate(1)
assert_equal(
sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
def test_bumpfee_metadata(rbf_node, dest_address):
rbfid = rbf_node.sendtoaddress(dest_address, Decimal("0.00100000"), "comment value", "to value")
bumped_tx = rbf_node.bumpfee(rbfid)
bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(bumped_wtx["comment"], "comment value")
assert_equal(bumped_wtx["to"], "to value")
def test_locked_wallet_fails(rbf_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.walletlock()
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.",
rbf_node.bumpfee, rbfid)
def spend_one_input(node, dest_address):
tx_input = dict(
sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in node.listunspent() if u["amount"] == Decimal("0.00100000")))
rawtx = node.createrawtransaction(
[tx_input], {dest_address: Decimal("0.00050000"),
node.getrawchangeaddress(): Decimal("0.00049000")})
signedtx = node.signrawtransactionwithwallet(rawtx)
txid = node.sendrawtransaction(signedtx["hex"])
return txid
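# Fee arithmetic for the helper above: the 0.00100000 input minus the 0.00050000 destination
# and 0.00049000 change outputs leaves 0.00001000 (1,000 satoshis) as the original fee,
# which the subsequent bumpfee calls must exceed.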
def submit_block_with_tx(node, tx):
ctx = CTransaction()
ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx)))
tip = node.getbestblockhash()
height = node.getblockcount() + 1
block_time = node.getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.vtx.append(ctx)
block.rehash()
block.hashMerkleRoot = block.calc_merkle_root()
add_witness_commitment(block)
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize(True)))
return block
if __name__ == "__main__":
BumpFeeTest().main()
| 45.564103
| 159
| 0.728686
|
794a02b7324ca863ca662a4480f9e8c19b9b0cd1
| 12,357
|
py
|
Python
|
datalad/distribution/tests/test_clone.py
|
yarikoptic/datalad
|
c0cd538de2ed9a30c0f58256c7afa6e18d325505
|
[
"MIT"
] | null | null | null |
datalad/distribution/tests/test_clone.py
|
yarikoptic/datalad
|
c0cd538de2ed9a30c0f58256c7afa6e18d325505
|
[
"MIT"
] | 6
|
2015-11-20T21:41:13.000Z
|
2018-06-12T14:27:32.000Z
|
datalad/distribution/tests/test_clone.py
|
yarikoptic/datalad
|
c0cd538de2ed9a30c0f58256c7afa6e18d325505
|
[
"MIT"
] | 1
|
2017-03-28T14:44:16.000Z
|
2017-03-28T14:44:16.000Z
|
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Test clone action
"""
from os.path import join as opj
from os.path import isdir
from os.path import exists
from os.path import basename
from os.path import dirname
from os import mkdir
from os import chmod
from os import geteuid
from mock import patch
from datalad.api import create
from datalad.api import clone
from datalad.utils import chpwd
from datalad.utils import _path_
from datalad.utils import rmtree
from datalad.support.exceptions import IncompleteResultsError
from datalad.support.gitrepo import GitRepo
from datalad.support.annexrepo import AnnexRepo
from datalad.cmd import Runner
from datalad.tests.utils import create_tree
from datalad.tests.utils import with_tempfile
from datalad.tests.utils import assert_in
from datalad.tests.utils import with_tree
from datalad.tests.utils import with_testrepos
from datalad.tests.utils import eq_
from datalad.tests.utils import ok_
from datalad.tests.utils import assert_false
from datalad.tests.utils import ok_file_has_content
from datalad.tests.utils import assert_not_in
from datalad.tests.utils import assert_raises
from datalad.tests.utils import assert_status
from datalad.tests.utils import assert_message
from datalad.tests.utils import assert_result_count
from datalad.tests.utils import assert_result_values_equal
from datalad.tests.utils import ok_startswith
from datalad.tests.utils import ok_clean_git
from datalad.tests.utils import serve_path_via_http
from datalad.tests.utils import use_cassette
from datalad.tests.utils import skip_if_no_network
from datalad.tests.utils import skip_if_on_windows
from datalad.tests.utils import skip_if
from ..dataset import Dataset
@with_tempfile(mkdir=True)
@with_tempfile(mkdir=True)
@with_tempfile(mkdir=True)
def test_invalid_args(path, otherpath, alienpath):
assert_raises(ValueError, clone, 'Zoidberg', path='Zoidberg')
# install to an invalid URL
assert_raises(ValueError, clone, 'Zoidberg', path='ssh://mars:Zoidberg')
# install to a remote location
assert_raises(ValueError, clone, 'Zoidberg', path='ssh://mars/Zoidberg')
# make fake dataset
ds = create(path)
assert_raises(IncompleteResultsError, ds.clone, '/higherup.', 'Zoidberg')
# make real dataset, try to install outside
ds_target = create(opj(otherpath, 'target'))
assert_raises(ValueError, ds_target.clone, ds.path, path=ds.path)
assert_status('error', ds_target.clone(ds.path, path=alienpath, on_failure='ignore'))
@skip_if_no_network
@use_cassette('test_install_crcns')
@with_tempfile(mkdir=True)
@with_tempfile(mkdir=True)
def test_clone_crcns(tdir, ds_path):
with chpwd(tdir):
res = clone('///', path="all-nonrecursive", on_failure='ignore')
assert_status('ok', res)
# again, but into existing dataset:
ds = create(ds_path)
crcns = ds.clone("///crcns", result_xfm='datasets', return_type='item-or-list')
ok_(crcns.is_installed())
eq_(crcns.path, opj(ds_path, "crcns"))
assert_in(crcns.path, ds.subdatasets(result_xfm='paths'))
@skip_if_no_network
@use_cassette('test_install_crcns')
@with_tree(tree={'sub': {}})
def test_clone_datasets_root(tdir):
with chpwd(tdir):
ds = clone("///", result_xfm='datasets', return_type='item-or-list')
ok_(ds.is_installed())
eq_(ds.path, opj(tdir, 'datasets.datalad.org'))
# do it a second time:
res = clone("///", on_failure='ignore')
assert_message(
"dataset %s was already cloned from '%s'",
res)
assert_status('notneeded', res)
# and a third time into an existing something, that is not a dataset:
with open(opj(tdir, 'sub', 'a_file.txt'), 'w') as f:
f.write("something")
res = clone('///', path="sub", on_failure='ignore')
assert_message(
'target path already exists and not empty, refuse to clone into target path',
res)
assert_status('error', res)
@with_testrepos('.*basic.*', flavors=['local-url', 'network', 'local'])
@with_tempfile(mkdir=True)
def test_clone_simple_local(src, path):
origin = Dataset(path)
# now install it somewhere else
ds = clone(src, path, description='mydummy',
result_xfm='datasets', return_type='item-or-list')
eq_(ds.path, path)
ok_(ds.is_installed())
if not isinstance(origin.repo, AnnexRepo):
# this means it is a GitRepo
ok_(isinstance(origin.repo, GitRepo))
# stays plain Git repo
ok_(isinstance(ds.repo, GitRepo))
ok_(not isinstance(ds.repo, AnnexRepo))
ok_(GitRepo.is_valid_repo(ds.path))
eq_(set(ds.repo.get_indexed_files()),
{'test.dat', 'INFO.txt'})
ok_clean_git(path, annex=False)
else:
# must be an annex
ok_(isinstance(ds.repo, AnnexRepo))
ok_(AnnexRepo.is_valid_repo(ds.path, allow_noninitialized=False))
eq_(set(ds.repo.get_indexed_files()),
{'test.dat', 'INFO.txt', 'test-annex.dat'})
ok_clean_git(path, annex=True)
# no content was installed:
ok_(not ds.repo.file_has_content('test-annex.dat'))
uuid_before = ds.repo.uuid
eq_(ds.repo.get_description(), 'mydummy')
# installing it again, shouldn't matter:
res = clone(src, path)
assert_result_values_equal(res, 'source_url', [src])
assert_status('notneeded', res)
assert_message("dataset %s was already cloned from '%s'", res)
ok_(ds.is_installed())
if isinstance(origin.repo, AnnexRepo):
eq_(uuid_before, ds.repo.uuid)
@with_testrepos(flavors=['local-url', 'network', 'local'])
@with_tempfile
def test_clone_dataset_from_just_source(url, path):
with chpwd(path, mkdir=True):
ds = clone(url, result_xfm='datasets', return_type='item-or-list')
ok_startswith(ds.path, path)
ok_(ds.is_installed())
ok_(GitRepo.is_valid_repo(ds.path))
ok_clean_git(ds.path, annex=None)
assert_in('INFO.txt', ds.repo.get_indexed_files())
@with_tree(tree={
'ds': {'test.txt': 'some'},
})
@serve_path_via_http
@with_tempfile(mkdir=True)
def test_clone_dataladri(src, topurl, path):
# make plain git repo
ds_path = opj(src, 'ds')
gr = GitRepo(ds_path, create=True)
gr.add('test.txt')
gr.commit('demo')
Runner(cwd=gr.path)(['git', 'update-server-info'])
# now install it somewhere else
with patch('datalad.support.network.DATASETS_TOPURL', topurl):
ds = clone('///ds', path, result_xfm='datasets', return_type='item-or-list')
eq_(ds.path, path)
ok_clean_git(path, annex=False)
ok_file_has_content(opj(path, 'test.txt'), 'some')
@with_testrepos('submodule_annex', flavors=['local', 'local-url', 'network'])
@with_tempfile(mkdir=True)
@with_tempfile(mkdir=True)
def test_clone_isnot_recursive(src, path_nr, path_r):
ds = clone(src, path_nr, result_xfm='datasets', return_type='item-or-list')
ok_(ds.is_installed())
    # check nothing is unintentionally installed
subdss = ds.subdatasets(recursive=True)
assert_result_count(subdss, len(subdss), state='absent')
# this also means, subdatasets to be listed as not fulfilled:
eq_(set(ds.subdatasets(recursive=True, fulfilled=False, result_xfm='relpaths')),
{'subm 1', 'subm 2'})
@with_testrepos(flavors=['local'])
# 'local-url', 'network'
# TODO: Somehow annex gets confused while initializing installed ds, whose
# .git/config shows a submodule url "file:///aaa/bbb%20b/..."
# this is delivered by with_testrepos as the url to clone
@with_tempfile
def test_clone_into_dataset(source, top_path):
ds = create(top_path)
ok_clean_git(ds.path)
subds = ds.clone(source, "sub",
result_xfm='datasets', return_type='item-or-list')
if isinstance(subds.repo, AnnexRepo) and subds.repo.is_direct_mode():
ok_(exists(opj(subds.path, '.git')))
else:
ok_(isdir(opj(subds.path, '.git')))
ok_(subds.is_installed())
assert_in('sub', ds.subdatasets(fulfilled=True, result_xfm='relpaths'))
# sub is clean:
ok_clean_git(subds.path, annex=None)
# top is clean:
ok_clean_git(ds.path, annex=None)
    # but we could also save while installing, and there should be no side effect
    # of saving any other changes if we choose not to auto-save changes
# Create a dummy change
create_tree(ds.path, {'dummy.txt': 'buga'})
ok_clean_git(ds.path, untracked=['dummy.txt'])
subds_ = ds.clone(source, "sub2",
result_xfm='datasets', return_type='item-or-list')
eq_(subds_.path, opj(ds.path, "sub2")) # for paranoid yoh ;)
ok_clean_git(ds.path, untracked=['dummy.txt'])
@with_testrepos('submodule_annex', flavors=['local', 'local-url', 'network'])
@with_tempfile(mkdir=True)
def test_notclone_known_subdataset(src, path):
# get the superdataset:
ds = clone(src, path,
result_xfm='datasets', return_type='item-or-list')
# subdataset not installed:
subds = Dataset(opj(path, 'subm 1'))
assert_false(subds.is_installed())
assert_in('subm 1', ds.subdatasets(fulfilled=False, result_xfm='relpaths'))
assert_not_in('subm 1', ds.subdatasets(fulfilled=True, result_xfm='relpaths'))
# clone is not meaningful
res = ds.clone('subm 1', on_failure='ignore')
assert_status('error', res)
assert_message('Failed to clone data from any candidate source URL: %s',
res)
# get does the job
res = ds.get(path='subm 1', get_data=False)
assert_status('ok', res)
ok_(subds.is_installed())
ok_(AnnexRepo.is_valid_repo(subds.path, allow_noninitialized=False))
# Verify that it is the correct submodule installed and not
# new repository initiated
eq_(set(subds.repo.get_indexed_files()),
{'test.dat', 'INFO.txt', 'test-annex.dat'})
assert_not_in('subm 1', ds.subdatasets(fulfilled=False, result_xfm='relpaths'))
assert_in('subm 1', ds.subdatasets(fulfilled=True, result_xfm='relpaths'))
@with_tempfile(mkdir=True)
def test_failed_clone(dspath):
ds = create(dspath)
res = ds.clone("http://nonexistingreallyanything.somewhere/bla", "sub",
on_failure='ignore')
assert_status('error', res)
assert_message('Failed to clone data from any candidate source URL: %s',
res)
@with_testrepos('submodule_annex', flavors=['local'])
@with_tempfile(mkdir=True)
def test_reckless(path, top_path):
ds = clone(path, top_path, reckless=True,
result_xfm='datasets', return_type='item-or-list')
eq_(ds.config.get('annex.hardlink', None), 'true')
eq_(ds.repo.repo_info()['untrusted repositories'][0]['here'], True)
@with_tempfile
@with_tempfile
def test_install_source_relpath(src, dest):
create(src)
src_ = basename(src)
with chpwd(dirname(src)):
clone(src_, dest)
@with_tempfile
@with_tempfile
def test_clone_isnt_a_smartass(origin_path, path):
origin = create(origin_path)
cloned = clone(origin, path,
result_xfm='datasets', return_type='item-or-list')
with chpwd(path):
        # now we are inside a dataset clone, and we make another one
        # we do not want automatic subdatasetification without being given a
        # dataset explicitly
clonedsub = clone(origin, 'testsub',
result_xfm='datasets', return_type='item-or-list')
# correct destination
assert clonedsub.path.startswith(path)
# no subdataset relation
eq_(cloned.subdatasets(), [])
@skip_if_on_windows
@skip_if(not geteuid(), "Will fail under super-user")
@with_tempfile(mkdir=True)
def test_clone_report_permission_issue(tdir):
pdir = _path_(tdir, 'protected')
mkdir(pdir)
# make it read-only
chmod(pdir, 0o555)
with chpwd(pdir):
res = clone('///', result_xfm=None, return_type='list', on_failure='ignore')
assert_status('error', res)
assert_result_count(
res, 1, status='error',
message="could not create work tree dir '%s/datasets.datalad.org': Permission denied" % pdir)
| 36.886567
| 105
| 0.68358
|
794a02d1fdff5dc44f7d5064cfd3572d311d7688
| 3,888
|
py
|
Python
|
scale.py
|
vanvalenlab/kiosk-autoscaler
|
770cc8ec47927549e64c032b4df81732235bdbf9
|
[
"Apache-2.0"
] | null | null | null |
scale.py
|
vanvalenlab/kiosk-autoscaler
|
770cc8ec47927549e64c032b4df81732235bdbf9
|
[
"Apache-2.0"
] | 13
|
2018-10-29T19:54:17.000Z
|
2020-04-13T20:38:23.000Z
|
scale.py
|
vanvalenlab/kiosk-autoscaler
|
770cc8ec47927549e64c032b4df81732235bdbf9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016-2020 The Van Valen Lab at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/kiosk-autoscaler/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# vanvalenlab@gmail.com
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Turn on and off k8s resources based on items in the Redis queue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import logging
import logging.handlers
import sys
import time
import decouple
import autoscaler
def initialize_logger(debug_mode=True):
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'[%(asctime)s]:[%(levelname)s]:[%(name)s]: %(message)s')
console = logging.StreamHandler(stream=sys.stdout)
console.setFormatter(formatter)
fh = logging.handlers.RotatingFileHandler(
filename='autoscaler.log',
maxBytes=10000000,
backupCount=10)
fh.setFormatter(formatter)
if debug_mode:
console.setLevel(logging.DEBUG)
else:
console.setLevel(logging.INFO)
fh.setLevel(logging.DEBUG)
logger.addHandler(console)
logger.addHandler(fh)
logging.getLogger('kubernetes.client.rest').setLevel(logging.INFO)
if __name__ == '__main__':
initialize_logger()
_logger = logging.getLogger(__file__)
REDIS_CLIENT = autoscaler.redis.RedisClient(
host=decouple.config('REDIS_HOST', cast=str, default='redis-master'),
port=decouple.config('REDIS_PORT', default=6379, cast=int),
backoff=decouple.config('REDIS_INTERVAL', default=1, cast=int))
SCALER = autoscaler.Autoscaler(
redis_client=REDIS_CLIENT,
queues=decouple.config('QUEUES', default='predict,track', cast=str),
queue_delim=decouple.config('QUEUE_DELIMITER', ',', cast=str))
INTERVAL = decouple.config('INTERVAL', default=5, cast=int)
RESOURCE_NAMESPACE = decouple.config('RESOURCE_NAMESPACE', default='default')
RESOURCE_TYPE = decouple.config('RESOURCE_TYPE', default='deployment')
RESOURCE_NAME = decouple.config('RESOURCE_NAME')
MIN_PODS = decouple.config('MIN_PODS', default=0, cast=int)
MAX_PODS = decouple.config('MAX_PODS', default=1, cast=int)
KEYS_PER_POD = decouple.config('KEYS_PER_POD', default=1, cast=int)
while True:
try:
SCALER.scale(namespace=RESOURCE_NAMESPACE,
resource_type=RESOURCE_TYPE,
name=RESOURCE_NAME,
min_pods=MIN_PODS,
max_pods=MAX_PODS,
keys_per_pod=KEYS_PER_POD)
gc.collect()
time.sleep(INTERVAL)
except Exception as err: # pylint: disable=broad-except
_logger.critical('Fatal Error: %s: %s', type(err).__name__, err)
sys.exit(1)
| 36.336449
| 81
| 0.689043
|
794a02dd77dce6793e7f79b1fea0a6bd5a984a29
| 421
|
py
|
Python
|
bin/shutdown.py
|
robertdigital/lookyloo
|
c9cbf27accac519e7060edc1feeba364c0b8729a
|
[
"BSD-3-Clause"
] | 1
|
2020-02-05T16:50:17.000Z
|
2020-02-05T16:50:17.000Z
|
bin/shutdown.py
|
robertdigital/lookyloo
|
c9cbf27accac519e7060edc1feeba364c0b8729a
|
[
"BSD-3-Clause"
] | null | null | null |
bin/shutdown.py
|
robertdigital/lookyloo
|
c9cbf27accac519e7060edc1feeba364c0b8729a
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from lookyloo.helpers import is_running, get_socket_path
import time
from redis import StrictRedis
if __name__ == '__main__':
r = StrictRedis(unix_socket_path=get_socket_path('cache'), db=1)
r.set('shutdown', 1)
time.sleep(5)
while True:
running = is_running()
if not running:
break
print(running)
time.sleep(5)
| 23.388889
| 68
| 0.638955
|
794a04b989b70c59ad16339e2dcca48f0624b450
| 2,416
|
py
|
Python
|
src/ggrc_workflows/models/cycle.py
|
Smotko/ggrc-core
|
b3abb58b24e7559960d71a94ba79c75539e7fe29
|
[
"Apache-2.0"
] | null | null | null |
src/ggrc_workflows/models/cycle.py
|
Smotko/ggrc-core
|
b3abb58b24e7559960d71a94ba79c75539e7fe29
|
[
"Apache-2.0"
] | 12
|
2015-01-08T14:50:19.000Z
|
2017-11-29T19:37:53.000Z
|
src/ggrc_workflows/models/cycle.py
|
Smotko/ggrc-core
|
b3abb58b24e7559960d71a94ba79c75539e7fe29
|
[
"Apache-2.0"
] | 1
|
2015-01-08T13:25:09.000Z
|
2015-01-08T13:25:09.000Z
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: dan@reciprocitylabs.com
"""Module contains a workflow Cycle model
"""
from sqlalchemy import orm
from ggrc import db
from ggrc.models.mixins import Described
from ggrc.models.mixins import Slugged
from ggrc.models.mixins import Stateful
from ggrc.models.mixins import Timeboxed
from ggrc.models.mixins import Titled
from ggrc.models.mixins import WithContact
class Cycle(WithContact, Stateful, Timeboxed, Described, Titled, Slugged,
db.Model):
"""Workflow Cycle model
"""
__tablename__ = 'cycles'
_title_uniqueness = False
VALID_STATES = (u'Assigned', u'InProgress', u'Finished', u'Verified')
workflow_id = db.Column(
db.Integer, db.ForeignKey('workflows.id'), nullable=False)
cycle_task_groups = db.relationship(
'CycleTaskGroup', backref='cycle', cascade='all, delete-orphan')
cycle_task_group_object_tasks = db.relationship(
'CycleTaskGroupObjectTask', backref='cycle',
cascade='all, delete-orphan')
cycle_task_entries = db.relationship(
'CycleTaskEntry', backref='cycle', cascade='all, delete-orphan')
is_current = db.Column(db.Boolean, default=True, nullable=False)
next_due_date = db.Column(db.Date)
_publish_attrs = [
'workflow',
'cycle_task_groups',
'is_current',
'next_due_date',
]
_aliases = {
"cycle_workflow": {
"display_name": "Workflow",
"filter_by": "_filter_by_cycle_workflow",
},
}
@classmethod
def _filter_by_cycle_workflow(cls, predicate):
from ggrc_workflows.models.workflow import Workflow
return Workflow.query.filter(
(Workflow.id == cls.workflow_id) &
(predicate(Workflow.slug) | predicate(Workflow.title))
).exists()
@classmethod
def eager_query(cls):
"""Add cycle task groups to cycle eager query
This function adds cycle_task_groups as a join option when fetching cycles,
and makes sure we fetch all cycle related data needed for generating cycle
json, in one query.
Returns:
a query object with cycle_task_groups added to joined load options.
"""
query = super(Cycle, cls).eager_query()
return query.options(
orm.joinedload('cycle_task_groups'),
)
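# Illustrative usage sketch (not part of the original module): how the eager
# query above is typically consumed. It assumes an active ggrc application and
# SQLAlchemy session; the filter shown is an arbitrary example.
def _example_current_cycles():
    """Return current cycles with task groups eagerly loaded (illustration only)."""
    return Cycle.eager_query().filter(Cycle.is_current == True).all()  # noqa: E712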
| 30.974359
| 79
| 0.709851
|
794a058737532503e8ff32d95cc2bf25f8aa25a2
| 1,075
|
py
|
Python
|
566 Reshape the Matrix.py
|
krishna13052001/LeetCode
|
cd6ec626bea61f0bd9e8493622074f9e69a7a1c3
|
[
"MIT"
] | 872
|
2015-06-15T12:02:41.000Z
|
2022-03-30T08:44:35.000Z
|
566 Reshape the Matrix.py
|
nadeemshaikh-github/LeetCode
|
3fb14aeea62a960442e47dfde9f964c7ffce32be
|
[
"MIT"
] | 8
|
2015-06-21T15:11:59.000Z
|
2022-02-01T11:22:34.000Z
|
566 Reshape the Matrix.py
|
nadeemshaikh-github/LeetCode
|
3fb14aeea62a960442e47dfde9f964c7ffce32be
|
[
"MIT"
] | 328
|
2015-06-28T03:10:35.000Z
|
2022-03-29T11:05:28.000Z
|
#!/usr/bin/python3
"""
In MATLAB, there is a very useful function called 'reshape', which can reshape a
matrix into a new one with a different size while keeping its original data.
You're given a matrix represented by a two-dimensional array, and two positive
integers r and c representing the row number and column number of the wanted
reshaped matrix, respectively.
The reshaped matrix needs to be filled with all the elements of the original
matrix in the same row-traversing order as they were.
If the 'reshape' operation with given parameters is possible and legal, output
the new reshaped matrix; otherwise, output the original matrix.
"""
from typing import List
class Solution:
def matrixReshape(self, nums: List[List[int]], r: int, c: int) -> List[List[int]]:
m, n = len(nums), len(nums[0])
if m * n != r * c:
return nums
ret = []
for i in range(m):
for j in range(n):
if (i * n + j) % c == 0:
ret.append([])
ret[-1].append(nums[i][j])
return ret
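# Quick illustrative check (not part of the original solution file); the
# matrices below are arbitrary examples.
if __name__ == "__main__":
    sol = Solution()
    # 2x2 reshaped to 1x4 keeps the row-traversal order
    assert sol.matrixReshape([[1, 2], [3, 4]], 1, 4) == [[1, 2, 3, 4]]
    # 2x2 -> 2x4 is not legal, so the original matrix comes back unchanged
    assert sol.matrixReshape([[1, 2], [3, 4]], 2, 4) == [[1, 2], [3, 4]]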
| 33.59375
| 86
| 0.645581
|
794a060464dcf3e529a7aca6eb88913e282a1655
| 1,999
|
py
|
Python
|
packages/vaex-arrow/vaex_arrow/dataset.py
|
claforte/vaex
|
adf0d9280c6a931465dd65f1ead6d0466eceb637
|
[
"MIT"
] | 1
|
2019-06-05T00:10:36.000Z
|
2019-06-05T00:10:36.000Z
|
packages/vaex-arrow/vaex_arrow/dataset.py
|
claforte/vaex
|
adf0d9280c6a931465dd65f1ead6d0466eceb637
|
[
"MIT"
] | 1
|
2019-06-03T21:25:01.000Z
|
2019-06-03T21:25:01.000Z
|
packages/vaex-arrow/vaex_arrow/dataset.py
|
claforte/vaex
|
adf0d9280c6a931465dd65f1ead6d0466eceb637
|
[
"MIT"
] | null | null | null |
__author__ = 'maartenbreddels'
import logging
import pyarrow as pa
import pyarrow.parquet as pq
import vaex.dataset
import vaex.file.other
from .convert import column_from_arrow_array
logger = logging.getLogger("vaex_arrow")
class DatasetArrow(vaex.dataset.DatasetLocal):
"""Implements storage using arrow"""
def __init__(self, filename=None, table=None, write=False):
super(DatasetArrow, self).__init__(name=filename, path=filename, column_names=[])
self._write = write
if table is None:
self._load()
else:
self._load_table(table)
def _load(self):
source = pa.memory_map(self.path)
reader = pa.ipc.open_stream(source)
table = pa.Table.from_batches([b for b in reader])
self._load_table(table)
def _load_table(self, table):
self._length_unfiltered = self._length_original = table.num_rows
self._index_end = self._length_original = table.num_rows
for col in table.columns:
name = col.name
# TODO: keep the arrow columns, and support and test chunks
arrow_array = col.data.chunks[0]
column = column_from_arrow_array(arrow_array)
self.columns[name] = column
self.column_names.append(name)
self._save_assign_expression(name, vaex.expression.Expression(self, name))
@classmethod
def can_open(cls, path, *args, **kwargs):
return path.rpartition('.')[2] == 'arrow'
@classmethod
def get_options(cls, path):
return []
@classmethod
def option_to_args(cls, option):
return []
class DatasetParquet(DatasetArrow):
def _load(self):
# might not be optimal, but it works, we can always see if we can
# do mmapping later on
table = pq.read_table(self.path)
self._load_table(table)
vaex.file.other.dataset_type_map["arrow"] = DatasetArrow
vaex.file.other.dataset_type_map["parquet"] = DatasetParquet
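# Illustrative usage sketch (not part of the original module). It exercises the
# extension check and the type-map registration above; 'data.arrow' is only a
# placeholder name, and actually opening a file assumes it exists on disk.
if __name__ == '__main__':
    print(DatasetArrow.can_open('data.arrow'))            # True -> routed to DatasetArrow
    print('parquet' in vaex.file.other.dataset_type_map)  # True after the registration above
    # ds = DatasetArrow('data.arrow')                     # would memory-map and load the file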
| 30.287879
| 89
| 0.66083
|
794a0685c3f5710f03a016656286c6b413dab2c2
| 13,643
|
py
|
Python
|
src/genie/libs/parser/iosxe/tests/test_show_vtp.py
|
nujo/genieparser
|
083b01efc46afc32abe1a1858729578beab50cd3
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxe/tests/test_show_vtp.py
|
nujo/genieparser
|
083b01efc46afc32abe1a1858729578beab50cd3
|
[
"Apache-2.0"
] | 1
|
2020-08-01T00:23:31.000Z
|
2020-08-01T00:40:05.000Z
|
src/genie/libs/parser/iosxe/tests/test_show_vtp.py
|
nujo/genieparser
|
083b01efc46afc32abe1a1858729578beab50cd3
|
[
"Apache-2.0"
] | null | null | null |
# Python
import unittest
from unittest.mock import Mock
# ATS
from pyats.topology import Device
# Metaparser
from genie.metaparser.util.exceptions import SchemaEmptyParserError, \
SchemaMissingKeyError
# Parser
from genie.libs.parser.iosxe.show_vtp import ShowVtpStatus, \
ShowVtpPassword
# ============================================
# Parser for 'show vtp password'
# ============================================
class test_show_vtp_password(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"vtp": {
"configured": False,
}
}
golden_output = {'execute.return_value': '''\
The VTP password is not configured.
'''}
golden_parsed_output_2 = {
"vtp": {
"configured": True,
"password": 'testing',
}
}
golden_output_2 = {'execute.return_value': '''\
VTP Password: testing
'''}
def test_empty(self):
self.device1 = Mock(**self.empty_output)
obj = ShowVtpPassword(device=self.device1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
obj = ShowVtpPassword(device=self.device)
parsed_output = obj.parse()
self.maxDiff = None
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_golden_2(self):
self.device = Mock(**self.golden_output_2)
obj = ShowVtpPassword(device=self.device)
parsed_output_2 = obj.parse()
self.maxDiff = None
self.assertEqual(parsed_output_2,self.golden_parsed_output_2)
# ============================================
# Parser for 'show vtp status'
# ============================================
class test_show_vtp_status(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"vtp": {
"pruning_mode": False,
"device_id": "3820.56ff.c7a2",
"traps_generation": False,
"updater_id": "192.168.234.1",
"updater_interface": "Vl100",
"updater_reason": "lowest numbered VLAN interface found",
"configuration_revision": 55,
"maximum_vlans": 1005,
"md5_digest": '0x2D 0x35 0x38 0x3C 0x3D 0x55 0x62 0x66 0x67 0x70 '\
'0x72 0x74 0x9E 0xDD 0xDE 0xE9',
"existing_vlans": 53,
"enabled": True,
"operating_mode": "server",
"conf_last_modified_time": "12-5-17 09:35:46",
"conf_last_modified_by": "192.168.234.1",
"version": "1",
"version_capable": [1,2,3],
}
}
golden_output = {'execute.return_value': '''\
VTP Version capable : 1 to 3
VTP version running : 1
VTP Domain Name :
VTP Pruning Mode : Disabled
VTP Traps Generation : Disabled
Device ID : 3820.56ff.c7a2
Configuration last modified by 192.168.234.1 at 12-5-17 09:35:46
Local updater ID is 192.168.234.1 on interface Vl100 (lowest numbered VLAN interface found)
Feature VLAN:
--------------
VTP Operating Mode : Server
Maximum VLANs supported locally : 1005
Number of existing VLANs : 53
Configuration Revision : 55
MD5 digest : 0x9E 0x35 0x3C 0x74 0xDD 0xE9 0x3D 0x62
0xDE 0x2D 0x66 0x67 0x70 0x72 0x55 0x38
'''}
golden_parsed_output_2 = {
'vtp': {
'conf_last_modified_by': '0.0.0.0',
'conf_last_modified_time': '0-0-00 00:00:00',
'configuration_revision': 0,
'domain_name': '<>',
'enabled': False,
'existing_vlans': 100,
'maximum_vlans': 1005,
'md5_digest': '0x11 0x22 0x50 0x77 0x99 0xA1 0xB2 0xC3',
'operating_mode': 'transparent',
'pruning_mode': True,
'traps_generation': True,
'version': '1',
'version_capable': ['2']
}
}
golden_output_2 = {'execute.return_value': '''\
VTP Version : running VTP1 (VTP2 capable)
Configuration Revision : 0
Maximum VLANs supported locally : 1005
Number of existing VLANs : 100
VTP Operating Mode : Transparent
VTP Domain Name : <>
VTP Pruning Mode : Enabled
VTP V2 Mode : Disabled
VTP Traps Generation : Enabled
MD5 digest : 0x11 0xA1 0xB2 0x77 0x22 0x50 0xC3 0x99
Configuration last modified by 0.0.0.0 at 0-0-00 00:00:00
'''}
golden_parsed_output_3 = {
'vtp': {
'device_id': '3820.56ff.c7a2',
'feature': {
'mst': {
'configuration_revision': 0,
'enabled': True,
'operating_mode': 'server',
'primary_id': '0000.0000.0000',
},
'unknown': {
'enabled': False,
'operating_mode': 'transparent',
},
'vlan': {
'configuration_revision': 2,
'enabled': True,
'existing_extended_vlans': 0,
'existing_vlans': 100,
'maximum_vlans': 4096,
'md5_digest': '0x15 0x17 0x1A 0x1C 0x25 0x2C ' \
'0x3C 0x48 0x6B 0x70 0x7D 0x87 ' \
'0x92 0xC2 0xC7 0xFC',
'operating_mode': 'primary server',
'primary_description': 'SW1',
'primary_id': '3820.56ff.c7a2',
},
},
'pruning_mode': False,
'traps_generation': False,
'version': '3',
'version_capable': [1, 2, 3]
}
}
golden_output_3 = {'execute.return_value': '''\
VTP Version capable : 1 to 3
VTP version running : 3
VTP Domain Name :
VTP Pruning Mode : Disabled
VTP Traps Generation : Disabled
Device ID : 3820.56ff.c7a2
Feature VLAN:
--------------
VTP Operating Mode : Primary Server
Number of existing VLANs : 100
Number of existing extended VLANs : 0
Maximum VLANs supported locally : 4096
Configuration Revision : 2
Primary ID : 3820.56ff.c7a2
Primary Description : SW1
MD5 digest : 0xC2 0x3C 0x1A 0x2C 0x1C 0x48 0x7D 0xFC
0x6B 0x17 0x15 0x87 0x92 0xC7 0x70 0x25
Feature MST:
--------------
VTP Operating Mode : Server
Configuration Revision : 0
Primary ID : 0000.0000.0000
Primary Description :
MD5 digest :
Feature UNKNOWN:
--------------
VTP Operating Mode : Transparent
'''}
golden_output_4 = {'execute.return_value': '''\
show vtp status
VTP Version capable : 1 to 3
VTP version running : 3
VTP Domain Name : GENIE
VTP Pruning Mode : Disabled
VTP Traps Generation : Disabled
Device ID : 02da.30ff.a878
Feature VLAN:
--------------
VTP Operating Mode : Primary Server
Number of existing VLANs : 40
Number of existing extended VLANs : 0
Maximum VLANs supported locally : 2048
Configuration Revision : 25
Primary ID : 02da.30ff.a878
Primary Description : genie
MD5 digest : 0x3D 0x05 0xEE 0x1F 0x35 0xCC 0x7C 0x74
0x41 0x7A 0xB2 0x1F 0xE9 0x77 0x9A 0xCD
Feature MST:
--------------
VTP Operating Mode : Transparent
Feature UNKNOWN:
--------------
VTP Operating Mode : Transparent
'''
}
golden_parsed_output_4 = {
'vtp': {
'device_id': '02da.30ff.a878',
'domain_name': 'GENIE',
'feature': {
'mst': {
'enabled': False, 'operating_mode': 'transparent'
},
'unknown': {
'enabled': False,
'operating_mode': 'transparent'
},
'vlan': {
'configuration_revision': 25,
'enabled': True,
'existing_extended_vlans': 0,
'existing_vlans': 40,
'maximum_vlans': 2048,
'md5_digest': '0x05 0x1F 0x1F 0x35 0x3D 0x41 '
'0x74 0x77 0x7A 0x7C 0x9A 0xB2 '
'0xCC 0xCD 0xE9 0xEE',
'operating_mode': 'primary server',
'primary_description': 'genie',
'primary_id': '02da.30ff.a878'
}
},
'pruning_mode': False,
'traps_generation': False,
'version': '3',
'version_capable': [1, 2, 3]
}
}
golden_output_5 = {'execute.return_value': '''\
show vtp status
VTP Version capable : 1 to 3
VTP version running : 3
VTP Domain Name : Domain-Name
VTP Pruning Mode : Enabled
VTP Traps Generation : Enabled
Device ID : ffff.aaff.aaaa
Feature VLAN:
--------------
VTP Operating Mode : Client
Number of existing VLANs : 40
Number of existing extended VLANs : 2
Configuration Revision : 13
Primary ID : 3333.11ff.3333
Primary Description : description
MD5 digest :
Feature MST:
--------------
VTP Operating Mode : Transparent
Feature UNKNOWN:
--------------
VTP Operating Mode : Transparent
'''}
golden_parsed_output_5 = {
'vtp': {
'version_capable': [
1,
2,
3
],
'version': '3',
'feature': {
'vlan': {
'operating_mode': 'client',
'enabled': True,
'existing_vlans': 40,
'existing_extended_vlans': 2,
'configuration_revision': 13,
'primary_id': '3333.11ff.3333',
'primary_description': 'description'
},
'mst': {
'operating_mode': 'transparent',
'enabled': False
},
'unknown': {
'operating_mode': 'transparent',
'enabled': False
}
},
'domain_name': 'Domain-Name',
'pruning_mode': True,
'traps_generation': True,
'device_id': 'ffff.aaff.aaaa'
}
}
def test_empty(self):
self.device1 = Mock(**self.empty_output)
obj = ShowVtpStatus(device=self.device1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
obj = ShowVtpStatus(device=self.device)
parsed_output = obj.parse()
self.maxDiff = None
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_golden_2(self):
self.device = Mock(**self.golden_output_2)
obj = ShowVtpStatus(device=self.device)
parsed_output = obj.parse()
self.maxDiff = None
self.assertEqual(parsed_output,self.golden_parsed_output_2)
def test_golden_3(self):
self.device = Mock(**self.golden_output_3)
obj = ShowVtpStatus(device=self.device)
parsed_output = obj.parse()
self.maxDiff = None
self.assertEqual(parsed_output,self.golden_parsed_output_3)
def test_golden_4(self):
self.device = Mock(**self.golden_output_4)
obj = ShowVtpStatus(device=self.device)
parsed_output = obj.parse()
self.maxDiff = None
self.assertEqual(parsed_output,self.golden_parsed_output_4)
def test_golden_5(self):
self.device = Mock(**self.golden_output_5)
obj = ShowVtpStatus(device=self.device)
parsed_output = obj.parse()
self.maxDiff = None
self.assertEqual(parsed_output,self.golden_parsed_output_5)
if __name__ == '__main__':
unittest.main()
| 34.539241
| 99
| 0.47922
|
794a075c4d033339c3dd3c94c3442c16cbdbadb2
| 344
|
py
|
Python
|
daemonator/__init__.py
|
flaviocpontes/daemonator
|
bb70e698acd6ccda2940b80086449b0637aa4ecf
|
[
"X11",
"MIT"
] | null | null | null |
daemonator/__init__.py
|
flaviocpontes/daemonator
|
bb70e698acd6ccda2940b80086449b0637aa4ecf
|
[
"X11",
"MIT"
] | null | null | null |
daemonator/__init__.py
|
flaviocpontes/daemonator
|
bb70e698acd6ccda2940b80086449b0637aa4ecf
|
[
"X11",
"MIT"
] | null | null | null |
from .daemonator import Daemon
__author__ = 'Flávio Cardoso Pontes'
__author_email__ = '<flaviopontes@acerp.org.br>'
__copyright__ = 'Copyright © 2015 Associação de Comunicação Educativa Roquette Pinto - ACERP'
__version_info__ = (0, 4, 1, 1)
__version__ = '.'.join(map(str, __version_info__))
__package__ = 'daemonator'
__all__ = ['Daemon']
| 31.272727
| 93
| 0.755814
|
794a07e0a9fbf13b393dc78d3d79c3a18f9d5ea5
| 12,812
|
py
|
Python
|
common/asserts.py
|
CoderRushil/fyle-www-tests
|
d9a82d87c201fcc76cad73412ea85497d424f6fa
|
[
"MIT"
] | null | null | null |
common/asserts.py
|
CoderRushil/fyle-www-tests
|
d9a82d87c201fcc76cad73412ea85497d424f6fa
|
[
"MIT"
] | null | null | null |
common/asserts.py
|
CoderRushil/fyle-www-tests
|
d9a82d87c201fcc76cad73412ea85497d424f6fa
|
[
"MIT"
] | null | null | null |
from time import sleep
import logging
logger = logging.getLogger(__name__)
def assert_hero_section(browser, section):
h1s = section.find_elements_by_xpath('.//h1')
assert len(h1s) == 1, 'Hero section should have 1 h1'
h1 = h1s[0]
font_size = h1.value_of_css_property('font-size')
if browser.is_desktop():
assert font_size == '50px', 'Hero section h1 font size is wrong'
else:
assert font_size == '30px', 'Hero section h1 font size is wrong'
h2s = section.find_elements_by_xpath('.//h2')
assert len(h2s) == 0, 'Hero section should have no h2s'
def assert_hero_image(browser):
hero_image = browser.find(xpath='//section[contains(@class, "new-hero")]//img')
assert hero_image.is_displayed() is False, 'Hero image is being shown in mobile'
def assert_other_section(browser, section):
cl = section.get_attribute('class')
h2s = section.find_elements_by_xpath('.//h2')
assert len(h2s) == 1, f'Section with class {cl} has {len(h2s)} h2s'
h2 = h2s[0]
text = h2.text
font_size = h2.value_of_css_property('font-size')
font_weight = h2.value_of_css_property('font-weight')
is_logo_section = 'Loved by leading finance' in text
if browser.is_desktop():
if is_logo_section:
assert font_size == '24px', 'Font size of logo section is wrong'
else:
assert font_size == '30px', f'Font size of h2 is wrong for {text}'
else:
if is_logo_section:
assert font_size == '16px', 'Font size of logo section is wrong'
else:
assert font_size == '24px', f'Font size of h2 is wrong for {text}'
assert font_weight == '700', f'Font weight of h2 is wrong for {text}'
# Commented out the other sections typography because of inconsistencies
def assert_typography(browser):
sections = browser.find_many(xpath='//section')
hero_section = sections[0]
# other_sections = sections[1:]
assert_hero_section(browser=browser, section=hero_section)
# for other_section in other_sections:
# assert_other_section(browser=browser, section=other_section)
def assert_vertical_spacing_between(element1=None, element2=None, value=None):
padding_below = int(element1.value_of_css_property('padding-bottom').replace('px', ''))
margin_below = int(element1.value_of_css_property('margin-bottom').replace('px', ''))
space_below = padding_below + margin_below
padding_top = int(element2.value_of_css_property('padding-top').replace('px', ''))
margin_top = int(element2.value_of_css_property('margin-top').replace('px', ''))
space_top = padding_top + margin_top
space_between = space_below + space_top
    assert space_between == value, f"Vertical spacing between '{element1.text[:20]}...' and '{element2.text[:20]}...' is not correct"
def assert_horizontal_spacing_between(element1=None, element2=None, value=None):
padding_right = int(element1.value_of_css_property('padding-right').replace('px', ''))
margin_right = int(element1.value_of_css_property('margin-right').replace('px', ''))
space_right = padding_right + margin_right
padding_left = int(element2.value_of_css_property('padding-left').replace('px', ''))
margin_left = int(element2.value_of_css_property('margin-left').replace('px', ''))
space_left = padding_left + margin_left
space_between = space_right + space_left
assert space_between == value, f"Horizontal spacing between elements '{element1.text[:20]}...' and '{element2.text[:20]}...' is not correct"
def assert_spacing_bottom(element=None, value=None):
padding_below = int(element.value_of_css_property('padding-bottom').replace('px', ''))
margin_below = int(element.value_of_css_property('margin-bottom').replace('px', ''))
space_below = padding_below + margin_below
assert space_below == value, "spacing below is not correct"
def assert_spacing_top(element=None, value=None):
padding_top = int(element.value_of_css_property('padding-top').replace('px', ''))
margin_top = int(element.value_of_css_property('margin-top').replace('px', ''))
space_top = padding_top + margin_top
assert space_top == value, "spacing top is not correct"
def assert_spacing_right(element=None, value=None):
padding_right = int(element.value_of_css_property('padding-right').replace('px', ''))
margin_right = int(element.value_of_css_property('margin-right').replace('px', ''))
space_top = padding_right + margin_right
assert space_top == value, f"spacing right is not correct for '{element.text}'"
def assert_spacing_left(element=None, value=None):
padding_left = int(element.value_of_css_property('padding-left').replace('px', ''))
margin_left = int(element.value_of_css_property('margin-left').replace('px', ''))
space_top = padding_left + margin_left
assert space_top == value, "spacing left is not correct"
def assert_thank_you_modal(browser, ty_message, demoform=None):
e = browser.find(xpath="//div[contains(@id, 'contact-us-ty-modal')]")
    assert e and e.is_displayed(), "Thank you modal is not displayed"
if demoform:
ty_img = browser.find(xpath="//div[contains(@id, 'contact-us-ty-modal')]//div[contains(@class, 'demo-form-thank-you-img')]")
else:
ty_img = browser.find(xpath="//div[contains(@id, 'contact-us-ty-modal')]//div[not(contains(@class, 'demo-form-thank-you-img'))]")
    assert ty_img and ty_img.is_displayed(), "Thank you image is not correct"
ty_text = browser.find(xpath="//div[contains(@id, 'contact-us-ty-modal')]//span[contains(@class, 'ty-box')]").text
assert ty_text == ty_message, "Thank you message is not correct"
def assert_collapsible_feature_comparison_table(browser):
section = browser.find(xpath='//section[contains(@class, "alternative-fyle-comparison")]', scroll=True)
assert section, 'Collapsible table not found'
divs = browser.find_many(xpath='//div[contains(@class, "accordion-toggle")]')
for i, div in enumerate(divs):
div_class_names = div.get_attribute('class')
sub_contents_div_xpath = f'//div[contains(@id, "feature-main-row{i+1}")]'
# Check if the feature section is initially collapsed
# If it's collapsed, then check if it's opening up and it's sub-sections are displayed or not
# Else it's open, then check if it's collapsing successfully
if 'accordion-toggle' in div_class_names and 'collapsed' in div_class_names:
browser.click_element(div)
feature_contents = browser.find(xpath=sub_contents_div_xpath)
assert feature_contents.is_displayed(), f'Unable to see contents of feature: {div.text}'
else:
browser.click_element(div)
feature_contents = browser.find(xpath=sub_contents_div_xpath)
assert feature_contents.is_displayed() is False, f'Unable to collapse feature: {div.text}'
browser.scroll_up_or_down(50)
def assert_cards_redirection(browser, cards_xpath, redirect_to_urls, same_tab=False):
if same_tab:
for i, card_elem in enumerate(cards_xpath):
card = browser.find(card_elem, scroll=True)
browser.scroll_up_or_down(-100)
browser.click_element(card)
assert browser.get_current_url() == redirect_to_urls[i], "Redirecting to wrong page"
browser.back()
else:
cards = browser.find_many(xpath=cards_xpath)
assert len(cards) > 0, 'Wrong xpath given for cards'
for card in cards:
browser.click_element(card)
browser.switch_tab_next(1)
assert browser.get_current_url() in redirect_to_urls, 'Redirecting to wrong page'
browser.close_windows()
if browser.is_desktop() is False:
browser.scroll_up_or_down(300)
sleep(2)
def assert_cta_click_and_modal_show(browser, cta_section_xpath, cta_xpath):
section = browser.find(xpath=cta_section_xpath, scroll=True)
assert section and section.is_displayed(), 'Section not found'
browser.click(xpath=cta_xpath)
form_modal = browser.find(xpath='//div[contains(@class, "modal-content")]', scroll=True)
assert form_modal and form_modal.is_displayed(), 'Form modal not visible'
def assert_overflowing(browser):
assert browser.check_horizontal_overflow(), f'Horizontal Overflow is there in the page {browser.get_current_url()}'
def assert_customer_logo(browser):
browser.set_local_storage('ipInfo', '{"ip":"157.50.160.253","country":"India"}')
browser.refresh()
sleep(3)
indian_logo = browser.find("//div[contains(@class, 'customer-logo-india')]")
us_logo = browser.find("//div[contains(@class, 'customer-logo-non-india')]")
assert indian_logo.is_displayed() and not us_logo.is_displayed(), 'Found an US image in Indian IP'
browser.set_local_storage('ipInfo', '{"ip":"157.50.160.253","country":"United States"}')
browser.refresh()
sleep(3)
indian_logo = browser.find("//div[contains(@class, 'customer-logo-india')]")
us_logo = browser.find("//div[contains(@class, 'customer-logo-non-india')]")
assert us_logo.is_displayed() and not indian_logo.is_displayed(), 'Found an Indian image in US IP'
def assert_badges(browser):
total_badges = browser.find_many("//div[contains(@class, 'fyle-badge')]")
visible_badge = 0
for badge in total_badges:
if badge.is_displayed():
visible_badge += 1
assert visible_badge == 1, 'Badges aren\'t displayed properly.'
def get_active_index(carousel_items):
active_item = []
for i, item in enumerate(carousel_items):
if "active" in item.get_attribute("class"):
active_item.append(item)
active_index = i
no_of_active_items = len(active_item)
assert no_of_active_items != 0 and no_of_active_items <= 1, 'UI broken in customer testimonial section'
return active_index
def assert_customer_testimonial(browser):
carousel_items = browser.find_many("//section[contains(@class, 'customer-testimonial')]//div[contains(@class, 'carousel-item')]")
carousel_length = len(carousel_items)
current_active_index = get_active_index(carousel_items)
browser.find(xpath="//section[contains(@class, 'customer-testimonial')]", scroll=True)
sleep(1)
right_arrow = browser.find(xpath="//section[contains(@class, 'customer-testimonial')]//div[contains(@id, 'customer-carousel')]//a[contains(@class, 'right')]")
browser.find(xpath="//section[contains(@class, 'customer-testimonial')]//div[contains(@id, 'customer-carousel')]//a[contains(@class, 'right')]//span//img")
browser.click_element(right_arrow)
active_index = get_active_index(carousel_items)
assert active_index == ((current_active_index + 1) % carousel_length), 'Right click operation is not working'
current_active_index = active_index
left_arrow = browser.find(xpath="//section[contains(@class, 'customer-testimonial')]//div[contains(@id, 'customer-carousel')]//a[contains(@class, 'left')]")
browser.find(xpath="//section[contains(@class, 'customer-testimonial')]//div[contains(@id, 'customer-carousel')]//a[contains(@class, 'left')]//span//img")
browser.click_element(left_arrow)
active_index = get_active_index(carousel_items)
assert active_index == ((current_active_index + (carousel_length - 1)) % carousel_length), 'Left click operation is not working'
def assert_spacing_between_text_image(browser, section_xpath, feature_section_rows_xpath, slider_feature_section=False):
section = browser.find(xpath=section_xpath, scroll=True)
assert_spacing_top(section, 80)
assert_spacing_bottom(section, 80)
horizontal_spacing_value = 60 if slider_feature_section is False else 40
rows = browser.find_many(xpath=feature_section_rows_xpath)
columns = browser.find_many(xpath=f"{feature_section_rows_xpath}//div[contains(@class, 'col')]")
# Check horizontal spacing
for idx, val in enumerate(columns):
# Iterate on all divs having text as well as image
        # Checking if idx is odd or even, and thus checking spacing for alternate divs
if idx^1 == idx+1:
assert_horizontal_spacing_between(element1=columns[idx], element2=columns[idx+1], value=horizontal_spacing_value)
else:
continue
# Check vertical spacing between rows
for idx, row in enumerate(rows):
if idx != len(rows)-1:
assert_vertical_spacing_between(element1=rows[idx], element2=rows[idx+1], value=80)
def assert_click_scroll_into_view(browser, clickable_elements_xpath):
clickable_elements = browser.find_many(xpath=clickable_elements_xpath)
for ele in clickable_elements:
browser.click_element(ele)
        assert ele.is_displayed(), f"Clicking element '{ele.text}' scrolled to the wrong section"
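# Minimal self-contained sketch (not part of the original module) of what the
# spacing helpers above actually add up. _FakeElement only mimics the two
# WebElement attributes the helper touches; the pixel values are arbitrary.
class _FakeElement:
    def __init__(self, css, text=''):
        self._css = css
        self.text = text

    def value_of_css_property(self, name):
        return self._css.get(name, '0px')


def _example_spacing_check():
    above = _FakeElement({'padding-bottom': '20px', 'margin-bottom': '10px'})
    below = _FakeElement({'padding-top': '30px', 'margin-top': '20px'})
    # 20 + 10 below the first element plus 30 + 20 above the second = 80
    assert_vertical_spacing_between(element1=above, element2=below, value=80)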
| 54.288136
| 162
| 0.70715
|
794a087180168dc6794887566a90294fed73c259
| 6,669
|
py
|
Python
|
uptrop/cloud_slice_no2.py
|
eamarais/erc-uptrop
|
d7340ca7ad4be2e9b7e98da0847964c2a532e1ca
|
[
"MIT"
] | 3
|
2020-10-09T07:13:40.000Z
|
2022-02-15T13:14:36.000Z
|
uptrop/cloud_slice_no2.py
|
eamarais/erc-uptrop
|
d7340ca7ad4be2e9b7e98da0847964c2a532e1ca
|
[
"MIT"
] | 4
|
2020-07-04T18:56:25.000Z
|
2022-02-14T13:14:47.000Z
|
uptrop/cloud_slice_no2.py
|
eamarais/erc-uptrop
|
d7340ca7ad4be2e9b7e98da0847964c2a532e1ca
|
[
"MIT"
] | 3
|
2020-10-09T07:17:16.000Z
|
2022-01-09T09:36:46.000Z
|
#!/usr/bin/python
''' Cloud-slicing steps applied to a cluster of data using as input the partial NO2 columns in molecules/m2 and cloud top heights in hPa.
If successful, the output is NO2 mixing ratios in pptv. Other output is the estimated error on the NO2 mixing ratio and the mean cloud top pressure (hPa) for the cluster.
If the cloud-slicing step is unsuccessful, all values are NaN and a reason the cloud-slicing failed is provided.
Use as part of a Python script:
::
from uptrop.cloud_slice_ut_no2 import cldslice, CLOUD_SLICE_ERROR_ENUM
# Dummy functions
partial_columns = get_partial_cloud_columns()
cloud_top_heights = get_cloud_top_heights()
ratio, ratio_error, exception_number, mean_cloud_error = cldslice(partial_columns, cloud_top_heights)
if exception_number != 0:
print(CLOUD_SLICE_ERROR_ENUM[exception_number])
print("Cloud ratio: {}".format(ratio))
print("Cloud ratio error: {}".format(ratio_error))
print("Mean cloud error: {}".format(mean_cloud_error))
'''
# Import relevant packages:
import sys
import os
import numpy as np
# Import hack
sys.path.append(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'..'))
from uptrop.bootstrap import rma
from uptrop.constants import AVOGADRO as na
from uptrop.constants import G as g
from uptrop.constants import MW_AIR as mmair
from scipy import stats
CLOUD_SLICE_ERROR_ENUM = {
1: "too_few_points",
2: "low_cloud_height_range",
3: "low_cloud_height_std",
4: "large_error",
5: "much_less_than_zero",
6: "no2_outlier",
7: "non_uni_strat"
}
def cldslice(pcolno2,cldtophgt,cld_diff_thold):
"""
Compute upper troposphere NO2 using partial columns above
cloudy scenes.
Determine NO2 mixing ratio by regressing NO2 partial columns
against cloud-top heights over cloudy scenes.
:param pcolno2: vectors of partial columns in molec/m2
:type pcolno2: list of floats
:param cldtophgt: corresponding cloud top heights in hPa.
:type cldtophgt: list of floats
:return: NO2 volumetric mixing ratio, corresponding estimated error on the
cloud-sliced NO2 value, a number to identify which filtering
criteria led to loss of data in the case that the cloud-sliced
        NO2 value is NaN, and the mean cloud pressure of data retained
after 10th and 90th percentile filtering.
:rtype: tuple
"""
# Initialize:
utmrno2=0.0
utmrno2err=0.0
error_state=0
# Define factor to convert slope of NO2 partial column vs pressure
# to VMR:
den2mr=np.divide((np.multiply(g,mmair)),na)
# Get 10th and 90th percentiles of data population:
p10=np.percentile(pcolno2,10)
p90=np.percentile(pcolno2,90)
    # Remove outliers determined as falling outside the 10th and 90th
    # percentile range. Not including this step, or using the 5th and 95th
    # percentiles instead, leads to an overestimate in cloud-sliced UT NO2
    # compared to the "truth":
sind=np.where((pcolno2>p10)&(pcolno2<p90))[0]
    # Trim the data to remove outliers:
pcolno2=pcolno2[sind]
cldtophgt=cldtophgt[sind]
# Cloud pressure mean:
mean_cld_pres=np.mean(cldtophgt)
# Get number of points in vector:
npoints=len(cldtophgt)
    # Only consider data with more than 10 points for reasonably
    # robust statistics. This step is added to account for data loss
    # when removing outliers:
if npoints<=10:
error_state=1
utmrno2=np.nan
utmrno2err=np.nan
return (utmrno2, utmrno2err, error_state, mean_cld_pres)
# Get cloud top height standard deviation:
stdcld=np.std(cldtophgt)
# Get cloud top height range:
diffcld=np.nanmax(cldtophgt)-np.nanmin(cldtophgt)
# Only consider scenes with a dynamic range of clouds:
# (i) Cloud range:
if diffcld<=cld_diff_thold: #140:
error_state=2
utmrno2=np.nan
utmrno2err=np.nan
return (utmrno2, utmrno2err, error_state, mean_cld_pres)
# (ii) Cloud standard deviation:
if stdcld<=30:
error_state=3
utmrno2=np.nan
utmrno2err=np.nan
return (utmrno2, utmrno2err, error_state, mean_cld_pres)
# Get regression statistics:
# Partial NO2 column (molec/m2) vs cloud top height (hPa):
# 300 iterations of regression chosen to compromise between
# statistics and computational efficiency:
result=rma(cldtophgt*1e2,pcolno2,len(pcolno2),300)
# Try Theil-Sen regressor instead:
# ==== CONSIDER REMOVING OUTLIER FILTER ABOVE!!! ===
# Test with GEOS-Chem.
#result = stats.theilslopes(pcolno2, cldtophgt*1e2, 0.95)
#print(result)
#slope = result[0]
#slope_err = result[2:3] # lo_slope, up_slope for 95% CI.
# Remove data with relative error > 100%:
if np.absolute(np.divide(result[2], result[0]))>1.0:
error_state=4
utmrno2=np.nan
utmrno2err=np.nan
return (utmrno2, utmrno2err, error_state, mean_cld_pres)
# Account for negative values:
# Set points with sum of slope and error less than zero to nan.
# This is to account for noise in the data that hover near zero.
# This could be reduced to a single line. Eventually update to:
#if result[0]<0 and np.add(result[0],result[2])<0):
# error_state=5
# utmrno2=np.nan
# utmrno2err=np.nan
# return (utmrno2, utmrno2err, error_state, mean_cld_pres)
if result[0]<0 and (not np.isnan(utmrno2)):
if (np.add(result[0],result[2])<0):
error_state=5
utmrno2=np.nan
utmrno2err=np.nan
return (utmrno2, utmrno2err, error_state, mean_cld_pres)
# Proceed with estimating NO2 mixing ratios for retained data:
#if not np.isnan(utmrno2):
slope=result[0]
#slope=np.multiply(slope,sf)
slope_err=result[2]
#slope_err=np.multiply(slope_err,sf)
# Convert slope to mol/mol:
utmrno2=np.multiply(slope,den2mr)
# Convert error to mol/mol:
utmrno2err=np.multiply(slope_err,den2mr)
# Convert UT NO2 from mol/mol to ppt:
utmrno2=np.multiply(utmrno2,1e+12)
# Convert UT NO2 error from mol/mol to ppt
utmrno2err=np.multiply(utmrno2err,1e+12)
# Finally, remove outliers in the cloud-sliced NO2
# 200 pptv threshold is chosen, as far from likely range.
# Scale factor applied to TROPOMI UT NO2 to account for
# positive bias in free tropospheric NO2:
if utmrno2>200:
error_state=6
utmrno2=np.nan
utmrno2err=np.nan
return (utmrno2, utmrno2err, error_state, mean_cld_pres)
else:
return (utmrno2, utmrno2err, error_state, mean_cld_pres)
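# Minimal synthetic-data sketch (not part of the original module). The numbers
# are invented purely to satisfy the sampling filters above (more than 10
# points after percentile trimming, cloud-top range above the threshold and
# standard deviation above 30 hPa); they carry no physical significance.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    cld = np.linspace(250.0, 600.0, 40)                     # cloud-top pressures [hPa]
    col = 1.0e18 + 1.0e16 * cld + rng.normal(0, 1e16, 40)   # partial columns [molec/m2]
    vmr, vmr_err, err_num, mean_p = cldslice(col, cld, 140)
    if err_num != 0:
        print('Cloud slicing failed: ' + CLOUD_SLICE_ERROR_ENUM[err_num])
    else:
        print('UT NO2 = {:.1f} +/- {:.1f} pptv at {:.0f} hPa'.format(vmr, vmr_err, mean_p))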
| 34.734375
| 170
| 0.690808
|
794a08ef810a1806de7b2a758ac31eca384ff250
| 4,239
|
py
|
Python
|
src/batou/lib/mercurial.py
|
ZeitOnline/batou
|
cade3526e7979a53d47bb020c5191702972ff2ff
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
src/batou/lib/mercurial.py
|
ZeitOnline/batou
|
cade3526e7979a53d47bb020c5191702972ff2ff
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2020-03-06T08:37:18.000Z
|
2020-04-24T06:33:18.000Z
|
src/batou/lib/mercurial.py
|
ZeitOnline/batou
|
cade3526e7979a53d47bb020c5191702972ff2ff
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
from batou import UpdateNeeded, output
from batou.component import Component
from batou.lib.file import Directory
from batou.utils import CmdExecutionError
import os.path
import re
class Clone(Component):
namevar = 'url'
target = '.'
revision = None
branch = None
vcs_update = True
_revision_pattern = re.compile(r'changeset: +\d+:([a-f0-9]+)')
def configure(self):
if (not self.revision_or_branch) or (self.revision and self.branch):
raise ValueError(
'Clone(%s) needs exactly one of revision or branch' % self.url)
self.target = self.map(self.target)
self += Directory(self.target)
def verify(self):
with self.chdir(self.target):
if not os.path.exists('.hg'):
raise UpdateNeeded()
if not self.vcs_update:
return
if self.has_outgoing_changesets():
output.annotate(
'Hg clone at {} has outgoing changesets.'.format(
self.target), red=True)
if self.has_changes():
output.annotate(
'Hg clone at {} is dirty, going to lose changes.'.format(
self.target), red=True)
raise UpdateNeeded()
if self.revision:
long_rev = len(self.revision) == 40
if self.current_revision(long_rev) != self.revision:
raise UpdateNeeded()
if (self.branch and (
self.current_branch() != self.branch or
self.has_incoming_changesets())):
raise UpdateNeeded()
@property
def revision_or_branch(self):
# Mercurial often takes either a revision or a branch.
return self.revision or self.branch
def current_revision(self, long=False):
debug = '--debug' if long else ''
stdout, stderr = self.cmd(
self.expand(
'LANG=C hg --cwd {{component.target}} {{debug}} parent | '
'grep changeset:',
debug=debug))
match = self._revision_pattern.search(stdout)
if not match:
return None
return match.group(1)
def current_branch(self):
stdout, stderr = self.cmd('hg branch')
return stdout.strip()
def has_incoming_changesets(self):
try:
self.cmd('hg incoming -q -l1')
except CmdExecutionError as e:
if e.returncode == 1:
return False
raise
return True
def has_outgoing_changesets(self):
try:
with self.chdir(self.target):
self.cmd('hg outgoing -q -l1')
except CmdExecutionError as e:
if e.returncode == 1:
return False
raise
return True
def has_changes(self):
with self.chdir(self.target):
stdout, stderr = self.cmd('hg status')
return bool(stdout.strip())
def update(self):
with self.chdir(self.target):
if not os.path.exists('.hg'):
self.cmd(self.expand(
'hg clone -u {{component.revision_or_branch}} '
'{{component.url}} .'))
return
self.cmd(self.expand(
'hg pull --rev {{component.revision_or_branch}}'))
for filepath in self.untracked_files():
os.unlink(os.path.join(self.target, filepath))
self.cmd(self.expand(
'hg update --clean --rev {{component.revision_or_branch}}'))
def untracked_files(self):
stdout, stderr = self.cmd('hg status -q -u')
items = (line.split(None, 1) for line in stdout.splitlines())
return [filepath for status, filepath in items if status == '?']
def last_updated(self):
with self.chdir(self.target):
if not os.path.exists('.hg'):
return None
stdout, stderr = self.cmd(
'hg log -r %s --template "{date|hgdate}\n"' %
self.current_revision())
timestamp, offset = stdout.split()
return float(timestamp) - float(offset)
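# Illustrative usage sketch (not part of the original module): how a deployment
# component would typically pull in a clone. The URL, target directory and
# branch name are placeholders only.
class ExampleDeployment(Component):

    def configure(self):
        # exactly one of branch= or revision= must be given (see Clone.configure)
        self += Clone('https://hg.example.com/project',
                      target='project', branch='default')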
| 33.377953
| 79
| 0.544468
|
794a0906ad2331288671df2161069508cbb11720
| 32,856
|
py
|
Python
|
dlkit/abstract_osid/assessment_authoring/queries.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 2
|
2018-02-23T12:16:11.000Z
|
2020-10-08T17:54:24.000Z
|
dlkit/abstract_osid/assessment_authoring/queries.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 87
|
2017-04-21T18:57:15.000Z
|
2021-12-13T19:43:57.000Z
|
dlkit/abstract_osid/assessment_authoring/queries.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 1
|
2018-03-01T16:44:25.000Z
|
2018-03-01T16:44:25.000Z
|
"""Implementations of assessment.authoring abstract base class queries."""
# pylint: disable=invalid-name
# Method names comply with OSID specification.
# pylint: disable=no-init
# Abstract classes do not define __init__.
# pylint: disable=too-few-public-methods
# Some interfaces are specified as 'markers' and include no methods.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
# pylint: disable=too-many-arguments
# Argument signature defined in specification.
# pylint: disable=duplicate-code
# All apparent duplicates have been inspected. They aren't.
import abc
class AssessmentPartQuery:
"""This is the query for searching assessment parts.
Each method match request produces an ``AND`` term while multiple
invocations of a method produces a nested ``OR``.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def match_assessment_id(self, assessment_id, match):
"""Sets the assessment ``Id`` for this query.
:param assessment_id: an assessment ``Id``
:type assessment_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``assessment_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_assessment_id_terms(self):
"""Clears all assessment ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
assessment_id_terms = property(fdel=clear_assessment_id_terms)
@abc.abstractmethod
def supports_assessment_query(self):
"""Tests if an ``AssessmentQuery`` is available.
:return: ``true`` if an assessment query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_assessment_query(self):
"""Gets the query for an assessment.
Multiple retrievals produce a nested ``OR`` term.
:return: the assessment query
:rtype: ``osid.assessment.AssessmentQuery``
:raise: ``Unimplemented`` -- ``supports_assessment_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_query()`` is ``true``.*
"""
return # osid.assessment.AssessmentQuery
assessment_query = property(fget=get_assessment_query)
@abc.abstractmethod
def clear_assessment_terms(self):
"""Clears all assessment terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
assessment_terms = property(fdel=clear_assessment_terms)
@abc.abstractmethod
def match_parent_assessment_part_id(self, assessment_part_id, match):
"""Sets the assessment part ``Id`` for this query.
:param assessment_part_id: an assessment part ``Id``
:type assessment_part_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``assessment_part_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_parent_assessment_part_id_terms(self):
"""Clears all assessment part ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
parent_assessment_part_id_terms = property(fdel=clear_parent_assessment_part_id_terms)
@abc.abstractmethod
def supports_parent_assessment_part_query(self):
"""Tests if an ``AssessmentPartQuery`` is available.
:return: ``true`` if an assessment part query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_parent_assessment_part_query(self):
"""Gets the query for an assessment part.
Multiple retrievals produce a nested ``OR`` term.
:return: the assessment part query
:rtype: ``osid.assessment.authoring.AssessmentPartQuery``
:raise: ``Unimplemented`` -- ``supports_parent_assessment_part_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_parent_assessment_part_query()`` is ``true``.*
"""
return # osid.assessment.authoring.AssessmentPartQuery
parent_assessment_part_query = property(fget=get_parent_assessment_part_query)
@abc.abstractmethod
def match_any_parent_assessment_part(self, match):
"""Matches assessment parts with any parent assessment part.
:param match: ``true`` to match assessment parts with any parent, ``false`` to match assessment parts with no parents
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_parent_assessment_part_terms(self):
"""Clears all assessment part terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
parent_assessment_part_terms = property(fdel=clear_parent_assessment_part_terms)
@abc.abstractmethod
def match_section(self, match):
"""Matches assessment parts that are also used as sections.
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_section_terms(self):
"""Clears all section terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
section_terms = property(fdel=clear_section_terms)
@abc.abstractmethod
def match_weight(self, low, high, match):
"""Matches assessment parts that fall in between the given weights inclusive.
:param low: low end of range
:type low: ``cardinal``
:param high: high end of range
:type high: ``cardinal``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``InvalidArgument`` -- ``high`` is less than ``low``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def match_any_weight(self, match):
"""Matches assessment parts with any weight assigned.
        :param match: ``true`` to match assessment parts with any weight, ``false`` to match assessment parts with no weight
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_weight_terms(self):
"""Clears all weight terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
weight_terms = property(fdel=clear_weight_terms)
@abc.abstractmethod
def match_allocated_time(self, low, high, match):
"""Matches assessment parts hose allocated time falls in between the given times inclusive.
:param low: low end of range
:type low: ``osid.calendaring.Duration``
:param high: high end of range
:type high: ``osid.calendaring.Duration``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``InvalidArgument`` -- ``high`` is less than ``low``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def match_any_allocated_time(self, match):
"""Matches assessment parts with any time assigned.
        :param match: ``true`` to match assessment parts with any allocated time, ``false`` to match assessment parts with no allocated time
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_allocated_time_terms(self):
"""Clears all allocated time terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
allocated_time_terms = property(fdel=clear_allocated_time_terms)
@abc.abstractmethod
def match_child_assessment_part_id(self, assessment_part_id, match):
"""Sets the assessment part ``Id`` for this query.
:param assessment_part_id: an assessment part ``Id``
:type assessment_part_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``assessment_part_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_child_assessment_part_id_terms(self):
"""Clears all assessment part ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
child_assessment_part_id_terms = property(fdel=clear_child_assessment_part_id_terms)
@abc.abstractmethod
def supports_child_assessment_part_query(self):
"""Tests if an ``AssessmentPartQuery`` is available.
:return: ``true`` if an assessment part query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_child_assessment_part_query(self):
"""Gets the query for an assessment part.
Multiple retrievals produce a nested ``OR`` term.
:return: the assessment part query
:rtype: ``osid.assessment.authoring.AssessmentPartQuery``
:raise: ``Unimplemented`` -- ``supports_child_assessment_part_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_child_assessment_part_query()`` is ``true``.*
"""
return # osid.assessment.authoring.AssessmentPartQuery
child_assessment_part_query = property(fget=get_child_assessment_part_query)
@abc.abstractmethod
def match_any_child_assessment_part(self, match):
"""Matches assessment parts with any child assessment part.
:param match: ``true`` to match assessment parts with any children, ``false`` to match assessment parts with no children
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_child_assessment_part_terms(self):
"""Clears all assessment part terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
child_assessment_part_terms = property(fdel=clear_child_assessment_part_terms)
@abc.abstractmethod
def match_bank_id(self, bank_id, match):
"""Matches constrainers mapped to the bank.
:param bank_id: the bank ``Id``
:type bank_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_bank_id_terms(self):
"""Clears the bank ``Id`` query terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
bank_id_terms = property(fdel=clear_bank_id_terms)
@abc.abstractmethod
def supports_bank_query(self):
"""Tests if an ``BankQuery`` is available.
:return: ``true`` if a bank query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_bank_query(self):
"""Gets the query for a bank.
Multiple retrievals produce a nested ``OR`` term.
:return: the bank query
:rtype: ``osid.assessment.BankQuery``
:raise: ``Unimplemented`` -- ``supports_bank_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_bank_query()`` is ``true``.*
"""
return # osid.assessment.BankQuery
bank_query = property(fget=get_bank_query)
@abc.abstractmethod
def clear_bank_terms(self):
"""Clears the bank query terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
bank_terms = property(fdel=clear_bank_terms)
@abc.abstractmethod
def get_assessment_part_query_record(self, assessment_part_record_type):
"""Gets the assessment part query record corresponding to the given ``AssessmentPart`` record ``Type``.
Multiple retrievals produce a nested ``OR`` term.
:param assessment_part_record_type: an assessment part record type
:type assessment_part_record_type: ``osid.type.Type``
:return: the assessment part query record
:rtype: ``osid.assessment.authoring.records.AssessmentPartQueryRecord``
:raise: ``NullArgument`` -- ``assessment_part_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(assessment_part_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.authoring.records.AssessmentPartQueryRecord
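# --- Illustrative usage sketch (added; not part of the OSID specification).
# A concrete query would be obtained from a provider session; the session
# object and its method names below are assumptions for illustration only. ---
#
#   query = session.get_assessment_part_query()
#   query.match_section(True)           # AND term: only parts used as sections
#   query.match_weight(1, 10, True)     # AND term: weight in [1, 10] ...
#   query.match_weight(50, 60, True)    # ... ORed with weight in [50, 60]
#   parts = session.get_assessment_parts_by_query(query)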
class SequenceRuleQuery:
"""This is the query for searching sequence rules.
    Each method match specifies an ``AND`` term while multiple
invocations of the same method produce a nested ``OR``.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def match_assessment_part_id(self, assessment_part_id, match):
"""Sets the assessment part ``Id`` for this query.
:param assessment_part_id: an assessment part ``Id``
:type assessment_part_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``assessment_part_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_assessment_part_id_terms(self):
"""Clears all assessment part ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
assessment_part_id_terms = property(fdel=clear_assessment_part_id_terms)
@abc.abstractmethod
def supports_assessment_part_query(self):
"""Tests if an ``AssessmentPartQuery`` is available.
:return: ``true`` if an assessment part query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_assessment_part_query(self):
"""Gets the query for an assessment part.
Multiple retrievals produce a nested ``OR`` term.
:return: the assessment part query
:rtype: ``osid.assessment.authoring.AssessmentPartQuery``
:raise: ``Unimplemented`` -- ``supports_assessment_part_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_part_query()`` is ``true``.*
"""
return # osid.assessment.authoring.AssessmentPartQuery
assessment_part_query = property(fget=get_assessment_part_query)
@abc.abstractmethod
def clear_assessment_part_terms(self):
"""Clears all assessment part terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
assessment_part_terms = property(fdel=clear_assessment_part_terms)
@abc.abstractmethod
def match_next_assessment_part_id(self, assessment_part_id, match):
"""Sets the assessment part ``Id`` for this query.
:param assessment_part_id: an assessment part ``Id``
:type assessment_part_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``assessment_part_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_next_assessment_part_id_terms(self):
"""Clears all assessment part ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
next_assessment_part_id_terms = property(fdel=clear_next_assessment_part_id_terms)
@abc.abstractmethod
def supports_next_assessment_part_query(self):
"""Tests if an ``AssessmentPartQuery`` is available.
:return: ``true`` if an assessment part query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_next_assessment_part_query(self):
"""Gets the query for an assessment part.
Multiple retrievals produce a nested ``OR`` term.
:return: the assessment part query
:rtype: ``osid.assessment.authoring.AssessmentPartQuery``
:raise: ``Unimplemented`` -- ``supports_next_assessment_part_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_next_assessment_part_query()`` is ``true``.*
"""
return # osid.assessment.authoring.AssessmentPartQuery
next_assessment_part_query = property(fget=get_next_assessment_part_query)
@abc.abstractmethod
def clear_next_assessment_part_terms(self):
"""Clears all assessment part terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
next_assessment_part_terms = property(fdel=clear_next_assessment_part_terms)
@abc.abstractmethod
def match_minimum_score(self, low, high, match):
"""Matches minimum scores that fall in between the given scores inclusive.
:param low: low end of range
:type low: ``cardinal``
:param high: high end of range
:type high: ``cardinal``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``InvalidArgument`` -- ``high`` is less than ``low``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def match_any_minimum_score(self, match):
"""Matches assessment parts with any minimum score assigned.
:param match: ``true`` to match assessment parts with any minimum score, ``false`` to match assessment parts with no minimum score
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_minimum_score_terms(self):
"""Clears all minimum score terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
minimum_score_terms = property(fdel=clear_minimum_score_terms)
@abc.abstractmethod
def match_maximum_score(self, low, high, match):
"""Matches maximum scores that fall in between the given scores inclusive.
:param low: low end of range
:type low: ``cardinal``
:param high: high end of range
:type high: ``cardinal``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``InvalidArgument`` -- ``high`` is less than ``low``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def match_any_maximum_score(self, match):
"""Matches assessment parts with any maximum score assigned.
:param match: ``true`` to match assessment parts with any maximum score, ``false`` to match assessment parts with no maximum score
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_maximum_score_terms(self):
"""Clears all maximum score terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
maximum_score_terms = property(fdel=clear_maximum_score_terms)
@abc.abstractmethod
def match_cumulative(self, match):
"""Matches cumulative rules.
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_cumulative_terms(self):
"""Clears all cumulative terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
cumulative_terms = property(fdel=clear_cumulative_terms)
@abc.abstractmethod
def match_applied_assessment_part_id(self, assessment_part_id, match):
"""Sets the assessment part ``Id`` for this query.
:param assessment_part_id: an assessment part ``Id``
:type assessment_part_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``assessment_part_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_applied_assessment_part_id_terms(self):
"""Clears all assessment part ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
applied_assessment_part_id_terms = property(fdel=clear_applied_assessment_part_id_terms)
@abc.abstractmethod
def supports_applied_assessment_part_query(self):
"""Tests if an ``AssessmentPartQuery`` is available.
:return: ``true`` if an assessment part query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_applied_assessment_part_query(self):
"""Gets the query for an assessment part.
Multiple retrievals produce a nested ``OR`` term.
:return: the assessment part query
:rtype: ``osid.assessment.authoring.AssessmentPartQuery``
:raise: ``Unimplemented`` -- ``supports_applied_assessment_part_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_applied_assessment_part_query()`` is ``true``.*
"""
return # osid.assessment.authoring.AssessmentPartQuery
applied_assessment_part_query = property(fget=get_applied_assessment_part_query)
@abc.abstractmethod
def match_any_applied_assessment_part(self, match):
"""Matches assessment parts with any applied assessment part.
:param match: ``true`` to match assessment parts with any applied assessment part, ``false`` to match assessment parts with no applied assessment parts
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_applied_assessment_part_terms(self):
"""Clears all assessment part terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
applied_assessment_part_terms = property(fdel=clear_applied_assessment_part_terms)
@abc.abstractmethod
def match_bank_id(self, bank_id, match):
"""Matches constrainers mapped to the bank.
:param bank_id: the bank ``Id``
:type bank_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_bank_id_terms(self):
"""Clears the bank ``Id`` query terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
bank_id_terms = property(fdel=clear_bank_id_terms)
@abc.abstractmethod
def supports_bank_query(self):
"""Tests if an ``BankQuery`` is available.
:return: ``true`` if a bank query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_bank_query(self):
"""Gets the query for a bank.
Multiple retrievals produce a nested ``OR`` term.
:return: the bank query
:rtype: ``osid.assessment.BankQuery``
:raise: ``Unimplemented`` -- ``supports_bank_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_bank_query()`` is ``true``.*
"""
return # osid.assessment.BankQuery
bank_query = property(fget=get_bank_query)
@abc.abstractmethod
def clear_bank_terms(self):
"""Clears the bank query terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
bank_terms = property(fdel=clear_bank_terms)
@abc.abstractmethod
def get_sequence_rule_query_record(self, sequence_rule_record_type):
"""Gets the sequence rule query record corresponding to the given ``SequenceRule`` record ``Type``.
Multiple record retrievals produce a nested ``OR`` term.
:param sequence_rule_record_type: a sequence rule record type
:type sequence_rule_record_type: ``osid.type.Type``
:return: the sequence rule query record
:rtype: ``osid.assessment.authoring.records.SequenceRuleQueryRecord``
:raise: ``NullArgument`` -- ``sequence_rule_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(sequence_rule_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.authoring.records.SequenceRuleQueryRecord
class SequenceRuleEnablerQuery:
"""This is the query for searching sequence rule enablers.
    Each method match specifies an ``AND`` term while multiple
invocations of the same method produce a nested ``OR``.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def match_ruled_sequence_rule_id(self, sequence_rule_id, match):
"""Matches enablers mapped to the sequence rule.
:param sequence_rule_id: the sequence rule ``Id``
:type sequence_rule_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``sequence_rule_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_ruled_sequence_rule_id_terms(self):
"""Clears the sequence rule ``Id`` query terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
ruled_sequence_rule_id_terms = property(fdel=clear_ruled_sequence_rule_id_terms)
@abc.abstractmethod
def supports_ruled_sequence_rule_query(self):
"""Tests if a ``SequenceRuleQuery`` is available.
:return: ``true`` if a sequence rule query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_ruled_sequence_rule_query(self):
"""Gets the query for a sequence rule.
Multiple retrievals produce a nested ``OR`` term.
:return: the sequence rule query
:rtype: ``osid.assessment.authoring.SequenceRuleQuery``
:raise: ``Unimplemented`` -- ``supports_ruled_sequence_rule_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_ruled_sequence_rule_query()`` is ``true``.*
"""
return # osid.assessment.authoring.SequenceRuleQuery
ruled_sequence_rule_query = property(fget=get_ruled_sequence_rule_query)
@abc.abstractmethod
def match_any_ruled_sequence_rule(self, match):
"""Matches enablers mapped to any sequence rule.
:param match: ``true`` for enablers mapped to any sequence rule, ``false`` to match enablers mapped to no sequence rules
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_ruled_sequence_rule_terms(self):
"""Clears the sequence rule query terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
ruled_sequence_rule_terms = property(fdel=clear_ruled_sequence_rule_terms)
@abc.abstractmethod
def match_bank_id(self, bank_id, match):
"""Matches enablers mapped to the bank.
:param bank_id: the bank ``Id``
:type bank_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_bank_id_terms(self):
"""Clears the bank ``Id`` query terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
bank_id_terms = property(fdel=clear_bank_id_terms)
@abc.abstractmethod
def supports_bank_query(self):
"""Tests if an ``BankQuery`` is available.
:return: ``true`` if a bank query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_bank_query(self):
"""Gets the query for a bank.
Multiple retrievals produce a nested ``OR`` term.
:return: the bank query
:rtype: ``osid.assessment.BankQuery``
:raise: ``Unimplemented`` -- ``supports_bank_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_bank_query()`` is ``true``.*
"""
return # osid.assessment.BankQuery
bank_query = property(fget=get_bank_query)
@abc.abstractmethod
def clear_bank_terms(self):
"""Clears the bank query terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
bank_terms = property(fdel=clear_bank_terms)
@abc.abstractmethod
def get_sequence_rule_enabler_query_record(self, sequence_rule_enabler_record_type):
"""Gets the sequence rule enabler query record corresponding to the given ``SequenceRuleEnabler`` record ``Type``.
Multiple record retrievals produce a nested ``OR`` term.
:param sequence_rule_enabler_record_type: a sequence rule enabler record type
:type sequence_rule_enabler_record_type: ``osid.type.Type``
:return: the sequence rule enabler query record
:rtype: ``osid.assessment.authoring.records.SequenceRuleEnablerQueryRecord``
:raise: ``NullArgument`` -- ``sequence_rule_enabler_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(sequence_rule_enabler_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.authoring.records.SequenceRuleEnablerQueryRecord
| 29.869091
| 159
| 0.648192
|
794a0a004093b2d5cb338f544a86b480153ed089
| 10,467
|
py
|
Python
|
tea/runsingle.py
|
SiddhantDeshmukh/TEA
|
beaa882b7084d380a38a6bf5f219b0ee848afb9e
|
[
"BSD-4-Clause-UC"
] | null | null | null |
tea/runsingle.py
|
SiddhantDeshmukh/TEA
|
beaa882b7084d380a38a6bf5f219b0ee848afb9e
|
[
"BSD-4-Clause-UC"
] | null | null | null |
tea/runsingle.py
|
SiddhantDeshmukh/TEA
|
beaa882b7084d380a38a6bf5f219b0ee848afb9e
|
[
"BSD-4-Clause-UC"
] | null | null | null |
#! /usr/bin/env python
############################# BEGIN FRONTMATTER ################################
# #
# TEA - calculates Thermochemical Equilibrium Abundances of chemical species #
# #
# TEA is part of the PhD dissertation work of Dr. Jasmina #
# Blecic, who developed it with coding assistance from #
# undergraduate M. Oliver Bowman and under the advice of #
# Prof. Joseph Harrington at the University of Central Florida, #
# Orlando, Florida, USA. #
# #
# Copyright (C) 2014-2016 University of Central Florida #
# #
# This program is reproducible-research software: you can #
# redistribute it and/or modify it under the terms of the #
# Reproducible Research Software License as published by #
# Prof. Joseph Harrington at the University of Central Florida, #
# either version 0.3 of the License, or (at your option) any later #
# version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# Reproducible Research Software License for more details. #
# #
# You should have received a copy of the Reproducible Research #
# Software License along with this program. If not, see #
# <http://planets.ucf.edu/resources/reproducible/>. The license's #
# preamble explains the situation, concepts, and reasons surrounding #
# reproducible research, and answers some common questions. #
# #
# This project was started with the support of the NASA Earth and #
# Space Science Fellowship Program, grant NNX12AL83H, held by #
# Jasmina Blecic, Principal Investigator Joseph Harrington, and the #
# NASA Science Mission Directorate Planetary Atmospheres Program, #
# grant NNX12AI69G. #
# #
# See the file ACKNOWLEDGING in the top-level TEA directory for #
# instructions on how to acknowledge TEA in publications. #
# #
# We welcome your feedback, but do not guarantee support. #
# Many questions are answered in the TEA forums: #
# #
# https://physics.ucf.edu/mailman/listinfo/tea-user #
# https://physics.ucf.edu/mailman/listinfo/tea-devel #
# #
# Visit our Github site: #
# #
# https://github.com/dzesmin/TEA/ #
# #
# Reach us directly at: #
# #
# Jasmina Blecic <jasmina@physics.ucf.edu> #
# Joseph Harrington <jh@physics.ucf.edu> #
# #
############################## END FRONTMATTER #################################
import ntpath
import os
import shutil
import subprocess
import numpy as np
import sys
import time
import six
import makeheader as mh
import readconf as rc
import iterate as it
import balance as bal
location_TEA = os.path.realpath(os.path.dirname(__file__) + "/..") + "/"
# =============================================================================
# This program runs TEA over an input file that contains only one T-P.
# The code retrieves the input file and the current directory name given by the
# user.
#
# This module prints on screen the code progress: the current iteration number,
# and informs the user that minimization is done.
# Example:
# 100
# Maximum iteration reached, ending minimization.
#
# The program is executed from the shell with two arguments:
# runsingle.py <SINGLETP_INPUT_FILE_PATH> <DIRECTORY_NAME>
# Example: ../TEA/tea/runsingle.py ../TEA/doc/examples/singleTP/inputs/singleTP_Example.txt Single_Example
# =============================================================================
# Read configuration-file parameters:
TEApars, PREATpars = rc.readcfg()
maxiter, savefiles, verb, times, abun_file, location_out, xtol, ncpu = TEApars
# Print license
print("\n\
================= Thermal Equilibrium Abundances (TEA) =================\n\
A program to calculate species abundances under thermochemical equilibrium.\n\
\n\
Copyright (C) 2014-2016 University of Central Florida.\n\
\n\
This program is reproducible-research software. See the Reproducible\n\
Research Software License that accompanies the code, or visit:\n\
http://planets.ucf.edu/resources/reproducible\n\
Questions? Feedback? Search our mailing list archives or post a comment:\n\
https://physics.ucf.edu/mailman/listinfo/tea-user\n\
\n\
Direct contact: \n\
Jasmina Blecic <jasmina@physics.ucf.edu> \n\
========================================================================\n")
# Correct output location name
if location_out[-1] != '/':
location_out += '/'
# Retrieve user inputs file
infile = sys.argv[1:][0]
# Set start time
tstart = time.time()
# Check if config file exists in current working directory
TEA_config = 'TEA.cfg'
try:
f = open(TEA_config)
except IOError:
print("\nConfig file is missing, place TEA.cfg in the working directory.\n")
# If the input file does not exist, abort
try:
    f = open(infile)
except IOError:
    raise IOError("\nSingle T-P input file does not exist.\n")
# Retrieve current output directory name given by user
desc = sys.argv[1:][1]
# Check if output directory exists and inform user
if os.path.exists(location_out + desc):
six.moves.input(" Output directory " + str(location_out + desc) +
"/\n already exists.\n"
" Press enter to continue and overwrite existing files,\n"
" or quit and choose another output name.\n")
# If output directory does not exist, create it
if not os.path.exists(location_out + desc):
os.makedirs(location_out + desc)
# Set up locations of necessary files
thermo_dir = location_TEA + "lib/gdata"
inputs_dir = location_out + desc + "/inputs/"
loc_outputs = location_out + desc + "/outputs/"
if savefiles:
# Create inputs directory
if not os.path.exists(inputs_dir):
os.makedirs(inputs_dir)
# Inform user if TEA.cfg file already exists in inputs/ directory
if verb >= 1 and os.path.isfile(inputs_dir + TEA_config):
print(" " + str(TEA_config) + " overwritten in inputs/ directory.")
# Copy TEA.cfg file to current inputs directory
shutil.copy2(TEA_config, inputs_dir + TEA_config)
# Inform user if abundances file already exists in inputs/ directory
head, abun_filename = ntpath.split(abun_file)
if verb >= 1 and os.path.isfile(inputs_dir + abun_filename):
print(" " + str(abun_filename) + " overwritten in inputs/ directory.")
# Copy abundances file to inputs/ directory
shutil.copy2(abun_file, inputs_dir + abun_filename)
# Inform user if single T-P input file already exists in inputs/ directory
if verb >= 1 and os.path.isfile(inputs_dir + infile.split("/")[-1]):
print(" " + str(infile.split("/")[-1])
+ " overwritten in inputs/ directory.\n")
# Copy single T-P input file to inputs directory
shutil.copy2(infile, inputs_dir + infile.split("/")[-1])
# Times / speed check for pre-loop runtime
if times:
tnew = time.time()
elapsed = tnew - tstart
print("pre-loop: " + str(elapsed))
# Execute main TEA loop
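# (Descriptive note added for clarity; the step summaries below are inferred
#  from the function and module names used in this file:)
#   read_single()  - parse the single T-P input file (temperature, pressure, species)
#   read_gdata()   - load thermodynamic free-energy data for those species
#   calc_gRT()     - evaluate g/RT for each species at the requested temperature
#   read_stoich()  - build the stoichiometric matrix and elemental abundance vector b
#   balance()      - produce an initial, mass-balanced abundance guess for the iteration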
temp, pressure, speclist = mh.read_single(infile)
free_energy, heat = mh.read_gdata(speclist, thermo_dir)
g_RT = mh.calc_gRT(free_energy, heat, temp)
stoich_arr, stoich_atom, b = mh.read_stoich(speclist, getb=True)
guess = bal.balance(stoich_arr, b, verb)
# Save info for the iteration module and remove it afterwards if necessary
save_info = location_out, desc, speclist, temp
# Perform iterations until reaching desired precision xtol
y, x, delta, y_bar, x_bar, delta_bar = it.iterate(pressure, stoich_arr, b,
g_RT, maxiter, verb, times, guess, xtol, save_info)
# Save or delete lagrange.py and lambdacorr.py outputs
if savefiles:
hfolder = location_out + desc + "/headers/"
mh.write_header(hfolder, desc, temp, pressure, speclist,
stoich_atom, stoich_arr, b, g_RT)
else:
# Results directory is automatically made when TEA is executed
datadirr = '{:s}{:s}/results/results_{:.0f}K_{:.2e}bar/'.format(
location_out, desc, temp, pressure)
    # Copy the visual results file into the top-level output directory
shutil.copy2(datadirr + "results-visual.txt",
location_out + desc + "/results-visual.txt")
shutil.rmtree(loc_outputs)
shutil.rmtree('{:s}{:s}/results'.format(location_out, desc))
# Print on-screen
if verb >= 1:
print("\n Species abundances calculated.\n Created results file.")
# Time / speed testing
if verb >= 1:
tend = time.time()
elapsed = tend - tstart
print("Overall run time: " + str(elapsed) + " seconds")
| 46.110132
| 106
| 0.542276
|
794a0b1b42d0fb8afa5e6d4a3d273799e375e33e
| 7,669
|
py
|
Python
|
utils/loss.py
|
edornd/satellite-mib
|
a4423dc866ecfb77dc62548764917c048006dd8a
|
[
"MIT"
] | null | null | null |
utils/loss.py
|
edornd/satellite-mib
|
a4423dc866ecfb77dc62548764917c048006dd8a
|
[
"MIT"
] | null | null | null |
utils/loss.py
|
edornd/satellite-mib
|
a4423dc866ecfb77dc62548764917c048006dd8a
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
import torch.nn.functional as F
import torch
def get_loss(loss_type):
    if loss_type == 'focal_loss':
        return FocalLoss(ignore_index=255, size_average=True)
    elif loss_type == 'cross_entropy':
        return nn.CrossEntropyLoss(ignore_index=255, reduction='mean')
    else:
        raise ValueError("Unsupported loss type: {}".format(loss_type))
class FocalLoss(nn.Module):
def __init__(self, alpha=1, gamma=2, size_average=True, ignore_index=255):
super(FocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
        self.ignore_index = ignore_index
        self.size_average = size_average
def forward(self, inputs, targets):
ce_loss = F.cross_entropy(inputs, targets, reduction='none', ignore_index=self.ignore_index)
pt = torch.exp(-ce_loss)
focal_loss = self.alpha * (1-pt)**self.gamma * ce_loss
if self.size_average:
return focal_loss.mean()
else:
return focal_loss.sum()
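# --- Illustrative note (added; not part of the original module) ---
# FocalLoss down-weights well-classified pixels: with pt = exp(-CE) the
# per-pixel loss is alpha * (1 - pt)**gamma * CE, so confident predictions
# (pt close to 1) contribute almost nothing. Assuming B x C x H x W logits
# and B x H x W integer labels:
#
#   criterion = get_loss('focal_loss')   # or FocalLoss(gamma=2)
#   loss = criterion(logits, labels)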
class BCEWithLogitsLossWithIgnoreIndex(nn.Module):
def __init__(self, reduction='mean', ignore_index=255):
super().__init__()
self.reduction = reduction
self.ignore_index = ignore_index
def forward(self, inputs, targets):
# inputs of size B x C x H x W
n_cl = torch.tensor(inputs.shape[1]).to(inputs.device)
labels_new = torch.where(targets != self.ignore_index, targets, n_cl)
# replace ignore with numclasses + 1 (to enable one hot and then remove it)
targets = F.one_hot(labels_new, inputs.shape[1] + 1).float().permute(0, 3, 1, 2)
targets = targets[:, :inputs.shape[1], :, :] # remove 255 from 1hot
# targets is B x C x H x W so shape[1] is C
loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction='none')
# loss has shape B x C x H x W
loss = loss.sum(dim=1) # sum the contributions of the classes
if self.reduction == 'mean':
# if targets have only zeros, we skip them
return torch.masked_select(loss, targets.sum(dim=1) != 0).mean()
elif self.reduction == 'sum':
return torch.masked_select(loss, targets.sum(dim=1) != 0).sum()
else:
return loss * targets.sum(dim=1)
class IcarlLoss(nn.Module):
def __init__(self, reduction='mean', ignore_index=255, bkg=False):
super().__init__()
self.reduction = reduction
self.ignore_index = ignore_index
self.bkg = bkg
def forward(self, inputs, targets, output_old):
# inputs of size B x C x H x W
n_cl = torch.tensor(inputs.shape[1]).to(inputs.device)
labels_new = torch.where(targets != self.ignore_index, targets, n_cl)
# replace ignore with numclasses + 1 (to enable one hot and then remove it)
targets = F.one_hot(labels_new, inputs.shape[1] + 1).float().permute(0, 3, 1, 2)
targets = targets[:, :inputs.shape[1], :, :] # remove 255 from 1hot
if self.bkg:
targets[:, 1:output_old.shape[1], :, :] = output_old[:, 1:, :, :]
else:
targets[:, :output_old.shape[1], :, :] = output_old
# targets is B x C x H x W so shape[1] is C
loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction='none')
# loss has shape B x C x H x W
loss = loss.sum(dim=1) # sum the contributions of the classes
if self.reduction == 'mean':
# if targets have only zeros, we skip them
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
else:
return loss
class UnbiasedCrossEntropy(nn.Module):
def __init__(self, old_cl=None, reduction='mean', ignore_index=255):
super().__init__()
self.reduction = reduction
self.ignore_index = ignore_index
self.old_cl = old_cl
def forward(self, inputs, targets):
old_cl = self.old_cl
outputs = torch.zeros_like(inputs) # B, C (1+V+N), H, W
den = torch.logsumexp(inputs, dim=1) # B, H, W den of softmax
outputs[:, 0] = torch.logsumexp(inputs[:, 0:old_cl], dim=1) - den # B, H, W p(O)
outputs[:, old_cl:] = inputs[:, old_cl:] - den.unsqueeze(dim=1) # B, N, H, W p(N_i)
labels = targets.clone() # B, H, W
labels[targets < old_cl] = 0 # just to be sure that all labels old belongs to zero
loss = F.nll_loss(outputs, labels, ignore_index=self.ignore_index, reduction=self.reduction)
return loss
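# --- Illustrative note (added; not part of the original module) ---
# The "unbiased" cross entropy treats the background logit as the union of the
# true background and all old classes. With den = logsumexp_j(x_j), the
# aggregated background log-probability is
#     log p(bkg or any old class) = logsumexp_{c < old_cl}(x_c) - den
# while new classes keep their ordinary log-softmax x_c - den. Old-class labels
# are remapped to 0 so NLL scores them against this aggregated background term.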
class UnbiasedFocalLoss(nn.Module):
def __init__(self, old_cl=None, reduction="mean", ignore_index=255, alpha=1, gamma=2):
super().__init__()
self.reduction = reduction
self.ignore_index = ignore_index
self.old_cl = old_cl
self.alpha = alpha
self.gamma = gamma
def forward(self, inputs, targets):
old_cl = self.old_cl
outputs = torch.zeros_like(inputs) # B, C (1+V+N), H, W
den = torch.logsumexp(inputs, dim=1) # B, H, W den of softmax
outputs[:, 0] = torch.logsumexp(inputs[:, 0:old_cl], dim=1) - den # B, H, W p(O)
outputs[:, old_cl:] = inputs[:, old_cl:] - den.unsqueeze(dim=1) # B, N, H, W p(N_i)
labels = targets.clone() # B, H, W
labels[targets < old_cl] = 0 # just to be sure that all labels old belongs to zero
ce = F.nll_loss(outputs, labels, ignore_index=self.ignore_index, reduction="none")
pt = torch.exp(-ce)
loss = self.alpha * (1-pt)**self.gamma * ce
return loss
class KnowledgeDistillationLoss(nn.Module):
def __init__(self, reduction='mean', alpha=1.):
super().__init__()
self.reduction = reduction
self.alpha = alpha
def forward(self, inputs, targets, mask=None):
inputs = inputs.narrow(1, 0, targets.shape[1])
outputs = torch.log_softmax(inputs, dim=1)
labels = torch.softmax(targets * self.alpha, dim=1)
loss = (outputs * labels).mean(dim=1)
if mask is not None:
loss = loss * mask.float()
if self.reduction == 'mean':
outputs = -torch.mean(loss)
elif self.reduction == 'sum':
outputs = -torch.sum(loss)
else:
outputs = -loss
return outputs
class UnbiasedKnowledgeDistillationLoss(nn.Module):
def __init__(self, reduction='mean', alpha=1.):
super().__init__()
self.reduction = reduction
self.alpha = alpha
def forward(self, inputs, targets, mask=None):
new_cl = inputs.shape[1] - targets.shape[1]
targets = targets * self.alpha
new_bkg_idx = torch.tensor([0] + [x for x in range(targets.shape[1], inputs.shape[1])]).to(inputs.device)
den = torch.logsumexp(inputs, dim=1) # B, H, W
outputs_no_bgk = inputs[:, 1:-new_cl] - den.unsqueeze(dim=1) # B, OLD_CL, H, W
outputs_bkg = torch.logsumexp(torch.index_select(inputs, index=new_bkg_idx, dim=1), dim=1) - den # B, H, W
labels = torch.softmax(targets, dim=1) # B, BKG + OLD_CL, H, W
# make the average on the classes 1/n_cl \sum{c=1..n_cl} L_c
loss = (labels[:, 0] * outputs_bkg + (labels[:, 1:] * outputs_no_bgk).sum(dim=1)) / targets.shape[1]
if mask is not None:
loss = loss * mask.float()
if self.reduction == 'mean':
outputs = -torch.mean(loss)
elif self.reduction == 'sum':
outputs = -torch.sum(loss)
else:
outputs = -loss
return outputs
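# --- Minimal smoke test (added for illustration; not part of the original
# module). Shapes, class counts, and the old/new split are arbitrary
# assumptions chosen only to exercise the losses defined above. ---
if __name__ == "__main__":
    torch.manual_seed(0)
    logits = torch.randn(2, 5, 8, 8)           # B x C x H x W (5 classes total)
    labels = torch.randint(0, 5, (2, 8, 8))    # B x H x W integer targets
    print("focal:", FocalLoss()(logits, labels).item())
    print("uce:  ", UnbiasedCrossEntropy(old_cl=3)(logits, labels).item())
    old_logits = torch.randn(2, 3, 8, 8)       # outputs of an "old" 3-class model
    print("ukd:  ", UnbiasedKnowledgeDistillationLoss()(logits, old_logits).item())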
| 38.928934
| 118
| 0.592255
|
794a0b5f11223ac40c91a68addef03c0aa932410
| 6,778
|
py
|
Python
|
indico/core/settings/models/base.py
|
bpedersen2/indico
|
8410ee5f8f8530a8692f3dd2d4015c3074b0aa30
|
[
"MIT"
] | 1
|
2021-12-27T17:51:27.000Z
|
2021-12-27T17:51:27.000Z
|
indico/core/settings/models/base.py
|
bpedersen2/indico
|
8410ee5f8f8530a8692f3dd2d4015c3074b0aa30
|
[
"MIT"
] | 5
|
2021-04-08T19:26:47.000Z
|
2022-01-24T16:30:18.000Z
|
indico/core/settings/models/base.py
|
bpedersen2/indico
|
8410ee5f8f8530a8692f3dd2d4015c3074b0aa30
|
[
"MIT"
] | 2
|
2019-02-24T17:29:10.000Z
|
2021-04-08T19:23:27.000Z
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from collections import defaultdict
from enum import Enum
from flask import g, has_request_context
from sqlalchemy.dialects.postgresql import JSONB
from indico.core.db import db
from indico.core.db.sqlalchemy.principals import PrincipalMixin, PrincipalType
from indico.util.decorators import strict_classproperty
def _coerce_value(value):
if isinstance(value, Enum):
return value.value
return value
class SettingsBase:
"""Base class for any kind of setting tables."""
id = db.Column(
db.Integer,
primary_key=True
)
module = db.Column(
db.String,
index=True,
nullable=False
)
name = db.Column(
db.String,
index=True,
nullable=False
)
@strict_classproperty
@staticmethod
def __auto_table_args():
return (db.CheckConstraint('module = lower(module)', 'lowercase_module'),
db.CheckConstraint('name = lower(name)', 'lowercase_name'))
@classmethod
def delete(cls, module, *names, **kwargs):
if not names:
return
(cls.query
.filter(cls.name.in_(names), cls.module == module)
.filter_by(**kwargs)
.delete(synchronize_session='fetch'))
db.session.flush()
cls._clear_cache()
@classmethod
def delete_all(cls, module, **kwargs):
cls.query.filter_by(module=module, **kwargs).delete()
db.session.flush()
cls._clear_cache()
@classmethod
def _get_cache(cls, kwargs):
if not has_request_context():
# disable the cache by always returning an empty one
return defaultdict(dict), False
key = (cls, frozenset(kwargs.items()))
try:
return g.global_settings_cache[key], True
except AttributeError:
# no cache at all
g.global_settings_cache = cache = dict()
cache[key] = rv = defaultdict(dict)
return rv, False
except KeyError:
# no cache for this settings class / kwargs
return g.global_settings_cache.setdefault(key, defaultdict(dict)), False
@staticmethod
def _clear_cache():
if has_request_context():
g.pop('global_settings_cache', None)
class JSONSettingsBase(SettingsBase):
"""Base class for setting tables with a JSON value."""
__tablename__ = 'settings'
value = db.Column(
JSONB,
nullable=False
)
@classmethod
def get_setting(cls, module, name, **kwargs):
return cls.query.filter_by(module=module, name=name, **kwargs).first()
@classmethod
def get_all_settings(cls, module, **kwargs):
return {s.name: s for s in cls.query.filter_by(module=module, **kwargs)}
@classmethod
def get_all(cls, module, **kwargs):
cache, hit = cls._get_cache(kwargs)
if hit:
return cache[module]
else:
for s in cls.query.filter_by(**kwargs):
cache[s.module][s.name] = s.value
return cache[module]
@classmethod
def get(cls, module, name, default=None, **kwargs):
setting = cls.get_setting(module, name, **kwargs)
if setting is None:
return default
return setting.value
@classmethod
def set(cls, module, name, value, **kwargs):
setting = cls.get_setting(module, name, **kwargs)
if setting is None:
setting = cls(module=module, name=name, **kwargs)
db.session.add(setting)
setting.value = _coerce_value(value)
db.session.flush()
cls._clear_cache()
@classmethod
def set_multi(cls, module, items, **kwargs):
existing = cls.get_all_settings(module, **kwargs)
for name in items.keys() - existing.keys():
setting = cls(module=module, name=name, value=_coerce_value(items[name]), **kwargs)
db.session.add(setting)
for name in items.keys() & existing.keys():
existing[name].value = _coerce_value(items[name])
db.session.flush()
cls._clear_cache()
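# --- Illustrative usage sketch (added; not part of Indico). A concrete model
# mixes this base into a SQLAlchemy model; the class name `Setting` below is
# an assumption for illustration only. ---
#
#   class Setting(JSONSettingsBase, db.Model):
#       pass
#
#   Setting.set('plugin_foo', 'threshold', 10)
#   Setting.get('plugin_foo', 'threshold', default=0)   # -> 10
#   Setting.get_all('plugin_foo')                        # cached per request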
class PrincipalSettingsBase(PrincipalMixin, SettingsBase):
"""Base class for principal setting tables."""
__tablename__ = 'settings_principals'
    # Additional columns used to identify a setting (e.g. user/event id)
extra_key_cols = ()
@strict_classproperty
@classmethod
def unique_columns(cls):
return ('module', 'name') + cls.extra_key_cols
@classmethod
def get_all_acls(cls, module, **kwargs):
rv = defaultdict(set)
for setting in cls.query.filter_by(module=module, **kwargs):
rv[setting.name].add(setting.principal)
return rv
@classmethod
def get_acl(cls, module, name, raw=False, **kwargs):
return {x if raw else x.principal for x in cls.query.filter_by(module=module, name=name, **kwargs)}
@classmethod
def set_acl(cls, module, name, acl, **kwargs):
existing = cls.get_acl(module, name, raw=True, **kwargs)
existing_principals = {x.principal for x in existing}
for principal in acl - existing_principals:
db.session.add(cls(module=module, name=name, principal=principal, **kwargs))
for setting in existing:
if setting.principal not in acl:
db.session.delete(setting)
db.session.flush()
@classmethod
def set_acl_multi(cls, module, items, **kwargs):
for name, acl in items.items():
cls.set_acl(module, name, acl, **kwargs)
@classmethod
def add_principal(cls, module, name, principal, **kwargs):
if principal not in cls.get_acl(module, name):
db.session.add(cls(module=module, name=name, principal=principal, **kwargs))
db.session.flush()
@classmethod
def remove_principal(cls, module, name, principal, **kwargs):
for setting in cls.get_acl(module, name, raw=True, **kwargs):
if setting.principal == principal:
db.session.delete(setting)
db.session.flush()
@classmethod
def merge_users(cls, module, target, source):
settings = [(setting.module, setting.name, {x: getattr(setting, x) for x in cls.extra_key_cols})
for setting in cls.query.filter_by(module=module, type=PrincipalType.user, user=source)]
for module, name, extra in settings:
cls.remove_principal(module, name, source, **extra)
cls.add_principal(module, name, target, **extra)
db.session.flush()
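# --- Illustrative ACL sketch (added; not part of Indico). The class name
# `SettingPrincipal` and the `some_user` object are assumptions for
# illustration only. ---
#
#   SettingPrincipal.add_principal('agreements', 'managers', some_user)
#   SettingPrincipal.get_acl('agreements', 'managers')     # -> {some_user, ...}
#   SettingPrincipal.remove_principal('agreements', 'managers', some_user)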
| 32.902913
| 108
| 0.628504
|
794a0c2f96bc6f32cd8ecc8cdc0f0b258be0a96f
| 796
|
py
|
Python
|
tests/configuration_tests.py
|
Charestlab/eegprep
|
f8ca1e823fa50c0644bcbc807a02a49d9f8af283
|
[
"MIT"
] | 1
|
2019-04-30T12:14:40.000Z
|
2019-04-30T12:14:40.000Z
|
tests/configuration_tests.py
|
Charestlab/eegprep
|
f8ca1e823fa50c0644bcbc807a02a49d9f8af283
|
[
"MIT"
] | 18
|
2018-08-21T09:01:48.000Z
|
2020-01-08T21:03:06.000Z
|
tests/configuration_tests.py
|
Charestlab/eegprep
|
f8ca1e823fa50c0644bcbc807a02a49d9f8af283
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
class TestConfiguration(TestCase):
def test_configure(self):
from eegprep.configuration import Configuration
config = Configuration()
config.setDefaults({'downsample': 4096, 'out_file_format': 'fif'})
self.assertEqual(config['downsample'], 4096)
self.assertEqual(config['out_file_format'], 'fif')
config.updateFromString('# bla bla\ndownsample=256\nout_file_format=mat')
self.assertEqual(config['downsample'], 256)
self.assertEqual(config['out_file_format'], 'mat')
def test_toString(self):
from eegprep.configuration import Configuration
config = Configuration()
config.setDefaults({'downsample': 4096})
self.assertEqual(str(config), '\ndownsample=4096\n')
| 37.904762
| 81
| 0.689698
|
794a0cdd7525eb8ebcb057ec6d1ceb6cf48b34ab
| 6,822
|
py
|
Python
|
httprunner/context.py
|
MannixZ/httprunner
|
ab8ec80d8ed6913bd322592d721cd1de232f2dde
|
[
"Apache-2.0"
] | null | null | null |
httprunner/context.py
|
MannixZ/httprunner
|
ab8ec80d8ed6913bd322592d721cd1de232f2dde
|
[
"Apache-2.0"
] | null | null | null |
httprunner/context.py
|
MannixZ/httprunner
|
ab8ec80d8ed6913bd322592d721cd1de232f2dde
|
[
"Apache-2.0"
] | null | null | null |
from httprunner import exceptions, logger, parser, utils
class SessionContext(object):
""" HttpRunner session, store runtime variables.
Examples:
>>> variables = {"SECRET_KEY": "DebugTalk"}
>>> context = SessionContext(variables)
Equivalent to:
>>> context = SessionContext()
>>> context.update_session_variables(variables)
"""
def __init__(self, variables=None):
variables_mapping = utils.ensure_mapping_format(variables or {})
self.session_variables_mapping = parser.parse_variables_mapping(variables_mapping)
self.init_test_variables()
self.validation_results = []
def init_test_variables(self, variables_mapping=None):
""" init test variables, called when each test(api) starts.
variables_mapping will be evaluated first.
Args:
variables_mapping (dict)
{
"random": "${gen_random_string(5)}",
"authorization": "${gen_md5($TOKEN, $data, $random)}",
"data": '{"name": "user", "password": "123456"}',
"TOKEN": "debugtalk",
}
"""
variables_mapping = variables_mapping or {}
variables_mapping = utils.ensure_mapping_format(variables_mapping)
variables_mapping.update(self.session_variables_mapping)
parsed_variables_mapping = parser.parse_variables_mapping(variables_mapping)
self.test_variables_mapping = {}
# priority: extracted variable > teststep variable
self.test_variables_mapping.update(parsed_variables_mapping)
self.test_variables_mapping.update(self.session_variables_mapping)
def update_test_variables(self, variable_name, variable_value):
""" update test variables, these variables are only valid in the current test.
"""
self.test_variables_mapping[variable_name] = variable_value
def update_session_variables(self, variables_mapping):
""" update session with extracted variables mapping.
these variables are valid in the whole running session.
"""
variables_mapping = utils.ensure_mapping_format(variables_mapping)
self.session_variables_mapping.update(variables_mapping)
self.test_variables_mapping.update(self.session_variables_mapping)
def eval_content(self, content):
""" evaluate content recursively, take effect on each variable and function in content.
content may be in any data structure, include dict, list, tuple, number, string, etc.
"""
return parser.parse_lazy_data(content, self.test_variables_mapping)
def __eval_validator_check(self, check_item, resp_obj):
""" evaluate check item in validator.
Args:
check_item: check_item should only be the following 5 formats:
1, variable reference, e.g. $token
2, function reference, e.g. ${is_status_code_200($status_code)}
3, dict or list, maybe containing variable/function reference, e.g. {"var": "$abc"}
4, string joined by delimiter. e.g. "status_code", "headers.content-type"
5, regex string, e.g. "LB[\d]*(.*)RB[\d]*"
resp_obj: response object
"""
if isinstance(check_item, (dict, list)) \
or isinstance(check_item, parser.LazyString):
# format 1/2/3
check_value = self.eval_content(check_item)
else:
# format 4/5
check_value = resp_obj.extract_field(check_item)
return check_value
def __eval_validator_expect(self, expect_item):
""" evaluate expect item in validator.
Args:
expect_item: expect_item should only be in 2 types:
1, variable reference, e.g. $expect_status_code
2, actual value, e.g. 200
"""
expect_value = self.eval_content(expect_item)
return expect_value
def validate(self, validators, resp_obj):
""" make validation with comparators
"""
self.validation_results = []
if not validators:
return
logger.log_debug("start to validate.")
validate_pass = True
failures = []
for validator in validators:
# validator should be LazyFunction object
if not isinstance(validator, parser.LazyFunction):
raise exceptions.ValidationFailure(
"validator should be parsed first: {}".format(validators))
# evaluate validator args with context variable mapping.
validator_args = validator.get_args()
check_item, expect_item = validator_args
check_value = self.__eval_validator_check(
check_item,
resp_obj
)
expect_value = self.__eval_validator_expect(expect_item)
validator.update_args([check_value, expect_value])
comparator = validator.func_name
validator_dict = {
"comparator": comparator,
"check": check_item,
"check_value": check_value,
"expect": expect_item,
"expect_value": expect_value
}
validate_msg = "\nvalidate: {} {} {}({})".format(
check_item,
comparator,
expect_value,
type(expect_value).__name__
)
try:
validator.to_value(self.test_variables_mapping)
validator_dict["check_result"] = "pass"
validate_msg += "\t==> pass"
logger.log_debug(validate_msg)
except (AssertionError, TypeError):
validate_pass = False
validator_dict["check_result"] = "fail"
validate_msg += "\t==> fail"
validate_msg += "\n{}({}) {} {}({})".format(
check_value,
type(check_value).__name__,
comparator,
expect_value,
type(expect_value).__name__
)
logger.log_error(validate_msg)
failures.append(validate_msg)
self.validation_results.append(validator_dict)
# restore validator args, in case of running multiple times
validator.update_args(validator_args)
if not validate_pass:
failures_string = "\n".join([failure for failure in failures])
raise exceptions.ValidationFailure(failures_string)
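# --- Illustrative sketch (added; not part of HttpRunner). The validator and
# response objects below are assumptions; in practice they are produced by the
# parser (LazyFunction) and by the response wrapper respectively. ---
#
#   context = SessionContext({"expect_code": 200})
#   # a parsed validator behaves like:  eq("status_code", $expect_code)
#   # validate() extracts "status_code" from resp_obj, resolves $expect_code to
#   # 200 via eval_content(), runs the comparator, and appends the outcome to
#   # context.validation_results, raising ValidationFailure on any mismatch.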
| 39.894737
| 100
| 0.586485
|
794a0cff3534fc224bc8d274a3d787383f7e71e5
| 11,602
|
py
|
Python
|
nodes/cassandraBag.py
|
andre-dietrich/cassandra_ros
|
5e8a0c482ab2bf72ef647b2ad1ca5f9d84981e67
|
[
"MIT"
] | null | null | null |
nodes/cassandraBag.py
|
andre-dietrich/cassandra_ros
|
5e8a0c482ab2bf72ef647b2ad1ca5f9d84981e67
|
[
"MIT"
] | 1
|
2021-01-15T04:41:54.000Z
|
2021-01-29T11:12:59.000Z
|
nodes/cassandraBag.py
|
andre-dietrich/cassandra_ros
|
5e8a0c482ab2bf72ef647b2ad1ca5f9d84981e67
|
[
"MIT"
] | 2
|
2019-09-09T02:46:04.000Z
|
2021-01-15T04:36:33.000Z
|
#!/usr/bin/env python
import roslib; roslib.load_manifest('cassandra_ros')
import rospy
import rostopic
from cassandra_ros.srv import *
import RosCassandra.RosCassandra as rc
import time
import threading
from PyQt4.QtCore import QThread
def enum(**enums):
return type('Enum', (), enums)
running = enum(START=1, STOP=0, PAUSE=-1)
class RosCassandraBag(QThread):
MAX_GWMTIME = 4294967295
MIN_GWMTIME = 0
def __init__(self, casTopic, parent=None):
QThread.__init__(self,parent)
self.casTopic= casTopic
self.record_start_time = self.MIN_GWMTIME
self.record_stop_time = self.MAX_GWMTIME
self.record_status = running.STOP
self.record_filter = None
self.play_start_time = self.MIN_GWMTIME
self.play_stop_time = self.MAX_GWMTIME
self.play_status = running.STOP
self.play_filter = None
def record(self, msg):
# start recording
# Andre
#msg = msg.transforms.pop()
if rospy.get_time() >= self.record_start_time:
if self.record_status == running.START:
if self.record_filter:
if( eval(self.record_filter) ):
self.casTopic.addData(msg, ttl=self.record_ttl)
else:
self.casTopic.addData(msg)
if rospy.get_time() >= self.record_stop_time:
self.stopRecord()
def startRecord(self, start_time=MIN_GWMTIME, stop_time=MAX_GWMTIME, filter=None, ttl=None):
self.record_start_time = min(self.record_start_time, start_time)
self.record_stop_time = max(self.record_stop_time, stop_time)
self.record_filter = filter
self.record_ttl = ttl
self.record_status = running.START
self.subscriber = rospy.Subscriber(self.casTopic.topic, self.casTopic.MsgClass, self.record)
def stopRecord(self):
if self.record_status == running.START or self.record_status == running.PAUSE:
self.record_status = running.STOP
self.subscriber.unregister()
def pauseRecord(self):
if self.record_status == running.START:
self.record_status = running.PAUSE
elif self.record_status == running.PAUSE:
self.record_status = running.START
def exit(self):
self.stopPlay()
self.stopRecord()
def startPlay(self, start_time=MIN_GWMTIME, stop_time=MAX_GWMTIME, speed=1, delay=0, queuesize=100, loop=False, filter=None):
self.play_start_time = start_time
self.play_stop_time = stop_time
self.play_speed = speed
self.play_delay = delay
self.play_queuesize = queuesize
self.play_loop = loop
self.play_filter = filter
self.play_status = running.START
if not self.play_filter or self.play_filter == "":
self.play_filter = None
self.publisher = rospy.Publisher(self.casTopic.topic, self.casTopic.MsgClass)
# start thread
self.start()
def stopPlay(self):
if self.isRunning():
self.terminate()
def pausePlay(self):
# waiting
if self.isRunning():
if self.play_status == running.START:
self.play_status = running.PAUSE
elif self.play_status == running.PAUSE:
self.play_status = running.START
# play ...
def run(self):
rospy.sleep(self.play_delay)
while True:
from_key = str(self.play_start_time)
to_key = str(self.play_stop_time)
_last_time = long(from_key)
_currentTime = rospy.Time.now().to_nsec()
while True:
data = self.casTopic.getData(from_key, to_key, self.play_queuesize)
for dat in data:
from_key, msg = dat
timestamp = long(from_key)
if _last_time > 0:
delta_t = float(timestamp - _last_time) / (self.play_speed*1000000000)
else:
delta_t = 0
time.sleep(delta_t)
_last_time = timestamp
if self.play_filter:
if eval(self.play_filter):
self.publisher.publish(msg)
else:
self.publisher.publish(msg)
# pause
while self.play_status == running.PAUSE:
self.yieldCurrentThread()
from_key = str(long(from_key)+1)
# end reached
if len(data) < self.play_queuesize:
break
if not self.play_loop:
break
self.play_status = running.STOP
rospy.loginfo("STOP")
def handle_record(req):
global rosCas, bag
response = ""
if req.ttl == 0:
req.ttl = None
for topic in (req.topics):
# start recording
if req.record==1:
if not bag.has_key(topic):
if not rosCas.existTopic(topic):
msg_class, _, _ = rostopic.get_topic_class(topic, blocking=True)
rosCas.addTopic(topic,
req.cassandra_format,
msg_class.__name__,
msg_class.__module__.split(".")[0],
'time', None,
comment='')
bag[topic] = RosCassandraBag(rosCas.getTopic(topic))
bag[topic].startRecord(req.start_time, req.stop_time, req.filter, req.ttl)
rospy.loginfo("start recording: "+topic)
# stop recording
elif req.record == 0:
if bag.has_key(topic):
bag[topic].stopRecord()
rospy.loginfo("stop recording: "+topic)
# pause recording
else:
if bag.has_key(topic):
bag[topic].pauseRecord()
rospy.loginfo("pause recording: "+topic)
return response
def handle_play(req):
global rosCas, bag
response = ""
for topic in (req.topics):
# start playing
if req.play == 1:
if not bag.has_key(topic):
if rosCas.existTopic(topic):
bag[topic] = RosCassandraBag(rosCas.getTopic(topic))
bag[topic].startPlay(start_time = req.start_time,
stop_time = req.stop_time,
speed = req.speed,
delay = req.delay,
queuesize = req.queuesize,
loop = req.loop,
filter = req.filter)
else:
rospy.loginfo("topic ("+topic+") does not exist: ")
else:
bag[topic].startPlay(start_time = req.start_time,
stop_time = req.stop_time,
speed = req.speed,
delay = req.delay,
queuesize = req.queuesize,
loop = req.loop,
filter = req.filter)
rospy.loginfo("start playing: "+topic)
# stop playing
elif req.play == 0:
if bag.has_key(topic):
bag[topic].stopPlay()
rospy.loginfo("stop playing: "+topic)
# pause playing
elif req.play == -1:
if bag.has_key(topic):
bag[topic].pausePlay()
rospy.loginfo("pause playing: "+topic)
return response
def handle_delete(req):
global rosCas, bag
response = ""
for topic in (req.topics):
# start delete
if rosCas.existTopic(topic):
_topic = rosCas.getTopic(topic)
rospy.loginfo("deleting "+topic)
_topic.removeData(key=str(req.start_time), to_key=str(req.stop_time))
else:
rospy.loginfo("deleting failed, topic ("+topic+") does not exist")
return response
def handle_truncate(req):
global rosCas, bag
response = ""
for topic in (req.topics):
# start delete
if rosCas.existTopic(topic):
rosCas.removeTopic(topic)
rospy.loginfo("truncate "+topic)
else:
rospy.loginfo("truncate failed, topic ("+topic+") does not exist")
return response
def handle_info(req):
global rosCas, bag
response = "\n"
print req.command
# return all available topics
if req.command == 'list':
topics = rosCas.getAllTopics()
response += "number of topics stored in CassandraDB: "+str(len(topics)) +"\n"
for i in range(len(topics)):
response += str(i+1)+". "+topics[i]+" ("+str(rosCas.countTopicData(topics[i]))+")"+"\n"
elif req.command == 'status':
response += "list of connected hosts: "+str(rosCas.host) +"\n"
for topic in bag.keys():
if bag[topic].play_status == running.START:
response += topic + ": playback is running\n"
elif bag[topic].play_status == running.PAUSE:
response += topic + ": playback is paused\n"
elif bag[topic].record_status == running.START:
response += topic + ": recording is running\n"
elif bag[topic].record_status == running.PAUSE:
response += topic + ": recording is paused\n"
else:
response += topic + ": is idle\n"
elif req.command == 'info':
for topic in req.topics:
meta = rosCas.getTopicMeta(topic)
for key in meta.keys():
response += key+": "+str(meta[key])+"\n"
response += "column name: "+rosCas.topic2Hash(topic)+"\n"
response += "number of entries: "+str(rosCas.countTopicData(topic))+"\n"
elif req.command == 'cql':
print rosCas.exequteCQL(req.topics[0])
else:
        response += "unknown command: "+req.command+"\n"
return response
if __name__ == "__main__":
host = rospy.get_param('/cassandraBag/host', "localhost")
port = rospy.get_param('/cassandraBag/port', 9160)
keyspace = rospy.get_param('/cassandraBag/keyspace', "test")
rosCas = rc.RosCassandra(host, port)
rospy.loginfo("connected to Cassandra on %s:%d"%(host,port))
# rosCas.dropKeyspace(keyspace)
if not rosCas.connectToKeyspace(keyspace):
rosCas.createKeyspace(keyspace)
rosCas.connectToKeyspace(keyspace)
rospy.loginfo("connected to Keyspace \"%s\""%(keyspace))
bag = {}
rospy.init_node('cassandraBag')
service = {}
service['record'] = rospy.Service('cassandra_record', record, handle_record)
service['play'] = rospy.Service('cassandra_play', play, handle_play)
service['delete'] = rospy.Service('cassandra_delete', delete, handle_delete)
service['info'] = rospy.Service('cassandra_info', info, handle_info)
service['truncate'] = rospy.Service('cassandra_truncate', truncate, handle_truncate)
rospy.loginfo("start listening ... ")
rospy.spin()
for _bag in bag.itervalues():
_bag.exit()
rosCas.disconnect()
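# --- Illustrative usage sketch (not part of the original node) ---
# A minimal example of how another node might start recording a topic through the
# 'cassandra_record' service defined above. The request field names (topics, record,
# start_time, stop_time, filter, ttl, cassandra_format) are inferred from
# handle_record(), and the values below are placeholders; consult
# cassandra_ros/srv/record.srv for the authoritative definition.
#
#   import rospy
#   from cassandra_ros.srv import record
#
#   rospy.wait_for_service('cassandra_record')
#   start_recording = rospy.ServiceProxy('cassandra_record', record)
#   start_recording(topics=['/chatter'], record=1,
#                   start_time=0, stop_time=4294967295,
#                   filter='', ttl=0, cassandra_format='json')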
| 33.148571
| 129
| 0.542837
|
794a0d9fca9abb2f525a97c86b5f0d992b6becda
| 55,114
|
py
|
Python
|
Packs/DNSDB/Integrations/DNSDB_v2/DNSDB_v2_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799
|
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/DNSDB/Integrations/DNSDB_v2/DNSDB_v2_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317
|
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/DNSDB/Integrations/DNSDB_v2/DNSDB_v2_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297
|
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
# Copyright (c) 2020 by Farsight Security, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import textwrap
import time
import DNSDB_v2 as DNSDB
import pytest
import CommonServerPython
class TestClient(object):
def test_headers(self, requests_mock):
apikey = 'abcdef'
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, apikey)
requests_mock.get(
'{server}/dnsdb/v2/rate_limit?swclient={swclient}&version={version}'.format(
server=DNSDB.DEFAULT_DNSDB_SERVER,
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
),
json={},
request_headers={
'Accept': 'application/x-ndjson',
'X-API-Key': apikey,
})
c.rate_limit()
def test_rate_limit(self, requests_mock):
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
requests_mock.get(
'{server}/dnsdb/v2/rate_limit?swclient={swclient}&version={version}'.format(
server=DNSDB.DEFAULT_DNSDB_SERVER,
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
),
json={})
c.rate_limit()
def test_lookup_rrset(self, requests_mock):
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
records = [
'{"count":1820,"zone_time_first":1374250920,"zone_time_last":1589472138,"rrname":"farsightsecurity.com.",'
'"rrtype":"NS","bailiwick":"com.","rdata":["ns5.dnsmadeeasy.com.","ns6.dnsmadeeasy.com.","ns7.dnsmadeeasy'
'.com."]}',
'{"count":6350,"time_first":1380123423,"time_last":1427869045,"rrname":"farsightsecurity.com.","rrtype":"'
'A","bailiwick":"farsightsecurity.com.","rdata":["66.160.140.81"]}',
]
name = 'farsightsecurity.com'
requests_mock.get(
'{server}/dnsdb/v2/lookup/{mode}/{type}/{name}?swclient={swclient}&version={version}'.format(
server=DNSDB.DEFAULT_DNSDB_SERVER,
mode='rrset',
type='name',
name=name,
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
),
text=_saf_wrap(records))
for rrset in c.lookup_rrset(name):
assert rrset == json.loads(records[0])
records = records[1:]
assert len(records) == 0
def test_summarize_rrset(self, requests_mock):
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
record = '{"count":6350,"num_results":3,"time_first":1380123423,"time_last":1427869045}'
name = 'farsightsecurity.com'
requests_mock.get(
'{server}/dnsdb/v2/summarize/{mode}/{type}/{name}?swclient={swclient}&version={version}'.format(
server=DNSDB.DEFAULT_DNSDB_SERVER,
mode='rrset',
type='name',
name=name,
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
),
text=_saf_wrap([record]))
rrset = c.summarize_rrset(name)
assert rrset == json.loads(record)
def test_summarize_rrset_empty(self, requests_mock):
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
name = 'farsightsecurity.com'
requests_mock.get(
'{server}/dnsdb/v2/summarize/{mode}/{type}/{name}?swclient={swclient}&version={version}'.format(
server=DNSDB.DEFAULT_DNSDB_SERVER,
mode='rrset',
type='name',
name=name,
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
),
text='')
with pytest.raises(DNSDB.QueryError):
c.summarize_rrset(name)
def test_rrset_rrtype(self, requests_mock):
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
records = [
'{"count":6350,"time_first":1380123423,"time_last":1427869045,"rrname":"farsightsecurity.com.","rrtype":"A"'
',"bailiwick":"farsightsecurity.com.","rdata":["66.160.140.81"]}',
'{"count":36770,"time_first":1427897872,"time_last":1538008183,"rrname":"farsightsecurity.com.","rrtype":"A'
'","bailiwick":"farsightsecurity.com.","rdata":["104.244.13.104"]}',
'{"count":6428,"time_first":1538047094,"time_last":1589544286,"rrname":"farsightsecurity.com.","rrtype":"A"'
',"bailiwick":"farsightsecurity.com.","rdata":["104.244.14.108"]}',
'{"count":628,"time_first":1374098930,"time_last":1380124067,"rrname":"farsightsecurity.com.","rrtype":"A",'
'"bailiwick":"farsightsecurity.com.","rdata":["149.20.4.207"]}',
]
name = 'farsightsecurity.com'
rrtype = 'A'
requests_mock.get(
'{server}/dnsdb/v2/lookup/{mode}/{type}/{name}/{rrtype}?swclient={swclient}&version={version}'.format(
server=DNSDB.DEFAULT_DNSDB_SERVER,
mode='rrset',
type='name',
name=name,
rrtype=rrtype,
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
),
text=_saf_wrap(records))
for rrset in c.lookup_rrset(name, rrtype=rrtype):
assert rrset == json.loads(records[0])
records = records[1:]
assert len(records) == 0
def test_rrset_bailiwick(self, requests_mock):
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
records = [
'{"count":19,"zone_time_first":1372609301,"zone_time_last":1374164567,"rrname":"farsightsecurity.com.","rrt'
'ype":"NS","bailiwick":"com.","rdata":["ns.lah1.vix.com.","ns1.isc-sns.net.","ns2.isc-sns.com.","ns3.isc-sn'
's.info."]}',
'{"count":157,"zone_time_first":1359047885,"zone_time_last":1372522741,"rrname":"farsightsecurity.com.","rr'
'type":"NS","bailiwick":"com.","rdata":["ns.sjc1.vix.com.","ns.sql1.vix.com."]}',
'{"count":1820,"zone_time_first":1374250920,"zone_time_last":1589472138,"rrname":"farsightsecurity.com.","r'
'rtype":"NS","bailiwick":"com.","rdata":["ns5.dnsmadeeasy.com.","ns6.dnsmadeeasy.com.","ns7.dnsmadeeasy.com'
'."]}',
'{"count":58,"time_first":1372688083,"time_last":1374165919,"rrname":"farsightsecurity.com.","rrtype":"NS",'
'"bailiwick":"com.","rdata":["ns.lah1.vix.com.","ns1.isc-sns.net.","ns2.isc-sns.com.","ns3.isc-sns.info."]'
'}',
'{"count":17,"time_first":1360364071,"time_last":1372437672,"rrname":"farsightsecurity.com.","rrtype":"NS",'
'"bailiwick":"com.","rdata":["ns.sjc1.vix.com.","ns.sql1.vix.com."]}',
'{"count":853787,"time_first":1374172950,"time_last":1589549475,"rrname":"farsightsecurity.com.","rrtype":"'
'NS","bailiwick":"com.","rdata":["ns5.dnsmadeeasy.com.","ns6.dnsmadeeasy.com.","ns7.dnsmadeeasy.com."]}',
]
name = 'farsightsecurity.com'
bailiwick = 'com'
requests_mock.get(
'{server}/dnsdb/v2/lookup/{mode}/{type}/{name}/{rrtype}/{bailiwick}?swclient={swclient}&version={version}'.format( # noqa: E501
server=DNSDB.DEFAULT_DNSDB_SERVER,
mode='rrset',
type='name',
name=name,
rrtype='ANY',
bailiwick=bailiwick,
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
),
text=_saf_wrap(records))
for rrset in c.lookup_rrset(name, bailiwick=bailiwick):
assert rrset == json.loads(records[0])
records = records[1:]
assert len(records) == 0
def test_rrset_rrtype_bailiwick(self, requests_mock):
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
records = [
'{"count":19,"zone_time_first":1372609301,"zone_time_last":1374164567,"rrname":"farsightsecurity.com.","rrt'
'ype":"NS","bailiwick":"com.","rdata":["ns.lah1.vix.com.","ns1.isc-sns.net.","ns2.isc-sns.com.","ns3.isc-sn'
's.info."]}',
'{"count":157,"zone_time_first":1359047885,"zone_time_last":1372522741,"rrname":"farsightsecurity.com.","rr'
'type":"NS","bailiwick":"com.","rdata":["ns.sjc1.vix.com.","ns.sql1.vix.com."]}',
'{"count":1820,"zone_time_first":1374250920,"zone_time_last":1589472138,"rrname":"farsightsecurity.com.","r'
'rtype":"NS","bailiwick":"com.","rdata":["ns5.dnsmadeeasy.com.","ns6.dnsmadeeasy.com.","ns7.dnsmadeeasy.com'
'."]}',
'{"count":58,"time_first":1372688083,"time_last":1374165919,"rrname":"farsightsecurity.com.","rrtype":"NS",'
'"bailiwick":"com.","rdata":["ns.lah1.vix.com.","ns1.isc-sns.net.","ns2.isc-sns.com.","ns3.isc-sns.info."]'
'}',
'{"count":17,"time_first":1360364071,"time_last":1372437672,"rrname":"farsightsecurity.com.","rrtype":"NS",'
'"bailiwick":"com.","rdata":["ns.sjc1.vix.com.","ns.sql1.vix.com."]}',
'{"count":853787,"time_first":1374172950,"time_last":1589549475,"rrname":"farsightsecurity.com.","rrtype":"'
'NS","bailiwick":"com.","rdata":["ns5.dnsmadeeasy.com.","ns6.dnsmadeeasy.com.","ns7.dnsmadeeasy.com."]}',
]
name = 'farsightsecurity.com'
rrtype = 'NS'
bailiwick = 'com'
requests_mock.get(
'{server}/dnsdb/v2/lookup/{mode}/{type}/{name}/{rrtype}/{bailiwick}?swclient={swclient}&version={version}'.format( # noqa: E501
server=DNSDB.DEFAULT_DNSDB_SERVER,
mode='rrset',
type='name',
name=name,
rrtype=rrtype,
bailiwick=bailiwick,
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
),
text=_saf_wrap(records))
for rrset in c.lookup_rrset(name, rrtype=rrtype, bailiwick=bailiwick):
assert rrset == json.loads(records[0])
records = records[1:]
assert len(records) == 0
def test_lookup_rdata_name(self, requests_mock):
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
records = [
'{"count": 7, "time_first": 1380044973, "time_last": 1380141734, "rrname": "207.4.20.149.in-addr.fsi.io.",'
' "rrtype": "PTR", "rdata": "farsightsecurity.com."}',
'{"count": 3, "time_first": 1372650830, "time_last": 1375220475, "rrname": "7.0.2.0.0.0.0.0.0.0.0.0.0.0.0.'
'0.6.6.0.0.1.0.0.0.8.f.4.0.1.0.0.2.ip6.arpa.", "rrtype": "PTR", "rdata": "farsightsecurity.com."}',
'{"count": 11, "time_first": 1380141403, "time_last": 1381263825, "rrname": "81.64-26.140.160.66.in-addr.a'
'rpa.", "rrtype": "PTR", "rdata": "farsightsecurity.com."}',
'{"count": 4, "time_first": 1373922472, "time_last": 1374071997, "rrname": "207.192-26.4.20.149.in-addr.ar'
'pa.", "rrtype": "PTR", "rdata": "farsightsecurity.com."}',
]
name = 'farsightsecurity.com'
requests_mock.get(
'{server}/dnsdb/v2/lookup/{mode}/{type}/{name}?swclient={swclient}&version={version}'.format(
server=DNSDB.DEFAULT_DNSDB_SERVER,
mode='rdata',
type='name',
name=name,
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
),
text=_saf_wrap(records))
for rrset in c.lookup_rdata_name(name):
assert rrset == json.loads(records[0])
records = records[1:]
assert len(records) == 0
def test_summarize_rdata_name(self, requests_mock):
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
record = '{"count": 7, "num_results": 5, "time_first": 1380044973, "time_last": 1380141734}'
name = 'farsightsecurity.com'
requests_mock.get(
'{server}/dnsdb/v2/summarize/{mode}/{type}/{name}?swclient={swclient}&version={version}'.format(
server=DNSDB.DEFAULT_DNSDB_SERVER,
mode='rdata',
type='name',
name=name,
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
),
text=_saf_wrap([record]))
rrset = c.summarize_rdata_name(name)
assert rrset == json.loads(record)
def test_summarize_rdata_name_empty(self, requests_mock):
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
name = 'farsightsecurity.com'
requests_mock.get(
'{server}/dnsdb/v2/summarize/{mode}/{type}/{name}?swclient={swclient}&version={version}'.format(
server=DNSDB.DEFAULT_DNSDB_SERVER,
mode='rdata',
type='name',
name=name,
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
),
text='')
with pytest.raises(DNSDB.QueryError):
c.summarize_rdata_name(name)
def test_rdata_name_rrtype(self, requests_mock):
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
records = [
'{"count": 7, "time_first": 1380044973, "time_last": 1380141734, "rrname": "207.4.20.149.in-addr.fsi.io.",'
' "rrtype": "PTR", "rdata": "farsightsecurity.com."}',
'{"count": 3, "time_first": 1372650830, "time_last": 1375220475, "rrname": "7.0.2.0.0.0.0.0.0.0.0.0.0.0.0.'
'0.6.6.0.0.1.0.0.0.8.f.4.0.1.0.0.2.ip6.arpa.", "rrtype": "PTR", "rdata": "farsightsecurity.com."}',
'{"count": 11, "time_first": 1380141403, "time_last": 1381263825, "rrname": "81.64-26.140.160.66.in-addr.a'
'rpa.", "rrtype": "PTR", "rdata": "farsightsecurity.com."}',
'{"count": 4, "time_first": 1373922472, "time_last": 1374071997, "rrname": "207.192-26.4.20.149.in-addr.ar'
'pa.", "rrtype": "PTR", "rdata": "farsightsecurity.com."}',
]
name = 'farsightsecurity.com'
rrtype = 'PTR'
requests_mock.get(
'{server}/dnsdb/v2/lookup/{mode}/{type}/{name}/{rrtype}?swclient={swclient}&version={version}'.format(
server=DNSDB.DEFAULT_DNSDB_SERVER,
mode='rdata',
type='name',
name=name,
rrtype=rrtype,
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
),
text=_saf_wrap(records))
for rrset in c.lookup_rdata_name(name, rrtype=rrtype):
assert rrset == json.loads(records[0])
records = records[1:]
assert len(records) == 0
def test_lookup_rdata_ip(self, requests_mock):
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
records = [
'{"count":51,"time_first":1403544512,"time_last":1417464427,"rrname":"farsighsecurity.com.","rrtype":"A","'
'rdata":"66.160.140.81"}',
'{"count":4,"time_first":1404485629,"time_last":1406648461,"rrname":"www.farsighsecurity.com.","rrtype":"A'
'","rdata":"66.160.140.81"}',
'{"count":6350,"time_first":1380123423,"time_last":1427869045,"rrname":"farsightsecurity.com.","rrtype":"A'
'","rdata":"66.160.140.81"}',
'{"count":5059,"time_first":1380139330,"time_last":1427881899,"rrname":"www.farsightsecurity.com.","rrtype'
'":"A","rdata":"66.160.140.81"}',
'{"count":1523,"time_first":1381265271,"time_last":1427807985,"rrname":"archive.farsightsecurity.com.","rr'
'type":"A","rdata":"66.160.140.81"}',
]
ip = '66.160.140.81'
requests_mock.get(
'{server}/dnsdb/v2/lookup/{mode}/{type}/{ip}?swclient={swclient}&version={version}'.format(
server=DNSDB.DEFAULT_DNSDB_SERVER,
mode='rdata',
type='ip',
ip=ip,
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
),
text=_saf_wrap(records))
for rrset in c.lookup_rdata_ip(ip):
assert rrset == json.loads(records[0])
records = records[1:]
assert len(records) == 0
def test_summarize_rdata_ip(self, requests_mock):
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
record = '{"count":51,"num_results":5,"time_first":1403544512,"time_last":1417464427}'
ip = '66.160.140.81'
requests_mock.get(
'{server}/dnsdb/v2/summarize/{mode}/{type}/{ip}?swclient={swclient}&version={version}'.format(
server=DNSDB.DEFAULT_DNSDB_SERVER,
mode='rdata',
type='ip',
ip=ip,
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
),
text=_saf_wrap([record]))
rrset = c.summarize_rdata_ip(ip)
assert rrset == json.loads(record)
def test_summarize_rdata_ip_empty(self, requests_mock):
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
ip = '66.160.140.81'
requests_mock.get(
'{server}/dnsdb/v2/summarize/{mode}/{type}/{ip}?swclient={swclient}&version={version}'.format(
server=DNSDB.DEFAULT_DNSDB_SERVER,
mode='rdata',
type='ip',
ip=ip,
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
),
text='')
with pytest.raises(DNSDB.QueryError):
c.summarize_rdata_ip(ip)
def test_lookup_rdata_raw(self, requests_mock):
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
records = [
'{"count": 7, "time_first": 1380044973, "time_last": 1380141734, "rrname": "207.4.20.149.in-addr.fsi.io.",'
' "rrtype": "PTR", "rdata": "farsightsecurity.com."}',
'{"count": 3, "time_first": 1372650830, "time_last": 1375220475, "rrname": "7.0.2.0.0.0.0.0.0.0.0.0.0.0.0.'
'0.6.6.0.0.1.0.0.0.8.f.4.0.1.0.0.2.ip6.arpa.", "rrtype": "PTR", "rdata": "farsightsecurity.com."}',
'{"count": 11, "time_first": 1380141403, "time_last": 1381263825, "rrname": "81.64-26.140.160.66.in-addr.a'
'rpa.", "rrtype": "PTR", "rdata": "farsightsecurity.com."}',
'{"count": 4, "time_first": 1373922472, "time_last": 1374071997, "rrname": "207.192-26.4.20.149.in-addr.ar'
'pa.", "rrtype": "PTR", "rdata": "farsightsecurity.com."}',
]
raw = '0123456789ABCDEF'
requests_mock.get(
'{server}/dnsdb/v2/lookup/{mode}/{type}/{raw}?swclient={swclient}&version={version}'.format(
server=DNSDB.DEFAULT_DNSDB_SERVER,
mode='rdata',
type='raw',
raw=DNSDB.quote(raw),
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
),
text=_saf_wrap(records))
for rrset in c.lookup_rdata_raw(raw):
assert rrset == json.loads(records[0])
records = records[1:]
assert len(records) == 0
def test_summarize_rdata_raw(self, requests_mock):
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
record = '{"count": 7, "num_results": 5, "time_first": 1380044973, "time_last": 1380141734}'
raw = '0123456789ABCDEF'
requests_mock.get(
'{server}/dnsdb/v2/summarize/{mode}/{type}/{raw}?swclient={swclient}&version={version}'.format(
server=DNSDB.DEFAULT_DNSDB_SERVER,
mode='rdata',
type='raw',
raw=DNSDB.quote(raw),
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
),
text=_saf_wrap([record]))
rrset = c.summarize_rdata_raw(raw)
assert rrset == json.loads(record)
def test_summarize_rdata_raw_empty(self, requests_mock):
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
raw = '0123456789ABCDEF'
requests_mock.get(
'{server}/dnsdb/v2/summarize/{mode}/{type}/{raw}?swclient={swclient}&version={version}'.format(
server=DNSDB.DEFAULT_DNSDB_SERVER,
mode='rdata',
type='raw',
raw=DNSDB.quote(raw),
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
),
text='')
with pytest.raises(DNSDB.QueryError):
c.summarize_rdata_raw(raw)
def test_rdata_raw_rrtype(self, requests_mock):
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
records = [
'{"count": 7, "time_first": 1380044973, "time_last": 1380141734, "rrname": "207.4.20.149.in-addr.fsi.io.",'
' "rrtype": "PTR", "rdata": "farsightsecurity.com."}',
'{"count": 3, "time_first": 1372650830, "time_last": 1375220475, "rrname": "7.0.2.0.0.0.0.0.0.0.0.0.0.0.0.'
'0.6.6.0.0.1.0.0.0.8.f.4.0.1.0.0.2.ip6.arpa.", "rrtype": "PTR", "rdata": "farsightsecurity.com."}',
'{"count": 11, "time_first": 1380141403, "time_last": 1381263825, "rrname": "81.64-26.140.160.66.in-addr.a'
'rpa.", "rrtype": "PTR", "rdata": "farsightsecurity.com."}',
'{"count": 4, "time_first": 1373922472, "time_last": 1374071997, "rrname": "207.192-26.4.20.149.in-addr.ar'
'pa.", "rrtype": "PTR", "rdata": "farsightsecurity.com."}',
]
raw = '0123456789ABCDEF'
rrtype = 'PTR'
requests_mock.get(
'{server}/dnsdb/v2/lookup/{mode}/{type}/{raw}/{rrtype}?swclient={swclient}&version={version}'.format(
server=DNSDB.DEFAULT_DNSDB_SERVER,
mode='rdata',
type='raw',
raw=raw,
rrtype=rrtype,
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
),
text=_saf_wrap(records))
for rrset in c.lookup_rdata_raw(raw, rrtype=rrtype):
assert rrset == json.loads(records[0])
records = records[1:]
assert len(records) == 0
def test_flex(self, requests_mock):
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
records = [
'{"rdata": "10 lists.farsightsecurity.com.", "rrtype": "MX", "raw_rdata": "000A056C69737473106661727369676874736563757269747903636F6D00"}', # noqa: E501
'{"rdata": "10 support.farsightsecurity.com.", "rrtype": "MX", "raw_rdata": "000A07737570706F7274106661727369676874736563757269747903636F6D00"}', # noqa: E501
'{"rdata": "x.support.farsightsecurity.com.", "rrtype": "CNAME", "raw_rdata": "017807737570706F7274106661727369676874736563757269747903636F6D00"}', # noqa: E501
]
method = 'regex'
key = 'rdata'
value = 'farsightsecurity'
requests_mock.get(
'{server}/dnsdb/v2/{method}/{key}/{value}?swclient={swclient}&version={version}'.format(
server=DNSDB.DEFAULT_DNSDB_SERVER,
method=method,
key=key,
value=value,
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
),
text=_saf_wrap(records))
for rrset in c.flex(method, key, value):
assert rrset == json.loads(records[0])
records = records[1:]
assert len(records) == 0
def test_500(self, requests_mock):
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
name = 'farsightsecurity.com'
requests_mock.get(
'{server}/dnsdb/v2/lookup/{mode}/{type}/{name}?swclient={swclient}&version={version}'.format(
server=DNSDB.DEFAULT_DNSDB_SERVER,
mode='rrset',
type='name',
name=name,
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
),
status_code=500, text='{}\nerror')
with pytest.raises(CommonServerPython.DemistoException):
for rrset in c.lookup_rrset(name):
pytest.fail('received {0}'.format(rrset)) # pragma: no cover
def test_limit(self, requests_mock):
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
name = 'farsightsecurity.com'
limit = 100
requests_mock.get(
'{server}/dnsdb/v2/lookup/{mode}/{type}/{name}?limit={limit}&swclient={swclient}&version={version}'.format(
server=DNSDB.DEFAULT_DNSDB_SERVER,
mode='rrset',
type='name',
name=name,
limit=limit,
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
),
text=_saf_wrap([]))
for rrset in c.lookup_rrset(name, limit=limit):
pytest.fail('received {0}'.format(rrset)) # pragma: no cover
def test_time_first_before(self, requests_mock):
self._test_time_param(requests_mock, "time_first_before")
def test_time_first_after(self, requests_mock):
self._test_time_param(requests_mock, "time_first_after")
def test_time_last_before(self, requests_mock):
self._test_time_param(requests_mock, "time_last_before")
def test_time_last_after(self, requests_mock):
self._test_time_param(requests_mock, "time_last_after")
def test_aggr(self, requests_mock):
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
name = 'farsightsecurity.com'
aggr = 100
requests_mock.get(
'{server}/dnsdb/v2/lookup/{mode}/{type}/{name}?aggr={aggr}&swclient={swclient}&version={version}'.format(
server=DNSDB.DEFAULT_DNSDB_SERVER,
mode='rrset',
type='name',
name=name,
aggr=aggr,
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
),
text=_saf_wrap([]))
for rrset in c.lookup_rrset(name, aggr=aggr):
pytest.fail('received {0}'.format(rrset)) # pragma: no cover
def test_offset(self, requests_mock):
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
name = 'farsightsecurity.com'
offset = 100
requests_mock.get(
'{server}/dnsdb/v2/lookup/{mode}/{type}/{name}?offset={offset}&swclient={swclient}&version={version}'.format( # noqa: E501
server=DNSDB.DEFAULT_DNSDB_SERVER,
mode='rrset',
type='name',
name=name,
offset=offset,
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
),
text=_saf_wrap([]))
for rrset in c.lookup_rrset(name, offset=offset):
pytest.fail('received {0}'.format(rrset)) # pragma: no cover
def test_max_count(self, requests_mock):
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
name = 'farsightsecurity.com'
max_count = 100
requests_mock.get(
'{server}/dnsdb/v2/summarize/{mode}/{type}/{name}?max_count={max_count}'
'&swclient={swclient}&version={version}'.format(server=DNSDB.DEFAULT_DNSDB_SERVER,
mode='rrset',
type='name',
name=name,
max_count=max_count,
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION),
text=_saf_wrap([]))
with pytest.raises(DNSDB.QueryError):
for rrset in c.summarize_rrset(name, max_count=max_count):
pytest.fail('received {0}'.format(rrset)) # pragma: no cover
@staticmethod
def _test_time_param(requests_mock, param: str):
c = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
name = 'farsightsecurity.com'
when = time.time()
requests_mock.get(
'{server}/dnsdb/v2/lookup/{mode}/{type}/{name}?{param}={when}&swclient={swclient}&version={version}'.format(
server=DNSDB.DEFAULT_DNSDB_SERVER,
mode='rrset',
type='name',
name=name,
param=param,
when=when,
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
),
text=_saf_wrap([]))
for rrset in c.lookup_rrset(name, **{param: when}):
pytest.fail('received {0}'.format(rrset)) # pragma: no cover
class TestBuildResultContext(object):
def test_lookup_rrset(self):
self._run_test(
{
"count": 5059,
"time_first": 1380139330,
"time_last": 1427881899,
"rrname": "www.farsightsecurity.com.",
"rrtype": "A",
"bailiwick": "farsightsecurity.com.",
"rdata": ["66.160.140.81", '66.160.140.82']
},
{
'RRName': 'www.farsightsecurity.com',
'RRType': 'A',
'Bailiwick': 'farsightsecurity.com',
'RData': ['66.160.140.81', '66.160.140.82'],
'Count': 5059,
'TimeFirst': '2013-09-25T20:02:10Z',
'TimeLast': '2015-04-01T09:51:39Z',
'FromZoneFile': False,
}
)
def test_lookup_rdata(self):
self._run_test({
"count": 5059,
"time_first": 1380139330,
"time_last": 1427881899,
"rrname": "www.farsightsecurity.com.",
"rrtype": "A",
"bailiwick": "farsightsecurity.com.",
"rdata": "66.160.140.81",
}, {
'RRName': 'www.farsightsecurity.com',
'RRType': 'A',
'Bailiwick': 'farsightsecurity.com',
'RData': '66.160.140.81',
'Count': 5059,
'TimeFirst': '2013-09-25T20:02:10Z',
'TimeLast': '2015-04-01T09:51:39Z',
'FromZoneFile': False,
})
def test_flex(self):
self._run_test({
"rdata": "10 lists.farsightsecurity.com",
"raw_rdata": "000A056C69737473106661727369676874736563757269747903636F6D00",
"rrtype": "MX",
}, {
"RData": "10 lists.farsightsecurity.com",
"RawRData": "000A056C69737473106661727369676874736563757269747903636F6D00",
"RRType": "MX",
})
def test_summarize(self):
self._run_test({
"count": 1127,
"num_results": 2,
"zone_time_first": 1557859313,
"zone_time_last": 1560537333
}, {
'Count': 1127,
'NumResults': 2,
'TimeFirst': '2019-05-14T18:41:53Z',
'TimeLast': '2019-06-14T18:35:33Z',
'FromZoneFile': True,
})
def test_idna(self):
self._run_test({
'rrname': 'www.xn--frsight-exa.com.',
'bailiwick': 'xn--frsight-exa.com.',
}, {
'RRName': 'www.xn--frsight-exa.com',
'Bailiwick': 'xn--frsight-exa.com',
})
@staticmethod
def _run_test(input, expected):
assert DNSDB.build_result_context(input) == expected
class TestBuildLimitsContext(object):
def test_no_rate(self):
with pytest.raises(ValueError):
DNSDB.build_rate_limits_context({})
def test_time_based_quota(self):
self._run_test(
{
"rate": {
"reset": 1433980800,
"limit": 1000,
"remaining": 999,
}
},
{
'Reset': '2015-06-11T00:00:00Z',
'Limit': 1000,
'Remaining': 999,
}
)
def test_block_based_quota(self):
self._run_test(
{
"rate": {
"reset": "n/a",
"burst_size": 10,
"expires": 1555370914,
"burst_window": 300,
"offset_max": 3000000,
"results_max": 256,
"limit": 600,
"remaining": 8,
}
}, {
'NeverResets': True,
'BurstSize': 10,
'Expires': '2019-04-15T23:28:34Z',
'BurstWindow': 300,
'OffsetMax': 3000000,
'ResultsMax': 256,
'Limit': 600,
'Remaining': 8,
})
def test_unlimited(self):
self._run_test(
{
"rate": {
"reset": "n/a",
"limit": "unlimited",
"remaining": "n/a"
}
},
{
'Unlimited': True,
}
)
@staticmethod
def _run_test(input: dict, expected: dict):
assert DNSDB.build_rate_limits_context(input) == expected
class TestRDataCommand:
def test_empty(self, requests_mock):
args = {
'type': 'name',
'value': 'farsightsecurity.com',
'limit': '10',
}
input = ''
expected_readable = textwrap.dedent('''\
### Farsight DNSDB Lookup
**No entries.**
''')
expected_output_prefix = 'DNSDB.Record'
expected_outputs = []
self._run_test(requests_mock, args, input, expected_readable, expected_output_prefix, expected_outputs)
def test_name(self, requests_mock):
args = {
'type': 'name',
'value': 'ns5.dnsmadeeasy.com',
'limit': '10',
}
input = [
'{"count":1078,"zone_time_first":1374250920,"zone_time_last":1468253883,"rrname":"farsightsecurity.com.","rrtype":"NS","rdata":"ns5.dnsmadeeasy.com."}', # noqa: E501
'{"count":706617,"time_first":1374096380,"time_last":1468334926,"rrname":"farsightsecurity.com.","rrtype":"NS","rdata":"ns5.dnsmadeeasy.com."}', # noqa: E501
]
expected_readable = textwrap.dedent('''\
### Farsight DNSDB Lookup
|RRName|RRType|RData|Count|TimeFirst|TimeLast|FromZoneFile|
|---|---|---|---|---|---|---|
| farsightsecurity.com | NS | ns5.dnsmadeeasy.com. | 1078 | 2013-07-19T16:22:00Z | 2016-07-11T16:18:03Z | True |
| farsightsecurity.com | NS | ns5.dnsmadeeasy.com. | 706617 | 2013-07-17T21:26:20Z | 2016-07-12T14:48:46Z | False |
''') # noqa: E501
expected_output_prefix = 'DNSDB.Record'
expected_outputs = [
{
'Count': 1078,
'RData': 'ns5.dnsmadeeasy.com.',
'RRName': 'farsightsecurity.com',
'RRType': 'NS',
'TimeFirst': '2013-07-19T16:22:00Z',
'TimeLast': '2016-07-11T16:18:03Z',
'FromZoneFile': True,
},
{
'Count': 706617,
'RData': 'ns5.dnsmadeeasy.com.',
'RRName': 'farsightsecurity.com',
'RRType': 'NS',
'TimeFirst': '2013-07-17T21:26:20Z',
'TimeLast': '2016-07-12T14:48:46Z',
'FromZoneFile': False,
}
]
self._run_test(requests_mock, args, input, expected_readable, expected_output_prefix, expected_outputs)
def test_ip(self, requests_mock):
args = {
'type': 'ip',
'value': '104.244.13.104',
'limit': '10',
}
input = [
'{"count":24,"time_first":1433550785,"time_last":1468312116,"rrname":"www.farsighsecurity.com.","rrtype":"A","rdata":"104.244.13.104"}', # noqa: E501
'{"count":9429,"zone_time_first":1427897872,"zone_time_last":1468333042,"rrname":"farsightsecurity.com.","rrtype":"A","rdata":"104.244.13.104"}' # noqa: E501
]
expected_readable = textwrap.dedent('''\
### Farsight DNSDB Lookup
|RRName|RRType|RData|Count|TimeFirst|TimeLast|FromZoneFile|
|---|---|---|---|---|---|---|
| www.farsighsecurity.com | A | 104.244.13.104 | 24 | 2015-06-06T00:33:05Z | 2016-07-12T08:28:36Z | False |
| farsightsecurity.com | A | 104.244.13.104 | 9429 | 2015-04-01T14:17:52Z | 2016-07-12T14:17:22Z | True |
''')
expected_prefix = 'DNSDB.Record'
expected_outputs = [
{'Count': 24,
'FromZoneFile': False,
'RData': '104.244.13.104',
'RRName': 'www.farsighsecurity.com',
'RRType': 'A',
'TimeFirst': '2015-06-06T00:33:05Z',
'TimeLast': '2016-07-12T08:28:36Z'},
{
'Count': 9429,
'FromZoneFile': True,
'RData': '104.244.13.104',
'RRName': 'farsightsecurity.com',
'RRType': 'A',
'TimeFirst': '2015-04-01T14:17:52Z',
'TimeLast': '2016-07-12T14:17:22Z'
}
]
self._run_test(requests_mock, args, input, expected_readable, expected_prefix, expected_outputs)
def test_raw(self, requests_mock):
args = {
'type': 'raw',
'value': '0123456789ABCDEF',
'limit': '10',
}
input = [
'{"count":1078,"zone_time_first":1374250920,"zone_time_last":1468253883,"rrname":"farsightsecurity.com.","rrtype":"NS","rdata":"ns5.dnsmadeeasy.com."}', # noqa: E501
'{"count":706617,"time_first":1374096380,"time_last":1468334926,"rrname":"farsightsecurity.com.","rrtype":"NS","rdata":"ns5.dnsmadeeasy.com."}', # noqa: E501
]
expected_readable = textwrap.dedent('''\
### Farsight DNSDB Lookup
|RRName|RRType|RData|Count|TimeFirst|TimeLast|FromZoneFile|
|---|---|---|---|---|---|---|
| farsightsecurity.com | NS | ns5.dnsmadeeasy.com. | 1078 | 2013-07-19T16:22:00Z | 2016-07-11T16:18:03Z | True |
| farsightsecurity.com | NS | ns5.dnsmadeeasy.com. | 706617 | 2013-07-17T21:26:20Z | 2016-07-12T14:48:46Z | False |
''') # noqa: E501
expected_output_prefix = 'DNSDB.Record'
expected_outputs = [
{
'Count': 1078,
'RData': 'ns5.dnsmadeeasy.com.',
'RRName': 'farsightsecurity.com',
'RRType': 'NS',
'TimeFirst': '2013-07-19T16:22:00Z',
'TimeLast': '2016-07-11T16:18:03Z',
'FromZoneFile': True,
},
{
'Count': 706617,
'RData': 'ns5.dnsmadeeasy.com.',
'RRName': 'farsightsecurity.com',
'RRType': 'NS',
'TimeFirst': '2013-07-17T21:26:20Z',
'TimeLast': '2016-07-12T14:48:46Z',
'FromZoneFile': False,
}
]
self._run_test(requests_mock, args, input, expected_readable, expected_output_prefix, expected_outputs)
@staticmethod
def _run_test(requests_mock, args: dict, input: list, expected_readable: str, expected_output_prefix: str,
expected_outputs: list):
client = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
requests_mock.get(f'{DNSDB.DEFAULT_DNSDB_SERVER}/dnsdb/v2/lookup/rdata/{args["type"]}/{args["value"]}'
f'?limit={args["limit"]}'
f'&swclient={DNSDB.SWCLIENT}&version={DNSDB.VERSION}',
text=_saf_wrap(input))
for v in args.values():
assert isinstance(v, str)
res = DNSDB.dnsdb_rdata(client, args)
assert res.readable_output == expected_readable
assert res.outputs_prefix == expected_output_prefix
assert res.outputs == expected_outputs
class TestSummarizeRDataCommand:
def test_name(self, requests_mock):
args = {
'type': 'name',
'value': 'www.farsightsecurity.com',
'limit': '2',
'max_count': '5000',
}
input = [
'{"count": 1127, "num_results": 2, "time_first": 1557859313, "time_last": 1560537333}',
]
expected_readable = textwrap.dedent('''\
### Farsight DNSDB Summarize
|Count|NumResults|TimeFirst|TimeLast|
|---|---|---|---|
| 1127 | 2 | 2019-05-14T18:41:53Z | 2019-06-14T18:35:33Z |
''')
expected_output_prefix = 'DNSDB.Summary'
expected_outputs = {
'Count': 1127,
'NumResults': 2,
'TimeFirst': '2019-05-14T18:41:53Z',
'TimeLast': '2019-06-14T18:35:33Z',
'FromZoneFile': False,
}
self._run_test(requests_mock, args, input, expected_readable, expected_output_prefix, expected_outputs)
def test_ip(self, requests_mock):
args = {
'type': 'ip',
'value': '127.0.0.1',
'limit': '2',
'max_count': '5000',
}
input = [
'{"count": 1127, "num_results": 2, "time_first": 1557859313, "time_last": 1560537333}',
]
expected_readable = textwrap.dedent('''\
### Farsight DNSDB Summarize
|Count|NumResults|TimeFirst|TimeLast|
|---|---|---|---|
| 1127 | 2 | 2019-05-14T18:41:53Z | 2019-06-14T18:35:33Z |
''')
expected_output_prefix = 'DNSDB.Summary'
expected_outputs = {
'Count': 1127,
'NumResults': 2,
'TimeFirst': '2019-05-14T18:41:53Z',
'TimeLast': '2019-06-14T18:35:33Z',
'FromZoneFile': False,
}
self._run_test(requests_mock, args, input, expected_readable, expected_output_prefix, expected_outputs)
def test_raw(self, requests_mock):
args = {
'type': 'raw',
'value': '0123456789ABCDEF',
'limit': '2',
'max_count': '5000',
}
input = [
'{"count": 1127, "num_results": 2, "time_first": 1557859313, "time_last": 1560537333}',
]
expected_readable = textwrap.dedent('''\
### Farsight DNSDB Summarize
|Count|NumResults|TimeFirst|TimeLast|
|---|---|---|---|
| 1127 | 2 | 2019-05-14T18:41:53Z | 2019-06-14T18:35:33Z |
''')
expected_output_prefix = 'DNSDB.Summary'
expected_outputs = {
'Count': 1127,
'NumResults': 2,
'TimeFirst': '2019-05-14T18:41:53Z',
'TimeLast': '2019-06-14T18:35:33Z',
'FromZoneFile': False,
}
self._run_test(requests_mock, args, input, expected_readable, expected_output_prefix, expected_outputs)
def test_zone(self, requests_mock):
args = {
'type': 'name',
'value': 'www.farsightsecurity.com',
'limit': '10',
'max_count': '50',
}
input = [
'{"count": 1127, "num_results": 2, "zone_time_first": 1557859313, "zone_time_last": 1560537333}',
]
expected_readable = textwrap.dedent('''\
### Farsight DNSDB Summarize
|Count|NumResults|ZoneTimeFirst|ZoneTimeLast|
|---|---|---|---|
| 1127 | 2 | 2019-05-14T18:41:53Z | 2019-06-14T18:35:33Z |
''')
expected_output_prefix = 'DNSDB.Summary'
expected_outputs = {
'Count': 1127,
'NumResults': 2,
'TimeFirst': '2019-05-14T18:41:53Z',
'TimeLast': '2019-06-14T18:35:33Z',
'FromZoneFile': True,
}
self._run_test(requests_mock, args, input, expected_readable, expected_output_prefix, expected_outputs)
@staticmethod
def _run_test(requests_mock, args: dict, input: dict, expected_readable: str, expected_output_prefix: str,
expected_outputs: dict):
client = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
requests_mock.get(f'{DNSDB.DEFAULT_DNSDB_SERVER}/dnsdb/v2/summarize/rdata/{args["type"]}/{args["value"]}'
f'?limit={args["limit"]}'
f'&max_count={args["max_count"]}'
f'&swclient={DNSDB.SWCLIENT}&version={DNSDB.VERSION}',
text=_saf_wrap(input))
for v in args.values():
assert isinstance(v, str)
res = DNSDB.dnsdb_summarize_rdata(client, args)
assert res.readable_output == expected_readable
assert res.outputs_prefix == expected_output_prefix
assert res.outputs == expected_outputs
class TestRRSetCommand:
def test_empty(self, requests_mock):
args = {
'owner_name': '*.farsightsecurity.com',
'limit': '10',
}
input = []
expected_readable = textwrap.dedent('''\
### Farsight DNSDB Lookup
**No entries.**
''')
expected_output_prefix = 'DNSDB.Record'
expected_outputs = []
self._run_test(requests_mock, args, input, expected_readable, expected_output_prefix, expected_outputs)
def test_a(self, requests_mock):
args = {
'owner_name': '*.farsightsecurity.com',
'limit': '10',
}
input = [
'{"count":5059,"time_first":1380139330,"time_last":1427881899,"rrname":"www.farsightsecurity.com.","rrtype":"A","bailiwick":"farsightsecurity.com.","rdata":["66.160.140.81"]}', # noqa: E501
'{"count":17381,"zone_time_first":1427893644,"zone_time_last":1468329272,"rrname":"farsightsecurity.com.","rrtype":"A","bailiwick":"com.","rdata":["104.244.13.104"]}', # noqa: E501
]
expected_readable = textwrap.dedent('''\
### Farsight DNSDB Lookup
|RRName|RRType|Bailiwick|RData|Count|TimeFirst|TimeLast|FromZoneFile|
|---|---|---|---|---|---|---|---|
| www.farsightsecurity.com | A | farsightsecurity.com | 66.160.140.81 | 5059 | 2013-09-25T20:02:10Z | 2015-04-01T09:51:39Z | False |
| farsightsecurity.com | A | com | 104.244.13.104 | 17381 | 2015-04-01T13:07:24Z | 2016-07-12T13:14:32Z | True |
''') # noqa: E501
expected_output_prefix = 'DNSDB.Record'
expected_outputs = [
{
'Count': 5059,
'RRName': 'www.farsightsecurity.com',
'RRType': 'A',
'RData': ['66.160.140.81'],
'Bailiwick': 'farsightsecurity.com',
'TimeFirst': '2013-09-25T20:02:10Z',
'TimeLast': '2015-04-01T09:51:39Z',
'FromZoneFile': False,
},
{
'Count': 17381,
'RRName': 'farsightsecurity.com',
'RRType': 'A',
'Bailiwick': 'com',
'RData': ['104.244.13.104'],
'TimeFirst': '2015-04-01T13:07:24Z',
'TimeLast': '2016-07-12T13:14:32Z',
'FromZoneFile': True,
}
]
self._run_test(requests_mock, args, input, expected_readable, expected_output_prefix, expected_outputs)
@staticmethod
def _run_test(requests_mock, args: dict, input: list, expected_readable: str, expected_output_prefix: str,
expected_outputs: list):
client = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
requests_mock.get(f'{DNSDB.DEFAULT_DNSDB_SERVER}/dnsdb/v2/lookup/rrset/name/{DNSDB.quote(args["owner_name"])}'
f'?limit={args["limit"]}'
f'&swclient={DNSDB.SWCLIENT}&version={DNSDB.VERSION}',
text=_saf_wrap(input))
for v in args.values():
assert isinstance(v, str)
res = DNSDB.dnsdb_rrset(client, args)
assert res.readable_output == expected_readable
assert res.outputs_prefix == expected_output_prefix
assert res.outputs == expected_outputs
class TestSummarizeRRSetCommand:
def test_1a(self, requests_mock):
args = {
'owner_name': 'www.farsightsecurity.com',
'limit': '2',
'max_count': '5000',
}
input = [
'{"count": 1127, "num_results": 2, "time_first": 1557859313, "time_last": 1560537333}',
]
expected_readable = textwrap.dedent('''\
### Farsight DNSDB Summarize
|Count|NumResults|TimeFirst|TimeLast|
|---|---|---|---|
| 1127 | 2 | 2019-05-14T18:41:53Z | 2019-06-14T18:35:33Z |
''')
expected_output_prefix = 'DNSDB.Summary'
expected_outputs = {
'Count': 1127,
'NumResults': 2,
'TimeFirst': '2019-05-14T18:41:53Z',
'TimeLast': '2019-06-14T18:35:33Z',
'FromZoneFile': False,
}
self._run_test(requests_mock, args, input, expected_readable, expected_output_prefix, expected_outputs)
def test_zone(self, requests_mock):
args = {
'owner_name': 'www.farsightsecurity.com',
'limit': '10',
'max_count': '50',
}
input = [
'{"count": 1127, "num_results": 2, "zone_time_first": 1557859313, "zone_time_last": 1560537333}',
]
expected_readable = textwrap.dedent('''\
### Farsight DNSDB Summarize
|Count|NumResults|ZoneTimeFirst|ZoneTimeLast|
|---|---|---|---|
| 1127 | 2 | 2019-05-14T18:41:53Z | 2019-06-14T18:35:33Z |
''')
expected_output_prefix = 'DNSDB.Summary'
expected_outputs = {
'Count': 1127,
'NumResults': 2,
'TimeFirst': '2019-05-14T18:41:53Z',
'TimeLast': '2019-06-14T18:35:33Z',
'FromZoneFile': True,
}
self._run_test(requests_mock, args, input, expected_readable, expected_output_prefix, expected_outputs)
@staticmethod
def _run_test(requests_mock, args: dict, input: list, expected_readable: str, expected_output_prefix: str,
expected_outputs: dict):
client = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
requests_mock.get(f'{DNSDB.DEFAULT_DNSDB_SERVER}/dnsdb/v2/summarize/rrset/name/{args["owner_name"]}'
f'?limit={args["limit"]}'
f'&max_count={args["max_count"]}'
f'&swclient={DNSDB.SWCLIENT}&version={DNSDB.VERSION}',
text=_saf_wrap(input))
for v in args.values():
assert isinstance(v, str)
res = DNSDB.dnsdb_summarize_rrset(client, args)
assert res.readable_output == expected_readable
assert res.outputs_prefix == expected_output_prefix
assert res.outputs == expected_outputs
class TestRateLimitCommand:
def test_unlimited(self, requests_mock):
self._run_test(requests_mock, {
"rate": {
"reset": "n/a",
"limit": "unlimited",
"remaining": "n/a"
}
}, textwrap.dedent('''\
### Farsight DNSDB Service Limits
|Unlimited|
|---|
| true |
'''))
def test_time_based(self, requests_mock):
self._run_test(requests_mock, {
"rate": {
"reset": 1433980800,
"limit": 1000,
"remaining": 999
}
}, textwrap.dedent('''\
### Farsight DNSDB Service Limits
|Limit|Remaining|Reset|
|---|---|---|
| 1000 | 999 | 2015-06-11T00:00:00Z |
'''))
def test_block_based(self, requests_mock):
self._run_test(requests_mock, {
"rate": {
"reset": "n/a",
"burst_size": 10,
"expires": 1555370914,
"burst_window": 300,
"offset_max": 3000000,
"results_max": 256,
"limit": 600,
"remaining": 8,
}
}, textwrap.dedent('''\
### Farsight DNSDB Service Limits
|Limit|Remaining|Reset|NeverResets|Expires|ResultsMax|OffsetMax|BurstSize|BurstWindow|
|---|---|---|---|---|---|---|---|---|
| 600 | 8 | | true | 2019-04-15T23:28:34Z | 256 | 3000000 | 10 | 300 |
'''))
@staticmethod
def _run_test(requests_mock, input: dict, expected_readable: str):
client = DNSDB.Client(DNSDB.DEFAULT_DNSDB_SERVER, '')
requests_mock.get(
'{server}/dnsdb/v2/rate_limit?swclient={swclient}&version={version}'.format(
server=DNSDB.DEFAULT_DNSDB_SERVER,
swclient=DNSDB.SWCLIENT,
version=DNSDB.VERSION,
), json=input)
# The context is tested in TestBuildLimitsContext
res = DNSDB.dnsdb_rate_limit(client, None)
assert res.readable_output == expected_readable
assert res.outputs_prefix == 'DNSDB.Rate'
assert isinstance(res.outputs, dict)
class TestParseRData:
def test_idna(self):
assert DNSDB.parse_rdata("10 mx.xn--frsight-exa.com.") == "10 mx.fårsight.com."
def test_idna_multi(self):
soa = DNSDB.parse_rdata(
"xn--frsightscurity-lib5e.com. SOA fsi.io. hostmaster.xn--frsight-exa.xn--scurity-bya.com. "
"2014081222 7200 3600 604800 3600")
assert soa == "fårsightsécurity.com. SOA fsi.io. hostmaster.fårsight.sécurity.com. 2014081222 7200 3600 " \
"604800 3600"
def test_idna_spf(self):
assert DNSDB.parse_rdata("include:xn--frsight-exa.com.") == "include:fårsight.com."
def test_idna_dkim(self):
assert DNSDB.parse_rdata("d=xn--frsight-exa.com.") == "d=fårsight.com."
def test_idna_email(self):
assert DNSDB.parse_rdata("test@xn--frsight-exa.com.") == "test@fårsight.com."
def _saf_wrap(records):
return '\n'.join(
['{"cond":"begin"}'] + [f'{{"obj":{r}}}' for r in records] + ['{"cond":"succeeded"}']
)
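# For reference, _saf_wrap(['{"rrtype": "A"}']) produces the begin/obj/succeeded
# framing that the DNSDB API v2 streaming endpoints emit, one JSON object per line:
#
#   {"cond":"begin"}
#   {"obj":{"rrtype": "A"}}
#   {"cond":"succeeded"}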
| 40.764793
| 202
| 0.538067
|
794a0e0878f1001b2c181038cef93b673cab2210
| 1,220
|
py
|
Python
|
kdezero/datasets/sincurve.py
|
kotabrog/K_DeZero
|
e8145a539874956bc235d4577fa38211c01c30ea
|
[
"MIT"
] | null | null | null |
kdezero/datasets/sincurve.py
|
kotabrog/K_DeZero
|
e8145a539874956bc235d4577fa38211c01c30ea
|
[
"MIT"
] | null | null | null |
kdezero/datasets/sincurve.py
|
kotabrog/K_DeZero
|
e8145a539874956bc235d4577fa38211c01c30ea
|
[
"MIT"
] | null | null | null |
import numpy as np
from kdezero.datasets import Dataset
class SinCurve(Dataset):
"""The data is a sine curve, and the label advances one index by one.
    The number of data points is 1000.
    Attributes:
        data (ndarray):
            Sine-curve values (999 points, shape (999, 1)).
        label (ndarray):
            The same curve shifted forward by one index (shape (999, 1)).
train (bool):
Flag for learning.
If train is True, data is sin and noise.
If not, data is cos.
Examples:
>>> print(dataset.data[:5])
[[-0.04955855]
[ 0.03048039]
[-0.01378722]
[-0.02327317]
[ 0.04658464]]
>>> print(dataset.label[:5])
[[ 0.03048039]
[-0.01378722]
[-0.02327317]
[ 0.04658464]
[ 0.02806842]]
"""
def prepare(self):
num_data = 1000
dtype = np.float64
x = np.linspace(0, 2 * np.pi, num_data)
noise_range = (-0.05, 0.05)
noise = np.random.uniform(noise_range[0], noise_range[1], size=x.shape)
if self.train:
y = np.sin(x) + noise
else:
y = np.cos(x)
y = y.astype(dtype)
self.data = y[:-1][:, np.newaxis]
self.label = y[1:][:, np.newaxis]
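# Illustrative usage (not part of the original module); this assumes the Dataset
# base class calls prepare() during construction and accepts a `train` flag, as the
# docstring's examples suggest:
#
#   train_set = SinCurve(train=True)
#   x, t = train_set.data[0], train_set.label[0]   # t is the next point on the curve
#   len(train_set.data)                            # 999 pairs from 1000 sampled points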
| 26.521739
| 79
| 0.494262
|
794a0e1ff816e200f4df6109942c80297eabe0b2
| 526
|
py
|
Python
|
workflow/migrations/0017_organization_logo.py
|
meetdatastory/Activity-CE
|
2692e591f08cea7c869c045577b3d9e20d3ed335
|
[
"Apache-2.0"
] | null | null | null |
workflow/migrations/0017_organization_logo.py
|
meetdatastory/Activity-CE
|
2692e591f08cea7c869c045577b3d9e20d3ed335
|
[
"Apache-2.0"
] | null | null | null |
workflow/migrations/0017_organization_logo.py
|
meetdatastory/Activity-CE
|
2692e591f08cea7c869c045577b3d9e20d3ed335
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-11-14 00:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('workflow', '0016_auto_20170623_1306'),
]
operations = [
migrations.AddField(
model_name='organization',
name='logo',
field=models.FileField(blank=True, null=True, upload_to='static/img/', verbose_name='Your Organization Logo'),
),
]
| 25.047619
| 122
| 0.638783
|
794a0e220d00b4123ed94efef6358d90d0f40a37
| 6,852
|
py
|
Python
|
tests/extension/thread_/stream_substream_nested_reduce/thread_stream_substream_nested_reduce.py
|
akmaru/veriloggen
|
74f998139e8cf613f7703fa4cffd571bbf069bbc
|
[
"Apache-2.0"
] | null | null | null |
tests/extension/thread_/stream_substream_nested_reduce/thread_stream_substream_nested_reduce.py
|
akmaru/veriloggen
|
74f998139e8cf613f7703fa4cffd571bbf069bbc
|
[
"Apache-2.0"
] | null | null | null |
tests/extension/thread_/stream_substream_nested_reduce/thread_stream_substream_nested_reduce.py
|
akmaru/veriloggen
|
74f998139e8cf613f7703fa4cffd571bbf069bbc
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))))
from veriloggen import *
import veriloggen.thread as vthread
import veriloggen.types.axi as axi
def mkLed():
m = Module('blinkled')
clk = m.Input('CLK')
rst = m.Input('RST')
datawidth = 32
addrwidth = 10
reduce_size = 4
myaxi = vthread.AXIM(m, 'myaxi', clk, rst, datawidth)
ram_a = vthread.RAM(m, 'ram_a', clk, rst, datawidth, addrwidth)
ram_b = vthread.RAM(m, 'ram_b', clk, rst, datawidth, addrwidth)
ram_c = vthread.RAM(m, 'ram_c', clk, rst, datawidth, addrwidth)
ram_d = vthread.RAM(m, 'ram_d', clk, rst, datawidth, addrwidth)
macstrm = vthread.Stream(m, 'macstream', clk, rst)
macstrm_a = macstrm.source('a')
macstrm_b = macstrm.source('b')
macstrm_const = macstrm.constant('const')
macstrm_mul = macstrm_a * macstrm_b
macstrm_c, macstrm_v = macstrm.ReduceAddValid(macstrm_mul, macstrm_const)
macstrm.sink(macstrm_c, 'c')
macstrm.sink(macstrm_v, 'v')
neststrm = vthread.Stream(m, 'neststream', clk, rst)
neststrm_a = neststrm.source('a')
neststrm_b = neststrm.source('b')
neststrm_const = neststrm.constant('const')
neststrm_a += 1
neststrm_a += 0
neststrm_b += 1
macsub = neststrm.substream(macstrm)
macsub.to_source('a', neststrm_a)
macsub.to_source('b', neststrm_b)
macsub.to_constant('const', neststrm_const)
neststrm_c = macsub.from_sink('c')
neststrm_c += neststrm_a
neststrm_c += 0
neststrm_v = macsub.from_sink('v')
neststrm.sink(neststrm_c, 'c')
neststrm.sink(neststrm_v, 'v')
strm = vthread.Stream(m, 'mystream', clk, rst)
x = strm.source('x')
y = strm.source('y')
const = strm.constant('const')
sub = strm.substream(neststrm)
sub.to_source('a', x)
sub.to_source('b', y)
sub.to_constant('const', const)
z = sub.from_sink('c')
v = sub.from_sink('v')
z = z + y
strm.sink(z, 'z', when=v, when_name='v')
all_ok = m.TmpReg(initval=0)
def comp_stream_macstrm(size, offset):
macstrm.set_source('a', ram_a, offset, size)
macstrm.set_source('b', ram_b, offset, size)
macstrm.set_constant('const', reduce_size)
macstrm.set_sink('c', ram_c, offset, size)
macstrm.set_sink('v', ram_d, offset, size)
macstrm.run()
macstrm.join()
def comp_stream_mystrm(size, offset):
strm.set_source('x', ram_a, offset, size)
strm.set_source('y', ram_b, offset, size)
strm.set_constant('const', reduce_size)
strm.set_sink('z', ram_c, offset, size // reduce_size)
strm.run()
strm.join()
def comp_sequential_macstrm(size, offset):
sum = 0
count = 0
for i in range(size):
a = ram_a.read(i + offset)
b = ram_b.read(i + offset)
sum += a * b
count += 1
ram_c.write(i + offset, sum)
ram_d.write(i + offset, count == (reduce_size - 1))
if count == reduce_size:
sum = 0
count = 0
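    # Software reference model of 'mystream' above: for each input pair (x, y) it
    # accumulates (x + 1) * (y + 1) and, every reduce_size samples, emits
    # val = sum + (x + 1) + y, mirroring the nested substream datapath.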
def comp_sequential_mystrm(size, offset):
sum = 0
count = 0
write_offset = offset
for i in range(size):
x = ram_a.read(i + offset)
y = ram_b.read(i + offset)
sum += (x + 1) * (y + 1)
val = sum + (x + 1) + y
count += 1
if count == reduce_size:
ram_c.write(write_offset, val)
write_offset += 1
sum = 0
count = 0
def check(size, offset_stream, offset_seq):
for i in range(size):
st = ram_c.read(i + offset_stream)
sq = ram_c.read(i + offset_seq)
if vthread.verilog.NotEql(st, sq):
all_ok.value = False
print(i, st, sq)
if all_ok:
print('# verify: PASSED')
else:
print('# verify: FAILED')
def comp(size):
all_ok.value = True
# stream
offset = 0
myaxi.dma_read(ram_a, offset, 0, size)
myaxi.dma_read(ram_b, offset, 0, size)
comp_stream_macstrm(size, offset)
myaxi.dma_write(ram_c, offset, 1024, size)
# sequential
offset = size
myaxi.dma_read(ram_a, offset, 0, size)
myaxi.dma_read(ram_b, offset, 0, size)
comp_sequential_macstrm(size, offset)
myaxi.dma_write(ram_c, offset, 1024 * 2, size)
# verification
print('# macstream')
check(size, 0, offset)
# stream
offset = 0
myaxi.dma_read(ram_a, offset, 0, size)
myaxi.dma_read(ram_b, offset, 0, size)
comp_stream_mystrm(size, offset)
myaxi.dma_write(ram_c, offset, 1024, size // reduce_size)
# sequential
offset = size
myaxi.dma_read(ram_a, offset, 0, size)
myaxi.dma_read(ram_b, offset, 0, size)
comp_sequential_mystrm(size, offset)
myaxi.dma_write(ram_c, offset, 1024 * 2, size // reduce_size)
# verification
print('# mystream')
check(size // reduce_size, 0, offset)
vthread.finish()
th = vthread.Thread(m, 'th_comp', clk, rst, comp)
fsm = th.start(16)
return m
def mkTest(memimg_name=None):
m = Module('test')
# target instance
led = mkLed()
# copy paras and ports
params = m.copy_params(led)
ports = m.copy_sim_ports(led)
clk = ports['CLK']
rst = ports['RST']
memory = axi.AxiMemoryModel(m, 'memory', clk, rst, memimg_name=memimg_name)
memory.connect(ports, 'myaxi')
uut = m.Instance(led, 'uut',
params=m.connect_params(led),
ports=m.connect_ports(led))
#simulation.setup_waveform(m, uut)
simulation.setup_clock(m, clk, hperiod=5)
init = simulation.setup_reset(m, rst, m.make_reset(), period=100)
init.add(
Delay(1000000),
Systask('finish'),
)
return m
def run(filename='tmp.v', simtype='iverilog', outputfile=None):
if outputfile is None:
outputfile = os.path.splitext(os.path.basename(__file__))[0] + '.out'
memimg_name = 'memimg_' + outputfile
test = mkTest(memimg_name=memimg_name)
if filename is not None:
test.to_verilog(filename)
sim = simulation.Simulator(test, sim=simtype)
rslt = sim.run(outputfile=outputfile)
lines = rslt.splitlines()
if simtype == 'verilator' and lines[-1].startswith('-'):
rslt = '\n'.join(lines[:-1])
return rslt
if __name__ == '__main__':
rslt = run(filename='tmp.v')
print(rslt)
| 29.407725 | 79 | 0.593549 |
794a0e4b79d251c41c42898ae804576cb46c430a | 1,888 | py | Python |
hoomd/pytest/test_dcd.py | XT-Lee/hoomd-blue | 0188f56f32c4a3efe0e74a3dc27397d6ec3469b0 | ["BSD-3-Clause"] | null | null | null |
hoomd/pytest/test_dcd.py | XT-Lee/hoomd-blue | 0188f56f32c4a3efe0e74a3dc27397d6ec3469b0 | ["BSD-3-Clause"] | null | null | null |
hoomd/pytest/test_dcd.py | XT-Lee/hoomd-blue | 0188f56f32c4a3efe0e74a3dc27397d6ec3469b0 | ["BSD-3-Clause"] | null | null | null |
import hoomd
from hoomd.conftest import operation_pickling_check
import pytest
import numpy as np
def test_attach(simulation_factory, two_particle_snapshot_factory, tmp_path):
filename = tmp_path / "temporary_test_file.dcd"
sim = simulation_factory(two_particle_snapshot_factory())
dcd_dump = hoomd.write.DCD(filename, hoomd.trigger.Periodic(1))
sim.operations.add(dcd_dump)
sim.run(10)
# pip installing garnett does not use Cythonized code, so this warning will
# always be raised unless garnett is built locally.
@pytest.mark.filterwarnings("ignore:Failed to import dcdreader library")
def test_write(simulation_factory, two_particle_snapshot_factory, tmp_path):
garnett = pytest.importorskip("garnett")
dcd_reader = garnett.reader.DCDFileReader()
filename = tmp_path / "temporary_test_file.dcd"
sim = simulation_factory(two_particle_snapshot_factory())
dcd_dump = hoomd.write.DCD(filename, hoomd.trigger.Periodic(1))
sim.operations.add(dcd_dump)
positions = []
snap = sim.state.snapshot
if snap.communicator.rank == 0:
position1 = np.asarray(snap.particles.position[0])
position2 = np.asarray(snap.particles.position[1])
positions.append([list(position1), list(position2)])
sim.run(1)
if sim.device.communicator.rank == 0:
with open(filename, 'rb') as dcdfile:
traj = dcd_reader.read(dcdfile)
traj.load()
for i in range(len(traj)):
for j in [0, 1]:
np.testing.assert_allclose(traj[i].position[j], positions[i][j])
def test_pickling(simulation_factory, two_particle_snapshot_factory, tmp_path):
filename = tmp_path / "temporary_test_file.dcd"
sim = simulation_factory(two_particle_snapshot_factory())
dcd_dump = hoomd.write.DCD(filename, hoomd.trigger.Periodic(1))
operation_pickling_check(dcd_dump, sim)
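# A small sketch of the attach pattern exercised by the tests above. It assumes an
# already-initialized hoomd.Simulation object `sim` (provided by the fixtures in the
# tests); the filename and trigger period below are illustrative only.
def attach_dcd_writer(sim, filename, period=100):
    """Attach a periodic DCD trajectory writer to an existing simulation."""
    dcd_writer = hoomd.write.DCD(filename, hoomd.trigger.Periodic(period))
    sim.operations.add(dcd_writer)
    return dcd_writer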
| 38.530612 | 80 | 0.726165 |
794a0e506c8e9d0901898188ecbe0f434dd15233 | 1,119 | py | Python |
src/genie/libs/parser/iosxr/tests/ShowRipStatistics/cli/equal/golden_output_1_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | ["Apache-2.0"] | 204 | 2018-06-27T00:55:27.000Z | 2022-03-06T21:12:18.000Z |
src/genie/libs/parser/iosxr/tests/ShowRipStatistics/cli/equal/golden_output_1_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | ["Apache-2.0"] | 468 | 2018-06-19T00:33:18.000Z | 2022-03-31T23:23:35.000Z |
src/genie/libs/parser/iosxr/tests/ShowRipStatistics/cli/equal/golden_output_1_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | ["Apache-2.0"] | 309 | 2019-01-16T20:21:07.000Z | 2022-03-30T12:56:41.000Z |
expected_output = {
'vrf': {
'default': {
'address_family': {
'ipv4': {
'instance': {
'rip': {
'statistics': {
'total_messages_sent': 5294,
'message_send_failures': 0,
'regular_updates_sent': 2944,
'queries_responsed_to': 0,
'rib_updates': 4365,
'total_packets_received': 4896,
'packets_discarded': 0,
'routes_discarded': 4760,
'packets_received_at_standby': 0,
'routes_allocated': 9,
'paths_allocated': 6,
'route_malloc_failures': 0,
'path_malloc_failures': 0
}
}
}
}
}
}
}
}
| 34.96875 | 65 | 0.301162 |
794a0ee448f0d0f634fd3b54e5adbd65a5e5332a | 5,699 | py | Python |
ucsmsdk/mometa/firmware/FirmwareRunning.py | thinkitdata/ucsmsdk | da6599e1dbc1207a30eabe548a7e5791af5f476b | ["Apache-2.0"] | null | null | null |
ucsmsdk/mometa/firmware/FirmwareRunning.py | thinkitdata/ucsmsdk | da6599e1dbc1207a30eabe548a7e5791af5f476b | ["Apache-2.0"] | null | null | null |
ucsmsdk/mometa/firmware/FirmwareRunning.py | thinkitdata/ucsmsdk | da6599e1dbc1207a30eabe548a7e5791af5f476b | ["Apache-2.0"] | null | null | null |
"""This module contains the general information for FirmwareRunning ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class FirmwareRunningConsts:
DEPLOYMENT_BOOT_LOADER = "boot-loader"
DEPLOYMENT_KERNEL = "kernel"
DEPLOYMENT_SERVICE_PACK = "service-pack"
DEPLOYMENT_SYSTEM = "system"
DEPLOYMENT_UNSPECIFIED = "unspecified"
TYPE_ADAPTOR = "adaptor"
TYPE_BLADE_BIOS = "blade-bios"
TYPE_BLADE_CONTROLLER = "blade-controller"
TYPE_BOARD_CONTROLLER = "board-controller"
TYPE_CATALOG = "catalog"
TYPE_CHASSIS_BOARD_CONTROLLER = "chassis-board-controller"
TYPE_CMC = "cmc"
TYPE_DEBUG_PLUG_IN = "debug-plug-in"
TYPE_DIAG = "diag"
TYPE_FEX = "fex"
TYPE_FI_SERVICE_PACK = "fi-service-pack"
TYPE_FLEXFLASH_CONTROLLER = "flexflash-controller"
TYPE_GRAPHICS_CARD = "graphics-card"
TYPE_HOST_HBA = "host-hba"
TYPE_HOST_HBA_OPTIONROM = "host-hba-optionrom"
TYPE_HOST_NIC = "host-nic"
TYPE_HOST_NIC_OPTIONROM = "host-nic-optionrom"
TYPE_IOCARD = "iocard"
TYPE_LOCAL_DISK = "local-disk"
TYPE_MGMT_EXT = "mgmt-ext"
TYPE_MGMT_SERVICE_PACK = "mgmt-service-pack"
TYPE_NVME_MSWITCH = "nvme-mswitch"
TYPE_PSU = "psu"
TYPE_SAS_EXP_REG_FW = "sas-exp-reg-fw"
TYPE_SAS_EXPANDER = "sas-expander"
TYPE_STORAGE_CONTROLLER = "storage-controller"
TYPE_STORAGE_CONTROLLER_ONBOARD_DEVICE = "storage-controller-onboard-device"
TYPE_STORAGE_CONTROLLER_ONBOARD_DEVICE_CPLD = "storage-controller-onboard-device-cpld"
TYPE_STORAGE_DEV_BRIDGE = "storage-dev-bridge"
TYPE_STORAGE_NODE_CONTROLLER = "storage-node-controller"
TYPE_SWITCH = "switch"
TYPE_SWITCH_KERNEL = "switch-kernel"
TYPE_SWITCH_SOFTWARE = "switch-software"
TYPE_SYSTEM = "system"
TYPE_UNSPECIFIED = "unspecified"
class FirmwareRunning(ManagedObject):
"""This is FirmwareRunning class."""
consts = FirmwareRunningConsts()
naming_props = set([u'deployment'])
mo_meta = MoMeta("FirmwareRunning", "firmwareRunning", "fw-[deployment]", VersionMeta.Version101e, "InputOutput", 0x3f, [], ["read-only"], [u'adaptorHostEthIf', u'adaptorHostFcIf', u'biosUnit', u'capabilityCatalogue', u'capabilityMgmtExtension', u'equipmentPsu', u'graphicsCard', u'mgmtController', u'storageController', u'storageFlexFlashController', u'storageLocalDisk', u'storageNvmeSwitch', u'storageOnboardDevice', u'storageSasExpander'], [u'firmwareServicePack'], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"deployment": MoPropertyMeta("deployment", "deployment", "string", VersionMeta.Version101e, MoPropertyMeta.NAMING, 0x4, None, None, None, ["boot-loader", "kernel", "service-pack", "system", "unspecified"], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"inv_tag": MoPropertyMeta("inv_tag", "invTag", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"package_version": MoPropertyMeta("package_version", "packageVersion", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"type": MoPropertyMeta("type", "type", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["adaptor", "blade-bios", "blade-controller", "board-controller", "catalog", "chassis-board-controller", "cmc", "debug-plug-in", "diag", "fex", "fi-service-pack", "flexflash-controller", "graphics-card", "host-hba", "host-hba-optionrom", "host-nic", "host-nic-optionrom", "iocard", "local-disk", "mgmt-ext", "mgmt-service-pack", "nvme-mswitch", "psu", "sas-exp-reg-fw", "sas-expander", "storage-controller", "storage-controller-onboard-device", "storage-controller-onboard-device-cpld", "storage-dev-bridge", "storage-node-controller", "switch", "switch-kernel", "switch-software", "system", "unspecified"], []),
"version": MoPropertyMeta("version", "version", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
}
prop_map = {
"childAction": "child_action",
"deployment": "deployment",
"dn": "dn",
"invTag": "inv_tag",
"packageVersion": "package_version",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"type": "type",
"version": "version",
}
def __init__(self, parent_mo_or_dn, deployment, **kwargs):
self._dirty_mask = 0
self.deployment = deployment
self.child_action = None
self.inv_tag = None
self.package_version = None
self.sacl = None
self.status = None
self.type = None
self.version = None
ManagedObject.__init__(self, "FirmwareRunning", parent_mo_or_dn, **kwargs)
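# A small usage sketch (not part of the original module). The parent dn below is
# hypothetical; it only illustrates the constructor defined above, where `deployment`
# is the naming property behind the "fw-[deployment]" rn pattern.
example_fw = FirmwareRunning(
    parent_mo_or_dn="sys/mgmt/fw-boot-def",  # hypothetical parent dn
    deployment=FirmwareRunningConsts.DEPLOYMENT_SYSTEM,
)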
| 58.752577 | 745 | 0.689244 |
794a11aa17fbe4aff57faabff5acbf609bd4fb8d | 1,742 | py | Python |
tests/quick/se/50.vortex/test.py | qianlong4526888/haha | 01baf923693873c11ae072ce4dde3d8f1d7b6239 | ["BSD-3-Clause"] | 31 | 2015-12-15T19:14:10.000Z | 2021-12-31T17:40:21.000Z |
tests/quick/se/50.vortex/test.py | qianlong4526888/haha | 01baf923693873c11ae072ce4dde3d8f1d7b6239 | ["BSD-3-Clause"] | 5 | 2015-12-04T08:06:47.000Z | 2020-08-09T21:49:46.000Z |
tests/quick/se/50.vortex/test.py | qianlong4526888/haha | 01baf923693873c11ae072ce4dde3d8f1d7b6239 | ["BSD-3-Clause"] | 21 | 2015-11-05T08:25:45.000Z | 2021-06-19T02:24:50.000Z |
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Korey Sewell
m5.util.addToPath('../configs/common')
from cpu2000 import vortex
workload = vortex(isa, opsys, 'smred')
root.system.cpu[0].workload = workload.makeLiveProcess()
| 51.235294 | 72 | 0.791045 |
794a11d37fc51683848d4c66f0bc69302f864b60 | 7,851 | py | Python |
dankypipe/utils.py | lukeWaninger/DankDefense | 969c151a42857f9fa72e6887aed22e8c977740a5 | ["MIT"] | null | null | null |
dankypipe/utils.py | lukeWaninger/DankDefense | 969c151a42857f9fa72e6887aed22e8c977740a5 | ["MIT"] | 4 | 2020-03-24T16:36:56.000Z | 2021-06-01T23:19:39.000Z |
dankypipe/utils.py | lukeWaninger/DankDefense | 969c151a42857f9fa72e6887aed22e8c977740a5 | ["MIT"] | null | null | null |
import datetime as dt
import gc
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
NO_DETECTIONS = '#706C60'
DETECTIONS = '#FFAE09'
dp = lambda x: os.path.join('/', 'mnt', 'f', 'DankDefense', x)
def write_feat(name, train, test):
path = dp(os.path.join('feats', name))
train.to_csv(dp(f'{path}_train.csv'), index=None)
test.to_csv(dp(f'{path}_test.csv'), index=None)
def get_feat(name, t):
path = dp(os.path.join('feats', name))
train = pd.read_csv(dp(f'{path}_train.csv'), dtype=t)
test = pd.read_csv(dp(f'{path}_test.csv'), dtype=t)
return train, test
def clear(name):
if not isinstance(name, list):
name = [name]
for v in name:
if v in globals().keys():
del globals()[v]
x = gc.collect()
def nanp(df, show_zero=False):
cols = df.columns
d, p = len(df), []
for i, col in enumerate(cols):
a = sum(pd.isnull(df[col]))
p.append([col, df[col].dtype, np.round(a / d * 100, 1)])
p = pd.DataFrame(p, columns=['Variable', 'DataType', 'PercentNA'])
if not show_zero:
return p.loc[p['PercentNA'] > 0].sort_values(by='PercentNA', ascending=False)
else:
return p.sort_values(by='PercentNA', ascending=False)
def isfloat(x):
try:
float(x)
return True
except:
return False
def isint(x):
try:
int(x)
return True
except:
return False
def printcats(df, c):
df[c] = df[c].apply(lambda x: str(x).lower() if not pd.isnull(x) else np.nan)
df.loc[
(df.loc[:, c] == 'unknown') |
(df.loc[:, c] == 'unspecified') |
df.loc[:, c].isnull(), c
] = np.nan
un = df[c].unique()
if len(un) < 20:
print(c, len(c), ':', un)
else:
print(c, len(c), ':', ', '.join([str(x) for x in un[:5]]) + ', ...')
def pcols(df):
t = [print(c) for c in sorted(list(set(df.columns)))]
def cateval(df, c, test_data=False):
print(f'{"test" if test_data else "train"} percent na: ', df[c].isnull().mean())
if not test_data:
t = pd.crosstab(df[c], df.HasDetections, normalize='index').sort_values(c)
t['total_count'] = df[c].value_counts()
t['normalized'] = t.total_count / t.total_count.sum()
else:
t = pd.value_counts(df[c])
t['normalized'] = pd.value_counts(df[c], normalize=True)
t.columns = ['count', 'ratio']
print(t)
class DFMan(object):
def __init__(self, dtypes):
self.dtypes = dtypes
@property
def cat_cols(self):
return sorted([c for c, v in self.dtypes.items() if v == 'category'])
@property
def bin_cols(self):
return sorted([c for c, v in self.dtypes.items() if v == 'int8'])
@property
def num_cols(self):
return sorted(list(set(self.dtypes.keys()) - set(self.cat_cols) - set(self.bin_cols)))
@property
def dict(self):
return self.dtypes
def add_type(self, k, v):
self.dtypes[k] = v
def remove_type(self, k):
del self.dtypes[k]
def cat_over_time(train, test, c, close=False):
try:
if len(train[c].unique()) > 15:
return
def fx(df_):
ct = df_[c].value_counts(normalize=True)
return ct
df_train = train[['avsig_dt', 'MachineIdentifier', c]].groupby(['avsig_dt']).apply(fx).reset_index()
df_test = test[['avsig_dt', 'MachineIdentifier', c]].groupby(['avsig_dt']).apply(fx).reset_index()
df = pd.concat([df_train, df_test], axis=0, sort=False).sort_values(by='avsig_dt')
df.columns = ['date', c, 'perc']
df[c] = df[c].astype(str)
del df_train, df_test
x = gc.collect()
x = plt.gcf()
x = sns.set(style='whitegrid')
fig, ax1 = plt.subplots()
x = fig.set_size_inches(17, 10)
x = plt.xlim((dt.date(year=2018, month=6, day=1), max(df.date) + dt.timedelta(days=7)))
x = plt.grid(False)
x = plt.title(f'{c} over time')
# for ci in df[c].unique():
x = sns.lineplot(
x='date',
y='perc',
hue=c,
data=df
)
x = plt.savefig(dp(os.path.join('figs', f'time_category_{c}.png')))
if close:
x = plt.close()
except Exception as e:
print(f'failed: {c}')
def cat_by_detections(df, c, close=False):
try:
n = len(df[c].unique())
if n > 15:
return
df_ = df[[c, 'HasDetections', 'AvSigVersion']]\
.groupby([c, 'HasDetections'])\
.count()\
.reset_index()
df_['color'] = ''
df_.loc[df_.HasDetections == 0, 'color'] = NO_DETECTIONS
df_.loc[df_.HasDetections == 1, 'color'] = DETECTIONS
x = plt.gcf()
fig, ax1 = plt.subplots()
x = fig.set_size_inches(17, 10)
x = sns.barplot(
x=c,
y='AvSigVersion',
hue='HasDetections',
palette={0: NO_DETECTIONS, 1: DETECTIONS},
data=df_
)
x = plt.savefig(dp(os.path.join('figs', f'{c}_HasDetections.png')))
if close:
x = plt.close()
del df_, fig
x = gc.collect()
except Exception as e:
print(e)
print(f'failed {c}')
def numeric_over_time(train, test, c, close=False):
try:
train_name = f'mean_{c}_train'
test_name = f'mean_{c}_test'
df_train = train[[c, 'avsig_dt', 'HasDetections']]\
.groupby(['avsig_dt'])\
.agg([np.mean])\
.reset_index()
df_train.columns = ['dt', train_name, 'mean_detections']
df_test = test[[c, 'avsig_dt']]\
.groupby(['avsig_dt'])\
.agg([np.mean])\
.reset_index()
df_test.columns = ['dt', test_name]
df = df_train.merge(df_test, on='dt', how='outer')
df = df.fillna(0)
del df_train, df_test
gc.collect()
plt.gcf()
sns.set(style='whitegrid')
fig, ax1 = plt.subplots()
x = fig.set_size_inches(17, 10)
x = plt.xlim((dt.date(year=2018, month=6, day=1), dt.date(year=2018, month=12, day=1)))
x = plt.grid(False)
x = ax1.set_title(f'Mean {c}', fontsize=22)
x = ax1.set_ylabel('Mean AV Products Installed', fontsize=20)
x = sns.lineplot(
x='dt',
y=train_name,
data=df,
ax=ax1,
linewidth=2,
legend='brief',
dashes=False
)
x = sns.lineplot(
x='dt',
y=test_name,
data=df,
ax=ax1,
linewidth=2,
legend='brief',
dashes=False
)
x = plt.savefig(dp(os.path.join('figs', f'time_numerical_{c}.png')))
if close:
x = plt.close()
except Exception as e:
print(e)
print(f'failed {c}')
def numeric_by_detections(df, c, close=False):
try:
x = plt.gcf()
fig, ax1 = plt.subplots()
x = fig.set_size_inches(13, 6)
x = plt.grid(False)
x = sns.distplot(
df.loc[df.HasDetections == 0, c].sample(20_000),
hist=False,
kde=True,
kde_kws={"shade": True},
color=NO_DETECTIONS
)
x = sns.distplot(
df.loc[df.HasDetections == 1, c].sample(20_000),
hist=False,
kde=True,
kde_kws={"shade": True},
color=DETECTIONS
)
x = plt.savefig(os.path.join('figs', f'{c}_HasDetections.png'))
if close:
x = plt.close()
del fig
x = gc.collect()
except Exception as e:
print(e)
print(f'failed {c}')
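# A short usage sketch for the helpers above (column names and dtypes are
# illustrative only; pandas and numpy are already imported at the top of the module).
if __name__ == '__main__':
    example = pd.DataFrame({'a': [1.0, np.nan, 3.0], 'b': ['x', 'y', None]})
    print(nanp(example))                        # per-column percentage of missing values
    manager = DFMan({'a': 'float64', 'b': 'category'})
    print(manager.cat_cols, manager.num_cols)   # -> ['b'] ['a']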
| 25.244373 | 108 | 0.524392 |
794a1257ec624afeab41eb394bc7c0a2fa8df6a9 | 1,023 | py | Python |
src/collective/solr/zcml.py | IMIO/collective.solr | 844219eb3968b34d2b83a7bd5f59340d676d149e | ["ZPL-1.1"] | 15 | 2015-04-13T14:54:47.000Z | 2022-01-17T09:18:00.000Z |
src/collective/solr/zcml.py | IMIO/collective.solr | 844219eb3968b34d2b83a7bd5f59340d676d149e | ["ZPL-1.1"] | 198 | 2015-01-30T15:29:32.000Z | 2022-03-22T10:39:31.000Z |
src/collective/solr/zcml.py | adrianschulz/collective.solr | 2d76fe01a02174d383fdc335d38ee52afa8bfa27 | ["ZPL-1.1"] | 34 | 2015-02-24T09:23:31.000Z | 2022-03-01T02:31:39.000Z |
# -*- coding: utf-8 -*-
from zope.interface import Interface
from zope import schema
from zope.component.zcml import utility
from collective.solr.interfaces import IZCMLSolrConnectionConfig
from collective.solr.manager import ZCMLSolrConnectionConfig
class ISolrConnectionConfigDirective(Interface):
"""Directive which registers a Solr connection config"""
host = schema.ASCIILine(
title=u"Host",
description=u"The host name of the Solr instance to be used.",
required=True,
)
port = schema.Int(
title=u"Port",
description=u"The port of the Solr instance to be used.",
required=True,
)
base = schema.ASCIILine(
title=u"Base",
description=u"The base prefix of the Solr instance to be used.",
required=True,
)
def solrConnectionConfigDirective(_context, host, port, base):
utility(
_context,
provides=IZCMLSolrConnectionConfig,
component=ZCMLSolrConnectionConfig(host, port, base),
)
| 26.230769 | 72 | 0.686217 |
794a1267c98bd5602670aa07805d7b87d1430d68 | 2,748 | py | Python |
apysc/_animation/animation_scale_y_from_center_interface.py | simon-ritchie/apyscript | c319f8ab2f1f5f7fad8d2a8b4fc06e7195476279 | ["MIT"] | 16 | 2021-04-16T02:01:29.000Z | 2022-01-01T08:53:49.000Z |
apysc/_animation/animation_scale_y_from_center_interface.py | simon-ritchie/apysc | 61d0078e5f3b702eaacceedfbe6e5cafe48f8033 | ["MIT"] | 613 | 2021-03-24T03:37:38.000Z | 2022-03-26T10:58:37.000Z |
apysc/_animation/animation_scale_y_from_center_interface.py | simon-ritchie/apyscript | c319f8ab2f1f5f7fad8d2a8b4fc06e7195476279 | ["MIT"] | 2 | 2021-06-20T07:32:58.000Z | 2021-12-26T08:22:11.000Z |
"""Class implementation for the animation_scale_y_from_center
interface.
"""
from typing import Union
from apysc._animation.animation_interface_base import AnimationInterfaceBase
from apysc._animation.animation_scale_y_from_center import \
AnimationScaleYFromCenter
from apysc._animation.easing import Easing
from apysc._type.int import Int
from apysc._type.number import Number
class AnimationScaleYFromCenterInterface(AnimationInterfaceBase):
def animation_scale_y_from_center(
self,
scale_y_from_center: Union[float, Number],
*,
duration: Union[int, Int] = 3000,
delay: Union[int, Int] = 0,
easing: Easing = Easing.LINEAR) -> AnimationScaleYFromCenter:
"""
Set the scale-y from the center point animation setting.
Notes
-----
To start this animation, you need to call the `start` method of
the returned instance.
Parameters
----------
scale_y_from_center : float or number
The final scale-y of the animation.
duration : int or Int, default 3000
Milliseconds before an animation ends.
delay : int or Int, default 0
Milliseconds before an animation starts.
easing : Easing, default Easing.LINEAR
Easing setting.
Returns
-------
animation_scale_y_from_center : AnimationScaleYFromCenter
Created animation setting instance.
References
----------
- animation_scale_y_from_center interface document
- https://bit.ly/30qsD2m
- Animation interfaces duration setting document
- https://simon-ritchie.github.io/apysc/animation_duration.html
- Animation interfaces delay setting document
- https://simon-ritchie.github.io/apysc/animation_delay.html
- Each animation interface return value document
- https://bit.ly/2XOoa8w
- Sequential animation setting document
- https://simon-ritchie.github.io/apysc/sequential_animation.html
- animation_parallel interface document
- https://simon-ritchie.github.io/apysc/animation_parallel.html
- Easing enum document
- https://simon-ritchie.github.io/apysc/easing_enum.html
"""
animation_scale_y_from_center: AnimationScaleYFromCenter = \
AnimationScaleYFromCenter(
target=self,
scale_y_from_center=scale_y_from_center,
duration=duration,
delay=delay,
easing=easing)
return animation_scale_y_from_center
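# A usage sketch based on the notes above (not part of the original module):
# `target` stands for any apysc display object that mixes in this interface, and
# the concrete numbers are illustrative only.
def _start_scale_y_animation(target: AnimationScaleYFromCenterInterface) -> None:
    animation = target.animation_scale_y_from_center(
        scale_y_from_center=2.0, duration=1000, delay=500)
    # As the docstring says, nothing happens until `start` is called.
    animation.start()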
| 37.643836 | 78 | 0.633916 |
794a13e6121821e471b113eef575b0451bd3a4dc | 221 | py | Python |
py/desispec/workflow/__init__.py | echaussidon/desispec | 8a8bd59653861509dd630ffc8e1cd6c67f6cdd51 | ["BSD-3-Clause"] | 24 | 2015-09-29T06:06:29.000Z | 2022-01-14T07:31:45.000Z |
py/desispec/workflow/__init__.py | echaussidon/desispec | 8a8bd59653861509dd630ffc8e1cd6c67f6cdd51 | ["BSD-3-Clause"] | 1,452 | 2015-02-26T00:14:23.000Z | 2022-03-31T23:35:10.000Z |
py/desispec/workflow/__init__.py | echaussidon/desispec | 8a8bd59653861509dd630ffc8e1cd6c67f6cdd51 | ["BSD-3-Clause"] | 25 | 2015-02-06T21:39:13.000Z | 2022-02-22T14:16:31.000Z |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
desispec.workflow
=================
Tools for workflow management and running the pipeline
"""
from __future__ import absolute_import
| 22.1 | 63 | 0.710407 |
794a1493c8a37024e6225b9ef9195fda039d9453 | 1,063 | py | Python |
backend/tag_id.py | gustavozapata/Team4 | ab64cfeff219891cdd74f5f3995ef90816b5eb9d | ["Apache-2.0"] | 2 | 2019-11-04T19:03:39.000Z | 2019-12-30T11:55:51.000Z |
backend/tag_id.py | gustavozapata/Team4 | ab64cfeff219891cdd74f5f3995ef90816b5eb9d | ["Apache-2.0"] | 1 | 2019-12-29T16:37:12.000Z | 2019-12-29T16:37:12.000Z |
backend/tag_id.py | gustavozapata/Team4 | ab64cfeff219891cdd74f5f3995ef90816b5eb9d | ["Apache-2.0"] | 3 | 2019-11-02T23:51:36.000Z | 2019-12-29T16:41:02.000Z |
class TagID:
def __init__(self):
self.tag_dictionary = {
'Natural Disaster': 1,
'Homeless': 2,
'Animals': 3,
'Health': 4,
'Children': 5,
'Immigration': 6,
'Culture': 7,
'Enviroment': 8,
'Cleaning up': 9,
'Family': 10
}
return
def add_tag(self, tag):
self.tag_dictionary.update({tag : self.get_new_id() })
return
    def get_new_id(self):
        # The dictionary maps tag names to ids, so scan the values for the first
        # unused id instead of indexing by id (which raised KeyError).
        current_id = 1
        while current_id in self.tag_dictionary.values():
            current_id += 1
        return current_id
    def remove_tag(self, tag):
        # Keys are tag names, so delete by name rather than by numeric id.
        del self.tag_dictionary[tag]
        return
def get_id(self, tag):
return self.tag_dictionary.get(tag)
def get_tag_name(self, id):
for key, value in self.tag_dictionary.items():
if (value == id):
return key
return ''
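# A small usage sketch for the class above (the 'Education' tag is illustrative):
if __name__ == '__main__':
    tags = TagID()
    print(tags.get_id('Animals'))      # -> 3
    print(tags.get_tag_name(4))        # -> 'Health'
    tags.add_tag('Education')          # assigned the next free id, 11
    print(tags.get_id('Education'))    # -> 11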
| 20.442308 | 62 | 0.4619 |
794a14dbfead8555fabdd0594fcd63688cf76bb3 | 8,731 | py | Python |
src/pyxer/command.py | holtwick/pyxer | 5e962352a8bebe33baee8858d326e7eac406f694 | ["MIT"] | 2 | 2016-01-25T06:01:14.000Z | 2016-02-07T20:30:25.000Z |
src/pyxer/command.py | holtwick/pyxer | 5e962352a8bebe33baee8858d326e7eac406f694 | ["MIT"] | 2 | 2018-03-21T06:27:50.000Z | 2018-03-22T12:57:58.000Z |
src/pyxer/command.py | holtwick/pyxer | 5e962352a8bebe33baee8858d326e7eac406f694 | ["MIT"] | null | null | null |
# -*- coding: UTF-8 -*-
#############################################
## (C)opyright by Dirk Holtwick, 2008 ##
## All rights reserved ##
#############################################
from optparse import OptionParser
from pyxer.utils import system, call_subprocess, find_root, install_package
from pyxer import VERSION_STR
import logging
import sys
import os
import os.path
try:
log = logging.getLogger(__name__)
except:
log = logging.getLogger(__file__)
_description = """
Yet another Python framework
""".strip()
def showlog(debug):
level = logging.WARN
if debug:
level = logging.DEBUG
try:
LOG_FORMAT_DEBUG = "%(levelname)s [%(name)s] %(pathname)s line %(lineno)d: %(message)s"
logging.basicConfig(
level = level,
format = LOG_FORMAT_DEBUG)
except:
logging.basicConfig()
class OptParser(OptionParser):
def print_help(self):
OptionParser.print_help(self)
# parser.print_usage()
# print parser.format_option_help()
print
print """
Commands:
init Create a new project
serve Serves the project
push Upload project to GAE
push_empty Upload empty project with version '0' to GAE
update Update local pyxer package
install Install or update local Python package
zipinstall Install or update local Python ZIP package (slow on GAE)
Daemon commands (only paster):
start Start
stop Stop
status Status
reload, restart Restart
""".strip()
#def error(self, msg):
# OptionParser.error(self, msg)
# print "Use option --help for complete help"
iswin = (sys.platform == "win32")
def command(engine = None):
parser = OptParser(
# "usage: %prog [options] command",
"Usage: pyxer [options] command",
description = _description,
version = VERSION_STR,
# epilog="Neu\n\r\n" + 20*"hallo ",
)
parser.add_option(
"-q",
"--quiet",
action = "store_false",
dest = "verbose",
default = True,
help = "Do not print status messages to stdout")
#parser.add_option(
# "-f",
# "--force",
# action="store_false",
# dest="force",
# default=True,
# help="don't print status messages to stdout")
parser.add_option(
"-d",
"--debug",
action = "store_true",
dest = "debug",
default = False,
help = "Activate debug logging")
if not engine:
parser.add_option(
"--engine",
dest = "engine",
default = "",
help = "Engine that will be used: gae (default), wsgi, paster")
parser.add_option(
"--port",
dest = "port",
default = "8080",
help = "serving on port")
parser.add_option(
"--host",
dest = "host",
default = "0.0.0.0",
help = "Serving on host")
parser.add_option(
"-r",
"--reload",
dest = "reload",
action = "store_true",
help = "Reload on changing files")
#parser.add_option(
# "-u",
# "--update",
# dest = "update",
# action = "store_true",
# help = "update suplementary data and files")
parser.add_option(
"-U",
"--develop",
dest = "develop",
action = "store_true",
help = "Update projects Pyxer version")
parser.add_option(
"-c",
"--clear",
dest = "clear",
action = "store_true",
help = "Empty local GAE datastore")
parser.add_option(
"-f",
"--force",
dest = "force",
action = "store_true",
help = "Force updates; overwrites pyxer-app.py")
(opt, args) = parser.parse_args()
showlog(opt.debug)
#config_default = {
# "pyxer.debug": (cBOOL, False),
# "pyxer.sessions": (cBOOL, False),
# "pyxer.engine": (cSTRING, ""),
# "pyxer.templating": (cSTRING, ""),
# "pyxer.host": (cSTRING, "127.0.0.1"),
# "pyxer.port": (cINT, 8080, 0, 65536),
# }
if (len(args) < 1) or (len(args) > 2):
log.debug("Minimum 1 argument, maximum 2")
parser.print_help()
# parser.error("incorrect number of arguments")
sys.exit(1)
command = args[0].lower()
# Directory argument
if len(args) == 2:
here = os.path.abspath(args[1])
else:
here = os.getcwd()
# Get engine
if engine:
opt.engine = engine
log.debug("Command %r for engine %r in directory %r", command, engine, here)
if opt.engine in ("paster", "paste", "p"):
print "Paster"
opt.engine = "paster"
import pyxer.paster as engine
elif opt.engine in ("wsgi", "w"):
print "Python WSGI"
engine = None
else:
print "Google AppEngine"
opt.engine = "gae"
import pyxer.gae as engine
# Update version
if opt.develop and command not in ("setup", "create", "init", "pyxer"):
import pyxer.create
pyxer.create.self_setup(opt)
# Serve
if command == "serve":
if engine:
engine.serve(opt)
else:
if opt.debug:
logging.basicConfig(level = logging.DEBUG)
import pyxer.app
pyxer.app.serve(opt)
# Setup
elif (command in ("setup", "create", "init")):
import pyxer.create
pyxer.create.create(opt, here)
# Install
elif (command in ("install")):
if len(args)==2:
install_package(os.getcwd(), args[1])
# ZIP Install
elif (command in ("zipinstall")):
if len(args)==2:
install_package(os.getcwd(), args[1], zip=True)
# Activate
# elif (command in ("open", "activate", "vm")):
#
# root = find_root()
# if not root:
# print "No project found"
# elif iswin:
# # call_subprocess([os.path.join(root, "scripts", "activate.bat")])
# system("start " + os.path.join(root, "scripts", "activate.bat"))
# else:
# print "IMPORTANT! Leave VM with command 'exit'."
# call_subprocess(["bash", "--init-file", os.path.join(root, "bin", "activate")], raise_on_returncode = False)
# Deactivate
# elif (command == "close" or command == "deactivate"):
#
# root = find_root()
# if not root:
# print "No project found"
# elif iswin:
# system(os.path.join(root, "scripts", "deactivate.bat"))
# else:
# pass
# Daemon
elif command == "start" and opt.engine == "paster":
engine.serve(opt, daemon = "start")
elif command == "stop" and opt.engine == "paster":
engine.serve(opt, daemon = "stop")
elif command == "status" and opt.engine == "paster":
engine.serve(opt, daemon = "status")
elif (command in ("reload", "restart")) and opt.engine == "paster":
engine.serve(opt, daemon = "restart")
# GAE Upload
elif (command in ("upload", "deploy", "push")) and opt.engine == "gae":
engine.upload(opt)
# GAE empty
elif (command in ("push_empty")) and opt.engine == "gae":
if len(args)==2:
name = args[1]
import tempfile
tmpdir = tempfile.mkdtemp()
print "Empty project", name , "created at", tmpdir
tmpfle = os.path.join(tmpdir, 'app.yaml')
open(tmpfle, 'w').write("""
application: %s
version: 0
runtime: python
api_version: 1
handlers:
- url: /
static_dir: empty
""".strip() % name)
engine.upload(opt, root=tmpdir)
os.remove(tmpfle)
os.rmdir(tmpdir)
print
print "ATTENTION: Go to GAE dasboard/versions and switch to version '0' to turn off your project"
else:
print '*** Project name needed as last argument'
# GAE fix
#elif (command == "fix" or command == "fixup") and opt.engine == "gae":
# engine.fix()
# Setup Pyxer
elif command in ("pyxer", "update", "up"):
import pyxer.create
pyxer.create.self_setup(opt)
else:
parser.print_help()
sys.exit(1)
# parser.error("unsupported command")
# print options, args
def command_gae():
command("gae")
def command_paster():
command("paster")
def command_wsgi():
command("wsgi")
if __name__=="__main__":
command()
| 28.255663 | 121 | 0.532471 |
794a150c448ce3adb22a06d8a4efef932223e644 | 571 | py | Python |
meregistro/apps/registro/forms/AnexoAlcancesForm.py | MERegistro/meregistro | 6cde3cab2bd1a8e3084fa38147de377d229391e3 | ["BSD-3-Clause"] | null | null | null |
meregistro/apps/registro/forms/AnexoAlcancesForm.py | MERegistro/meregistro | 6cde3cab2bd1a8e3084fa38147de377d229391e3 | ["BSD-3-Clause"] | null | null | null |
meregistro/apps/registro/forms/AnexoAlcancesForm.py | MERegistro/meregistro | 6cde3cab2bd1a8e3084fa38147de377d229391e3 | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
from django.forms import ModelForm
from apps.registro.models.Alcance import Alcance
from django.core.exceptions import ValidationError
from django import forms
class AnexoAlcancesForm(ModelForm):
alcances = forms.ModelMultipleChoiceField(queryset=Alcance.objects.all().order_by('nombre'), widget=forms.CheckboxSelectMultiple, required=False)
verificado = forms.BooleanField(required=False)
class Meta:
model = Alcance
fields = ['alcances']
def clean_alcances(self):
return self.cleaned_data['alcances']
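# Hypothetical usage sketch (not part of the original module); it assumes a
# configured Django project where Alcance rows with ids 1 and 2 already exist.
#
# form = AnexoAlcancesForm(data={'alcances': [1, 2], 'verificado': True})
# if form.is_valid():
#     alcances_seleccionados = form.cleaned_data['alcances']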
| 31.722222 | 149 | 0.747811 |
794a15a2516db8f714b143b37f9c067c53406803 | 765 | py | Python |
dstools/flatten.py | keans/dstools | b29ff1ae191d94ce6bfac66f4c14dcac0c1a350b | ["MIT"] | null | null | null |
dstools/flatten.py | keans/dstools | b29ff1ae191d94ce6bfac66f4c14dcac0c1a350b | ["MIT"] | null | null | null |
dstools/flatten.py | keans/dstools | b29ff1ae191d94ce6bfac66f4c14dcac0c1a350b | ["MIT"] | null | null | null |
def flatten_dict(y, key_filter=None, seperator="."):
"""
flatten given dictionary, if filter is provided only given values are
returned
"""
res = {}
def flatten(x, name="", seperator=""):
if isinstance(x, dict):
# --- dict ---
for k in x.keys():
flatten(x[k], "{}{}{}".format(name, k, seperator), seperator)
elif isinstance(x, list):
# --- list ---
for no, k in enumerate(x):
flatten(k, "{}{}{}".format(name, no, seperator), seperator)
else:
# --- value ---
key = name[:-1]
if (key_filter is None) or (key in key_filter):
res[key] = x
    flatten(y, seperator=seperator)  # honor the caller-supplied separator instead of hard-coding "."
return res
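# A minimal usage sketch (values are illustrative only):
if __name__ == "__main__":
    nested = {"a": {"b": 1, "c": [2, 3]}}
    print(flatten_dict(nested))                       # {'a.b': 1, 'a.c.0': 2, 'a.c.1': 3}
    print(flatten_dict(nested, key_filter=["a.b"]))   # {'a.b': 1}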
| 27.321429 | 77 | 0.478431 |
794a1654b86848c4a62ed55d5fceb8d46b62316e | 2,344 | py | Python |
benchmark/datasets/reddit/preprocess/get_json.py | FederalLab/benchmark-lightly | ff05a99a19c0e1dcddf16996f922431e09771c4c | ["MIT"] | null | null | null |
benchmark/datasets/reddit/preprocess/get_json.py | FederalLab/benchmark-lightly | ff05a99a19c0e1dcddf16996f922431e09771c4c | ["MIT"] | null | null | null |
benchmark/datasets/reddit/preprocess/get_json.py | FederalLab/benchmark-lightly | ff05a99a19c0e1dcddf16996f922431e09771c4c | ["MIT"] | null | null | null |
# @Author : FederalLab
# @Date : 2021-09-26 00:32:41
# @Last Modified by : Chen Dengsheng
# @Last Modified time: 2021-09-26 00:32:41
# Copyright (c) FederalLab. All rights reserved.
import json
import math
import os
import pickle
data_root = os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'data')
DIR = os.path.join(data_root, 'reddit_subsampled')
FINAL_DIR = os.path.join(data_root, 'reddit_json')
FILES_PER_JSON = 10
def merge_dicts(x, y):
z = x.copy()
z.update(y)
return z
def to_leaf_format(some_json, start_idx=0):
leaf_json = {'users': [], 'num_samples': [], 'user_data': {}}
new_idx = start_idx
for u, comments in some_json.items():
new_idx += 1
leaf_json['users'].append(str(new_idx))
leaf_json['num_samples'].append(len(comments))
x = []
y = []
for c in comments:
assert c.author == u
c_x = c.body
c_y = {
'subreddit': c.subreddit,
'created_utc': c.created_utc,
'score': c.score,
}
x.append(c_x)
y.append(c_y)
user_data = {'x': x, 'y': y}
leaf_json['user_data'][str(new_idx)] = user_data
return leaf_json, new_idx
def files_to_json(files, json_name, start_user_idx=0):
all_users = {}
for f in files:
f_dir = os.path.join(DIR, f)
f_users = pickle.load(open(f_dir, 'rb'))
all_users = merge_dicts(all_users, f_users)
all_users, last_user_idx = to_leaf_format(all_users, start_user_idx)
with open(os.path.join(FINAL_DIR, json_name), 'w') as outfile:
json.dump(all_users, outfile)
return last_user_idx
def main():
if not os.path.exists(FINAL_DIR):
os.makedirs(FINAL_DIR)
files = [f for f in os.listdir(DIR) if f.endswith('.pck')]
files.sort()
num_files = len(files)
num_json = math.ceil(num_files / FILES_PER_JSON)
last_user_idx = 0
for i in range(num_json):
cur_files = files[i * FILES_PER_JSON:(i + 1) * FILES_PER_JSON]
print('processing until', (i + 1) * FILES_PER_JSON)
last_user_idx = files_to_json(cur_files, 'reddit_{}.json'.format(i),
last_user_idx)
if __name__ == '__main__':
main()
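# For reference, a hypothetical illustration (not from the original file) of the
# LEAF-style structure that to_leaf_format() builds: user ids are re-numbered
# strings, x holds the comment bodies, and y holds the matching metadata dicts.
#
# {
#     'users': ['1'],
#     'num_samples': [2],
#     'user_data': {
#         '1': {
#             'x': ['first comment body', 'second comment body'],
#             'y': [{'subreddit': 'askscience', 'created_utc': 1388534400, 'score': 3},
#                   {'subreddit': 'askscience', 'created_utc': 1388620800, 'score': 1}],
#         }
#     }
# }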
| 26.044444 | 76 | 0.596416 |
794a16a52627d2a1a5f91159340e293ffd7ee02a | 938 | py | Python |
examples/simple.py | Jessime/word_cloud | 36d4e26cf1347a3979ef83d01714a82652685ada | ["MIT"] | 8,935 | 2015-01-01T07:23:24.000Z | 2022-03-31T10:40:34.000Z |
examples/simple.py | Jessime/word_cloud | 36d4e26cf1347a3979ef83d01714a82652685ada | ["MIT"] | 579 | 2015-01-09T17:28:55.000Z | 2022-03-30T05:54:12.000Z |
examples/simple.py | Jessime/word_cloud | 36d4e26cf1347a3979ef83d01714a82652685ada | ["MIT"] | 2,522 | 2015-01-02T17:11:32.000Z | 2022-03-31T14:35:34.000Z |
#!/usr/bin/env python
"""
Minimal Example
===============
Generating a square wordcloud from the US constitution using default arguments.
"""
import os
from os import path
from wordcloud import WordCloud
# get data directory (using getcwd() is needed to support running example in generated IPython notebook)
d = path.dirname(__file__) if "__file__" in locals() else os.getcwd()
# Read the whole text.
text = open(path.join(d, 'constitution.txt')).read()
# Generate a word cloud image
wordcloud = WordCloud().generate(text)
# Display the generated image:
# the matplotlib way:
import matplotlib.pyplot as plt
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
# lower max_font_size
wordcloud = WordCloud(max_font_size=40).generate(text)
plt.figure()
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
# The pil way (if you don't have matplotlib)
# image = wordcloud.to_image()
# image.show()
| 24.051282 | 104 | 0.740938 |
794a16f2ca707a6946a69d0c35cbdbf7af9550a3 | 6,288 | py | Python |
zerver/lib/presence.py | DD2480-group7-2020/zulip | 9a1e18bcf383c38c35da168563a7345768c6d784 | ["Apache-2.0"] | 1 | 2020-03-17T14:58:50.000Z | 2020-03-17T14:58:50.000Z |
zerver/lib/presence.py | DD2480-group7-2020/zulip | 9a1e18bcf383c38c35da168563a7345768c6d784 | ["Apache-2.0"] | null | null | null |
zerver/lib/presence.py | DD2480-group7-2020/zulip | 9a1e18bcf383c38c35da168563a7345768c6d784 | ["Apache-2.0"] | null | null | null |
from collections import defaultdict
import datetime
import itertools
import time
from django.utils.timezone import now as timezone_now
from typing import Any, Dict, List, Set
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.models import (
query_for_ids,
PushDeviceToken,
Realm,
UserPresence,
UserProfile,
)
def get_status_dicts_for_rows(all_rows: List[Dict[str, Any]],
mobile_user_ids: Set[int],
slim_presence: bool) -> Dict[str, Dict[str, Any]]:
# Note that datetime values have sub-second granularity, which is
# mostly important for avoiding test flakes, but it's also technically
# more precise for real users.
# We could technically do this sort with the database, but doing it
# here prevents us from having to assume the caller is playing nice.
all_rows = sorted(
all_rows,
key = lambda row: (row['user_profile__id'], row['timestamp'])
)
# For now slim_presence just means that we will use
# user_id as a key instead of email. We will eventually
# do other things based on this flag to make things simpler
# for the clients.
if slim_presence:
# Stringify user_id here, since it's gonna be turned
# into a string anyway by JSON, and it keeps mypy happy.
get_user_key = lambda row: str(row['user_profile__id'])
else:
get_user_key = lambda row: row['user_profile__email']
user_statuses = dict() # type: Dict[str, Dict[str, Any]]
for user_key, presence_rows in itertools.groupby(all_rows, get_user_key):
info = get_legacy_user_info(
list(presence_rows),
mobile_user_ids
)
user_statuses[user_key] = info
return user_statuses
def get_legacy_user_info(presence_rows: List[Dict[str, Any]],
mobile_user_ids: Set[int]) -> Dict[str, Any]:
# The format of data here is for legacy users of our API,
# including old versions of the mobile app.
info_rows = []
for row in presence_rows:
client_name = row['client__name']
status = UserPresence.status_to_string(row['status'])
dt = row['timestamp']
timestamp = datetime_to_timestamp(dt)
push_enabled = row['user_profile__enable_offline_push_notifications']
has_push_devices = row['user_profile__id'] in mobile_user_ids
pushable = (push_enabled and has_push_devices)
info = dict(
client=client_name,
status=status,
timestamp=timestamp,
pushable=pushable,
)
info_rows.append(info)
most_recent_info = info_rows[-1]
result = dict()
# The word "aggegrated" here is possibly misleading.
# It's really just the most recent client's info.
result['aggregated'] = dict(
client=most_recent_info['client'],
status=most_recent_info['status'],
timestamp=most_recent_info['timestamp'],
)
# Build a dictionary of client -> info. There should
# only be one row per client, but to be on the safe side,
# we always overwrite with rows that are later in our list.
for info in info_rows:
result[info['client']] = info
return result
def get_presence_for_user(user_profile_id: int,
slim_presence: bool=False) -> Dict[str, Dict[str, Any]]:
query = UserPresence.objects.filter(user_profile_id=user_profile_id).values(
'client__name',
'status',
'timestamp',
'user_profile__email',
'user_profile__id',
'user_profile__enable_offline_push_notifications',
)
presence_rows = list(query)
mobile_user_ids = set() # type: Set[int]
if PushDeviceToken.objects.filter(user_id=user_profile_id).exists(): # nocoverage
# TODO: Add a test, though this is low priority, since we don't use mobile_user_ids yet.
mobile_user_ids.add(user_profile_id)
return get_status_dicts_for_rows(presence_rows, mobile_user_ids, slim_presence)
def get_status_dict_by_realm(realm_id: int, slim_presence: bool = False) -> Dict[str, Dict[str, Any]]:
user_profile_ids = UserProfile.objects.filter(
realm_id=realm_id,
is_active=True,
is_bot=False
).order_by('id').values_list('id', flat=True)
user_profile_ids = list(user_profile_ids)
if not user_profile_ids: # nocoverage
# This conditional is necessary because query_for_ids
# throws an exception if passed an empty list.
#
# It's not clear this condition is actually possible,
# though, because it shouldn't be possible to end up with
# a realm with 0 active users.
return {}
two_weeks_ago = timezone_now() - datetime.timedelta(weeks=2)
query = UserPresence.objects.filter(
realm_id=realm_id,
timestamp__gte=two_weeks_ago,
user_profile__is_active=True,
user_profile__is_bot=False,
).values(
'client__name',
'status',
'timestamp',
'user_profile__email',
'user_profile__id',
'user_profile__enable_offline_push_notifications',
)
presence_rows = list(query)
mobile_query = PushDeviceToken.objects.distinct(
'user_id'
).values_list(
'user_id',
flat=True
)
mobile_query = query_for_ids(
query=mobile_query,
user_ids=user_profile_ids,
field='user_id'
)
mobile_user_ids = set(mobile_query)
return get_status_dicts_for_rows(presence_rows, mobile_user_ids, slim_presence)
def get_presences_for_realm(realm: Realm,
slim_presence: bool) -> Dict[str, Dict[str, Dict[str, Any]]]:
if realm.presence_disabled:
# Return an empty dict if presence is disabled in this realm
return defaultdict(dict)
return get_status_dict_by_realm(realm.id, slim_presence)
def get_presence_response(requesting_user_profile: UserProfile,
slim_presence: bool) -> Dict[str, Any]:
realm = requesting_user_profile.realm
server_timestamp = time.time()
presences = get_presences_for_realm(realm, slim_presence)
return dict(presences=presences, server_timestamp=server_timestamp)
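# For reference, a hypothetical illustration (not from the original module) of the
# per-user dict built by get_legacy_user_info(): one entry per client plus the
# "aggregated" entry copied from the most recent client row.
#
# {
#     'aggregated': {'client': 'website', 'status': 'active', 'timestamp': 1583420000},
#     'website': {'client': 'website', 'status': 'active',
#                 'timestamp': 1583420000, 'pushable': False},
# }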
| 33.989189 | 102 | 0.667303 |
794a188ed24af88fd94bbc14212196a1e96176c4 | 15,549 | py | Python |
saleor/graphql/checkout/dataloaders.py | eanknd/saleor | 08aa724176be00d7aaf654f14e9ae99dd4327f97 | ["CC-BY-4.0"] | 1,392 | 2021-10-06T15:54:28.000Z | 2022-03-31T20:50:55.000Z |
saleor/graphql/checkout/dataloaders.py | eanknd/saleor | 08aa724176be00d7aaf654f14e9ae99dd4327f97 | ["CC-BY-4.0"] | 888 | 2021-10-06T10:48:54.000Z | 2022-03-31T11:00:30.000Z |
saleor/graphql/checkout/dataloaders.py | eanknd/saleor | 08aa724176be00d7aaf654f14e9ae99dd4327f97 | ["CC-BY-4.0"] | 538 | 2021-10-07T16:21:27.000Z | 2022-03-31T22:58:57.000Z |
from collections import defaultdict
from django.db.models import F
from promise import Promise
from ...checkout.fetch import (
CheckoutInfo,
CheckoutLineInfo,
apply_voucher_to_checkout_line,
get_delivery_method_info,
update_delivery_method_lists_for_checkout_info,
)
from ...checkout.models import Checkout, CheckoutLine
from ...discount import VoucherType
from ...payment.models import TransactionItem
from ..account.dataloaders import AddressByIdLoader, UserByUserIdLoader
from ..core.dataloaders import DataLoader
from ..discount.dataloaders import VoucherByCodeLoader, VoucherInfoByVoucherCodeLoader
from ..product.dataloaders import (
CollectionsByVariantIdLoader,
ProductByVariantIdLoader,
ProductTypeByVariantIdLoader,
ProductVariantByIdLoader,
VariantChannelListingByVariantIdAndChannelIdLoader,
)
from ..shipping.dataloaders import (
ShippingMethodByIdLoader,
ShippingMethodChannelListingByChannelSlugLoader,
)
from ..warehouse.dataloaders import WarehouseByIdLoader
class CheckoutByTokenLoader(DataLoader):
context_key = "checkout_by_token"
def batch_load(self, keys):
checkouts = Checkout.objects.using(self.database_connection_name).in_bulk(keys)
return [checkouts.get(token) for token in keys]
class CheckoutLinesInfoByCheckoutTokenLoader(DataLoader):
context_key = "checkoutlinesinfo_by_checkout"
def batch_load(self, keys):
def with_checkout_lines(results):
checkouts, checkout_lines = results
variants_pks = list(
{line.variant_id for lines in checkout_lines for line in lines}
)
if not variants_pks:
return [[] for _ in keys]
channel_pks = [checkout.channel_id for checkout in checkouts]
def with_variants_products_collections(results):
(
variants,
products,
product_types,
collections,
channel_listings,
voucher_infos,
) = results
variants_map = dict(zip(variants_pks, variants))
products_map = dict(zip(variants_pks, products))
product_types_map = dict(zip(variants_pks, product_types))
collections_map = dict(zip(variants_pks, collections))
channel_listings_map = dict(
zip(variant_ids_channel_ids, channel_listings)
)
lines_info_map = defaultdict(list)
voucher_infos_map = {
voucher_info.voucher.code: voucher_info
for voucher_info in voucher_infos
if voucher_info
}
for checkout, lines in zip(checkouts, checkout_lines):
lines_info_map[checkout.pk].extend(
[
CheckoutLineInfo(
line=line,
variant=variants_map[line.variant_id],
channel_listing=channel_listings_map[
(line.variant_id, checkout.channel_id)
],
product=products_map[line.variant_id],
product_type=product_types_map[line.variant_id],
collections=collections_map[line.variant_id],
)
for line in lines
]
)
for checkout in checkouts:
if not checkout.voucher_code:
continue
voucher_info = voucher_infos_map.get(checkout.voucher_code)
if not voucher_info:
continue
voucher = voucher_info.voucher
if (
voucher.type == VoucherType.SPECIFIC_PRODUCT
or voucher.apply_once_per_order
):
apply_voucher_to_checkout_line(
voucher_info=voucher_info,
checkout=checkout,
lines_info=lines_info_map[checkout.pk],
discounts=self.context.discounts,
)
return [lines_info_map[key] for key in keys]
variants = ProductVariantByIdLoader(self.context).load_many(variants_pks)
products = ProductByVariantIdLoader(self.context).load_many(variants_pks)
product_types = ProductTypeByVariantIdLoader(self.context).load_many(
variants_pks
)
collections = CollectionsByVariantIdLoader(self.context).load_many(
variants_pks
)
voucher_codes = {
checkout.voucher_code for checkout in checkouts if checkout.voucher_code
}
voucher_infos = VoucherInfoByVoucherCodeLoader(self.context).load_many(
voucher_codes
)
variant_ids_channel_ids = []
for channel_id, lines in zip(channel_pks, checkout_lines):
variant_ids_channel_ids.extend(
[(line.variant_id, channel_id) for line in lines]
)
channel_listings = VariantChannelListingByVariantIdAndChannelIdLoader(
self.context
).load_many(variant_ids_channel_ids)
return Promise.all(
[
variants,
products,
product_types,
collections,
channel_listings,
voucher_infos,
]
).then(with_variants_products_collections)
checkouts = CheckoutByTokenLoader(self.context).load_many(keys)
checkout_lines = CheckoutLinesByCheckoutTokenLoader(self.context).load_many(
keys
)
return Promise.all([checkouts, checkout_lines]).then(with_checkout_lines)
class CheckoutByUserLoader(DataLoader):
context_key = "checkout_by_user"
def batch_load(self, keys):
checkouts = Checkout.objects.using(self.database_connection_name).filter(
user_id__in=keys, channel__is_active=True
)
checkout_by_user_map = defaultdict(list)
for checkout in checkouts:
checkout_by_user_map[checkout.user_id].append(checkout)
return [checkout_by_user_map.get(user_id) for user_id in keys]
class CheckoutByUserAndChannelLoader(DataLoader):
context_key = "checkout_by_user_and_channel"
def batch_load(self, keys):
user_ids = [key[0] for key in keys]
channel_slugs = [key[1] for key in keys]
checkouts = (
Checkout.objects.using(self.database_connection_name)
.filter(
user_id__in=user_ids,
channel__slug__in=channel_slugs,
channel__is_active=True,
)
.annotate(channel_slug=F("channel__slug"))
)
checkout_by_user_and_channel_map = defaultdict(list)
for checkout in checkouts:
key = (checkout.user_id, checkout.channel_slug)
checkout_by_user_and_channel_map[key].append(checkout)
return [checkout_by_user_and_channel_map.get(key) for key in keys]
class CheckoutInfoByCheckoutTokenLoader(DataLoader):
context_key = "checkoutinfo_by_checkout"
def batch_load(self, keys):
def with_checkout(data):
checkouts, checkout_line_infos = data
from ..channel.dataloaders import ChannelByIdLoader
channel_pks = [checkout.channel_id for checkout in checkouts]
def with_channel(channels):
billing_address_ids = {
checkout.billing_address_id
for checkout in checkouts
if checkout.billing_address_id
}
shipping_address_ids = {
checkout.shipping_address_id
for checkout in checkouts
if checkout.shipping_address_id
}
addresses = AddressByIdLoader(self.context).load_many(
billing_address_ids | shipping_address_ids
)
users = UserByUserIdLoader(self.context).load_many(
[checkout.user_id for checkout in checkouts if checkout.user_id]
)
shipping_method_ids = [
checkout.shipping_method_id
for checkout in checkouts
if checkout.shipping_method_id
]
shipping_methods = ShippingMethodByIdLoader(self.context).load_many(
shipping_method_ids
)
channel_slugs = [channel.slug for channel in channels]
shipping_method_channel_listings = (
ShippingMethodChannelListingByChannelSlugLoader(
self.context
).load_many(channel_slugs)
)
collection_point_ids = [
checkout.collection_point_id
for checkout in checkouts
if checkout.collection_point_id
]
collection_points = WarehouseByIdLoader(self.context).load_many(
collection_point_ids
)
voucher_codes = {
checkout.voucher_code
for checkout in checkouts
if checkout.voucher_code
}
vouchers = VoucherByCodeLoader(self.context).load_many(voucher_codes)
def with_checkout_info(results):
(
addresses,
users,
shipping_methods,
listings_for_channels,
collection_points,
vouchers,
) = results
address_map = {address.id: address for address in addresses}
user_map = {user.id: user for user in users}
shipping_method_map = {
shipping_method.id: shipping_method
for shipping_method in shipping_methods
}
collection_points_map = {
collection_point.id: collection_point
for collection_point in collection_points
}
voucher_map = {voucher.code: voucher for voucher in vouchers}
checkout_info_map = {}
for key, checkout, channel, checkout_lines in zip(
keys, checkouts, channels, checkout_line_infos
):
shipping_method = shipping_method_map.get(
checkout.shipping_method_id
)
collection_point = collection_points_map.get(
checkout.collection_point_id
)
shipping_address = address_map.get(checkout.shipping_address_id)
delivery_method_info = get_delivery_method_info(
None, shipping_address
)
voucher = voucher_map.get(checkout.voucher_code)
checkout_info = CheckoutInfo(
checkout=checkout,
user=user_map.get(checkout.user_id),
channel=channel,
billing_address=address_map.get(
checkout.billing_address_id
),
shipping_address=address_map.get(
checkout.shipping_address_id
),
delivery_method_info=delivery_method_info,
valid_pick_up_points=[],
all_shipping_methods=[],
voucher=voucher,
)
manager = self.context.plugins
discounts = self.context.discounts
shipping_method_listings = [
listing
for channel_listings in listings_for_channels
for listing in channel_listings
if listing.channel_id == channel.id
]
update_delivery_method_lists_for_checkout_info(
checkout_info,
shipping_method,
collection_point,
shipping_address,
checkout_lines,
discounts,
manager,
shipping_method_listings,
)
checkout_info_map[key] = checkout_info
return [checkout_info_map[key] for key in keys]
return Promise.all(
[
addresses,
users,
shipping_methods,
shipping_method_channel_listings,
collection_points,
vouchers,
]
).then(with_checkout_info)
return (
ChannelByIdLoader(self.context)
.load_many(channel_pks)
.then(with_channel)
)
checkouts = CheckoutByTokenLoader(self.context).load_many(keys)
checkout_line_infos = CheckoutLinesInfoByCheckoutTokenLoader(
self.context
).load_many(keys)
return Promise.all([checkouts, checkout_line_infos]).then(with_checkout)
class CheckoutLineByIdLoader(DataLoader):
context_key = "checkout_line_by_id"
def batch_load(self, keys):
checkout_lines = CheckoutLine.objects.using(
self.database_connection_name
).in_bulk(keys)
return [checkout_lines.get(line_id) for line_id in keys]
class CheckoutLinesByCheckoutTokenLoader(DataLoader):
context_key = "checkoutlines_by_checkout"
def batch_load(self, keys):
lines = CheckoutLine.objects.using(self.database_connection_name).filter(
checkout_id__in=keys
)
line_map = defaultdict(list)
for line in lines.iterator():
line_map[line.checkout_id].append(line)
return [line_map.get(checkout_id, []) for checkout_id in keys]
class TransactionItemsByCheckoutIDLoader(DataLoader):
context_key = "transaction_items_by_checkout_id"
def batch_load(self, keys):
transactions = (
TransactionItem.objects.using(self.database_connection_name)
.filter(checkout_id__in=keys)
.order_by("pk")
)
transactions_map = defaultdict(list)
for transaction in transactions:
transactions_map[transaction.checkout_id].append(transaction)
return [transactions_map[checkout_id] for checkout_id in keys]
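# A small sketch of the batch-loading contract the classes above follow
# (hypothetical loader, illustrative only): batch_load() receives the requested
# keys and must return exactly one result per key, in the same order.
class CheckoutLineCountByCheckoutTokenLoader(DataLoader):
    context_key = "checkout_line_count_by_checkout_token"
    def batch_load(self, keys):
        lines = CheckoutLine.objects.using(self.database_connection_name).filter(
            checkout_id__in=keys
        )
        counts = defaultdict(int)
        for line in lines:
            counts[line.checkout_id] += 1
        return [counts[checkout_id] for checkout_id in keys]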
| 40.704188 | 88 | 0.548974 |
794a1967bb710ad39b1bbf6992bb8d126146ce15 | 10,444 | py | Python |
py3status/events.py | boucman/py3status | 84b57304fbf71a466ccb1ed2f2dd039ece6eefb6 | ["BSD-3-Clause"] | 1 | 2020-04-07T19:11:36.000Z | 2020-04-07T19:11:36.000Z |
py3status/events.py | boucman/py3status | 84b57304fbf71a466ccb1ed2f2dd039ece6eefb6 | ["BSD-3-Clause"] | 2 | 2018-03-15T18:44:42.000Z | 2018-03-15T19:22:04.000Z |
py3status/events.py | boucman/py3status | 84b57304fbf71a466ccb1ed2f2dd039ece6eefb6 | ["BSD-3-Clause"] | null | null | null |
import select
import sys
from threading import Thread
from subprocess import Popen, PIPE
from json import loads
from py3status.profiling import profile
try:
# Python 3
from shlex import quote as shell_quote
except ImportError:
# Python 2
from pipes import quote as shell_quote
class IOPoller:
"""
This class implements a predictive and timing-out I/O reader
using select and the poll() mechanism for greater compatibility.
"""
def __init__(self, io, eventmask=select.POLLIN):
"""
Our default is to read (POLLIN) the specified 'io' file descriptor.
"""
self.io = io
self.poller = select.poll()
self.poller.register(io, eventmask)
def readline(self, timeout=500):
"""
Try to read our I/O for 'timeout' milliseconds, return None otherwise.
        This makes calling and reading I/O non-blocking!
"""
poll_result = self.poller.poll(timeout)
if poll_result:
line = self.io.readline().strip()
if self.io == sys.stdin and line == "[":
# skip first event line wrt issue #19
line = self.io.readline().strip()
try:
# python3 compatibility code
line = line.decode()
except (AttributeError, UnicodeDecodeError):
pass
return line
else:
return None
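# Usage sketch (hypothetical, mirrors how Events uses it below): poll stdin without
# blocking the thread.
#
#     poller = IOPoller(sys.stdin)
#     line = poller.readline(timeout=500)  # None if nothing arrived within 500 ms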
class EventTask:
"""
A simple task that can be run by the scheduler.
"""
def __init__(self, module_name, event, default_event, events_thread):
self.events_thread = events_thread
self.module_full_name = module_name
self.default_event = default_event
self.event = event
def run(self):
self.events_thread.process_event(
self.module_full_name, self.event, self.default_event
)
class EventClickTask:
"""
A task to run an external on_click event
"""
def __init__(self, module_name, event, events_thread, command):
self.events_thread = events_thread
self.module_name = module_name
self.command = command
self.event = event
def run(self):
self.events_thread.on_click_dispatcher(
self.module_name, self.event, self.command
)
class Events(Thread):
"""
This class is responsible for dispatching event JSONs sent by the i3bar.
"""
def __init__(self, py3_wrapper):
"""
We need to poll stdin to receive i3bar messages.
"""
Thread.__init__(self)
self.config = py3_wrapper.config
self.error = None
self.py3_config = py3_wrapper.config["py3_config"]
self.modules = py3_wrapper.modules
self.on_click = self.py3_config["on_click"]
self.output_modules = py3_wrapper.output_modules
self.poller_inp = IOPoller(sys.stdin)
self.py3_wrapper = py3_wrapper
def get_module_text(self, module_name, event):
"""
Get the full text for the module as well as the partial text if the
module is a composite. Partial text is the text for just the single
section of a composite.
"""
index = event.get("index")
module_info = self.py3_wrapper.output_modules.get(module_name)
output = module_info["module"].get_latest()
full_text = u"".join([out["full_text"] for out in output])
partial = None
if index is not None:
if isinstance(index, int):
partial = output[index]
else:
for item in output:
if item.get("index") == index:
partial = item
break
if partial:
partial_text = partial["full_text"]
else:
partial_text = full_text
return full_text, partial_text
def on_click_dispatcher(self, module_name, event, command):
"""
Dispatch on_click config parameters to either:
- Our own methods for special py3status commands (listed below)
- The i3-msg program which is part of i3wm
"""
if command is None:
return
elif command == "refresh_all":
self.py3_wrapper.refresh_modules()
elif command == "refresh":
self.py3_wrapper.refresh_modules(module_name)
else:
# In commands we are able to use substitutions for the text output
# of a module
if "$OUTPUT" in command or "$OUTPUT_PART" in command:
full_text, partial_text = self.get_module_text(module_name, event)
command = command.replace("$OUTPUT_PART", shell_quote(partial_text))
command = command.replace("$OUTPUT", shell_quote(full_text))
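                # e.g. an on_click value of "notify-send $OUTPUT" (hypothetical config)
                # becomes "notify-send '<full module text>'" before being sent to the wm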
            # this is an i3 message
self.wm_msg(module_name, command)
# to make the bar more responsive to users we ask for a refresh
# of the module or of i3status if the module is an i3status one
self.py3_wrapper.refresh_modules(module_name)
def wm_msg(self, module_name, command):
"""
Execute the message with i3-msg or swaymsg and log its output.
"""
wm_msg = self.config["wm"]["msg"]
pipe = Popen([wm_msg, command], stdout=PIPE)
self.py3_wrapper.log(
'{} module="{}" command="{}" stdout={}'.format(
wm_msg, module_name, command, pipe.stdout.read()
)
)
def process_event(self, module_name, event, default_event=False):
"""
Process the event for the named module.
Events may have been declared in i3status.conf, modules may have
on_click() functions. There is a default middle click event etc.
"""
# get the module that the event is for
module_info = self.output_modules.get(module_name)
# if module is a py3status one call it.
if module_info["type"] == "py3status":
module = module_info["module"]
module.click_event(event)
if self.config["debug"]:
self.py3_wrapper.log("dispatching event {}".format(event))
# to make the bar more responsive to users we refresh the module
# unless the on_click event called py3.prevent_refresh()
if not module.prevent_refresh:
self.py3_wrapper.refresh_modules(module_name)
default_event = False
if default_event:
# default button 2 action is to clear this method's cache
if self.config["debug"]:
self.py3_wrapper.log("dispatching default event {}".format(event))
self.py3_wrapper.refresh_modules(module_name)
# find container that holds the module and call its onclick
module_groups = self.py3_config[".module_groups"]
containers = module_groups.get(module_name, [])
for container in containers:
self.process_event(container, event)
def dispatch_event(self, event):
"""
Takes an event dict. Logs the event if needed and cleans up the dict
        such as setting the index needed for composites.
"""
if self.config["debug"]:
self.py3_wrapper.log("received event {}".format(event))
# usage variables
event["index"] = event.get("index", "")
instance = event.get("instance", "")
name = event.get("name", "")
        # composites have an index which is passed to i3bar with the
        # instance. We need to separate this out here and clean up the
        # event. If the index is an integer then cast it as such.
if " " in instance:
instance, index = instance.split(" ", 1)
try:
index = int(index)
except ValueError:
pass
event["index"] = index
event["instance"] = instance
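            # e.g. a composite instance "eth0 2" (hypothetical) is split into
            # instance "eth0" with index 2 here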
if self.config["debug"]:
self.py3_wrapper.log(
'trying to dispatch event to module "{}"'.format(
"{} {}".format(name, instance).strip()
)
)
# guess the module config name
module_name = "{} {}".format(name, instance).strip()
default_event = False
module_info = self.output_modules.get(module_name)
module = module_info["module"]
# execute any configured i3-msg command
# we do not do this for containers
# modules that have failed do not execute their config on_click
if module.allow_config_clicks:
button = event.get("button", 0)
on_click = self.on_click.get(module_name, {}).get(str(button))
if on_click:
task = EventClickTask(module_name, event, self, on_click)
self.py3_wrapper.timeout_queue_add(task)
# otherwise setup default action on button 2 press
elif button == 2:
default_event = True
# do the work
task = EventTask(module_name, event, default_event, self)
self.py3_wrapper.timeout_queue_add(task)
@profile
def run(self):
"""
Wait for an i3bar JSON event, then find the right module to dispatch
the message to based on the 'name' and 'instance' of the event.
In case the module does NOT support click_events, the default
implementation is to clear the module's cache
when the MIDDLE button (2) is pressed on it.
Example event:
{'y': 13, 'x': 1737, 'button': 1, 'name': 'empty', 'instance': 'first'}
"""
try:
while self.py3_wrapper.running:
event_str = self.poller_inp.readline()
if not event_str:
continue
try:
# remove leading comma if present
if event_str[0] == ",":
event_str = event_str[1:]
event = loads(event_str)
self.dispatch_event(event)
except Exception:
self.py3_wrapper.report_exception("Event failed")
except: # noqa e722
err = "Events thread died, click events are disabled."
self.py3_wrapper.report_exception(err, notify_user=False)
self.py3_wrapper.notify_user(err, level="warning")
| 35.890034
| 84
| 0.588568
|
794a1993e788670563631d6574aa5b89561e42d9
| 6,865
|
py
|
Python
|
game.py
|
paulvinell/2048-python-custom-player
|
f4bae0a0b220ce1f7bf3d2a76d350bed93816064
|
[
"MIT"
] | null | null | null |
game.py
|
paulvinell/2048-python-custom-player
|
f4bae0a0b220ce1f7bf3d2a76d350bed93816064
|
[
"MIT"
] | null | null | null |
game.py
|
paulvinell/2048-python-custom-player
|
f4bae0a0b220ce1f7bf3d2a76d350bed93816064
|
[
"MIT"
] | null | null | null |
import random
import numpy as np
import copy
import constants as c
class Game():
def __init__(self, log_history=False):
# Fitness variables
self.move_count = 0
self.max_tile = 0
self.score = 0
# Reward variables
self.score_diff = 0
self.tile_count_diff = 0
# History variables
self.log_history = log_history
if self.log_history:
self.move_history = []
self.board_history = []
# Game variables
self.matrix = self.__init_matrix()
def make_move(self, move):
game = self.matrix
game_copy = copy.deepcopy(game)
score_before = self.score
tile_count_before = self.__tile_count()
if move == 0:
game = self.__up(game)
elif move == 1:
game = self.__down(game)
elif move == 2:
game = self.__left(game)
elif move == 3:
game = self.__right(game)
changed = not np.array_equal(game, game_copy)
# Case 1: there was no change: don't add a tile
# Case 2: board was full and there was a change:
# at least one tile has been merged, and there is space for more.
# Case 3: board was not full and there was a change:
# there was and is space for more.
if changed:
game = self.__add_two(game)
self.move_count += 1
if self.log_history:
self.move_history.append(move)
self.board_history.append(game_copy)
self.matrix = game
self.score_diff = self.score - score_before
self.tile_count_diff = self.__tile_count() - tile_count_before
return changed
# In this variant, there is only Lost/Not lost.
def alive(self):
game = self.matrix
for i in range(len(game)): # check for any zero entries
for j in range(len(game[0])):
if game[i][j] == 0:
return True
for i in range(len(game)): # Check across x/columns
for j in range(len(game[0]) - 1):
if game[i][j] == game[i][j+1]:
return True
for j in range(len(game[0])): # Check across y/rows
for i in range(len(game) - 1):
if game[i][j] == game[i+1][j]:
return True
return False
# Calculates which directions it is possible to move in
def possible_directions(self):
directions = []
mat = self.matrix
for i in range(1, len(mat)):
for j in range(len(mat[0])):
if mat[i][j] > 0 and (mat[i][j] == mat[i-1][j] or mat[i-1][j] == 0):
directions.append(0) # UP
break
if 0 in directions:
break
for i in range(len(mat) - 1):
for j in range(len(mat[0])):
if mat[i][j] > 0 and (mat[i][j] == mat[i+1][j] or mat[i+1][j] == 0):
directions.append(1) # DOWN
break
if 1 in directions:
break
for i in range(len(mat)):
for j in range(1, len(mat[0])):
if mat[i][j] > 0 and (mat[i][j] == mat[i][j-1] or mat[i][j-1] == 0):
directions.append(2) # LEFT
break
if 2 in directions:
break
for i in range(len(mat)):
for j in range(len(mat[0]) - 1):
if mat[i][j] > 0 and (mat[i][j] == mat[i][j+1] or mat[i][j+1] == 0):
directions.append(3) # RIGHT
break
if 3 in directions:
break
return directions
# Creates a game board of dimensions
# specified in Constants and adds
# two starting tiles.
def __init_matrix(self):
matrix = []
for i in range(c.GRID_LEN_Y):
matrix.append([0] * c.GRID_LEN_X)
matrix = self.__add_two(matrix)
matrix = self.__add_two(matrix)
self.tile_count_diff = 2
return matrix
# Adds a two or four tile to an empty slot
def __add_two(self, mat):
empty = []
for a, col in enumerate(mat):
for b, elem in enumerate(col):
if elem == 0:
empty.append((a, b))
if len(empty) == 0:
return mat
a, b = random.choice(empty)
value = 4 if random.random() <= c.PROBABILITY_4 else 2
mat[a][b] = value
self.max_tile = np.maximum(self.max_tile, value)
return mat
def __tile_count(self):
res = 0
for a, col in enumerate(self.matrix):
for b, elem in enumerate(col):
if elem > 0:
res += 1
return res
def __reverse(self, mat):
new = []
for i in range(len(mat)):
new.append([])
for j in range(len(mat[0])):
new[i].append(mat[i][len(mat[0])-j-1])
return new
def __transpose(self, mat):
new = []
for i in range(len(mat[0])):
new.append([])
for j in range(len(mat)):
new[i].append(mat[j][i])
return new
def __cover_up(self, mat):
new = [[0] * len(mat[0]) for _ in range(len(mat))]
for i in range(len(mat)):
count = 0
for j in range(len(mat[0])):
if mat[i][j] != 0:
new[i][count] = mat[i][j]
count += 1
return new
def __merge(self, mat):
for i in range(len(mat)):
for j in range(len(mat[0])-1):
if mat[i][j] == mat[i][j+1] and mat[i][j] != 0:
mat[i][j] *= 2
mat[i][j+1] = 0
self.max_tile = np.maximum(self.max_tile, mat[i][j])
self.score += mat[i][j]
return mat
def __up(self, game):
game = self.__transpose(game)
game = self.__cover_up(game)
game = self.__merge(game)
game = self.__cover_up(game)
game = self.__transpose(game)
return game
def __down(self, game):
game = self.__reverse(self.__transpose(game))
game = self.__cover_up(game)
game = self.__merge(game)
game = self.__cover_up(game)
game = self.__transpose(self.__reverse(game))
return game
def __left(self, game):
game = self.__cover_up(game)
game = self.__merge(game)
game = self.__cover_up(game)
return game
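    # Example: __left on the row [2, 2, 4, 0] yields [4, 4, 0, 0] and adds 4 to self.score.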
def __right(self, game):
game = self.__reverse(game)
game = self.__cover_up(game)
game = self.__merge(game)
game = self.__cover_up(game)
game = self.__reverse(game)
return game
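# Usage sketch (hypothetical driver, not part of the class above): play random legal
# moves until the game is lost.
#
#     g = Game()
#     while g.alive():
#         g.make_move(random.choice(g.possible_directions()))
#     print(g.score, g.max_tile)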
| 29.088983
| 84
| 0.499199
|
794a19e7ab0e91c4a1e51ffd790753d7021a3bfb
| 1,539
|
py
|
Python
|
Python Advanced Exam - 27 June 2020/Bombs.py
|
DiyanKalaydzhiev23/Advanced---Python
|
ed2c60bb887c49e5a87624719633e2b8432f6f6b
|
[
"MIT"
] | null | null | null |
Python Advanced Exam - 27 June 2020/Bombs.py
|
DiyanKalaydzhiev23/Advanced---Python
|
ed2c60bb887c49e5a87624719633e2b8432f6f6b
|
[
"MIT"
] | null | null | null |
Python Advanced Exam - 27 June 2020/Bombs.py
|
DiyanKalaydzhiev23/Advanced---Python
|
ed2c60bb887c49e5a87624719633e2b8432f6f6b
|
[
"MIT"
] | null | null | null |
from collections import deque
def check_bomb():
if bomb in (40, 60, 120):
if bomb == 60:
bombs_dict['Cherry Bombs'] += 1
elif bomb == 40:
bombs_dict['Datura Bombs'] += 1
elif bomb == 120:
bombs_dict['Smoke Decoy Bombs'] += 1
return True
return False
def check_bomb_pouch():
for bomb_value in bombs_dict.values():
if bomb_value < 3:
return False
return True
bombs_effect = deque(int(n) for n in input().split(", "))
bombs_casings = deque(int(n) for n in input().split(", "))
bombs_dict = {"Cherry Bombs": 0, "Datura Bombs": 0, "Smoke Decoy Bombs": 0}
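# Target sums (effect + casing): Datura = 40, Cherry = 60, Smoke Decoy = 120;
# the pouch is full once there are at least 3 bombs of each kind.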
while bombs_effect and bombs_casings:
effect = bombs_effect.popleft()
casing = bombs_casings.pop()
bomb = effect + casing
good_bomb = check_bomb()
if not good_bomb:
bombs_casings.append(casing-5)
bombs_effect.appendleft(effect)
if check_bomb_pouch():
break
if check_bomb_pouch():
print("Bene! You have successfully filled the bomb pouch!")
else:
print("You don't have enough materials to fill the bomb pouch.")
if bombs_effect:
print(f"Bomb Effects: {', '.join([str(bomb) for bomb in bombs_effect])}")
else:
print("Bomb Effects: empty")
if bombs_casings:
print(f"Bomb Casings: {', '.join([str(bomb) for bomb in bombs_casings])}")
else:
print("Bomb Casings: empty")
for bomb_name, count in bombs_dict.items():
print(f"{bomb_name}: {count}")
| 27
| 79
| 0.609487
|
794a1abf96cdcf6a49091a7ef003bf24f0fd4bab
| 2,378
|
py
|
Python
|
baike_spider/spider_main.py
|
python-qi/PythonScrapingWithDiskCache
|
392afed650a09bd5029da7a93a7bcae122c7bb76
|
[
"Apache-2.0"
] | null | null | null |
baike_spider/spider_main.py
|
python-qi/PythonScrapingWithDiskCache
|
392afed650a09bd5029da7a93a7bcae122c7bb76
|
[
"Apache-2.0"
] | null | null | null |
baike_spider/spider_main.py
|
python-qi/PythonScrapingWithDiskCache
|
392afed650a09bd5029da7a93a7bcae122c7bb76
|
[
"Apache-2.0"
] | null | null | null |
#coding:utf-8
import url_manager, html_downLoader, html_parser, html_outputer
import robotparser
import disk_cache
import logging
import sys
if sys.getdefaultencoding()!="utf-8":
reload(sys)
sys.setdefaultencoding("utf-8")
class SpiderMain(object):
def __init__(self):
self.urls = url_manager.Url_Manager()
self.downloader = html_downLoader.HtmlDownLoader()
self.parser = html_parser.HtmlParser()
self.outper = html_outputer.OutPuter()
self.cache=disk_cache.DiskCache()
def craw_isrunning(self,new_url):
if self.cache[new_url] is not None:
#print self.cache[new_url]
html_cont=self.cache[new_url]
else:
            # the code above works as expected
html_cont = self.downloader.downLoad(new_url)
self.cache[new_url]=html_cont
new_urls, new_data = self.parser.parse(new_url,html_cont)
# for seturl in new_urls:
# print 'seturl:%s'%(seturl)
# print new_urls
# print new_data
self.urls.add_new_urls(new_urls)
self.outper.collect_data(new_data)
def craw(self, rool_url):
count = 1
self.urls.add_new_url(rool_url)
throttle=self.downloader.Throttle(0)
rp=robotparser.RobotFileParser()
rp.set_url('https://baike.baidu.com/robots.txt')
rp.read()
user_agent='wswp'
while self.urls.has_new_url():
try:
new_url = self.urls.get_new_url()
                print u'Page %d: %s'%(count,new_url)
if True:#rp.can_fetch(user_agent,new_url):
#print "if is running"
throttle.wait(new_url)
self.craw_isrunning(new_url)
if count == 20:
break
count = count + 1
else:
print 'Blocked by robots.txt',new_url
except Exception, e:
print 'craw failed'
print e.message
print"111"
self.outper.output_html()
# for oldurl in self.urls.old_urls:
# print 'oldurl%s:' % (oldurl)
# for newurl in self.urls.new_urls:
# print 'newurl%s:' % (newurl)
if __name__ == '__main__':
rool_url = "http://example.webscraping.com"
obj_spider = SpiderMain()
obj_spider.craw(rool_url)
| 33.971429
| 65
| 0.578638
|
794a1b1a1f554b972fa45e5ed3e5bdcb3b1f3df6
| 4,260
|
py
|
Python
|
tutorials/rllib_pistonball.py
|
carlosluis/PettingZoo
|
aec87907777e0e62652d499b3ac96f680acf35ad
|
[
"Apache-2.0"
] | 846
|
2020-05-12T05:55:00.000Z
|
2021-10-08T19:38:40.000Z
|
tutorials/rllib_pistonball.py
|
carlosluis/PettingZoo
|
aec87907777e0e62652d499b3ac96f680acf35ad
|
[
"Apache-2.0"
] | 237
|
2020-04-27T06:01:39.000Z
|
2021-10-13T02:55:54.000Z
|
tutorials/rllib_pistonball.py
|
carlosluis/PettingZoo
|
aec87907777e0e62652d499b3ac96f680acf35ad
|
[
"Apache-2.0"
] | 126
|
2020-05-29T04:20:29.000Z
|
2021-10-13T05:31:12.000Z
|
from ray import tune
from ray.rllib.models import ModelCatalog
from ray.tune.registry import register_env
from ray.rllib.env.wrappers.pettingzoo_env import ParallelPettingZooEnv
from pettingzoo.butterfly import pistonball_v5
import supersuit as ss
import torch
from torch import nn
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray import shutdown
class CNNModelV2(TorchModelV2, nn.Module):
def __init__(self, obs_space, act_space, num_outputs, *args, **kwargs):
TorchModelV2.__init__(self, obs_space, act_space, num_outputs, *args, **kwargs)
nn.Module.__init__(self)
self.model = nn.Sequential(
nn.Conv2d(
3,
32,
[8, 8],
stride=(4, 4)),
nn.ReLU(),
nn.Conv2d(
32,
64,
[4, 4],
stride=(2, 2)),
nn.ReLU(),
nn.Conv2d(
64,
64,
[3, 3],
stride=(1, 1)),
nn.ReLU(),
nn.Flatten(),
            nn.Linear(3136, 512),
nn.ReLU(),
)
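        # Shape check (assuming the 84x84x3 observations produced by env_creator below):
        # 84 -> (84-8)//4+1 = 20 -> (20-4)//2+1 = 9 -> (9-3)//1+1 = 7, so Flatten yields
        # 64 * 7 * 7 = 3136 features, matching the Linear(3136, 512) layer above.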
self.policy_fn = nn.Linear(512, num_outputs)
self.value_fn = nn.Linear(512, 1)
def forward(self, input_dict, state, seq_lens):
model_out = self.model(input_dict["obs"].permute(0, 3, 1, 2))
self._value_out = self.value_fn(model_out)
return self.policy_fn(model_out), state
def value_function(self):
return self._value_out.flatten()
def env_creator(args):
env = pistonball_v5.parallel_env(n_pistons=20,
time_penalty=-0.1,
continuous=True,
random_drop=True,
random_rotate=True,
ball_mass=0.75,
ball_friction=0.3,
ball_elasticity=1.5,
max_cycles=125)
env = ss.color_reduction_v0(env, mode='B')
env = ss.dtype_v0(env, 'float32')
env = ss.resize_v0(env, x_size=84, y_size=84)
env = ss.frame_stack_v1(env, 3)
env = ss.normalize_obs_v0(env, env_min=0, env_max=1)
return env
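# The wrapper chain above yields 84x84 observations with 3 stacked single-channel
# frames, normalized to [0, 1], which is what CNNModelV2 expects as input.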
if __name__ == "__main__":
shutdown()
env_name = "pistonball_v5"
register_env(env_name, lambda config: ParallelPettingZooEnv(env_creator(config)))
test_env = ParallelPettingZooEnv(env_creator({}))
obs_space = test_env.observation_space
act_space = test_env.action_space
ModelCatalog.register_custom_model("CNNModelV2", CNNModelV2)
def gen_policy(i):
config = {
"model": {
"custom_model": "CNNModelV2",
},
"gamma": 0.99,
}
return (None, obs_space, act_space, config)
policies = {"policy_0": gen_policy(0)}
policy_ids = list(policies.keys())
tune.run(
"PPO",
name="PPO",
stop={"timesteps_total": 5000000},
checkpoint_freq=10,
local_dir="~/ray_results/"+env_name,
config={
# Environment specific
"env": env_name,
# General
"log_level": "ERROR",
"framework": "torch",
"num_gpus": 1,
"num_workers": 4,
"num_envs_per_worker": 1,
"compress_observations": False,
"batch_mode": 'truncate_episodes',
# 'use_critic': True,
'use_gae': True,
"lambda": 0.9,
"gamma": .99,
# "kl_coeff": 0.001,
# "kl_target": 1000.,
"clip_param": 0.4,
'grad_clip': None,
"entropy_coeff": 0.1,
'vf_loss_coeff': 0.25,
"sgd_minibatch_size": 64,
"num_sgd_iter": 10, # epoc
'rollout_fragment_length': 512,
"train_batch_size": 512,
'lr': 2e-05,
"clip_actions": True,
# Method specific
"multiagent": {
"policies": policies,
"policy_mapping_fn": (
lambda agent_id: policy_ids[0]),
},
},
)
| 30
| 87
| 0.51338
|
794a1b66c416b4d4fdb5425f0e030919cc9362d6
| 9,246
|
py
|
Python
|
sensei_mode.py
|
aq-amani/music-theory
|
e9292e2bf818f9d98d1d9042ad854219ca61c081
|
[
"MIT"
] | 1
|
2022-02-10T23:36:23.000Z
|
2022-02-10T23:36:23.000Z
|
sensei_mode.py
|
aq-amani/music-theory
|
e9292e2bf818f9d98d1d9042ad854219ca61c081
|
[
"MIT"
] | null | null | null |
sensei_mode.py
|
aq-amani/music-theory
|
e9292e2bf818f9d98d1d9042ad854219ca61c081
|
[
"MIT"
] | null | null | null |
import time
import sys
import mt_toolbox as mt
# Note is assumed to live in the same toolbox module (it is used by the calls below)
from mt_toolbox import Note
def sensei_print(s, play_flag=False, wait_flag=True):
for c in s:
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(0.04)
if wait_flag:
input("\n[Press any key to play the sample...]" if play_flag else "\n[Press any key to continue...]")
def main():
sensei_print(
"""
Hello there! I'll explain to you some music theory concepts in the simplest way.
You can freely experiment with all the concepts explained here and more
by using the main music_theory_lab.py script.
This explanation will cover the following concepts:
- Notes
- Octaves
- Scales
- Chords
"""
)
sensei_print(
"""
First let us start with the nature of sound.
As you already know from grade school,
sound is the way our brains perceive vibrations that travel through air or any other medium.
These vibrations can be represented as waves of certain frequencies.
The simplest form of sound can be created using a single sine wave.
    Here is how a single sine wave with a frequency of 261.6 Hz sounds...
"""
, play_flag=True)
# Play C note
mt.play_note_by_name('C', 700, 4)
time.sleep(1)
sensei_print(
"""
    That single sine wave you just listened to is also called a [Musical note].
    261.6 Hz corresponds to the *Do* sound in the 'Do Re Mi' sequence we all know.
It is also called 'C' in musical notation.
Every different frequency gives a different sound.
Increasing the frequency of the note (wave) will produce a sharper higher pitched sound,
while decreasing it will produce a deeper lower pitched sound.
See how the *Do* sound (261.6 Hz wave) compares to a 329.6 Hz wave (*Mi* sound) and a 440 Hz wave (*La* sound).
Do is 'C', Mi is 'E' and La is 'A' in musical notation.
"""
, play_flag=True)
# play note C -o4 and E-o4 and A -o4
mt.play_note_by_name('C', 700, 4)
time.sleep(1)
mt.play_note_by_name('E', 700, 4)
time.sleep(1)
mt.play_note_by_name('A', 700, 4)
time.sleep(1)
sensei_print(
"""
I will replace the word *wave* with [note] instead hereafter.
Doubling the frequency of a note will produce a sound that our brain perceives as similar but of a higher pitch.
Such notes have the same name but exist on different [octave]s.
See how 261.6 Hz (*Do* we listened to before) compares to the doubled frequency note (261.6 x 2 = 523.2 Hz).
This new note is also a *Do*, but on a different HIGHER [octave].
"""
, play_flag=True)
# play note C -o4 and C-o5
mt.play_note_by_name('C', 700, 4)
time.sleep(1)
mt.play_note_by_name('C', 700, 5)
time.sleep(1)
sensei_print(
"""
    Similarly, we get the same note but on a different LOWER [octave] if we divide the note frequency by 2.
"""
, play_flag=True)
mt.play_note_by_name('C', 700, 4)
time.sleep(1)
mt.play_note_by_name('C', 700, 3)
time.sleep(1)
sensei_print(
"""
People historically tried to divide the frequencies
between notes and their next octave counterpart in many different ways.
Some division systems assumed equal spacing between notes, while others did not.
The most common system is one that divides the interval
between a note and its octave counterpart into [12] logarithmically equal intervals.
That is, each note frequency is equal to the previous note frequency multiplied by 2^(1/12).
"""
)
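    # Worked example: from A4 = 440 Hz, one semitone up is 440 * 2 ** (1 / 12) ~= 466.16 Hz
    # (A#4), and twelve semitones up is exactly 440 * 2 = 880 Hz, the next octave's A.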
sensei_print(
"""
This 2^(1/12) ratio between consecutive frequencies is called a [Half step] or a [Semi-tone].
Two half steps/ semi-tones make a [Whole step] or a [Full Tone].
    Each note on these intervals is given a human-friendly name, a letter between A~G,
instead of just frequency values to refer to these notes.
Here are the basic 12 notes that have names, along with their frequencies on the 4th octave.
"""
)
mt.print_note_info(octave = 4)
sensei_print('')
sensei_print(
"""
And here is how these notes exist on one [octave] of a piano keyboard
"""
)
print(mt.piano_keys)
sensei_print('')
sensei_print(
"""
Notice the '#' marking of some notes.
This '#' means that the note is increased by a half step, and is called a sharp.
    Similarly, 'b' is also used in naming notes.
This 'b' means that the note is decreased by a half step and is called a flat.
    Therefore C# is called [C sharp], and means a C whose frequency is raised by half a step (C frequency * 2^(1/12)).
    On the other hand, Cb is called [C flat], and means a C whose frequency is lowered by half a step (C frequency / 2^(1/12)).
"""
)
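    # For example, C4 ~= 261.63 Hz, so C#4 ~= 261.63 * 2 ** (1 / 12) ~= 277.18 Hz,
    # while Cb4 ~= 261.63 / 2 ** (1 / 12) ~= 246.94 Hz (the same pitch as B3).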
sensei_print(
"""
Looking at the 12 note list, you will notice that some notes can have two names.
For example, G# can also be called Ab, because it is half a step lower than A.
"""
)
sensei_print(
"""
    Now that you know what notes, octaves and steps are, let us move on to musical [scales].
A scale is a sequence of notes, where note frequencies follow a certain relationship (ratios) between each other.
The Do Re Mi sequence we all know is actually one type of scales. It is called the [Major scale].
Listen to and examine the notes on the familiar Major scale.
"""
, play_flag=True)
mt.construct_and_play_scale(Note('C', 4), 'Major', 'Ionian')
time.sleep(1)
sensei_print(
"""
Our familiar major scale starts with the C note,
and its frequencies follow the pattern outlined in the Scale signature field.
T refers to a Tone (Full step) while S is for Semitone (Half step) as explained before.
"""
)
# scale pattern
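    # e.g. the Major scale signature is T-T-S-T-T-T-S, which from a C root gives
    # C D E F G A B (and back to C one octave up).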
sensei_print(
"""
In fact, any scale that follows this pattern is called a Major scale, regardless of the note where it starts.
Therefore scales are defined by the [scale type], Major scale in our example, and the note where it starts from.
The starting note is called a [root].
    A scale type is defined by the ratio sequence of its note frequencies and the number of notes in it.
A major scale has 7 notes.
Here is an A Major scale as another example.
The note frequency ratios follow a major scale pattern,
and its root note is A (starts from A), hence the [A Major scale] naming.
"""
, play_flag=True)
# Play A major scale
mt.construct_and_play_scale(Note('A', 4), 'Major', 'Ionian')
time.sleep(1)
sensei_print(
"""
There are many other types of scales, each with its own pattern or frequency ratio sequence.
The Minor scale, the Pentatonic scale and the Blues scale are some examples of other scale types.
You can use the main script music_theory_lab.py to experiment more with how the different scales sound with different root notes.
For now, listen to how the different scales sound with a C root note.
"""
, play_flag=True)
mt.scale_command_processor('C', 'all', 4, 'Ionian')
sensei_print('')
sensei_print(
"""
Now let us move on to [chords].
A chord is simply, multiple notes played together simultaneously.
The first note in a chord is also called a root note.
As in scales, notes in a [chord] also need to follow
certain frequency ratio rules to sound good together.
To ensure notes follow such frequency ratio rules,
we always pick up notes from a certain scale that we use as the [base scale] for our chord construction.
"""
)
sensei_print(
"""
For example, one type of chords is constructed
by taking the 1st, the 3rd and the 5th notes **with reference to the major scale of the same root**.
    Chords that follow this 1,3,5 position rule are called [Major chords].
If we choose the C note as the root note, then we will get a [C Major chord].
"""
, play_flag=True)
mt.construct_and_play_chord(Note('C', 4), 'Major_triad')
time.sleep(1)
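    # For reference: the 1st, 3rd and 5th notes of the C Major scale are C, E and G,
    # so the C Major triad played above is C-E-G.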
sensei_print("""
Major chords give a 'Happy' impression.
Let's follow another chord rule. Let's use these positions instead: 1,b3,5 ('b' means flattened note in that position).
If we flatten the 3rd position note, we get a new chord called a [Minor chord].
Flattening the 3rd position in our C chord example gives an Eb (E flat).
This is equivalent to a D# if you look back again at the 12 note chart.
    This is how the resulting [C Minor chord] sounds.
"""
, play_flag=True)
mt.construct_and_play_chord(Note('C', 4), 'Minor_triad')
time.sleep(1)
sensei_print(
"""
As you have noticed, Minor chords give a 'Sad' impression.
You can use the main script to experiment with how the different chords sound with different root notes.
For now, listen to how the different chord types sound with a C root note.
"""
, play_flag=True)
mt.chord_command_processor('C', 'all', 4)
sensei_print(
"""
    This is the end of the explanation for this version of the script. I hope it was useful for you!
Thank you for following the explanation :)
"""
,wait_flag=False)
print('\n')
if __name__ == '__main__':
print(mt.header)
main()
else:
main()
| 35.837209
| 133
| 0.676941
|
794a1b7b30acbf76666942ad9d3647b90d50ec8c
| 119,959
|
py
|
Python
|
tests/integration/boxscore/test_ncaaf_boxscore.py
|
JosephDErwin/sportsreference
|
f026366bec91fdf4bebef48e3a4bfd7c5bfab4bd
|
[
"MIT"
] | null | null | null |
tests/integration/boxscore/test_ncaaf_boxscore.py
|
JosephDErwin/sportsreference
|
f026366bec91fdf4bebef48e3a4bfd7c5bfab4bd
|
[
"MIT"
] | null | null | null |
tests/integration/boxscore/test_ncaaf_boxscore.py
|
JosephDErwin/sportsreference
|
f026366bec91fdf4bebef48e3a4bfd7c5bfab4bd
|
[
"MIT"
] | 1
|
2020-07-08T16:05:25.000Z
|
2020-07-08T16:05:25.000Z
|
import mock
import os
import pandas as pd
from datetime import datetime
from flexmock import flexmock
from sportsreference import utils
from sportsreference.constants import AWAY
from sportsreference.ncaaf.constants import BOXSCORE_URL, BOXSCORES_URL
from sportsreference.ncaaf.boxscore import Boxscore, Boxscores
MONTH = 10
YEAR = 2017
BOXSCORE = '2018-01-08-georgia'
def read_file(filename):
filepath = os.path.join(os.path.dirname(__file__), 'ncaaf', filename)
return open('%s' % filepath, 'r').read()
def mock_pyquery(url):
class MockPQ:
def __init__(self, html_contents):
self.status_code = 200
self.html_contents = html_contents
self.text = html_contents
if url == BOXSCORES_URL % (8, 30, 2017):
return MockPQ(read_file('boxscores-8-30-2017.html'))
if url == BOXSCORES_URL % (8, 31, 2017):
return MockPQ(read_file('boxscores-8-31-2017.html'))
boxscore = read_file('%s.html' % BOXSCORE)
return MockPQ(boxscore)
class MockDateTime:
def __init__(self, year, month):
self.year = year
self.month = month
class TestNCAAFBoxscore:
@mock.patch('requests.get', side_effect=mock_pyquery)
def setup_method(self, *args, **kwargs):
self.results = {
'date': 'Monday Jan 8, 2018',
'time': '8:00 PM ET',
'stadium': 'Mercedes-Benz Stadium - Atlanta, Georgia',
'winner': AWAY,
'winning_name': 'Alabama',
'winning_abbr': 'ALABAMA',
'losing_name': 'Georgia',
'losing_abbr': 'GEORGIA',
'away_points': 26,
'away_first_downs': 20,
'away_rush_attempts': 39,
'away_rush_yards': 184,
'away_rush_touchdowns': 0,
'away_pass_completions': 17,
'away_pass_attempts': 32,
'away_pass_yards': 187,
'away_pass_touchdowns': 3,
'away_interceptions': 1,
'away_total_yards': 371,
'away_fumbles': 0,
'away_fumbles_lost': 0,
'away_turnovers': 1,
'away_penalties': 6,
'away_yards_from_penalties': 41,
'home_points': 23,
'home_first_downs': 22,
'home_rush_attempts': 45,
'home_rush_yards': 133,
'home_rush_touchdowns': 1,
'home_pass_completions': 16,
'home_pass_attempts': 32,
'home_pass_yards': 232,
'home_pass_touchdowns': 1,
'home_interceptions': 2,
'home_total_yards': 365,
'home_fumbles': 0,
'home_fumbles_lost': 0,
'home_turnovers': 2,
'home_penalties': 6,
'home_yards_from_penalties': 65
}
flexmock(utils) \
.should_receive('_todays_date') \
.and_return(MockDateTime(YEAR, MONTH))
self.boxscore = Boxscore(BOXSCORE)
def test_ncaaf_boxscore_returns_requested_boxscore(self):
for attribute, value in self.results.items():
assert getattr(self.boxscore, attribute) == value
def test_invalid_url_yields_empty_class(self):
flexmock(Boxscore) \
.should_receive('_retrieve_html_page') \
.and_return(None)
boxscore = Boxscore(BOXSCORE)
for key, value in boxscore.__dict__.items():
if key == '_uri':
continue
assert value is None
def test_ncaaf_boxscore_dataframe_returns_dataframe_of_all_values(self):
df = pd.DataFrame([self.results], index=[BOXSCORE])
# Pandas doesn't natively allow comparisons of DataFrames.
# Concatenating the two DataFrames (the one generated during the test
# and the expected one above) and dropping duplicate rows leaves only
# the rows that are unique between the two frames. This allows a quick
# check of the DataFrame to see if it is empty - if so, all rows are
# duplicates, and they are equal.
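        # e.g. if both frames contain the same single row, concat() yields two identical
        # rows and drop_duplicates(keep=False) removes both, leaving an empty frame.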
frames = [df, self.boxscore.dataframe]
df1 = pd.concat(frames).drop_duplicates(keep=False)
assert df1.empty
class TestNCAAFBoxscores:
def setup_method(self):
self.expected = {
'8-30-2017': [
{'boxscore': '2017-09-04-georgia-tech',
'away_name': 'Tennessee',
'away_abbr': 'tennessee',
'away_score': 42,
'away_rank': 25,
'home_name': 'Georgia Tech',
'home_abbr': 'georgia-tech',
'home_score': 41,
'home_rank': None,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-03-ucla',
'away_name': 'Texas A&M',
'away_abbr': 'texas-am',
'away_score': 44,
'away_rank': None,
'home_name': 'UCLA',
'home_abbr': 'ucla',
'home_score': 45,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-03-virginia-tech',
'away_name': 'West Virginia',
'away_abbr': 'west-virginia',
'away_score': 24,
'away_rank': 22,
'home_name': 'Virginia Tech',
'home_abbr': 'virginia-tech',
'home_score': 31,
'home_rank': 21,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-air-force',
'away_name': 'Virginia Military Institute',
'away_abbr': 'Virginia Military Institute',
'away_score': 0,
'away_rank': None,
'home_name': 'Air Force',
'home_abbr': 'air-force',
'home_score': 62,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-alabama-birmingham',
'away_name': 'Alabama A&M',
'away_abbr': 'Alabama A&M',
'away_score': 7,
'away_rank': None,
'home_name': 'UAB',
'home_abbr': 'alabama-birmingham',
'home_score': 38,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-arizona',
'away_name': 'Northern Arizona',
'away_abbr': 'Northern Arizona',
'away_score': 24,
'away_rank': None,
'home_name': 'Arizona',
'home_abbr': 'arizona',
'home_score': 62,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-auburn',
'away_name': 'Georgia Southern',
'away_abbr': 'georgia-southern',
'away_score': 7,
'away_rank': None,
'home_name': 'Auburn',
'home_abbr': 'auburn',
'home_score': 41,
'home_rank': 12,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-baylor',
'away_name': 'Liberty',
'away_abbr': 'Liberty',
'away_score': 48,
'away_rank': None,
'home_name': 'Baylor',
'home_abbr': 'baylor',
'home_score': 45,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-boise-state',
'away_name': 'Troy',
'away_abbr': 'troy',
'away_score': 13,
'away_rank': None,
'home_name': 'Boise State',
'home_abbr': 'boise-state',
'home_score': 24,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-brigham-young',
'away_name': 'LSU',
'away_abbr': 'louisiana-state',
'away_score': 27,
'away_rank': 13,
'home_name': 'Brigham Young',
'home_abbr': 'brigham-young',
'home_score': 0,
'home_rank': None,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-central-florida',
'away_name': 'Florida International',
'away_abbr': 'florida-international',
'away_score': 17,
'away_rank': None,
'home_name': 'UCF',
'home_abbr': 'central-florida',
'home_score': 61,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-clemson',
'away_name': 'Kent State',
'away_abbr': 'kent-state',
'away_score': 3,
'away_rank': None,
'home_name': 'Clemson',
'home_abbr': 'clemson',
'home_score': 56,
'home_rank': 5,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-coastal-carolina',
'away_name': 'Massachusetts',
'away_abbr': 'massachusetts',
'away_score': 28,
'away_rank': None,
'home_name': 'Coastal Carolina',
'home_abbr': 'coastal-carolina',
'home_score': 38,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-duke',
'away_name': 'North Carolina Central',
'away_abbr': 'North Carolina Central',
'away_score': 7,
'away_rank': None,
'home_name': 'Duke',
'home_abbr': 'duke',
'home_score': 60,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-east-carolina',
'away_name': 'James Madison',
'away_abbr': 'James Madison',
'away_score': 34,
'away_rank': None,
'home_name': 'East Carolina',
'home_abbr': 'east-carolina',
'home_score': 14,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-florida-state',
'away_name': 'Alabama',
'away_abbr': 'alabama',
'away_score': 24,
'away_rank': 1,
'home_name': 'Florida State',
'home_abbr': 'florida-state',
'home_score': 7,
'home_rank': 3,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-fresno-state',
'away_name': 'Incarnate Word',
'away_abbr': 'Incarnate Word',
'away_score': 0,
'away_rank': None,
'home_name': 'Fresno State',
'home_abbr': 'fresno-state',
'home_score': 66,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-georgia',
'away_name': 'Appalachian State',
'away_abbr': 'appalachian-state',
'away_score': 10,
'away_rank': None,
'home_name': 'Georgia',
'home_abbr': 'georgia',
'home_score': 31,
'home_rank': 15,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-hawaii',
'away_name': 'Western Carolina',
'away_abbr': 'Western Carolina',
'away_score': 18,
'away_rank': None,
'home_name': 'Hawaii',
'home_abbr': 'hawaii',
'home_score': 41,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-illinois',
'away_name': 'Ball State',
'away_abbr': 'ball-state',
'away_score': 21,
'away_rank': None,
'home_name': 'Illinois',
'home_abbr': 'illinois',
'home_score': 24,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-iowa',
'away_name': 'Wyoming',
'away_abbr': 'wyoming',
'away_score': 3,
'away_rank': None,
'home_name': 'Iowa',
'home_abbr': 'iowa',
'home_score': 24,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-iowa-state',
'away_name': 'Northern Iowa',
'away_abbr': 'Northern Iowa',
'away_score': 24,
'away_rank': None,
'home_name': 'Iowa State',
'home_abbr': 'iowa-state',
'home_score': 42,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-kansas',
'away_name': 'Southeast Missouri State',
'away_abbr': 'Southeast Missouri State',
'away_score': 16,
'away_rank': None,
'home_name': 'Kansas',
'home_abbr': 'kansas',
'home_score': 38,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-kansas-state',
'away_name': 'Central Arkansas',
'away_abbr': 'Central Arkansas',
'away_score': 19,
'away_rank': None,
'home_name': 'Kansas State',
'home_abbr': 'kansas-state',
'home_score': 55,
'home_rank': 20,
'non_di': True,
'top_25': True},
{'boxscore': '2017-09-02-louisiana-lafayette',
'away_name': 'Southeastern Louisiana',
'away_abbr': 'Southeastern Louisiana',
'away_score': 48,
'away_rank': None,
'home_name': 'Louisiana',
'home_abbr': 'louisiana-lafayette',
'home_score': 51,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-louisiana-tech',
'away_name': 'Northwestern State',
'away_abbr': 'Northwestern State',
'away_score': 24,
'away_rank': None,
'home_name': 'Louisiana Tech',
'home_abbr': 'louisiana-tech',
'home_score': 52,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-marshall',
'away_name': 'Miami (OH)',
'away_abbr': 'miami-oh',
'away_score': 26,
'away_rank': None,
'home_name': 'Marshall',
'home_abbr': 'marshall',
'home_score': 31,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-miami-fl',
'away_name': 'Bethune-Cookman',
'away_abbr': 'Bethune-Cookman',
'away_score': 13,
'away_rank': None,
'home_name': 'Miami (FL)',
'home_abbr': 'miami-fl',
'home_score': 41,
'home_rank': 18,
'non_di': True,
'top_25': True},
{'boxscore': '2017-09-02-michigan',
'away_name': 'Florida',
'away_abbr': 'florida',
'away_score': 17,
'away_rank': 17,
'home_name': 'Michigan',
'home_abbr': 'michigan',
'home_score': 33,
'home_rank': 11,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-michigan-state',
'away_name': 'Bowling Green State',
'away_abbr': 'bowling-green-state',
'away_score': 10,
'away_rank': None,
'home_name': 'Michigan State',
'home_abbr': 'michigan-state',
'home_score': 35,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-middle-tennessee-state',
'away_name': 'Vanderbilt',
'away_abbr': 'vanderbilt',
'away_score': 28,
'away_rank': None,
'home_name': 'Middle Tennessee State',
'home_abbr': 'middle-tennessee-state',
'home_score': 6,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-mississippi',
'away_name': 'South Alabama',
'away_abbr': 'south-alabama',
'away_score': 27,
'away_rank': None,
'home_name': 'Ole Miss',
'home_abbr': 'mississippi',
'home_score': 47,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-mississippi-state',
'away_name': 'Charleston Southern',
'away_abbr': 'Charleston Southern',
'away_score': 0,
'away_rank': None,
'home_name': 'Mississippi State',
'home_abbr': 'mississippi-state',
'home_score': 49,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-missouri',
'away_name': 'Missouri State',
'away_abbr': 'Missouri State',
'away_score': 43,
'away_rank': None,
'home_name': 'Missouri',
'home_abbr': 'missouri',
'home_score': 72,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-nebraska',
'away_name': 'Arkansas State',
'away_abbr': 'arkansas-state',
'away_score': 36,
'away_rank': None,
'home_name': 'Nebraska',
'home_abbr': 'nebraska',
'home_score': 43,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-nevada-las-vegas',
'away_name': 'Howard',
'away_abbr': 'Howard',
'away_score': 43,
'away_rank': None,
'home_name': 'UNLV',
'home_abbr': 'nevada-las-vegas',
'home_score': 40,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-new-mexico',
'away_name': 'Abilene Christian',
'away_abbr': 'Abilene Christian',
'away_score': 14,
'away_rank': None,
'home_name': 'New Mexico',
'home_abbr': 'new-mexico',
'home_score': 38,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-north-carolina',
'away_name': 'California',
'away_abbr': 'california',
'away_score': 35,
'away_rank': None,
'home_name': 'North Carolina',
'home_abbr': 'north-carolina',
'home_score': 30,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-north-carolina-state',
'away_name': 'South Carolina',
'away_abbr': 'south-carolina',
'away_score': 35,
'away_rank': None,
'home_name': 'North Carolina State',
'home_abbr': 'north-carolina-state',
'home_score': 28,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-north-texas',
'away_name': 'Lamar',
'away_abbr': 'Lamar',
'away_score': 14,
'away_rank': None,
'home_name': 'North Texas',
'home_abbr': 'north-texas',
'home_score': 59,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-northwestern',
'away_name': 'Nevada',
'away_abbr': 'nevada',
'away_score': 20,
'away_rank': None,
'home_name': 'Northwestern',
'home_abbr': 'northwestern',
'home_score': 31,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-notre-dame',
'away_name': 'Temple',
'away_abbr': 'temple',
'away_score': 16,
'away_rank': None,
'home_name': 'Notre Dame',
'home_abbr': 'notre-dame',
'home_score': 49,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-ohio',
'away_name': 'Hampton',
'away_abbr': 'Hampton',
'away_score': 0,
'away_rank': None,
'home_name': 'Ohio',
'home_abbr': 'ohio',
'home_score': 59,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-oklahoma',
'away_name': 'UTEP',
'away_abbr': 'texas-el-paso',
'away_score': 7,
'away_rank': None,
'home_name': 'Oklahoma',
'home_abbr': 'oklahoma',
'home_score': 56,
'home_rank': 7,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-oklahoma-state',
'away_name': 'Tulsa',
'away_abbr': 'tulsa',
'away_score': 24,
'away_rank': None,
'home_name': 'Oklahoma State',
'home_abbr': 'oklahoma-state',
'home_score': 59,
'home_rank': 10,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-old-dominion',
'away_name': 'Albany',
'away_abbr': 'Albany',
'away_score': 17,
'away_rank': None,
'home_name': 'Old Dominion',
'home_abbr': 'old-dominion',
'home_score': 31,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-oregon',
'away_name': 'Southern Utah',
'away_abbr': 'Southern Utah',
'away_score': 21,
'away_rank': None,
'home_name': 'Oregon',
'home_abbr': 'oregon',
'home_score': 77,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-oregon-state',
'away_name': 'Portland State',
'away_abbr': 'Portland State',
'away_score': 32,
'away_rank': None,
'home_name': 'Oregon State',
'home_abbr': 'oregon-state',
'home_score': 35,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-penn-state',
'away_name': 'Akron',
'away_abbr': 'akron',
'away_score': 0,
'away_rank': None,
'home_name': 'Penn State',
'home_abbr': 'penn-state',
'home_score': 52,
'home_rank': 6,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-pittsburgh',
'away_name': 'Youngstown State',
'away_abbr': 'Youngstown State',
'away_score': 21,
'away_rank': None,
'home_name': 'Pitt',
'home_abbr': 'pittsburgh',
'home_score': 28,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-purdue',
'away_name': 'Louisville',
'away_abbr': 'louisville',
'away_score': 35,
'away_rank': 16,
'home_name': 'Purdue',
'home_abbr': 'purdue',
'home_score': 28,
'home_rank': None,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-san-diego-state',
'away_name': 'California-Davis',
'away_abbr': 'California-Davis',
'away_score': 17,
'away_rank': None,
'home_name': 'San Diego State',
'home_abbr': 'san-diego-state',
'home_score': 38,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-san-jose-state',
'away_name': 'Cal Poly',
'away_abbr': 'Cal Poly',
'away_score': 13,
'away_rank': None,
'home_name': 'San Jose State',
'home_abbr': 'san-jose-state',
'home_score': 34,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-south-florida',
'away_name': 'Stony Brook',
'away_abbr': 'Stony Brook',
'away_score': 17,
'away_rank': None,
'home_name': 'South Florida',
'home_abbr': 'south-florida',
'home_score': 31,
'home_rank': 19,
'non_di': True,
'top_25': True},
{'boxscore': '2017-09-02-southern-california',
'away_name': 'Western Michigan',
'away_abbr': 'western-michigan',
'away_score': 31,
'away_rank': None,
'home_name': 'USC',
'home_abbr': 'southern-california',
'home_score': 49,
'home_rank': 4,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-southern-methodist',
'away_name': 'Stephen F. Austin',
'away_abbr': 'Stephen F. Austin',
'away_score': 14,
'away_rank': None,
'home_name': 'SMU',
'home_abbr': 'southern-methodist',
'home_score': 58,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-southern-mississippi',
'away_name': 'Kentucky',
'away_abbr': 'kentucky',
'away_score': 24,
'away_rank': None,
'home_name': 'Southern Mississippi',
'home_abbr': 'southern-mississippi',
'home_score': 17,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-texas',
'away_name': 'Maryland',
'away_abbr': 'maryland',
'away_score': 51,
'away_rank': None,
'home_name': 'Texas',
'home_abbr': 'texas',
'home_score': 41,
'home_rank': 23,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-texas-christian',
'away_name': 'Jackson State',
'away_abbr': 'Jackson State',
'away_score': 0,
'away_rank': None,
'home_name': 'Texas Christian',
'home_abbr': 'texas-christian',
'home_score': 63,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-texas-state',
'away_name': '',
'away_abbr': '',
'away_score': 11,
'away_rank': None,
'home_name': 'Texas State',
'home_abbr': 'texas-state',
'home_score': 20,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-texas-tech',
'away_name': 'Eastern Washington',
'away_abbr': 'Eastern Washington',
'away_score': 10,
'away_rank': None,
'home_name': 'Texas Tech',
'home_abbr': 'texas-tech',
'home_score': 56,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-tulane',
'away_name': 'Grambling State',
'away_abbr': 'Grambling State',
'away_score': 14,
'away_rank': None,
'home_name': 'Tulane',
'home_abbr': 'tulane',
'home_score': 43,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-virginia',
'away_name': 'William & Mary',
'away_abbr': 'William & Mary',
'away_score': 10,
'away_rank': None,
'home_name': 'Virginia',
'home_abbr': 'virginia',
'home_score': 28,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-washington-state',
'away_name': 'Montana State',
'away_abbr': 'Montana State',
'away_score': 0,
'away_rank': None,
'home_name': 'Washington State',
'home_abbr': 'washington-state',
'home_score': 31,
'home_rank': 24,
'non_di': True,
'top_25': True},
{'boxscore': '2017-09-02-western-kentucky',
'away_name': 'Eastern Kentucky',
'away_abbr': 'Eastern Kentucky',
'away_score': 17,
'away_rank': None,
'home_name': 'Western Kentucky',
'home_abbr': 'western-kentucky',
'home_score': 31,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-01-army',
'away_name': 'Fordham',
'away_abbr': 'Fordham',
'away_score': 6,
'away_rank': None,
'home_name': 'Army',
'home_abbr': 'army',
'home_score': 64,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-01-colorado',
'away_name': 'Colorado State',
'away_abbr': 'colorado-state',
'away_score': 3,
'away_rank': None,
'home_name': 'Colorado',
'home_abbr': 'colorado',
'home_score': 17,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-01-eastern-michigan',
'away_name': 'Charlotte',
'away_abbr': 'charlotte',
'away_score': 7,
'away_rank': None,
'home_name': 'Eastern Michigan',
'home_abbr': 'eastern-michigan',
'home_score': 24,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-01-florida-atlantic',
'away_name': 'Navy',
'away_abbr': 'navy',
'away_score': 42,
'away_rank': None,
'home_name': 'Florida Atlantic',
'home_abbr': 'florida-atlantic',
'home_score': 19,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-01-northern-illinois',
'away_name': 'Boston College',
'away_abbr': 'boston-college',
'away_score': 23,
'away_rank': None,
'home_name': 'Northern Illinois',
'home_abbr': 'northern-illinois',
'home_score': 20,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-01-rutgers',
'away_name': 'Washington',
'away_abbr': 'washington',
'away_score': 30,
'away_rank': 8,
'home_name': 'Rutgers',
'home_abbr': 'rutgers',
'home_score': 14,
'home_rank': None,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-01-syracuse',
'away_name': 'Central Connecticut State',
'away_abbr': 'Central Connecticut State',
'away_score': 7,
'away_rank': None,
'home_name': 'Syracuse',
'home_abbr': 'syracuse',
'home_score': 50,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-01-wisconsin',
'away_name': 'Utah State',
'away_abbr': 'utah-state',
'away_score': 10,
'away_rank': None,
'home_name': 'Wisconsin',
'home_abbr': 'wisconsin',
'home_score': 59,
'home_rank': 9,
'non_di': False,
'top_25': True},
{'boxscore': '2017-08-31-arizona-state',
'away_name': 'New Mexico State',
'away_abbr': 'new-mexico-state',
'away_score': 31,
'away_rank': None,
'home_name': 'Arizona State',
'home_abbr': 'arizona-state',
'home_score': 37,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-08-31-arkansas',
'away_name': 'Florida A&M',
'away_abbr': 'Florida A&M',
'away_score': 7,
'away_rank': None,
'home_name': 'Arkansas',
'home_abbr': 'arkansas',
'home_score': 49,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-08-31-central-michigan',
'away_name': 'Rhode Island',
'away_abbr': 'Rhode Island',
'away_score': 27,
'away_rank': None,
'home_name': 'Central Michigan',
'home_abbr': 'central-michigan',
'home_score': 30,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-08-31-cincinnati',
'away_name': 'Austin Peay',
'away_abbr': 'Austin Peay',
'away_score': 14,
'away_rank': None,
'home_name': 'Cincinnati',
'home_abbr': 'cincinnati',
'home_score': 26,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-08-31-connecticut',
'away_name': 'Holy Cross',
'away_abbr': 'Holy Cross',
'away_score': 20,
'away_rank': None,
'home_name': 'Connecticut',
'home_abbr': 'connecticut',
'home_score': 27,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-08-31-georgia-state',
'away_name': 'Tennessee State',
'away_abbr': 'Tennessee State',
'away_score': 17,
'away_rank': None,
'home_name': 'Georgia State',
'home_abbr': 'georgia-state',
'home_score': 10,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-08-31-idaho',
'away_name': 'Sacramento State',
'away_abbr': 'Sacramento State',
'away_score': 6,
'away_rank': None,
'home_name': 'Idaho',
'home_abbr': 'idaho',
'home_score': 28,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-08-31-indiana',
'away_name': 'Ohio State',
'away_abbr': 'ohio-state',
'away_score': 49,
'away_rank': 2,
'home_name': 'Indiana',
'home_abbr': 'indiana',
'home_score': 21,
'home_rank': None,
'non_di': False,
'top_25': True},
{'boxscore': '2017-08-31-memphis',
'away_name': 'Louisiana-Monroe',
'away_abbr': 'louisiana-monroe',
'away_score': 29,
'away_rank': None,
'home_name': 'Memphis',
'home_abbr': 'memphis',
'home_score': 37,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-08-31-minnesota',
'away_name': 'Buffalo',
'away_abbr': 'buffalo',
'away_score': 7,
'away_rank': None,
'home_name': 'Minnesota',
'home_abbr': 'minnesota',
'home_score': 17,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-08-31-toledo',
'away_name': 'Elon',
'away_abbr': 'Elon',
'away_score': 13,
'away_rank': None,
'home_name': 'Toledo',
'home_abbr': 'toledo',
'home_score': 47,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-08-31-utah',
'away_name': 'North Dakota',
'away_abbr': 'North Dakota',
'away_score': 16,
'away_rank': None,
'home_name': 'Utah',
'home_abbr': 'utah',
'home_score': 37,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-08-31-wake-forest',
'away_name': 'Presbyterian',
'away_abbr': 'Presbyterian',
'away_score': 7,
'away_rank': None,
'home_name': 'Wake Forest',
'home_abbr': 'wake-forest',
'home_score': 51,
'home_rank': None,
'non_di': True,
'top_25': False}
]
}
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_boxscores_search(self, *args, **kwargs):
result = Boxscores(datetime(2017, 8, 30)).games
assert result == self.expected
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_boxscores_search_invalid_end(self, *args, **kwargs):
result = Boxscores(datetime(2017, 8, 30), datetime(2017, 8, 29)).games
assert result == self.expected
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_boxscores_search_multiple_days(self, *args, **kwargs):
expected = {
'8-30-2017': [
{'boxscore': '2017-09-04-georgia-tech',
'away_name': 'Tennessee',
'away_abbr': 'tennessee',
'away_score': 42,
'away_rank': 25,
'home_name': 'Georgia Tech',
'home_abbr': 'georgia-tech',
'home_score': 41,
'home_rank': None,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-03-ucla',
'away_name': 'Texas A&M',
'away_abbr': 'texas-am',
'away_score': 44,
'away_rank': None,
'home_name': 'UCLA',
'home_abbr': 'ucla',
'home_score': 45,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-03-virginia-tech',
'away_name': 'West Virginia',
'away_abbr': 'west-virginia',
'away_score': 24,
'away_rank': 22,
'home_name': 'Virginia Tech',
'home_abbr': 'virginia-tech',
'home_score': 31,
'home_rank': 21,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-air-force',
'away_name': 'Virginia Military Institute',
'away_abbr': 'Virginia Military Institute',
'away_score': 0,
'away_rank': None,
'home_name': 'Air Force',
'home_abbr': 'air-force',
'home_score': 62,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-alabama-birmingham',
'away_name': 'Alabama A&M',
'away_abbr': 'Alabama A&M',
'away_score': 7,
'away_rank': None,
'home_name': 'UAB',
'home_abbr': 'alabama-birmingham',
'home_score': 38,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-arizona',
'away_name': 'Northern Arizona',
'away_abbr': 'Northern Arizona',
'away_score': 24,
'away_rank': None,
'home_name': 'Arizona',
'home_abbr': 'arizona',
'home_score': 62,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-auburn',
'away_name': 'Georgia Southern',
'away_abbr': 'georgia-southern',
'away_score': 7,
'away_rank': None,
'home_name': 'Auburn',
'home_abbr': 'auburn',
'home_score': 41,
'home_rank': 12,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-baylor',
'away_name': 'Liberty',
'away_abbr': 'Liberty',
'away_score': 48,
'away_rank': None,
'home_name': 'Baylor',
'home_abbr': 'baylor',
'home_score': 45,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-boise-state',
'away_name': 'Troy',
'away_abbr': 'troy',
'away_score': 13,
'away_rank': None,
'home_name': 'Boise State',
'home_abbr': 'boise-state',
'home_score': 24,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-brigham-young',
'away_name': 'LSU',
'away_abbr': 'louisiana-state',
'away_score': 27,
'away_rank': 13,
'home_name': 'Brigham Young',
'home_abbr': 'brigham-young',
'home_score': 0,
'home_rank': None,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-central-florida',
'away_name': 'Florida International',
'away_abbr': 'florida-international',
'away_score': 17,
'away_rank': None,
'home_name': 'UCF',
'home_abbr': 'central-florida',
'home_score': 61,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-clemson',
'away_name': 'Kent State',
'away_abbr': 'kent-state',
'away_score': 3,
'away_rank': None,
'home_name': 'Clemson',
'home_abbr': 'clemson',
'home_score': 56,
'home_rank': 5,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-coastal-carolina',
'away_name': 'Massachusetts',
'away_abbr': 'massachusetts',
'away_score': 28,
'away_rank': None,
'home_name': 'Coastal Carolina',
'home_abbr': 'coastal-carolina',
'home_score': 38,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-duke',
'away_name': 'North Carolina Central',
'away_abbr': 'North Carolina Central',
'away_score': 7,
'away_rank': None,
'home_name': 'Duke',
'home_abbr': 'duke',
'home_score': 60,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-east-carolina',
'away_name': 'James Madison',
'away_abbr': 'James Madison',
'away_score': 34,
'away_rank': None,
'home_name': 'East Carolina',
'home_abbr': 'east-carolina',
'home_score': 14,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-florida-state',
'away_name': 'Alabama',
'away_abbr': 'alabama',
'away_score': 24,
'away_rank': 1,
'home_name': 'Florida State',
'home_abbr': 'florida-state',
'home_score': 7,
'home_rank': 3,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-fresno-state',
'away_name': 'Incarnate Word',
'away_abbr': 'Incarnate Word',
'away_score': 0,
'away_rank': None,
'home_name': 'Fresno State',
'home_abbr': 'fresno-state',
'home_score': 66,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-georgia',
'away_name': 'Appalachian State',
'away_abbr': 'appalachian-state',
'away_score': 10,
'away_rank': None,
'home_name': 'Georgia',
'home_abbr': 'georgia',
'home_score': 31,
'home_rank': 15,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-hawaii',
'away_name': 'Western Carolina',
'away_abbr': 'Western Carolina',
'away_score': 18,
'away_rank': None,
'home_name': 'Hawaii',
'home_abbr': 'hawaii',
'home_score': 41,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-illinois',
'away_name': 'Ball State',
'away_abbr': 'ball-state',
'away_score': 21,
'away_rank': None,
'home_name': 'Illinois',
'home_abbr': 'illinois',
'home_score': 24,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-iowa',
'away_name': 'Wyoming',
'away_abbr': 'wyoming',
'away_score': 3,
'away_rank': None,
'home_name': 'Iowa',
'home_abbr': 'iowa',
'home_score': 24,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-iowa-state',
'away_name': 'Northern Iowa',
'away_abbr': 'Northern Iowa',
'away_score': 24,
'away_rank': None,
'home_name': 'Iowa State',
'home_abbr': 'iowa-state',
'home_score': 42,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-kansas',
'away_name': 'Southeast Missouri State',
'away_abbr': 'Southeast Missouri State',
'away_score': 16,
'away_rank': None,
'home_name': 'Kansas',
'home_abbr': 'kansas',
'home_score': 38,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-kansas-state',
'away_name': 'Central Arkansas',
'away_abbr': 'Central Arkansas',
'away_score': 19,
'away_rank': None,
'home_name': 'Kansas State',
'home_abbr': 'kansas-state',
'home_score': 55,
'home_rank': 20,
'non_di': True,
'top_25': True},
{'boxscore': '2017-09-02-louisiana-lafayette',
'away_name': 'Southeastern Louisiana',
'away_abbr': 'Southeastern Louisiana',
'away_score': 48,
'away_rank': None,
'home_name': 'Louisiana',
'home_abbr': 'louisiana-lafayette',
'home_score': 51,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-louisiana-tech',
'away_name': 'Northwestern State',
'away_abbr': 'Northwestern State',
'away_score': 24,
'away_rank': None,
'home_name': 'Louisiana Tech',
'home_abbr': 'louisiana-tech',
'home_score': 52,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-marshall',
'away_name': 'Miami (OH)',
'away_abbr': 'miami-oh',
'away_score': 26,
'away_rank': None,
'home_name': 'Marshall',
'home_abbr': 'marshall',
'home_score': 31,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-miami-fl',
'away_name': 'Bethune-Cookman',
'away_abbr': 'Bethune-Cookman',
'away_score': 13,
'away_rank': None,
'home_name': 'Miami (FL)',
'home_abbr': 'miami-fl',
'home_score': 41,
'home_rank': 18,
'non_di': True,
'top_25': True},
{'boxscore': '2017-09-02-michigan',
'away_name': 'Florida',
'away_abbr': 'florida',
'away_score': 17,
'away_rank': 17,
'home_name': 'Michigan',
'home_abbr': 'michigan',
'home_score': 33,
'home_rank': 11,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-michigan-state',
'away_name': 'Bowling Green State',
'away_abbr': 'bowling-green-state',
'away_score': 10,
'away_rank': None,
'home_name': 'Michigan State',
'home_abbr': 'michigan-state',
'home_score': 35,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-middle-tennessee-state',
'away_name': 'Vanderbilt',
'away_abbr': 'vanderbilt',
'away_score': 28,
'away_rank': None,
'home_name': 'Middle Tennessee State',
'home_abbr': 'middle-tennessee-state',
'home_score': 6,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-mississippi',
'away_name': 'South Alabama',
'away_abbr': 'south-alabama',
'away_score': 27,
'away_rank': None,
'home_name': 'Ole Miss',
'home_abbr': 'mississippi',
'home_score': 47,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-mississippi-state',
'away_name': 'Charleston Southern',
'away_abbr': 'Charleston Southern',
'away_score': 0,
'away_rank': None,
'home_name': 'Mississippi State',
'home_abbr': 'mississippi-state',
'home_score': 49,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-missouri',
'away_name': 'Missouri State',
'away_abbr': 'Missouri State',
'away_score': 43,
'away_rank': None,
'home_name': 'Missouri',
'home_abbr': 'missouri',
'home_score': 72,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-nebraska',
'away_name': 'Arkansas State',
'away_abbr': 'arkansas-state',
'away_score': 36,
'away_rank': None,
'home_name': 'Nebraska',
'home_abbr': 'nebraska',
'home_score': 43,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-nevada-las-vegas',
'away_name': 'Howard',
'away_abbr': 'Howard',
'away_score': 43,
'away_rank': None,
'home_name': 'UNLV',
'home_abbr': 'nevada-las-vegas',
'home_score': 40,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-new-mexico',
'away_name': 'Abilene Christian',
'away_abbr': 'Abilene Christian',
'away_score': 14,
'away_rank': None,
'home_name': 'New Mexico',
'home_abbr': 'new-mexico',
'home_score': 38,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-north-carolina',
'away_name': 'California',
'away_abbr': 'california',
'away_score': 35,
'away_rank': None,
'home_name': 'North Carolina',
'home_abbr': 'north-carolina',
'home_score': 30,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-north-carolina-state',
'away_name': 'South Carolina',
'away_abbr': 'south-carolina',
'away_score': 35,
'away_rank': None,
'home_name': 'North Carolina State',
'home_abbr': 'north-carolina-state',
'home_score': 28,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-north-texas',
'away_name': 'Lamar',
'away_abbr': 'Lamar',
'away_score': 14,
'away_rank': None,
'home_name': 'North Texas',
'home_abbr': 'north-texas',
'home_score': 59,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-northwestern',
'away_name': 'Nevada',
'away_abbr': 'nevada',
'away_score': 20,
'away_rank': None,
'home_name': 'Northwestern',
'home_abbr': 'northwestern',
'home_score': 31,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-notre-dame',
'away_name': 'Temple',
'away_abbr': 'temple',
'away_score': 16,
'away_rank': None,
'home_name': 'Notre Dame',
'home_abbr': 'notre-dame',
'home_score': 49,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-ohio',
'away_name': 'Hampton',
'away_abbr': 'Hampton',
'away_score': 0,
'away_rank': None,
'home_name': 'Ohio',
'home_abbr': 'ohio',
'home_score': 59,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-oklahoma',
'away_name': 'UTEP',
'away_abbr': 'texas-el-paso',
'away_score': 7,
'away_rank': None,
'home_name': 'Oklahoma',
'home_abbr': 'oklahoma',
'home_score': 56,
'home_rank': 7,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-oklahoma-state',
'away_name': 'Tulsa',
'away_abbr': 'tulsa',
'away_score': 24,
'away_rank': None,
'home_name': 'Oklahoma State',
'home_abbr': 'oklahoma-state',
'home_score': 59,
'home_rank': 10,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-old-dominion',
'away_name': 'Albany',
'away_abbr': 'Albany',
'away_score': 17,
'away_rank': None,
'home_name': 'Old Dominion',
'home_abbr': 'old-dominion',
'home_score': 31,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-oregon',
'away_name': 'Southern Utah',
'away_abbr': 'Southern Utah',
'away_score': 21,
'away_rank': None,
'home_name': 'Oregon',
'home_abbr': 'oregon',
'home_score': 77,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-oregon-state',
'away_name': 'Portland State',
'away_abbr': 'Portland State',
'away_score': 32,
'away_rank': None,
'home_name': 'Oregon State',
'home_abbr': 'oregon-state',
'home_score': 35,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-penn-state',
'away_name': 'Akron',
'away_abbr': 'akron',
'away_score': 0,
'away_rank': None,
'home_name': 'Penn State',
'home_abbr': 'penn-state',
'home_score': 52,
'home_rank': 6,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-pittsburgh',
'away_name': 'Youngstown State',
'away_abbr': 'Youngstown State',
'away_score': 21,
'away_rank': None,
'home_name': 'Pitt',
'home_abbr': 'pittsburgh',
'home_score': 28,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-purdue',
'away_name': 'Louisville',
'away_abbr': 'louisville',
'away_score': 35,
'away_rank': 16,
'home_name': 'Purdue',
'home_abbr': 'purdue',
'home_score': 28,
'home_rank': None,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-san-diego-state',
'away_name': 'California-Davis',
'away_abbr': 'California-Davis',
'away_score': 17,
'away_rank': None,
'home_name': 'San Diego State',
'home_abbr': 'san-diego-state',
'home_score': 38,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-san-jose-state',
'away_name': 'Cal Poly',
'away_abbr': 'Cal Poly',
'away_score': 13,
'away_rank': None,
'home_name': 'San Jose State',
'home_abbr': 'san-jose-state',
'home_score': 34,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-south-florida',
'away_name': 'Stony Brook',
'away_abbr': 'Stony Brook',
'away_score': 17,
'away_rank': None,
'home_name': 'South Florida',
'home_abbr': 'south-florida',
'home_score': 31,
'home_rank': 19,
'non_di': True,
'top_25': True},
{'boxscore': '2017-09-02-southern-california',
'away_name': 'Western Michigan',
'away_abbr': 'western-michigan',
'away_score': 31,
'away_rank': None,
'home_name': 'USC',
'home_abbr': 'southern-california',
'home_score': 49,
'home_rank': 4,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-southern-methodist',
'away_name': 'Stephen F. Austin',
'away_abbr': 'Stephen F. Austin',
'away_score': 14,
'away_rank': None,
'home_name': 'SMU',
'home_abbr': 'southern-methodist',
'home_score': 58,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-southern-mississippi',
'away_name': 'Kentucky',
'away_abbr': 'kentucky',
'away_score': 24,
'away_rank': None,
'home_name': 'Southern Mississippi',
'home_abbr': 'southern-mississippi',
'home_score': 17,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-texas',
'away_name': 'Maryland',
'away_abbr': 'maryland',
'away_score': 51,
'away_rank': None,
'home_name': 'Texas',
'home_abbr': 'texas',
'home_score': 41,
'home_rank': 23,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-texas-christian',
'away_name': 'Jackson State',
'away_abbr': 'Jackson State',
'away_score': 0,
'away_rank': None,
'home_name': 'Texas Christian',
'home_abbr': 'texas-christian',
'home_score': 63,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-texas-state',
'away_name': '',
'away_abbr': '',
'away_score': 11,
'away_rank': None,
'home_name': 'Texas State',
'home_abbr': 'texas-state',
'home_score': 20,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-texas-tech',
'away_name': 'Eastern Washington',
'away_abbr': 'Eastern Washington',
'away_score': 10,
'away_rank': None,
'home_name': 'Texas Tech',
'home_abbr': 'texas-tech',
'home_score': 56,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-tulane',
'away_name': 'Grambling State',
'away_abbr': 'Grambling State',
'away_score': 14,
'away_rank': None,
'home_name': 'Tulane',
'home_abbr': 'tulane',
'home_score': 43,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-virginia',
'away_name': 'William & Mary',
'away_abbr': 'William & Mary',
'away_score': 10,
'away_rank': None,
'home_name': 'Virginia',
'home_abbr': 'virginia',
'home_score': 28,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-washington-state',
'away_name': 'Montana State',
'away_abbr': 'Montana State',
'away_score': 0,
'away_rank': None,
'home_name': 'Washington State',
'home_abbr': 'washington-state',
'home_score': 31,
'home_rank': 24,
'non_di': True,
'top_25': True},
{'boxscore': '2017-09-02-western-kentucky',
'away_name': 'Eastern Kentucky',
'away_abbr': 'Eastern Kentucky',
'away_score': 17,
'away_rank': None,
'home_name': 'Western Kentucky',
'home_abbr': 'western-kentucky',
'home_score': 31,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-01-army',
'away_name': 'Fordham',
'away_abbr': 'Fordham',
'away_score': 6,
'away_rank': None,
'home_name': 'Army',
'home_abbr': 'army',
'home_score': 64,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-01-colorado',
'away_name': 'Colorado State',
'away_abbr': 'colorado-state',
'away_score': 3,
'away_rank': None,
'home_name': 'Colorado',
'home_abbr': 'colorado',
'home_score': 17,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-01-eastern-michigan',
'away_name': 'Charlotte',
'away_abbr': 'charlotte',
'away_score': 7,
'away_rank': None,
'home_name': 'Eastern Michigan',
'home_abbr': 'eastern-michigan',
'home_score': 24,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-01-florida-atlantic',
'away_name': 'Navy',
'away_abbr': 'navy',
'away_score': 42,
'away_rank': None,
'home_name': 'Florida Atlantic',
'home_abbr': 'florida-atlantic',
'home_score': 19,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-01-northern-illinois',
'away_name': 'Boston College',
'away_abbr': 'boston-college',
'away_score': 23,
'away_rank': None,
'home_name': 'Northern Illinois',
'home_abbr': 'northern-illinois',
'home_score': 20,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-01-rutgers',
'away_name': 'Washington',
'away_abbr': 'washington',
'away_score': 30,
'away_rank': 8,
'home_name': 'Rutgers',
'home_abbr': 'rutgers',
'home_score': 14,
'home_rank': None,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-01-syracuse',
'away_name': 'Central Connecticut State',
'away_abbr': 'Central Connecticut State',
'away_score': 7,
'away_rank': None,
'home_name': 'Syracuse',
'home_abbr': 'syracuse',
'home_score': 50,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-01-wisconsin',
'away_name': 'Utah State',
'away_abbr': 'utah-state',
'away_score': 10,
'away_rank': None,
'home_name': 'Wisconsin',
'home_abbr': 'wisconsin',
'home_score': 59,
'home_rank': 9,
'non_di': False,
'top_25': True},
{'boxscore': '2017-08-31-arizona-state',
'away_name': 'New Mexico State',
'away_abbr': 'new-mexico-state',
'away_score': 31,
'away_rank': None,
'home_name': 'Arizona State',
'home_abbr': 'arizona-state',
'home_score': 37,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-08-31-arkansas',
'away_name': 'Florida A&M',
'away_abbr': 'Florida A&M',
'away_score': 7,
'away_rank': None,
'home_name': 'Arkansas',
'home_abbr': 'arkansas',
'home_score': 49,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-08-31-central-michigan',
'away_name': 'Rhode Island',
'away_abbr': 'Rhode Island',
'away_score': 27,
'away_rank': None,
'home_name': 'Central Michigan',
'home_abbr': 'central-michigan',
'home_score': 30,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-08-31-cincinnati',
'away_name': 'Austin Peay',
'away_abbr': 'Austin Peay',
'away_score': 14,
'away_rank': None,
'home_name': 'Cincinnati',
'home_abbr': 'cincinnati',
'home_score': 26,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-08-31-connecticut',
'away_name': 'Holy Cross',
'away_abbr': 'Holy Cross',
'away_score': 20,
'away_rank': None,
'home_name': 'Connecticut',
'home_abbr': 'connecticut',
'home_score': 27,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-08-31-georgia-state',
'away_name': 'Tennessee State',
'away_abbr': 'Tennessee State',
'away_score': 17,
'away_rank': None,
'home_name': 'Georgia State',
'home_abbr': 'georgia-state',
'home_score': 10,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-08-31-idaho',
'away_name': 'Sacramento State',
'away_abbr': 'Sacramento State',
'away_score': 6,
'away_rank': None,
'home_name': 'Idaho',
'home_abbr': 'idaho',
'home_score': 28,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-08-31-indiana',
'away_name': 'Ohio State',
'away_abbr': 'ohio-state',
'away_score': 49,
'away_rank': 2,
'home_name': 'Indiana',
'home_abbr': 'indiana',
'home_score': 21,
'home_rank': None,
'non_di': False,
'top_25': True},
{'boxscore': '2017-08-31-memphis',
'away_name': 'Louisiana-Monroe',
'away_abbr': 'louisiana-monroe',
'away_score': 29,
'away_rank': None,
'home_name': 'Memphis',
'home_abbr': 'memphis',
'home_score': 37,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-08-31-minnesota',
'away_name': 'Buffalo',
'away_abbr': 'buffalo',
'away_score': 7,
'away_rank': None,
'home_name': 'Minnesota',
'home_abbr': 'minnesota',
'home_score': 17,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-08-31-toledo',
'away_name': 'Elon',
'away_abbr': 'Elon',
'away_score': 13,
'away_rank': None,
'home_name': 'Toledo',
'home_abbr': 'toledo',
'home_score': 47,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-08-31-utah',
'away_name': 'North Dakota',
'away_abbr': 'North Dakota',
'away_score': 16,
'away_rank': None,
'home_name': 'Utah',
'home_abbr': 'utah',
'home_score': 37,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-08-31-wake-forest',
'away_name': 'Presbyterian',
'away_abbr': 'Presbyterian',
'away_score': 7,
'away_rank': None,
'home_name': 'Wake Forest',
'home_abbr': 'wake-forest',
'home_score': 51,
'home_rank': None,
'non_di': True,
'top_25': False}
],
'8-31-2017': [
{'boxscore': '2017-08-31-arizona-state',
'away_name': 'Arizona State',
'away_abbr': 'arizona-state',
'away_score': 31,
'away_rank': None,
'home_name': 'Arizona State',
'home_abbr': 'arizona-state',
'home_score': 37,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-08-31-arkansas',
'away_name': 'Arkansas',
'away_abbr': 'arkansas',
'away_score': 7,
'away_rank': None,
'home_name': 'Arkansas',
'home_abbr': 'arkansas',
'home_score': 49,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-08-31-central-michigan',
'away_name': 'Central Michigan',
'away_abbr': 'central-michigan',
'away_score': 27,
'away_rank': None,
'home_name': 'Central Michigan',
'home_abbr': 'central-michigan',
'home_score': 30,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-08-31-cincinnati',
'away_name': 'Cincinnati',
'away_abbr': 'cincinnati',
'away_score': 14,
'away_rank': None,
'home_name': 'Cincinnati',
'home_abbr': 'cincinnati',
'home_score': 26,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-08-31-connecticut',
'away_name': 'Connecticut',
'away_abbr': 'connecticut',
'away_score': 20,
'away_rank': None,
'home_name': 'Connecticut',
'home_abbr': 'connecticut',
'home_score': 27,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-08-31-georgia-state',
'away_name': 'Georgia State',
'away_abbr': 'georgia-state',
'away_score': 17,
'away_rank': None,
'home_name': 'Georgia State',
'home_abbr': 'georgia-state',
'home_score': 10,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-08-31-idaho',
'away_name': 'Idaho',
'away_abbr': 'idaho',
'away_score': 6,
'away_rank': None,
'home_name': 'Idaho',
'home_abbr': 'idaho',
'home_score': 28,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-08-31-indiana',
'away_name': 'Indiana',
'away_abbr': 'indiana',
'away_score': 49,
'away_rank': None,
'home_name': 'Indiana',
'home_abbr': 'indiana',
'home_score': 21,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-08-31-memphis',
'away_name': 'Memphis',
'away_abbr': 'memphis',
'away_score': 29,
'away_rank': None,
'home_name': 'Memphis',
'home_abbr': 'memphis',
'home_score': 37,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-08-31-minnesota',
'away_name': 'Minnesota',
'away_abbr': 'minnesota',
'away_score': 7,
'away_rank': None,
'home_name': 'Minnesota',
'home_abbr': 'minnesota',
'home_score': 17,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-08-31-oklahoma-state',
'away_name': 'Oklahoma State',
'away_abbr': 'oklahoma-state',
'away_score': 24,
'away_rank': 10,
'home_name': 'Oklahoma State',
'home_abbr': 'oklahoma-state',
'home_score': 59,
'home_rank': 10,
'non_di': False,
'top_25': True},
{'boxscore': '2017-08-31-toledo',
'away_name': 'Toledo',
'away_abbr': 'toledo',
'away_score': 13,
'away_rank': None,
'home_name': 'Toledo',
'home_abbr': 'toledo',
'home_score': 47,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-08-31-utah',
'away_name': 'Utah',
'away_abbr': 'utah',
'away_score': 16,
'away_rank': None,
'home_name': 'Utah',
'home_abbr': 'utah',
'home_score': 37,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-08-31-wake-forest',
'away_name': 'Wake Forest',
'away_abbr': 'wake-forest',
'away_score': 7,
'away_rank': None,
'home_name': 'Wake Forest',
'home_abbr': 'wake-forest',
'home_score': 51,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-04-georgia-tech',
'away_name': 'Tennessee',
'away_abbr': 'tennessee',
'away_score': 42,
'away_rank': 25,
'home_name': 'Georgia Tech',
'home_abbr': 'georgia-tech',
'home_score': 41,
'home_rank': None,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-03-ucla',
'away_name': 'Texas A&M',
'away_abbr': 'texas-am',
'away_score': 44,
'away_rank': None,
'home_name': 'UCLA',
'home_abbr': 'ucla',
'home_score': 45,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-03-virginia-tech',
'away_name': 'West Virginia',
'away_abbr': 'west-virginia',
'away_score': 24,
'away_rank': 22,
'home_name': 'Virginia Tech',
'home_abbr': 'virginia-tech',
'home_score': 31,
'home_rank': 21,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-air-force',
'away_name': 'Virginia Military Institute',
'away_abbr': 'Virginia Military Institute',
'away_score': 0,
'away_rank': None,
'home_name': 'Air Force',
'home_abbr': 'air-force',
'home_score': 62,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-alabama-birmingham',
'away_name': 'Alabama A&M',
'away_abbr': 'Alabama A&M',
'away_score': 7,
'away_rank': None,
'home_name': 'UAB',
'home_abbr': 'alabama-birmingham',
'home_score': 38,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-arizona',
'away_name': 'Northern Arizona',
'away_abbr': 'Northern Arizona',
'away_score': 24,
'away_rank': None,
'home_name': 'Arizona',
'home_abbr': 'arizona',
'home_score': 62,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-auburn',
'away_name': 'Georgia Southern',
'away_abbr': 'georgia-southern',
'away_score': 7,
'away_rank': None,
'home_name': 'Auburn',
'home_abbr': 'auburn',
'home_score': 41,
'home_rank': 12,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-baylor',
'away_name': 'Liberty',
'away_abbr': 'Liberty',
'away_score': 48,
'away_rank': None,
'home_name': 'Baylor',
'home_abbr': 'baylor',
'home_score': 45,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-boise-state',
'away_name': 'Troy',
'away_abbr': 'troy',
'away_score': 13,
'away_rank': None,
'home_name': 'Boise State',
'home_abbr': 'boise-state',
'home_score': 24,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-brigham-young',
'away_name': 'LSU',
'away_abbr': 'louisiana-state',
'away_score': 27,
'away_rank': 13,
'home_name': 'Brigham Young',
'home_abbr': 'brigham-young',
'home_score': 0,
'home_rank': None,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-central-florida',
'away_name': 'Florida International',
'away_abbr': 'florida-international',
'away_score': 17,
'away_rank': None,
'home_name': 'UCF',
'home_abbr': 'central-florida',
'home_score': 61,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-clemson',
'away_name': 'Kent State',
'away_abbr': 'kent-state',
'away_score': 3,
'away_rank': None,
'home_name': 'Clemson',
'home_abbr': 'clemson',
'home_score': 56,
'home_rank': 5,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-coastal-carolina',
'away_name': 'Massachusetts',
'away_abbr': 'massachusetts',
'away_score': 28,
'away_rank': None,
'home_name': 'Coastal Carolina',
'home_abbr': 'coastal-carolina',
'home_score': 38,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-duke',
'away_name': 'North Carolina Central',
'away_abbr': 'North Carolina Central',
'away_score': 7,
'away_rank': None,
'home_name': 'Duke',
'home_abbr': 'duke',
'home_score': 60,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-east-carolina',
'away_name': 'James Madison',
'away_abbr': 'James Madison',
'away_score': 34,
'away_rank': None,
'home_name': 'East Carolina',
'home_abbr': 'east-carolina',
'home_score': 14,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-florida-state',
'away_name': 'Alabama',
'away_abbr': 'alabama',
'away_score': 24,
'away_rank': 1,
'home_name': 'Florida State',
'home_abbr': 'florida-state',
'home_score': 7,
'home_rank': 3,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-fresno-state',
'away_name': 'Incarnate Word',
'away_abbr': 'Incarnate Word',
'away_score': 0,
'away_rank': None,
'home_name': 'Fresno State',
'home_abbr': 'fresno-state',
'home_score': 66,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-georgia',
'away_name': 'Appalachian State',
'away_abbr': 'appalachian-state',
'away_score': 10,
'away_rank': None,
'home_name': 'Georgia',
'home_abbr': 'georgia',
'home_score': 31,
'home_rank': 15,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-hawaii',
'away_name': 'Western Carolina',
'away_abbr': 'Western Carolina',
'away_score': 18,
'away_rank': None,
'home_name': 'Hawaii',
'home_abbr': 'hawaii',
'home_score': 41,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-illinois',
'away_name': 'Ball State',
'away_abbr': 'ball-state',
'away_score': 21,
'away_rank': None,
'home_name': 'Illinois',
'home_abbr': 'illinois',
'home_score': 24,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-iowa',
'away_name': 'Wyoming',
'away_abbr': 'wyoming',
'away_score': 3,
'away_rank': None,
'home_name': 'Iowa',
'home_abbr': 'iowa',
'home_score': 24,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-iowa-state',
'away_name': 'Northern Iowa',
'away_abbr': 'Northern Iowa',
'away_score': 24,
'away_rank': None,
'home_name': 'Iowa State',
'home_abbr': 'iowa-state',
'home_score': 42,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-kansas',
'away_name': 'Southeast Missouri State',
'away_abbr': 'Southeast Missouri State',
'away_score': 16,
'away_rank': None,
'home_name': 'Kansas',
'home_abbr': 'kansas',
'home_score': 38,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-kansas-state',
'away_name': 'Central Arkansas',
'away_abbr': 'Central Arkansas',
'away_score': 19,
'away_rank': None,
'home_name': 'Kansas State',
'home_abbr': 'kansas-state',
'home_score': 55,
'home_rank': 20,
'non_di': True,
'top_25': True},
{'boxscore': '2017-09-02-louisiana-lafayette',
'away_name': 'Southeastern Louisiana',
'away_abbr': 'Southeastern Louisiana',
'away_score': 48,
'away_rank': None,
'home_name': 'Louisiana',
'home_abbr': 'louisiana-lafayette',
'home_score': 51,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-louisiana-tech',
'away_name': 'Northwestern State',
'away_abbr': 'Northwestern State',
'away_score': 24,
'away_rank': None,
'home_name': 'Louisiana Tech',
'home_abbr': 'louisiana-tech',
'home_score': 52,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-marshall',
'away_name': 'Miami (OH)',
'away_abbr': 'miami-oh',
'away_score': 26,
'away_rank': None,
'home_name': 'Marshall',
'home_abbr': 'marshall',
'home_score': 31,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-miami-fl',
'away_name': 'Bethune-Cookman',
'away_abbr': 'Bethune-Cookman',
'away_score': 13,
'away_rank': None,
'home_name': 'Miami (FL)',
'home_abbr': 'miami-fl',
'home_score': 41,
'home_rank': 18,
'non_di': True,
'top_25': True},
{'boxscore': '2017-09-02-michigan',
'away_name': 'Florida',
'away_abbr': 'florida',
'away_score': 17,
'away_rank': 17,
'home_name': 'Michigan',
'home_abbr': 'michigan',
'home_score': 33,
'home_rank': 11,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-michigan-state',
'away_name': 'Bowling Green State',
'away_abbr': 'bowling-green-state',
'away_score': 10,
'away_rank': None,
'home_name': 'Michigan State',
'home_abbr': 'michigan-state',
'home_score': 35,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-middle-tennessee-state',
'away_name': 'Vanderbilt',
'away_abbr': 'vanderbilt',
'away_score': 28,
'away_rank': None,
'home_name': 'Middle Tennessee State',
'home_abbr': 'middle-tennessee-state',
'home_score': 6,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-mississippi',
'away_name': 'South Alabama',
'away_abbr': 'south-alabama',
'away_score': 27,
'away_rank': None,
'home_name': 'Ole Miss',
'home_abbr': 'mississippi',
'home_score': 47,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-mississippi-state',
'away_name': 'Charleston Southern',
'away_abbr': 'Charleston Southern',
'away_score': 0,
'away_rank': None,
'home_name': 'Mississippi State',
'home_abbr': 'mississippi-state',
'home_score': 49,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-missouri',
'away_name': 'Missouri State',
'away_abbr': 'Missouri State',
'away_score': 43,
'away_rank': None,
'home_name': 'Missouri',
'home_abbr': 'missouri',
'home_score': 72,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-nebraska',
'away_name': 'Arkansas State',
'away_abbr': 'arkansas-state',
'away_score': 36,
'away_rank': None,
'home_name': 'Nebraska',
'home_abbr': 'nebraska',
'home_score': 43,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-nevada-las-vegas',
'away_name': 'Howard',
'away_abbr': 'Howard',
'away_score': 43,
'away_rank': None,
'home_name': 'UNLV',
'home_abbr': 'nevada-las-vegas',
'home_score': 40,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-new-mexico',
'away_name': 'Abilene Christian',
'away_abbr': 'Abilene Christian',
'away_score': 14,
'away_rank': None,
'home_name': 'New Mexico',
'home_abbr': 'new-mexico',
'home_score': 38,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-north-carolina',
'away_name': 'California',
'away_abbr': 'california',
'away_score': 35,
'away_rank': None,
'home_name': 'North Carolina',
'home_abbr': 'north-carolina',
'home_score': 30,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-north-carolina-state',
'away_name': 'South Carolina',
'away_abbr': 'south-carolina',
'away_score': 35,
'away_rank': None,
'home_name': 'North Carolina State',
'home_abbr': 'north-carolina-state',
'home_score': 28,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-north-texas',
'away_name': 'Lamar',
'away_abbr': 'Lamar',
'away_score': 14,
'away_rank': None,
'home_name': 'North Texas',
'home_abbr': 'north-texas',
'home_score': 59,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-northwestern',
'away_name': 'Nevada',
'away_abbr': 'nevada',
'away_score': 20,
'away_rank': None,
'home_name': 'Northwestern',
'home_abbr': 'northwestern',
'home_score': 31,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-notre-dame',
'away_name': 'Temple',
'away_abbr': 'temple',
'away_score': 16,
'away_rank': None,
'home_name': 'Notre Dame',
'home_abbr': 'notre-dame',
'home_score': 49,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-ohio',
'away_name': 'Hampton',
'away_abbr': 'Hampton',
'away_score': 0,
'away_rank': None,
'home_name': 'Ohio',
'home_abbr': 'ohio',
'home_score': 59,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-oklahoma',
'away_name': 'UTEP',
'away_abbr': 'texas-el-paso',
'away_score': 7,
'away_rank': None,
'home_name': 'Oklahoma',
'home_abbr': 'oklahoma',
'home_score': 56,
'home_rank': 7,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-old-dominion',
'away_name': 'Albany',
'away_abbr': 'Albany',
'away_score': 17,
'away_rank': None,
'home_name': 'Old Dominion',
'home_abbr': 'old-dominion',
'home_score': 31,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-oregon',
'away_name': 'Southern Utah',
'away_abbr': 'Southern Utah',
'away_score': 21,
'away_rank': None,
'home_name': 'Oregon',
'home_abbr': 'oregon',
'home_score': 77,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-oregon-state',
'away_name': 'Portland State',
'away_abbr': 'Portland State',
'away_score': 32,
'away_rank': None,
'home_name': 'Oregon State',
'home_abbr': 'oregon-state',
'home_score': 35,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-penn-state',
'away_name': 'Akron',
'away_abbr': 'akron',
'away_score': 0,
'away_rank': None,
'home_name': 'Penn State',
'home_abbr': 'penn-state',
'home_score': 52,
'home_rank': 6,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-pittsburgh',
'away_name': 'Youngstown State',
'away_abbr': 'Youngstown State',
'away_score': 21,
'away_rank': None,
'home_name': 'Pitt',
'home_abbr': 'pittsburgh',
'home_score': 28,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-purdue',
'away_name': 'Louisville',
'away_abbr': 'louisville',
'away_score': 35,
'away_rank': 16,
'home_name': 'Purdue',
'home_abbr': 'purdue',
'home_score': 28,
'home_rank': None,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-san-diego-state',
'away_name': 'California-Davis',
'away_abbr': 'California-Davis',
'away_score': 17,
'away_rank': None,
'home_name': 'San Diego State',
'home_abbr': 'san-diego-state',
'home_score': 38,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-san-jose-state',
'away_name': 'Cal Poly',
'away_abbr': 'Cal Poly',
'away_score': 13,
'away_rank': None,
'home_name': 'San Jose State',
'home_abbr': 'san-jose-state',
'home_score': 34,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-south-florida',
'away_name': 'Stony Brook',
'away_abbr': 'Stony Brook',
'away_score': 17,
'away_rank': None,
'home_name': 'South Florida',
'home_abbr': 'south-florida',
'home_score': 31,
'home_rank': 19,
'non_di': True,
'top_25': True},
{'boxscore': '2017-09-02-southern-california',
'away_name': 'Western Michigan',
'away_abbr': 'western-michigan',
'away_score': 31,
'away_rank': None,
'home_name': 'USC',
'home_abbr': 'southern-california',
'home_score': 49,
'home_rank': 4,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-southern-methodist',
'away_name': 'Stephen F. Austin',
'away_abbr': 'Stephen F. Austin',
'away_score': 14,
'away_rank': None,
'home_name': 'SMU',
'home_abbr': 'southern-methodist',
'home_score': 58,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-southern-mississippi',
'away_name': 'Kentucky',
'away_abbr': 'kentucky',
'away_score': 24,
'away_rank': None,
'home_name': 'Southern Mississippi',
'home_abbr': 'southern-mississippi',
'home_score': 17,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-02-texas',
'away_name': 'Maryland',
'away_abbr': 'maryland',
'away_score': 51,
'away_rank': None,
'home_name': 'Texas',
'home_abbr': 'texas',
'home_score': 41,
'home_rank': 23,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-02-texas-christian',
'away_name': 'Jackson State',
'away_abbr': 'Jackson State',
'away_score': 0,
'away_rank': None,
'home_name': 'Texas Christian',
'home_abbr': 'texas-christian',
'home_score': 63,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-texas-state',
'away_name': '',
'away_abbr': '',
'away_score': 11,
'away_rank': None,
'home_name': 'Texas State',
'home_abbr': 'texas-state',
'home_score': 20,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-texas-tech',
'away_name': 'Eastern Washington',
'away_abbr': 'Eastern Washington',
'away_score': 10,
'away_rank': None,
'home_name': 'Texas Tech',
'home_abbr': 'texas-tech',
'home_score': 56,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-tulane',
'away_name': 'Grambling State',
'away_abbr': 'Grambling State',
'away_score': 14,
'away_rank': None,
'home_name': 'Tulane',
'home_abbr': 'tulane',
'home_score': 43,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-virginia',
'away_name': 'William & Mary',
'away_abbr': 'William & Mary',
'away_score': 10,
'away_rank': None,
'home_name': 'Virginia',
'home_abbr': 'virginia',
'home_score': 28,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-02-washington-state',
'away_name': 'Montana State',
'away_abbr': 'Montana State',
'away_score': 0,
'away_rank': None,
'home_name': 'Washington State',
'home_abbr': 'washington-state',
'home_score': 31,
'home_rank': 24,
'non_di': True,
'top_25': True},
{'boxscore': '2017-09-02-western-kentucky',
'away_name': 'Eastern Kentucky',
'away_abbr': 'Eastern Kentucky',
'away_score': 17,
'away_rank': None,
'home_name': 'Western Kentucky',
'home_abbr': 'western-kentucky',
'home_score': 31,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-01-army',
'away_name': 'Fordham',
'away_abbr': 'Fordham',
'away_score': 6,
'away_rank': None,
'home_name': 'Army',
'home_abbr': 'army',
'home_score': 64,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-01-colorado',
'away_name': 'Colorado State',
'away_abbr': 'colorado-state',
'away_score': 3,
'away_rank': None,
'home_name': 'Colorado',
'home_abbr': 'colorado',
'home_score': 17,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-01-eastern-michigan',
'away_name': 'Charlotte',
'away_abbr': 'charlotte',
'away_score': 7,
'away_rank': None,
'home_name': 'Eastern Michigan',
'home_abbr': 'eastern-michigan',
'home_score': 24,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-01-florida-atlantic',
'away_name': 'Navy',
'away_abbr': 'navy',
'away_score': 42,
'away_rank': None,
'home_name': 'Florida Atlantic',
'home_abbr': 'florida-atlantic',
'home_score': 19,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-01-northern-illinois',
'away_name': 'Boston College',
'away_abbr': 'boston-college',
'away_score': 23,
'away_rank': None,
'home_name': 'Northern Illinois',
'home_abbr': 'northern-illinois',
'home_score': 20,
'home_rank': None,
'non_di': False,
'top_25': False},
{'boxscore': '2017-09-01-rutgers',
'away_name': 'Washington',
'away_abbr': 'washington',
'away_score': 30,
'away_rank': 8,
'home_name': 'Rutgers',
'home_abbr': 'rutgers',
'home_score': 14,
'home_rank': None,
'non_di': False,
'top_25': True},
{'boxscore': '2017-09-01-syracuse',
'away_name': 'Central Connecticut State',
'away_abbr': 'Central Connecticut State',
'away_score': 7,
'away_rank': None,
'home_name': 'Syracuse',
'home_abbr': 'syracuse',
'home_score': 50,
'home_rank': None,
'non_di': True,
'top_25': False},
{'boxscore': '2017-09-01-wisconsin',
'away_name': 'Utah State',
'away_abbr': 'utah-state',
'away_score': 10,
'away_rank': None,
'home_name': 'Wisconsin',
'home_abbr': 'wisconsin',
'home_score': 59,
'home_rank': 9,
'non_di': False,
'top_25': True}
]
}
result = Boxscores(datetime(2017, 8, 30), datetime(2017, 8, 31)).games
assert result == expected
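# Hedged usage sketch outside these mocked tests. The import path below is an assumption
# about the package under test (a sportsreference-style NCAAF boxscore module) and is not
# taken from this file:
#   from sportsreference.ncaaf.boxscore import Boxscores
#   games = Boxscores(datetime(2017, 8, 30)).games  # dict keyed by 'M-D-YYYY' date strings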
| 40.066466
| 78
| 0.401729
|
794a1c9784881bbdf7ee2bf9e99c652b7719dab3
| 688
|
py
|
Python
|
src/data/gopro_large.py
|
alpayuz/DeepDeblur-PyTorch
|
771252e123e3a11da849bb9cef2a7cc49d8d1a2d
|
[
"MIT"
] | 158
|
2020-06-09T08:55:00.000Z
|
2022-03-28T12:13:18.000Z
|
src/data/gopro_large.py
|
alpayuz/DeepDeblur-PyTorch
|
771252e123e3a11da849bb9cef2a7cc49d8d1a2d
|
[
"MIT"
] | 43
|
2020-06-10T10:56:02.000Z
|
2022-03-28T10:27:21.000Z
|
src/data/gopro_large.py
|
alpayuz/DeepDeblur-PyTorch
|
771252e123e3a11da849bb9cef2a7cc49d8d1a2d
|
[
"MIT"
] | 35
|
2020-06-13T15:00:31.000Z
|
2022-03-19T21:26:20.000Z
|
from data.dataset import Dataset
from utils import interact
class GOPRO_Large(Dataset):
"""GOPRO_Large train, test subset class
"""
def __init__(self, args, mode='train'):
super(GOPRO_Large, self).__init__(args, mode)
def set_modes(self):
self.modes = ('train', 'test')
def set_keys(self):
super(GOPRO_Large, self).set_keys()
self.blur_key = 'blur_gamma'
# self.sharp_key = 'sharp'
def __getitem__(self, idx):
blur, sharp, pad_width, idx, relpath = super(GOPRO_Large, self).__getitem__(idx)
relpath = relpath.replace('{}/'.format(self.blur_key), '')
return blur, sharp, pad_width, idx, relpath
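# Hedged usage note: the base class data.dataset.Dataset is not shown here, so the exact
# constructor arguments are an assumption. The intended shape is roughly
#   dataset = GOPRO_Large(args, mode='train')
# where `args` carries the options consumed by the base Dataset class; __getitem__ then
# yields (blur, sharp, pad_width, idx, relpath) with the 'blur_gamma/' prefix stripped from
# relpath so blurred inputs and sharp targets share the same relative output path.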
| 28.666667
| 88
| 0.642442
|
794a1d8899dd7a1c2dc07aed20f7c2abff547117
| 383
|
py
|
Python
|
proj/creator.py
|
natn2323/mosaic-maker-python
|
2e3d15fbe62be7e14ee42533a3c7e9925b03dfc4
|
[
"MIT"
] | null | null | null |
proj/creator.py
|
natn2323/mosaic-maker-python
|
2e3d15fbe62be7e14ee42533a3c7e9925b03dfc4
|
[
"MIT"
] | null | null | null |
proj/creator.py
|
natn2323/mosaic-maker-python
|
2e3d15fbe62be7e14ee42533a3c7e9925b03dfc4
|
[
"MIT"
] | null | null | null |
from PIL import Image  # moved to module level so easy_create() also works when imported
def easy_create(filename, color=(0, 0, 0)):
    # Create a 360x360 solid-color RGB image and save it to the given filename.
    img = Image.new("RGB", (360, 360), color)
    img.save(filename)
if __name__ == '__main__':
    import sys
    print("Input your colors:")
    r = input("Red: ")  # input() replaces the Python 2-only raw_input()
    g = input("Green: ")
    b = input("Blue: ")
    color = (int(r), int(g), int(b))
    easy_create(sys.argv[1], color=color)
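# Hedged usage sketch with a hypothetical filename, assuming Pillow is installed:
#   from creator import easy_create
#   easy_create("solid_red.png", color=(255, 0, 0))  # writes a 360x360 solid red PNG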
| 21.277778
| 45
| 0.582245
|
794a1e615be5f8aaa436cfa9e75426be5854fb94
| 351
|
py
|
Python
|
Diena_8_dictionaries/d8_g1_u1.py
|
edzya/Python_RTU_08_20
|
d2921d998c611c18328dd523daf976a27ce858c1
|
[
"MIT"
] | 8
|
2020-08-31T16:10:54.000Z
|
2021-11-24T06:37:37.000Z
|
Diena_8_dictionaries/d8_g1_u1.py
|
edzya/Python_RTU_08_20
|
d2921d998c611c18328dd523daf976a27ce858c1
|
[
"MIT"
] | 8
|
2021-06-08T22:30:29.000Z
|
2022-03-12T00:48:55.000Z
|
Diena_8_dictionaries/d8_g1_u1.py
|
edzya/Python_RTU_08_20
|
d2921d998c611c18328dd523daf976a27ce858c1
|
[
"MIT"
] | 12
|
2020-09-28T17:06:52.000Z
|
2022-02-17T12:12:46.000Z
|
from collections import Counter
letters = input("enter words to count letters")
count = Counter(letters)
print(count.most_common())
# catalog = {}
# for i in letters:
# if i in catalog:
# catalog[i] += 1
# else:
# catalog[i] = 1
# list_ = list(catalog.keys())
# list_.sort()
# for z in list_:
# print(f"{z} : {catalog[z]}")
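# Worked example of the Counter approach above: for the input "hello world",
# Counter("hello world").most_common() returns
#   [('l', 3), ('o', 2), ('h', 1), ('e', 1), (' ', 1), ('w', 1), ('r', 1), ('d', 1)]
# i.e. sorted by descending count with ties kept in first-seen order, which matches the
# tallies the commented-out manual dictionary loop would produce.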
| 25.071429
| 47
| 0.606838
|
794a1edf70cc12944955496eb0d8d1f9e98d843a
| 4,987
|
py
|
Python
|
music21/tie.py
|
ismail4040/DeepLeanirngJazz
|
1e79cdde5652870706331c634aec4eca4b07ba01
|
[
"Apache-1.1"
] | null | null | null |
music21/tie.py
|
ismail4040/DeepLeanirngJazz
|
1e79cdde5652870706331c634aec4eca4b07ba01
|
[
"Apache-1.1"
] | null | null | null |
music21/tie.py
|
ismail4040/DeepLeanirngJazz
|
1e79cdde5652870706331c634aec4eca4b07ba01
|
[
"Apache-1.1"
] | null | null | null |
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: tie.py
# Purpose: music21 classes for representing ties (visual and conceptual)
#
# Authors: Michael Scott Cuthbert
# Christopher Ariza
#
# Copyright: Copyright © 2009-2010, 2012, 2015 Michael Scott Cuthbert and the music21 Project
# License: LGPL or BSD, see license.txt
#-------------------------------------------------------------------------------
'''
The `tie` module contains a single class, `Tie` that represents the visual and
conceptual idea of tied notes. They can be start or stop ties.
'''
import unittest
from music21 import exceptions21
from music21.common import SlottedObjectMixin
class TieException(exceptions21.Music21Exception):
pass
#-------------------------------------------------------------------------------
class Tie(SlottedObjectMixin):
'''
Object added to notes that are tied to other notes. The `type` value is one
of start, stop, or continue.
>>> note1 = note.Note()
>>> note1.tie = tie.Tie('start') # start, stop, or continue
>>> note1.tie.style = 'normal' # default; could also be 'dotted' or 'dashed' or 'hidden'
>>> note1.tie.type
'start'
>>> note1.tie
<music21.tie.Tie start>
Generally Ties have a placement of None, but if they are defined
as 'above' or 'below' this will be retained. (see:
http://forums.makemusic.com/viewtopic.php?f=12&t=2179&start=0
for how orientation and placement in musicxml are essentially the same
content).
>>> note1.tie.placement is None
True
Differences from MusicXML:
* notes do not need to know if they are tied from a
previous note. i.e., you can tie n1 to n2 just with
a tie start on n1. However, if you want proper musicXML output
you need a tie stop on n2.
* one tie with "continue" implies tied from and tied to.
The tie.style only applies to ties of type 'start' or 'continue' (and then
only to the next part of the tie). For instance, if there are two
tied notes, and the first note has a 'dotted'-start tie, and the
second note has a 'dashed'-stop tie, the graphical tie itself will be dotted.
OMIT_FROM_DOCS
optional (to know what notes are next:)
    .to = note() # not implemented yet, b/c of garbage coll.
.from = note()
(question: should notes be able to be tied to multiple notes
for the case where a single note is tied both voices of a
two-note-head unison?)
'''
### CLASS VARIABLES ###
__slots__ = (
'id',
'placement',
'style',
'type',
)
_DOC_ATTR = {
'type': '''
            The tie type; one of 'start', 'stop', 'continue', 'let-ring', or 'continue-let-ring'.
''',
'style': '''
The style of the tie. Currently can be 'normal', 'dotted', 'dashed' or 'hidden'
''',
'placement': '''
Whether the tie should go up or down. Can be None, meaning
            it is unknown or should be determined from context, or 'above' or 'below'.
''',
}
VALID_TIE_TYPES = ('start', 'stop', 'continue', 'let-ring', 'continue-let-ring')
### INITIALIZER ###
# pylint: disable=redefined-builtin
def __init__(self, type='start'): #@ReservedAssignment
#super().__init__()
if type not in self.VALID_TIE_TYPES:
raise TieException(
'Type must be one of {}, not {}'.format(self.VALID_TIE_TYPES, type))
# naming this 'type' was a mistake, because cannot create a property of this name.
self.id = id(self)
self.type = type
self.style = 'normal'
self.placement = None # = unknown, can be 'above' or 'below'
### SPECIAL METHODS ###
def __eq__(self, other):
'''
Equality. Based entirely on Tie.type.
>>> t1 = tie.Tie('start')
>>> t2 = tie.Tie('start')
>>> t3 = tie.Tie('stop')
>>> t1 == t2
True
>>> t2 == t3, t3 == t1
(False, False)
>>> t2 == None
False
'''
if other is None or not isinstance(other, Tie):
return False
elif self.type == other.type:
return True
return False
def __ne__(self, other):
'''
Tests for object inequality.
>>> a = tie.Tie('start')
>>> b = tie.Tie('stop')
>>> a != b
True
'''
return not self.__eq__(other)
def __repr__(self):
return '<music21.tie.Tie %s>' % self.type
class Test(unittest.TestCase):
def runTest(self):
pass
#-------------------------------------------------------------------------------
if __name__ == '__main__':
import music21
music21.mainTest(Test)
#------------------------------------------------------------------------------
# eof
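# Hedged example of the docstring's "tie n1 to n2" note, assuming music21 is installed:
#   from music21 import note, tie
#   n1 = note.Note('C4'); n2 = note.Note('C4')
#   n1.tie = tie.Tie('start')  # enough for music21's internal model of the tie
#   n2.tie = tie.Tie('stop')   # also needed on n2 for proper MusicXML output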
| 30.042169
| 97
| 0.54221
|
794a1ef6813a9364752b3f062a90bca5f01efc82
| 3,526
|
py
|
Python
|
orchestra/management/commands/orchestrastatus.py
|
RubenPX/django-orchestra
|
5ab4779e1ae12ec99569d682601b7810587ed381
|
[
"Unlicense"
] | 68
|
2015-02-09T10:28:44.000Z
|
2022-03-12T11:08:36.000Z
|
orchestra/management/commands/orchestrastatus.py
|
RubenPX/django-orchestra
|
5ab4779e1ae12ec99569d682601b7810587ed381
|
[
"Unlicense"
] | 17
|
2015-05-01T18:10:03.000Z
|
2021-03-19T21:52:55.000Z
|
orchestra/management/commands/orchestrastatus.py
|
RubenPX/django-orchestra
|
5ab4779e1ae12ec99569d682601b7810587ed381
|
[
"Unlicense"
] | 29
|
2015-03-31T04:51:03.000Z
|
2022-02-17T02:58:50.000Z
|
import os
import re
from django.conf import settings as djsettings
from django.core.management.base import BaseCommand
from django.db import connections, OperationalError  # OperationalError is caught below when probing the DB
from orchestra import settings, get_version
from orchestra.utils import paths
from orchestra.utils.sys import run
from .startservices import flatten
class Command(BaseCommand):
def is_running(self, context, ps):
if context['service'] == 'uwsgi':
regex = r'.*uwsgi .*/%(project_name)s.ini.*' % context
else:
regex = r'.*%(service)s.*' % context
return re.match(regex, ps)
def handle(self, *args, **options):
context = {
'project_name': paths.get_project_name(),
'site_dir': paths.get_site_dir(),
}
banner = "%(project_name)s status" % context
self.stdout.write(banner)
self.stdout.write('-'*len(banner))
self.stdout.write(' Orchestra version: ' + get_version())
if djsettings.DEBUG:
self.stdout.write(" debug enabled")
else:
self.stdout.write(" debug disabled")
ps = run('ps aux').stdout.decode().replace('\n', ' ')
for service in flatten(settings.ORCHESTRA_START_SERVICES):
context['service'] = service
if self.is_running(context, ps):
self.stdout.write(" %(service)s online" % context)
else:
self.stdout.write(" %(service)s offline" % context)
if service == 'nginx':
try:
config_path = '/etc/nginx/sites-enabled/%(project_name)s.conf' % context
with open(config_path, 'r') as handler:
config = handler.read().replace('\n', ' ')
except FileNotFoundError:
self.stdout.write(" * %s not found" % config_path)
else:
regex = r'location\s+([^\s]+)\s+{.*uwsgi_pass unix:///var/run/uwsgi/app/%(project_name)s/socket;.*' % context
location = re.findall(regex, config)
if location:
ip = run("ip a | grep 'inet ' | awk {'print $2'} | grep -v '^127.0.' | head -n 1 | cut -d'/' -f1").stdout.decode()
if not ip:
ip = '127.0.0.1'
location = 'http://%s%s' % (ip, location[0])
self.stdout.write(" * location %s" % location)
else:
self.stdout.write(" * location not found")
elif service == 'postgresql':
db_conn = connections['default']
try:
c = db_conn.cursor()
except OperationalError:
self.stdout.write(" * DB connection failed")
else:
self.stdout.write(" * DB connection succeeded")
elif service == 'uwsgi':
uwsgi_config = '/etc/uwsgi/apps-enabled/%(project_name)s.ini' % context
if os.path.isfile(uwsgi_config):
self.stdout.write(" * %s exists" % uwsgi_config)
else:
self.stdout.write(" * %s does not exist" % uwsgi_config)
cronbeat = 'crontab -l | grep "^.*/orchestra-beat %(site_dir)s/manage.py"' % context
if run(cronbeat, valid_codes=(0, 1)).exit_code == 0:
self.stdout.write(" cronbeat installed")
else:
self.stdout.write(" cronbeat not installed")
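# Hedged usage note: as a Django management command this is normally run from the project
# directory as
#   python manage.py orchestrastatus
# which prints the Orchestra version, the DEBUG flag, and an online/offline line per
# configured service, plus the nginx/uwsgi/postgresql and cronbeat checks above.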
| 44.075
| 138
| 0.525241
|
794a1f3a362332a2a73ea2b5fbb8d7c0faccb296
| 11,821
|
py
|
Python
|
extra_tests/cffi_tests/cffi0/test_zdistutils.py
|
yxzoro/pypy
|
6e47b3d3e5513d9639a21554963a6ace172ccfee
|
[
"Apache-2.0",
"OpenSSL"
] | 1
|
2018-12-27T20:40:49.000Z
|
2018-12-27T20:40:49.000Z
|
extra_tests/cffi_tests/cffi0/test_zdistutils.py
|
GabriellaUwa/pypy
|
2ede3b557a25cb49db969e942ca5a7f8a9eae0d4
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
extra_tests/cffi_tests/cffi0/test_zdistutils.py
|
GabriellaUwa/pypy
|
2ede3b557a25cb49db969e942ca5a7f8a9eae0d4
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
# Generated by pypy/tool/import_cffi.py
import sys, os, imp, math, shutil
import py
from cffi import FFI, FFIError
from cffi.verifier import Verifier, _locate_engine_class, _get_so_suffixes
from cffi.ffiplatform import maybe_relative_path
from extra_tests.cffi_tests.udir import udir
class DistUtilsTest(object):
def setup_class(self):
self.lib_m = "m"
if sys.platform == 'win32':
#there is a small chance this fails on Mingw via environ $CC
import distutils.ccompiler
if distutils.ccompiler.get_default_compiler() == 'msvc':
self.lib_m = 'msvcrt'
def teardown_class(self):
if udir.isdir():
udir.remove(ignore_errors=True)
udir.ensure(dir=1)
def test_locate_engine_class(self):
cls = _locate_engine_class(FFI(), self.generic)
if self.generic:
# asked for the generic engine, which must not generate a
# CPython extension module
assert not cls._gen_python_module
else:
# asked for the CPython engine: check that we got it, unless
# we are running on top of PyPy, where the generic engine is
# always better
if '__pypy__' not in sys.builtin_module_names:
assert cls._gen_python_module
def test_write_source(self):
ffi = FFI()
ffi.cdef("double sin(double x);")
csrc = '/*hi there %s!*/\n#include <math.h>\n' % self
v = Verifier(ffi, csrc, force_generic_engine=self.generic,
libraries=[self.lib_m])
v.write_source()
with open(v.sourcefilename, 'r') as f:
data = f.read()
assert csrc in data
def test_write_source_explicit_filename(self):
ffi = FFI()
ffi.cdef("double sin(double x);")
csrc = '/*hi there %s!*/\n#include <math.h>\n' % self
v = Verifier(ffi, csrc, force_generic_engine=self.generic,
libraries=[self.lib_m])
v.sourcefilename = filename = str(udir.join('write_source.c'))
v.write_source()
assert filename == v.sourcefilename
with open(filename, 'r') as f:
data = f.read()
assert csrc in data
def test_write_source_to_file_obj(self):
ffi = FFI()
ffi.cdef("double sin(double x);")
csrc = '/*hi there %s!*/\n#include <math.h>\n' % self
v = Verifier(ffi, csrc, force_generic_engine=self.generic,
libraries=[self.lib_m])
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
f = StringIO()
v.write_source(file=f)
assert csrc in f.getvalue()
def test_compile_module(self):
ffi = FFI()
ffi.cdef("double sin(double x);")
csrc = '/*hi there %s!*/\n#include <math.h>\n' % self
v = Verifier(ffi, csrc, force_generic_engine=self.generic,
libraries=[self.lib_m])
v.compile_module()
assert v.get_module_name().startswith('_cffi_')
if v.generates_python_module():
mod = imp.load_dynamic(v.get_module_name(), v.modulefilename)
assert hasattr(mod, '_cffi_setup')
def test_compile_module_explicit_filename(self):
ffi = FFI()
ffi.cdef("double sin(double x);")
csrc = '/*hi there %s!2*/\n#include <math.h>\n' % self
v = Verifier(ffi, csrc, force_generic_engine=self.generic,
libraries=[self.lib_m])
basename = self.__class__.__name__ + 'test_compile_module'
v.modulefilename = filename = str(udir.join(basename + '.so'))
v.compile_module()
assert filename == v.modulefilename
assert v.get_module_name() == basename
if v.generates_python_module():
mod = imp.load_dynamic(v.get_module_name(), v.modulefilename)
assert hasattr(mod, '_cffi_setup')
def test_name_from_checksum_of_cdef(self):
names = []
for csrc in ['double', 'double', 'float']:
ffi = FFI()
ffi.cdef("%s sin(double x);" % csrc)
v = Verifier(ffi, "#include <math.h>",
force_generic_engine=self.generic,
libraries=[self.lib_m])
names.append(v.get_module_name())
assert names[0] == names[1] != names[2]
def test_name_from_checksum_of_csrc(self):
names = []
for csrc in ['123', '123', '1234']:
ffi = FFI()
ffi.cdef("double sin(double x);")
v = Verifier(ffi, csrc, force_generic_engine=self.generic)
names.append(v.get_module_name())
assert names[0] == names[1] != names[2]
def test_load_library(self):
ffi = FFI()
ffi.cdef("double sin(double x);")
csrc = '/*hi there %s!3*/\n#include <math.h>\n' % self
v = Verifier(ffi, csrc, force_generic_engine=self.generic,
libraries=[self.lib_m])
library = v.load_library()
assert library.sin(12.3) == math.sin(12.3)
def test_verifier_args(self):
ffi = FFI()
ffi.cdef("double sin(double x);")
csrc = '/*hi there %s!4*/#include "test_verifier_args.h"\n' % self
udir.join('test_verifier_args.h').write('#include <math.h>\n')
v = Verifier(ffi, csrc, include_dirs=[str(udir)],
force_generic_engine=self.generic,
libraries=[self.lib_m])
library = v.load_library()
assert library.sin(12.3) == math.sin(12.3)
def test_verifier_object_from_ffi(self):
ffi = FFI()
ffi.cdef("double sin(double x);")
csrc = "/*6%s*/\n#include <math.h>" % self
lib = ffi.verify(csrc, force_generic_engine=self.generic,
libraries=[self.lib_m])
assert lib.sin(12.3) == math.sin(12.3)
assert isinstance(ffi.verifier, Verifier)
with open(ffi.verifier.sourcefilename, 'r') as f:
data = f.read()
assert csrc in data
def test_extension_object(self):
ffi = FFI()
ffi.cdef("double sin(double x);")
csrc = '/*7%s*/' % self + '''
#include <math.h>
#ifndef TEST_EXTENSION_OBJECT
# error "define_macros missing"
#endif
'''
lib = ffi.verify(csrc, define_macros=[('TEST_EXTENSION_OBJECT', '1')],
force_generic_engine=self.generic,
libraries=[self.lib_m])
assert lib.sin(12.3) == math.sin(12.3)
v = ffi.verifier
ext = v.get_extension()
assert 'distutils.extension.Extension' in str(ext.__class__) or \
'setuptools.extension.Extension' in str(ext.__class__)
assert ext.sources == [maybe_relative_path(v.sourcefilename)]
assert ext.name == v.get_module_name()
assert ext.define_macros == [('TEST_EXTENSION_OBJECT', '1')]
def test_extension_forces_write_source(self):
ffi = FFI()
ffi.cdef("double sin(double x);")
csrc = '/*hi there9!%s*/\n#include <math.h>\n' % self
v = Verifier(ffi, csrc, force_generic_engine=self.generic,
libraries=[self.lib_m])
assert not os.path.exists(v.sourcefilename)
v.get_extension()
assert os.path.exists(v.sourcefilename)
def test_extension_object_extra_sources(self):
ffi = FFI()
ffi.cdef("double test1eoes(double x);")
extra_source = str(udir.join('extension_extra_sources.c'))
with open(extra_source, 'w') as f:
f.write('double test1eoes(double x) { return x * 6.0; }\n')
csrc = '/*9%s*/' % self + '''
double test1eoes(double x); /* or #include "extra_sources.h" */
'''
lib = ffi.verify(csrc, sources=[extra_source],
force_generic_engine=self.generic)
assert lib.test1eoes(7.0) == 42.0
v = ffi.verifier
ext = v.get_extension()
assert 'distutils.extension.Extension' in str(ext.__class__) or \
'setuptools.extension.Extension' in str(ext.__class__)
assert ext.sources == [maybe_relative_path(v.sourcefilename),
extra_source]
assert ext.name == v.get_module_name()
def test_install_and_reload_module(self, targetpackage='', ext_package=''):
KEY = repr(self)
if not hasattr(os, 'fork'):
py.test.skip("test requires os.fork()")
if targetpackage:
udir.ensure(targetpackage, dir=1).ensure('__init__.py')
sys.path.insert(0, str(udir))
def make_ffi(**verifier_args):
ffi = FFI()
ffi.cdef("/* %s, %s, %s */" % (KEY, targetpackage, ext_package))
ffi.cdef("double test1iarm(double x);")
csrc = "double test1iarm(double x) { return x * 42.0; }"
lib = ffi.verify(csrc, force_generic_engine=self.generic,
ext_package=ext_package,
**verifier_args)
return ffi, lib
childpid = os.fork()
if childpid == 0:
# in the child
ffi, lib = make_ffi()
assert lib.test1iarm(1.5) == 63.0
# "install" the module by moving it into udir (/targetpackage)
if targetpackage:
target = udir.join(targetpackage)
else:
target = udir
shutil.move(ffi.verifier.modulefilename, str(target))
os._exit(0)
# in the parent
_, status = os.waitpid(childpid, 0)
if not (os.WIFEXITED(status) and os.WEXITSTATUS(status) == 0):
raise AssertionError # see error above in subprocess
from cffi import ffiplatform
prev_compile = ffiplatform.compile
try:
if targetpackage == ext_package:
ffiplatform.compile = lambda *args: dont_call_me_any_more
# won't find it in tmpdir, but should find it correctly
# installed in udir
ffi, lib = make_ffi()
assert lib.test1iarm(0.5) == 21.0
finally:
ffiplatform.compile = prev_compile
def test_install_and_reload_module_package(self):
self.test_install_and_reload_module(targetpackage='foo_iarmp',
ext_package='foo_iarmp')
def test_install_and_reload_module_ext_package_not_found(self):
self.test_install_and_reload_module(targetpackage='foo_epnf',
ext_package='not_found')
def test_tag(self):
ffi = FFI()
ffi.cdef("/* %s test_tag */ double test1tag(double x);" % self)
csrc = "double test1tag(double x) { return x - 42.0; }"
lib = ffi.verify(csrc, force_generic_engine=self.generic,
tag='xxtest_tagxx')
assert lib.test1tag(143) == 101.0
assert '_cffi_xxtest_tagxx_' in ffi.verifier.modulefilename
def test_modulename(self):
ffi = FFI()
ffi.cdef("/* %s test_modulename */ double test1foo(double x);" % self)
csrc = "double test1foo(double x) { return x - 63.0; }"
modname = 'xxtest_modulenamexx%d' % (self.generic,)
lib = ffi.verify(csrc, force_generic_engine=self.generic,
modulename=modname)
assert lib.test1foo(143) == 80.0
suffix = _get_so_suffixes()[0]
fn1 = os.path.join(ffi.verifier.tmpdir, modname + '.c')
fn2 = os.path.join(ffi.verifier.tmpdir, modname + suffix)
assert ffi.verifier.sourcefilename == fn1
assert ffi.verifier.modulefilename == fn2
class TestDistUtilsCPython(DistUtilsTest):
generic = False
class TestDistUtilsGeneric(DistUtilsTest):
generic = True
| 40.762069
| 79
| 0.58303
|
794a1f44b5dce9956b208874ca579c62fdff25ea
| 14,104
|
py
|
Python
|
oneflow/python/test/ops/test_slice_v2.py
|
basicv8vc/oneflow
|
2a0480b3f4ff42a59fcae945a3b3bb2d208e37a3
|
[
"Apache-2.0"
] | 1
|
2020-10-13T03:03:40.000Z
|
2020-10-13T03:03:40.000Z
|
oneflow/python/test/ops/test_slice_v2.py
|
basicv8vc/oneflow
|
2a0480b3f4ff42a59fcae945a3b3bb2d208e37a3
|
[
"Apache-2.0"
] | null | null | null |
oneflow/python/test/ops/test_slice_v2.py
|
basicv8vc/oneflow
|
2a0480b3f4ff42a59fcae945a3b3bb2d208e37a3
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import oneflow as flow
import oneflow.typing as otp
import test_util
import typing as tp
import collections
import unittest
import os
DEFAULT_DEVICE_TAG = "gpu"
if os.getenv("ONEFLOW_TEST_CPU_ONLY"):
DEFAULT_DEVICE_TAG = "cpu"
def _do_slice(input, args, name=None):
outputs = []
for slice_tup_list in args:
output = flow.slice_v2(input, slice_tup_list, name)
outputs.append(output)
return outputs
def _make_slice_func(slice_args, input_shape, dtype=flow.float32, func_cfg=None):
@flow.global_function(type="predict", function_config=func_cfg)
def slice_job(
x: otp.Numpy.Placeholder(shape=input_shape, dtype=dtype)
) -> tp.List[otp.Numpy]:
return _do_slice(x, slice_args)
return slice_job
def _make_slice_with_fp16_func(slice_args, input_shape, func_cfg=None):
@flow.global_function(type="predict", function_config=func_cfg)
def slice_job(
x: otp.Numpy.Placeholder(shape=input_shape, dtype=flow.float32)
) -> tp.List[otp.Numpy]:
x = flow.cast(x, flow.float16)
y = _do_slice(x, slice_args)
return [flow.cast(y_i, flow.float32) for y_i in y]
return slice_job
def _make_slice_dynamic_func(
slice_args, input_shape, dtype=flow.float32, func_cfg=None
):
if func_cfg is None:
func_cfg = flow.FunctionConfig()
func_cfg.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(type="predict", function_config=func_cfg)
def slice_dynamic_job(
x: otp.ListNumpy.Placeholder(shape=input_shape, dtype=dtype)
) -> tp.List[otp.ListNumpy]:
return _do_slice(x, slice_args, name="SliceDynamic")
return slice_dynamic_job
def _make_slice_with_grad_func(
slice_tup_list, input_shape, watch_diff_cb=None, dtype=flow.float32, func_cfg=None,
):
@flow.global_function(type="train", function_config=func_cfg)
def slice_with_grad_job(
x: otp.Numpy.Placeholder(shape=input_shape, dtype=dtype)
) -> otp.Numpy:
var = flow.get_variable(
shape=input_shape,
dtype=dtype,
initializer=flow.constant_initializer(0.0),
name="variable",
)
x = x + var
if callable(watch_diff_cb):
flow.watch_diff(x, watch_diff_cb)
y = flow.slice_v2(x, slice_tup_list, name="SliceWithGrad")
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0
).minimize(y)
return y
return slice_with_grad_job
def _test_slice(
test_case,
input,
slice_args,
outputs,
dtype=flow.float32,
device_tag=DEFAULT_DEVICE_TAG,
verbose=False,
):
input = input.astype(flow.convert_oneflow_dtype_to_numpy_dtype(dtype))
outputs = [
output.astype(flow.convert_oneflow_dtype_to_numpy_dtype(dtype))
for output in outputs
]
flow.clear_default_session()
func_cfg = flow.FunctionConfig()
func_cfg.default_data_type(dtype)
func_cfg.default_placement_scope(flow.scope.placement(device_tag, "0:0"))
slice_func = _make_slice_func(slice_args, input.shape, dtype, func_cfg)
of_outputs = slice_func(input)
if verbose:
print("input:\n{}".format(input))
print("slice_args:", slice_args)
print("dtype:", dtype)
print("device_tag:", device_tag)
for out, of_out in zip(outputs, of_outputs):
if verbose:
print("output:\n{}\n{}".format(out, of_out))
test_case.assertTrue(np.array_equal(out, of_out))
def _test_slice_dynamic(
test_case,
input,
slice_args,
outputs,
static_shape=None,
dtype=flow.float32,
device_tag=DEFAULT_DEVICE_TAG,
):
input = input.astype(flow.convert_oneflow_dtype_to_numpy_dtype(dtype))
outputs = [
output.astype(flow.convert_oneflow_dtype_to_numpy_dtype(dtype))
for output in outputs
]
if static_shape is None:
static_shape = input.shape
flow.clear_default_session()
func_cfg = flow.FunctionConfig()
func_cfg.default_data_type(dtype)
func_cfg.default_placement_scope(flow.scope.placement(device_tag, "0:0"))
func_cfg.default_logical_view(flow.scope.mirrored_view())
slice_func = _make_slice_dynamic_func(slice_args, static_shape, dtype, func_cfg)
of_outputs = slice_func([input])
for out, of_out in zip(outputs, of_outputs):
test_case.assertTrue(np.array_equal(out, of_out[0]))
def _test_slice_with_grad(
test_case,
input,
slice_args,
output,
diff,
dtype=flow.float32,
device_tag=DEFAULT_DEVICE_TAG,
verbose=False,
):
input = input.astype(flow.convert_oneflow_dtype_to_numpy_dtype(dtype))
output = output.astype(flow.convert_oneflow_dtype_to_numpy_dtype(dtype))
diff = diff.astype(flow.convert_oneflow_dtype_to_numpy_dtype(dtype))
if verbose:
print("dtype: {}".format(dtype))
print("device_tag: {}".format(device_tag))
print("input: {}\n{}\n".format(input.shape, input))
print("output: {}\n{}\n".format(output.shape, output))
print("diff: {}\n{}\n".format(diff.shape, diff))
def WatchDiff(of_diff: otp.Numpy):
if verbose:
print("of_diff: {}\n{}\n".format(of_diff.shape, of_diff))
test_case.assertTrue(np.array_equal(of_diff, diff))
flow.clear_default_session()
func_cfg = flow.FunctionConfig()
func_cfg.default_data_type(dtype)
func_cfg.default_placement_scope(flow.scope.placement(device_tag, "0:0"))
slice_func = _make_slice_with_grad_func(
slice_args, input.shape, WatchDiff, dtype, func_cfg
)
of_output = slice_func(input)
if verbose:
print("of_output: {}\n{}\n".format(of_output.shape, of_output))
test_case.assertTrue(np.array_equal(output, of_output))
def test_slice_base(test_case):
input = np.random.rand(10)
slice_args = [[(1, 7, 2)]]
outputs = [input[1:7:2]]
arg_dict = collections.OrderedDict()
arg_dict["dtype"] = [
flow.uint8,
flow.int8,
flow.int32,
flow.int64,
flow.float32,
flow.float64,
]
arg_dict["device_tag"] = ["cpu", "gpu"]
# arg_dict["verbose"] = [True]
for kwarg in test_util.GenArgDict(arg_dict):
_test_slice(test_case, input, slice_args, outputs, **kwarg)
def test_slice_into_two_parts(test_case):
input = np.random.rand(2, 5, 4)
slice_args = [
[(None, None, None), (0, 2, None), (None, None, None)],
[(None, None, None), (2, None, None), (None, None, None)],
]
outputs = [input[:, 0:2, :], input[:, 2:, :]]
_test_slice(test_case, input, slice_args, outputs)
def test_slice_at_first_dim(test_case):
input = np.random.rand(4, 5, 4)
slice_args = [[(2, None, None)]]
outputs = [input[2:None, :, :]]
_test_slice(test_case, input, slice_args, outputs)
def test_slice_at_two_dims(test_case):
input = np.random.rand(2, 5, 4)
slice_args = [[(None, None, None), (0, 2, None), (2, None, None)]]
outputs = [input[:, 0:2, 2:]]
_test_slice(test_case, input, slice_args, outputs)
def test_slice_with_collapse_dims(test_case):
input = np.random.rand(2, 5, 4, 4, 3)
slice_args = [
[
(None, None, None),
(0, 2, None),
(None, None, None),
(None, None, None),
(1, None, None),
]
]
outputs = [input[:, 0:2, :, :, 1:]]
_test_slice(test_case, input, slice_args, outputs)
def test_slice_with_step_two(test_case):
input = np.random.rand(2, 5, 4)
slice_args = [[(None, None, None), (1, None, 2)]]
outputs = [input[:, 1::2, :]]
_test_slice(test_case, input, slice_args, outputs)
def test_slice_at_two_dim_with_step_more_than_one(test_case):
input = np.random.rand(2, 5, 4)
slice_args = [[(None, None, None), (1, None, 3), (None, None, 2)]]
outputs = [input[:, 1::3, ::2]]
_test_slice(test_case, input, slice_args, outputs)
def test_slice_with_neg_start(test_case):
input = np.random.rand(2, 5, 4)
slice_args = [[(None, None, None), (-4, None, None)]]
outputs = [input[:, -4:, :]]
_test_slice(test_case, input, slice_args, outputs)
def test_slice_with_neg_stop(test_case):
input = np.random.rand(2, 5, 4)
slice_args = [[(None, None, None), (None, -2, None)]]
outputs = [input[:, :-2, :]]
_test_slice(test_case, input, slice_args, outputs)
def test_slice_with_neg_step(test_case):
input = np.random.rand(2, 5, 4)
slice_args = [[(None, None, None), (None, None, -1)]]
outputs = [input[:, ::-1, :]]
_test_slice(test_case, input, slice_args, outputs)
def test_slice_with_neg_step_two(test_case):
input = np.random.rand(2, 5, 4)
slice_args = [[(None, None, None), (-1, 1, -2)]]
outputs = [input[:, -1:1:-2, :]]
_test_slice(test_case, input, slice_args, outputs)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_slice_with_float16(test_case):
input = np.random.rand(10).astype(np.float32)
slice_args = [[(2, 7, None)]]
outputs = [input[2:7]]
flow.clear_default_session()
flow.config.gpu_device_num(1)
slice_func = _make_slice_with_fp16_func(slice_args, input.shape)
of_outputs = slice_func(input)
# print("outputs[0]:\n{}".format(outputs[0]))
# print("of_outputs[0]:\n{}".format(of_outputs[0]))
test_case.assertTrue(np.allclose(outputs[0], of_outputs[0], rtol=1e-03, atol=1e-04))
def test_slice_dynamic_base(test_case):
input = np.random.rand(2, 4, 4)
slice_args = [[(None, None, None), (1, None, None)]]
outputs = [input[:, 1:, :]]
arg_dict = collections.OrderedDict()
arg_dict["dtype"] = [
flow.uint8,
flow.int8,
flow.int32,
flow.int64,
flow.float32,
flow.float64,
]
arg_dict["device_tag"] = ["cpu", "gpu"]
for kwarg in test_util.GenArgDict(arg_dict):
_test_slice_dynamic(
test_case, input, slice_args, outputs, static_shape=(2, 5, 5), **kwarg
)
def test_slice_dynamic_at_two_dims(test_case):
input = np.random.rand(2, 3, 2, 2)
slice_args = [
[(None, None, None), (2, None, None), (None, None, None), (1, None, None)]
]
outputs = [input[:, 2:, :, 1:]]
_test_slice_dynamic(
test_case, input, slice_args, outputs, static_shape=(2, 5, 3, 3)
)
def test_slice_dynamic_at_first_dim_and_last_dim(test_case):
input = np.random.rand(3, 6, 3, 3)
slice_args = [
[(1, None, None), (None, None, None), (None, None, None), (1, None, None)]
]
outputs = [input[1:, :, :, 1:]]
_test_slice_dynamic(
test_case, input, slice_args, outputs, static_shape=(4, 5, 5, 3)
)
def test_slice_dynamic_neg_start(test_case):
input = np.random.rand(2, 10)
slice_args = [[(None, None, None), (-5, None, None)]]
outputs = [input[:, -5:]]
_test_slice_dynamic(test_case, input, slice_args, outputs, static_shape=(3, 7))
def test_slice_dynamic_neg_step(test_case):
input = np.random.rand(2, 10)
slice_args = [[(None, None, None), (None, -5, -1)]]
outputs = [input[:, :-5:-1]]
_test_slice_dynamic(test_case, input, slice_args, outputs, static_shape=(3, 7))
# This test case will raise a fatal error; the error information looks like this:
# F0808 00:20:19.768465 23960 user_kernel.cpp:451] Check failed: shape_view.elem_cnt() <= static_shape.elem_cnt() (12 vs. 9)
# InferShape of OpKernel (op_type_name: slice, op_name: SliceDynamic_0) raise error,
# output arg's (name: y, index: 0) runtime shape (2,6) surpass the limit of static shape (3,3)
# *** Check failure stack trace: ***
# ...
# The reason is the mismatch between the static slice (for memory) and the dynamic slice (the real slice).
# The result shape of slice [:, 3:-1] for static shape (3, 7) is (3, 3),
# which indicates that the blob has a memory limit of prod(3, 3) elements,
# and the result shape of slice [:, 3:-1] for dynamic shape (2, 10) is (2, 6),
# which makes the blob exceed its memory limit (see the sketch after the commented-out test below).
# def test_slice_dynamic_dismatch(test_case):
# input = np.random.rand(2, 10)
# slice_args = [[(None, None, None), (3, -1, None)]]
# outputs = [input[:, 3:-1]]
# _test_slice_dynamic(test_case, input, slice_args, outputs, static_shape=(3, 7))
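# A minimal sketch (not part of the original tests) of the shape arithmetic
# described above, using plain numpy so the static-vs-dynamic limit can be
# checked without running OneFlow; the helper name is hypothetical and the
# function is never called by the test suite.
def _example_slice_shape_mismatch():
    static = np.empty((3, 7))    # static (memory) shape
    dynamic = np.empty((2, 10))  # dynamic (runtime) shape
    static_sliced = static[:, 3:-1]    # shape (3, 3) -> memory limit of 9 elements
    dynamic_sliced = dynamic[:, 3:-1]  # shape (2, 6) -> 12 elements, beyond the limit
    assert static_sliced.shape == (3, 3)
    assert dynamic_sliced.shape == (2, 6)
    # 12 > 9 is exactly the "(12 vs. 9)" condition in the check failure quoted above
    return dynamic_sliced.size > static_sliced.size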
# static shape after slice is (5, 4)
# dynamic shape after slice is (4, 5)
def test_slice_dynamic_anomaly(test_case):
input = np.random.rand(4, 7)
slice_args = [[(None, None, None), (2, None, None)]]
outputs = [input[:, 2:]]
_test_slice_dynamic(test_case, input, slice_args, outputs, static_shape=(5, 6))
# static shape after slice is (5, 3)
# dynamic shape after slice is (4, 4)
# def test_slice_dynamic_anomaly_failed(test_case):
# input = np.random.rand(4, 7)
# slice_args = [[(None, None, None), (3, None, None)]]
# outputs = [input[:, 3:]]
# _test_slice_dynamic(test_case, input, slice_args, outputs, static_shape=(5, 6))
def test_slice_with_grad(test_case):
input = np.random.rand(2, 5, 4)
slice_tup_list = [(None, None, None), (2, -2, None)]
output = input[:, 2:-2, :]
diff = np.zeros(input.shape, dtype=input.dtype)
diff[:, 2:-2, :] = 1
arg_dict = collections.OrderedDict()
arg_dict["dtype"] = [flow.float32, flow.float64]
arg_dict["device_tag"] = ["cpu", "gpu"]
arg_dict["verbose"] = [False]
for kwarg in test_util.GenArgDict(arg_dict):
_test_slice_with_grad(test_case, input, slice_tup_list, output, diff, **kwarg)
| 33.107981
| 124
| 0.659175
|
794a1fb6e01794e33fe321311f1a73e07e157321
| 4,570
|
py
|
Python
|
tests/test_dfp_create_orders.py
|
Pubmatic-Dhruv-Sonone/dfp-prebid-setup
|
bff688831f40ae067459072632b48675ca26175d
|
[
"MIT"
] | 111
|
2017-03-09T02:05:25.000Z
|
2022-03-14T21:03:00.000Z
|
tests/test_dfp_create_orders.py
|
propertypal/dfp-prebid-setup
|
03d5d0cdc37b60031fc5e2962fb89fd7c7945d9e
|
[
"MIT"
] | 81
|
2017-03-10T08:07:02.000Z
|
2022-03-02T04:44:06.000Z
|
tests/test_dfp_create_orders.py
|
propertypal/dfp-prebid-setup
|
03d5d0cdc37b60031fc5e2962fb89fd7c7945d9e
|
[
"MIT"
] | 87
|
2017-03-16T21:38:53.000Z
|
2022-02-04T11:48:29.000Z
|
from unittest import TestCase
from mock import MagicMock, Mock, patch
import settings
import dfp.create_orders
from dfp.exceptions import BadSettingException, MissingSettingException
@patch('googleads.ad_manager.AdManagerClient.LoadFromStorage')
class DFPCreateOrderTests(TestCase):
@patch('dfp.get_orders.get_order_by_name')
def test_create_orders_call(self, mock_get_order_by_name, mock_dfp_client):
"""
Ensure it calls DFP once with order info.
"""
# Mock that no order exists with the same name.
mock_get_order_by_name.return_value = None
mock_dfp_client.return_value = MagicMock()
(mock_dfp_client.return_value
.GetService.return_value
.createOrders) = MagicMock()
order_name = 'My Fake Test Order'
advertiser_id = 24681012
trafficker_id = 12359113
dfp.create_orders.create_order(order_name, advertiser_id, trafficker_id)
expected_config = [
dfp.create_orders.create_order_config(name=order_name,
advertiser_id=advertiser_id, trafficker_id=trafficker_id)
]
(mock_dfp_client.return_value
.GetService.return_value
.createOrders.assert_called_once_with(expected_config)
)
@patch.multiple('settings', DFP_USE_EXISTING_ORDER_IF_EXISTS=False)
@patch('dfp.get_orders.get_order_by_name')
def test_create_orders_duplicate_name_fail(self, mock_get_order_by_name,
mock_dfp_client):
"""
Ensure it throws an Exception if an order with that name already exists
and we should not modify the existing order.
"""
# Mock that an order already exists with the same name.
mock_get_order_by_name.return_value = {
'id': 123456789,
'name': 'My Test Order!'
}
order_name = 'My Test Order!'
advertiser_id = 24681012
trafficker_id = 12359113
with self.assertRaises(BadSettingException):
dfp.create_orders.create_order(order_name, advertiser_id, trafficker_id)
@patch.multiple('settings', DFP_USE_EXISTING_ORDER_IF_EXISTS=True)
@patch('dfp.get_orders.get_order_by_name')
def test_create_orders_duplicate_name_success(self, mock_get_order_by_name,
mock_dfp_client):
"""
Use an existing order with the same name if settings allow.
"""
# Mock that an order already exists with the same name.
mock_get_order_by_name.return_value = {
'id': 123456789,
'name': 'My Test Order!'
}
order_name = 'My Test Order!'
advertiser_id = 24681012
trafficker_id = 12359113
order_id = dfp.create_orders.create_order(order_name, advertiser_id,
trafficker_id)
self.assertEqual(order_id, 123456789)
@patch('dfp.get_orders.get_order_by_name')
def test_return_order_id(self, mock_get_order_by_name, mock_dfp_client):
"""
Ensure it returns the created order ID.
"""
# Mock that no order exists with the same name.
mock_get_order_by_name.return_value = None
mock_dfp_client.return_value = MagicMock()
order_name = 'Some Order!'
advertiser_id = 97867564
trafficker_id = 13243546
# Mock DFP response.
(mock_dfp_client.return_value
.GetService.return_value
.createOrders) = MagicMock(
return_value=[{
'id': 22233344,
'name': order_name,
'startDateTime': {},
'endDateTime': {},
'unlimitedEndDateTime': True,
'status': 'DRAFT',
'isArchived': False,
'externalOrderId': 0,
'currencyCode': 'USD',
'advertiserId': advertiser_id,
'creatorId': 123456789,
'traffickerId': trafficker_id,
'totalImpressionsDelivered': 0,
'totalClicksDelivered': 0,
'totalViewableImpressionsDelivered': 0,
'totalBudget': {
'currencyCode': 'USD',
'microAmount': 0,
},
'lastModifiedByApp': 'tab-for-',
'isProgrammatic': False,
'lastModifiedDateTime': {},
}]
)
order_id = dfp.create_orders.create_order(order_name, advertiser_id,
trafficker_id)
self.assertEqual(order_id, 22233344)
def test_order_config(self, mock_dfp_client):
"""
Ensure order config creation is correct.
"""
order_name = 'My Test Order!'
advertiser_id = 24681012
trafficker_id = 12359113
config = dfp.create_orders.create_order_config(name=order_name,
advertiser_id=advertiser_id, trafficker_id=trafficker_id)
self.assertEqual(config, {
'advertiserId': 24681012,
'name': 'My Test Order!',
'traffickerId': 12359113
})
| 29.10828
| 78
| 0.686871
|
794a213a30039565706b722b9a48f175fa1339e5
| 1,321
|
py
|
Python
|
app/core/tests/test_admin.py
|
shojibhasan/racipe-app-api
|
f641b71ac7b5dc7e0a8d72614c80620bb265ae3e
|
[
"MIT"
] | null | null | null |
app/core/tests/test_admin.py
|
shojibhasan/racipe-app-api
|
f641b71ac7b5dc7e0a8d72614c80620bb265ae3e
|
[
"MIT"
] | null | null | null |
app/core/tests/test_admin.py
|
shojibhasan/racipe-app-api
|
f641b71ac7b5dc7e0a8d72614c80620bb265ae3e
|
[
"MIT"
] | null | null | null |
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='admin@londonappdev.com',
password='password123'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='test@londonappdev.com',
password='password123',
name='Test user full name'
)
def test_users_listed(self):
"""Test that users are listed on user page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
# Test that the user edit page works
url = reverse('admin:core_user_change', args=[self.user.id])
res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
# Test that the create user page works
        url = reverse('admin:core_user_add')
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
| 33.025
| 68
| 0.647994
|
794a21ce4931b025319c382f5c936d6b0b2654b1
| 1,304
|
py
|
Python
|
lab4/espclient.py
|
petersumner/ECE40862
|
ce5132b7fb48276e80de5f8df19f3434fb282e1b
|
[
"MIT"
] | null | null | null |
lab4/espclient.py
|
petersumner/ECE40862
|
ce5132b7fb48276e80de5f8df19f3434fb282e1b
|
[
"MIT"
] | null | null | null |
lab4/espclient.py
|
petersumner/ECE40862
|
ce5132b7fb48276e80de5f8df19f3434fb282e1b
|
[
"MIT"
] | null | null | null |
from machine import Pin, Timer
import network
import esp32
import socket
import ssl
# Initialize Red and Green LEDS on GPIO Output
led_red = Pin(12, Pin.OUT)
led_green = Pin(21, Pin.OUT)
led_red.value(1)
led_green.value(1)
timer = Timer(0)
# Connect to WiFi network
essid = "The MATRIX"
password = "redacted"
wlan = network.WLAN(network.STA_IF)
wlan.active(True)
wlan.connect(essid, password)
while not wlan.isconnected():
pass
print("Connected!")
print(wlan.ifconfig())
# Connect to ThingSpeak
api_key = '3ODNWZEY2LM86CTX'
ai = socket.getaddrinfo('api.thingspeak.com', 80)[0][-1]
addr = [(2, 1, 0, '', ('10.0.0.158', 80))][0][-1]
#Send data via API key
def send_data(timer):
html = """
POST /update HTTP/1.1
Host: api.thingspeak.com
Connection: close
X-THINGSPEAKAPIKEY: 3ODNWZEY2LM86CTX
Content-Type: application/x-www-form-urlencoded
Content-Length: %d
%s
"""
temp = esp32.raw_temperature()
hall = esp32.hall_sensor()
print('Temperature: '+str(temp)+', Hall: '+str(hall))
s = socket.socket()
s.connect(addr)
s = ssl.wrap_socket(s)
data = 'field1=%.2f&field2=%.2f' % (temp, hall)
    http = html % (len(data), data)  # "%" string formatting fills Content-Length and the body ("&" raises TypeError)
s.write(http.encode())
s.close()
timer.init(period=16000, mode=Timer.PERIODIC, callback=send_data)
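# A minimal sketch (not part of the lab code) of assembling the same ThingSpeak
# update request with explicit CRLF line endings instead of the indented
# multi-line template above; the field values passed in are hypothetical.
def _example_build_request(temp=23.5, hall=101.0):
    body = 'field1=%.2f&field2=%.2f' % (temp, hall)
    request = ('POST /update HTTP/1.1\r\n'
               'Host: api.thingspeak.com\r\n'
               'Connection: close\r\n'
               'X-THINGSPEAKAPIKEY: 3ODNWZEY2LM86CTX\r\n'
               'Content-Type: application/x-www-form-urlencoded\r\n'
               'Content-Length: %d\r\n'
               '\r\n'
               '%s' % (len(body), body))
    return request.encode()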
| 23.709091
| 65
| 0.671779
|
794a268c71cb461cd02fa9422f2a72473fa39f22
| 1,922
|
py
|
Python
|
named_entity_recognizer.py
|
antoreep-jana/BBC-News-Analyzer
|
0a6e54ddf4baefa4532213c5e6f60e712ff3a1ca
|
[
"MIT"
] | 1
|
2021-12-27T12:57:07.000Z
|
2021-12-27T12:57:07.000Z
|
named_entity_recognizer.py
|
antoreep-jana/BBC-News-Analyzer
|
0a6e54ddf4baefa4532213c5e6f60e712ff3a1ca
|
[
"MIT"
] | null | null | null |
named_entity_recognizer.py
|
antoreep-jana/BBC-News-Analyzer
|
0a6e54ddf4baefa4532213c5e6f60e712ff3a1ca
|
[
"MIT"
] | null | null | null |
import spacy
import streamlit as st
# txt = 'Apple reached an all-time high stock price of 143 dollars this January'
# nlp = spacy.load('en_core_web_trf')
# doc = nlp(txt)
# for ent in doc.ents:
# print(ent.text)
# print(ent.label_)
# print('='*5)
# import gc
# gc.collect()
## requirements.txt
# https://github.com/explosion/spacy-models/releases/download/en_core_web_lg-3.2.0/en_core_web_lg-3.2.0.tar.gz#egg=en_core_web_lg
# https://github.com/explosion/spacy-models/releases/download/en_core_web_trf-3.0.0/en_core_web_trf-3.0.0.tar.gz#egg=en_core_web_trf
@st.cache(allow_output_mutation = True, show_spinner = False, max_entries = 4, persist = True)
def get_model():
#nlp = spacy.load('en_core_web_trf')
#nlp = spacy.load('en_core_web_lg')
nlp = spacy.load('en_core_web_md')
return nlp
class NER:
def __init__(self, txt):
#nlp = spacy.load('en_core_web_sm')
#nlp = spacy.load('en_core_web_md')
#nlp = spacy.load('en_core_web_lg')
nlp = get_model()
#nlp = spacy.load('en_core_web_trf')
self.entities = []
self.entityLabels = []
self.txt = txt
self.doc = nlp(txt)
def entRecognizer(self, entDict, typeEnt):
entList = [ent for ent in entDict if entDict[ent] == typeEnt]
return entList
def get_entities(self):
for ent in self.doc.ents:
self.entities.append(ent.text)
self.entityLabels.append(ent.label_)
entDict = dict(zip(self.entities, self.entityLabels))
entOrg = self.entRecognizer(entDict, "ORG")
entCardinal = self.entRecognizer(entDict, "CARDINAL")
entPerson = self.entRecognizer(entDict, "PERSON")
entDate = self.entRecognizer(entDict, "DATE")
entGPE = self.entRecognizer(entDict, "GPE")
return entOrg, entCardinal, entPerson, entDate, entGPE
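# A minimal usage sketch (an assumption, not part of the analyzer itself): it
# mirrors the commented-out spaCy example at the top of this file and is never
# called; the sample sentence is hypothetical.
def _example_ner_usage():
    recognizer = NER("Apple reached an all-time high stock price of 143 dollars this January")
    orgs, cardinals, persons, dates, gpes = recognizer.get_entities()
    # Each list holds the entity strings for the corresponding spaCy label
    # (ORG, CARDINAL, PERSON, DATE, GPE).
    return orgs, cardinals, persons, dates, gpes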
| 30.507937
| 132
| 0.646202
|
794a270129755c3ca3fd97c2e44069897e770a46
| 12,155
|
py
|
Python
|
TrainingInterfaces/Text_to_Spectrogram/Tacotron2/tacotron2_train_loop.py
|
Vaibhavs10/IMS-Toucan
|
931e4ce63a4cc675cb15b72474a3c3619632a07b
|
[
"Apache-2.0"
] | 93
|
2021-08-11T13:52:37.000Z
|
2022-03-29T23:19:07.000Z
|
TrainingInterfaces/Text_to_Spectrogram/Tacotron2/tacotron2_train_loop.py
|
Vaibhavs10/IMS-Toucan
|
931e4ce63a4cc675cb15b72474a3c3619632a07b
|
[
"Apache-2.0"
] | 4
|
2021-12-15T17:23:14.000Z
|
2022-03-24T04:51:40.000Z
|
TrainingInterfaces/Text_to_Spectrogram/Tacotron2/tacotron2_train_loop.py
|
Vaibhavs10/IMS-Toucan
|
931e4ce63a4cc675cb15b72474a3c3619632a07b
|
[
"Apache-2.0"
] | 25
|
2021-08-11T14:23:47.000Z
|
2022-03-28T20:23:51.000Z
|
import os
import time
import matplotlib.pyplot as plt
import torch
import torch.multiprocessing
import torch.nn.functional as F
from speechbrain.pretrained import EncoderClassifier
from torch.cuda.amp import GradScaler
from torch.cuda.amp import autocast
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
from Preprocessing.TextFrontend import TextFrontend
from TrainingInterfaces.Text_to_Spectrogram.Tacotron2.AlignmentLoss import binarize_attention_parallel
from Utility.utils import delete_old_checkpoints
from Utility.utils import get_most_recent_checkpoint
def plot_attention(model, lang, device, speaker_embedding, att_dir, step):
tf = TextFrontend(language=lang, use_word_boundaries=False, use_explicit_eos=False)
sentence = ""
if lang == "en":
sentence = "This is a complex sentence, it even has a pause!"
elif lang == "de":
sentence = "Dies ist ein komplexer Satz, er hat sogar eine Pause!"
text = tf.string_to_tensor(sentence).long().squeeze(0).to(device)
phones = tf.get_phone_string(sentence)
model.eval()
att = model.inference(text=text, speaker_embeddings=speaker_embedding)[2].to("cpu")
model.train()
del tf
bin_att = binarize_attention_parallel(att.unsqueeze(0).unsqueeze(1),
in_lens=torch.LongTensor([len(text)]),
out_lens=torch.LongTensor([len(att)])).squeeze(0).squeeze(0).detach().numpy()
fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(8, 9))
ax[0].imshow(att.detach().numpy(), interpolation='nearest', aspect='auto', origin="lower")
ax[1].imshow(bin_att, interpolation='nearest', aspect='auto', origin="lower")
ax[1].set_xlabel("Inputs")
ax[0].xaxis.set_visible(False)
ax[0].set_ylabel("Outputs")
ax[1].set_ylabel("Outputs")
ax[1].set_xticks(range(len(att[0])))
del att
ax[1].set_xticklabels(labels=[phone for phone in phones])
ax[0].set_title("Soft-Attention")
ax[1].set_title("Hard-Attention")
fig.tight_layout()
if not os.path.exists(os.path.join(att_dir, "attention_plots")):
os.makedirs(os.path.join(att_dir, "attention_plots"))
fig.savefig(os.path.join(os.path.join(att_dir, "attention_plots"), str(step) + ".png"))
fig.clf()
plt.close()
def collate_and_pad(batch):
if len(batch[0]) == 4:
# every entry in batch: [text, text_length, spec, spec_length]
return (pad_sequence([datapoint[0].squeeze(0) for datapoint in batch], batch_first=True),
torch.stack([datapoint[1] for datapoint in batch]).squeeze(1),
pad_sequence([datapoint[2] for datapoint in batch], batch_first=True),
torch.stack([datapoint[3] for datapoint in batch]).squeeze(1))
elif len(batch[0]) == 5:
# every entry in batch: [text, text_length, spec, spec_length, speaker_embedding]
return (pad_sequence([datapoint[0].squeeze(0) for datapoint in batch], batch_first=True),
torch.stack([datapoint[1] for datapoint in batch]).squeeze(1),
pad_sequence([datapoint[2] for datapoint in batch], batch_first=True),
torch.stack([datapoint[3] for datapoint in batch]).squeeze(1),
torch.stack([datapoint[4] for datapoint in batch]))
def train_loop(net,
train_dataset,
device,
save_directory,
batch_size,
steps,
epochs_per_save,
lang,
lr,
use_speaker_embedding=False,
path_to_checkpoint=None,
fine_tune=False,
collapse_margin=5.0, # be wary of loss scheduling
resume=False,
use_cycle_consistency_for_speakerembedding=False):
"""
Args:
resume: whether to resume from the most recent checkpoint
collapse_margin: margin in which the loss may increase in one epoch without triggering the soft-reset
steps: How many steps to train
lr: The initial learning rate for the optimiser
path_to_checkpoint: reloads a checkpoint to continue training from there
fine_tune: whether to load everything from a checkpoint, or only the model parameters
lang: language of the synthesis
use_speaker_embedding: whether to expect speaker embeddings
net: Model to train
train_dataset: Pytorch Dataset Object for train data
device: Device to put the loaded tensors on
save_directory: Where to save the checkpoints
batch_size: How many elements should be loaded at once
epochs_per_save: how many epochs to train in between checkpoints
"""
net = net.to(device)
scaler = GradScaler()
previous_error = 999999 # tacotron can collapse sometimes and requires soft-resets. This is to detect collapses.
train_loader = DataLoader(batch_size=batch_size,
dataset=train_dataset,
drop_last=True,
num_workers=10,
pin_memory=True,
shuffle=True,
prefetch_factor=10,
collate_fn=collate_and_pad,
persistent_workers=True)
if use_speaker_embedding:
reference_speaker_embedding_for_att_plot = torch.Tensor(train_dataset[0][4]).to(device)
if use_cycle_consistency_for_speakerembedding:
speaker_embedding_func = EncoderClassifier.from_hparams(source="speechbrain/spkrec-ecapa-voxceleb",
run_opts={"device": str(device)},
savedir="Models/speechbrain_speaker_embedding_ecapa")
else:
reference_speaker_embedding_for_att_plot = None
step_counter = 0
epoch = 0
net.train()
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
if resume:
path_to_checkpoint = get_most_recent_checkpoint(checkpoint_dir=save_directory)
if path_to_checkpoint is not None:
# careful when restarting, plotting data will be overwritten!
check_dict = torch.load(os.path.join(path_to_checkpoint), map_location=device)
net.load_state_dict(check_dict["model"])
if not fine_tune:
optimizer.load_state_dict(check_dict["optimizer"])
scaler.load_state_dict(check_dict["scaler"])
step_counter = check_dict["step_counter"]
start_time = time.time()
while True:
epoch += 1
optimizer.zero_grad()
train_losses_this_epoch = list()
for batch in tqdm(train_loader):
with autocast():
if not use_speaker_embedding:
train_loss = net(text=batch[0].to(device),
text_lengths=batch[1].to(device),
speech=batch[2].to(device),
speech_lengths=batch[3].to(device),
step=step_counter)
else:
if not use_cycle_consistency_for_speakerembedding:
train_loss = net(text=batch[0].to(device),
text_lengths=batch[1].to(device),
speech=batch[2].to(device),
speech_lengths=batch[3].to(device),
step=step_counter,
speaker_embeddings=batch[4].to(device))
else:
train_loss, predicted_mels = net(text=batch[0].to(device),
text_lengths=batch[1].to(device),
speech=batch[2].to(device),
speech_lengths=batch[3].to(device),
step=step_counter,
speaker_embeddings=batch[4].to(device),
return_mels=True)
pred_spemb = speaker_embedding_func.modules.embedding_model(predicted_mels,
torch.tensor([x / len(predicted_mels[0]) for x in batch[3]]))
gold_spemb = speaker_embedding_func.modules.embedding_model(batch[2].to(device),
torch.tensor([x / len(batch[2][0]) for x in batch[3]]))
# we have to recalculate the speaker embedding from our own mel because we project into a slightly different mel space
cosine_cycle_distance = torch.tensor(1.0) - F.cosine_similarity(pred_spemb.squeeze(), gold_spemb.squeeze(), dim=1).mean()
pairwise_cycle_distance = F.pairwise_distance(pred_spemb.squeeze(), gold_spemb.squeeze()).mean()
cycle_distance = cosine_cycle_distance + pairwise_cycle_distance
del pred_spemb
del predicted_mels
del gold_spemb
train_loss = train_loss + cycle_distance * 5
train_losses_this_epoch.append(train_loss.item())
optimizer.zero_grad()
scaler.scale(train_loss).backward()
del train_loss
step_counter += 1
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(net.parameters(), 1.0, error_if_nonfinite=False)
scaler.step(optimizer)
scaler.update()
with torch.no_grad():
net.eval()
loss_this_epoch = sum(train_losses_this_epoch) / len(train_losses_this_epoch)
if previous_error + collapse_margin < loss_this_epoch:
print("Model Collapse detected! \nPrevious Loss: {}\nNew Loss: {}".format(previous_error, loss_this_epoch))
print("Trying to reset to a stable state ...")
path_to_checkpoint = get_most_recent_checkpoint(checkpoint_dir=save_directory)
check_dict = torch.load(path_to_checkpoint, map_location=device)
net.load_state_dict(check_dict["model"])
if not fine_tune:
optimizer.load_state_dict(check_dict["optimizer"])
step_counter = check_dict["step_counter"]
scaler.load_state_dict(check_dict["scaler"])
else:
previous_error = loss_this_epoch
if epoch % epochs_per_save == 0:
torch.save({
"model" : net.state_dict(),
"optimizer" : optimizer.state_dict(),
"scaler" : scaler.state_dict(),
"step_counter": step_counter,
}, os.path.join(save_directory, "checkpoint_{}.pt".format(step_counter)))
delete_old_checkpoints(save_directory, keep=5)
plot_attention(model=net,
lang=lang,
device=device,
speaker_embedding=reference_speaker_embedding_for_att_plot,
att_dir=save_directory,
step=step_counter)
if step_counter > steps:
# DONE
return
print("Epoch: {}".format(epoch))
print("Train Loss: {}".format(loss_this_epoch))
print("Time elapsed: {} Minutes".format(round((time.time() - start_time) / 60)))
print("Steps: {}".format(step_counter))
torch.cuda.empty_cache()
net.train()
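# A minimal usage sketch (an assumption, not part of the training interface):
# "net" and "train_set" stand in for an actual Tacotron2 model object and a
# dataset whose entries are [text, text_length, spec, spec_length]; the save
# directory and hyperparameters below are hypothetical.
def _example_train_loop_call(net, train_set):
    train_loop(net=net,
               train_dataset=train_set,
               device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
               save_directory="Models/Tacotron2_example",
               batch_size=32,
               steps=100000,
               epochs_per_save=5,
               lang="en",
               lr=1e-3,
               use_speaker_embedding=False,
               resume=False)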
| 53.311404
| 145
| 0.56956
|
794a297ca1bc2e2b144b1f22bbb005d822e90cfb
| 32,404
|
py
|
Python
|
docassemble_webapp/docassemble/webapp/update.py
|
patrickr81/docassemble
|
651653f6d3ab4c4c95d1defbc547ab8e15e460cc
|
[
"MIT"
] | null | null | null |
docassemble_webapp/docassemble/webapp/update.py
|
patrickr81/docassemble
|
651653f6d3ab4c4c95d1defbc547ab8e15e460cc
|
[
"MIT"
] | 6
|
2021-02-08T20:44:14.000Z
|
2022-01-13T02:42:41.000Z
|
docassemble_webapp/docassemble/webapp/update.py
|
mkrejpsky/docassemble.pirati.cz
|
ffce141cad187aa96f477aa42877c5ad185a02e4
|
[
"MIT"
] | null | null | null |
import os
import sys
import socket
import tempfile
import subprocess
import xmlrpc.client
import re
#from io import StringIO
import shutil
import time
import fcntl
from distutils.version import LooseVersion
if __name__ == "__main__":
import docassemble.base.config
docassemble.base.config.load(arguments=sys.argv)
if 'initialize' in sys.argv:
mode = 'initialize'
elif 'check_for_updates' in sys.argv:
mode = 'check_for_updates'
else:
mode = 'initialize'
supervisor_url = os.environ.get('SUPERVISOR_SERVER_URL', None)
if supervisor_url:
USING_SUPERVISOR = True
else:
USING_SUPERVISOR = False
def fix_fnctl():
try:
        flags = fcntl.fcntl(sys.stdout, fcntl.F_GETFL)
        fcntl.fcntl(sys.stdout, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
sys.stderr.write("fix_fnctl: updated stdout\n")
except:
pass
try:
        flags = fcntl.fcntl(sys.stderr, fcntl.F_GETFL)
        fcntl.fcntl(sys.stderr, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
sys.stderr.write("fix_fnctl: updated stderr\n")
except:
pass
def remove_inactive_hosts():
start_time = time.time()
sys.stderr.write("remove_inactive_hosts: starting\n")
if USING_SUPERVISOR:
from docassemble.base.config import hostname
from docassemble.webapp.app_object import app
from docassemble.webapp.db_object import db
from docassemble.webapp.core.models import Supervisors
to_delete = set()
for host in Supervisors.query.all():
if host.hostname == hostname:
continue
try:
socket.gethostbyname(host.hostname)
server = xmlrpc.client.Server(host.url + '/RPC2')
result = server.supervisor.getState()
except:
to_delete.add(host.id)
for id_to_delete in to_delete:
Supervisors.query.filter_by(id=id_to_delete).delete()
sys.stderr.write("remove_inactive_hosts: ended after " + str(time.time() - start_time) + " seconds\n")
class DummyPackage(object):
def __init__(self, name):
self.name = name
self.type = 'pip'
self.limitation = None
def check_for_updates(doing_startup=False):
start_time = time.time()
sys.stderr.write("check_for_updates: starting\n")
from docassemble.base.config import hostname
from docassemble.webapp.app_object import app
from docassemble.webapp.db_object import db
from docassemble.webapp.packages.models import Package, Install, PackageAuth
ok = True
here_already = dict()
results = dict()
sys.stderr.write("check_for_updates: 0.5 after " + str(time.time() - start_time) + " seconds\n")
num_deleted = Package.query.filter_by(name='psycopg2').delete()
if num_deleted > 0:
db.session.commit()
num_deleted = Package.query.filter_by(name='pdfminer').delete()
if num_deleted > 0:
db.session.commit()
num_deleted = Package.query.filter_by(name='py-bcrypt').delete()
if num_deleted > 0:
db.session.commit()
num_deleted = Package.query.filter_by(name='pycrypto').delete()
if num_deleted > 0:
db.session.commit()
num_deleted = Package.query.filter_by(name='constraint').delete()
if num_deleted > 0:
db.session.commit()
num_deleted = Package.query.filter_by(name='distutils2').delete()
if num_deleted > 0:
db.session.commit()
sys.stderr.write("check_for_updates: 1 after " + str(time.time() - start_time) + " seconds\n")
installed_packages = get_installed_distributions()
for package in installed_packages:
here_already[package.key] = package.version
changed = False
if 'pdfminer.six' not in here_already:
sys.stderr.write("check_for_updates: installing pdfminer.six\n")
install_package(DummyPackage('pdfminer.six'))
changed = True
if 'psycopg2' in here_already:
sys.stderr.write("check_for_updates: uninstalling psycopg2\n")
uninstall_package(DummyPackage('psycopg2'))
if 'psycopg2-binary' in here_already:
sys.stderr.write("check_for_updates: reinstalling psycopg2-binary\n")
uninstall_package(DummyPackage('psycopg2-binary'))
install_package(DummyPackage('psycopg2-binary'))
changed = True
if 'psycopg2-binary' not in here_already:
sys.stderr.write("check_for_updates: installing psycopg2-binary\n")
install_package(DummyPackage('psycopg2-binary'))
        changed = True
if 'kombu' not in here_already or LooseVersion(here_already['kombu']) <= LooseVersion('4.1.0'):
sys.stderr.write("check_for_updates: installing new kombu version\n")
install_package(DummyPackage('kombu'))
changed = True
if 'celery' not in here_already or LooseVersion(here_already['celery']) <= LooseVersion('4.1.0'):
sys.stderr.write("check_for_updates: installing new celery version\n")
install_package(DummyPackage('celery'))
changed = True
if 'pycrypto' in here_already:
sys.stderr.write("check_for_updates: uninstalling pycrypto\n")
uninstall_package(DummyPackage('pycrypto'))
if 'pycryptodome' in here_already:
sys.stderr.write("check_for_updates: reinstalling pycryptodome\n")
uninstall_package(DummyPackage('pycryptodome'))
install_package(DummyPackage('pycryptodome'))
changed = True
if 'pycryptodome' not in here_already:
sys.stderr.write("check_for_updates: installing pycryptodome\n")
install_package(DummyPackage('pycryptodome'))
changed = True
if 'pdfminer' in here_already:
sys.stderr.write("check_for_updates: uninstalling pdfminer\n")
uninstall_package(DummyPackage('pdfminer'))
changed = True
if 'pdfminer3k' not in here_already:
sys.stderr.write("check_for_updates: installing pdfminer3k\n")
install_package(DummyPackage('pdfminer3k'))
changed = True
if 'py-bcrypt' in here_already:
sys.stderr.write("check_for_updates: uninstalling py-bcrypt\n")
uninstall_package(DummyPackage('py-bcrypt'))
changed = True
if 'bcrypt' in here_already:
sys.stderr.write("check_for_updates: reinstalling bcrypt\n")
uninstall_package(DummyPackage('bcrypt'))
install_package(DummyPackage('bcrypt'))
changed = True
if 'bcrypt' not in here_already:
sys.stderr.write("check_for_updates: installing bcrypt\n")
install_package(DummyPackage('bcrypt'))
changed = True
if changed:
installed_packages = get_installed_distributions()
here_already = dict()
for package in installed_packages:
here_already[package.key] = package.version
packages = dict()
installs = dict()
to_install = list()
to_uninstall = list()
uninstall_done = dict()
uninstalled_packages = dict()
logmessages = ''
package_by_name = dict()
sys.stderr.write("check_for_updates: 2 after " + str(time.time() - start_time) + " seconds\n")
for package in Package.query.filter_by(active=True).all():
package_by_name[package.name] = package
#sys.stderr.write("check_for_updates: database includes a package called " + package.name + " after " + str(time.time() - start_time) + " seconds\n")
# packages is what is supposed to be installed
sys.stderr.write("check_for_updates: 3 after " + str(time.time() - start_time) + " seconds\n")
for package in Package.query.filter_by(active=True).all():
if package.type is not None:
packages[package.id] = package
#sys.stderr.write("check_for_updates: database includes a package called " + package.name + " that has a type after " + str(time.time() - start_time) + " seconds\n")
#print("Found a package " + package.name)
sys.stderr.write("check_for_updates: 4 after " + str(time.time() - start_time) + " seconds\n")
for package in Package.query.filter_by(active=False).all():
if package.name not in package_by_name:
#sys.stderr.write("check_for_updates: database says " + package.name + " should be uninstalled after " + str(time.time() - start_time) + " seconds\n")
uninstalled_packages[package.id] = package # this is what the database says should be uninstalled
sys.stderr.write("check_for_updates: 5 after " + str(time.time() - start_time) + " seconds\n")
for install in Install.query.filter_by(hostname=hostname).all():
        installs[install.package_id] = install # this is what the database says is installed on this server
if install.package_id in uninstalled_packages and uninstalled_packages[install.package_id].name not in package_by_name:
sys.stderr.write("check_for_updates: " + uninstalled_packages[install.package_id].name + " will be uninstalled after " + str(time.time() - start_time) + " seconds\n")
to_uninstall.append(uninstalled_packages[install.package_id]) # uninstall if it is installed
changed = False
package_owner = dict()
sys.stderr.write("check_for_updates: 6 after " + str(time.time() - start_time) + " seconds\n")
for auth in PackageAuth.query.filter_by(authtype='owner').all():
package_owner[auth.package_id] = auth.user_id
sys.stderr.write("check_for_updates: 7 after " + str(time.time() - start_time) + " seconds\n")
for package in packages.values():
if package.id not in installs and package.name in here_already:
sys.stderr.write("check_for_updates: package " + package.name + " here already. Writing an Install record for it.\n")
install = Install(hostname=hostname, packageversion=here_already[package.name], version=package.version, package_id=package.id)
db.session.add(install)
installs[package.id] = install
changed = True
if changed:
db.session.commit()
sys.stderr.write("check_for_updates: 8 after " + str(time.time() - start_time) + " seconds\n")
for package in packages.values():
#sys.stderr.write("check_for_updates: processing package id " + str(package.id) + "\n")
#sys.stderr.write("1: " + str(installs[package.id].packageversion) + " 2: " + str(package.packageversion) + "\n")
if (package.packageversion is not None and package.id in installs and installs[package.id].packageversion is None) or (package.packageversion is not None and package.id in installs and installs[package.id].packageversion is not None and LooseVersion(package.packageversion) > LooseVersion(installs[package.id].packageversion)):
sys.stderr.write("check_for_updates: a new version of " + package.name + " is needed because the necessary package version, " + str(package.packageversion) + ", is ahead of the installed version, " + str(installs[package.id].packageversion) + " after " + str(time.time() - start_time) + " seconds\n")
new_version_needed = True
else:
new_version_needed = False
#sys.stderr.write("got here and new version is " + str(new_version_needed) + "\n")
# Check for missing local packages
if (package.name not in here_already) and (package.id in installs):
sys.stderr.write("check_for_updates: the package " + package.name + " is supposed to be installed on this server, but was not detected after " + str(time.time() - start_time) + " seconds\n")
package_missing = True
else:
package_missing = False
if package.id in installs and package.version > installs[package.id].version:
sys.stderr.write("check_for_updates: the package " + package.name + " has internal version " + str(package.version) + " but the installed version has version " + str(installs[package.id].version) + " after " + str(time.time() - start_time) + " seconds\n")
package_version_greater = True
else:
package_version_greater = False
if package.id not in installs:
sys.stderr.write("check_for_updates: the package " + package.name + " is not in the table of installed packages for this server after " + str(time.time() - start_time) + " seconds\n")
if package.id not in installs or package_version_greater or new_version_needed or package_missing:
to_install.append(package)
#sys.stderr.write("done with that" + "\n")
sys.stderr.write("check_for_updates: 9 after " + str(time.time() - start_time) + " seconds\n")
for package in to_uninstall:
#sys.stderr.write("Going to uninstall a package: " + package.name + "\n")
if package.name in uninstall_done:
sys.stderr.write("check_for_updates: skipping uninstallation of " + str(package.name) + " because already uninstalled after " + str(time.time() - start_time) + " seconds" + "\n")
continue
if package.name not in here_already:
sys.stderr.write("check_for_updates: skipping uninstallation of " + str(package.name) + " because not installed" + " after " + str(time.time() - start_time) + " seconds\n")
returnval = 1
newlog = ''
else:
returnval, newlog = uninstall_package(package)
uninstall_done[package.name] = 1
logmessages += newlog
if returnval == 0:
Install.query.filter_by(hostname=hostname, package_id=package.id).delete()
results[package.name] = 'pip uninstall command returned success code. See log for details.'
elif returnval == 1:
Install.query.filter_by(hostname=hostname, package_id=package.id).delete()
results[package.name] = 'pip uninstall was not run because the package was not installed.'
else:
results[package.name] = 'pip uninstall command returned failure code'
ok = False
packages_to_delete = list()
sys.stderr.write("check_for_updates: 10 after " + str(time.time() - start_time) + " seconds\n")
for package in to_install:
sys.stderr.write("check_for_updates: going to install a package: " + package.name + "after " + str(time.time() - start_time) + " seconds\n")
# if doing_startup and package.name.startswith('docassemble') and package.name in here_already:
# #adding this because of unpredictability of installing new versions of docassemble
# #just because of a system restart.
# sys.stderr.write("check_for_updates: skipping update on " + str(package.name) + "\n")
# continue
returnval, newlog = install_package(package)
logmessages += newlog
sys.stderr.write("check_for_updates: return value was " + str(returnval) + " after " + str(time.time() - start_time) + " seconds\n")
if returnval != 0:
sys.stderr.write("Return value was not good" + " after " + str(time.time() - start_time) + " seconds\n")
ok = False
#pip._vendor.pkg_resources._initialize_master_working_set()
pip_info = get_pip_info(package.name)
real_name = pip_info['Name']
sys.stderr.write("check_for_updates: real name of package " + str(package.name) + " is " + str(real_name) + "\n after " + str(time.time() - start_time) + " seconds")
if real_name is None:
results[package.name] = 'install failed'
ok = False
if package.name not in here_already:
sys.stderr.write("check_for_updates: removing package entry for " + package.name + " after " + str(time.time() - start_time) + " seconds\n")
packages_to_delete.append(package)
elif returnval != 0:
results[package.name] = 'pip install command returned failure code'
else:
results[package.name] = 'pip install command returned success code. See log for details.'
if real_name != package.name:
sys.stderr.write("check_for_updates: changing name" + " after " + str(time.time() - start_time) + " seconds\n")
package.name = real_name
if package.id in installs:
install = installs[package.id]
install.version = package.version
else:
install = Install(hostname=hostname, packageversion=package.packageversion, version=package.version, package_id=package.id)
db.session.add(install)
db.session.commit()
update_versions()
add_dependencies(package_owner.get(package.id, 1))
update_versions()
sys.stderr.write("check_for_updates: 11 after " + str(time.time() - start_time) + " seconds\n")
for package in packages_to_delete:
db.session.delete(package)
sys.stderr.write("check_for_updates: 12 after " + str(time.time() - start_time) + " seconds\n")
db.session.commit()
sys.stderr.write("check_for_updates: finished uninstalling and installing after " + str(time.time() - start_time) + " seconds\n")
return ok, logmessages, results
def update_versions():
start_time = time.time()
sys.stderr.write("update_versions: starting" + "\n")
from docassemble.base.config import hostname
from docassemble.webapp.app_object import app
from docassemble.webapp.db_object import db
from docassemble.webapp.packages.models import Package, Install, PackageAuth
from docassemble.webapp.daredis import r
install_by_id = dict()
for install in Install.query.filter_by(hostname=hostname).all():
install_by_id[install.package_id] = install
package_by_name = dict()
for package in Package.query.filter_by(active=True).order_by(Package.name, Package.id.desc()).all():
if package.name in package_by_name:
continue
package_by_name[package.name] = Object(id=package.id, packageversion=package.packageversion, name=package.name)
installed_packages = get_installed_distributions()
for package in installed_packages:
if package.key in package_by_name:
if package_by_name[package.key].id in install_by_id and package.version != install_by_id[package_by_name[package.key].id].packageversion:
install_row = Install.query.filter_by(hostname=hostname, package_id=package_by_name[package.key].id).first()
install_row.packageversion = package.version
if package.version != package_by_name[package.key].packageversion:
package_row = Package.query.filter_by(active=True, name=package_by_name[package.key].name).with_for_update().first()
package_row.packageversion = package.version
db.session.commit()
sys.stderr.write("update_versions: ended after " + str(time.time() - start_time) + "\n")
return
def get_home_page_dict():
from docassemble.base.config import daconfig
PACKAGE_DIRECTORY = daconfig.get('packages', '/usr/share/docassemble/local' + str(sys.version_info.major) + '.' + str(sys.version_info.minor))
FULL_PACKAGE_DIRECTORY = os.path.join(PACKAGE_DIRECTORY, 'lib', 'python' + str(sys.version_info.major) + '.' + str(sys.version_info.minor), 'site-packages')
home_page = dict()
for d in os.listdir(FULL_PACKAGE_DIRECTORY):
if not d.startswith('docassemble.'):
continue
metadata_path = os.path.join(FULL_PACKAGE_DIRECTORY, d, 'METADATA')
if os.path.isfile(metadata_path):
name = None
url = None
with open(metadata_path, 'r', encoding='utf-8') as fp:
for line in fp:
if line.startswith('Name: '):
name = line[6:].strip()
elif line.startswith('Home-page: '):
url = line[11:].strip()
break
if name:
home_page[name.lower()] = url
return home_page
def add_dependencies(user_id):
start_time = time.time()
#sys.stderr.write('add_dependencies: user_id is ' + str(user_id) + "\n")
sys.stderr.write("add_dependencies: starting\n")
from docassemble.base.config import hostname
from docassemble.webapp.app_object import app
from docassemble.webapp.db_object import db
from docassemble.webapp.packages.models import Package, Install, PackageAuth
packages_known = set()
for package in Package.query.filter_by(active=True).all():
packages_known.add(package.name)
installed_packages = get_installed_distributions()
home_pages = None
packages_to_add = list()
for package in installed_packages:
if package.key in packages_known:
continue
if package.key.startswith('mysqlclient') or package.key.startswith('mysql-connector') or package.key.startswith('MySQL-python'):
continue
Package.query.filter_by(name=package.key).delete()
packages_to_add.append(package)
if len(packages_to_add):
db.session.commit()
for package in packages_to_add:
package_auth = PackageAuth(user_id=user_id)
if package.key.startswith('docassemble.'):
if home_pages is None:
home_pages = get_home_page_dict()
home_page = home_pages.get(package.key.lower(), None)
if home_page is not None and re.search(r'/github.com/', home_page):
package_entry = Package(name=package.key, package_auth=package_auth, type='git', giturl=home_page, packageversion=package.version, dependency=True)
else:
package_entry = Package(name=package.key, package_auth=package_auth, type='pip', packageversion=package.version, dependency=True)
else:
package_entry = Package(name=package.key, package_auth=package_auth, type='pip', packageversion=package.version, dependency=True)
db.session.add(package_entry)
db.session.commit()
install = Install(hostname=hostname, packageversion=package_entry.packageversion, version=package_entry.version, package_id=package_entry.id)
db.session.add(install)
db.session.commit()
sys.stderr.write("add_dependencies: ending after " + str(time.time() - start_time) + " seconds\n")
return
def fix_names():
from docassemble.webapp.app_object import app
from docassemble.webapp.db_object import db
from docassemble.webapp.packages.models import Package, Install, PackageAuth
installed_packages = [package.key for package in get_installed_distributions()]
for package in Package.query.filter_by(active=True).with_for_update().all():
if package.name not in installed_packages:
pip_info = get_pip_info(package.name)
actual_name = pip_info['Name']
if actual_name is not None:
package.name = actual_name
else:
sys.stderr.write("fix_names: package " + package.name + " does not appear to be installed" + "\n")
db.session.commit()
def splitall(path):
allparts = []
while 1:
parts = os.path.split(path)
if parts[0] == path:
allparts.insert(0, parts[0])
break
elif parts[1] == path:
allparts.insert(0, parts[1])
break
else:
path = parts[0]
allparts.insert(0, parts[1])
return allparts
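# Editor's note: hedged usage sketch, not part of the original module.
# splitall() breaks a path into all of its components; on a POSIX path:
assert splitall('/usr/share/docassemble') == ['/', 'usr', 'share', 'docassemble']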
def install_package(package):
sys.stderr.write("install_package: " + package.name + "\n")
if package.type == 'zip' and package.upload is None:
return 0, ''
sys.stderr.write('install_package: ' + package.name + "\n")
from docassemble.base.config import daconfig
from docassemble.webapp.daredis import r
from docassemble.webapp.files import SavedFile
PACKAGE_DIRECTORY = daconfig.get('packages', '/usr/share/docassemble/local' + str(sys.version_info.major) + '.' + str(sys.version_info.minor))
logfilecontents = ''
pip_log = tempfile.NamedTemporaryFile()
temp_dir = tempfile.mkdtemp()
#use_pip_cache = r.get('da:updatepackage:use_pip_cache')
#if use_pip_cache is None:
# disable_pip_cache = False
#elif int(use_pip_cache):
# disable_pip_cache = False
#else:
# disable_pip_cache = True
disable_pip_cache = True
if package.type == 'zip' and package.upload is not None:
saved_file = SavedFile(package.upload, extension='zip', fix=True)
commands = ['pip', 'install']
if disable_pip_cache:
commands.append('--no-cache-dir')
commands.extend(['--quiet', '--prefix=' + PACKAGE_DIRECTORY, '--src=' + temp_dir, '--log-file=' + pip_log.name, '--upgrade', saved_file.path + '.zip'])
elif package.type == 'git' and package.giturl is not None:
if package.gitbranch is not None:
branchpart = '@' + str(package.gitbranch)
else:
branchpart = ''
if package.gitsubdir is not None:
commands = ['pip', 'install']
if disable_pip_cache:
commands.append('--no-cache-dir')
commands.extend(['--quiet', '--prefix=' + PACKAGE_DIRECTORY, '--src=' + temp_dir, '--upgrade', '--log-file=' + pip_log.name, 'git+' + str(package.giturl) + '.git' + branchpart + '#egg=' + package.name + '&subdirectory=' + str(package.gitsubdir)])
else:
commands = ['pip', 'install']
if disable_pip_cache:
commands.append('--no-cache-dir')
commands.extend(['--quiet', '--prefix=' + PACKAGE_DIRECTORY, '--src=' + temp_dir, '--upgrade', '--log-file=' + pip_log.name, 'git+' + str(package.giturl) + '.git' + branchpart + '#egg=' + package.name])
elif package.type == 'pip':
if package.limitation is None:
limit = ""
else:
limit = str(package.limitation)
commands = ['pip', 'install']
if disable_pip_cache:
commands.append('--no-cache-dir')
commands.extend(['--quiet', '--prefix=' + PACKAGE_DIRECTORY, '--src=' + temp_dir, '--upgrade', '--log-file=' + pip_log.name, package.name + limit])
else:
sys.stderr.write("Wrong package type\n")
return 1, 'Unable to recognize package type: ' + package.name
sys.stderr.write("install_package: running " + " ".join(commands) + "\n")
logfilecontents += " ".join(commands) + "\n"
returnval = 1
try:
subprocess.run(commands, check=True)
returnval = 0
except subprocess.CalledProcessError as err:
returnval = err.returncode
fix_fnctl()
sys.stderr.flush()
sys.stdout.flush()
time.sleep(4)
with open(pip_log.name, 'r', encoding='utf-8') as x:
logfilecontents += x.read()
pip_log.close()
try:
sys.stderr.write(logfilecontents + "\n")
except:
pass
sys.stderr.flush()
sys.stdout.flush()
time.sleep(4)
sys.stderr.write('returnval is: ' + str(returnval) + "\n")
sys.stderr.write('install_package: done' + "\n")
shutil.rmtree(temp_dir)
return returnval, logfilecontents
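# Editor's note: illustrative only; the URL, branch and paths below are
# hypothetical. For a git-type package with no subdirectory, the commands
# list assembled above resembles:
#   pip install --no-cache-dir --quiet --prefix=/usr/share/docassemble/local3.8
#       --src=/tmp/tmpXXXXXX --upgrade --log-file=/tmp/tmpYYYYYY
#       git+https://github.com/example/docassemble-demo.git@master#egg=docassemble.demo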
def uninstall_package(package):
sys.stderr.write('uninstall_package: ' + package.name + "\n")
logfilecontents = ''
#sys.stderr.write("uninstall_package: uninstalling " + package.name + "\n")
pip_log = tempfile.NamedTemporaryFile()
commands = ['pip', 'uninstall', '--yes', '--log-file=' + pip_log.name, package.name]
sys.stderr.write("Running " + " ".join(commands) + "\n")
logfilecontents += " ".join(commands) + "\n"
#returnval = pip.main(commands)
try:
subprocess.run(commands, check=True)
returnval = 0
except subprocess.CalledProcessError as err:
returnval = err.returncode
fix_fnctl()
sys.stderr.flush()
sys.stdout.flush()
time.sleep(4)
sys.stderr.write('Finished running pip' + "\n")
with open(pip_log.name, 'r', encoding='utf-8') as x:
logfilecontents += x.read()
pip_log.close()
try:
sys.stderr.write(logfilecontents + "\n")
except:
pass
sys.stderr.flush()
sys.stdout.flush()
time.sleep(4)
sys.stderr.write('uninstall_package: done' + "\n")
return returnval, logfilecontents
class Object(object):
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
pass
def get_installed_distributions():
start_time = time.time()
sys.stderr.write("get_installed_distributions: starting\n")
results = list()
try:
output = subprocess.check_output(['pip', '--version']).decode('utf-8', 'ignore')
except subprocess.CalledProcessError as err:
output = err.output.decode('utf-8', 'ignore')
sys.stderr.write("get_installed_distributions: pip version:\n" + output)
try:
output = subprocess.check_output(['pip', 'list', '--format=freeze']).decode('utf-8', 'ignore')
except subprocess.CalledProcessError as err:
output = err.output.decode('utf-8', 'ignore')
#sys.stderr.write("get_installed_distributions: result of pip list --format freeze was:\n" + str(output) + "\n")
for line in output.split('\n'):
a = line.split("==")
if len(a) == 2:
results.append(Object(key=a[0], version=a[1]))
sys.stderr.write("get_installed_distributions: ending after " + str(time.time() - start_time) + " seconds\n")
#sys.stderr.write(repr([x.key for x in results]) + "\n")
return results
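# Editor's note: hedged illustration, not part of the original module.
# The helper shells out to 'pip list --format=freeze' and parses lines of the
# form 'name==version'; a hypothetical line parses as
#   "docassemble.demo==1.0.0".split("==") -> ['docassemble.demo', '1.0.0']
# so callers can build lookups such as {p.key: p.version for p in get_installed_distributions()}.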
def get_pip_info(package_name):
#sys.stderr.write("get_pip_info: " + package_name + "\n")
try:
output = subprocess.check_output(['pip', 'show', package_name]).decode('utf-8', 'ignore')
except subprocess.CalledProcessError as err:
output = ""
sys.stderr.write("get_pip_info: error. output was " + err.output.decode('utf-8', 'ignore') + "\n")
# old_stdout = sys.stdout
# sys.stdout = saved_stdout = StringIO()
# pip.main(['show', package_name])
# sys.stdout = old_stdout
# output = saved_stdout.getvalue()
results = dict()
if not isinstance(output, str):
output = output.decode('utf-8', 'ignore')
for line in output.split('\n'):
#sys.stderr.write("Found line " + str(line) + "\n")
a = line.split(": ")
if len(a) == 2:
#sys.stderr.write("Found " + a[0] + " which was " + a[1] + "\n")
results[a[0]] = a[1]
for key in ['Name', 'Home-page', 'Version']:
if key not in results:
results[key] = None
return results
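# Editor's note: hedged usage sketch ('docassemble.demo' is a hypothetical name).
# info = get_pip_info('docassemble.demo')
# info['Name'], info['Version'] and info['Home-page'] are strings when
# 'pip show' succeeds, and None when the package is not installed.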
if __name__ == "__main__":
#import docassemble.webapp.database
from docassemble.webapp.app_object import app
with app.app_context():
from docassemble.webapp.db_object import db
from docassemble.webapp.packages.models import Package, Install, PackageAuth
from docassemble.webapp.daredis import r
#app.config['SQLALCHEMY_DATABASE_URI'] = docassemble.webapp.database.alchemy_connection_string()
if mode == 'initialize':
sys.stderr.write("updating with mode initialize\n")
update_versions()
any_package = Package.query.filter_by(active=True).first()
if any_package is None:
add_dependencies(1)
update_versions()
check_for_updates(doing_startup=True)
remove_inactive_hosts()
else:
sys.stderr.write("updating with mode check_for_updates\n")
check_for_updates()
from docassemble.base.config import daconfig
if USING_SUPERVISOR:
SUPERVISORCTL = daconfig.get('supervisorctl', 'supervisorctl')
container_role = ':' + os.environ.get('CONTAINERROLE', '') + ':'
if re.search(r':(web|celery|all):', container_role):
sys.stderr.write("Sending reset signal\n")
args = [SUPERVISORCTL, '-s', 'http://localhost:9001', 'start', 'reset']
subprocess.run(args)
else:
sys.stderr.write("Not sending reset signal because not web or celery\n")
else:
sys.stderr.write("update: touched wsgi file" + "\n")
wsgi_file = daconfig.get('webapp', '/usr/share/docassemble/webapp/docassemble.wsgi')
if os.path.isfile(wsgi_file):
with open(wsgi_file, 'a'):
os.utime(wsgi_file, None)
db.engine.dispose()
sys.exit(0)
| 50.395023
| 335
| 0.64776
|
794a29f6e5a4b5e6be3bd0fd3286684ab64d73ea
| 1,089
|
py
|
Python
|
utils/swift_build_support/swift_build_support/products/sourcekitlsp.py
|
Vooblin/swift
|
acc13579c6493c58144a180d3a5013eec164efb3
|
[
"Apache-2.0"
] | 10
|
2019-05-11T02:17:28.000Z
|
2022-02-06T15:37:53.000Z
|
utils/swift_build_support/swift_build_support/products/sourcekitlsp.py
|
Vooblin/swift
|
acc13579c6493c58144a180d3a5013eec164efb3
|
[
"Apache-2.0"
] | 1
|
2020-05-20T21:18:01.000Z
|
2020-05-20T21:18:01.000Z
|
utils/swift_build_support/swift_build_support/products/sourcekitlsp.py
|
Vooblin/swift
|
acc13579c6493c58144a180d3a5013eec164efb3
|
[
"Apache-2.0"
] | 1
|
2021-09-12T16:22:05.000Z
|
2021-09-12T16:22:05.000Z
|
# swift_build_support/products/sourcekitlsp.py -------------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
from . import indexstoredb
from . import product
class SourceKitLSP(product.Product):
@classmethod
def product_source_name(cls):
return "sourcekit-lsp"
@classmethod
def is_build_script_impl_product(cls):
return False
def build(self, host_target):
indexstoredb.run_build_script_helper(
'build', host_target, self, self.args)
def test(self, host_target):
if self.args.test and self.args.test_sourcekitlsp:
indexstoredb.run_build_script_helper(
'test', host_target, self, self.args)
| 32.029412
| 79
| 0.647383
|
794a2b286ba2606b25dc78e69842c531b5059fa0
| 3,634
|
py
|
Python
|
ftis/ftis/process.py
|
jamesb93/ftis
|
55ab43e8ba85399aeeaa383bbfa1dc8a524d96eb
|
[
"BSD-3-Clause"
] | 4
|
2021-04-16T05:47:52.000Z
|
2021-12-04T08:50:46.000Z
|
ftis/ftis/process.py
|
jamesb93/ftis
|
55ab43e8ba85399aeeaa383bbfa1dc8a524d96eb
|
[
"BSD-3-Clause"
] | 10
|
2020-05-13T16:47:38.000Z
|
2022-03-09T15:45:16.000Z
|
ftis/ftis/process.py
|
jamesb93/ftis
|
55ab43e8ba85399aeeaa383bbfa1dc8a524d96eb
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
import logging
from pathlib import Path
from rich.console import Console
from rich.markdown import Markdown
from ftis.common.io import write_json, read_json
from ftis.common.exceptions import InvalidSource
from ftis.common.utils import ignored_keys, create_hash
class FTISProcess:
"""Class that represents the life cycle of a 'FTIS' execution"""
def __init__(self, source=None, sink=None, mode="chain"):
self.sink = Path(sink).expanduser().resolve()
self.corpus = source
self.source = source.items # FIXME this is confusing
self.chain = []
self.logger = logging.getLogger(__name__)
self.console = Console()
self.mode = mode
self.metadata = {}
self.prev_meta = None
def setup(self):
self.sink.mkdir(exist_ok=True, parents=True)
# Create a place to store microcached results
self.cache = self.sink / ".cache"
self.cache.mkdir(exist_ok=True)
# Setup logging and meta path
self.metapath = self.sink / "metadata.json"
logfile_path = self.sink / "logfile.log"
# Read in previous metadata if exists
try:
self.prev_meta = read_json(self.metapath)
except FileNotFoundError:
self.prev_meta = None
self.logger.setLevel(logging.DEBUG)
if logfile_path.exists():
logfile_path.unlink()
logfile_handler = logging.FileHandler(logfile_path)
formatter = logging.Formatter("%(asctime)s : %(levelname)s : %(name)s : %(message)s")
logfile_handler.setFormatter(formatter)
self.logger.addHandler(logfile_handler)
self.logger.debug("Logging initialised")
def general_metadata(self):
# Time
self.metadata["time"] = datetime.datetime.now().strftime("%H:%M:%S | %B %d, %Y")
# Analyser chain
self.metadata["io"] = str([link.name for link in self.chain])
def fprint(self, text):
self.console.print(text, style="yellow underline")
def add(self, *args):
"""Accepts any number of classes to chain together"""
self.chain = args # Lets store these classes somewhere
analyser_params = {}
for i, analyser in enumerate(self.chain):
analyser.order = i
name = analyser.__class__.__name__
analyser_params[f"{i}_{name}"] = {
k: v for k, v in vars(analyser).items() if k not in ignored_keys
}
analyser.process = self
analyser.set_dump()
self.metadata["analyser"] = analyser_params
# self.order_hash = create_hash(*[x.name for x in self.chain]) #FIXME moot?
def run_analysers(self):
for i, analyser in enumerate(self.chain):
if self.mode == "chain":
if i == 0:
analyser.input = self.source
else:
analyser.input = self.chain[i - 1].output
else:
analyser.input = self.source
analyser.create_identity()
self.metadata["analyser"][f"{i}_{analyser.name}"]["identity_hash"] = analyser.identity_hash
analyser.do()
def run(self):
self.setup()
md = "# **** FTIS v1.1.1 ****"
md += f"\n\n**Source: {self.corpus.path}**"
md += f"\n\n**Sink: {self.sink}**"
md += "\n\n---------------------"
md += "\n\nBeginning processing..."
self.console.print(Markdown(md))
print("\n")
self.run_analysers()
self.general_metadata()
write_json(self.metapath, self.metadata)
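# Editor's note: hedged usage sketch, not part of the original module; the
# Corpus import and analyser name below are assumptions for illustration only.
# from ftis.corpus import Corpus
# process = FTISProcess(source=Corpus("~/sounds"), sink="~/ftis-output")
# process.add(SomeAnalyser()) # hypothetical analyser; real ones live in ftis.analyser
# process.run()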
| 34.283019
| 103
| 0.594111
|
794a2b93e64430465d077ba6e09c84da1dafe8dc
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/poetry/core/packages/utils/utils.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/poetry/core/packages/utils/utils.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/poetry/core/packages/utils/utils.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/0c/1f/be/81c5f1985c122a8a1b09de03082e9c77a993fa809dc5855c6cc1a9137a
| 96
| 96
| 0.895833
|
794a2bf333fe61a6c779b0834afad6687661341f
| 4,344
|
py
|
Python
|
envs/cheetah-mod-control.py
|
russellmendonca/oyster
|
0bdc46e650e47615a459d7eaad53ead735907c05
|
[
"MIT"
] | null | null | null |
envs/cheetah-mod-control.py
|
russellmendonca/oyster
|
0bdc46e650e47615a459d7eaad53ead735907c05
|
[
"MIT"
] | null | null | null |
envs/cheetah-mod-control.py
|
russellmendonca/oyster
|
0bdc46e650e47615a459d7eaad53ead735907c05
|
[
"MIT"
] | null | null | null |
import numpy as np
from itertools import combinations
from . import register_env
from .half_cheetah import HalfCheetahEnv
@register_env('cheetah-mod-control')
class HalfCheetahModControlEnv(HalfCheetahEnv):
"""Half-cheetah environment with target velocity, as described in [1]. The
code is adapted from
https://github.com/cbfinn/maml_rl/blob/9c8e2ebd741cb0c7b8bf2d040c4caeeb8e06cc95/rllab/envs/mujoco/half_cheetah_env_rand.py
The half-cheetah follows the dynamics from MuJoCo [2], and receives at each
time step a reward composed of a control cost and a penalty equal to the
difference between its current velocity and the target velocity. The tasks
are generated by sampling the target velocities from the uniform
distribution on [0, 2].
[1] Chelsea Finn, Pieter Abbeel, Sergey Levine, "Model-Agnostic
Meta-Learning for Fast Adaptation of Deep Networks", 2017
(https://arxiv.org/abs/1703.03400)
[2] Emanuel Todorov, Tom Erez, Yuval Tassa, "MuJoCo: A physics engine for
model-based control", 2012
(https://homes.cs.washington.edu/~todorov/papers/TodorovIROS12.pdf)
"""
def __init__(self, contextual=False, type='mask', n_tasks=20, randomize_tasks=False):
# self._task = task
self.contextual = contextual
self.task_type = type
if type == 'swp':
self.tasks = gen_swp_tasks()
self._joint_permutation = self.tasks[0].get('joint_permutation')
elif type == 'mask':
# 10 train tasks, 10 test tasks. 6th joint negated for test tasks
self.tasks = gen_neg_tasks()
self.mask = self.tasks[0].get('mask')
assert n_tasks == len(self.tasks)
assert randomize_tasks == False
super(HalfCheetahModControlEnv, self).__init__()
def step(self, action):
if self.task_type == 'swp':
action = action[self._joint_permutation]
elif self.task_type == 'mask':
action = self.mask * action
xposbefore = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
xposafter = self.sim.data.qpos[0]
ob = self._get_obs()
reward_ctrl = - 0.1 * np.square(action).sum()
reward_run = (xposafter - xposbefore) / self.dt
reward = reward_ctrl + reward_run
done = False
return ob, reward, done, dict(reward_run=reward_run, reward_ctrl=reward_ctrl)
def reward(self, obs, action, next_obs):
assert obs.ndim == 2
assert obs.shape == next_obs.shape
assert obs.shape[0] == action.shape[0]
reward_ctrl = - 0.1 * np.square(action).sum()
reward_run = (next_obs[:, -3] - obs[:, -3]) / self.dt
reward = reward_ctrl + reward_run
return reward
# def sample_tasks(self, num_tasks):
# velocities = np.random.uniform(0.0, 3.0, size=(num_tasks,))
# tasks = [{'velocity': velocity} for velocity in velocities]
# return tasks
def get_all_task_idx(self):
return range(len(self.tasks))
def reset_task(self, idx):
self._task = self.tasks[idx]
if self.task_type == 'swp':
self._joint_permutation = self._task['joint_permutation']
elif self.task_type == 'mask':
self.mask = self._task['mask']
self.reset()
def gen_swp_tasks():
all_tasks = []
swp_idxs = list(combinations(np.arange(6), 2))
orig_lst = np.arange(6)
for a, b in swp_idxs:
task_lst = orig_lst.copy()
task_lst[a], task_lst[b] = task_lst[b], task_lst[a]
all_tasks.append({'joint_permutation': task_lst})
for task in all_tasks:
print(task)
# print(all_tasks)
return all_tasks
def gen_neg_tasks():
# 10 train tasks, followed by 10 test tasks
all_tasks = []
all_train_neg_idxs = list(combinations(np.arange(5), 3))
for i, neg_idxs in enumerate(all_train_neg_idxs):
mask = np.ones(6)
for idx in neg_idxs:
mask[idx] = -1
all_tasks.append({'mask': mask})
all_test_neg_idxs = list(combinations(np.arange(5), 2))
for i, neg_idxs in enumerate(all_test_neg_idxs):
mask = np.ones(6)
mask[-1] = -1
for idx in neg_idxs:
mask[idx] = -1
all_tasks.append({'mask': mask})
return all_tasks
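# Editor's note: hedged illustration, not part of the original module.
# gen_neg_tasks() negates 3 of the first 5 joints for training (C(5,3) = 10
# masks) and negates the 6th joint plus 2 of the first 5 for testing
# (C(5,2) = 10 masks), giving the 20 tasks the constructor asserts on.
# e.g. the first training mask negates joints 0, 1, 2: [-1, -1, -1, 1, 1, 1].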
| 35.317073
| 126
| 0.638812
|
794a2d2ed315a11becabfe6193a571510c2ce21f
| 92,489
|
py
|
Python
|
tools/runtime_CLI.py
|
shaiazulay/behavioral-model
|
3b7b1ff13860aa7e2d8c08d18342aae928bd2a97
|
[
"Apache-2.0"
] | null | null | null |
tools/runtime_CLI.py
|
shaiazulay/behavioral-model
|
3b7b1ff13860aa7e2d8c08d18342aae928bd2a97
|
[
"Apache-2.0"
] | null | null | null |
tools/runtime_CLI.py
|
shaiazulay/behavioral-model
|
3b7b1ff13860aa7e2d8c08d18342aae928bd2a97
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Antonin Bas (antonin@barefootnetworks.com)
#
#
import argparse
import cmd
from collections import Counter
import os
import sys
import struct
import json
from functools import wraps
import bmpy_utils as utils
from bm_runtime.standard import Standard
from bm_runtime.standard.ttypes import *
try:
from bm_runtime.simple_pre import SimplePre
except:
pass
try:
from bm_runtime.simple_pre_lag import SimplePreLAG
except:
pass
def enum(type_name, *sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
reverse = dict((value, key) for key, value in enums.iteritems())
@staticmethod
def to_str(x):
return reverse[x]
enums['to_str'] = to_str
@staticmethod
def from_str(x):
return enums[x]
enums['from_str'] = from_str
return type(type_name, (), enums)
PreType = enum('PreType', 'None', 'SimplePre', 'SimplePreLAG')
MeterType = enum('MeterType', 'packets', 'bytes')
TableType = enum('TableType', 'simple', 'indirect', 'indirect_ws')
ResType = enum('ResType', 'table', 'action_prof', 'action', 'meter_array',
'counter_array', 'register_array', 'parse_vset')
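# Editor's note: hedged usage sketch added by an editor; '_Sample' is a
# hypothetical name not used elsewhere in this tool. The enum() factory builds
# a class mapping names to ints, with to_str/from_str helpers:
_Sample = enum('Sample', 'alpha', 'beta')
assert _Sample.alpha == 0 and _Sample.beta == 1
assert _Sample.to_str(1) == 'beta' and _Sample.from_str('alpha') == 0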
def bytes_to_string(byte_array):
form = 'B' * len(byte_array)
return struct.pack(form, *byte_array)
def table_error_name(x):
return TableOperationErrorCode._VALUES_TO_NAMES[x]
def get_parser():
class ActionToPreType(argparse.Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
super(ActionToPreType, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
assert(type(values) is str)
setattr(namespace, self.dest, PreType.from_str(values))
parser = argparse.ArgumentParser(description='BM runtime CLI')
# One port == one device !!!! This is not a multidevice CLI
parser.add_argument('--thrift-port', help='Thrift server port for table updates',
type=int, action="store", default=9090)
parser.add_argument('--thrift-ip', help='Thrift IP address for table updates',
type=str, action="store", default='localhost')
parser.add_argument('--json', help='JSON description of P4 program',
type=str, action="store", required=False)
parser.add_argument('--pre', help='Packet Replication Engine used by target',
type=str, choices=['None', 'SimplePre', 'SimplePreLAG'],
default=PreType.SimplePre, action=ActionToPreType)
return parser
TABLES = {}
ACTION_PROFS = {}
ACTIONS = {}
METER_ARRAYS = {}
COUNTER_ARRAYS = {}
REGISTER_ARRAYS = {}
CUSTOM_CRC_CALCS = {}
PARSE_VSETS = {}
# maps (object type, unique suffix) to object
SUFFIX_LOOKUP_MAP = {}
class MatchType:
EXACT = 0
LPM = 1
TERNARY = 2
VALID = 3
RANGE = 4
@staticmethod
def to_str(x):
return {0: "exact", 1: "lpm", 2: "ternary", 3: "valid", 4: "range"}[x]
@staticmethod
def from_str(x):
return {"exact": 0, "lpm": 1, "ternary": 2, "valid": 3, "range": 4}[x]
class Table:
def __init__(self, name, id_):
self.name = name
self.id_ = id_
self.match_type_ = None
self.actions = {}
self.key = []
self.default_action = None
self.type_ = None
self.support_timeout = False
self.action_prof = None
TABLES[name] = self
def num_key_fields(self):
return len(self.key)
def key_str(self):
return ",\t".join([name + "(" + MatchType.to_str(t) + ", " + str(bw) + ")" for name, t, bw in self.key])
def table_str(self):
ap_str = "implementation={}".format(
"None" if not self.action_prof else self.action_prof.name)
return "{0:30} [{1}, mk={2}]".format(self.name, ap_str, self.key_str())
def get_action(self, action_name):
key = ResType.action, action_name
action = SUFFIX_LOOKUP_MAP.get(key, None)
if action is None or action.name not in self.actions:
return None
return action
class ActionProf:
def __init__(self, name, id_):
self.name = name
self.id_ = id_
self.with_selection = False
self.actions = {}
self.ref_cnt = 0
ACTION_PROFS[name] = self
def action_prof_str(self):
return "{0:30} [{1}]".format(self.name, self.with_selection)
def get_action(self, action_name):
key = ResType.action, action_name
action = SUFFIX_LOOKUP_MAP.get(key, None)
if action is None or action.name not in self.actions:
return None
return action
class Action:
def __init__(self, name, id_):
self.name = name
self.id_ = id_
self.runtime_data = []
ACTIONS[name] = self
def num_params(self):
return len(self.runtime_data)
def runtime_data_str(self):
return ",\t".join([name + "(" + str(bw) + ")" for name, bw in self.runtime_data])
def action_str(self):
return "{0:30} [{1}]".format(self.name, self.runtime_data_str())
class MeterArray:
def __init__(self, name, id_):
self.name = name
self.id_ = id_
self.type_ = None
self.is_direct = None
self.size = None
self.binding = None
self.rate_count = None
METER_ARRAYS[name] = self
def meter_str(self):
return "{0:30} [{1}, {2}]".format(self.name, self.size,
MeterType.to_str(self.type_))
class CounterArray:
def __init__(self, name, id_):
self.name = name
self.id_ = id_
self.is_direct = None
self.size = None
self.binding = None
COUNTER_ARRAYS[name] = self
def counter_str(self):
return "{0:30} [{1}]".format(self.name, self.size)
class RegisterArray:
def __init__(self, name, id_):
self.name = name
self.id_ = id_
self.width = None
self.size = None
REGISTER_ARRAYS[name] = self
def register_str(self):
return "{0:30} [{1}]".format(self.name, self.size)
class ParseVSet:
def __init__(self, name, id_):
self.name = name
self.id_ = id_
self.bitwidth = None
PARSE_VSETS[name] = self
def parse_vset_str(self):
return "{0:30} [compressed bitwidth:{1}]".format(
self.name, self.bitwidth)
def reset_config():
TABLES.clear()
ACTION_PROFS.clear()
ACTIONS.clear()
METER_ARRAYS.clear()
COUNTER_ARRAYS.clear()
REGISTER_ARRAYS.clear()
CUSTOM_CRC_CALCS.clear()
PARSE_VSETS.clear()
SUFFIX_LOOKUP_MAP.clear()
def load_json_str(json_str, architecture_spec=None):
def get_header_type(header_name, j_headers):
for h in j_headers:
if h["name"] == header_name:
return h["header_type"]
assert(0)
def get_field_bitwidth(header_type, field_name, j_header_types):
for h in j_header_types:
if h["name"] != header_type: continue
for t in h["fields"]:
# t can have a third element (field signedness)
f, bw = t[0], t[1]
if f == field_name:
return bw
assert(0)
reset_config()
json_ = json.loads(json_str)
def get_json_key(key):
return json_.get(key, [])
for j_action in get_json_key("actions"):
action = Action(j_action["name"], j_action["id"])
for j_param in j_action["runtime_data"]:
action.runtime_data += [(j_param["name"], j_param["bitwidth"])]
for j_pipeline in get_json_key("pipelines"):
if "action_profiles" in j_pipeline: # new JSON format
for j_aprof in j_pipeline["action_profiles"]:
action_prof = ActionProf(j_aprof["name"], j_aprof["id"])
action_prof.with_selection = "selector" in j_aprof
for j_table in j_pipeline["tables"]:
table = Table(j_table["name"], j_table["id"])
table.match_type = MatchType.from_str(j_table["match_type"])
table.type_ = TableType.from_str(j_table["type"])
table.support_timeout = j_table["support_timeout"]
for action in j_table["actions"]:
table.actions[action] = ACTIONS[action]
if table.type_ in {TableType.indirect, TableType.indirect_ws}:
if "action_profile" in j_table:
action_prof = ACTION_PROFS[j_table["action_profile"]]
else: # for backward compatibility
assert("act_prof_name" in j_table)
action_prof = ActionProf(j_table["act_prof_name"],
table.id_)
action_prof.with_selection = "selector" in j_table
action_prof.actions.update(table.actions)
action_prof.ref_cnt += 1
table.action_prof = action_prof
for j_key in j_table["key"]:
target = j_key["target"]
match_type = MatchType.from_str(j_key["match_type"])
if match_type == MatchType.VALID:
field_name = target + "_valid"
bitwidth = 1
elif target[1] == "$valid$":
field_name = target[0] + "_valid"
bitwidth = 1
else:
field_name = ".".join(target)
header_type = get_header_type(target[0],
json_["headers"])
bitwidth = get_field_bitwidth(header_type, target[1],
json_["header_types"])
table.key += [(field_name, match_type, bitwidth)]
for j_meter in get_json_key("meter_arrays"):
meter_array = MeterArray(j_meter["name"], j_meter["id"])
if "is_direct" in j_meter and j_meter["is_direct"]:
meter_array.is_direct = True
meter_array.binding = j_meter["binding"]
else:
meter_array.is_direct = False
meter_array.size = j_meter["size"]
meter_array.type_ = MeterType.from_str(j_meter["type"])
meter_array.rate_count = j_meter["rate_count"]
for j_counter in get_json_key("counter_arrays"):
counter_array = CounterArray(j_counter["name"], j_counter["id"])
counter_array.is_direct = j_counter["is_direct"]
if counter_array.is_direct:
counter_array.binding = j_counter["binding"]
else:
counter_array.size = j_counter["size"]
for j_register in get_json_key("register_arrays"):
register_array = RegisterArray(j_register["name"], j_register["id"])
register_array.size = j_register["size"]
register_array.width = j_register["bitwidth"]
for j_calc in get_json_key("calculations"):
calc_name = j_calc["name"]
if j_calc["algo"] == "crc16_custom":
CUSTOM_CRC_CALCS[calc_name] = 16
elif j_calc["algo"] == "crc32_custom":
CUSTOM_CRC_CALCS[calc_name] = 32
for j_parse_vset in get_json_key("parse_vsets"):
parse_vset = ParseVSet(j_parse_vset["name"], j_parse_vset["id"])
parse_vset.bitwidth = j_parse_vset["compressed_bitwidth"]
if architecture_spec is not None:
# call architecture specific json parsing code
architecture_spec(json_)
# Builds a dictionary mapping (object type, unique suffix) to the object
# (Table, Action, etc...). In P4_16 the object name is the fully-qualified
# name, which can be quite long, which is why we accept unique suffixes as
# valid identifiers.
# Auto-complete does not support suffixes, only the fully-qualified names,
# but that can be changed in the future if needed.
suffix_count = Counter()
for res_type, res_dict in [
(ResType.table, TABLES), (ResType.action_prof, ACTION_PROFS),
(ResType.action, ACTIONS), (ResType.meter_array, METER_ARRAYS),
(ResType.counter_array, COUNTER_ARRAYS),
(ResType.register_array, REGISTER_ARRAYS),
(ResType.parse_vset, PARSE_VSETS)]:
for name, res in res_dict.items():
suffix = None
for s in reversed(name.split('.')):
suffix = s if suffix is None else s + '.' + suffix
key = (res_type, suffix)
SUFFIX_LOOKUP_MAP[key] = res
suffix_count[key] += 1
for key, c in suffix_count.items():
if c > 1:
del SUFFIX_LOOKUP_MAP[key]
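# Editor's note: hedged illustration, not part of the original tool.
# With a P4_16 program that defines a table named "MyIngress.ipv4_lpm"
# (hypothetical name), the loop above registers both 'ipv4_lpm' and
# 'MyIngress.ipv4_lpm' as keys for the same Table object, so CLI commands may
# use the short suffix; if two objects share a suffix, that suffix is dropped
# and only the fully-qualified names remain usable.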
class UIn_Error(Exception):
def __init__(self, info=""):
self.info = info
def __str__(self):
return self.info
class UIn_ResourceError(UIn_Error):
def __init__(self, res_type, name):
self.res_type = res_type
self.name = name
def __str__(self):
return "Invalid %s name (%s)" % (self.res_type, self.name)
class UIn_MatchKeyError(UIn_Error):
def __init__(self, info=""):
self.info = info
def __str__(self):
return self.info
class UIn_RuntimeDataError(UIn_Error):
def __init__(self, info=""):
self.info = info
def __str__(self):
return self.info
class CLI_FormatExploreError(Exception):
def __init__(self):
pass
class UIn_BadParamError(UIn_Error):
def __init__(self, info=""):
self.info = info
def __str__(self):
return self.info
class UIn_BadIPv4Error(UIn_Error):
def __init__(self):
pass
class UIn_BadIPv6Error(UIn_Error):
def __init__(self):
pass
class UIn_BadMacError(UIn_Error):
def __init__(self):
pass
def ipv4Addr_to_bytes(addr):
if not '.' in addr:
raise CLI_FormatExploreError()
s = addr.split('.')
if len(s) != 4:
raise UIn_BadIPv4Error()
try:
return [int(b) for b in s]
except:
raise UIn_BadIPv4Error()
def macAddr_to_bytes(addr):
if not ':' in addr:
raise CLI_FormatExploreError()
s = addr.split(':')
if len(s) != 6:
raise UIn_BadMacError()
try:
return [int(b, 16) for b in s]
except:
raise UIn_BadMacError()
def ipv6Addr_to_bytes(addr):
from ipaddr import IPv6Address
if not ':' in addr:
raise CLI_FormatExploreError()
try:
ip = IPv6Address(addr)
except:
raise UIn_BadIPv6Error()
try:
return [ord(b) for b in ip.packed]
except:
raise UIn_BadIPv6Error()
def int_to_bytes(i, num):
byte_array = []
while i > 0:
byte_array.append(i % 256)
i = i / 256
num -= 1
if num < 0:
raise UIn_BadParamError("Parameter is too large")
while num > 0:
byte_array.append(0)
num -= 1
byte_array.reverse()
return byte_array
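# Editor's note: hedged illustration added by an editor. int_to_bytes()
# produces a big-endian byte list padded to 'num' bytes; under the Python 2
# interpreter this tool targets:
assert int_to_bytes(0x0A000001, 4) == [10, 0, 0, 1]
assert int_to_bytes(80, 2) == [0, 80]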
def parse_param(input_str, bitwidth):
if bitwidth == 32:
try:
return ipv4Addr_to_bytes(input_str)
except CLI_FormatExploreError:
pass
except UIn_BadIPv4Error:
raise UIn_BadParamError("Invalid IPv4 address")
elif bitwidth == 48:
try:
return macAddr_to_bytes(input_str)
except CLI_FormatExploreError:
pass
except UIn_BadMacError:
raise UIn_BadParamError("Invalid MAC address")
elif bitwidth == 128:
try:
return ipv6Addr_to_bytes(input_str)
except CLI_FormatExploreError:
pass
except UIn_BadIPv6Error:
raise UIn_BadParamError("Invalid IPv6 address")
try:
input_ = int(input_str, 0)
except:
raise UIn_BadParamError(
"Invalid input, could not cast to integer, try in hex with 0x prefix"
)
try:
return int_to_bytes(input_, (bitwidth + 7) / 8)
except UIn_BadParamError:
raise
def parse_runtime_data(action, params):
def parse_param_(field, bw):
try:
return parse_param(field, bw)
except UIn_BadParamError as e:
raise UIn_RuntimeDataError(
"Error while parsing %s - %s" % (field, e)
)
bitwidths = [bw for (_, bw) in action.runtime_data]
byte_array = []
for input_str, bitwidth in zip(params, bitwidths):
byte_array += [bytes_to_string(parse_param_(input_str, bitwidth))]
return byte_array
_match_types_mapping = {
MatchType.EXACT : BmMatchParamType.EXACT,
MatchType.LPM : BmMatchParamType.LPM,
MatchType.TERNARY : BmMatchParamType.TERNARY,
MatchType.VALID : BmMatchParamType.VALID,
MatchType.RANGE : BmMatchParamType.RANGE,
}
def parse_match_key(table, key_fields):
def parse_param_(field, bw):
try:
return parse_param(field, bw)
except UIn_BadParamError as e:
raise UIn_MatchKeyError(
"Error while parsing %s - %s" % (field, e)
)
params = []
match_types = [t for (_, t, _) in table.key]
bitwidths = [bw for (_, _, bw) in table.key]
for idx, field in enumerate(key_fields):
param_type = _match_types_mapping[match_types[idx]]
bw = bitwidths[idx]
if param_type == BmMatchParamType.EXACT:
key = bytes_to_string(parse_param_(field, bw))
param = BmMatchParam(type = param_type,
exact = BmMatchParamExact(key))
elif param_type == BmMatchParamType.LPM:
try:
prefix, length = field.split("/")
except ValueError:
raise UIn_MatchKeyError(
"Invalid LPM value {}, use '/' to separate prefix "
"and length".format(field))
key = bytes_to_string(parse_param_(prefix, bw))
param = BmMatchParam(type = param_type,
lpm = BmMatchParamLPM(key, int(length)))
elif param_type == BmMatchParamType.TERNARY:
try:
key, mask = field.split("&&&")
except ValueError:
raise UIn_MatchKeyError(
"Invalid ternary value {}, use '&&&' to separate key and "
"mask".format(field))
key = bytes_to_string(parse_param_(key, bw))
mask = bytes_to_string(parse_param_(mask, bw))
if len(mask) != len(key):
raise UIn_MatchKeyError(
"Key and mask have different lengths in expression %s" % field
)
param = BmMatchParam(type = param_type,
ternary = BmMatchParamTernary(key, mask))
elif param_type == BmMatchParamType.VALID:
key = bool(int(field))
param = BmMatchParam(type = param_type,
valid = BmMatchParamValid(key))
elif param_type == BmMatchParamType.RANGE:
try:
start, end = field.split("->")
except ValueError:
raise UIn_MatchKeyError(
"Invalid range value {}, use '->' to separate range start "
"and range end".format(field))
start = bytes_to_string(parse_param_(start, bw))
end = bytes_to_string(parse_param_(end, bw))
if len(start) != len(end):
raise UIn_MatchKeyError(
"start and end have different lengths in expression %s" % field
)
if start > end:
raise UIn_MatchKeyError(
"start is less than end in expression %s" % field
)
param = BmMatchParam(type = param_type,
range = BmMatchParamRange(start, end))
else:
assert(0)
params.append(param)
return params
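# Editor's note: hedged illustration, not part of the original tool. The
# textual formats accepted above, per match type, are:
#   exact    "10.0.0.1" or "0xff" or "77"
#   lpm      "10.0.0.0/16"            (prefix '/' length)
#   ternary  "0x0800&&&0xffff"        (key '&&&' mask)
#   valid    "1" or "0"
#   range    "1024->2048"             (start '->' end)
# IPv4, MAC and IPv6 notations are tried first for 32-, 48- and 128-bit fields.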
def printable_byte_str(s):
return ":".join("{:02x}".format(ord(c)) for c in s)
def BmMatchParam_to_str(self):
return BmMatchParamType._VALUES_TO_NAMES[self.type] + "-" +\
(self.exact.to_str() if self.exact else "") +\
(self.lpm.to_str() if self.lpm else "") +\
(self.ternary.to_str() if self.ternary else "") +\
(self.valid.to_str() if self.valid else "") +\
(self.range.to_str() if self.range else "")
def BmMatchParamExact_to_str(self):
return printable_byte_str(self.key)
def BmMatchParamLPM_to_str(self):
return printable_byte_str(self.key) + "/" + str(self.prefix_length)
def BmMatchParamTernary_to_str(self):
return printable_byte_str(self.key) + " &&& " + printable_byte_str(self.mask)
def BmMatchParamValid_to_str(self):
return ""
def BmMatchParamRange_to_str(self):
return printable_byte_str(self.start) + " -> " + printable_byte_str(self.end_)
BmMatchParam.to_str = BmMatchParam_to_str
BmMatchParamExact.to_str = BmMatchParamExact_to_str
BmMatchParamLPM.to_str = BmMatchParamLPM_to_str
BmMatchParamTernary.to_str = BmMatchParamTernary_to_str
BmMatchParamValid.to_str = BmMatchParamValid_to_str
BmMatchParamRange.to_str = BmMatchParamRange_to_str
def parse_pvs_value(input_str, bitwidth):
try:
input_ = int(input_str, 0)
except:
raise UIn_BadParamError(
"Invalid input, could not cast to integer, try in hex with 0x prefix"
)
max_v = (1 << bitwidth) - 1
# bmv2 does not perform this check when receiving the value (and does not
# truncate values which are too large), so we perform this check
# client-side.
if input_ > max_v:
raise UIn_BadParamError(
"Input is too large, it should fit within {} bits".format(bitwidth))
try:
v = int_to_bytes(input_, (bitwidth + 7) / 8)
except UIn_BadParamError:
# should not happen because of check above
raise
return bytes_to_string(v)
# services is [(service_name, client_class), ...]
def thrift_connect(thrift_ip, thrift_port, services):
return utils.thrift_connect(thrift_ip, thrift_port, services)
def handle_bad_input(f):
@wraps(f)
def handle(*args, **kwargs):
try:
return f(*args, **kwargs)
except UIn_MatchKeyError as e:
print "Invalid match key:", e
except UIn_RuntimeDataError as e:
print "Invalid runtime data:", e
except UIn_Error as e:
print "Error:", e
except InvalidTableOperation as e:
error = TableOperationErrorCode._VALUES_TO_NAMES[e.code]
print "Invalid table operation ({})".format(error)
except InvalidCounterOperation as e:
error = CounterOperationErrorCode._VALUES_TO_NAMES[e.code]
print "Invalid counter operation ({})".format(error)
except InvalidMeterOperation as e:
error = MeterOperationErrorCode._VALUES_TO_NAMES[e.code]
print "Invalid meter operation ({})".format(error)
except InvalidRegisterOperation as e:
error = RegisterOperationErrorCode._VALUES_TO_NAMES[e.code]
print "Invalid register operation ({})".format(error)
except InvalidLearnOperation as e:
error = LearnOperationErrorCode._VALUES_TO_NAMES[e.code]
print "Invalid learn operation ({})".format(error)
except InvalidSwapOperation as e:
error = SwapOperationErrorCode._VALUES_TO_NAMES[e.code]
print "Invalid swap operation ({})".format(error)
except InvalidDevMgrOperation as e:
error = DevMgrErrorCode._VALUES_TO_NAMES[e.code]
print "Invalid device manager operation ({})".format(error)
except InvalidCrcOperation as e:
error = CrcErrorCode._VALUES_TO_NAMES[e.code]
print "Invalid crc operation ({})".format(error)
except InvalidParseVSetOperation as e:
error = ParseVSetOperationErrorCode._VALUES_TO_NAMES[e.code]
print "Invalid parser value set operation ({})".format(error)
return handle
def handle_bad_input_mc(f):
@wraps(f)
def handle(*args, **kwargs):
pre_type = args[0].pre_type
if pre_type == PreType.None:
return handle_bad_input(f)(*args, **kwargs)
EType = {
PreType.SimplePre : SimplePre.InvalidMcOperation,
PreType.SimplePreLAG : SimplePreLAG.InvalidMcOperation
}[pre_type]
Codes = {
PreType.SimplePre : SimplePre.McOperationErrorCode,
PreType.SimplePreLAG : SimplePreLAG.McOperationErrorCode
}[pre_type]
try:
return handle_bad_input(f)(*args, **kwargs)
except EType as e:
error = Codes._VALUES_TO_NAMES[e.code]
print "Invalid PRE operation (%s)" % error
return handle
def deprecated_act_prof(substitute, with_selection=False,
strictly_deprecated=True):
# need two levels here because our decorator takes arguments
def deprecated_act_prof_(f):
# not sure if this is the right place for it, if I want it to play nice
# with @wraps
if strictly_deprecated:
f.__doc__ = "[DEPRECATED!] " + f.__doc__
f.__doc__ += "\nUse '{}' instead".format(substitute)
@wraps(f)
def wrapper(obj, line):
substitute_fn = getattr(obj, "do_" + substitute)
args = line.split()
obj.at_least_n_args(args, 1)
table_name = args[0]
table = obj.get_res("table", table_name, ResType.table)
if with_selection:
obj.check_indirect_ws(table)
else:
obj.check_indirect(table)
assert(table.action_prof is not None)
assert(table.action_prof.ref_cnt > 0)
if strictly_deprecated and table.action_prof.ref_cnt > 1:
raise UIn_Error(
"Legacy command does not work with shared action profiles")
args[0] = table.action_prof.name
if strictly_deprecated:
# writing to stderr in case someone is parsing stdout
sys.stderr.write(
"This is a deprecated command, use '{}' instead\n".format(
substitute))
return substitute_fn(" ".join(args))
# we add the handle_bad_input decorator "programatically"
return handle_bad_input(wrapper)
return deprecated_act_prof_
# thrift does not support unsigned integers
def hex_to_i16(h):
x = int(h, 0)
if (x > 0xFFFF):
raise UIn_Error("Integer cannot fit within 16 bits")
if (x > 0x7FFF): x-= 0x10000
return x
def i16_to_hex(h):
x = int(h)
if (x & 0x8000): x+= 0x10000
return x
def hex_to_i32(h):
x = int(h, 0)
if (x > 0xFFFFFFFF):
raise UIn_Error("Integer cannot fit within 32 bits")
if (x > 0x7FFFFFFF): x-= 0x100000000
return x
def i32_to_hex(h):
x = int(h)
if (x & 0x80000000): x+= 0x100000000
return x
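# Editor's note: hedged illustration added by an editor. The helpers above
# fold unsigned CLI input into the signed integers Thrift expects, and back:
assert hex_to_i16('0xFFFF') == -1 and i16_to_hex(-1) == 0xFFFF
assert hex_to_i32('0x80000000') == -0x80000000 and i32_to_hex(-1) == 0xFFFFFFFF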
def parse_bool(s):
if s == "true" or s == "True":
return True
if s == "false" or s == "False":
return False
try:
s = int(s, 0)
return bool(s)
except:
pass
raise UIn_Error("Invalid bool parameter")
def hexstr(v):
return "".join("{:02x}".format(ord(c)) for c in v)
class RuntimeAPI(cmd.Cmd):
prompt = 'RuntimeCmd: '
intro = "Control utility for runtime P4 table manipulation"
@staticmethod
def get_thrift_services(pre_type):
services = [("standard", Standard.Client)]
if pre_type == PreType.SimplePre:
services += [("simple_pre", SimplePre.Client)]
elif pre_type == PreType.SimplePreLAG:
services += [("simple_pre_lag", SimplePreLAG.Client)]
else:
services += [(None, None)]
return services
def __init__(self, pre_type, standard_client, mc_client=None):
cmd.Cmd.__init__(self)
self.client = standard_client
self.mc_client = mc_client
self.pre_type = pre_type
def do_greet(self, line):
print "hello"
def do_EOF(self, line):
print
return True
def do_shell(self, line):
"Run a shell command"
output = os.popen(line).read()
print output
def get_res(self, type_name, name, res_type):
key = res_type, name
if key not in SUFFIX_LOOKUP_MAP:
raise UIn_ResourceError(type_name, name)
return SUFFIX_LOOKUP_MAP[key]
def at_least_n_args(self, args, n):
if len(args) < n:
raise UIn_Error("Insufficient number of args")
def exactly_n_args(self, args, n):
if len(args) != n:
raise UIn_Error(
"Wrong number of args, expected %d but got %d" % (n, len(args))
)
def _complete_res(self, array, text):
res = sorted(array.keys())
if not text:
return res
return [r for r in res if r.startswith(text)]
@handle_bad_input
def do_show_tables(self, line):
"List tables defined in the P4 program: show_tables"
self.exactly_n_args(line.split(), 0)
for table_name in sorted(TABLES):
print TABLES[table_name].table_str()
@handle_bad_input
def do_show_actions(self, line):
"List actions defined in the P4 program: show_actions"
self.exactly_n_args(line.split(), 0)
for action_name in sorted(ACTIONS):
print ACTIONS[action_name].action_str()
def _complete_tables(self, text):
return self._complete_res(TABLES, text)
def _complete_act_profs(self, text):
return self._complete_res(ACTION_PROFS, text)
@handle_bad_input
def do_table_show_actions(self, line):
"List one table's actions as per the P4 program: table_show_actions <table_name>"
args = line.split()
self.exactly_n_args(args, 1)
table_name = args[0]
table = self.get_res("table", table_name, ResType.table)
for action_name in sorted(table.actions):
print ACTIONS[action_name].action_str()
def complete_table_show_actions(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
def do_table_info(self, line):
"Show info about a table: table_info <table_name>"
args = line.split()
self.exactly_n_args(args, 1)
table_name = args[0]
table = self.get_res("table", table_name, ResType.table)
print table.table_str()
print "*" * 80
for action_name in sorted(table.actions):
print ACTIONS[action_name].action_str()
def complete_table_info(self, text, line, start_index, end_index):
return self._complete_tables(text)
# used for tables but also for action profiles
def _complete_actions(self, text, table_name = None, res = TABLES):
if not table_name:
actions = sorted(ACTIONS.keys())
elif table_name not in res:
return []
actions = sorted(res[table_name].actions.keys())
if not text:
return actions
return [a for a in actions if a.startswith(text)]
def _complete_table_and_action(self, text, line):
tables = sorted(TABLES.keys())
args = line.split()
args_cnt = len(args)
if args_cnt == 1 and not text:
return self._complete_tables(text)
if args_cnt == 2 and text:
return self._complete_tables(text)
table_name = args[1]
if args_cnt == 2 and not text:
return self._complete_actions(text, table_name)
if args_cnt == 3 and text:
return self._complete_actions(text, table_name)
return []
def _complete_act_prof_and_action(self, text, line):
act_profs = sorted(ACTION_PROFS.keys())
args = line.split()
args_cnt = len(args)
if args_cnt == 1 and not text:
return self._complete_act_profs(text)
if args_cnt == 2 and text:
return self._complete_act_profs(text)
act_prof_name = args[1]
if args_cnt == 2 and not text:
return self._complete_actions(text, act_prof_name, ACTION_PROFS)
if args_cnt == 3 and text:
return self._complete_actions(text, act_prof_name, ACTION_PROFS)
return []
# for debugging
def print_set_default(self, table_name, action_name, runtime_data):
print "Setting default action of", table_name
print "{0:20} {1}".format("action:", action_name)
print "{0:20} {1}".format(
"runtime data:",
"\t".join(printable_byte_str(d) for d in runtime_data)
)
@handle_bad_input
def do_table_set_default(self, line):
"Set default action for a match table: table_set_default <table name> <action name> <action parameters>"
args = line.split()
self.at_least_n_args(args, 2)
table_name, action_name = args[0], args[1]
table = self.get_res("table", table_name, ResType.table)
action = table.get_action(action_name)
if action is None:
raise UIn_Error(
"Table %s has no action %s" % (table_name, action_name)
)
if len(args[2:]) != action.num_params():
raise UIn_Error(
"Action %s needs %d parameters" % (action_name, action.num_params())
)
runtime_data = parse_runtime_data(action, args[2:])
self.print_set_default(table_name, action_name, runtime_data)
self.client.bm_mt_set_default_action(0, table.name, action.name, runtime_data)
def complete_table_set_default(self, text, line, start_index, end_index):
return self._complete_table_and_action(text, line)
@handle_bad_input
def do_table_reset_default(self, line):
"Reset default entry for a match table: table_reset_default <table name>"
args = line.split()
self.exactly_n_args(args, 1)
table_name = args[0]
table = self.get_res("table", table_name, ResType.table)
self.client.bm_mt_reset_default_entry(0, table.name)
def complete_table_reset_default(self, text, line, start_index, end_index):
return self._complete_tables(text)
def parse_runtime_data(self, action, action_params):
if len(action_params) != action.num_params():
raise UIn_Error(
"Action %s needs %d parameters" % (action.name, action.num_params())
)
return parse_runtime_data(action, action_params)
# for debugging
def print_table_add(self, match_key, action_name, runtime_data):
print "{0:20} {1}".format(
"match key:",
"\t".join(d.to_str() for d in match_key)
)
print "{0:20} {1}".format("action:", action_name)
print "{0:20} {1}".format(
"runtime data:",
"\t".join(printable_byte_str(d) for d in runtime_data)
)
@handle_bad_input
def do_table_num_entries(self, line):
"Return the number of entries in a match table (direct or indirect): table_num_entries <table name>"
args = line.split()
self.exactly_n_args(args, 1)
table_name = args[0]
table = self.get_res("table", table_name, ResType.table)
print self.client.bm_mt_get_num_entries(0, table.name)
def complete_table_num_entries(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
def do_table_clear(self, line):
"Clear all entries in a match table (direct or indirect), but not the default entry: table_clear <table name>"
args = line.split()
self.exactly_n_args(args, 1)
table_name = args[0]
table = self.get_res("table", table_name, ResType.table)
self.client.bm_mt_clear_entries(0, table.name, False)
def complete_table_clear(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
def do_table_add(self, line):
"Add entry to a match table: table_add <table name> <action name> <match fields> => <action parameters> [priority]"
args = line.split()
self.at_least_n_args(args, 3)
table_name, action_name = args[0], args[1]
table = self.get_res("table", table_name, ResType.table)
action = table.get_action(action_name)
if action is None:
raise UIn_Error(
"Table %s has no action %s" % (table_name, action_name)
)
if table.match_type in {MatchType.TERNARY, MatchType.RANGE}:
try:
priority = int(args.pop(-1))
except:
raise UIn_Error(
"Table is ternary, but could not extract a valid priority from args"
)
else:
priority = 0
for idx, input_ in enumerate(args[2:]):
if input_ == "=>": break
idx += 2
match_key = args[2:idx]
action_params = args[idx+1:]
if len(match_key) != table.num_key_fields():
raise UIn_Error(
"Table %s needs %d key fields" % (table_name, table.num_key_fields())
)
runtime_data = self.parse_runtime_data(action, action_params)
match_key = parse_match_key(table, match_key)
print "Adding entry to", MatchType.to_str(table.match_type), "match table", table_name
# disable, maybe a verbose CLI option?
self.print_table_add(match_key, action_name, runtime_data)
entry_handle = self.client.bm_mt_add_entry(
0, table.name, match_key, action.name, runtime_data,
BmAddEntryOptions(priority = priority)
)
print "Entry has been added with handle", entry_handle
def complete_table_add(self, text, line, start_index, end_index):
return self._complete_table_and_action(text, line)
@handle_bad_input
def do_table_set_timeout(self, line):
"Set a timeout in ms for a given entry; the table has to support timeouts: table_set_timeout <table_name> <entry handle> <timeout (ms)>"
args = line.split()
self.exactly_n_args(args, 3)
table_name = args[0]
table = self.get_res("table", table_name, ResType.table)
if not table.support_timeout:
raise UIn_Error(
"Table {} does not support entry timeouts".format(table_name))
try:
entry_handle = int(args[1])
except:
raise UIn_Error("Bad format for entry handle")
try:
timeout_ms = int(args[2])
except:
raise UIn_Error("Bad format for timeout")
print "Setting a", timeout_ms, "ms timeout for entry", entry_handle
self.client.bm_mt_set_entry_ttl(0, table.name, entry_handle, timeout_ms)
def complete_table_set_timeout(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
def do_table_modify(self, line):
"Add entry to a match table: table_modify <table name> <action name> <entry handle> [action parameters]"
args = line.split()
self.at_least_n_args(args, 3)
table_name, action_name = args[0], args[1]
table = self.get_res("table", table_name, ResType.table)
action = table.get_action(action_name)
if action is None:
raise UIn_Error(
"Table %s has no action %s" % (table_name, action_name)
)
try:
entry_handle = int(args[2])
except:
raise UIn_Error("Bad format for entry handle")
action_params = args[3:]
if len(args) > 3 and args[3] == "=>":
# be more tolerant
action_params = args[4:]
runtime_data = self.parse_runtime_data(action, action_params)
print "Modifying entry", entry_handle, "for", MatchType.to_str(table.match_type), "match table", table_name
entry_handle = self.client.bm_mt_modify_entry(
0, table.name, entry_handle, action.name, runtime_data
)
def complete_table_modify(self, text, line, start_index, end_index):
return self._complete_table_and_action(text, line)
@handle_bad_input
def do_table_delete(self, line):
"Delete entry from a match table: table_delete <table name> <entry handle>"
args = line.split()
self.exactly_n_args(args, 2)
table_name = args[0]
table = self.get_res("table", table_name, ResType.table)
try:
entry_handle = int(args[1])
except:
raise UIn_Error("Bad format for entry handle")
print "Deleting entry", entry_handle, "from", table_name
self.client.bm_mt_delete_entry(0, table.name, entry_handle)
def complete_table_delete(self, text, line, start_index, end_index):
return self._complete_tables(text)
def check_indirect(self, table):
if table.type_ not in {TableType.indirect, TableType.indirect_ws}:
raise UIn_Error("Cannot run this command on non-indirect table")
def check_indirect_ws(self, table):
if table.type_ != TableType.indirect_ws:
raise UIn_Error(
"Cannot run this command on non-indirect table,"\
" or on indirect table with no selector")
def check_act_prof_ws(self, act_prof):
if not act_prof.with_selection:
raise UIn_Error(
"Cannot run this command on an action profile without selector")
@handle_bad_input
def do_act_prof_create_member(self, line):
"Add a member to an action profile: act_prof_create_member <action profile name> <action_name> [action parameters]"
args = line.split()
self.at_least_n_args(args, 2)
act_prof_name, action_name = args[0], args[1]
act_prof = self.get_res("action profile", act_prof_name,
ResType.action_prof)
action = act_prof.get_action(action_name)
if action is None:
raise UIn_Error("Action profile '{}' has no action '{}'".format(
act_prof_name, action_name))
action_params = args[2:]
runtime_data = self.parse_runtime_data(action, action_params)
mbr_handle = self.client.bm_mt_act_prof_add_member(
0, act_prof.name, action.name, runtime_data)
print "Member has been created with handle", mbr_handle
def complete_act_prof_create_member(self, text, line, start_index, end_index):
return self._complete_act_prof_and_action(text, line)
@deprecated_act_prof("act_prof_create_member")
def do_table_indirect_create_member(self, line):
"Add a member to an indirect match table: table_indirect_create_member <table name> <action_name> [action parameters]"
pass
def complete_table_indirect_create_member(self, text, line, start_index, end_index):
return self._complete_table_and_action(text, line)
@handle_bad_input
def do_act_prof_delete_member(self, line):
"Delete a member in an action profile: act_prof_delete_member <action profile name> <member handle>"
args = line.split()
self.exactly_n_args(args, 2)
act_prof_name = args[0]
act_prof = self.get_res("action profile", act_prof_name,
ResType.action_prof)
try:
mbr_handle = int(args[1])
except:
raise UIn_Error("Bad format for member handle")
self.client.bm_mt_act_prof_delete_member(0, act_prof.name, mbr_handle)
def complete_act_prof_delete_member(self, text, line, start_index, end_index):
return self._complete_act_profs(text)
@deprecated_act_prof("act_prof_delete_member")
def do_table_indirect_delete_member(self, line):
"Delete a member in an indirect match table: table_indirect_delete_member <table name> <member handle>"
pass
def complete_table_indirect_delete_member(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
def do_act_prof_modify_member(self, line):
"Modify member in an action profile: act_prof_modify_member <action profile name> <action_name> <member_handle> [action parameters]"
args = line.split()
self.at_least_n_args(args, 3)
act_prof_name, action_name = args[0], args[1]
act_prof = self.get_res("action profile", act_prof_name,
ResType.action_prof)
action = act_prof.get_action(action_name)
if action is None:
raise UIn_Error("Action profile '{}' has no action '{}'".format(
act_prof_name, action_name))
try:
mbr_handle = int(args[2])
except:
raise UIn_Error("Bad format for member handle")
action_params = args[3:]
if len(args) > 3 and args[3] == "=>":
# be more tolerant
action_params = args[4:]
runtime_data = self.parse_runtime_data(action, action_params)
mbr_handle = self.client.bm_mt_act_prof_modify_member(
0, act_prof.name, mbr_handle, action.name, runtime_data)
def complete_act_prof_modify_member(self, text, line, start_index, end_index):
return self._complete_act_prof_and_action(text, line)
@deprecated_act_prof("act_prof_modify_member")
def do_table_indirect_modify_member(self, line):
"Modify member in an indirect match table: table_indirect_modify_member <table name> <action_name> <member_handle> [action parameters]"
pass
def complete_table_indirect_modify_member(self, text, line, start_index, end_index):
return self._complete_table_and_action(text, line)
def indirect_add_common(self, line, ws=False):
args = line.split()
self.at_least_n_args(args, 2)
table_name = args[0]
table = self.get_res("table", table_name, ResType.table)
if ws:
self.check_indirect_ws(table)
else:
self.check_indirect(table)
if table.match_type in {MatchType.TERNARY, MatchType.RANGE}:
try:
priority = int(args.pop(-1))
except:
raise UIn_Error(
"Table is ternary, but could not extract a valid priority from args"
)
else:
priority = 0
for idx, input_ in enumerate(args[1:]):
if input_ == "=>": break
idx += 1
match_key = args[1:idx]
if len(args) != (idx + 2):
raise UIn_Error("Invalid arguments, could not find handle")
handle = args[idx+1]
try:
handle = int(handle)
except:
raise UIn_Error("Bad format for handle")
match_key = parse_match_key(table, match_key)
print "Adding entry to indirect match table", table.name
return table.name, match_key, handle, BmAddEntryOptions(priority = priority)
@handle_bad_input
def do_table_indirect_add(self, line):
"Add entry to an indirect match table: table_indirect_add <table name> <match fields> => <member handle> [priority]"
table_name, match_key, handle, options = self.indirect_add_common(line)
entry_handle = self.client.bm_mt_indirect_add_entry(
0, table_name, match_key, handle, options
)
print "Entry has been added with handle", entry_handle
def complete_table_indirect_add(self, text, line, start_index, end_index):
return self._complete_tables(text)
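# Illustrative invocation (not in the original source; the table name, match key
# and member handle below are hypothetical):
#   table_indirect_add dmac_table 00:00:00:00:00:01 => 0
# adds an entry whose key points at member handle 0 of the bound action profile.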
@handle_bad_input
def do_table_indirect_add_with_group(self, line):
"Add entry to an indirect match table: table_indirect_add <table name> <match fields> => <group handle> [priority]"
table_name, match_key, handle, options = self.indirect_add_common(line, ws=True)
entry_handle = self.client.bm_mt_indirect_ws_add_entry(
0, table_name, match_key, handle, options
)
print "Entry has been added with handle", entry_handle
def complete_table_indirect_add_with_group(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
def do_table_indirect_delete(self, line):
"Delete entry from an indirect match table: table_indirect_delete <table name> <entry handle>"
args = line.split()
self.exactly_n_args(args, 2)
table_name = args[0]
table = self.get_res("table", table_name, ResType.table)
self.check_indirect(table)
try:
entry_handle = int(args[1])
except:
raise UIn_Error("Bad format for entry handle")
print "Deleting entry", entry_handle, "from", table_name
self.client.bm_mt_indirect_delete_entry(0, table.name, entry_handle)
def complete_table_indirect_delete(self, text, line, start_index, end_index):
return self._complete_tables(text)
def indirect_set_default_common(self, line, ws=False):
args = line.split()
self.exactly_n_args(args, 2)
table_name = args[0]
table = self.get_res("table", table_name, ResType.table)
if ws:
self.check_indirect_ws(table)
else:
self.check_indirect(table)
try:
handle = int(args[1])
except:
raise UIn_Error("Bad format for handle")
return table.name, handle
@handle_bad_input
def do_table_indirect_set_default(self, line):
"Set default member for indirect match table: table_indirect_set_default <table name> <member handle>"
table_name, handle = self.indirect_set_default_common(line)
self.client.bm_mt_indirect_set_default_member(0, table_name, handle)
def complete_table_indirect_set_default(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
def do_table_indirect_set_default_with_group(self, line):
"Set default group for indirect match table: table_indirect_set_default <table name> <group handle>"
table_name, handle = self.indirect_set_default_common(line, ws=True)
self.client.bm_mt_indirect_ws_set_default_group(0, table_name, handle)
def complete_table_indirect_set_default_with_group(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
def do_table_indirect_reset_default(self, line):
"Reset default entry for indirect match table: table_indirect_reset_default <table name>"
args = line.split()
self.exactly_n_args(args, 1)
table_name = args[0]
table = self.get_res("table", table_name, ResType.table)
self.client.bm_mt_indirect_reset_default_entry(0, table.name)
def complete_table_indirect_reset_default(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
def do_act_prof_create_group(self, line):
"Add a group to an action pofile: act_prof_create_group <action profile name>"
args = line.split()
self.exactly_n_args(args, 1)
act_prof_name = args[0]
act_prof = self.get_res("action profile", act_prof_name,
ResType.action_prof)
self.check_act_prof_ws(act_prof)
grp_handle = self.client.bm_mt_act_prof_create_group(0, act_prof.name)
print "Group has been created with handle", grp_handle
def complete_act_prof_create_group(self, text, line, start_index, end_index):
return self._complete_act_profs(text)
@deprecated_act_prof("act_prof_create_group", with_selection=True)
def do_table_indirect_create_group(self, line):
"Add a group to an indirect match table: table_indirect_create_group <table name>"
pass
def complete_table_indirect_create_group(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
def do_act_prof_delete_group(self, line):
"Delete a group from an action profile: act_prof_delete_group <action profile name> <group handle>"
args = line.split()
self.exactly_n_args(args, 2)
act_prof_name = args[0]
act_prof = self.get_res("action profile", act_prof_name,
ResType.action_prof)
self.check_act_prof_ws(act_prof)
try:
grp_handle = int(args[1])
except:
raise UIn_Error("Bad format for group handle")
self.client.bm_mt_act_prof_delete_group(0, act_prof.name, grp_handle)
def complete_act_prof_delete_group(self, text, line, start_index, end_index):
return self._complete_act_profs(text)
@deprecated_act_prof("act_prof_delete_group", with_selection=True)
def do_table_indirect_delete_group(self, line):
"Delete a group: table_indirect_delete_group <table name> <group handle>"
pass
def complete_table_indirect_delete_group(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
def do_act_prof_add_member_to_group(self, line):
"Add member to group in an action profile: act_prof_add_member_to_group <action profile name> <member handle> <group handle>"
args = line.split()
self.exactly_n_args(args, 3)
act_prof_name = args[0]
act_prof = self.get_res("action profile", act_prof_name,
ResType.action_prof)
self.check_act_prof_ws(act_prof)
try:
mbr_handle = int(args[1])
except:
raise UIn_Error("Bad format for member handle")
try:
grp_handle = int(args[2])
except:
raise UIn_Error("Bad format for group handle")
self.client.bm_mt_act_prof_add_member_to_group(
0, act_prof.name, mbr_handle, grp_handle)
def complete_act_prof_add_member_to_group(self, text, line, start_index, end_index):
return self._complete_act_profs(text)
@deprecated_act_prof("act_prof_add_member_to_group", with_selection=True)
def do_table_indirect_add_member_to_group(self, line):
"Add member to group: table_indirect_add_member_to_group <table name> <member handle> <group handle>"
pass
def complete_table_indirect_add_member_to_group(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
def do_act_prof_remove_member_from_group(self, line):
"Remove member from group in action profile: act_prof_remove_member_from_group <action profile name> <member handle> <group handle>"
args = line.split()
self.exactly_n_args(args, 3)
act_prof_name = args[0]
act_prof = self.get_res("action profile", act_prof_name,
ResType.action_prof)
self.check_act_prof_ws(act_prof)
try:
mbr_handle = int(args[1])
except:
raise UIn_Error("Bad format for member handle")
try:
grp_handle = int(args[2])
except:
raise UIn_Error("Bad format for group handle")
self.client.bm_mt_act_prof_remove_member_from_group(
0, act_prof.name, mbr_handle, grp_handle)
def complete_act_prof_remove_member_from_group(self, text, line, start_index, end_index):
return self._complete_act_profs(text)
@deprecated_act_prof("act_prof_remove_member_from_group", with_selection=True)
def do_table_indirect_remove_member_from_group(self, line):
"Remove member from group: table_indirect_remove_member_from_group <table name> <member handle> <group handle>"
pass
def complete_table_indirect_remove_member_from_group(self, text, line, start_index, end_index):
return self._complete_tables(text)
def check_has_pre(self):
if self.pre_type == PreType.None:
raise UIn_Error(
"Cannot execute this command without packet replication engine"
)
def get_mgrp(self, s):
try:
return int(s)
except:
raise UIn_Error("Bad format for multicast group id")
@handle_bad_input_mc
def do_mc_mgrp_create(self, line):
"Create multicast group: mc_mgrp_create <group id>"
self.check_has_pre()
args = line.split()
self.exactly_n_args(args, 1)
mgrp = self.get_mgrp(args[0])
print "Creating multicast group", mgrp
mgrp_hdl = self.mc_client.bm_mc_mgrp_create(0, mgrp)
assert(mgrp == mgrp_hdl)
@handle_bad_input_mc
def do_mc_mgrp_destroy(self, line):
"Destroy multicast group: mc_mgrp_destroy <group id>"
self.check_has_pre()
args = line.split()
self.exactly_n_args(args, 1)
mgrp = self.get_mgrp(args[0])
print "Destroying multicast group", mgrp
self.mc_client.bm_mc_mgrp_destroy(0, mgrp)
def ports_to_port_map_str(self, ports, description="port"):
last_port_num = 0
port_map_str = ""
ports_int = []
for port_num_str in ports:
try:
port_num = int(port_num_str)
except:
raise UIn_Error("'%s' is not a valid %s number"
"" % (port_num_str, description))
if port_num < 0:
raise UIn_Error("'%s' is not a valid %s number"
"" % (port_num_str, description))
ports_int.append(port_num)
ports_int.sort()
for port_num in ports_int:
if port_num == (last_port_num - 1):
raise UIn_Error("Found duplicate %s number '%s'"
"" % (description, port_num))
port_map_str += "0" * (port_num - last_port_num) + "1"
last_port_num = port_num + 1
return port_map_str[::-1]
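# Illustrative example (derived from the logic above, not part of the original
# CLI): ports_to_port_map_str(["1", "3"]) returns "1010"; reading the string
# right to left, bit 0 (port 0) is clear, bit 1 (port 1) is set, bit 2 is clear
# and bit 3 (port 3) is set.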
def parse_ports_and_lags(self, args):
ports = []
i = 1
while (i < len(args) and args[i] != '|'):
ports.append(args[i])
i += 1
port_map_str = self.ports_to_port_map_str(ports)
if self.pre_type == PreType.SimplePreLAG:
i += 1
lags = [] if i == len(args) else args[i:]
lag_map_str = self.ports_to_port_map_str(lags, description="lag")
else:
lag_map_str = None
return port_map_str, lag_map_str
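# Illustrative example (not in the original source): with a SimplePreLAG engine,
# parse_ports_and_lags(["5", "1", "2", "|", "3"]) skips args[0] (the rid or node
# handle), returns a port map covering ports 1 and 2 and a lag map covering
# lag 3; without the '|' separator the lag map would be empty, and for other
# PRE types lag_map_str is None.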
@handle_bad_input_mc
def do_mc_node_create(self, line):
"Create multicast node: mc_node_create <rid> <space-separated port list> [ | <space-separated lag list> ]"
self.check_has_pre()
args = line.split()
self.at_least_n_args(args, 1)
try:
rid = int(args[0])
except:
raise UIn_Error("Bad format for rid")
port_map_str, lag_map_str = self.parse_ports_and_lags(args)
if self.pre_type == PreType.SimplePre:
print "Creating node with rid", rid, "and with port map", port_map_str
l1_hdl = self.mc_client.bm_mc_node_create(0, rid, port_map_str)
else:
print "Creating node with rid", rid, ", port map", port_map_str, "and lag map", lag_map_str
l1_hdl = self.mc_client.bm_mc_node_create(0, rid, port_map_str, lag_map_str)
print "node was created with handle", l1_hdl
def get_node_handle(self, s):
try:
return int(s)
except:
raise UIn_Error("Bad format for node handle")
@handle_bad_input_mc
def do_mc_node_update(self, line):
"Update multicast node: mc_node_update <node handle> <space-separated port list> [ | <space-separated lag list> ]"
self.check_has_pre()
args = line.split()
self.at_least_n_args(args, 2)
l1_hdl = self.get_node_handle(args[0])
port_map_str, lag_map_str = self.parse_ports_and_lags(args)
if self.pre_type == PreType.SimplePre:
print "Updating node", l1_hdl, "with port map", port_map_str
self.mc_client.bm_mc_node_update(0, l1_hdl, port_map_str)
else:
print "Updating node", l1_hdl, "with port map", port_map_str, "and lag map", lag_map_str
self.mc_client.bm_mc_node_update(0, l1_hdl, port_map_str, lag_map_str)
@handle_bad_input_mc
def do_mc_node_associate(self, line):
"Associate node to multicast group: mc_node_associate <group handle> <node handle>"
self.check_has_pre()
args = line.split()
self.exactly_n_args(args, 2)
mgrp = self.get_mgrp(args[0])
l1_hdl = self.get_node_handle(args[1])
print "Associating node", l1_hdl, "to multicast group", mgrp
self.mc_client.bm_mc_node_associate(0, mgrp, l1_hdl)
@handle_bad_input_mc
def do_mc_node_dissociate(self, line):
"Dissociate node from multicast group: mc_node_associate <group handle> <node handle>"
self.check_has_pre()
args = line.split()
self.exactly_n_args(args, 2)
mgrp = self.get_mgrp(args[0])
l1_hdl = self.get_node_handle(args[1])
print "Dissociating node", l1_hdl, "from multicast group", mgrp
self.mc_client.bm_mc_node_dissociate(0, mgrp, l1_hdl)
@handle_bad_input_mc
def do_mc_node_destroy(self, line):
"Destroy multicast node: mc_node_destroy <node handle>"
self.check_has_pre()
args = line.split()
self.exactly_n_args(args, 1)
l1_hdl = self.get_node_handle(args[0])
print "Destroying node", l1_hdl
self.mc_client.bm_mc_node_destroy(0, l1_hdl)
@handle_bad_input_mc
def do_mc_set_lag_membership(self, line):
"Set lag membership of port list: mc_set_lag_membership <lag index> <space-separated port list>"
self.check_has_pre()
if self.pre_type != PreType.SimplePreLAG:
raise UIn_Error(
"Cannot execute this command with this type of PRE,"\
" SimplePreLAG is required"
)
args = line.split()
self.at_least_n_args(args, 2)
try:
lag_index = int(args[0])
except:
raise UIn_Error("Bad format for lag index")
port_map_str = self.ports_to_port_map_str(args[1:], description="lag")
print "Setting lag membership:", lag_index, "<-", port_map_str
self.mc_client.bm_mc_set_lag_membership(0, lag_index, port_map_str)
@handle_bad_input_mc
def do_mc_dump(self, line):
"Dump entries in multicast engine"
self.check_has_pre()
json_dump = self.mc_client.bm_mc_get_entries(0)
try:
mc_json = json.loads(json_dump)
except:
print "Exception when retrieving MC entries"
return
l1_handles = {}
for h in mc_json["l1_handles"]:
l1_handles[h["handle"]] = (h["rid"], h["l2_handle"])
l2_handles = {}
for h in mc_json["l2_handles"]:
l2_handles[h["handle"]] = (h["ports"], h["lags"])
print "=========="
print "MC ENTRIES"
for mgrp in mc_json["mgrps"]:
print "**********"
mgid = mgrp["id"]
print "mgrp({})".format(mgid)
for L1h in mgrp["l1_handles"]:
rid, L2h = l1_handles[L1h]
print " -> (L1h={}, rid={})".format(L1h, rid),
ports, lags = l2_handles[L2h]
print "-> (ports=[{}], lags=[{}])".format(
", ".join([str(p) for p in ports]),
", ".join([str(l) for l in lags]))
print "=========="
print "LAGS"
if "lags" in mc_json:
for lag in mc_json["lags"]:
print "lag({})".format(lag["id"]),
print "-> ports=[{}]".format(", ".join([str(p) for p in ports]))
else:
print "None for this PRE type"
print "=========="
@handle_bad_input
def do_load_new_config_file(self, line):
"Load new json config: load_new_config_file <path to .json file>"
args = line.split()
self.exactly_n_args(args, 1)
filename = args[0]
if not os.path.isfile(filename):
raise UIn_Error("Not a valid filename")
print "Loading new Json config"
with open(filename, 'r') as f:
json_str = f.read()
try:
json.loads(json_str)
except:
raise UIn_Error("Not a valid JSON file")
self.client.bm_load_new_config(json_str)
load_json_str(json_str)
@handle_bad_input
def do_swap_configs(self, line):
"Swap the 2 existing configs, need to have called load_new_config_file before"
print "Swapping configs"
self.client.bm_swap_configs()
@handle_bad_input
def do_meter_array_set_rates(self, line):
"Configure rates for an entire meter array: meter_array_set_rates <name> <rate_1>:<burst_1> <rate_2>:<burst_2> ..."
args = line.split()
self.at_least_n_args(args, 1)
meter_name = args[0]
meter = self.get_res("meter", meter_name, ResType.meter_array)
rates = args[1:]
if len(rates) != meter.rate_count:
raise UIn_Error(
"Invalid number of rates, expected %d but got %d"\
% (meter.rate_count, len(rates))
)
new_rates = []
for rate in rates:
try:
r, b = rate.split(':')
r = float(r)
b = int(b)
new_rates.append(BmMeterRateConfig(r, b))
except:
raise UIn_Error("Error while parsing rates")
self.client.bm_meter_array_set_rates(0, meter.name, new_rates)
def complete_meter_array_set_rates(self, text, line, start_index, end_index):
return self._complete_meters(text)
@handle_bad_input
def do_meter_set_rates(self, line):
"Configure rates for a meter: meter_set_rates <name> <index> <rate_1>:<burst_1> <rate_2>:<burst_2> ...\nRate uses units/microsecond and burst uses units where units is bytes or packets"
args = line.split()
self.at_least_n_args(args, 2)
meter_name = args[0]
meter = self.get_res("meter", meter_name, ResType.meter_array)
try:
index = int(args[1])
except:
raise UIn_Error("Bad format for index")
rates = args[2:]
if len(rates) != meter.rate_count:
raise UIn_Error(
"Invalid number of rates, expected %d but got %d"\
% (meter.rate_count, len(rates))
)
new_rates = []
for rate in rates:
try:
r, b = rate.split(':')
r = float(r)
b = int(b)
new_rates.append(BmMeterRateConfig(r, b))
except:
raise UIn_Error("Error while parsing rates")
if meter.is_direct:
table_name = meter.binding
self.client.bm_mt_set_meter_rates(0, table_name, index, new_rates)
else:
self.client.bm_meter_set_rates(0, meter.name, index, new_rates)
def complete_meter_set_rates(self, text, line, start_index, end_index):
return self._complete_meters(text)
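# Illustrative invocation (not in the original source; the meter name, index and
# rate values are hypothetical). For a two-rate meter, each rate is given as
# <units per microsecond>:<burst size>:
#   meter_set_rates my_meter 5 0.00000128:9000 0.0000016:9000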
@handle_bad_input
def do_meter_get_rates(self, line):
"Retrieve rates for a meter: meter_get_rates <name> <index>"
args = line.split()
self.exactly_n_args(args, 2)
meter_name = args[0]
meter = self.get_res("meter", meter_name, ResType.meter_array)
try:
index = int(args[1])
except:
raise UIn_Error("Bad format for index")
# meter.rate_count
if meter.is_direct:
table_name = meter.binding
rates = self.client.bm_mt_get_meter_rates(0, table_name, index)
else:
rates = self.client.bm_meter_get_rates(0, meter.name, index)
if len(rates) != meter.rate_count:
print "WARNING: expected", meter.rate_count, "rates",
print "but only received", len(rates)
for idx, rate in enumerate(rates):
print "{}: info rate = {}, burst size = {}".format(
idx, rate.units_per_micros, rate.burst_size)
def complete_meter_get_rates(self, text, line, start_index, end_index):
return self._complete_meters(text)
def _complete_meters(self, text):
return self._complete_res(METER_ARRAYS, text)
@handle_bad_input
def do_counter_read(self, line):
"Read counter value: counter_read <name> <index>"
args = line.split()
self.exactly_n_args(args, 2)
counter_name = args[0]
counter = self.get_res("counter", counter_name, ResType.counter_array)
index = args[1]
try:
index = int(index)
except:
raise UIn_Error("Bad format for index")
if counter.is_direct:
table_name = counter.binding
print "this is the direct counter for table", table_name
# index = index & 0xffffffff
value = self.client.bm_mt_read_counter(0, table_name, index)
else:
value = self.client.bm_counter_read(0, counter.name, index)
print "%s[%d]= " % (counter_name, index), value
def complete_counter_read(self, text, line, start_index, end_index):
return self._complete_counters(text)
@handle_bad_input
def do_counter_write(self, line):
"Write counter value: counter_write <name> <index> <packets> <bytes>"
args = line.split()
self.exactly_n_args(args, 4)
counter_name = args[0]
counter = self.get_res("counter", counter_name, ResType.counter_array)
index = args[1]
pkts = args[2]
byts = args[3]
try:
index = int(index)
except:
raise UIn_Error("Bad format for index")
try:
pkts = int(pkts)
except:
raise UIn_Error("Bad format for packets")
try:
byts = int(byts)
except:
raise UIn_Error("Bad format for bytes")
if counter.is_direct:
table_name = counter.binding
print "writing to direct counter for table", table_name
value = self.client.bm_mt_write_counter(0, table_name, index, BmCounterValue(packets=pkts, bytes = byts))
else:
self.client.bm_counter_write(0, counter_name, index, BmCounterValue(packets=pkts, bytes = byts))
print "%s[%d] has been updated" % (counter_name, index)
def complete_counter_write(self, text, line, start_index, end_index):
return self._complete_counters(text)
@handle_bad_input
def do_counter_reset(self, line):
"Reset counter: counter_reset <name>"
args = line.split()
self.exactly_n_args(args, 1)
counter_name = args[0]
counter = self.get_res("counter", counter_name, ResType.counter_array)
if counter.is_direct:
table_name = counter.binding
print "this is the direct counter for table", table_name
value = self.client.bm_mt_reset_counters(0, table_name)
else:
value = self.client.bm_counter_reset_all(0, counter.name)
def complete_counter_reset(self, text, line, start_index, end_index):
return self._complete_counters(text)
def _complete_counters(self, text):
return self._complete_res(COUNTER_ARRAYS, text)
@handle_bad_input
def do_register_read(self, line):
"Read register value: register_read <name> [index]"
args = line.split()
self.at_least_n_args(args, 1)
register_name = args[0]
register = self.get_res("register", register_name,
ResType.register_array)
if len(args) > 1:
self.exactly_n_args(args, 2)
index = args[1]
try:
index = int(index)
except:
raise UIn_Error("Bad format for index")
value = self.client.bm_register_read(0, register.name, index)
print "{}[{}]=".format(register_name, index), value
else:
sys.stderr.write("register index omitted, reading entire array\n")
entries = self.client.bm_register_read_all(0, register.name)
print "{}=".format(register_name), ", ".join(
[str(e) for e in entries])
def complete_register_read(self, text, line, start_index, end_index):
return self._complete_registers(text)
@handle_bad_input
def do_register_write(self, line):
"Write register value: register_write <name> <index> <value>"
args = line.split()
self.exactly_n_args(args, 3)
register_name = args[0]
register = self.get_res("register", register_name,
ResType.register_array)
index = args[1]
try:
index = int(index)
except:
raise UIn_Error("Bad format for index")
value = args[2]
try:
value = int(value)
except:
raise UIn_Error("Bad format for value, must be an integer")
self.client.bm_register_write(0, register.name, index, value)
def complete_register_write(self, text, line, start_index, end_index):
return self._complete_registers(text)
@handle_bad_input
def do_register_reset(self, line):
"Reset all the cells in the register array to 0: register_reset <name>"
args = line.split()
self.exactly_n_args(args, 1)
register_name = args[0]
register = self.get_res("register", register_name,
ResType.register_array)
self.client.bm_register_reset(0, register.name)
def complete_register_reset(self, text, line, start_index, end_index):
return self._complete_registers(text)
def _complete_registers(self, text):
return self._complete_res(REGISTER_ARRAYS, text)
def dump_action_and_data(self, action_name, action_data):
print "Action entry: {} - {}".format(
action_name, ", ".join([hexstr(a) for a in action_data]))
def dump_action_entry(self, a_entry):
if a_entry.action_type == BmActionEntryType.NONE:
print "EMPTY"
elif a_entry.action_type == BmActionEntryType.ACTION_DATA:
self.dump_action_and_data(a_entry.action_name, a_entry.action_data)
elif a_entry.action_type == BmActionEntryType.MBR_HANDLE:
print "Index: member({})".format(a_entry.mbr_handle)
elif a_entry.action_type == BmActionEntryType.GRP_HANDLE:
print "Index: group({})".format(a_entry.grp_handle)
def dump_one_member(self, member):
print "Dumping member {}".format(member.mbr_handle)
self.dump_action_and_data(member.action_name, member.action_data)
def dump_members(self, members):
for m in members:
print "**********"
self.dump_one_member(m)
def dump_one_group(self, group):
print "Dumping group {}".format(group.grp_handle)
print "Members: [{}]".format(", ".join(
[str(h) for h in group.mbr_handles]))
def dump_groups(self, groups):
for g in groups:
print "**********"
self.dump_one_group(g)
def dump_one_entry(self, table, entry):
if table.key:
out_name_w = max(20, max([len(t[0]) for t in table.key]))
def dump_exact(p):
return hexstr(p.exact.key)
def dump_lpm(p):
return "{}/{}".format(hexstr(p.lpm.key), p.lpm.prefix_length)
def dump_ternary(p):
return "{} &&& {}".format(hexstr(p.ternary.key),
hexstr(p.ternary.mask))
def dump_range(p):
return "{} -> {}".format(hexstr(p.range.start),
hexstr(p.range.end_))
def dump_valid(p):
return "01" if p.valid.key else "00"
pdumpers = {"exact": dump_exact, "lpm": dump_lpm,
"ternary": dump_ternary, "valid": dump_valid,
"range": dump_range}
print "Dumping entry {}".format(hex(entry.entry_handle))
print "Match key:"
for p, k in zip(entry.match_key, table.key):
assert(k[1] == p.type)
pdumper = pdumpers[MatchType.to_str(p.type)]
print "* {0:{w}}: {1:10}{2}".format(
k[0], MatchType.to_str(p.type).upper(),
pdumper(p), w=out_name_w)
if entry.options.priority >= 0:
print "Priority: {}".format(entry.options.priority)
self.dump_action_entry(entry.action_entry)
if entry.life is not None:
print "Life: {}ms since hit, timeout is {}ms".format(
entry.life.time_since_hit_ms, entry.life.timeout_ms)
@handle_bad_input
def do_table_dump_entry(self, line):
"Display some information about a table entry: table_dump_entry <table name> <entry handle>"
args = line.split()
self.exactly_n_args(args, 2)
table_name = args[0]
table = self.get_res("table", table_name, ResType.table)
try:
entry_handle = int(args[1])
except:
raise UIn_Error("Bad format for entry handle")
entry = self.client.bm_mt_get_entry(0, table.name, entry_handle)
self.dump_one_entry(table, entry)
def complete_table_dump_entry(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
def do_act_prof_dump_member(self, line):
"Display some information about a member: act_prof_dump_member <action profile name> <member handle>"
args = line.split()
self.exactly_n_args(args, 2)
act_prof_name = args[0]
act_prof = self.get_res("action profile", act_prof_name,
ResType.action_prof)
try:
mbr_handle = int(args[1])
except:
raise UIn_Error("Bad format for member handle")
member = self.client.bm_mt_act_prof_get_member(
0, act_prof.name, mbr_handle)
self.dump_one_member(member)
def complete_act_prof_dump_member(self, text, line, start_index, end_index):
return self._complete_act_profs(text)
# notice the strictly_deprecated=False; I don't consider this command to be
# strictly deprecated because it can be convenient and does not modify the
# action profile so won't create problems
@deprecated_act_prof("act_prof_dump_member", with_selection=False,
strictly_deprecated=False)
def do_table_dump_member(self, line):
"Display some information about a member: table_dump_member <table name> <member handle>"
pass
def complete_table_dump_member(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
def do_act_prof_dump_group(self, line):
"Display some information about a group: table_dump_group <action profile name> <group handle>"
args = line.split()
self.exactly_n_args(args, 2)
act_prof_name = args[0]
act_prof = self.get_res("action profile", act_prof_name,
ResType.action_prof)
try:
grp_handle = int(args[1])
except:
raise UIn_Error("Bad format for group handle")
group = self.client.bm_mt_act_prof_get_group(
0, act_prof.name, grp_handle)
self.dump_one_group(group)
def complete_act_prof_dump_group(self, text, line, start_index, end_index):
return self._complete_act_profs(text)
@deprecated_act_prof("act_prof_dump_group", with_selection=False,
strictly_deprecated=False)
def do_table_dump_group(self, line):
"Display some information about a group: table_dump_group <table name> <group handle>"
pass
def complete_table_dump_group(self, text, line, start_index, end_index):
return self._complete_tables(text)
def _dump_act_prof(self, act_prof):
act_prof_name = act_prof.name
members = self.client.bm_mt_act_prof_get_members(0, act_prof.name)
print "=========="
print "MEMBERS"
self.dump_members(members)
if act_prof.with_selection:
groups = self.client.bm_mt_act_prof_get_groups(0, act_prof.name)
print "=========="
print "GROUPS"
self.dump_groups(groups)
@handle_bad_input
def do_act_prof_dump(self, line):
"Display entries in an action profile: act_prof_dump <action profile name>"
args = line.split()
self.exactly_n_args(args, 1)
act_prof_name = args[0]
act_prof = self.get_res("action profile", act_prof_name,
ResType.action_prof)
self._dump_act_prof(act_prof)
def complete_act_prof_dump(self, text, line, start_index, end_index):
return self._complete_act_profs(text)
@handle_bad_input
def do_table_dump(self, line):
"Display entries in a match-table: table_dump <table name>"
args = line.split()
self.exactly_n_args(args, 1)
table_name = args[0]
table = self.get_res("table", table_name, ResType.table)
entries = self.client.bm_mt_get_entries(0, table.name)
print "=========="
print "TABLE ENTRIES"
for e in entries:
print "**********"
self.dump_one_entry(table, e)
if table.type_ == TableType.indirect or\
table.type_ == TableType.indirect_ws:
assert(table.action_prof is not None)
self._dump_act_prof(table.action_prof)
# default entry
default_entry = self.client.bm_mt_get_default_entry(0, table.name)
print "=========="
print "Dumping default entry"
self.dump_action_entry(default_entry)
print "=========="
def complete_table_dump(self, text, line, start_index, end_index):
return self._complete_tables(text)
@handle_bad_input
def do_table_dump_entry_from_key(self, line):
"Display some information about a table entry: table_dump_entry_from_key <table name> <match fields> [priority]"
args = line.split()
self.at_least_n_args(args, 1)
table_name = args[0]
table = self.get_res("table", table_name, ResType.table)
if table.match_type in {MatchType.TERNARY, MatchType.RANGE}:
try:
priority = int(args.pop(-1))
except:
raise UIn_Error(
"Table is ternary, but could not extract a valid priority from args"
)
else:
priority = 0
match_key = args[1:]
if len(match_key) != table.num_key_fields():
raise UIn_Error(
"Table %s needs %d key fields" % (table_name, table.num_key_fields())
)
match_key = parse_match_key(table, match_key)
entry = self.client.bm_mt_get_entry_from_key(
0, table.name, match_key, BmAddEntryOptions(priority = priority))
self.dump_one_entry(table, entry)
def complete_table_dump_entry_from_key(self, text, line, start_index, end_index):
return self._complete_tables(text)
def _complete_pvs(self, text):
return self._complete_res(PARSE_VSETS, text)
@handle_bad_input
def do_show_pvs(self, line):
"List parser value sets defined in the P4 program: show_pvs"
self.exactly_n_args(line.split(), 0)
for pvs_name in sorted(PARSE_VSETS):
print PARSE_VSETS[pvs_name].parse_vset_str()
@handle_bad_input
def do_pvs_add(self, line):
"""
Add a value to a parser value set: pvs_add <pvs_name> <value>
bmv2 will not report an error if the value already exists.
"""
args = line.split()
self.exactly_n_args(args, 2)
pvs_name = args[0]
pvs = self.get_res("parser value set", pvs_name, ResType.parse_vset)
v = parse_pvs_value(args[1], pvs.bitwidth)
self.client.bm_parse_vset_add(0, pvs_name, v)
def complete_pvs_add(self, text, line, start_index, end_index):
return self._complete_pvs(text)
@handle_bad_input
def do_pvs_remove(self, line):
"""
Remove a value from a parser value set: pvs_remove <pvs_name> <value>
bmv2 will not report an error if the value does not exist.
"""
args = line.split()
self.exactly_n_args(args, 2)
pvs_name = args[0]
pvs = self.get_res("parser value set", pvs_name, ResType.parse_vset)
v = parse_pvs_value(args[1], pvs.bitwidth)
self.client.bm_parse_vset_remove(0, pvs_name, v)
def complete_pvs_remove(self, text, line, start_index, end_index):
return self._complete_pvs(text)
@handle_bad_input
def do_pvs_get(self, line):
"""
Print all values from a parser value set: pvs_get <pvs_name>
Values are displayed in no particular order, one per line.
"""
args = line.split()
self.exactly_n_args(args, 1)
pvs_name = args[0]
pvs = self.get_res("parser value set", pvs_name, ResType.parse_vset)
values = self.client.bm_parse_vset_get(0, pvs_name)
for v in values:
print hexstr(v)
def complete_pvs_get(self, text, line, start_index, end_index):
return self._complete_pvs(text)
@handle_bad_input
def do_pvs_clear(self, line):
"""
Remove all values from a parser value set: pvs_clear <pvs_name>
"""
args = line.split()
self.exactly_n_args(args, 1)
pvs_name = args[0]
pvs = self.get_res("parser value set", pvs_name, ResType.parse_vset)
self.client.bm_parse_vset_clear(0, pvs_name)
def complete_pvs_clear(self, text, line, start_index, end_index):
return self._complete_pvs(text)
@handle_bad_input
def do_port_add(self, line):
"Add a port to the switch (behavior depends on device manager used): port_add <iface_name> <port_num> [pcap_path]"
args = line.split()
self.at_least_n_args(args, 2)
iface_name = args[0]
try:
port_num = int(args[1])
except:
raise UIn_Error("Bad format for port_num, must be an integer")
pcap_path = ""
if len(args) > 2:
pcap_path = args[2]
self.client.bm_dev_mgr_add_port(iface_name, port_num, pcap_path)
@handle_bad_input
def do_port_remove(self, line):
"Removes a port from the switch (behavior depends on device manager used): port_remove <port_num>"
args = line.split()
self.exactly_n_args(args, 1)
try:
port_num = int(args[0])
except:
raise UIn_Error("Bad format for port_num, must be an integer")
self.client.bm_dev_mgr_remove_port(port_num)
@handle_bad_input
def do_show_ports(self, line):
"Shows the ports connected to the switch: show_ports"
self.exactly_n_args(line.split(), 0)
ports = self.client.bm_dev_mgr_show_ports()
print "{:^10}{:^20}{:^10}{}".format(
"port #", "iface name", "status", "extra info")
print "=" * 50
for port_info in ports:
status = "UP" if port_info.is_up else "DOWN"
extra_info = "; ".join(
[k + "=" + v for k, v in port_info.extra.items()])
print "{:^10}{:^20}{:^10}{}".format(
port_info.port_num, port_info.iface_name, status, extra_info)
@handle_bad_input
def do_switch_info(self, line):
"Show some basic info about the switch: switch_info"
self.exactly_n_args(line.split(), 0)
info = self.client.bm_mgmt_get_info()
attributes = [t[2] for t in info.thrift_spec[1:]]
out_attr_w = 5 + max(len(a) for a in attributes)
for a in attributes:
print "{:{w}}: {}".format(a, getattr(info, a), w=out_attr_w)
@handle_bad_input
def do_reset_state(self, line):
"Reset all state in the switch (table entries, registers, ...), but P4 config is preserved: reset_state"
self.exactly_n_args(line.split(), 0)
self.client.bm_reset_state()
@handle_bad_input
def do_write_config_to_file(self, line):
"Retrieves the JSON config currently used by the switch and dumps it to user-specified file"
args = line.split()
self.exactly_n_args(args, 1)
filename = args[0]
json_cfg = self.client.bm_get_config()
with open(filename, 'w') as f:
f.write(json_cfg)
@handle_bad_input
def do_serialize_state(self, line):
"Serialize the switch state and dumps it to user-specified file"
args = line.split()
self.exactly_n_args(args, 1)
filename = args[0]
state = self.client.bm_serialize_state()
with open(filename, 'w') as f:
f.write(state)
def set_crc_parameters_common(self, line, crc_width=16):
conversion_fn = {16: hex_to_i16, 32: hex_to_i32}[crc_width]
config_type = {16: BmCrc16Config, 32: BmCrc32Config}[crc_width]
thrift_fn = {16: self.client.bm_set_crc16_custom_parameters,
32: self.client.bm_set_crc32_custom_parameters}[crc_width]
args = line.split()
self.exactly_n_args(args, 6)
name = args[0]
if name not in CUSTOM_CRC_CALCS or CUSTOM_CRC_CALCS[name] != crc_width:
raise UIn_ResourceError("crc{}_custom".format(crc_width), name)
config_args = [conversion_fn(a) for a in args[1:4]]
config_args += [parse_bool(a) for a in args[4:6]]
crc_config = config_type(*config_args)
thrift_fn(0, name, crc_config)
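# Illustrative invocation (not in the original source). The calculation name
# "calc_0" is hypothetical and parse_bool is assumed to accept "true"/"false";
# the values shown are the standard CRC-16/ARC parameters (polynomial, initial
# remainder, final xor, reflect data, reflect remainder):
#   set_crc16_parameters calc_0 0x8005 0x0000 0x0000 true true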
def _complete_crc(self, text, crc_width=16):
crcs = sorted(
[c for c, w in CUSTOM_CRC_CALCS.items() if w == crc_width])
if not text:
return crcs
return [c for c in crcs if c.startswith(text)]
@handle_bad_input
def do_set_crc16_parameters(self, line):
"Change the parameters for a custom crc16 hash: set_crc16_parameters <name> <polynomial> <initial remainder> <final xor value> <reflect data?> <reflect remainder?>"
self.set_crc_parameters_common(line, 16)
def complete_set_crc16_parameters(self, text, line, start_index, end_index):
return self._complete_crc(text, 16)
@handle_bad_input
def do_set_crc32_parameters(self, line):
"Change the parameters for a custom crc32 hash: set_crc32_parameters <name> <polynomial> <initial remainder> <final xor value> <reflect data?> <reflect remainder?>"
self.set_crc_parameters_common(line, 32)
def complete_set_crc32_parameters(self, text, line, start_index, end_index):
return self._complete_crc(text, 32)
def load_json_config(standard_client=None, json_path=None, architecture_spec=None):
load_json_str(utils.get_json_config(standard_client, json_path), architecture_spec)
def main():
args = get_parser().parse_args()
standard_client, mc_client = thrift_connect(
args.thrift_ip, args.thrift_port,
RuntimeAPI.get_thrift_services(args.pre)
)
load_json_config(standard_client, args.json)
RuntimeAPI(args.pre, standard_client, mc_client).cmdloop()
if __name__ == '__main__':
main()
# ---- shuffle_images.py | blackrubystudio/bread-scanner | 994 bytes | Python | MIT | 3 stars | hexsha 794a2d8313987a7e6fa279ea4b0d86a29fd93cdc ----
"""
Shuffles and renames images in the given directory.
Usage:
python3 shuffle_images.py --dataset ${PWD}/path/to/data
"""
import os
import random
INDEX = 256
def shuffle_images(path):
filenames = os.listdir(path)
random.shuffle(filenames)
for index, filename in enumerate(filenames):
if '.jpg' in filename:
os.rename(os.path.join(path, filename),
os.path.join(path, '{:03}.jpg'.format(index + INDEX)))
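# Illustrative outcome (not part of the original script): with INDEX = 256 and
# three shuffled files a.jpg, b.jpg, c.jpg, they are renamed to 256.jpg, 257.jpg
# and 258.jpg ('{:03}' only pads to three digits, so a smaller offset such as
# INDEX = 0 would yield 000.jpg, 001.jpg, ...).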
if __name__ == "__main__":
# Parse command line argument
import argparse
parser = argparse.ArgumentParser(description='Shuffle images for training deep learning models')
parser.add_argument('--dataset', required=False,
default=os.getcwd(),
metavar='/path/to/dataset',
help='Directory of the dataset')
arg = parser.parse_args()
print('Shuffle images start in {}...'.format(arg.dataset))
shuffle_images(arg.dataset)
print('Finish!')
# ---- src/generators/cloth.py | saic-vul/point_based_clothing (also SamsungLabs/point_based_clothing) | 2,713 bytes | Python | MIT | 29 stars | hexsha 794a2da0e5708cda7df4d7c83cd3b9235bedd82e ----
import os
import segmentation_models_pytorch as smp
import torch
from torch import nn
from utils.common import to_tanh, to_sigm
class Wrapper:
@staticmethod
def get_args(parser):
parser.add('--gen_in_channels', type=int, default=17)
parser.add('--backbone', type=str, default='resnet18')
parser.add('--segm_channels', type=int, default=1)
parser.add('--ntex_channels', type=int, default=16)
parser.add('--norm_gen', type=str, default='batch')
parser.add('--n_people', type=int)
@staticmethod
def get_net(args):
net = Generator(args.gen_in_channels, args.segm_channels, args.backbone, normalization=args.norm_gen)
net = net.to(args.device)
return net
class Generator(nn.Module):
def __init__(self, in_channels, segm_channels, backbone, normalization='batch'):
super().__init__()
n_out = 16
dubn = 'instance' if normalization == 'instance' else True
self.model = smp.Unet(backbone, encoder_weights=None, in_channels=in_channels, classes=n_out,
decoder_use_batchnorm=dubn, encoder_normalization=normalization)
norm_layer = nn.InstanceNorm2d if normalization == 'instance' else nn.BatchNorm2d
padding = nn.ZeroPad2d
self.rgb_head = nn.Sequential(
norm_layer(n_out, affine=True),
nn.ReLU(True),
padding(1),
nn.Conv2d(n_out, n_out, 3, 1, 0, bias=False),
norm_layer(n_out, affine=True),
nn.ReLU(True),
padding(1),
nn.Conv2d(n_out, n_out, 3, 1, 0, bias=False),
norm_layer(n_out, affine=True),
nn.ReLU(True),
padding(1),
nn.Conv2d(n_out, 3, 3, 1, 0, bias=True),
nn.Tanh())
self.segm_head = nn.Sequential(
norm_layer(n_out, affine=True),
nn.ReLU(True),
padding(1),
nn.Conv2d(n_out, segm_channels, 3, 1, 0, bias=True),
nn.Sigmoid())
def forward(self, data_dict):
raster_mask = data_dict['raster_mask']
raster_features = data_dict['raster_features']
inp = torch.cat([raster_mask, raster_features], dim=1)
out = self.model(inp)
segm = self.segm_head(out)
rgb = self.rgb_head(out)
segm_fg = segm[:, :1]
if 'background' in data_dict:
background = data_dict['background']
rgb_segm = to_sigm(rgb) * segm_fg + background * (1. - segm_fg)
else:
rgb_segm = to_sigm(rgb) * segm_fg
rgb_segm = to_tanh(rgb_segm)
out_dict = dict(fake_rgb=rgb_segm, fake_segm=segm)
return out_dict
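# Illustrative usage sketch (not in the original file; the tensor shapes and the
# 1 + 16 channel split are assumptions, chosen to match the default
# --gen_in_channels of 17 and the torch.cat in forward() above):
#     data_dict = {
#         'raster_mask': torch.zeros(1, 1, 256, 256),
#         'raster_features': torch.zeros(1, 16, 256, 256),
#         'background': torch.zeros(1, 3, 256, 256),   # optional
#     }
#     out = generator(data_dict)   # -> {'fake_rgb': ..., 'fake_segm': ...}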
# ---- distanceMetricLearning/load_data_dml.py | KareemYousrii/2015-DL-TIWafer | 3,531 bytes | Python | BSD-2-Clause | hexsha 794a2dd105bae1dcd79e815bb8b167c0d733155b ----
"""
"""
from os.path import dirname
from pandas import read_csv
from itertools import combinations
from random import shuffle
import pandas as pd
import cPickle
import numpy as np
import lmdb
from collections import defaultdict
import sys
# Make sure that caffe is on the python path:
sys.path.append('/home/karn_s/2015-DL-TIWafer/python')
import caffe
from caffe.proto import caffe_pb2
class Bunch(dict):
"""
Container object for datasets: dictionary-like object that
exposes its keys as attributes.
"""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
self.__dict__ = self
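# Illustrative example (not in the original file): Bunch exposes its keys as
# attributes, so
#     b = Bunch(data=[1, 2], target=[0, 1])
#     assert b.data is b['data']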
def load_data_sdml():
id_class_pair = cPickle.load(open('patch_id_class.p', 'rb'))
patch_triplets = cPickle.load(open('patch_triplets.p', 'rb'))
train_index = cPickle.load(open('train_indexes.p', 'rb'))
IdFeature = cPickle.load(open('IdFeature.p', 'rb'))
ids = np.array([key for key, val in id_class_pair.items()])
train_ids = sorted(list(ids[train_index]))
y = []
ti_data = []
for patch_id in train_ids:
y.append(id_class_pair[patch_id])
ti_data.append(IdFeature[patch_id])
sim_pairs = []
diff_pairs = []
for item in patch_triplets:
sim_pairs.append([item[0], item[1]])
diff_pairs.append([item[0], item[2]])
shuffle(diff_pairs)
res = Bunch(sortedIds=train_ids, data=ti_data, target=y, sim_pairs=sim_pairs, diff_pairs=diff_pairs)
return res
def load_sample_data():
id_class_pair = cPickle.load(open('patch_id_class.p', 'rb'))
patch_triplets = cPickle.load(open('patch_triplets.p', 'rb'))
train_index = cPickle.load(open('train_indexes.p', 'rb'))
IdFeature = cPickle.load(open('IdFeature.p', 'rb'))
ids = np.array([key for key, val in id_class_pair.items()])
train_ids = list(ids[train_index])
y = []
train_id_feat = {}
for patch_id in train_ids:
y.append(id_class_pair[patch_id])
train_id_feat[patch_id] = IdFeature[patch_id]
sim_pairs = []
diff_pairs = []
for item in patch_triplets:
sim_pairs.append([item[0], item[1]])
diff_pairs.append([item[0], item[2]])
shuffle(diff_pairs)
X_new = pd.DataFrame.from_dict(train_id_feat, orient='index')
res = Bunch(data=X_new, target=y, sim_pairs=sim_pairs, diff_pairs=diff_pairs)
return res
# IDs = [key for key, val in IdFeature.items()]
# data = [val for key, val in IdFeature.items()]
# X = np.array(data)
# nrows, ncols = X.shape
# column_features = ['ID']
# for i in xrange(ncols):
# column_features.append("feature_{}".format(i))
#
# X_new = pd.DataFrame.from_records(X, index=IDs, columns=column_features)
# datum_features = caffe_pb2.Datum()
# LMDB_PATH_Features = "TIWafer_Patches/features/"
# env_features = lmdb.open(LMDB_PATH_Features, readonly=True, lock=False)
# IdFeature = defaultdict(list)
# with env_features.begin() as txn_features:
# cur_features = txn_features.cursor()
# for i in xrange(20756):
# if not cur_features.next():
# break
# # Read the current cursor
# key_feature, value_feature = cur_features.item()
# datum_features.ParseFromString(value_feature)
# features = np.array(datum_features.float_data).astype(np.float32)
# IdFeature[key_feature] = list(features)
# with open('IdFeature.p', 'wb') as handle:
# cPickle.dump(IdFeature, handle)
# ---- lightly/openapi_generated/swagger_client/api/docker_api.py | CodeGuy-007/lightly | 97,057 bytes | Python | MIT | 1 star | hexsha 794a2e111df770b9ed40926fb6c9e26b3d44a221 ----
# coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
OpenAPI spec version: 1.0.0
Contact: support@lightly.ai
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from lightly.openapi_generated.swagger_client.api_client import ApiClient
class DockerApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def cancel_scheduled_docker_run_state_by_id(self, dataset_id, scheduled_id, **kwargs): # noqa: E501
"""cancel_scheduled_docker_run_state_by_id # noqa: E501
Cancel a scheduled run. This will fail if the state of the scheduled run is no longer OPEN (e.g when it is LOCKED) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.cancel_scheduled_docker_run_state_by_id(dataset_id, scheduled_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MongoObjectID dataset_id: ObjectId of the dataset (required)
:param MongoObjectID scheduled_id: ObjectId of the docker worker run config (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.cancel_scheduled_docker_run_state_by_id_with_http_info(dataset_id, scheduled_id, **kwargs) # noqa: E501
else:
(data) = self.cancel_scheduled_docker_run_state_by_id_with_http_info(dataset_id, scheduled_id, **kwargs) # noqa: E501
return data
def cancel_scheduled_docker_run_state_by_id_with_http_info(self, dataset_id, scheduled_id, **kwargs): # noqa: E501
"""cancel_scheduled_docker_run_state_by_id # noqa: E501
Cancel a scheduled run. This will fail if the state of the scheduled run is no longer OPEN (e.g when it is LOCKED) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.cancel_scheduled_docker_run_state_by_id_with_http_info(dataset_id, scheduled_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MongoObjectID dataset_id: ObjectId of the dataset (required)
:param MongoObjectID scheduled_id: ObjectId of the docker worker run config (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['dataset_id', 'scheduled_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method cancel_scheduled_docker_run_state_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'dataset_id' is set
if self.api_client.client_side_validation and ('dataset_id' not in params or
params['dataset_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `dataset_id` when calling `cancel_scheduled_docker_run_state_by_id`") # noqa: E501
# verify the required parameter 'scheduled_id' is set
if self.api_client.client_side_validation and ('scheduled_id' not in params or
params['scheduled_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `scheduled_id` when calling `cancel_scheduled_docker_run_state_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'dataset_id' in params:
path_params['datasetId'] = params['dataset_id'] # noqa: E501
if 'scheduled_id' in params:
path_params['scheduledId'] = params['scheduled_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth', 'auth0Bearer'] # noqa: E501
return self.api_client.call_api(
'/v1/datasets/{datasetId}/docker/worker/schedule/{scheduledId}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_docker_run(self, body, **kwargs): # noqa: E501
"""create_docker_run # noqa: E501
Creates a new docker run database entry. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_docker_run(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DockerRunCreateRequest body: (required)
:return: CreateEntityResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_docker_run_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.create_docker_run_with_http_info(body, **kwargs) # noqa: E501
return data
def create_docker_run_with_http_info(self, body, **kwargs): # noqa: E501
"""create_docker_run # noqa: E501
Creates a new docker run database entry. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_docker_run_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DockerRunCreateRequest body: (required)
:return: CreateEntityResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_docker_run" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in params or
params['body'] is None): # noqa: E501
raise ValueError("Missing the required parameter `body` when calling `create_docker_run`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth', 'auth0Bearer'] # noqa: E501
return self.api_client.call_api(
'/v1/docker/runs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CreateEntityResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
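# Illustrative usage sketch (not part of the generated code; how the
# DockerRunCreateRequest body is built is an assumption, see the generated
# models package):
#     api = DockerApi(ApiClient())
#     created = api.create_docker_run(body)                  # synchronous
#     thread = api.create_docker_run(body, async_req=True)   # asynchronous
#     created = thread.get()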
def create_docker_run_scheduled_by_dataset_id(self, body, dataset_id, **kwargs): # noqa: E501
"""create_docker_run_scheduled_by_dataset_id # noqa: E501
Schedule a docker run by dataset id. With docker runs it's possible to process unlabeled images from a datasource and use active learning to select the most relevant samples for further processing and visualization in the web app # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_docker_run_scheduled_by_dataset_id(body, dataset_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DockerRunScheduledCreateRequest body: (required)
:param MongoObjectID dataset_id: ObjectId of the dataset (required)
:return: CreateEntityResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_docker_run_scheduled_by_dataset_id_with_http_info(body, dataset_id, **kwargs) # noqa: E501
else:
(data) = self.create_docker_run_scheduled_by_dataset_id_with_http_info(body, dataset_id, **kwargs) # noqa: E501
return data
def create_docker_run_scheduled_by_dataset_id_with_http_info(self, body, dataset_id, **kwargs): # noqa: E501
"""create_docker_run_scheduled_by_dataset_id # noqa: E501
Schedule a docker run by dataset id. With docker runs it is possible to process unlabeled images from a datasource and use active learning to select the most relevant samples for further processing and visualization in the web app. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_docker_run_scheduled_by_dataset_id_with_http_info(body, dataset_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DockerRunScheduledCreateRequest body: (required)
:param MongoObjectID dataset_id: ObjectId of the dataset (required)
:return: CreateEntityResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'dataset_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_docker_run_scheduled_by_dataset_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in params or
params['body'] is None): # noqa: E501
raise ValueError("Missing the required parameter `body` when calling `create_docker_run_scheduled_by_dataset_id`") # noqa: E501
# verify the required parameter 'dataset_id' is set
if self.api_client.client_side_validation and ('dataset_id' not in params or
params['dataset_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `dataset_id` when calling `create_docker_run_scheduled_by_dataset_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'dataset_id' in params:
path_params['datasetId'] = params['dataset_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth', 'auth0Bearer'] # noqa: E501
return self.api_client.call_api(
'/v1/datasets/{datasetId}/docker/worker/schedule', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CreateEntityResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
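# Usage sketch (editorial, not generated): scheduling a run for a dataset.
# `api` is assumed to be an instance of this API class; the request model is
# assumed importable from the client's models package and its constructor
# arguments are placeholders.
#
#     body = DockerRunScheduledCreateRequest(...)   # e.g. referencing a worker config
#     resp = api.create_docker_run_scheduled_by_dataset_id(body, dataset_id)
#     print(resp.id)   # CreateEntityResponse is assumed to expose the new id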
def create_docker_worker_config(self, body, **kwargs): # noqa: E501
"""create_docker_worker_config # noqa: E501
Creates a docker worker configuration. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_docker_worker_config(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DockerWorkerConfigCreateRequest body: (required)
:return: CreateEntityResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_docker_worker_config_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.create_docker_worker_config_with_http_info(body, **kwargs) # noqa: E501
return data
def create_docker_worker_config_with_http_info(self, body, **kwargs): # noqa: E501
"""create_docker_worker_config # noqa: E501
Creates a docker worker configuration. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_docker_worker_config_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DockerWorkerConfigCreateRequest body: (required)
:return: CreateEntityResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_docker_worker_config" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in params or
params['body'] is None): # noqa: E501
raise ValueError("Missing the required parameter `body` when calling `create_docker_worker_config`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth', 'auth0Bearer'] # noqa: E501
return self.api_client.call_api(
'/v1/docker/worker/config', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CreateEntityResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
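# Usage sketch (editorial, not generated): storing a worker configuration and
# reading it back via get_docker_worker_config_by_id (defined further below).
# The request model and its fields are placeholders; the `.id` attribute on
# CreateEntityResponse is an assumption.
#
#     config_resp = api.create_docker_worker_config(DockerWorkerConfigCreateRequest(...))
#     config = api.get_docker_worker_config_by_id(config_resp.id)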
def delete_docker_worker_registry_entry_by_id(self, worker_id, **kwargs): # noqa: E501
"""delete_docker_worker_registry_entry_by_id # noqa: E501
Deletes a worker registry entry by id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_docker_worker_registry_entry_by_id(worker_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MongoObjectID worker_id: ObjectId of the docker worker (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_docker_worker_registry_entry_by_id_with_http_info(worker_id, **kwargs) # noqa: E501
else:
(data) = self.delete_docker_worker_registry_entry_by_id_with_http_info(worker_id, **kwargs) # noqa: E501
return data
def delete_docker_worker_registry_entry_by_id_with_http_info(self, worker_id, **kwargs): # noqa: E501
"""delete_docker_worker_registry_entry_by_id # noqa: E501
Deletes a worker registry entry by id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_docker_worker_registry_entry_by_id_with_http_info(worker_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MongoObjectID worker_id: ObjectId of the docker worker (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['worker_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_docker_worker_registry_entry_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'worker_id' is set
if self.api_client.client_side_validation and ('worker_id' not in params or
params['worker_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `worker_id` when calling `delete_docker_worker_registry_entry_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'worker_id' in params:
path_params['workerId'] = params['worker_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth', 'auth0Bearer'] # noqa: E501
return self.api_client.call_api(
'/v1/docker/worker/{workerId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_docker_license_information(self, **kwargs): # noqa: E501
"""get_docker_license_information # noqa: E501
Requests license information to run the container. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_license_information(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: DockerLicenseInformation
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_docker_license_information_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_docker_license_information_with_http_info(**kwargs) # noqa: E501
return data
def get_docker_license_information_with_http_info(self, **kwargs): # noqa: E501
"""get_docker_license_information # noqa: E501
Requests license information to run the container. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_license_information_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: DockerLicenseInformation
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_docker_license_information" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth', 'auth0Bearer'] # noqa: E501
return self.api_client.call_api(
'/v1/docker/licenseInformation', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DockerLicenseInformation', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_docker_run_by_id(self, run_id, **kwargs): # noqa: E501
"""get_docker_run_by_id # noqa: E501
Gets a docker run by docker run id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_run_by_id(run_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MongoObjectID run_id: ObjectId of the docker run (required)
:return: DockerRunData
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_docker_run_by_id_with_http_info(run_id, **kwargs) # noqa: E501
else:
(data) = self.get_docker_run_by_id_with_http_info(run_id, **kwargs) # noqa: E501
return data
def get_docker_run_by_id_with_http_info(self, run_id, **kwargs): # noqa: E501
"""get_docker_run_by_id # noqa: E501
Gets a docker run by docker run id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_run_by_id_with_http_info(run_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MongoObjectID run_id: ObjectId of the docker run (required)
:return: DockerRunData
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['run_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_docker_run_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'run_id' is set
if self.api_client.client_side_validation and ('run_id' not in params or
params['run_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `run_id` when calling `get_docker_run_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'run_id' in params:
path_params['runId'] = params['run_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth', 'auth0Bearer'] # noqa: E501
return self.api_client.call_api(
'/v1/docker/runs/{runId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DockerRunData', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
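# Usage sketch (editorial, not generated): polling a run until it reaches a
# terminal state. The `state` attribute on DockerRunData and the terminal
# values shown are assumptions based on the docstrings in this module.
#
#     import time
#     run = api.get_docker_run_by_id(run_id)
#     while run.state not in ("DONE", "CANCELED"):
#         time.sleep(10)
#         run = api.get_docker_run_by_id(run_id)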
def get_docker_run_report_read_url_by_id(self, run_id, **kwargs): # noqa: E501
"""get_docker_run_report_read_url_by_id # noqa: E501
Get the URL of a specific docker run's report. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_run_report_read_url_by_id(run_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MongoObjectID run_id: ObjectId of the docker run (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_docker_run_report_read_url_by_id_with_http_info(run_id, **kwargs) # noqa: E501
else:
(data) = self.get_docker_run_report_read_url_by_id_with_http_info(run_id, **kwargs) # noqa: E501
return data
def get_docker_run_report_read_url_by_id_with_http_info(self, run_id, **kwargs): # noqa: E501
"""get_docker_run_report_read_url_by_id # noqa: E501
Get the URL of a specific docker run's report. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_run_report_read_url_by_id_with_http_info(run_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MongoObjectID run_id: ObjectId of the docker run (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['run_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_docker_run_report_read_url_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'run_id' is set
if self.api_client.client_side_validation and ('run_id' not in params or
params['run_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `run_id` when calling `get_docker_run_report_read_url_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'run_id' in params:
path_params['runId'] = params['run_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth', 'auth0Bearer'] # noqa: E501
return self.api_client.call_api(
'/v1/docker/runs/{runId}/readReportUrl', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_docker_run_report_write_url_by_id(self, run_id, **kwargs): # noqa: E501
"""get_docker_run_report_write_url_by_id # noqa: E501
Get the signed URL for uploading a report of a docker run. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_run_report_write_url_by_id(run_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MongoObjectID run_id: ObjectId of the docker run (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_docker_run_report_write_url_by_id_with_http_info(run_id, **kwargs) # noqa: E501
else:
(data) = self.get_docker_run_report_write_url_by_id_with_http_info(run_id, **kwargs) # noqa: E501
return data
def get_docker_run_report_write_url_by_id_with_http_info(self, run_id, **kwargs): # noqa: E501
"""get_docker_run_report_write_url_by_id # noqa: E501
Get the signed URL for uploading a report of a docker run. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_run_report_write_url_by_id_with_http_info(run_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MongoObjectID run_id: ObjectId of the docker run (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['run_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_docker_run_report_write_url_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'run_id' is set
if self.api_client.client_side_validation and ('run_id' not in params or
params['run_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `run_id` when calling `get_docker_run_report_write_url_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'run_id' in params:
path_params['runId'] = params['run_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth', 'auth0Bearer'] # noqa: E501
return self.api_client.call_api(
'/v1/docker/runs/{runId}/writeReportUrl', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
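# Usage sketch (editorial, not generated): the two report endpoints above
# return plain signed URLs (response_type='str'); the report itself is
# transferred with any HTTP client. Using `requests` here is an assumption,
# it is not a dependency of this generated module.
#
#     import requests
#     write_url = api.get_docker_run_report_write_url_by_id(run_id)
#     requests.put(write_url, data=report_bytes)        # upload a report
#     read_url = api.get_docker_run_report_read_url_by_id(run_id)
#     report_bytes = requests.get(read_url).content     # download it again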
def get_docker_runs(self, **kwargs): # noqa: E501
"""get_docker_runs # noqa: E501
Gets all docker runs for a user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_runs(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[DockerRunData]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_docker_runs_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_docker_runs_with_http_info(**kwargs) # noqa: E501
return data
def get_docker_runs_with_http_info(self, **kwargs): # noqa: E501
"""get_docker_runs # noqa: E501
Gets all docker runs for a user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_runs_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[DockerRunData]
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_docker_runs" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth', 'auth0Bearer'] # noqa: E501
return self.api_client.call_api(
'/v1/docker/runs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[DockerRunData]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_docker_runs_scheduled_by_dataset_id(self, dataset_id, **kwargs): # noqa: E501
"""get_docker_runs_scheduled_by_dataset_id # noqa: E501
Get all scheduled docker runs by dataset id which have not finished (neither DONE nor CANCELED). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_runs_scheduled_by_dataset_id(dataset_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MongoObjectID dataset_id: ObjectId of the dataset (required)
:return: list[DockerRunScheduledData]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_docker_runs_scheduled_by_dataset_id_with_http_info(dataset_id, **kwargs) # noqa: E501
else:
(data) = self.get_docker_runs_scheduled_by_dataset_id_with_http_info(dataset_id, **kwargs) # noqa: E501
return data
def get_docker_runs_scheduled_by_dataset_id_with_http_info(self, dataset_id, **kwargs): # noqa: E501
"""get_docker_runs_scheduled_by_dataset_id # noqa: E501
Get all scheduled docker runs by dataset id which have not finished (neither DONE nor CANCELED). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_runs_scheduled_by_dataset_id_with_http_info(dataset_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MongoObjectID dataset_id: ObjectId of the dataset (required)
:return: list[DockerRunScheduledData]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['dataset_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_docker_runs_scheduled_by_dataset_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'dataset_id' is set
if self.api_client.client_side_validation and ('dataset_id' not in params or
params['dataset_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `dataset_id` when calling `get_docker_runs_scheduled_by_dataset_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'dataset_id' in params:
path_params['datasetId'] = params['dataset_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth', 'auth0Bearer'] # noqa: E501
return self.api_client.call_api(
'/v1/datasets/{datasetId}/docker/worker/schedule', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[DockerRunScheduledData]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_docker_runs_scheduled_by_state(self, **kwargs): # noqa: E501
"""get_docker_runs_scheduled_by_state # noqa: E501
Get all scheduled docker runs of the user with the specified state. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_runs_scheduled_by_state(async_req=True)
>>> result = thread.get()
:param async_req bool
:param DockerRunScheduledState state:
:return: list[DockerRunScheduledData]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_docker_runs_scheduled_by_state_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_docker_runs_scheduled_by_state_with_http_info(**kwargs) # noqa: E501
return data
def get_docker_runs_scheduled_by_state_with_http_info(self, **kwargs): # noqa: E501
"""get_docker_runs_scheduled_by_state # noqa: E501
Get all scheduled docker runs of the user with the specified state. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_runs_scheduled_by_state_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param DockerRunScheduledState state:
:return: list[DockerRunScheduledData]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['state'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_docker_runs_scheduled_by_state" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'state' in params:
query_params.append(('state', params['state'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth', 'auth0Bearer'] # noqa: E501
return self.api_client.call_api(
'/v1/docker/worker/schedule', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[DockerRunScheduledData]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
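# Usage sketch (editorial, not generated): `state` is an optional query
# parameter, so omitting it returns all scheduled runs of the user. The enum
# value shown is a placeholder; consult DockerRunScheduledState for the real
# values.
#
#     open_runs = api.get_docker_runs_scheduled_by_state(state="OPEN")
#     all_scheduled = api.get_docker_runs_scheduled_by_state()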
def get_docker_worker_config_by_id(self, config_id, **kwargs): # noqa: E501
"""get_docker_worker_config_by_id # noqa: E501
Gets a docker worker configuration by id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_worker_config_by_id(config_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MongoObjectID config_id: ObjectId of the docker worker config (required)
:return: DockerWorkerConfigData
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_docker_worker_config_by_id_with_http_info(config_id, **kwargs) # noqa: E501
else:
(data) = self.get_docker_worker_config_by_id_with_http_info(config_id, **kwargs) # noqa: E501
return data
def get_docker_worker_config_by_id_with_http_info(self, config_id, **kwargs): # noqa: E501
"""get_docker_worker_config_by_id # noqa: E501
Gets a docker worker configuration by id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_worker_config_by_id_with_http_info(config_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MongoObjectID config_id: ObjectId of the docker worker config (required)
:return: DockerWorkerConfigData
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['config_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_docker_worker_config_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'config_id' is set
if self.api_client.client_side_validation and ('config_id' not in params or
params['config_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `config_id` when calling `get_docker_worker_config_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'config_id' in params:
path_params['configId'] = params['config_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth', 'auth0Bearer'] # noqa: E501
return self.api_client.call_api(
'/v1/docker/worker/config/{configId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DockerWorkerConfigData', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_docker_worker_configs(self, **kwargs): # noqa: E501
"""get_docker_worker_configs # noqa: E501
Get docker worker configurations. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_worker_configs(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[DockerWorkerConfigData]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_docker_worker_configs_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_docker_worker_configs_with_http_info(**kwargs) # noqa: E501
return data
def get_docker_worker_configs_with_http_info(self, **kwargs): # noqa: E501
"""get_docker_worker_configs # noqa: E501
Get docker worker configurations. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_worker_configs_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[DockerWorkerConfigData]
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_docker_worker_configs" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth', 'auth0Bearer'] # noqa: E501
return self.api_client.call_api(
'/v1/docker/worker/config', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[DockerWorkerConfigData]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_docker_worker_registry_entries(self, **kwargs): # noqa: E501
"""get_docker_worker_registry_entries # noqa: E501
Returns all worker registry entries for a given user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_worker_registry_entries(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[DockerWorkerRegistryEntryData]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_docker_worker_registry_entries_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_docker_worker_registry_entries_with_http_info(**kwargs) # noqa: E501
return data
def get_docker_worker_registry_entries_with_http_info(self, **kwargs): # noqa: E501
"""get_docker_worker_registry_entries # noqa: E501
Returns all worker registry entries for a given user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_worker_registry_entries_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[DockerWorkerRegistryEntryData]
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_docker_worker_registry_entries" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth', 'auth0Bearer'] # noqa: E501
return self.api_client.call_api(
'/v1/docker/worker', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[DockerWorkerRegistryEntryData]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
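# Usage sketch (editorial, not generated): listing the registered workers of
# the current user. The attribute names on DockerWorkerRegistryEntryData are
# illustrative assumptions only.
#
#     for worker in api.get_docker_worker_registry_entries():
#         print(worker.name, worker.state)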
def get_docker_worker_registry_entry_by_id(self, worker_id, **kwargs): # noqa: E501
"""get_docker_worker_registry_entry_by_id # noqa: E501
Returns a worker registry entry by id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_worker_registry_entry_by_id(worker_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MongoObjectID worker_id: ObjectId of the docker worker (required)
:return: DockerWorkerRegistryEntryData
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_docker_worker_registry_entry_by_id_with_http_info(worker_id, **kwargs) # noqa: E501
else:
(data) = self.get_docker_worker_registry_entry_by_id_with_http_info(worker_id, **kwargs) # noqa: E501
return data
def get_docker_worker_registry_entry_by_id_with_http_info(self, worker_id, **kwargs): # noqa: E501
"""get_docker_worker_registry_entry_by_id # noqa: E501
Returns a worker registry entry by id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_worker_registry_entry_by_id_with_http_info(worker_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MongoObjectID worker_id: ObjectId of the docker worker (required)
:return: DockerWorkerRegistryEntryData
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['worker_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_docker_worker_registry_entry_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'worker_id' is set
if self.api_client.client_side_validation and ('worker_id' not in params or
params['worker_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `worker_id` when calling `get_docker_worker_registry_entry_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'worker_id' in params:
path_params['workerId'] = params['worker_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth', 'auth0Bearer'] # noqa: E501
return self.api_client.call_api(
'/v1/docker/worker/{workerId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DockerWorkerRegistryEntryData', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_docker_authorization_request(self, body, **kwargs): # noqa: E501
"""post_docker_authorization_request # noqa: E501
Performs an authorization to run the container. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_docker_authorization_request(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DockerAuthorizationRequest body: (required)
:return: DockerAuthorizationResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_docker_authorization_request_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.post_docker_authorization_request_with_http_info(body, **kwargs) # noqa: E501
return data
def post_docker_authorization_request_with_http_info(self, body, **kwargs): # noqa: E501
"""post_docker_authorization_request # noqa: E501
Performs an authorization to run the container. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_docker_authorization_request_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DockerAuthorizationRequest body: (required)
:return: DockerAuthorizationResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_docker_authorization_request" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in params or
params['body'] is None): # noqa: E501
raise ValueError("Missing the required parameter `body` when calling `post_docker_authorization_request`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth', 'auth0Bearer'] # noqa: E501
return self.api_client.call_api(
'/v1/docker/authorization', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DockerAuthorizationResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_docker_usage_stats(self, body, **kwargs): # noqa: E501
"""post_docker_usage_stats # noqa: E501
Adds a diagnostic entry of user stats. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_docker_usage_stats(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DockerUserStats body: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_docker_usage_stats_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.post_docker_usage_stats_with_http_info(body, **kwargs) # noqa: E501
return data
def post_docker_usage_stats_with_http_info(self, body, **kwargs): # noqa: E501
"""post_docker_usage_stats # noqa: E501
Adds a diagnostic entry of user stats. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_docker_usage_stats_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DockerUserStats body: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_docker_usage_stats" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in params or
params['body'] is None): # noqa: E501
raise ValueError("Missing the required parameter `body` when calling `post_docker_usage_stats`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth', 'auth0Bearer'] # noqa: E501
return self.api_client.call_api(
'/v1/docker', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def register_docker_worker(self, body, **kwargs): # noqa: E501
"""register_docker_worker # noqa: E501
Registers a worker for a user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.register_docker_worker(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param CreateDockerWorkerRegistryEntryRequest body: (required)
:return: CreateEntityResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.register_docker_worker_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.register_docker_worker_with_http_info(body, **kwargs) # noqa: E501
return data
def register_docker_worker_with_http_info(self, body, **kwargs): # noqa: E501
"""register_docker_worker # noqa: E501
Registers a worker for a user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.register_docker_worker_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param CreateDockerWorkerRegistryEntryRequest body: (required)
:return: CreateEntityResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method register_docker_worker" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in params or
params['body'] is None): # noqa: E501
raise ValueError("Missing the required parameter `body` when calling `register_docker_worker`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth', 'auth0Bearer'] # noqa: E501
return self.api_client.call_api(
'/v1/docker/worker', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CreateEntityResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
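# Usage sketch (editorial, not generated): registering a worker and later
# updating its registry entry via update_docker_worker_registry_entry_by_id
# (defined further below). Request model fields are placeholders and the
# `.id` attribute on CreateEntityResponse is an assumption.
#
#     created = api.register_docker_worker(CreateDockerWorkerRegistryEntryRequest(...))
#     worker_id = created.id
#     api.update_docker_worker_registry_entry_by_id(
#         UpdateDockerWorkerRegistryEntryRequest(...), worker_id)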
def update_docker_run_by_id(self, body, run_id, **kwargs): # noqa: E501
"""update_docker_run_by_id # noqa: E501
Updates a docker run database entry. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_docker_run_by_id(body, run_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DockerRunUpdateRequest body: (required)
:param MongoObjectID run_id: ObjectId of the docker run (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_docker_run_by_id_with_http_info(body, run_id, **kwargs) # noqa: E501
else:
(data) = self.update_docker_run_by_id_with_http_info(body, run_id, **kwargs) # noqa: E501
return data
def update_docker_run_by_id_with_http_info(self, body, run_id, **kwargs): # noqa: E501
"""update_docker_run_by_id # noqa: E501
Updates a docker run database entry. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_docker_run_by_id_with_http_info(body, run_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DockerRunUpdateRequest body: (required)
:param MongoObjectID run_id: ObjectId of the docker run (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'run_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_docker_run_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in params or
params['body'] is None): # noqa: E501
raise ValueError("Missing the required parameter `body` when calling `update_docker_run_by_id`") # noqa: E501
# verify the required parameter 'run_id' is set
if self.api_client.client_side_validation and ('run_id' not in params or
params['run_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `run_id` when calling `update_docker_run_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'run_id' in params:
path_params['runId'] = params['run_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth', 'auth0Bearer'] # noqa: E501
return self.api_client.call_api(
'/v1/docker/runs/{runId}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
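# Usage sketch (editorial, not generated): a worker reporting progress for a
# run it owns. The fields of DockerRunUpdateRequest are placeholders.
#
#     update = DockerRunUpdateRequest(...)   # e.g. new state or progress message
#     api.update_docker_run_by_id(update, run_id)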
def update_docker_worker_config_by_id(self, body, config_id, **kwargs): # noqa: E501
"""update_docker_worker_config_by_id # noqa: E501
Updates a docker worker configuration by id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_docker_worker_config_by_id(body, config_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DockerWorkerConfigCreateRequest body: (required)
:param MongoObjectID config_id: ObjectId of the docker worker config (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_docker_worker_config_by_id_with_http_info(body, config_id, **kwargs) # noqa: E501
else:
(data) = self.update_docker_worker_config_by_id_with_http_info(body, config_id, **kwargs) # noqa: E501
return data
def update_docker_worker_config_by_id_with_http_info(self, body, config_id, **kwargs): # noqa: E501
"""update_docker_worker_config_by_id # noqa: E501
Updates a docker worker configuration by id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_docker_worker_config_by_id_with_http_info(body, config_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DockerWorkerConfigCreateRequest body: (required)
:param MongoObjectID config_id: ObjectId of the docker worker config (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'config_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_docker_worker_config_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in params or
params['body'] is None): # noqa: E501
raise ValueError("Missing the required parameter `body` when calling `update_docker_worker_config_by_id`") # noqa: E501
# verify the required parameter 'config_id' is set
if self.api_client.client_side_validation and ('config_id' not in params or
params['config_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `config_id` when calling `update_docker_worker_config_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'config_id' in params:
path_params['configId'] = params['config_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth', 'auth0Bearer'] # noqa: E501
return self.api_client.call_api(
'/v1/docker/worker/config/{configId}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_docker_worker_registry_entry_by_id(self, body, worker_id, **kwargs): # noqa: E501
"""update_docker_worker_registry_entry_by_id # noqa: E501
Updates the worker status by id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_docker_worker_registry_entry_by_id(body, worker_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param UpdateDockerWorkerRegistryEntryRequest body: (required)
:param MongoObjectID worker_id: ObjectId of the docker worker (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_docker_worker_registry_entry_by_id_with_http_info(body, worker_id, **kwargs) # noqa: E501
else:
(data) = self.update_docker_worker_registry_entry_by_id_with_http_info(body, worker_id, **kwargs) # noqa: E501
return data
def update_docker_worker_registry_entry_by_id_with_http_info(self, body, worker_id, **kwargs): # noqa: E501
"""update_docker_worker_registry_entry_by_id # noqa: E501
Updates the worker status by id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_docker_worker_registry_entry_by_id_with_http_info(body, worker_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param UpdateDockerWorkerRegistryEntryRequest body: (required)
:param MongoObjectID worker_id: ObjectId of the docker worker (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'worker_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_docker_worker_registry_entry_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in params or
params['body'] is None): # noqa: E501
raise ValueError("Missing the required parameter `body` when calling `update_docker_worker_registry_entry_by_id`") # noqa: E501
# verify the required parameter 'worker_id' is set
if self.api_client.client_side_validation and ('worker_id' not in params or
params['worker_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `worker_id` when calling `update_docker_worker_registry_entry_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'worker_id' in params:
path_params['workerId'] = params['worker_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth', 'auth0Bearer'] # noqa: E501
return self.api_client.call_api(
'/v1/docker/worker/{workerId}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_scheduled_docker_run_state_by_id(self, body, dataset_id, worker_id, scheduled_id, **kwargs): # noqa: E501
"""update_scheduled_docker_run_state_by_id # noqa: E501
Update the state of a scheduled run. This will fail if the state of the scheduled run is LOCKED. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_scheduled_docker_run_state_by_id(body, dataset_id, worker_id, scheduled_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DockerRunScheduledUpdateRequest body: (required)
:param MongoObjectID dataset_id: ObjectId of the dataset (required)
:param MongoObjectID worker_id: ObjectId of the docker worker (required)
        :param MongoObjectID scheduled_id: ObjectId of the docker worker run config (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_scheduled_docker_run_state_by_id_with_http_info(body, dataset_id, worker_id, scheduled_id, **kwargs) # noqa: E501
else:
(data) = self.update_scheduled_docker_run_state_by_id_with_http_info(body, dataset_id, worker_id, scheduled_id, **kwargs) # noqa: E501
return data
def update_scheduled_docker_run_state_by_id_with_http_info(self, body, dataset_id, worker_id, scheduled_id, **kwargs): # noqa: E501
"""update_scheduled_docker_run_state_by_id # noqa: E501
Update the state of a scheduled run. This will fail if the state of the scheduled run is LOCKED. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_scheduled_docker_run_state_by_id_with_http_info(body, dataset_id, worker_id, scheduled_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DockerRunScheduledUpdateRequest body: (required)
:param MongoObjectID dataset_id: ObjectId of the dataset (required)
:param MongoObjectID worker_id: ObjectId of the docker worker (required)
        :param MongoObjectID scheduled_id: ObjectId of the docker worker run config (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'dataset_id', 'worker_id', 'scheduled_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_scheduled_docker_run_state_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in params or
params['body'] is None): # noqa: E501
raise ValueError("Missing the required parameter `body` when calling `update_scheduled_docker_run_state_by_id`") # noqa: E501
# verify the required parameter 'dataset_id' is set
if self.api_client.client_side_validation and ('dataset_id' not in params or
params['dataset_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `dataset_id` when calling `update_scheduled_docker_run_state_by_id`") # noqa: E501
# verify the required parameter 'worker_id' is set
if self.api_client.client_side_validation and ('worker_id' not in params or
params['worker_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `worker_id` when calling `update_scheduled_docker_run_state_by_id`") # noqa: E501
# verify the required parameter 'scheduled_id' is set
if self.api_client.client_side_validation and ('scheduled_id' not in params or
params['scheduled_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `scheduled_id` when calling `update_scheduled_docker_run_state_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'dataset_id' in params:
path_params['datasetId'] = params['dataset_id'] # noqa: E501
if 'worker_id' in params:
path_params['workerId'] = params['worker_id'] # noqa: E501
if 'scheduled_id' in params:
path_params['scheduledId'] = params['scheduled_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth', 'auth0Bearer'] # noqa: E501
return self.api_client.call_api(
'/v1/datasets/{datasetId}/docker/worker/{workerId}/schedule/{scheduledId}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 42.420017
| 252
| 0.622016
|
794a2e50c8eac2ea18e8b1c98aa006d2752140df
| 1,329
|
py
|
Python
|
park/envs/multi_dim_index/spaces.py
|
utkarsh5k/park
|
e7eba74f532204564df42a8e82a65ed025ce3b30
|
[
"MIT"
] | 180
|
2019-04-30T05:50:32.000Z
|
2022-03-28T01:32:07.000Z
|
park/envs/multi_dim_index/spaces.py
|
utkarsh5k/park
|
e7eba74f532204564df42a8e82a65ed025ce3b30
|
[
"MIT"
] | 21
|
2019-05-03T17:42:54.000Z
|
2022-01-25T19:31:42.000Z
|
park/envs/multi_dim_index/spaces.py
|
utkarsh5k/park
|
e7eba74f532204564df42a8e82a65ed025ce3b30
|
[
"MIT"
] | 42
|
2019-05-01T15:15:19.000Z
|
2021-11-19T05:27:09.000Z
|
import random
from park.core import Space
from park.envs.multi_dim_index.params import Params as params
from park.envs.multi_dim_index.config import Action, Query
class ActionSpace(Space):
def sample(self):
n = random.randint(1, params.NDIMS)
dims = random.sample(range(params.NDIMS), n)
cols = []
for i in range(n-1):
cols.append(random.randint(1, 100))
return Action(dims, cols)
def contains(self, a):
valid = True
valid &= (len(a.dimensions) <= params.NDIMS)
# Make sure no dimensions are duplicated in the grid list (except the sort dimension
# can be the same as a grid dimension).
valid &= len(set(a.dimensions[:-1])) == len(a.dimensions[:-1])
for d in a.dimensions:
valid &= isinstance(d, int)
valid &= (d < params.NDIMS) and (d >= 0)
valid &= (len(a.columns) == len(a.dimensions)-1)
for c in a.columns:
valid &= isinstance(c, int)
valid &= (c > 0)
return valid
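# Illustrative sketch (not part of the original module): sampling an action and
# validating it with contains(). It assumes params.NDIMS and the Action class
# behave exactly as imported above.
#
#   space = ActionSpace()
#   action = space.sample()        # distinct grid dimensions plus positive column counts
#   assert space.contains(action)  # a sampled action always satisfies the checks above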
class DataObsSpace(Space):
def sample(self):
pass
def contains(self, s):
return s.data_iterator is not None
class QueryObsSpace(Space):
def sample(self):
pass
def contains(self, s):
return isinstance(s, Query) and s.valid()
| 29.533333
| 92
| 0.598194
|
794a2e5a854e866dfc118d308be6b9bc0bf71980
| 1,526
|
py
|
Python
|
main.py
|
biprodip/multi_omic_vae
|
e174bfd97f9a47b2b916390fcf46830fccae063b
|
[
"MIT"
] | 17
|
2019-08-11T12:29:06.000Z
|
2022-03-16T15:47:50.000Z
|
main.py
|
biprodip/multi_omic_vae
|
e174bfd97f9a47b2b916390fcf46830fccae063b
|
[
"MIT"
] | 1
|
2021-12-23T08:38:26.000Z
|
2021-12-24T02:40:35.000Z
|
main.py
|
biprodip/multi_omic_vae
|
e174bfd97f9a47b2b916390fcf46830fccae063b
|
[
"MIT"
] | 17
|
2019-08-15T09:03:51.000Z
|
2022-03-17T14:23:04.000Z
|
import numpy as np
import pandas as pd
from MultiOmiVAE import MultiOmiVAE
from MethyOmiVAE import MethyOmiVAE
from ExprOmiVAE import ExprOmiVAE
from plot_sactter import plot_scatter
from classification import classification
if __name__ == "__main__":
input_path = 'data/OmiVAE/PANCAN/GDC-PANCAN_'
expr_path = input_path + 'htseq_fpkm_'
methy_path = input_path + 'methylation450_'
# Loading data
print('Loading gene expression data...')
expr_df = pd.read_csv(expr_path + 'preprocessed_both.tsv', sep='\t', header=0, index_col=0)
print('Loading DNA methylation data...')
methy_chr_df_list = []
chr_id = list(range(1, 23))
chr_id.append('X')
# Loop among different chromosomes
for chrom in chr_id:
print('Loading methylation data on chromosome ' + str(chrom) + '...')
methy_chr_path = methy_path + 'preprocessed_both_chr' + str(chrom) + '.tsv'
# methy_chr_df = pd.read_csv(methy_chr_path, sep='\t', header=0, index_col=0, dtype=all_cols_f32)
methy_chr_df = pd.read_csv(methy_chr_path, sep='\t', header=0, index_col=0)
methy_chr_df_list.append(methy_chr_df)
e_num_1 = 50
e_num_2 = 200
l_dim = 128
# Example
latent_code, train_acc, val_acc = MultiOmiVAE(input_path=input_path, expr_df=expr_df,
methy_chr_df_list=methy_chr_df_list, p1_epoch_num=e_num_1,
p2_epoch_num=e_num_2, latent_dim=l_dim, early_stopping=False)
| 38.15
| 111
| 0.673657
|
794a2e8000ad496b4fbbb1bdbbf91b79c1addb68
| 3,269
|
py
|
Python
|
HeapMemoryUsage.py
|
carlo4002/kafka_checks
|
b7bb2897b0c3b1b7ffa8a73d6d5600f3b3a874ae
|
[
"Apache-2.0"
] | null | null | null |
HeapMemoryUsage.py
|
carlo4002/kafka_checks
|
b7bb2897b0c3b1b7ffa8a73d6d5600f3b3a874ae
|
[
"Apache-2.0"
] | null | null | null |
HeapMemoryUsage.py
|
carlo4002/kafka_checks
|
b7bb2897b0c3b1b7ffa8a73d6d5600f3b3a874ae
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
#######################################
# Copyright (C) 2019:
# SETRA Conseil
#######################################
from subprocess import CalledProcessError
from jmxquery import JMXConnection
from jmxquery import JMXQuery
import argparse
def parsing_arguments():
p = argparse.ArgumentParser(description="Get JVM heap usage")
p.add_argument("string_host", metavar="'host1:port'",
help="an string with the host nad port jmx")
p.add_argument("-w", help="warning threshold", type=float, required=True, metavar="warning_threashold")
p.add_argument("-c", help="critical threshold", type=float, required=True, metavar="critical_threshold")
p.add_argument('-u', metavar="user_jmx", help="user name jmx")
p.add_argument('-pw', metavar="password", help="password for the user jmx")
arg1 = p.parse_args()
if (arg1.u and not arg1.pw) or (not arg1.u and arg1.pw):
p.print_usage()
p.exit(3, "Error: if you are using authentication jmx, options -u and -pw are requested")
return arg1, p
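# Illustrative invocations (hedged; the host, port and thresholds are made up):
#
#   python HeapMemoryUsage.py broker1:9999 -w 70 -c 90
#   python HeapMemoryUsage.py broker1:9999 -w 70 -c 90 -u monitor -pw secret
#
# The exit codes used below follow the usual Nagios convention:
# 0 OK, 1 WARNING, 2 CRITICAL, 3 UNKNOWN/usage error.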
if __name__ == "__main__":
arg, parser = parsing_arguments()
host = arg.string_host
critical = arg.c
warn = arg.w
type_jmx = "Memory"
name = "UncleanLeaderElectionsPerSec"
metric_value_total = 0
try:
if len(host.split(':')) == 1:
parser.print_usage()
parser.exit(3, "Error : port missing in string '%s'" % host)
if arg.u:
jmxConnection = JMXConnection("service:jmx:rmi:///jndi/rmi://" + host + "/jmxrmi",
jmx_username=arg.u,
jmx_password=arg.pw)
else:
jmxConnection = JMXConnection("service:jmx:rmi:///jndi/rmi://" + host + "/jmxrmi")
jmxQuery = [JMXQuery("java.lang:type=" + type_jmx,
metric_name="kafka_cluster_{type}_{name}",
metric_labels={"host": host})]
        metrics = jmxConnection.query(jmxQuery)
        # Initialise both values so that missing attributes are reported cleanly
        # instead of raising a NameError below.
        used = None
        max = None
        for metric in metrics:
            if f"{metric.attribute}.{metric.attributeKey}" == "HeapMemoryUsage.used":
                used = metric.value
            if f"{metric.attribute}.{metric.attributeKey}" == "HeapMemoryUsage.max":
                max = metric.value
            # print(f"{metric.metric_name}<{metric.attribute}.{metric.attributeKey}> == {metric.value}")
        if not used or not max:
            print("Error - no values found, the heap usage cannot be computed")
            exit(3)
        percent = used / max * 100.0
if percent > critical:
print("CRITICAL - HeapMemoryUsage.used %" + " - " + str(percent))
exit(2)
elif percent > warn:
print("WARNING - HeapMemoryUsage.used %" + " - " + str(percent))
exit(1)
else:
print("OK - HeapMemoryUsage.used %" + " - " + str(percent))
exit(0)
except TimeoutError as e:
err = "Error : connexion failed '%s'" % host
print(err)
exit(3)
raise Exception(err)
except CalledProcessError as e:
err = "Error : connexion failed '%s'" % host
print(err)
exit(3)
raise Exception(err)
| 35.923077
| 108
| 0.566228
|
794a2e98ca444dd396fe67baa9f9ef619f7a4d59
| 24,451
|
py
|
Python
|
Lib/fontTools/cffLib/specializer.py
|
twardoch/fonttools-py27
|
75b852d3f59fc0d03c6e78581530597d4c6368a1
|
[
"MIT",
"BSD-3-Clause"
] | 240
|
2021-01-11T14:49:24.000Z
|
2022-03-29T22:33:49.000Z
|
Lib/fontTools/cffLib/specializer.py
|
twardoch/fonttools-py27
|
75b852d3f59fc0d03c6e78581530597d4c6368a1
|
[
"MIT",
"BSD-3-Clause"
] | 77
|
2021-01-12T20:23:30.000Z
|
2022-03-28T12:14:34.000Z
|
Lib/fontTools/cffLib/specializer.py
|
twardoch/fonttools-py27
|
75b852d3f59fc0d03c6e78581530597d4c6368a1
|
[
"MIT",
"BSD-3-Clause"
] | 28
|
2021-01-17T05:44:11.000Z
|
2022-01-11T19:58:46.000Z
|
# -*- coding: utf-8 -*-
"""T2CharString operator specializer and generalizer."""
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.cffLib import maxStackLimit
def stringToProgram(string):
if isinstance(string, basestring):
string = string.split()
program = []
for token in string:
try:
token = int(token)
except ValueError:
try:
token = float(token)
except ValueError:
pass
program.append(token)
return program
def programToString(program):
return ' '.join(str(x) for x in program)
def programToCommands(program, getNumRegions=None):
"""Takes a T2CharString program list and returns list of commands.
Each command is a two-tuple of commandname,arg-list. The commandname might
be empty string if no commandname shall be emitted (used for glyph width,
hintmask/cntrmask argument, as well as stray arguments at the end of the
program (¯\_(ツ)_/¯).
'getNumRegions' may be None, or a callable object. It must return the
number of regions. 'getNumRegions' takes a single argument, vsindex. If
the vsindex argument is None, getNumRegions returns the default number
of regions for the charstring, else it returns the numRegions for
the vsindex.
The Charstring may or may not start with a width value. If the first
non-blend operator has an odd number of arguments, then the first argument is
a width, and is popped off. This is complicated with blend operators, as
there may be more than one before the first hint or moveto operator, and each
one reduces several arguments to just one list argument. We have to sum the
number of arguments that are not part of the blend arguments, and all the
'numBlends' values. We could instead have said that by definition, if there
is a blend operator, there is no width value, since CFF2 Charstrings don't
have width values. I discussed this with Behdad, and we are allowing for an
initial width value in this case because developers may assemble a CFF2
charstring from CFF Charstrings, which could have width values.
"""
seenWidthOp = False
vsIndex = None
lenBlendStack = 0
lastBlendIndex = 0
commands = []
stack = []
it = iter(program)
for token in it:
if not isinstance(token, basestring):
stack.append(token)
continue
if token == 'blend':
assert getNumRegions is not None
numSourceFonts = 1 + getNumRegions(vsIndex)
# replace the blend op args on the stack with a single list
# containing all the blend op args.
numBlends = stack[-1]
numBlendArgs = numBlends * numSourceFonts + 1
# replace first blend op by a list of the blend ops.
stack[-numBlendArgs:] = [stack[-numBlendArgs:]]
lenBlendStack += numBlends + len(stack) - 1
lastBlendIndex = len(stack)
# if a blend op exists, this is or will be a CFF2 charstring.
continue
elif token == 'vsindex':
vsIndex = stack[-1]
assert type(vsIndex) is int
elif (not seenWidthOp) and token in {'hstem', 'hstemhm', 'vstem', 'vstemhm',
'cntrmask', 'hintmask',
'hmoveto', 'vmoveto', 'rmoveto',
'endchar'}:
seenWidthOp = True
parity = token in {'hmoveto', 'vmoveto'}
if lenBlendStack:
# lenBlendStack has the number of args represented by the last blend
# arg and all the preceding args. We need to now add the number of
# args following the last blend arg.
numArgs = lenBlendStack + len(stack[lastBlendIndex:])
else:
numArgs = len(stack)
if numArgs and (numArgs % 2) ^ parity:
width = stack.pop(0)
commands.append(('', [width]))
if token in {'hintmask', 'cntrmask'}:
if stack:
commands.append(('', stack))
commands.append((token, []))
commands.append(('', [next(it)]))
else:
commands.append((token, stack))
stack = []
if stack:
commands.append(('', stack))
return commands
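def _exampleProgramToCommandsRoundTrip():
    # Illustrative sketch, not part of the original module: a tiny, made-up
    # charstring program converted to commands and back again. It shows the
    # (commandname, arg-list) tuples documented in programToCommands().
    program = stringToProgram("1 2 rmoveto 3 4 rlineto endchar")
    commands = programToCommands(program)
    assert commands == [('rmoveto', [1, 2]), ('rlineto', [3, 4]), ('endchar', [])]
    # commandsToProgram() (defined below) is the exact inverse for this input.
    assert commandsToProgram(commands) == program
    return commands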
def _flattenBlendArgs(args):
token_list = []
for arg in args:
if isinstance(arg, list):
token_list.extend(arg)
token_list.append('blend')
else:
token_list.append(arg)
return token_list
def commandsToProgram(commands):
"""Takes a commands list as returned by programToCommands() and converts
it back to a T2CharString program list."""
program = []
for op,args in commands:
if any(isinstance(arg, list) for arg in args):
args = _flattenBlendArgs(args)
program.extend(args)
if op:
program.append(op)
return program
def _everyN(el, n):
"""Group the list el into groups of size n"""
if len(el) % n != 0: raise ValueError(el)
for i in range(0, len(el), n):
yield el[i:i+n]
class _GeneralizerDecombinerCommandsMap(object):
@staticmethod
def rmoveto(args):
if len(args) != 2: raise ValueError(args)
yield ('rmoveto', args)
@staticmethod
def hmoveto(args):
if len(args) != 1: raise ValueError(args)
yield ('rmoveto', [args[0], 0])
@staticmethod
def vmoveto(args):
if len(args) != 1: raise ValueError(args)
yield ('rmoveto', [0, args[0]])
@staticmethod
def rlineto(args):
if not args: raise ValueError(args)
for args in _everyN(args, 2):
yield ('rlineto', args)
@staticmethod
def hlineto(args):
if not args: raise ValueError(args)
it = iter(args)
try:
while True:
yield ('rlineto', [next(it), 0])
yield ('rlineto', [0, next(it)])
except StopIteration:
pass
@staticmethod
def vlineto(args):
if not args: raise ValueError(args)
it = iter(args)
try:
while True:
yield ('rlineto', [0, next(it)])
yield ('rlineto', [next(it), 0])
except StopIteration:
pass
@staticmethod
def rrcurveto(args):
if not args: raise ValueError(args)
for args in _everyN(args, 6):
yield ('rrcurveto', args)
@staticmethod
def hhcurveto(args):
if len(args) < 4 or len(args) % 4 > 1: raise ValueError(args)
if len(args) % 2 == 1:
yield ('rrcurveto', [args[1], args[0], args[2], args[3], args[4], 0])
args = args[5:]
for args in _everyN(args, 4):
yield ('rrcurveto', [args[0], 0, args[1], args[2], args[3], 0])
@staticmethod
def vvcurveto(args):
if len(args) < 4 or len(args) % 4 > 1: raise ValueError(args)
if len(args) % 2 == 1:
yield ('rrcurveto', [args[0], args[1], args[2], args[3], 0, args[4]])
args = args[5:]
for args in _everyN(args, 4):
yield ('rrcurveto', [0, args[0], args[1], args[2], 0, args[3]])
@staticmethod
def hvcurveto(args):
if len(args) < 4 or len(args) % 8 not in {0,1,4,5}: raise ValueError(args)
last_args = None
if len(args) % 2 == 1:
lastStraight = len(args) % 8 == 5
args, last_args = args[:-5], args[-5:]
it = _everyN(args, 4)
try:
while True:
args = next(it)
yield ('rrcurveto', [args[0], 0, args[1], args[2], 0, args[3]])
args = next(it)
yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], 0])
except StopIteration:
pass
if last_args:
args = last_args
if lastStraight:
yield ('rrcurveto', [args[0], 0, args[1], args[2], args[4], args[3]])
else:
yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], args[4]])
@staticmethod
def vhcurveto(args):
if len(args) < 4 or len(args) % 8 not in {0,1,4,5}: raise ValueError(args)
last_args = None
if len(args) % 2 == 1:
lastStraight = len(args) % 8 == 5
args, last_args = args[:-5], args[-5:]
it = _everyN(args, 4)
try:
while True:
args = next(it)
yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], 0])
args = next(it)
yield ('rrcurveto', [args[0], 0, args[1], args[2], 0, args[3]])
except StopIteration:
pass
if last_args:
args = last_args
if lastStraight:
yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], args[4]])
else:
yield ('rrcurveto', [args[0], 0, args[1], args[2], args[4], args[3]])
@staticmethod
def rcurveline(args):
if len(args) < 8 or len(args) % 6 != 2: raise ValueError(args)
args, last_args = args[:-2], args[-2:]
for args in _everyN(args, 6):
yield ('rrcurveto', args)
yield ('rlineto', last_args)
@staticmethod
def rlinecurve(args):
if len(args) < 8 or len(args) % 2 != 0: raise ValueError(args)
args, last_args = args[:-6], args[-6:]
for args in _everyN(args, 2):
yield ('rlineto', args)
yield ('rrcurveto', last_args)
def _convertBlendOpToArgs(blendList):
# args is list of blend op args. Since we are supporting
# recursive blend op calls, some of these args may also
# be a list of blend op args, and need to be converted before
# we convert the current list.
if any([isinstance(arg, list) for arg in blendList]):
args = [i for e in blendList for i in
(_convertBlendOpToArgs(e) if isinstance(e,list) else [e]) ]
else:
args = blendList
# We now know that blendList contains a blend op argument list, even if
# some of the args are lists that each contain a blend op argument list.
# Convert from:
# [default font arg sequence x0,...,xn] + [delta tuple for x0] + ... + [delta tuple for xn]
# to:
# [ [x0] + [delta tuple for x0],
# ...,
# [xn] + [delta tuple for xn] ]
numBlends = args[-1]
# Can't use args.pop() when the args are being used in a nested list
# comprehension. See calling context
args = args[:-1]
numRegions = len(args)//numBlends - 1
if not (numBlends*(numRegions + 1) == len(args)):
raise ValueError(blendList)
defaultArgs = [[arg] for arg in args[:numBlends]]
deltaArgs = args[numBlends:]
numDeltaValues = len(deltaArgs)
deltaList = [ deltaArgs[i:i + numRegions] for i in range(0, numDeltaValues, numRegions) ]
blend_args = [ a + b for a, b in zip(defaultArgs,deltaList)]
return blend_args
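def _exampleConvertBlendOpToArgs():
    # Illustrative sketch, not part of the original module: the operand list of a
    # blend op with two blended values and two regions, laid out as
    #   [x0, x1, x0-delta-region1, x0-delta-region2, x1-delta-region1, x1-delta-region2, numBlends]
    # is regrouped into one list per blended value: [default, delta1, delta2].
    blend_args = _convertBlendOpToArgs([10, 20, 1, 2, 3, 4, 2])
    assert blend_args == [[10, 1, 2], [20, 3, 4]]
    return blend_args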
def generalizeCommands(commands, ignoreErrors=False):
result = []
mapping = _GeneralizerDecombinerCommandsMap
for op, args in commands:
# First, generalize any blend args in the arg list.
if any([isinstance(arg, list) for arg in args]):
try:
args = [n for arg in args for n in (_convertBlendOpToArgs(arg) if isinstance(arg, list) else [arg])]
except ValueError:
if ignoreErrors:
# Store op as data, such that consumers of commands do not have to
# deal with incorrect number of arguments.
result.append(('', args))
result.append(('', [op]))
else:
raise
func = getattr(mapping, op, None)
if not func:
result.append((op,args))
continue
try:
for command in func(args):
result.append(command)
except ValueError:
if ignoreErrors:
# Store op as data, such that consumers of commands do not have to
# deal with incorrect number of arguments.
result.append(('', args))
result.append(('', [op]))
else:
raise
return result
def generalizeProgram(program, getNumRegions=None, **kwargs):
return commandsToProgram(generalizeCommands(programToCommands(program, getNumRegions), **kwargs))
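def _exampleGeneralizeProgram():
    # Illustrative sketch, not part of the original module: an hhcurveto with four
    # arguments generalizes to the single equivalent rrcurveto.
    program = stringToProgram("10 20 30 40 hhcurveto")
    assert generalizeProgram(program) == [10, 0, 20, 30, 40, 0, 'rrcurveto']
    return generalizeProgram(program)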
def _categorizeVector(v):
"""
Takes X,Y vector v and returns one of r, h, v, or 0 depending on which
of X and/or Y are zero, plus tuple of nonzero ones. If both are zero,
it returns a single zero still.
>>> _categorizeVector((0,0))
('0', (0,))
>>> _categorizeVector((1,0))
('h', (1,))
>>> _categorizeVector((0,2))
('v', (2,))
>>> _categorizeVector((1,2))
('r', (1, 2))
"""
if not v[0]:
if not v[1]:
return '0', v[:1]
else:
return 'v', v[1:]
else:
if not v[1]:
return 'h', v[:1]
else:
return 'r', v
def _mergeCategories(a, b):
if a == '0': return b
if b == '0': return a
if a == b: return a
return None
def _negateCategory(a):
if a == 'h': return 'v'
if a == 'v': return 'h'
assert a in '0r'
return a
def _convertToBlendCmds(args):
# return a list of blend commands, and
# the remaining non-blended args, if any.
num_args = len(args)
stack_use = 0
new_args = []
i = 0
while i < num_args:
arg = args[i]
if not isinstance(arg, list):
new_args.append(arg)
i += 1
stack_use += 1
else:
prev_stack_use = stack_use
# The arg is a tuple of blend values.
# These are each (master 0,delta 1..delta n)
# Combine as many successive tuples as we can,
# up to the max stack limit.
num_sources = len(arg)
blendlist = [arg]
i += 1
stack_use += 1 + num_sources # 1 for the num_blends arg
while (i < num_args) and isinstance(args[i], list):
blendlist.append(args[i])
i += 1
stack_use += num_sources
if stack_use + num_sources > maxStackLimit:
# if we are here, max stack is the CFF2 max stack.
# I use the CFF2 max stack limit here rather than
# the 'maxstack' chosen by the client, as the default
# maxstack may have been used unintentionally. For all
# the other operators, this just produces a little less
# optimization, but here it puts a hard (and low) limit
# on the number of source fonts that can be used.
break
# blendList now contains as many single blend tuples as can be
# combined without exceeding the CFF2 stack limit.
num_blends = len(blendlist)
# append the 'num_blends' default font values
blend_args = []
for arg in blendlist:
blend_args.append(arg[0])
for arg in blendlist:
blend_args.extend(arg[1:])
blend_args.append(num_blends)
new_args.append(blend_args)
stack_use = prev_stack_use + num_blends
return new_args
def _addArgs(a, b):
if isinstance(b, list):
if isinstance(a, list):
if len(a) != len(b):
raise ValueError()
return [_addArgs(va, vb) for va,vb in zip(a, b)]
else:
a, b = b, a
if isinstance(a, list):
return [_addArgs(a[0], b)] + a[1:]
return a + b
def specializeCommands(commands,
ignoreErrors=False,
generalizeFirst=True,
preserveTopology=False,
maxstack=48):
# We perform several rounds of optimizations. They are carefully ordered and are:
#
# 0. Generalize commands.
# This ensures that they are in our expected simple form, with each line/curve only
# having arguments for one segment, and using the generic form (rlineto/rrcurveto).
# If caller is sure the input is in this form, they can turn off generalization to
# save time.
#
# 1. Combine successive rmoveto operations.
#
# 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants.
# We specialize into some, made-up, variants as well, which simplifies following
# passes.
#
# 3. Merge or delete redundant operations, to the extent requested.
# OpenType spec declares point numbers in CFF undefined. As such, we happily
# change topology. If client relies on point numbers (in GPOS anchors, or for
# hinting purposes(what?)) they can turn this off.
#
# 4. Peephole optimization to revert back some of the h/v variants back into their
# original "relative" operator (rline/rrcurveto) if that saves a byte.
#
# 5. Combine adjacent operators when possible, minding not to go over max stack size.
#
# 6. Resolve any remaining made-up operators into real operators.
#
# I have convinced myself that this produces optimal bytecode (except for, possibly
# one byte each time maxstack size prohibits combining.) YMMV, but you'd be wrong. :-)
# A dynamic-programming approach can do the same but would be significantly slower.
#
# 7. For any args which are blend lists, convert them to a blend command.
# 0. Generalize commands.
if generalizeFirst:
commands = generalizeCommands(commands, ignoreErrors=ignoreErrors)
else:
commands = list(commands) # Make copy since we modify in-place later.
# 1. Combine successive rmoveto operations.
for i in range(len(commands)-1, 0, -1):
if 'rmoveto' == commands[i][0] == commands[i-1][0]:
v1, v2 = commands[i-1][1], commands[i][1]
commands[i-1] = ('rmoveto', [v1[0]+v2[0], v1[1]+v2[1]])
del commands[i]
# 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants.
#
# We, in fact, specialize into more, made-up, variants that special-case when both
# X and Y components are zero. This simplifies the following optimization passes.
# This case is rare, but OCD does not let me skip it.
#
# After this round, we will have four variants that use the following mnemonics:
#
# - 'r' for relative, ie. non-zero X and non-zero Y,
    # - 'h' for horizontal, ie. non-zero X and zero Y,
    # - 'v' for vertical, ie. zero X and non-zero Y,
# - '0' for zeros, ie. zero X and zero Y.
#
# The '0' pseudo-operators are not part of the spec, but help simplify the following
# optimization rounds. We resolve them at the end. So, after this, we will have four
# moveto and four lineto variants:
#
# - 0moveto, 0lineto
# - hmoveto, hlineto
# - vmoveto, vlineto
# - rmoveto, rlineto
#
# and sixteen curveto variants. For example, a '0hcurveto' operator means a curve
    # dx0,dy0,dx1,dy1,dx2,dy2,dx3,dy3 where dx0, dy0, and dy3 are zero but not dx3.
# An 'rvcurveto' means dx3 is zero but not dx0,dy0,dy3.
#
# There are nine different variants of curves without the '0'. Those nine map exactly
# to the existing curve variants in the spec: rrcurveto, and the four variants hhcurveto,
# vvcurveto, hvcurveto, and vhcurveto each cover two cases, one with an odd number of
# arguments and one without. Eg. an hhcurveto with an extra argument (odd number of
# arguments) is in fact an rhcurveto. The operators in the spec are designed such that
# all four of rhcurveto, rvcurveto, hrcurveto, and vrcurveto are encodable for one curve.
#
# Of the curve types with '0', the 00curveto is equivalent to a lineto variant. The rest
# of the curve types with a 0 need to be encoded as a h or v variant. Ie. a '0' can be
# thought of a "don't care" and can be used as either an 'h' or a 'v'. As such, we always
# encode a number 0 as argument when we use a '0' variant. Later on, we can just substitute
# the '0' with either 'h' or 'v' and it works.
#
# When we get to curve splines however, things become more complicated... XXX finish this.
# There's one more complexity with splines. If one side of the spline is not horizontal or
# vertical (or zero), ie. if it's 'r', then it limits which spline types we can encode.
# Only hhcurveto and vvcurveto operators can encode a spline starting with 'r', and
# only hvcurveto and vhcurveto operators can encode a spline ending with 'r'.
# This limits our merge opportunities later.
#
for i in range(len(commands)):
op,args = commands[i]
if op in {'rmoveto', 'rlineto'}:
c, args = _categorizeVector(args)
commands[i] = c+op[1:], args
continue
if op == 'rrcurveto':
c1, args1 = _categorizeVector(args[:2])
c2, args2 = _categorizeVector(args[-2:])
commands[i] = c1+c2+'curveto', args1+args[2:4]+args2
continue
# 3. Merge or delete redundant operations, to the extent requested.
#
# TODO
# A 0moveto that comes before all other path operations can be removed.
# though I find conflicting evidence for this.
#
# TODO
# "If hstem and vstem hints are both declared at the beginning of a
# CharString, and this sequence is followed directly by the hintmask or
# cntrmask operators, then the vstem hint operator (or, if applicable,
# the vstemhm operator) need not be included."
#
# "The sequence and form of a CFF2 CharString program may be represented as:
# {hs* vs* cm* hm* mt subpath}? {mt subpath}*"
#
# https://www.microsoft.com/typography/otspec/cff2charstr.htm#section3.1
#
# For Type2 CharStrings the sequence is:
# w? {hs* vs* cm* hm* mt subpath}? {mt subpath}* endchar"
# Some other redundancies change topology (point numbers).
if not preserveTopology:
for i in range(len(commands)-1, -1, -1):
op, args = commands[i]
# A 00curveto is demoted to a (specialized) lineto.
if op == '00curveto':
assert len(args) == 4
c, args = _categorizeVector(args[1:3])
op = c+'lineto'
commands[i] = op, args
# and then...
# A 0lineto can be deleted.
if op == '0lineto':
del commands[i]
continue
# Merge adjacent hlineto's and vlineto's.
# In CFF2 charstrings from variable fonts, each
# arg item may be a list of blendable values, one from
# each source font.
if (i and op in {'hlineto', 'vlineto'} and
(op == commands[i-1][0])):
_, other_args = commands[i-1]
assert len(args) == 1 and len(other_args) == 1
try:
new_args = [_addArgs(args[0], other_args[0])]
except ValueError:
continue
commands[i-1] = (op, new_args)
del commands[i]
continue
# 4. Peephole optimization to revert back some of the h/v variants back into their
# original "relative" operator (rline/rrcurveto) if that saves a byte.
for i in range(1, len(commands)-1):
op,args = commands[i]
prv,nxt = commands[i-1][0], commands[i+1][0]
if op in {'0lineto', 'hlineto', 'vlineto'} and prv == nxt == 'rlineto':
assert len(args) == 1
args = [0, args[0]] if op[0] == 'v' else [args[0], 0]
commands[i] = ('rlineto', args)
continue
if op[2:] == 'curveto' and len(args) == 5 and prv == nxt == 'rrcurveto':
assert (op[0] == 'r') ^ (op[1] == 'r')
if op[0] == 'v':
pos = 0
elif op[0] != 'r':
pos = 1
elif op[1] == 'v':
pos = 4
else:
pos = 5
# Insert, while maintaining the type of args (can be tuple or list).
args = args[:pos] + type(args)((0,)) + args[pos:]
commands[i] = ('rrcurveto', args)
continue
# 5. Combine adjacent operators when possible, minding not to go over max stack size.
for i in range(len(commands)-1, 0, -1):
op1,args1 = commands[i-1]
op2,args2 = commands[i]
new_op = None
# Merge logic...
if {op1, op2} <= {'rlineto', 'rrcurveto'}:
if op1 == op2:
new_op = op1
else:
if op2 == 'rrcurveto' and len(args2) == 6:
new_op = 'rlinecurve'
elif len(args2) == 2:
new_op = 'rcurveline'
elif (op1, op2) in {('rlineto', 'rlinecurve'), ('rrcurveto', 'rcurveline')}:
new_op = op2
elif {op1, op2} == {'vlineto', 'hlineto'}:
new_op = op1
elif 'curveto' == op1[2:] == op2[2:]:
d0, d1 = op1[:2]
d2, d3 = op2[:2]
if d1 == 'r' or d2 == 'r' or d0 == d3 == 'r':
continue
d = _mergeCategories(d1, d2)
if d is None: continue
if d0 == 'r':
d = _mergeCategories(d, d3)
if d is None: continue
new_op = 'r'+d+'curveto'
elif d3 == 'r':
d0 = _mergeCategories(d0, _negateCategory(d))
if d0 is None: continue
new_op = d0+'r'+'curveto'
else:
d0 = _mergeCategories(d0, d3)
if d0 is None: continue
new_op = d0+d+'curveto'
# Make sure the stack depth does not exceed (maxstack - 1), so
# that subroutinizer can insert subroutine calls at any point.
if new_op and len(args1) + len(args2) < maxstack:
commands[i-1] = (new_op, args1+args2)
del commands[i]
# 6. Resolve any remaining made-up operators into real operators.
for i in range(len(commands)):
op,args = commands[i]
if op in {'0moveto', '0lineto'}:
commands[i] = 'h'+op[1:], args
continue
if op[2:] == 'curveto' and op[:2] not in {'rr', 'hh', 'vv', 'vh', 'hv'}:
op0, op1 = op[:2]
if (op0 == 'r') ^ (op1 == 'r'):
assert len(args) % 2 == 1
if op0 == '0': op0 = 'h'
if op1 == '0': op1 = 'h'
if op0 == 'r': op0 = op1
if op1 == 'r': op1 = _negateCategory(op0)
assert {op0,op1} <= {'h','v'}, (op0, op1)
if len(args) % 2:
if op0 != op1: # vhcurveto / hvcurveto
if (op0 == 'h') ^ (len(args) % 8 == 1):
# Swap last two args order
args = args[:-2]+args[-1:]+args[-2:-1]
else: # hhcurveto / vvcurveto
if op0 == 'h': # hhcurveto
# Swap first two args order
args = args[1:2]+args[:1]+args[2:]
commands[i] = op0+op1+'curveto', args
continue
# 7. For any series of args which are blend lists, convert the series to a single blend arg.
for i in range(len(commands)):
op, args = commands[i]
if any(isinstance(arg, list) for arg in args):
commands[i] = op, _convertToBlendCmds(args)
return commands
def specializeProgram(program, getNumRegions=None, **kwargs):
return commandsToProgram(specializeCommands(programToCommands(program, getNumRegions), **kwargs))
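def _exampleSpecializeProgram():
    # Illustrative sketch, not part of the original module: a purely vertical
    # rlineto is specialized to vlineto, dropping the redundant zero argument.
    program = stringToProgram("1 2 rmoveto 0 3 rlineto")
    assert specializeProgram(program) == [1, 2, 'rmoveto', 3, 'vlineto']
    return specializeProgram(program)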
if __name__ == '__main__':
import sys
if len(sys.argv) == 1:
import doctest
sys.exit(doctest.testmod().failed)
program = stringToProgram(sys.argv[1:])
print("Program:"); print(programToString(program))
commands = programToCommands(program)
print("Commands:"); print(commands)
program2 = commandsToProgram(commands)
print("Program from commands:"); print(programToString(program2))
assert program == program2
print("Generalized program:"); print(programToString(generalizeProgram(program)))
print("Specialized program:"); print(programToString(specializeProgram(program)))
| 32.95283
| 104
| 0.665617
|
794a2fb9b59515218ab99ca2b415afea06a23c62
| 3,432
|
py
|
Python
|
deploy-agent/deployd/common/utils.py
|
jsoref/pinterest-teletraan
|
e1093a8fc232c66b01595f46ed5f2f2ff42d6dc6
|
[
"Apache-2.0"
] | null | null | null |
deploy-agent/deployd/common/utils.py
|
jsoref/pinterest-teletraan
|
e1093a8fc232c66b01595f46ed5f2f2ff42d6dc6
|
[
"Apache-2.0"
] | null | null | null |
deploy-agent/deployd/common/utils.py
|
jsoref/pinterest-teletraan
|
e1093a8fc232c66b01595f46ed5f2f2ff42d6dc6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import code
import errno
import hashlib
import logging
import os
import signal
import sys
import traceback
import subprocess
from deployd import IS_PINTEREST
log = logging.getLogger(__name__)
# noinspection PyProtectedMember
def exit_abruptly(status=0):
"""Exit method that just quits abruptly.
Helps with KeyError issues.
:param status: exit code
"""
# if we are testing we want to test gracefully or this will abort the tests
if os.environ.get('DEPLOY_TESTING'):
sys.exit(status)
os._exit(status)
def touch(fname, times=None):
try:
        with open(fname, 'a'):
os.utime(fname, times)
except IOError:
log.error('Failed touching host type file {}'.format(fname))
def hash_file(filepath):
""" hash the file content
:param filepath: the full path of the file
:return:the sha1 of the file data
"""
with open(filepath, 'rb') as f:
return hashlib.sha1(f.read()).hexdigest()
# steal from http://stackoverflow.com/questions/132058/
# showing-the-stack-trace-from-a-running-python-application
# use : sudo kill -SIGUSR1 $pid to trigger the debug
def debug(sig, frame):
"""Interrupt running process, and provide a python prompt for
interactive debugging."""
d = {'_frame': frame} # Allow access to frame object.
d.update(frame.f_globals) # Unless shadowed by global
d.update(frame.f_locals)
i = code.InteractiveConsole(d)
message = "Signal recieved : entering python shell.\nTraceback:\n"
message += ''.join(traceback.format_stack(frame))
i.interact(message)
def listen():
signal.signal(signal.SIGUSR1, debug) # Register handler
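# Illustrative usage sketch (hedged): a daemon would call listen() once during
# start-up, after which an operator can open an interactive shell inside the
# running process with `sudo kill -SIGUSR1 <pid>`, as noted above.
#
#   if __name__ == '__main__':
#       listen()
#       run_daemon()   # hypothetical main loop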
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as ex:
# if the directory exists, silently exits
if ex.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def ensure_dirs(config):
# make sure deployd directories exist
mkdir_p(config.get_builds_directory())
mkdir_p(config.get_agent_directory())
mkdir_p(config.get_log_directory())
def run_prereqs(config):
# check if the puppet has finished or not
if IS_PINTEREST:
respect_puppet = config.respect_puppet()
puppet_file_path = config.get_puppet_file_path()
if respect_puppet and \
puppet_file_path is not None and \
not os.path.exists(puppet_file_path):
print("Waiting for first puppet run.")
sys.exit(0)
ensure_dirs(config)
def get_info_from_facter(key):
try:
output = subprocess.check_output(['facter', '-p', key])
if output:
return output.strip("\n")
else:
return None
except:
log.error("Failed to get info from facter by key {}".format(key))
return None
| 27.902439
| 79
| 0.677156
|
794a308b0be02eb93b17276da2c2a743ea3b3bcc
| 11,031
|
py
|
Python
|
gitScrabber/argHandler.py
|
Eyenseo/gitScrabber
|
e3f5ce1a7b034fa3e40a54577268228a3be2b141
|
[
"MIT"
] | null | null | null |
gitScrabber/argHandler.py
|
Eyenseo/gitScrabber
|
e3f5ce1a7b034fa3e40a54577268228a3be2b141
|
[
"MIT"
] | null | null | null |
gitScrabber/argHandler.py
|
Eyenseo/gitScrabber
|
e3f5ce1a7b034fa3e40a54577268228a3be2b141
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2017 Roland Jaeger
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from argparse import ArgumentTypeError as err
import argparse
import sys
import os
import re
class PathType(object):
""" Taken from http://stackoverflow.com/a/33181083/1935553"""
def __init__(self, exists=True, type='file', dash_ok=True):
"""
:param exists: True: a path that does exist
False: a path that does not exist, in a valid parent
directory
None: don't care
:param type: file, dir, symlink, None, or a function returning
True for valid paths
None: don't care
:param dash_ok: whether to allow "-" as stdin/stdout
"""
assert exists in (True, False, None)
assert type in ('file', 'dir', 'symlink', None)\
or hasattr(type, '__call__')
self._exists = exists
self._type = type
self._dash_ok = dash_ok
self.__name__ = type
def __call__(self, string):
if string == '-':
# the special argument "-" means sys.std{in,out}
if self._type == 'dir':
raise err(
'standard input/output (-) not allowed as directory path')
elif self._type == 'symlink':
raise err(
'standard input/output (-) not allowed as symlink path')
elif not self._dash_ok:
raise err('standard input/output (-) not allowed')
else:
path = os.path.expandvars(os.path.expanduser(string))
e = os.path.exists(path)
if self._exists:
if not e:
raise err("path does not exist: '%s'" % string)
if self._type is None:
pass
elif self._type == 'file':
if not os.path.isfile(path):
raise err("path is not a file: '%s'" % string)
elif self._type == 'symlink':
                    if not os.path.islink(path):
raise err("path is not a symlink: '%s'" % string)
elif self._type == 'dir':
if not os.path.isdir(path):
raise err("path is not a directory: '%s'" % string)
elif not self._type(path):
raise err("path not valid: '%s'" % string)
else:
if not self._exists and self._exists is not None and e:
raise err("path exists: '%s'" % string)
p = os.path.dirname(os.path.normpath(path)) or '.'
if not os.path.isdir(p):
raise err("parent path is not a directory: '%s'" % p)
elif not os.path.exists(p):
raise err("parent directory does not exist: '%s'" % p)
return string
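# Illustrative usage sketch (not part of the original module): PathType instances
# are meant to be passed as the `type=` callable of an argparse argument, exactly
# as __setup_parser() does below. The '--data .' value is only an example.
#
#   >>> p = argparse.ArgumentParser()
#   >>> _ = p.add_argument('--data', type=PathType(exists=True, type='dir'))
#   >>> p.parse_args(['--data', '.']).data
#   '.'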
class SmartFormatter(argparse.MetavarTypeHelpFormatter):
"""
Formatter class that handles line breaks in a non idiotic way
"""
def _split_lines(self, text, width):
out = []
for line in text.splitlines():
out.extend(argparse.MetavarTypeHelpFormatter._split_lines(
self, line, width))
return out
def __setup_parser():
"""
Set up of the argument parser
:returns: argument parser
"""
parser = argparse.ArgumentParser(
description='ScrabGitRepos',
formatter_class=SmartFormatter,
add_help=False)
required_args = parser.add_argument_group('Required arguments')
required_args.add_argument('-t', '--tasks',
type=PathType(exists=True, type='file'),
default=None,
help="Path to the tasks.yaml file - can be "
"provided via configuration file")
program_args = parser.add_argument_group('Program arguments')
program_args.add_argument('-r', '--report',
type=PathType(exists=True, type='file'),
default=None,
help="Path to an old report as base")
program_args.add_argument('-o', '--output',
type=PathType(exists=None, type='file'),
default=None,
help="Path where the report will be saved to")
program_args.add_argument('-c', '--config',
type=PathType(exists=True, type='file'),
default=None,
help="Path to the configuration file - defaults "
"to './gitScrabber.conf'. Write the command "
"line arguments without leading dashes e.g.:\n"
"print\n"
"\tdata=/tmp")
program_args.add_argument('-d', '--data',
type=PathType(exists=True, type='dir'),
default='.',
help="Directory where the repositories and "
"archives are stored")
program_args.add_argument('-u', '--update',
action='store_true',
help="Before scrabbing the tool will try to "
"update the sources")
program_args.add_argument('-p', '--print',
action='store_true',
default=False,
help="If the report should be printed to stdout "
"- defaults to false")
program_args.add_argument('-f', '--force',
action='store_true',
default=False,
help="Forces the override of a present report "
"- defaults to false")
program_args.add_argument('-h', '--help',
action='help',
help="Show this help message and exit")
global_args = parser.add_argument_group('Global arguments')
global_args.add_argument('--github-token',
type=str,
default=None,
help="Access token for github to work with a "
"higher query limit against their api")
return parser
def __check_overwrite(parser, args):
"""
    Checks whether the --force flag was set without need, or is missing although
    it is required to overwrite an existing report
:param parser: The parser used to raise an error message
:param args: The arguments that were passed to the program
"""
if (args.force
and (not args.output
or args.output != args.report)):
parser.error('Force is only needed to overwrite an existing report')
elif (not args.force
and args.output
and args.output == args.report):
parser.error('{} exists already! '
'Specify a new location for the new report or '
'--force override'.format(args.output))
def __check_tasks(parser, args):
"""
    Checks whether a --tasks file was provided
:param parser: The parser used to raise an error message
:param args: The arguments that were passed to the program
"""
if not args.tasks:
raise Exception("There was no tasks file provided - "
"you have to provide one.")
def __check_arguments(parser, args):
"""
    Check the given arguments for bad combinations
:param parser: The parsed parser
:param args: The arguments
"""
__check_overwrite(parser, args)
__check_tasks(parser, args)
def __load_config(config_path):
"""
Loads the configuration options from file and extends the argument list that
will be passed to the parser
:param config_path: The path to the configuration file
:returns: The argument list that will be parsed extended by the arguments in
the configuration file
"""
options = []
with open(config_path) as f:
config = f.readlines()
for line in config:
if re.match(r'^#', line):
continue
arg = line.strip().split(sep='=', maxsplit=1)
if len(arg[0]) > 1:
arg[0] = '--' + arg[0]
else:
arg[0] = '-' + arg[0]
options.extend(arg)
return options
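# Illustrative worked example (hedged; the configuration content is made up):
# a file containing the two lines
#
#     print
#     data=/tmp
#
# is expanded by __load_config() into ['--print', '--data', '/tmp'], while a
# single-letter option such as `u=alice` would become ['-u', 'alice'].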
def __replace_config_file(args):
"""
Replaces the -c/--config argument with the arguments specified in the
configuration file
:param args: The arguments that will be parsed
:returns: The argument list that will be parsed extended by the arguments in
the configuration file
"""
load_config = False
out = []
for option in args:
if (option.startswith('-c')
or option.startswith('--config')):
load_config = True
elif load_config:
load_config = False
out.extend(__load_config(option))
else:
out.append(option)
return out
def parse_args(args=None):
"""
Parses the arguments
:param args: The command line arguments
:returns: The parsed arguments
"""
if args is None:
args = sys.argv[1:] # args default to the system args
else:
args = list(args) # make sure that args are mutable
if ('--config' not in args
and '-c' not in args
and os.path.isfile(os.getcwd()+'/gitScrabber.conf')):
args.insert(0, '--config')
args.insert(1, os.getcwd()+'/gitScrabber.conf')
args = __replace_config_file(args)
parser = __setup_parser()
parsed_args = parser.parse_args(args)
__check_arguments(parser, parsed_args)
return parsed_args
| 36.77
| 80
| 0.550267
|
794a31c9d8973a445a99fad61b16375756fecaa9
| 4,306
|
py
|
Python
|
namedtensor/distributions/distributions.py
|
larslorch/namedtensor
|
8689fdfb8190fe0f31f05a982778c21a85c713b1
|
[
"MIT"
] | null | null | null |
namedtensor/distributions/distributions.py
|
larslorch/namedtensor
|
8689fdfb8190fe0f31f05a982778c21a85c713b1
|
[
"MIT"
] | null | null | null |
namedtensor/distributions/distributions.py
|
larslorch/namedtensor
|
8689fdfb8190fe0f31f05a982778c21a85c713b1
|
[
"MIT"
] | null | null | null |
from ..schema import _Schema
from ..torch_helpers import NamedTensor
import torch
import torch.distributions
class NamedDistribution:
def __init__(self, dist, batch_names, event_names):
self._dist = dist
self._batch_schema = _Schema.build(batch_names, 0)
self._event_schema = _Schema.build(event_names, 0)
@staticmethod
def build(init, *args, **kwargs):
collect = []
def fix(v):
if isinstance(v, NamedTensor):
collect.append(v)
return v.values
else:
return v
new_args = [fix(v) for v in args]
new_kwargs = {k: fix(v) for k, v in kwargs.items()}
dist = init(*new_args, **new_kwargs)
c = collect[0]
return NamedDistribution(
dist,
c._schema._names[: len(dist._batch_shape)],
c._schema._names[len(dist._batch_shape) :],
)
@property
def batch_shape(self):
"Named batch shape as an ordered dict"
return self._batch_schema.ordered_dict(self._dist.batch_shape)
@property
def event_shape(self):
"Named event shape as an ordered dict"
return self._event_schema.ordered_dict(self._dist.event_shape)
def _sample(self, fn, sizes, names):
tensor = fn(torch.Size(sizes))
return NamedTensor(
tensor,
names + self._batch_schema._names + self._event_schema._names,
)
def sample(self, sizes=(), names=()):
return self._sample(self._dist.sample, sizes, names)
def rsample(self, sizes=(), names=()):
return self._sample(self._dist.rsample, sizes, names)
def __getattr__(self, name):
if name in self._batch_methods:
def call():
method = getattr(self._dist, name)
return NamedTensor(method(), self._batch_schema)
return call
elif name in self._batch:
method = getattr(self._dist, name)
return NamedTensor(method, self._batch_schema)
elif name in self._properties:
return getattr(self._dist, name)
elif name in self._bin:
def call(values):
method = getattr(self._dist, name)
print(values.values.size())
return NamedTensor(
method(values.values),
values._schema._names[-len(self._event_schema._names) :],
)
return call
assert False, "No attr"
def __repr__(self):
return repr(self._dist)
# batch shape methods
_batch_methods = {"entropy", "perplexity"}
# batch shape properties
_batch = {"mean", "stddev", "variance"}
# properties
_properties = {"arg_constraints", "support"}
# batch shape methods
_bin = {"log_prob", "icdf", "cdf"}
class NDistributions(type):
def __getattr__(cls, name):
if name in cls._build:
def call(*args, **kwargs):
return NamedDistribution.build(
getattr(torch.distributions, name), *args, **kwargs
)
return call
elif name in cls._other:
def call(*args, **kwargs):
new_args = [arg._dist for arg in args]
return getattr(torch.distributions, name)(*new_args, **kwargs)
return call
assert False, "Function does not exist"
_build = {
"Normal",
"Multinomial",
"Bernoulli",
"Beta",
"Binomial",
"Categorical",
"Cauchy",
"Chi2",
"Dirichlet",
"Exponential",
"FisherSnedecor",
"Gamma",
"Geometric",
"Gumbel",
"HalfCauchy",
"HalfNormal",
"Independent",
"Laplace",
"LogNormal",
"LowRankMultivariateNormal",
"Multinomial",
"MultivariateNormal",
"NegativeBinomial",
"Normal",
"OneHotCategorical",
"Pareto",
"Poisson",
"RelaxedBernoulli",
"RelaxedOneHotCategorical",
"StudentT",
"TransformedDistribution",
"Uniform",
"Weibull",
}
_other = {"kl_divergence"}
class ndistributions(metaclass=NDistributions):
pass
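# Illustrative usage sketch (hedged): assumes `loc` and `scale` are NamedTensor
# instances whose names describe the batch dimensions of the distribution.
#
#   dist = ndistributions.Normal(loc, scale)
#   sample = dist.sample(sizes=(10,), names=("draws",))
#   logp = dist.log_prob(sample)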
| 26.745342
| 78
| 0.55922
|
794a323b1bc269c50200e901bf79a656f237cde3
| 271
|
py
|
Python
|
Avaliacion7e.py
|
JoaozinhoProgramation/infosatc-lp-avaliativo-02
|
9049f69c3d66767e1b26bdd4f980eacbcede8713
|
[
"MIT"
] | null | null | null |
Avaliacion7e.py
|
JoaozinhoProgramation/infosatc-lp-avaliativo-02
|
9049f69c3d66767e1b26bdd4f980eacbcede8713
|
[
"MIT"
] | null | null | null |
Avaliacion7e.py
|
JoaozinhoProgramation/infosatc-lp-avaliativo-02
|
9049f69c3d66767e1b26bdd4f980eacbcede8713
|
[
"MIT"
] | null | null | null |
Filmes = ["infinite","Jungle Cruise",]
Jogos = ["Hitman","Crash"]
Livros = ["Você sabia","mindset"]
Esporte = ["Futebol","Basquete"]
Disciplinas =["Matemática","História"]
Lista = [Filmes.copy(), Jogos.copy(), Livros.copy(), Esporte.copy(), Disciplinas.copy()]
print(Lista)  # assumption: the combined list of categories is what should be printed
| 22.583333
| 82
| 0.678967
|
794a32d50c6cc821447e9e9e396796237be5dd1e
| 3,299
|
py
|
Python
|
plugins/action/mnt_session_disconnect_info.py
|
steinzi/ansible-ise
|
0add9c8858ed8e0e5e7219fbaf0c936b6d7cc6c0
|
[
"MIT"
] | null | null | null |
plugins/action/mnt_session_disconnect_info.py
|
steinzi/ansible-ise
|
0add9c8858ed8e0e5e7219fbaf0c936b6d7cc6c0
|
[
"MIT"
] | null | null | null |
plugins/action/mnt_session_disconnect_info.py
|
steinzi/ansible-ise
|
0add9c8858ed8e0e5e7219fbaf0c936b6d7cc6c0
|
[
"MIT"
] | null | null | null |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
try:
from ansible_collections.ansible.utils.plugins.module_utils.common.argspec_validate import (
AnsibleArgSpecValidator,
)
except ImportError:
ANSIBLE_UTILS_IS_INSTALLED = False
else:
ANSIBLE_UTILS_IS_INSTALLED = True
from ansible.errors import AnsibleActionFail
from ansible_collections.cisco.ise.plugins.module_utils.ise import (
ISESDK,
ise_argument_spec,
)
# Get common arguments specification
argument_spec = ise_argument_spec()
# Add arguments specific for this module
argument_spec.update(dict(
ENDPOINT_IP=dict(type="str"),
PSN_NAME=dict(type="str"),
MAC=dict(type="str"),
DISCONNECT_TYPE=dict(type="str"),
NAS_IPV4=dict(type="str"),
))
required_if = []
required_one_of = []
mutually_exclusive = []
required_together = []
class ActionModule(ActionBase):
def __init__(self, *args, **kwargs):
if not ANSIBLE_UTILS_IS_INSTALLED:
raise AnsibleActionFail("ansible.utils is not installed. Execute 'ansible-galaxy collection install ansible.utils'")
super(ActionModule, self).__init__(*args, **kwargs)
self._supports_async = True
self._result = None
# Checks the supplied parameters against the argument spec for this module
def _check_argspec(self):
aav = AnsibleArgSpecValidator(
data=self._task.args,
schema=dict(argument_spec=argument_spec),
schema_format="argspec",
schema_conditionals=dict(
required_if=required_if,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
required_together=required_together,
),
name=self._task.action,
)
valid, errors, self._task.args = aav.validate()
if not valid:
raise AnsibleActionFail(errors)
def get_object(self, params):
new_object = dict(
end_poi_nti_p=params.get("ENDPOINT_IP"),
psn_nam_e=params.get("PSN_NAME"),
mac=params.get("MAC"),
dis_con_nec_tty_pe=params.get("DISCONNECT_TYPE"),
nas_ipv4=params.get("NAS_IPV4"),
)
return new_object
def run(self, tmp=None, task_vars=None):
self._task.diff = False
self._result = super(ActionModule, self).run(tmp, task_vars)
self._result["changed"] = False
self._check_argspec()
ise = ISESDK(params=self._task.args)
id = self._task.args.get("id")
name = self._task.args.get("name")
if id:
response = ise.exec(
family="misc",
function='session_disconnect',
params=self.get_object(self._task.args)
).response
self._result.update(dict(ise_response=response))
self._result.update(ise.exit_json())
return self._result
if not name and not id:
            # NOTICE: This resource does not have a get-all method, or it is implemented in another action
response = None
self._result.update(dict(ise_response=response))
self._result.update(ise.exit_json())
return self._result
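# Hedged illustration of the task arguments this action validates in
# _check_argspec; all values below are placeholders, not real ISE session data.
EXAMPLE_TASK_ARGS = dict(
    ENDPOINT_IP="10.0.0.25",
    PSN_NAME="ise-psn-1",
    MAC="AA:BB:CC:DD:EE:FF",
    DISCONNECT_TYPE="DISCONNECT_DEFAULT",
    NAS_IPV4="10.0.0.1",
)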
| 34.364583
| 128
| 0.64656
|
794a32e14120424cbeb6675e4014824abf7c99ff
| 140
|
py
|
Python
|
cargo/domain/model/base/factory.py
|
agiledragon/ddd-sample-in-python
|
5268e580845e599d8d3488c92bd1b44f4ece2378
|
[
"MIT"
] | 2
|
2018-08-24T15:09:07.000Z
|
2018-10-29T01:45:21.000Z
|
cargo/domain/model/base/factory.py
|
agiledragon/ddd-sample-in-python
|
5268e580845e599d8d3488c92bd1b44f4ece2378
|
[
"MIT"
] | null | null | null |
cargo/domain/model/base/factory.py
|
agiledragon/ddd-sample-in-python
|
5268e580845e599d8d3488c92bd1b44f4ece2378
|
[
"MIT"
] | 3
|
2018-07-30T02:07:21.000Z
|
2021-02-18T07:04:21.000Z
|
from abc import abstractmethod
class Factory(object):
@abstractmethod
def create(self, *args):
raise NotImplementedError
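# Minimal sketch of a concrete factory; ExampleOrder and ExampleOrderFactory are
# hypothetical names used only for illustration, not part of this module.
class ExampleOrder(object):
    def __init__(self, order_id):
        self.order_id = order_id
class ExampleOrderFactory(Factory):
    def create(self, *args):
        # args[0] is assumed to carry the order identifier
        return ExampleOrder(args[0])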
| 15.555556
| 33
| 0.714286
|
794a3312c94fb00c4795bccb081b09e783ac1069
| 14,264
|
py
|
Python
|
iotprovision/winc/pywinc_main.py
|
microchip-pic-avr-tools/iotprovision
|
310806d6df4462484fe289a14957228a6f34ca48
|
[
"MIT"
] | null | null | null |
iotprovision/winc/pywinc_main.py
|
microchip-pic-avr-tools/iotprovision
|
310806d6df4462484fe289a14957228a6f34ca48
|
[
"MIT"
] | null | null | null |
iotprovision/winc/pywinc_main.py
|
microchip-pic-avr-tools/iotprovision
|
310806d6df4462484fe289a14957228a6f34ca48
|
[
"MIT"
] | null | null | null |
"""Main pywinc tool functions
"""
import pathlib
import serial
from pykitcommander.kitprotocols import setup_kit
from .wincupgrade import WincUpgradeBridgeLink, WincUpgrade
from .winc_flash_map import FlashMap
from .winc_certs import ClientCertStorage, RootCertStorage
PYWINC_VERSION = "1.0.0" # FIXME where to define this?
STATUS_ERROR = 1
STATUS_SUCCESS = 0
def read_root_certs(port, decode_print=False, outfile=None):
"""Read root certificate storage from WINC
:param port: Serial port to use for the operation e.g. COM5
:type port: String
:param decode_print: Print the storage object in a readable format, defaults to False
:type decode_print: bool, optional
:param outfile: File in which the storage object should be stored, defaults to None
:type outfile: String, optional
:return: 0 for success and 1 for failure
:rtype: Int
"""
ser = serial.Serial(port, 115200, parity=serial.PARITY_NONE, timeout=10)
winc_bridge = WincUpgradeBridgeLink(ser)
number_of_pages = int(FlashMap.tls_root_cert_size / FlashMap.page_size)
cert_blob = bytearray()
# Read root certificate storage
print("Reading {} bytes from address {}"
.format(number_of_pages * FlashMap.page_size, FlashMap.tls_root_cert_offset))
for i in range(number_of_pages):
cert_blob += winc_bridge.read_page(FlashMap.tls_root_cert_offset + FlashMap.page_size * i)
cert_handler = RootCertStorage()
cert_handler.decode(cert_blob)
if outfile is not None:
with open(outfile, "wb") as file:
file.write(cert_blob)
print("Stored root cert storage to {}".format(outfile))
if decode_print:
print(cert_handler)
return STATUS_SUCCESS
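# Example invocation (hedged): "COM5" and the output file name are placeholders,
# and the WINC upgrade bridge firmware must already be running on the kit.
#
#     read_root_certs("COM5", decode_print=True, outfile="root_store.bin")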
def read_client_certs(port, decode_print=False, outfile=None):
"""Read client storage from WINC
:param port: Serial port to use for the operation e.g. COM5
:type port: String
:param decode_print: Print storage object in a readable format, defaults to False
:type decode_print: bool, optional
:param outfile: File in which the storage object should be stored, defaults to None
:type outfile: String, optional
:return: 0 for success and 1 for failure
:rtype: Int
"""
ser = serial.Serial(port, 115200, parity=serial.PARITY_NONE, timeout=10)
winc_bridge = WincUpgradeBridgeLink(ser)
number_of_pages = int(FlashMap.tls_server_size / FlashMap.page_size)
cert_blob = bytearray()
    # Read client certificate storage area (2 sectors with 4k each = 8k storage)
for i in range(number_of_pages):
cert_blob += winc_bridge.read_page(FlashMap.tls_server_offset + FlashMap.page_size * i)
cstorage = ClientCertStorage()
cstorage.decode(cert_blob)
if outfile is not None:
with open(outfile, "wb") as file:
file.write(cert_blob)
print("Stored client cert storage to {}".format(outfile))
if decode_print:
print(cstorage)
return STATUS_SUCCESS
def build_client_certs(files, decode_print=False, outfile=None):
"""Build a client certificate storage object
:param files: Certificates to include in the storage object.
:type files: Array with file or directory names
:param decode_print: Print storage object in a readable format, defaults to False
:type decode_print: bool, optional
:param outfile: File name of the file where the storage object should be stored, defaults to None
:type outfile: String, optional
:return: 0 for success and 1 for failure
:rtype: Int
"""
flist = []
for file in files:
if pathlib.Path(file).is_dir():
for p in pathlib.Path(file).iterdir():
print("Adding certificate {}".format(p))
flist.append(p)
            # Sort the list of certificates alphabetically for consistency, since iterdir
            # returns files in arbitrary order. Sorting also differs between Windows and
            # Linux (on Linux capital letters sort before lower-case letters, on Windows
            # it is the other way round), so casefold is applied to work around this.
flist.sort(key=lambda path: str.casefold(path.name))
else:
print("Adding certificate {}".format(file))
flist.append(file)
cstorage = ClientCertStorage()
cstorage.add_certificates(flist)
cstorage.add_ecdsa_list()
blob = cstorage.build()
if outfile is not None:
with open(outfile, "wb",) as file:
print("Writing client certificate storage to: {}".format(outfile))
file.write(blob)
if decode_print:
print(cstorage)
return STATUS_SUCCESS
def build_root_certs(files, decode_print=False, outfile=None):
"""Build a root certificate storage object
:param files: Certificates to include in the storage object.
:type files: Array with file or directory names
:param decode_print: Print storage object in a readable format, defaults to False
:type decode_print: bool, optional
:param outfile: File name of the file where the storage object should be stored, defaults to None
:type outfile: String, optional
:return: 0 for success and 1 for failure
:rtype: Int
"""
flist = []
for file in files:
if pathlib.Path(file).is_dir():
for p in pathlib.Path(file).iterdir():
print("Adding certificate {}".format(p))
flist.append(p)
            # Sort the list of certificates alphabetically for consistency, since iterdir
            # returns files in arbitrary order. Sorting also differs between Windows and
            # Linux (on Linux capital letters sort before lower-case letters, on Windows
            # it is the other way round), so casefold is applied to work around this.
flist.sort(key=lambda path: str.casefold(path.name))
else:
print("Adding certificate {}".format(file))
flist.append(file)
print("{} certificates added to storage".format(len(flist)))
cert_handler = RootCertStorage()
cert_handler.add_certificates(flist)
blob = cert_handler.build()
if outfile is not None:
with open(outfile, "wb",) as file:
print("Writing root certificate storage to: {}".format(outfile))
file.write(blob)
print("Root CA storage size is {}".format(len(blob)))
print("Max storage size in WINC is {} ({} bytes left)".format(
FlashMap.tls_root_cert_size, FlashMap.tls_root_cert_size - len(blob)))
if decode_print:
print(cert_handler)
return STATUS_SUCCESS
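# Example invocation (hedged): the certificate path is a placeholder; files and
# directories can be mixed, and directories are expanded and sorted as above.
#
#     build_root_certs(["certs/root/"], decode_print=True, outfile="root_store.bin")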
def decode_root_certs_storage(file):
"""Decode and print a root storage object
:param file: Storage file name
:type file: String
:return: 0 for success and 1 for failure
:rtype: Int
"""
cert_handler = RootCertStorage()
with open(file, "rb") as storage_file:
blob = storage_file.read()
cert_handler.decode(blob)
print(cert_handler)
return STATUS_SUCCESS
def decode_client_certs_storage(file):
"""Decode and print a client storage object
    :param file: Storage object file name
:type files: String
:return: 0 for success and 1 for failure
:rtype: Int
"""
cert_handler = ClientCertStorage()
with open(file, "rb") as storage_file:
blob = storage_file.read()
cert_handler.decode(blob)
print(cert_handler)
return STATUS_SUCCESS
def write_root_certs_storage(port, file):
"""Write a root certificate storage object to WINC
:param port: Serial port to use for the operation e.g. COM5
:type port: String
:param file: Storage object file name
:type file: String
:return: 0 for success and 1 for failure
:rtype: Int
"""
ser = serial.Serial(port, 115200, parity=serial.PARITY_NONE, timeout=10)
winc_bridge = WincUpgradeBridgeLink(ser)
with open(file, "rb") as storage_file:
blob = bytearray(storage_file.read())
# TODO Do a check that the data is a valid root certificate store
# cert_handler = RootCertStorage()
# cert_handler.decode(blob)
if len(blob) > FlashMap.tls_root_cert_size:
print("Root cert store does not fit into flash.")
return STATUS_ERROR
sectors_to_erase = len(blob) // FlashMap.sector_size + (len(blob) % FlashMap.sector_size > 0)
pages_to_write = len(blob) // FlashMap.page_size + (len(blob) % FlashMap.page_size > 0)
# Extend the storage to match page size
if (len(blob) % FlashMap.page_size) != 0:
blob.extend([0xff] * (FlashMap.page_size - (len(blob) % FlashMap.page_size)))
# Erase sectors
print("Erasing {} sector(s) starting from address {}".format(sectors_to_erase, FlashMap.tls_root_cert_offset))
for sector in range(sectors_to_erase):
winc_bridge.erase_sector(FlashMap.tls_root_cert_offset + sector * FlashMap.sector_size)
# Write root certificate storage
print("Writing {} bytes ({} pages) starting from address {}"
.format(pages_to_write * FlashMap.page_size, pages_to_write, FlashMap.tls_root_cert_offset))
for i in range(pages_to_write):
winc_bridge.write_page(FlashMap.tls_root_cert_offset + FlashMap.page_size * i,
blob[i * FlashMap.page_size:(i + 1) * FlashMap.page_size])
return STATUS_SUCCESS
def write_client_certs_storage(port, file):
"""Write a client certificate storage object to WINC
:param port: Serial port to use for the operation e.g. COM5
:type port: String
:param file: Storage object file name
:type file: String
:return: 0 for success and 1 for failure
:rtype: Int
"""
ser = serial.Serial(port, 115200, parity=serial.PARITY_NONE, timeout=10)
winc_bridge = WincUpgradeBridgeLink(ser)
with open(file, "rb") as storage_file:
blob = bytearray(storage_file.read())
# TODO Implement a check if client certificate store is valid
if len(blob) > FlashMap.tls_server_size:
print("Error: Client cert store does not fit into flash, aborting.")
return STATUS_ERROR
if len(blob) > FlashMap.tls_server_size - FlashMap.page_size:
print("Warning: Erasing last page in client cert storage (IoT related data could be lost)")
sectors_to_erase = len(blob) // FlashMap.sector_size + (len(blob) % FlashMap.sector_size > 0)
pages_to_write = len(blob) // FlashMap.page_size + (len(blob) % FlashMap.page_size > 0)
# Extend the storage to match page size
if (len(blob) % FlashMap.page_size) != 0:
blob.extend([0xff] * (FlashMap.page_size - (len(blob) % FlashMap.page_size)))
# Erase sectors
print("Erasing {} sector(s) starting from address {}".format(sectors_to_erase, FlashMap.tls_server_offset))
for sector in range(sectors_to_erase):
winc_bridge.erase_sector(FlashMap.tls_server_offset + sector * FlashMap.sector_size)
    # Write client certificate storage
print("Writing {} bytes starting from address {}"
.format(pages_to_write * FlashMap.page_size, FlashMap.tls_server_offset))
for i in range(pages_to_write):
winc_bridge.write_page(FlashMap.tls_server_offset + FlashMap.page_size * i,
blob[i * FlashMap.page_size:(i + 1) * FlashMap.page_size])
return STATUS_SUCCESS
def winc_fw_upgrade(port, file):
"""Upgrade WINC FW
:param port: Serial port to use for the operation e.g. COM5
:type port: String
:param file: WINC FW image file name
:type file: String
:return: 0 for success and 1 for failure
:rtype: Int
"""
ser = serial.Serial(port, 115200, parity=serial.PARITY_NONE, timeout=10)
winc_upgrade = WincUpgrade(ser)
with open(file, "rb") as storage_file:
blob = storage_file.read()
winc_upgrade.upgrade_full_image(blob)
return STATUS_SUCCESS
def pywinc(args, logging_level):
"""WINC tool CLI commands handler
:param args: Command line arguments (argparse output)
:type args: object
:param logging_level: Logging level from logger module e.g. logging.INFO
:type logging_level: Int
:return: Status. STATUS_SUCCESS=0 or STATUS_ERROR=1
:rtype: Int
"""
status = STATUS_SUCCESS
if args.version:
print(f"pywinc version {PYWINC_VERSION}")
return STATUS_SUCCESS
if args.action in ["read", "write", "fwupgrade"]:
        # These commands require that a kit is connected.
# Request pykitcommander to setup WINC upgrade bridge FW
kit_info = setup_kit("wincupgrade", serialnumber=args.serialnumber,
skip_programming=args.skip_target_programming)
# Extract auto-detected port, or use override provided
port = kit_info['port']
if args.port:
port = args.port
if args.action == 'read':
if args.memory == 'root-certs':
status = read_root_certs(port, decode_print=args.decode,
outfile=args.out)
elif args.memory == 'client-certs':
status = read_client_certs(port, decode_print=args.decode,
outfile=args.out)
elif args.action == 'write':
if args.memory == 'root-certs':
status = write_root_certs_storage(port, args.input[0])
elif args.memory == 'client-certs':
status = write_client_certs_storage(port, args.input[0])
elif args.action == 'build':
print("Building certificate store")
if args.memory == 'client-certs':
status = build_client_certs(args.input, decode_print=args.decode,
outfile=args.out)
elif args.memory == 'root-certs':
status = build_root_certs(args.input, decode_print=args.decode,
outfile=args.out)
elif args.action == 'decode':
if args.memory == 'client-certs':
status = decode_client_certs_storage(args.input[0])
elif args.memory == 'root-certs':
status = decode_root_certs_storage(args.input[0])
elif args.action == 'fwupgrade':
status = winc_fw_upgrade(port, args.input[0] if args.input is not None else None)
return status
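# Hedged sketch of a CLI front-end matching the attributes pywinc() reads
# (version, action, memory, input, out, decode, port, serialnumber,
# skip_target_programming); option names and defaults here are assumptions,
# not the tool's published interface.
def _example_cli(argv=None):
    import argparse
    import logging
    parser = argparse.ArgumentParser(description="WINC certificate and firmware helper")
    parser.add_argument("action", nargs="?", choices=["read", "write", "build", "decode", "fwupgrade"])
    parser.add_argument("-m", "--memory", choices=["root-certs", "client-certs"])
    parser.add_argument("-i", "--input", nargs="+")
    parser.add_argument("-o", "--out")
    parser.add_argument("-d", "--decode", action="store_true")
    parser.add_argument("-p", "--port")
    parser.add_argument("-s", "--serialnumber")
    parser.add_argument("--skip-target-programming", dest="skip_target_programming", action="store_true")
    parser.add_argument("-V", "--version", action="store_true")
    args = parser.parse_args(argv)
    return pywinc(args, logging.INFO)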
| 39.622222
| 114
| 0.672883
|