text_prompt
stringlengths 157
13.1k
| code_prompt
stringlengths 7
19.8k
⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_c_string(data, position):
"""Decode a BSON 'C' string to python unicode string."""
|
end = data.index(b"\x00", position)
return _utf_8_decode(data[position:end], None, True)[0], end + 1
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _synchronized(meth):
"""Call method while holding a lock."""
|
@functools.wraps(meth)
def wrapper(self, *args, **kwargs):
with self._lock:
return meth(self, *args, **kwargs)
return wrapper
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mock_server_receive_request(client, server):
    """Take a client socket and return a Request."""
    # Standard 16-byte wire header: messageLength, requestID,
    # responseTo (ignored here), opCode.
    header = mock_server_receive(client, 16)
    length = _UNPACK_INT(header[0:4])[0]
    request_id = _UNPACK_INT(header[4:8])[0]
    opcode = _UNPACK_INT(header[12:16])[0]
    # The body is everything after the header.
    body = mock_server_receive(client, length - 16)
    if opcode not in OPCODES:
        raise NotImplementedError("Don't know how to unpack opcode %d yet"
                                  % opcode)
    return OPCODES[opcode].unpack(body, client, server, request_id)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mock_server_receive(sock, length):
    """Receive `length` bytes from a socket object."""
    chunks = []
    remaining = length
    while remaining:
        chunk = sock.recv(remaining)
        # recv returning an empty byte string means the peer closed.
        if not chunk:
            raise socket.error(errno.ECONNRESET, 'closed')
        remaining -= len(chunk)
        chunks.append(chunk)
    return b''.join(chunks)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_docs(*args, **kwargs):
    """Make the documents for a `Request` or `Reply`.

    Takes a variety of argument styles, returns a list of dicts.

    Used by `make_prototype_request` and `make_reply`, which are in turn
    used by `MockupDB.receives`, `Request.replies`, and so on. See
    examples in tutorial.
    """
    err_msg = "Can't interpret args: "
    if not args and not kwargs:
        return []
    if not args:
        # OpReply(ok=1, ismaster=True).
        return [kwargs]
    if isinstance(args[0], (int, float, bool)):
        # server.receives().ok(0, err='uh oh').
        if args[1:]:
            raise_args_err(err_msg, ValueError)
        doc = OrderedDict({'ok': args[0]})
        doc.update(kwargs)
        return [doc]
    if isinstance(args[0], (list, tuple)):
        # Send a batch: OpReply([{'a': 1}, {'a': 2}]).
        if not all(isinstance(doc, (OpReply, Mapping))
                   for doc in args[0]):
            raise_args_err('each doc must be a dict:')
        if kwargs:
            raise_args_err(err_msg, ValueError)
        return list(args[0])
    if isinstance(args[0], (string_type, text_type)):
        if args[2:]:
            raise_args_err(err_msg, ValueError)
        if len(args) == 2:
            # Command('aggregate', 'collection', {'cursor': {'batchSize': 1}}).
            doc = OrderedDict({args[0]: args[1]})
        else:
            # OpReply('ismaster', me='a.com').
            doc = OrderedDict({args[0]: 1})
        doc.update(kwargs)
        return [doc]
    if kwargs:
        raise_args_err(err_msg, ValueError)
    # Send a batch as varargs: OpReply({'a': 1}, {'a': 2}).
    if not all(isinstance(doc, (OpReply, Mapping)) for doc in args):
        raise_args_err('each doc must be a dict')
    # BUG FIX: return a list, as documented and as every other branch
    # does, instead of leaking the varargs tuple.
    return list(args)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_prototype_request(*args, **kwargs):
    """Make a prototype Request for a Matcher."""
    if args:
        first = args[0]
        # A Request subclass: instantiate it with the remaining args.
        if inspect.isclass(first) and issubclass(first, Request):
            return first(*args[1:], **kwargs)
        # An already-built Request: return it unchanged.
        if isinstance(first, Request):
            if len(args) > 1 or kwargs:
                raise_args_err("can't interpret args")
            return first
    # Match any opcode.
    return Request(*args, **kwargs)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def docs_repr(*args):
    """Stringify ordered dicts like a regular ones.

    Preserve order, remove 'u'-prefix on unicodes in Python 2:
    {"_id": 2} {"_id": 2, "a": "b"}, {"a": 1}
    {"ts": {"$date": 123456000}}
    {"oid": {"$oid": "123456781234567812345678"}}
    """
    # json_util renders each doc; join with ', ' exactly as the
    # StringIO-based version did.
    return u', '.join(text_type(json_util.dumps(doc)) for doc in args)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def seq_match(seq0, seq1):
    """True if seq0 is a subset of seq1 and their elements are in same order."""
    # A longer sequence can never be an ordered subset of a shorter one.
    if len(seq0) > len(seq1):
        return False
    pos = 0
    for elem in seq0:
        # Advance past elements of seq1 that don't match this element.
        while pos < len(seq1) and seq1[pos] != elem:
            pos += 1
        if pos == len(seq1):
            # Ran off the end of seq1 without finding elem.
            return False
        # Matched; continue searching after this position.
        pos += 1
    return True
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def raise_args_err(message='bad arguments', error_class=TypeError):
    """Throw an error with standard message, displaying function call.

    The raised message looks like, e.g.:
    TypeError: bad arguments: f(1, 2, x='y')
    """
    # f_back is the *caller's* frame, so format_call renders the
    # arguments of the function that invoked raise_args_err.
    frame = inspect.currentframe().f_back
    raise error_class(message + ': ' + format_call(frame))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def interactive_server(port=27017, verbose=True, all_ok=False, name='MockupDB', ssl=False, uds_path=None):
    """A `MockupDB` that the mongo shell can connect to.

    Call `~.MockupDB.run` on the returned server, and clean it up with
    `~.MockupDB.stop`.

    If ``all_ok`` is True, replies {ok: 1} to anything unmatched by a
    specific responder.
    """
    if uds_path is not None:
        # A Unix domain socket replaces the TCP port.
        port = None
    server = MockupDB(port=port,
                      verbose=verbose,
                      request_timeout=int(1e6),
                      ssl=ssl,
                      auto_ismaster=True,
                      uds_path=uds_path)
    if all_ok:
        # Catch-all responder at the bottom of the responder stack.
        server.append_responder({})
    # Canned answers for the commands the mongo shell sends on startup.
    server.autoresponds('whatsmyuri', you='localhost:12345')
    server.autoresponds({'getLog': 'startupWarnings'},
                        log=['hello from %s!' % name])
    server.autoresponds(OpMsg('buildInfo'), version='MockupDB ' + __version__)
    server.autoresponds(OpMsg('listCollections'))
    server.autoresponds('replSetGetStatus', ok=0)
    server.autoresponds('getFreeMonitoringStatus', ok=0)
    return server
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def client_port(self):
    """Client connection's TCP port."""
    peer = self._client.getpeername()
    if not isinstance(peer, tuple):
        # Maybe a Unix domain socket connection.
        return 0
    # TCP peer addresses are (host, port) tuples.
    return peer[1]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def command_err(self, code=1, errmsg='MockupDB command failure', *args, **kwargs):
    """Error reply to a command.

    Returns True so it is suitable as an `~MockupDB.autoresponds` handler.
    """
    # Build the reply fields on a copy; caller-supplied 'ok' wins,
    # but code and errmsg always reflect this call's parameters.
    reply_fields = dict(kwargs)
    reply_fields.setdefault('ok', 0)
    reply_fields['code'] = code
    reply_fields['errmsg'] = errmsg
    self.replies(*args, **reply_fields)
    return True
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unpack(cls, msg, client, server, request_id):
    """Parse message and return an `OpMsg`.

    Takes the client message as bytes, the client and server socket
    objects, and the client request id.
    """
    payload_document = OrderedDict()
    flags, = _UNPACK_UINT(msg[:4])
    pos = 4
    # Only flag values 0 and 2 are accepted (2 presumably being the
    # moreToCome bit -- TODO confirm against the OP_MSG spec).
    if flags != 0 and flags != 2:
        raise ValueError('OP_MSG flag must be 0 or 2 not %r' % (flags,))
    while pos < len(msg):
        # Each section starts with a one-byte payload type.
        payload_type, = _UNPACK_BYTE(msg[pos:pos + 1])
        pos += 1
        payload_size, = _UNPACK_INT(msg[pos:pos + 4])
        if payload_type == 0:
            # Type 0: one BSON document. The 4 bytes just read are the
            # document's own length prefix, so decode starting at `pos`.
            doc = bson.decode_all(msg[pos:pos + payload_size],
                                  CODEC_OPTIONS)[0]
            payload_document.update(doc)
            pos += payload_size
        elif payload_type == 1:
            # Type 1: a document sequence -- int32 size, then a
            # C-string identifier, then consecutive BSON documents.
            section_size, = _UNPACK_INT(msg[pos:pos + 4])
            pos += 4
            identifier, pos = _get_c_string(msg, pos)
            # Section starts w/ 4-byte size prefix, identifier ends w/ nil.
            documents_len = section_size - len(identifier) - 1 - 4
            documents = bson.decode_all(msg[pos:pos + documents_len],
                                        CODEC_OPTIONS)
            payload_document[identifier] = documents
            pos += documents_len
    # The command document always carries its database in '$db'.
    database = payload_document['$db']
    return OpMsg(payload_document, namespace=database, flags=flags,
                 _client=client, request_id=request_id,
                 _server=server)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unpack(cls, msg, client, server, request_id):
    """Parse message and return an `OpQuery` or `Command`.

    Takes the client message as bytes, the client and server socket
    objects, and the client request id.
    """
    flags, = _UNPACK_INT(msg[:4])
    # Namespace is a nil-terminated "db.collection" string after the flags.
    namespace, pos = _get_c_string(msg, 4)
    # Queries against the virtual "$cmd" collection are commands.
    is_command = namespace.endswith('.$cmd')
    num_to_skip, = _UNPACK_INT(msg[pos:pos + 4])
    pos += 4
    num_to_return, = _UNPACK_INT(msg[pos:pos + 4])
    pos += 4
    # Remaining bytes are one or two BSON documents.
    docs = bson.decode_all(msg[pos:], CODEC_OPTIONS)
    if is_command:
        # A command carries exactly one document.
        assert len(docs) == 1
        command_ns = namespace[:-len('.$cmd')]
        return Command(docs, namespace=command_ns, flags=flags,
                       _client=client, request_id=request_id,
                       _server=server)
    else:
        # A plain query has the filter doc, optionally followed by a
        # field-projection doc.
        if len(docs) == 1:
            fields = None
        else:
            assert len(docs) == 2
            fields = docs[1]
        return OpQuery(docs[0], fields=fields, namespace=namespace,
                       flags=flags, num_to_skip=num_to_skip,
                       num_to_return=num_to_return, _client=client,
                       request_id=request_id, _server=server)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unpack(cls, msg, client, server, request_id):
    """Parse message and return an `OpGetMore`.

    Takes the client message as bytes, the client and server socket
    objects, and the client request id.
    """
    flags, = _UNPACK_INT(msg[:4])
    # Namespace is a nil-terminated string following the 4 flag bytes.
    namespace, pos = _get_c_string(msg, 4)
    num_to_return, = _UNPACK_INT(msg[pos:pos + 4])
    pos += 4
    # Cursor ids are 64-bit integers on the wire.
    cursor_id, = _UNPACK_LONG(msg[pos:pos + 8])
    return OpGetMore(namespace=namespace, flags=flags, _client=client,
                     num_to_return=num_to_return, cursor_id=cursor_id,
                     request_id=request_id, _server=server)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unpack(cls, msg, client, server, _):
    """Parse message and return an `OpKillCursors`.

    Takes the client message as bytes, the client and server socket
    objects, and the client request id.
    """
    # Leading 4 bytes are reserved.
    num_of_cursor_ids, = _UNPACK_INT(msg[4:8])
    cursor_ids = []
    pos = 8
    for _ in range(num_of_cursor_ids):
        # BUG FIX: cursor ids in OP_KILL_CURSORS are int64 on the wire,
        # not int32 -- read 8 bytes per id with the long unpacker.
        cursor_ids.append(_UNPACK_LONG(msg[pos:pos + 8])[0])
        pos += 8
    return OpKillCursors(_client=client, cursor_ids=cursor_ids,
                         _server=server)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unpack(cls, msg, client, server, request_id):
    """Parse message and return an `OpInsert`.

    Takes the client message as bytes, the client and server socket
    objects, and the client request id.
    """
    flags, = _UNPACK_INT(msg[:4])
    namespace, pos = _get_c_string(msg, 4)
    # Everything after the namespace is a stream of BSON documents.
    docs = bson.decode_all(msg[pos:], CODEC_OPTIONS)
    return cls(*docs, namespace=namespace, flags=flags, _client=client,
               request_id=request_id, _server=server)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reply_bytes(self, request):
    """Take a `Request` and return an OP_REPLY message as bytes."""
    flags = struct.pack("<i", self._flags)
    # Cursor id is a 64-bit little-endian integer.
    cursor_id = struct.pack("<q", self._cursor_id)
    starting_from = struct.pack("<i", self._starting_from)
    number_returned = struct.pack("<i", len(self._docs))
    # Arbitrary response id; clients correlate replies on responseTo.
    reply_id = random.randint(0, 1000000)
    response_to = request.request_id
    data = b''.join([flags, cursor_id, starting_from, number_returned])
    data += b''.join([bson.BSON.encode(doc) for doc in self._docs])
    # Standard 16-byte wire header: messageLength, requestID,
    # responseTo, opCode.
    message = struct.pack("<i", 16 + len(data))
    message += struct.pack("<i", reply_id)
    message += struct.pack("<i", response_to)
    message += struct.pack("<i", OP_REPLY)
    return message + data
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reply_bytes(self, request):
    """Take a `Request` and return an OP_MSG message as bytes."""
    flags = struct.pack("<I", self._flags)
    # Payload type 0: a single BSON document follows.
    payload_type = struct.pack("<b", 0)
    payload_data = bson.BSON.encode(self.doc)
    data = b''.join([flags, payload_type, payload_data])
    # Arbitrary response id; clients correlate replies on responseTo.
    reply_id = random.randint(0, 1000000)
    response_to = request.request_id
    # 16-byte wire header: messageLength, requestID, responseTo, opCode.
    header = struct.pack(
        "<iiii", 16 + len(data), reply_id, response_to, OP_MSG)
    return header + data
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self):
    """Begin serving. Returns the bound port, or 0 for domain socket."""
    # Bind either a Unix domain socket or a TCP socket.
    self._listening_sock, self._address = (
        bind_domain_socket(self._address)
        if self._uds_path
        else bind_tcp_socket(self._address))
    if self._ssl:
        # Certificate shipped alongside this module.
        certfile = os.path.join(os.path.dirname(__file__), 'server.pem')
        self._listening_sock = _ssl.wrap_socket(
            self._listening_sock,
            certfile=certfile,
            server_side=True)
    # Accept connections on a daemon thread so run() returns immediately.
    self._accept_thread = threading.Thread(target=self._accept_loop)
    self._accept_thread.daemon = True
    self._accept_thread.start()
    return self.port
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stop(self):
    """Stop serving. Always call this to clean up after yourself."""
    self._stopped = True
    threads = [self._accept_thread]
    threads.extend(self._server_threads)
    self._listening_sock.close()
    for sock in list(self._server_socks):
        try:
            sock.shutdown(socket.SHUT_RDWR)
        except socket.error:
            # Socket may already be closed by the peer.
            pass
        try:
            sock.close()
        except socket.error:
            pass
    # NOTE(review): _unlock presumably releases the server lock so the
    # accept/server threads can finish their loops -- confirm against
    # the _unlock implementation.
    with self._unlock():
        for thread in threads:
            thread.join(10)
    if self._uds_path:
        try:
            os.unlink(self._uds_path)
        except OSError:
            # Already removed, or never created.
            pass
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def receives(self, *args, **kwargs):
    """Pop the next `Request` and assert it matches.

    Returns None if the server is stopped.

    Pass a `Request` or request pattern to specify what client request
    to expect. See the tutorial for examples. Pass ``timeout`` as a
    keyword argument to override this server's ``request_timeout``.
    """
    timeout = kwargs.pop('timeout', self._request_timeout)
    end = time.time() + timeout
    matcher = Matcher(*args, **kwargs)
    while not self._stopped:
        try:
            # Short timeout so we notice if the server is stopped.
            request = self._request_q.get(timeout=0.05)
        except Empty:
            # Keep polling until the overall deadline passes.
            if time.time() > end:
                raise AssertionError('expected to receive %r, got nothing'
                                     % matcher.prototype)
        else:
            if matcher.matches(request):
                return request
            else:
                raise AssertionError('expected to receive %r, got %r'
                                     % (matcher.prototype, request))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def autoresponds(self, matcher, *args, **kwargs):
    """Send a canned reply to all matching client requests.

    ``matcher`` is a `Matcher` or a command name, or an instance of
    `OpInsert`, `OpQuery`, etc. The remaining arguments are a message
    spec, or a handler function returning True when it handled the
    request. Responders are applied in order, most recently added
    first, until one matches.
    """
    # New responders go on top so they take precedence.
    return self._insert_responder("top", matcher, *args, **kwargs)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def append_responder(self, matcher, *args, **kwargs):
    """Add a responder of last resort.

    Like `.autoresponds`, but instead of adding a responder to the top
    of the stack, add it to the bottom. This responder will be called
    if no others match.
    """
    # Bottom of the stack: consulted only after every other responder.
    return self._insert_responder("bottom", matcher, *args, **kwargs)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def uri(self):
    """Connection string to pass to `~pymongo.mongo_client.MongoClient`."""
    # Unix domain socket paths must be percent-encoded in the URI.
    if self._uds_path:
        host_part = quote_plus(self._uds_path)
    else:
        host_part = format_addr(self._address)
    base = 'mongodb://%s' % (host_part,)
    if self._ssl:
        return base + '/?ssl=true'
    return base
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _accept_loop(self):
    """Accept client connections and spawn a thread for each."""
    self._listening_sock.setblocking(0)
    while not self._stopped and not _shutting_down:
        try:
            # Wait up to a second for the socket to become readable.
            # BUG FIX: select.select() returns a 3-tuple of lists, which
            # is *always* truthy -- test the read-ready list so accept()
            # is only attempted when a connection is actually pending
            # (previously every poll tried accept() and relied on
            # EAGAIN being swallowed below).
            readable, _, _ = select.select(
                [self._listening_sock.fileno()], [], [], 1)
            if readable:
                client, client_addr = self._listening_sock.accept()
                client.setblocking(True)
                self._log('connection from %s' % format_addr(client_addr))
                server_thread = threading.Thread(
                    target=functools.partial(
                        self._server_loop, client, client_addr))
                # Store weakrefs to the thread and socket, so we can
                # dispose them in stop().
                self._server_threads[server_thread] = None
                self._server_socks[client] = None
                server_thread.daemon = True
                server_thread.start()
        except socket.error as error:
            if error.errno not in (
                    errno.EAGAIN, errno.EBADF, errno.EWOULDBLOCK):
                raise
        except select.error as error:
            if error.args[0] == errno.EBADF:
                # Closed.
                break
            else:
                raise
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _server_loop(self, client, client_addr):
    """Read requests from one client socket, 'client'."""
    while not self._stopped and not _shutting_down:
        try:
            # NOTE(review): _unlock presumably releases the server lock
            # while we block reading the socket -- confirm against the
            # _unlock implementation.
            with self._unlock():
                request = mock_server_receive_request(client, self)
            self._requests_count += 1
            self._log('%d\t%r' % (request.client_port, request))
            # Give most recently added responders precedence.
            for responder in reversed(self._autoresponders):
                if responder.handle(request):
                    self._log('\t(autoresponse)')
                    break
            else:
                # No autoresponder handled it; queue it for receives().
                self._request_q.put(request)
        except socket.error as error:
            if error.errno in (errno.ECONNRESET, errno.EBADF):
                # We hung up, or the client did.
                break
            raise
        except select.error as error:
            if error.args[0] == errno.EBADF:
                # Closed.
                break
            else:
                raise
        except AssertionError:
            traceback.print_exc()
            break
    self._log('disconnected: %s' % format_addr(client_addr))
    client.close()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_password(self, username, password):
    """The actual password checking logic.

    Separated from the authenticate code from Django for easier
    updating. Returns True if the Kerberos KDC accepts the
    username/password pair, False otherwise.
    """
    try:
        if SUPPORTS_VERIFY:
            kerberos.checkPassword(
                username.lower(), password,
                getattr(settings, "KRB5_SERVICE", ""),
                getattr(settings, "KRB5_REALM", ""),
                getattr(settings, "KRB5_VERIFY_KDC", True))
        else:
            kerberos.checkPassword(
                username.lower(), password,
                getattr(settings, "KRB5_SERVICE", ""),
                getattr(settings, "KRB5_REALM", ""))
        return True
    except kerberos.BasicAuthError:
        if getattr(settings, "KRB5_DEBUG", False):
            logger.exception("Failure during authentication")
        return False
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # SystemExit and KeyboardInterrupt. Deny access on any other
        # unexpected failure, but let process-control exceptions through.
        if getattr(settings, "KRB5_DEBUG", False):
            logger.exception("Failure during authentication")
        return False
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main():
    """Start an interactive `MockupDB`.

    Use like ``python -m mockupdb``.
    """
    from optparse import OptionParser
    parser = OptionParser('Start mock MongoDB server')
    # BUG FIX: without type='int' a port given on the command line is
    # passed through as a string (only the default 27017 was an int).
    parser.add_option('-p', '--port', dest='port', default=27017,
                      type='int',
                      help='port on which mock mongod listens')
    parser.add_option('-q', '--quiet',
                      action='store_false', dest='verbose', default=True,
                      help="don't print messages to stdout")
    options, cmdline_args = parser.parse_args()
    if cmdline_args:
        parser.error('Unrecognized argument(s): %s' % ' '.join(cmdline_args))
    server = interactive_server(port=options.port, verbose=options.verbose)
    try:
        server.run()
        print('Listening on port %d' % server.port)
        # Block "forever" until interrupted.
        time.sleep(1e6)
    except KeyboardInterrupt:
        server.stop()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _initialize_distance_grid(self):
"""Initialize the distance grid by calls to _grid_dist."""
|
p = [self._grid_distance(i) for i in range(self.num_neurons)]
return np.array(p)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _grid_distance(self, index):
    """Calculate the distance grid for a single index position.

    This is pre-calculated for fast neighborhood calculations
    later on (see _calc_influence).
    """
    # Take every dimension but the first in reverse
    # then reverse that list again.
    dimensions = np.cumprod(self.map_dimensions[1::][::-1])[::-1]
    # Unravel the flat neuron index into one coordinate per map axis.
    coord = []
    for idx, dim in enumerate(dimensions):
        if idx != 0:
            value = (index % dimensions[idx-1]) // dim
        else:
            value = index // dim
        coord.append(value)
    coord.append(index % self.map_dimensions[-1])
    # Accumulate squared per-axis offsets, broadcast over the map shape,
    # yielding the squared grid distance from `index` to every neuron.
    for idx, (width, row) in enumerate(zip(self.map_dimensions, coord)):
        x = np.abs(np.arange(width) - row) ** 2
        dims = self.map_dimensions[::-1]
        if idx:
            dims = dims[:-idx]
        x = np.broadcast_to(x, dims).T
        if idx == 0:
            distance = np.copy(x)
        else:
            distance += x.T
    return distance
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def topographic_error(self, X, batch_size=1):
    """Calculate the topographic error.

    The topographic error is a measure of the spatial organization of
    the map. Maps in which the most similar neurons are also close on
    the grid have low topographic error and indicate that a problem has
    been learned correctly. Formally, the topographic error is the
    proportion of units for which the two most similar neurons are not
    direct neighbors on the map.

    Parameters
    ----------
    X : numpy array
        The input data.
    batch_size : int
        The batch size to use when calculating the topographic error.

    Returns
    -------
    error : float
        The proportion of data points whose two best matching units are
        not adjacent on the grid. (Note: a single scalar, not the
        per-point vector the original docstring described.)
    """
    dist = self.transform(X, batch_size)
    # Sort the distances and get the indices of the two smallest distances
    # for each datapoint.
    res = dist.argsort(1)[:, :2]
    # Look up the grid distance between each point's two BMUs in the
    # precomputed distance grid.
    dgrid = self.distance_grid.reshape(self.num_neurons, self.num_neurons)
    res = np.asarray([dgrid[x, y] for x, y in res])
    # Grid distances greater than 1.0 mean the two BMUs are not
    # direct neighbors.
    return np.sum(res > 1.0) / len(res)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def neighbors(self, distance=2.0):
    """Get all neighbors for all neurons."""
    grid = self.distance_grid.reshape(self.num_neurons, self.num_neurons)
    # All (x, y) pairs within `distance` on the grid, excluding a
    # neuron paired with itself.
    close_x, close_y = np.nonzero(grid <= distance)
    for x, y in zip(close_x, close_y):
        if x != y:
            yield x, y
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def neighbor_difference(self):
    """Get the euclidean distance between a node and its neighbors."""
    totals = np.zeros(self.num_neurons)
    counts = np.zeros(self.num_neurons)
    # Pairwise distances between all weight vectors.
    distance, _ = self.distance_function(self.weights, self.weights)
    for node, neighbor in self.neighbors():
        totals[node] += distance[node, neighbor]
        counts[node] += 1
    # Mean distance to neighbors per node.
    return totals / counts
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def spread(self, X):
    """Calculate the average spread for each node.

    The average spread is a measure of how far each neuron is from the
    data points which cluster to it.

    Parameters
    ----------
    X : numpy array
        The input data.

    Returns
    -------
    spread : numpy array
        The average distance from each neuron to each data point.
    """
    distance, _ = self.distance_function(X, self.weights)
    # Collect, per winning neuron, the distance of every point it wins.
    per_neuron = defaultdict(list)
    for winner, row in zip(np.argmin(distance, 1), distance):
        per_neuron[winner].append(row[winner])
    # Neurons that win no points keep a spread of zero.
    result = np.zeros(self.num_neurons)
    for neuron, values in per_neuron.items():
        result[neuron] = np.mean(values)
    return result
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def receptive_field(self, X, identities, max_len=10, threshold=0.9, batch_size=1):
    """Calculate the receptive field of the SOM on some data.

    The receptive field is the common ending of all sequences which
    lead to the activation of a given BMU. If a SOM is well-tuned to
    specific sequences, it will have longer receptive fields, and
    therefore gives a better description of the dynamics of a given
    system.

    Parameters
    ----------
    X : numpy array
        Input data.
    identities : list
        A list of symbolic identities associated with each input.
        We expect this list to be as long as the input data.
    max_len : int, optional, default 10
        The maximum length to attempt to find. Raising this increases
        memory use.
    threshold : float, optional, default .9
        The threshold at which we consider a receptive field valid.
    batch_size : int, optional, default 1
        The batch size to use in prediction.

    Returns
    -------
    receptive_fields : dict
        A dictionary mapping from the neuron id to the found sequences
        for that neuron, as lists of symbols from identities.
    """
    receptive_fields = defaultdict(list)
    predictions = self.predict(X, batch_size)
    if len(predictions) != len(identities):
        raise ValueError("X and identities are not the same length: "
                         "{0} and {1}".format(len(X), len(identities)))
    for idx, p in enumerate(predictions.tolist()):
        # NOTE(review): when idx + 1 < max_len this slice start is
        # negative, so it counts from the END of identities -- confirm
        # this is intended for the first max_len - 1 items.
        receptive_fields[p].append(identities[idx+1 - max_len:idx+1])
    rec = {}
    for k, v in receptive_fields.items():
        # If there's only one sequence, we don't know
        # anything about how salient it is.
        seq = []
        if len(v) <= 1:
            continue
        else:
            # Walk the aligned sequences from the most recent symbol
            # backwards, keeping symbols shared by more than
            # `threshold` of them; stop (and record) at the first
            # position below threshold. NOTE(review): if every
            # position passes, rec[k] is never set -- confirm intended.
            for x in reversed(list(zip(*v))):
                x = Counter(x)
                if x.most_common(1)[0][1] / sum(x.values()) > threshold:
                    seq.append(x.most_common(1)[0][0])
                else:
                    rec[k] = seq
                    break
    return rec
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def invert_projection(self, X, identities):
    """Calculate the inverted projection.

    The inverted projection of a SOM is created by associating each
    weight with the input which matches it the most, giving a good
    approximation of the "influence" of each input item. Works best
    for symbolic (instead of continuous) input data.

    Parameters
    ----------
    X : numpy array
        Input data.
    identities : list
        A list of names for each of the input data. Must be the same
        length as X.

    Returns
    -------
    m : numpy array
        An array with the same shape as the map.
    """
    distances = self.transform(X)
    if len(distances) != len(identities):
        raise ValueError("X and identities are not the same length: "
                         "{0} and {1}".format(len(X), len(identities)))
    # argfunc names the reduction ('argmin' or 'argmax'); apply it over
    # axis 0 to find the best-matching input per neuron.
    best_inputs = getattr(distances, self.argfunc)(0)
    return np.array([identities[i] for i in best_inputs])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def map_weights(self):
    """Reshaped weights for visualization.

    The weights are reshaped as
    (W.shape[0], prod(W.shape[1:-1]), W.shape[2]) so patterns are easy
    to see, even for hyper-dimensional soms. For one-dimensional SOMs,
    the returned array is of shape (W.shape[0], 1, W.shape[2]).

    Returns
    -------
    w : numpy array
        A three-dimensional array containing the weights in a 2D array
        for easy visualization.
    """
    dims = self.map_dimensions
    if len(dims) == 1:
        # One-dimensional map: a singleton middle axis.
        grid_shape = (dims[0], 1, self.data_dimensionality)
    else:
        # Collapse all trailing map axes into one.
        grid_shape = (dims[0], np.prod(dims[1:]), self.data_dimensionality)
    return self.weights.reshape(grid_shape)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(cls, path):
    """Load a SOM from a JSON file saved with this package.

    Parameters
    ----------
    path : str
        The path to the JSON file.

    Returns
    -------
    s : cls
        A som of the specified class.
    """
    # BUG FIX: use a context manager so the file handle is closed
    # promptly instead of being left to the garbage collector.
    with open(path) as f:
        data = json.load(f)
    weights = np.asarray(data['weights'], dtype=np.float64)
    s = cls(data['map_dimensions'],
            data['params']['lr']['orig'],
            data['data_dimensionality'],
            influence=data['params']['infl']['orig'],
            lr_lambda=data['params']['lr']['factor'],
            infl_lambda=data['params']['infl']['factor'])
    s.weights = weights
    s.trained = True
    return s
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_dirs(self, directory):
    """Delete a directory recursively.

    :param directory: $PATH to directory.
    :type directory: ``str``
    """
    LOG.info('Removing directory [ %s ]', directory)
    # First delete every indexed file under the directory.
    for item in self._drectory_local_files(directory=directory):
        try:
            os.remove(item['local_object'])
        except OSError as exp:
            LOG.error(str(exp))
    # Then remove the directories themselves, deepest first.
    all_dirs = sorted((path for path, _, _ in os.walk(directory)),
                      reverse=True)
    for directory_path in all_dirs:
        try:
            os.removedirs(directory_path)
        except OSError as exp:
            # errno 2 (ENOENT) just means it's already gone.
            if exp.errno != 2:
                LOG.error(str(exp))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _return_container_objects(self):
"""Return a list of objects to delete. The return tuple will indicate if it was a userd efined list of objects as True of False. The list of objects is a list of dictionaries with the key being "container_object". :returns: tuple (``bol``, ``list``) """
|
# 1) Objects passed explicitly on the command line take precedence.
container_objects = self.job_args.get('object')
if container_objects:
    return True, [{'container_object': i} for i in container_objects]
# 2) Next, a file containing one object name per line.
container_objects = self.job_args.get('objects_file')
if container_objects:
    container_objects = os.path.expanduser(container_objects)
    if os.path.isfile(container_objects):
        with open(container_objects) as f:
            return True, [
                {'container_object': i.rstrip('\n')}
                for i in f.readlines()
            ]
# 3) Fall back to listing the container, optionally pattern-filtered.
container_objects = self._list_contents()
pattern_match = self.job_args.get('pattern_match')
if pattern_match:
    container_objects = self.match_filter(
        idx_list=container_objects,
        pattern=pattern_match,
        dict_type=True,
        dict_key='name'
    )
# Reformat list for processing
if container_objects and isinstance(container_objects[0], dict):
    return False, self._return_deque([
        {'container_object': i['name']} for i in container_objects
    ])
else:
    return False, self._return_deque()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _index_fs(self):
"""Returns a deque object full of local file system items. :returns: ``deque`` """
|
indexed_objects = self._return_deque()
directory = self.job_args.get('directory')
if directory:
indexed_objects = self._return_deque(
deque=indexed_objects,
item=self._drectory_local_files(
directory=directory
)
)
object_names = self.job_args.get('object')
if object_names:
indexed_objects = self._return_deque(
deque=indexed_objects,
item=self._named_local_files(
object_names=object_names
)
)
return indexed_objects
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def match_filter(self, idx_list, pattern, dict_type=False, dict_key='name'):
"""Return Matched items in indexed files. :param idx_list: :return list """
|
if dict_type is False:
return self._return_deque([
obj for obj in idx_list
if re.search(pattern, obj)
])
elif dict_type is True:
return self._return_deque([
obj for obj in idx_list
if re.search(pattern, obj.get(dict_key))
])
else:
return self._return_deque()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def print_horiz_table(self, data):
"""Print a horizontal pretty table from data."""
|
# Build list of returned objects.
return_objects = list()
fields = self.job_args.get('fields')
if not fields:
    # No explicit field list: use the union of all keys in the data.
    fields = set()
    for item_dict in data:
        for field_item in item_dict.keys():
            fields.add(field_item)
    fields = sorted(fields)
# FIX: the original attached an `else:` to the inner for-loop, which
# executes unconditionally after the loop — it read as conditional but
# was not. Append each normalized row directly instead.
for obj in data:
    # Normalize every row to the same field set (missing -> None).
    return_objects.append({item: obj.get(item) for item in fields})
table = prettytable.PrettyTable(fields)
for obj in return_objects:
    table.add_row([obj.get(i) for i in fields])
# Left-align every column.
for tbl in table.align.keys():
    table.align[tbl] = 'l'
sort_key = self.job_args.get('sort_by')
if sort_key:
    table.sortby = sort_key
self.printer(table)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def print_virt_table(self, data):
"""Print a vertical pretty table from data."""
|
table = prettytable.PrettyTable()
# One row per key, sorted for a stable display order.
sorted_keys = sorted(data.keys())
table.add_column('Keys', sorted_keys)
table.add_column('Values', [data.get(key) for key in sorted_keys])
# Left-align both columns.
for column in table.align:
    table.align[column] = 'l'
self.printer(table)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def printer(self, message, color_level='info'):
"""Print Messages and Log it. :param message: item to print to screen """
|
# Colorize output only when the user asked for it.
if not self.job_args.get('colorized'):
    print(message)
else:
    print(cloud_utils.return_colorized(msg=message, color=color_level))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_method(method):
"""Return an imported object. :param method: ``str`` DOT notation for import with Colin used to separate the class used for the job. :returns: ``object`` Loaded class object from imported method. """
|
# Split the class out from the job
module = method.split(':')
# Set the import module
_module_import = module[0]
# Set the class name to use
class_name = module[-1]
# import the module
module_import = __import__(_module_import, fromlist=[class_name])
# Return the attributes for the imported module and class
return getattr(module_import, class_name)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_manager(self, job_override=None):
"""The run manager. The run manager is responsible for loading the plugin required based on what the user has inputted using the parsed_command value as found in the job_args dict. If the user provides a *job_override* the method will attempt to import the module and class as provided by the user. Before the method attempts to run any job the run manager will first authenticate to the the cloud provider. :param job_override: ``str`` DOT notation for import with Colin used to separate the class used for the job. """
|
# Normalize every *_headers argument into a header dictionary.
for arg_name, arg_value in self.job_args.items():
    if arg_name.endswith('_headers'):
        if isinstance(arg_value, list):
            self.job_args[arg_name] = self._list_headers(
                headers=arg_value
            )
        elif arg_value:
            # BUG FIX: this branch was `elif not arg_name`, which can
            # never be true here (arg_name always ends with
            # '_headers'), so string header values were silently
            # replaced with an empty dict.
            self.job_args[arg_name] = self._str_headers(
                header=arg_value
            )
        else:
            self.job_args[arg_name] = dict()
# Set base header for the user-agent
self.job_args['base_headers']['User-Agent'] = 'turbolift'
LOG.info('Authenticating')
indicator_options = {'run': self.job_args.get('run_indicator', True)}
with indicator.Spinner(**indicator_options):
    LOG.debug('Authenticate against the Service API')
    self.job_args.update(auth.authenticate(job_args=self.job_args))
if job_override:
    # An explicit "module:Class" override bypasses the job map.
    action = self._get_method(method=job_override)
else:
    parsed_command = self.job_args.get('parsed_command')
    if not parsed_command:
        raise exceptions.NoCommandProvided(
            'Please provide a command. Basic commands are: %s',
            list(self.job_map.keys())
        )
    else:
        action = self._get_method(method=self.job_map[parsed_command])
run = action(job_args=self.job_args)
run.start()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def range_initialization(X, num_weights):
""" Initialize the weights by calculating the range of the data. The data range is calculated by reshaping the input matrix to a 2D matrix, and then taking the min and max values over the columns. Parameters X : numpy array The input data. The data range is calculated over the last axis. num_weights : int The number of weights to initialize. Returns ------- new_weights : numpy array A new version of the weights, initialized to the data range specified by X. """
|
# Randomly initialize weights to cover the range of each feature.
X_ = X.reshape(-1, X.shape[-1])
min_val, max_val = X_.min(0), X_.max(0)
data_range = max_val - min_val
return data_range * np.random.rand(num_weights,
X.shape[-1]) + min_val
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fit(self, X, num_epochs=10, updates_epoch=None, stop_param_updates=dict(), batch_size=1, show_progressbar=False, show_epoch=False, refit=True):
""" Fit the learner to some data. Parameters X : numpy array. The input data. num_epochs : int, optional, default 10 The number of epochs to train for. updates_epoch : int, optional, default 10 The number of updates to perform on the learning rate and neighborhood per epoch. 10 suffices for most problems. stop_param_updates : dict The epoch at which to stop updating each param. This means that the specified parameter will be reduced to 0 at the specified epoch. batch_size : int, optional, default 100 The batch size to use. Warning: batching can change your performance dramatically, depending on the task. show_progressbar : bool, optional, default False Whether to show a progressbar during training. show_epoch : bool, optional, default False Whether to print the epoch number to stdout """
|
# Infer the data dimensionality on first use and allocate weights.
if self.data_dimensionality is None:
    self.data_dimensionality = X.shape[-1]
    self.weights = np.zeros((self.num_neurons,
                             self.data_dimensionality))
X = self._check_input(X)
# (Re)initialize weights unless continuing a previous fit.
if not self.trained or refit:
    X = self._init_weights(X)
else:
    # Continuing training: move the weights into the scaled space.
    if self.scaler is not None:
        self.weights = self.scaler.transform(self.weights)
if updates_epoch is None:
    X_len = X.shape[0]
    # Cap the number of parameter updates at 50 per epoch.
    updates_epoch = np.min([50, X_len // batch_size])
# Precompute the per-parameter decay constants.
constants = self._pre_train(stop_param_updates,
                            num_epochs,
                            updates_epoch)
start = time.time()
for epoch in tqdm(range(num_epochs), disable=not show_epoch):
    logger.info("Epoch {0} of {1}".format(epoch+1, num_epochs))
    self._epoch(X,
                epoch,
                batch_size,
                updates_epoch,
                constants,
                show_progressbar)
self.trained = True
# Map the weights back into the original data space.
if self.scaler is not None:
    self.weights = self.scaler.inverse_transform(self.weights)
logger.info("Total train time: {0}".format(time.time() - start))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _init_weights(self, X):
"""Set the weights and normalize data before starting training."""
|
X = np.asarray(X, dtype=np.float64)
if self.scaler is not None:
X = self.scaler.fit_transform(X)
if self.initializer is not None:
self.weights = self.initializer(X, self.num_neurons)
for v in self.params.values():
v['value'] = v['orig']
return X
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _pre_train(self, stop_param_updates, num_epochs, updates_epoch):
"""Set parameters and constants before training."""
|
# Precompute per-parameter exponential decay constants; each call to
# _update_params multiplies the parameter value by its constant.
# Calculate the total number of updates given early stopping.
updates = {k: stop_param_updates.get(k, num_epochs) * updates_epoch
           for k, v in self.params.items()}
# Calculate the value of a single step given the number of allowed
# updates.
single_steps = {k: np.exp(-((1.0 - (1.0 / v)))
                * self.params[k]['factor'])
                for k, v in updates.items()}
# Calculate the factor given the true factor and the value of a
# single step.
constants = {k: np.exp(-self.params[k]['factor']) / v
             for k, v in single_steps.items()}
return constants
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fit_predict(self, X, num_epochs=10, updates_epoch=10, stop_param_updates=dict(), batch_size=1, show_progressbar=False):
"""First fit, then predict."""
|
self.fit(X,
num_epochs,
updates_epoch,
stop_param_updates,
batch_size,
show_progressbar)
return self.predict(X, batch_size=batch_size)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fit_transform(self, X, num_epochs=10, updates_epoch=10, stop_param_updates=dict(), batch_size=1, show_progressbar=False, show_epoch=False):
"""First fit, then transform."""
|
self.fit(X,
num_epochs,
updates_epoch,
stop_param_updates,
batch_size,
show_progressbar,
show_epoch)
return self.transform(X, batch_size=batch_size)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _update_params(self, constants):
"""Update params and return new influence."""
|
for k, v in constants.items():
self.params[k]['value'] *= v
influence = self._calculate_influence(self.params['infl']['value'])
return influence * self.params['lr']['value']
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _create_batches(self, X, batch_size, shuffle_data=True):
""" Create batches out of a sequence of data. This function will append zeros to the end of your data to ensure that all batches are even-sized. These are masked out during training. """
|
if shuffle_data:
    X = shuffle(X)
# Clamp the batch size to the number of available samples.
if batch_size > X.shape[0]:
    batch_size = X.shape[0]
# Number of batches needed to cover all rows.
max_x = int(np.ceil(X.shape[0] / batch_size))
# NOTE(review): np.resize pads by repeating the array from the start,
# not with zeros as the docstring states -- confirm which is intended.
X = np.resize(X, (max_x, batch_size, X.shape[-1]))
return X
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _propagate(self, x, influences, **kwargs):
"""Propagate a single batch of examples through the network."""
|
activation, difference_x = self.forward(x)
update = self.backward(difference_x, influences, activation)
# If batch size is 1 we can leave out the call to mean.
if update.shape[0] == 1:
self.weights += update[0]
else:
self.weights += update.mean(0)
return activation
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _check_input(self, X):
""" Check the input for validity. Ensures that the input data, X, is a 2-dimensional matrix, and that the second dimension of this matrix has the same dimensionality as the weight matrix. """
|
if np.ndim(X) == 1:
X = np.reshape(X, (1, -1))
if X.ndim != 2:
raise ValueError("Your data is not a 2D matrix. "
"Actual size: {0}".format(X.shape))
if X.shape[1] != self.data_dimensionality:
raise ValueError("Your data size != weight dim: {0}, "
"expected {1}".format(X.shape[1],
self.data_dimensionality))
return X
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def transform(self, X, batch_size=100, show_progressbar=False):
""" Transform input to a distance matrix by measuring the L2 distance. Parameters X : numpy array. The input data. batch_size : int, optional, default 100 The batch size to use in transformation. This may affect the transformation in stateful, i.e. sequential SOMs. show_progressbar : bool Whether to show a progressbar during transformation. Returns ------- transformed : numpy array A matrix containing the distance from each datapoint to all neurons. The distance is normally expressed as euclidean distance, but can be any arbitrary metric. """
|
X = self._check_input(X)
# Batches may be padded to equal size; padding is sliced off below.
batched = self._create_batches(X, batch_size, shuffle_data=False)
activations = []
prev = self._init_prev(batched)
for x in tqdm(batched, disable=not show_progressbar):
    # Sequential SOMs thread the previous activation through forward.
    prev = self.forward(x, prev_activation=prev)[0]
    activations.extend(prev)
activations = np.asarray(activations, dtype=np.float64)
# Drop the padding rows introduced by batching.
activations = activations[:X.shape[0]]
return activations.reshape(X.shape[0], self.num_neurons)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def predict(self, X, batch_size=1, show_progressbar=False):
""" Predict the BMU for each input data. Parameters X : numpy array. The input data. batch_size : int, optional, default 100 The batch size to use in prediction. This may affect prediction in stateful, i.e. sequential SOMs. show_progressbar : bool Whether to show a progressbar during prediction. Returns ------- predictions : numpy array An array containing the BMU for each input data point. """
|
dist = self.transform(X, batch_size, show_progressbar)
res = dist.__getattribute__(self.argfunc)(1)
return res
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def quantization_error(self, X, batch_size=1):
""" Calculate the quantization error. Find the the minimum euclidean distance between the units and some input. Parameters X : numpy array. The input data. batch_size : int The batch size to use for processing. Returns ------- error : numpy array The error for each data point. """
|
dist = self.transform(X, batch_size)
res = dist.__getattribute__(self.valfunc)(1)
return res
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(cls, path):
""" Load a SOM from a JSON file saved with this package. Parameters path : str The path to the JSON file. Returns ------- s : cls A som of the specified class. """
|
# Parse the serialized SOM description produced by save().
data = json.load(open(path))
weights = data['weights']
weights = np.asarray(weights, dtype=np.float64)
# Rebuild with the original ('orig') hyperparameter values and the
# decay lambdas ('factor') recorded at save time.
s = cls(data['num_neurons'],
        data['data_dimensionality'],
        data['params']['lr']['orig'],
        neighborhood=data['params']['infl']['orig'],
        valfunc=data['valfunc'],
        argfunc=data['argfunc'],
        lr_lambda=data['params']['lr']['factor'],
        nb_lambda=data['params']['nb']['factor'])
s.weights = weights
# Mark as trained so transform/predict work immediately.
s.trained = True
return s
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self, path):
"""Save a SOM to a JSON file."""
|
# Collect a JSON-serializable snapshot of every tracked parameter.
to_save = {}
for x in self.param_names:
    attr = self.__getattribute__(x)
    if type(attr) == np.ndarray:
        # JSON cannot encode ndarrays; convert to nested float lists.
        attr = [[float(x) for x in row] for row in attr]
    elif isinstance(attr, types.FunctionType):
        # Functions are stored by name and re-resolved on load.
        attr = attr.__name__
    to_save[x] = attr
# FIX: use a context manager so the file handle is closed and the
# buffer flushed; the original `json.dump(..., open(path, 'w'))`
# leaked the handle.
with open(path, 'w') as f:
    json.dump(to_save, f)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_authversion(job_args):
"""Get or infer the auth version. Based on the information found in the *AUTH_VERSION_MAP* the authentication version will be set to a correct value as determined by the **os_auth_version** parameter as found in the `job_args`. :param job_args: ``dict`` :returns: ``str`` """
|
_version = job_args.get('os_auth_version')
for version, variants in AUTH_VERSION_MAP.items():
if _version in variants:
authversion = job_args['os_auth_version'] = version
return authversion
else:
raise exceptions.AuthenticationProblem(
"Auth Version must be one of %s.",
list(AUTH_VERSION_MAP.keys())
)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_headers(self):
"""Setup headers for authentication request."""
|
try:
return {
'X-Auth-User': self.job_args['os_user'],
'X-Auth-Key': self.job_args['os_apikey']
}
except KeyError as exp:
raise exceptions.AuthenticationProblem(
'Missing Credentials. Error: %s',
exp
)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def auth_request(self, url, headers, body):
"""Perform auth request for token."""
|
# Delegate to the shared request object; returns the raw response.
return self.req.post(url, headers, body=body)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_reqtype(self):
"""Return the authentication body."""
|
# v1.0 auth passes credentials in headers, so the body is empty.
if self.job_args['os_auth_version'] == 'v1.0':
    return dict()
else:
    setup = {
        'username': self.job_args.get('os_user')
    }
    # Check if any prefix items are set. A prefix should be a
    # dictionary with keys matching the os_* credential type.
    prefixes = self.job_args.get('os_prefix')
    # Credential precedence: token, then password, then API key.
    if self.job_args.get('os_token') is not None:
        auth_body = {
            'auth': {
                'token': {
                    'id': self.job_args.get('os_token')
                }
            }
        }
        if not self.job_args.get('os_tenant'):
            raise exceptions.AuthenticationProblem(
                'To use token auth you must specify the tenant id. Set'
                ' the tenant ID with [ --os-tenant ]'
            )
    elif self.job_args.get('os_password') is not None:
        setup['password'] = self.job_args.get('os_password')
        if prefixes:
            prefix = prefixes.get('os_password')
            if not prefix:
                raise NotImplementedError(
                    'the `password` method is not implemented for this'
                    ' auth plugin'
                )
        else:
            prefix = 'passwordCredentials'
        auth_body = {
            'auth': {
                prefix: setup
            }
        }
    elif self.job_args.get('os_apikey') is not None:
        setup['apiKey'] = self.job_args.get('os_apikey')
        if prefixes:
            prefix = prefixes.get('os_apikey')
            if not prefix:
                raise NotImplementedError(
                    'the `apikey` method is not implemented for this'
                    ' auth plugin'
                )
        else:
            prefix = 'apiKeyCredentials'
        auth_body = {
            'auth': {
                prefix: setup
            }
        }
    else:
        raise exceptions.AuthenticationProblem(
            'No Password, APIKey, or Token Specified'
        )
    # Tenant scoping applies to every credential type.
    if self.job_args.get('os_tenant'):
        auth = auth_body['auth']
        auth['tenantName'] = self.job_args.get('os_tenant')
    LOG.debug('AUTH Request body: [ %s ]', auth_body)
    return auth_body
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def execute():
"""This is the run section of the application Turbolift."""
|
# Bail out early when invoked with no CLI arguments at all.
if len(sys.argv) <= 1:
    raise SystemExit(
        'No Arguments provided. use [--help] for more information.'
    )
# Capture user arguments
_args = arguments.ArgumentParserator(
    arguments_dict=turbolift.ARGUMENTS,
    env_name='TURBO',
    epilog=turbolift.VINFO,
    title='Turbolift',
    detail='Multiprocessing Swift CLI tool.',
    description='Manage Swift easily and fast.'
)
user_args = _args.arg_parser()
user_args['run_indicator'] = True
debug_log = False
stream_logs = True
# Load system logging
if user_args.get('debug'):
    debug_log = True
    # The spinner is disabled in debug mode to keep log output clean.
    user_args['run_indicator'] = False
# Load system logging
if user_args.get('quiet'):
    stream_logs = False
    user_args['run_indicator'] = False
_logging = logger.LogSetup(
    debug_logging=debug_log,
    colorized_messages=user_args.get('colorized', False)
)
_logging.default_logger(name='turbolift', enable_stream=stream_logs)
# Hand off to the worker, which dispatches the parsed command.
job = worker.Worker(job_args=user_args)
job.run_manager()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write(self, log_file, msg):
""" Append message to .log file """
|
try:
with open(log_file, 'a') as LogFile:
LogFile.write(msg + os.linesep)
except:
raise Exception('Error Configuring PyLogger.TextStorage Class.')
return os.path.isfile(log_file)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read(self, log_file):
""" Read messages from .log file """
|
if os.path.isdir(os.path.dirname(log_file)) and os.path.isfile(log_file):
with open(log_file, 'r') as LogFile:
data = LogFile.readlines()
data = "".join(line for line in data)
else:
data = ''
return data
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fit(self, X):
""" Fit the scaler based on some data. Takes the columnwise mean and standard deviation of the entire input array. If the array has more than 2 dimensions, it is flattened. Parameters X : numpy array Returns ------- scaled : numpy array A scaled version of said array. """
|
if X.ndim > 2:
X = X.reshape((np.prod(X.shape[:-1]), X.shape[-1]))
self.mean = X.mean(0)
self.std = X.std(0)
self.is_fit = True
return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def transform(self, X):
"""Transform your data to zero mean unit variance."""
|
if not self.is_fit:
raise ValueError("The scaler has not been fit yet.")
return (X-self.mean) / (self.std + 10e-7)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stupid_hack(most=10, wait=None):
"""Return a random time between 1 - 10 Seconds."""
|
# Stupid Hack For Public Cloud so it is not overwhelmed with API
# requests: an explicit wait wins, otherwise sleep a random
# 1..(most-1) seconds.
if wait is not None:
    delay = wait
else:
    delay = random.randrange(1, most)
time.sleep(delay)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def time_stamp():
"""Setup time functions :returns: ``tuple`` """
|
# Time constants
fmt = '%Y-%m-%dT%H:%M:%S.%f'
date = datetime.datetime
date_delta = datetime.timedelta
now = datetime.datetime.utcnow()
return fmt, date, date_delta, now
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unique_list_dicts(dlist, key):
"""Return a list of dictionaries which are sorted for only unique entries. :param dlist: :param key: :return list: """
|
return list(dict((val[key], val) for val in dlist).values())
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def quoter(obj):
"""Return a Quoted URL. The quote function will return a URL encoded string. If there is an exception in the job which results in a "KeyError" the original string will be returned as it will be assumed to already be URL encoded. :param obj: ``basestring`` :return: ``str`` """
|
try:
    try:
        # Python 2: quote lives directly on urllib.
        return urllib.quote(obj)
    except AttributeError:
        # Python 3: quote moved into urllib.parse.
        return urllib.parse.quote(obj)
except KeyError:
    # Assume the string is already URL encoded and return it as-is.
    return obj
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start(self):
"""Clone objects from one container to another. This method was built to clone a container between data-centers while using the same credentials. The method assumes that an authentication token will be valid within the two data centers. """
|
LOG.info('Clone warm up...')
# Create the target args
self._target_auth()
last_list_obj = None
while True:
    self.indicator_options['msg'] = 'Gathering object list'
    with indicator.Spinner(**self.indicator_options):
        # Page through the source container one listing at a time,
        # resuming from the last object marker.
        objects_list = self._list_contents(
            single_page_return=True,
            last_obj=last_list_obj
        )
        if not objects_list:
            return
    last_obj = utils.byte_encode(objects_list[-1].get('name'))
    LOG.info(
        'Last object [ %s ] Last object in the list [ %s ]',
        last_obj,
        last_list_obj
    )
    # Seeing the same marker twice means the listing is exhausted.
    if last_list_obj == last_obj:
        return
    else:
        last_list_obj = last_obj
    self._clone_worker(objects_list=objects_list)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def authenticate(job_args):
"""Authentication For Openstack API. Pulls the full Openstack Service Catalog Credentials are the Users API Username and Key/Password. Set a DC Endpoint and Authentication URL for the OpenStack environment """
|
# Load any authentication plugins as needed
job_args = utils.check_auth_plugin(job_args)
# Set the auth version
auth_version = utils.get_authversion(job_args=job_args)
# Define the base headers that are used in all authentications
auth_headers = {
    'Content-Type': 'application/json',
    'Accept': 'application/json'
}
auth_headers.update(job_args['base_headers'])
if auth_version == 'v1.0':
    # v1.0: credentials travel in the request headers; no body.
    auth = utils.V1Authentication(job_args=job_args)
    auth_headers.update(auth.get_headers())
    LOG.debug('Request Headers: [ %s ]', auth_headers)
    auth_url = job_args['os_auth_url']
    LOG.debug('Parsed Auth URL: [ %s ]', auth_url)
    auth_kwargs = {
        'url': auth_url,
        'headers': auth_headers
    }
else:
    # Later versions: credentials are JSON-encoded into the body.
    auth = utils.OSAuthentication(job_args=job_args)
    auth_url = auth.parse_region()
    LOG.debug('Parsed Auth URL: [ %s ]', auth_url)
    auth_json = auth.parse_reqtype()
    LOG.debug('Request Headers: [ %s ]', auth_headers)
    auth_body = json.dumps(auth_json)
    LOG.debug('Request JSON: [ %s ]', auth_body)
    auth_kwargs = {
        'url': auth_url,
        'headers': auth_headers,
        'body': auth_body
    }
auth_resp = auth.auth_request(**auth_kwargs)
if auth_resp.status_code >= 300:
    raise exceptions.AuthenticationProblem(
        'Authentication Failure, Status: [ %s ] Reason: [ %s ]',
        auth_resp.status_code,
        auth_resp.reason
    )
else:
    # Hand back the parsed service catalog / token information.
    return auth.parse_auth_response(auth_resp)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getConfig(self, key):
""" Get a Config Value """
|
if hasattr(self, key):
return getattr(self, key)
else:
return False
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def addFilter(self, filter):
""" Register Custom Filter """
|
self.FILTERS.append(filter)
return "FILTER#{}".format(len(self.FILTERS) - 1)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def addAction(self, action):
""" Register Custom Action """
|
self.ACTIONS.append(action)
return "ACTION#{}".format(len(self.ACTIONS) - 1)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def removeFilter(self, filter):
""" Remove Registered Filter """
|
# The handle has the form "FILTER#<index>" as returned by addFilter.
filter = filter.split('#')
# NOTE(review): deleting by index shifts every later entry, so older
# handles become stale after a removal -- confirm this is intended.
del self.FILTERS[int(filter[1])]
return True
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def removeAction(self, action):
""" Remove Registered Action """
|
# The handle has the form "ACTION#<index>" as returned by addAction.
action = action.split('#')
# NOTE(review): deleting by index shifts every later entry, so older
# handles become stale after a removal -- confirm this is intended.
del self.ACTIONS[int(action[1])]
return True
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def info(self, msg):
""" Log Info Messages """
|
# Pipeline: actions -> filters -> persist -> optional alert email.
level = 'info'
self._execActions(level, msg)
msg = self._execFilters(level, msg)
self._processMsg(level, msg)
self._sendMsg(level, msg)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def warning(self, msg):
""" Log Warning Messages """
|
# Pipeline: actions -> filters -> persist -> optional alert email.
level = 'warning'
self._execActions(level, msg)
msg = self._execFilters(level, msg)
self._processMsg(level, msg)
self._sendMsg(level, msg)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def error(self, msg):
""" Log Error Messages """
|
# Pipeline: actions -> filters -> persist -> optional alert email.
level = 'error'
self._execActions(level, msg)
msg = self._execFilters(level, msg)
self._processMsg(level, msg)
self._sendMsg(level, msg)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def critical(self, msg):
""" Log Critical Messages """
|
# Pipeline: actions -> filters -> persist -> optional alert email.
level = 'critical'
self._execActions(level, msg)
msg = self._execFilters(level, msg)
self._processMsg(level, msg)
self._sendMsg(level, msg)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def log(self, msg):
""" Log Normal Messages """
|
# Pipeline: actions -> filters -> persist -> optional alert email.
level = 'log'
self._execActions(level, msg)
msg = self._execFilters(level, msg)
self._processMsg(level, msg)
self._sendMsg(level, msg)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _processMsg(self, type, msg):
    """Format a log message and append it to the current log file.

    The file name is derived from ``LOG_FILE_FORMAT`` and the current
    time; the message body is built from ``LOG_MESSAGE_FORMAT`` with
    TYPE / DATE / DATETIME / MESSAGE placeholders. When
    ``PLATFORM_DATA`` is enabled a second formatting pass fills the
    ``PL_*`` platform placeholders.

    :param type: log level name, e.g. ``'info'``.
    :param msg: message text.
    :returns: result of ``Storage.write`` — presumably a success flag;
        confirm against the Storage class.
    """
    now = datetime.datetime.now()
    # Default the log directory to this module's own directory.
    if self.LOG_FILE_PATH == '':
        self.LOG_FILE_PATH = os.path.dirname(os.path.abspath(__file__)) + '/'
    # Absolute path of the dated log file.
    log_file = '{0}{1}.log'.format(
        self.LOG_FILE_PATH, now.strftime(self.LOG_FILE_FORMAT))
    # First pass: generic placeholders.
    msg = self.LOG_MESSAGE_FORMAT.format(
        TYPE=type.upper(),
        DATE=now.strftime(self.DATES_FORMAT),
        DATETIME=now.strftime(self.DATETIME_FORMAT),
        MESSAGE=msg,
    )
    # Second pass: platform-specific placeholders, when enabled.
    if self.PLATFORM_DATA:
        platform_fields = {
            'PL_TYPE': platform.machine(),
            'PL_NAME': platform.node(),
            'PL_PROCESSOR': platform.processor(),
            'PL_PY_BUILD_DATE': platform.python_build()[1],
            'PL_PY_COMPILER': platform.python_compiler(),
            'PL_PY_RELEASE': platform.release(),
            'PL_OS': platform.system(),
            'PL_TIMEZONE': strftime("%z", gmtime()),
        }
        msg = msg.format(**platform_fields)
    # Persist through a fresh Storage instance bound to today's file.
    self._STORAGE = Storage(log_file)
    return self._STORAGE.write(msg)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _configMailer(self):
    """Create the SMTP mailer and log in with the configured credentials.

    Stores the authenticated client on ``self._MAILER`` for use by
    ``_sendMsg``.
    """
    self._MAILER = Mailer(self.MAILER_HOST, self.MAILER_PORT)
    self._MAILER.login(self.MAILER_USER, self.MAILER_PWD)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _sendMsg(self, type, msg):
""" Send Alert Message To Emails """
|
if self.ALERT_STATUS and type in self.ALERT_TYPES:
self._configMailer()
self._MAILER.send(self.MAILER_FROM, self.ALERT_EMAIL, self.ALERT_SUBJECT, msg)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _execFilters(self, type, msg):
""" Execute Registered Filters """
|
for filter in self.FILTERS:
msg = filter(type, msg)
return msg
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _execActions(self, type, msg):
""" Execute Registered Actions """
|
for action in self.ACTIONS:
action(type, msg)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def auth_plugins(auth_plugins=None):
    """Return the mapping of built-in authentication plugins.

    Each entry describes one rapid-auth option for an OpenStack-style
    environment. When the auth URL contains ``%(region)s`` the chosen
    region is substituted into it. An optional ``os_prefix`` mapping
    overrides the authentication body prefix per credential type
    (``os_apikey`` / ``os_password`` / ``os_token``); a credential type
    missing from it raises ``NotImplementedError`` downstream.

    :param auth_plugins: extra plugin definitions merged on top of the
        built-ins (same-named keys are overridden).
    :type auth_plugins: ``dict`` or ``None``
    :returns: ``dict`` of plugin name -> plugin definition.
    """
    # Rackspace v2 endpoints share the same credential-prefix mapping;
    # copied per plugin so callers mutating one entry don't affect the other.
    rax_key_prefixes = {
        'os_apikey': 'RAX-KSKEY:apiKeyCredentials',
        'os_password': 'passwordCredentials'
    }
    plugins = {
        'os_rax_auth': {
            'os_auth_url': 'https://identity.api.rackspacecloud.com/v2.0/tokens',
            'os_prefix': dict(rax_key_prefixes),
            'args': {
                'commands': ['--os-rax-auth'],
                'choices': ['dfw', 'ord', 'iad', 'syd', 'hkg', 'lon'],
                'help': 'Authentication Plugin for Rackspace Cloud'
                        ' env[OS_RAX_AUTH]',
                'default': os.environ.get('OS_RAX_AUTH', None),
                'metavar': '[REGION]'
            }
        },
        'rax_auth_v1': {
            'os_auth_version': 'v1.0',
            'os_auth_url': 'https://identity.api.rackspacecloud.com/v1.0',
            'args': {
                'commands': ['--rax-auth-v1'],
                'action': 'store_true',
                'help': 'Authentication Plugin for Rackspace Cloud V1'
            }
        },
        'os_rax_auth_lon': {
            'os_auth_url': 'https://lon.identity.api.rackspacecloud.com'
                           '/v2.0/tokens',
            'os_prefix': dict(rax_key_prefixes),
            'args': {
                'commands': ['--os-rax-auth-lon'],
                'choices': ['lon'],
                'help': 'Authentication Plugin for Rackspace Cloud'
                        ' env[OS_RAX_AUTH_LON]',
                'default': os.environ.get('OS_RAX_AUTH_LON', None),
                'metavar': '[REGION]'
            }
        },
        'os_hp_auth': {
            'os_auth_url': 'https://%(region)s.identity.hpcloudsvc.com:35357'
                           '/v2.0/tokens',
            'os_prefix': {'os_password': 'passwordCredentials'},
            'args': {
                'commands': ['--os-hp-auth'],
                'choices': ['region-b.geo-1', 'region-a.geo-1'],
                'help': 'Authentication Plugin for HP Cloud'
                        ' env[OS_HP_AUTH]',
                'default': os.environ.get('OS_HP_AUTH', None),
                'metavar': '[REGION]'
            }
        }
    }
    if auth_plugins:
        plugins.update(auth_plugins)
    return plugins
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_basestring(item):
    """Return whether *item* is a text string, on Python 2 or 3.

    On Python 2 this checks ``basestring``/``unicode``; on Python 3,
    where those builtins do not exist, it falls back to ``str``.

    :param item: object to check.
    :returns: ``bool``
    """
    try:
        string_types = (basestring, unicode)
    except NameError:
        # Python 3: basestring/unicode were removed.
        string_types = (str,)
    return isinstance(item, string_types)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def predict_distance(self, X, batch_size=1, show_progressbar=False):
    """Predict the per-neuron activation/distance for each input sample.

    The data is validated, batched (without shuffling, so order is
    preserved), and fed through the recurrent forward pass one batch at
    a time, carrying the previous activation along.

    :param X: input data; assumes the last axis is the feature
        dimension — TODO confirm against _check_input.
    :param batch_size: number of samples per forward pass.
    :param show_progressbar: show a tqdm progress bar when True.
    :returns: array of shape ``(num_samples, num_neurons)``.
    """
    X = self._check_input(X)
    # Total sample count = product of all leading (non-feature) dims.
    num_samples = reduce(np.multiply, X.shape[:-1], 1)
    batches = self._create_batches(X, batch_size, shuffle_data=False)
    collected = []
    prev = self._init_prev(batches)
    for batch in tqdm(batches, disable=not show_progressbar):
        prev = self.forward(batch, prev_activation=prev)[0]
        collected.append(prev)
    # (batches, batch_size, neurons) -> (batch_size, batches, neurons),
    # then trim batching padding and flatten to one row per sample.
    stacked = np.asarray(collected, dtype=np.float64).transpose((1, 0, 2))
    stacked = stacked[:num_samples]
    return stacked.reshape(num_samples, self.num_neurons)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate(self, num_to_generate, starting_place):
    """Generate a sequence of best-matching-unit indices.

    Starting from an initial activation, repeatedly feed the weight
    vector of the current winning unit back through the network and
    record each new winner.

    :param num_to_generate: how many steps to generate.
    :param starting_place: initial activation vector (1-D).
    :returns: list of winning-unit index arrays, one per step.
    """
    generated = []
    activation = starting_place[None, :]
    # self.argfunc names the winner-selection method ('argmax'/'argmin').
    winner = getattr(activation, self.argfunc)(1)
    current_item = self.weights[winner]
    for _ in range(num_to_generate):
        activation = self.forward(current_item, prev_activation=activation)[0]
        winner = getattr(activation, self.argfunc)(1)
        generated.append(winner)
        current_item = self.weights[winner]
    return generated
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def forward(self, x, **kwargs):
    """Perform one forward pass through the recursive SOM.

    The activation combines the distance of the current input to the
    weights with the distance of the previous time-step's activation
    to the context weights, weighted by ``alpha`` and ``beta``
    respectively, then squashed through a negative exponential.

    :param x: input data for the current time-step.
    :param kwargs: must contain ``prev_activation``, the activation of
        the previous time-step.
    :returns: tuple ``(activation, diff_x, diff_y)`` — per-unit
        activations, input-vs-weight differences, and
        context-vs-context-weight differences.
    """
    prev_activation = kwargs['prev_activation']
    # Distances (and raw differences) for input and context paths.
    dist_x, diff_x = self.distance_function(x, self.weights)
    dist_y, diff_y = self.distance_function(prev_activation,
                                            self.context_weights)
    # Weighted blend of both distances, squashed to (0, 1].
    blended = (dist_x * self.alpha) + (dist_y * self.beta)
    return np.exp(-blended), diff_x, diff_y
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(cls, path):
    """Load a recursive SOM from a JSON file.

    Weights of other SOM types can be loaded too: missing context
    weights default to zeros, and missing alpha/beta default to 1.0.

    :param cls: the SOM class to instantiate.
    :param path: path to the JSON file.
    :returns: an instance of ``cls`` with weights restored and
        ``trained`` set to ``True``.
    """
    # Use a context manager so the file handle is closed promptly
    # (the original json.load(open(path)) leaked the handle).
    with open(path) as file_handle:
        data = json.load(file_handle)

    weights = np.asarray(data['weights'], dtype=np.float64)
    try:
        context_weights = np.asarray(data['context_weights'],
                                     dtype=np.float64)
    except KeyError:
        # No context weights stored: default to an all-zero square map.
        context_weights = np.zeros((len(weights), len(weights)))
    try:
        alpha = data['alpha']
        beta = data['beta']
    except KeyError:
        # Either parameter missing: fall back to neutral weighting.
        alpha = 1.0
        beta = 1.0
    s = cls(data['map_dimensions'],
            data['data_dimensionality'],
            data['params']['lr']['orig'],
            influence=data['params']['infl']['orig'],
            alpha=alpha,
            beta=beta,
            lr_lambda=data['params']['lr']['factor'],
            infl_lambda=data['params']['infl']['factor'])
    s.weights = weights
    s.context_weights = context_weights
    s.trained = True
    return s
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _return_base_data(self, url, container, container_object=None,
                      container_headers=None, object_headers=None):
    """Return request headers and the fully parsed container URI.

    :param url: parsed endpoint URL (urlparse result).
    :param container: container name, appended (quoted) to the URI.
    :param container_object: object name, appended (quoted) after the
        container.
    :param container_headers: extra headers for the container request.
    :param object_headers: extra headers for the object request.
    :returns: ``tuple`` of (headers dict, parsed URI).
    """
    # Copy the shared base headers: updating them in place (as the
    # original code did) leaked the auth token and per-request
    # object/container headers into job_args across calls.
    headers = dict(self.job_args['base_headers'])
    headers['X-Auth-Token'] = self.job_args['os_token']
    _container_uri = url.geturl().rstrip('/')
    if container:
        _container_uri = '%s/%s' % (
            _container_uri, cloud_utils.quoter(container)
        )
    if container_object:
        _container_uri = '%s/%s' % (
            _container_uri, cloud_utils.quoter(container_object)
        )
    if object_headers:
        headers.update(object_headers)
    if container_headers:
        headers.update(container_headers)
    return headers, urlparse.urlparse(_container_uri)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.