text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete_all_secrets(cls, user, client_id):
"""Delete all of the client's credentials""" |
# Tornado-style coroutine classmethod (``yield`` on futures).
# NOTE(review): indentation was flattened in this dump; code kept verbatim.
# Authorization gate: refuse unless the requesting user may delete them.
can_delete = yield cls(client_id=client_id).can_delete(user)
if not can_delete:
raise exceptions.Unauthorized('User may not delete {} secrets'
.format(client_id))
# Fetch every credential document keyed by this client id.
results = yield cls.view.get(key=client_id, include_docs=True)
if results['rows']:
db = cls.db_client()
# Build bulk-delete stubs: ``_deleted: True`` plus ``_id``/``_rev``
# marks each doc for removal on save (CouchDB-style bulk API -- TODO confirm).
docs = [{
'_rev': doc['doc']['_rev'],
'_id': doc['doc']['_id'],
'_deleted': True
} for doc in results['rows']]
yield db.save_docs(docs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sendContact(self, context={}):
""" Send contact form message to single or multiple recipients """ |
# NOTE(review): mutable default ``context={}`` is shared across calls --
# safe only while never mutated. ``async`` is a reserved word from
# Python 3.7 on, so ``self.async`` pins this code to older interpreters.
for recipient in self.recipients:
# Re-initialise the parent mailer for each recipient, then send the
# same templated message to that recipient.
super(ContactFormMail, self).__init__(recipient, self.async)
self.sendEmail('contactForm', 'New contact form message', context) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def proxify_elt(elt, bases=None, _dict=None, public=False):
"""Proxify input elt.

:param elt: elt to proxify.
:param bases: elt class base classes. If None, use elt type.
:param dict _dict: specific elt class content to use.
:param bool public: if True (default False), only members whose name
    starts with '_' are proxified (see NOTE below -- the flag name
    suggests the opposite; confirm intent against callers).
:return: proxified element.
:raises: TypeError if elt does not implement all routines of bases and
    _dict.
""" |
# ensure _dict is a dictionary
proxy_dict = {} if _dict is None else _dict.copy()
# set of proxified attribute names which are proxified during bases parsing
# and avoid to proxify them twice during _dict parsing
proxified_attribute_names = set()
# ensure bases is a tuple of types
if bases is None:
bases = (elt if isclass(elt) else elt.__class__,)
if isinstance(bases, string_types):
bases = (lookup(bases),)
elif isclass(bases):
bases = (bases,)
else:
bases = tuple(bases)
# fill proxy_dict with routines of bases
for base in bases:
# exclude object
if base is object:
continue
for name, member in getmembers(base, isroutine):
# check if name is public
# NOTE(review): when public=True this *skips* names that do NOT
# start with '_', i.e. it keeps only underscore-prefixed members,
# which contradicts the usual meaning of "public" -- verify.
if public and not name.startswith('_'):
continue
# elt must provide every routine the base declares
eltmember = getattr(elt, name, None)
if eltmember is None:
raise TypeError(
'Wrong elt {0}. Must implement {1} ({2}) of {3}.'.
format(elt, name, member, base)
)
# proxify member if member is not a constructor
if name not in ['__new__', '__init__']:
# get routine from proxy_dict or eltmember
routine = proxy_dict.get(name, eltmember)
# exclude object methods
if getattr(routine, '__objclass__', None) is not object:
# get routine proxy
routine_proxy = proxify_routine(routine)
if ismethod(routine_proxy):
routine_proxy = get_method_function(routine_proxy)
# update proxy_dict
proxy_dict[name] = routine_proxy
# and save the proxified attribute flag
proxified_attribute_names.add(name)
# proxify proxy_dict
for name in proxy_dict:
value = proxy_dict[name]
if not hasattr(elt, name):
raise TypeError(
'Wrong elt {0}. Must implement {1} ({2}).'.format(
elt, name, value
)
)
if isroutine(value):
# if member has not already been proxified
if name not in proxified_attribute_names:
# proxify it
value = proxify_routine(value)
proxy_dict[name] = value
# set default constructors if not present in proxy_dict
if '__new__' not in proxy_dict:
proxy_dict['__new__'] = object.__new__
if '__init__' not in proxy_dict:
proxy_dict['__init__'] = object.__init__
# generate a new proxy class
cls = type('Proxy', bases, proxy_dict)
# instantiate proxy cls (classes are returned as-is, instances wrapped)
result = cls if isclass(elt) else cls()
# bind elt to proxy so proxified_elt()/is_proxy() can find it later
setattr(result, __PROXIFIED__, elt)
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def proxify_routine(routine, impl=None):
"""Proxify a routine with input impl.

:param routine: routine to proxify.
:param impl: new impl to use. If None, use routine.
""" |
# init impl
impl = routine if impl is None else impl
# unwrap bound methods so we work with the underlying function
is_method = ismethod(routine)
if is_method:
function = get_method_function(routine)
else:
function = routine
# flag which indicates that the function is not a pure python function
# and has to be wrapped
wrap_function = not hasattr(function, '__code__')
try:
# get params from routine
args, varargs, kwargs, _ = getargspec(function)
except TypeError:
# in case of error, wrap the function
wrap_function = True
if wrap_function:
# if function is not pure python, create a generic one
# with assignments
assigned = []
for wrapper_assignment in WRAPPER_ASSIGNMENTS:
if hasattr(function, wrapper_assignment):
assigned.append(wrapper_assignment)
# and updates
updated = []
for wrapper_update in WRAPPER_UPDATES:
if hasattr(function, wrapper_update):
updated.append(wrapper_update)
# empty-bodied shell: only its copied metadata and generic (*args,
# **kwargs) signature feed the code generation below
@wraps(function, assigned=assigned, updated=updated)
def wrappedfunction(*args, **kwargs):
"""Default wrap function."""
function = wrappedfunction
# get params from function
args, varargs, kwargs, _ = getargspec(function)
name = function.__name__
# generate a new function whose body forwards to ``impl``
result = _compilecode(
function=function, name=name, impl=impl,
args=args, varargs=varargs, kwargs=kwargs
)
# set wrapping assignments
for wrapper_assignment in WRAPPER_ASSIGNMENTS:
try:
value = getattr(function, wrapper_assignment)
except AttributeError:
pass
else:
setattr(result, wrapper_assignment, value)
# set proxy module
result.__module__ = proxify_routine.__module__
# update wrapping updating
for wrapper_update in WRAPPER_UPDATES:
try:
value = getattr(function, wrapper_update)
except AttributeError:
pass
else:
getattr(result, wrapper_update).update(value)
# set proxyfied element on proxy
setattr(result, __PROXIFIED__, routine)
if is_method: # create a new method
args = [result, get_method_self(routine)]
if PY2:
# Python 2 MethodType also takes the owning class
args.append(routine.im_class)
result = MethodType(*args)
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compilecode(function, name, impl, args, varargs, kwargs):
"""Get generated code.

:return: function proxy generated code.
:rtype: str
""" |
# render the proxy function source and the names it uses
newcodestr, generatedname, impl_name = _generatecode(
function=function, name=name, impl=impl,
args=args, varargs=varargs, kwargs=kwargs
)
try:
__file__ = getfile(function)
except TypeError:
# builtins and the like have no source file
__file__ = '<string>'
# compile newcodestr
code = compile(newcodestr, __file__, 'single')
# define the code with the new function
_globals = {}
exec_(code, _globals)
# get new code
_var = _globals[generatedname]
newco = get_function_code(_var)
# get new consts list
newconsts = list(newco.co_consts)
if PY3:
newcode = list(newco.co_code)
else:
newcode = [ord(co) for co in newco.co_code]
consts_values = {impl_name: impl}
# change LOAD_GLOBAL to LOAD_CONST so ``impl`` is baked into the code
# object instead of being looked up in globals at call time.
# NOTE(review): assumes each opcode is followed by a 16-bit little-endian
# oparg (CPython < 3.6 bytecode layout) -- confirm target interpreter.
index = 0
newcodelen = len(newcode)
while index < newcodelen:
if newcode[index] == LOAD_GLOBAL:
oparg = newcode[index + 1] + (newcode[index + 2] << 8)
name = newco.co_names[oparg]
if name in consts_values:
const_value = consts_values[name]
if const_value in newconsts:
pos = newconsts.index(const_value)
else:
pos = len(newconsts)
newconsts.append(consts_values[name])
newcode[index] = LOAD_CONST
newcode[index + 1] = pos & 0xFF
newcode[index + 2] = pos >> 8
index += 1
# rebuild a code object carrying the patched bytecode and consts
codeobj = getcodeobj(newconsts, newcode, newco, get_function_code(function))
# instanciate a new function
if function is None or isbuiltin(function):
result = FunctionType(codeobj, {})
else:
# preserve the original function's globals, defaults and closure
result = type(function)(
codeobj,
get_function_globals(function),
function.__name__,
get_function_defaults(function),
get_function_closure(function)
)
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_proxy(elt, bases=None, _dict=None):
    """Return a proxy for ``elt``.

    If ``elt`` provides its own proxy generator (attribute named by
    ``__GETPROXY__``), that generator is used. Otherwise routines are
    proxified with ``proxify_routine`` and plain objects with
    ``proxify_elt``.

    :param elt: elt to proxify.
    :type elt: object or function/method
    :param bases: base types to enrich in the result cls if not None.
    :param _dict: class members to proxify if not None.
    """
    proxygenerator = getattr(elt, __GETPROXY__, None)

    if proxygenerator is not None:
        # the element knows how to build its own proxy
        return proxygenerator()

    if isroutine(elt):
        return proxify_routine(elt)

    # plain object: build a dedicated Proxy class around it
    return proxify_elt(elt, bases=bases, _dict=_dict)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def proxified_elt(proxy):
    """Return the element hidden behind ``proxy``.

    :param proxy: proxy element from where get proxified element.
    :return: the proxified element, or None when ``proxy`` is not a proxy.
    """
    # bound methods carry the marker on their underlying function
    target = get_method_function(proxy) if ismethod(proxy) else proxy
    return getattr(target, __PROXIFIED__, None)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_proxy(elt):
    """Check whether ``elt`` is a proxy.

    :param elt: elt to check such as a proxy.
    :return: True iff elt carries the proxified marker attribute.
    :rtype: bool
    """
    # bound methods carry the marker on their underlying function
    target = get_method_function(elt) if ismethod(elt) else elt
    return hasattr(target, __PROXIFIED__)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ajax_login_required(view_func):
"""Handle non-authenticated users differently if it is an AJAX request.""" |
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if request.is_ajax():
if request.user.is_authenticated():
return view_func(request, *args, **kwargs)
else:
# Anonymous AJAX caller: signal via headers instead of a
# redirect so client-side code can show its own login prompt.
response = http.HttpResponse()
response['X-Django-Requires-Auth'] = True
response['X-Django-Login-Url'] = settings.LOGIN_URL
return response
else:
# Non-AJAX requests get Django's regular login_required redirect.
return login_required(view_func)(request, *args, **kwargs)
return _wrapped_view |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ajax_only(view_func):
    """Restrict the wrapped view to AJAX (XMLHttpRequest) calls only.

    Non-AJAX requests receive a 400 Bad Request response.
    """
    @wraps(view_func, assigned=available_attrs(view_func))
    def _wrapped_view(request, *args, **kwargs):
        # guard clause: reject anything that is not an AJAX request
        if not request.is_ajax():
            return http.HttpResponseBadRequest()
        return view_func(request, *args, **kwargs)
    return _wrapped_view
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def anonymous_required(func=None, url=None):
"""Required that the user is not logged in.""" |
# Default redirect target for users who are already authenticated.
url = url or "/"
def _dec(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if request.user.is_authenticated():
return redirect(url)
else:
return view_func(request, *args, **kwargs)
return _wrapped_view
# Support both bare ``@anonymous_required`` and parameterised
# ``@anonymous_required(url=...)`` usage.
if func is None:
return _dec
else:
return _dec(func) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def secure(view_func):
"""Handles SSL redirect on the view level.""" |
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if not request.is_secure():
# NOTE(review): local ``redirect`` shadows any module-level name
# of the same name within this wrapper.
redirect = _redirect(request, True)
if redirect:
# Redirect might be None if SSL is not enabled
return redirect
return view_func(request, *args, **kwargs)
return _wrapped_view |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def encode_int(n):
""" Encodes an int as a variable length signed 29-bit integer as defined by the spec. @param n: The integer to be encoded @return: The encoded string @rtype: C{str} @raise OverflowError: Out of range. """ |
global ENCODED_INT_CACHE
# fast path: previously encoded values are memoised
try:
return ENCODED_INT_CACHE[n]
except KeyError:
pass
if n < MIN_29B_INT or n > MAX_29B_INT:
raise OverflowError("Out of range")
# negatives are mapped into the unsigned 29-bit space (two's complement)
if n < 0:
n += 0x20000000
# NOTE(review): ``bytes`` shadows the builtin; this is a Python 2 str.
bytes = ''
real_value = None
# each leading byte carries 7 data bits with the high bit (0x80) set as a
# continuation marker; the 4-byte form packs 8 data bits into the last byte
if n > 0x1fffff:
real_value = n
n >>= 1
bytes += chr(0x80 | ((n >> 21) & 0xff))
if n > 0x3fff:
bytes += chr(0x80 | ((n >> 14) & 0xff))
if n > 0x7f:
bytes += chr(0x80 | ((n >> 7) & 0xff))
if real_value is not None:
n = real_value
if n > 0x1fffff:
bytes += chr(n & 0xff)
else:
bytes += chr(n & 0x7f)
# cache key is the (possibly sign-adjusted) value -- encodings for n and
# n + 0x20000000 coincide, so the shared entry is still correct
ENCODED_INT_CACHE[n] = bytes
return bytes |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def writeBoolean(self, value):
    """Write a Boolean value as a single byte.

    ``True`` is written as byte 1, ``False`` as byte 0.

    @type value: C{bool}
    @param value: The flag to serialise.
    @raise ValueError: Non-boolean value found.
    """
    if not isinstance(value, bool):
        raise ValueError("Non-boolean value found")
    # one byte on the wire: 1 for True, 0 for False
    self.stream.write_uchar(1 if value is True else 0)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def writeMultiByte(self, value, charset):
""" Writes a multibyte string to the datastream using the specified character set. @type value: C{str} @param value: The string value to be written. @type charset: C{str} @param charset: The string denoting the character set to use. Possible character set strings include C{shift-jis}, C{cn-gb}, C{iso-8859-1} and others. @see: U{Supported character sets on Livedocs (external) <http://livedocs.adobe.com/flex/201/langref/charset-codes.html>} """ |
# Python 2: ``unicode`` text is encoded to bytes in the target charset
# first; byte strings pass through untouched.
# NOTE(review): exact ``type`` check -- unicode subclasses bypass encoding.
if type(value) is unicode:
value = value.encode(charset)
self.stream.write(value) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def writeUTF(self, value):
""" Writes a UTF-8 string to the data stream. The length of the UTF-8 string in bytes is written first, as a 16-bit integer, followed by the bytes representing the characters of the string. @type value: C{str} @param value: The string value to be written. """ |
# Encode into a scratch buffer first so the byte length (not the
# character count) can be emitted as the leading unsigned short.
buf = util.BufferedByteStream()
buf.write_utf8_string(value)
bytes = buf.getvalue()
self.stream.write_ushort(len(bytes))
self.stream.write(bytes) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def readMultiByte(self, length, charset):
""" Reads a multibyte string of specified length from the data stream using the specified character set. @type length: C{int} @param length: The number of bytes from the data stream to read. @type charset: C{str} @param charset: The string denoting the character set to use. @rtype: C{str} @return: UTF-8 encoded string. """ |
#FIXME nick: how to work out the code point byte size (on the fly)?
# ``length`` counts raw bytes, not characters, so multi-byte charsets may
# be truncated mid code point.
bytes = self.stream.read(length)
return unicode(bytes, charset) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def readUTF(self):
    """Read a length-prefixed UTF-8 string from the data stream.

    The payload is preceded by an unsigned 16-bit byte count.

    @rtype: C{str}
    @return: The decoded string.
    """
    nbytes = self.stream.read_ushort()
    return self.stream.read_utf8_string(nbytes)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def readBytes(self):
    """Read and return a utf-8 encoded byte array.

    A reference marker resolves against the context's string table;
    otherwise the raw bytes are read, cached in the table and returned.
    """
    length, is_reference = self._readLength()
    if is_reference:
        # back-reference into the string table
        return self.context.getString(length)
    if not length:
        # empty strings are never stored as references
        return ''
    data = self.stream.read(length)
    self.context.addString(data)
    return data
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def readString(self):
    """Read and return a string from the stream.

    Raw bytes (either fresh or referenced from the string table) are
    converted via the context's byte-to-string mapping.
    """
    length, is_reference = self._readLength()
    if is_reference:
        # back-reference: reuse the stored raw bytes
        raw = self.context.getString(length)
        return self.context.getStringForBytes(raw)
    if not length:
        # empty strings are never stored as references
        return ''
    raw = self.stream.read(length)
    self.context.addString(raw)
    return self.context.getStringForBytes(raw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def readDate(self):
""" Read date from the stream. The timezone is ignored as the date is always in UTC. """ |
ref = self.readInteger(False)
# low bit clear -> reference into the object table
if ref & REFERENCE_BIT == 0:
return self.context.getObject(ref >> 1)
# payload is milliseconds since the epoch stored as an IEEE double
ms = self.stream.read_double()
result = util.get_datetime(ms / 1000.0)
# optional decoder-level offset applied on top of the UTC value
if self.timezone_offset is not None:
result += self.timezone_offset
self.context.addObject(result)
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def readArray(self):
""" Reads an array from the stream. @warning: There is a very specific problem with AMF3 where the first three bytes of an encoded empty C{dict} will mirror that of an encoded C{{'': 1, '2': 2}} """ |
size = self.readInteger(False)
# low bit clear -> reference into the object table
if size & REFERENCE_BIT == 0:
return self.context.getObject(size >> 1)
size >>= 1
# the associative part comes first, terminated by an empty key
key = self.readBytes()
if key == '':
# integer indexes only -> python list
result = []
self.context.addObject(result)
for i in xrange(size):
result.append(self.readElement())
return result
# mixed string/integer keys -> MixedArray (dict-like)
result = pyamf.MixedArray()
self.context.addObject(result)
while key:
result[key] = self.readElement()
key = self.readBytes()
# then the dense part: ``size`` consecutive integer-indexed elements
for i in xrange(size):
el = self.readElement()
result[i] = el
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _getClassDefinition(self, ref):
""" Reads class definition from the stream. """ |
# low bit clear -> reference to an already-seen class definition
is_ref = ref & REFERENCE_BIT == 0
ref >>= 1
if is_ref:
class_def = self.context.getClassByReference(ref)
return class_def
name = self.readBytes()
alias = None
# empty name -> anonymous object
if name == '':
name = pyamf.ASObject
try:
alias = pyamf.get_class_alias(name)
except pyamf.UnknownClassAlias:
if self.strict:
raise
# fall back to a typed wrapper for unregistered aliases
alias = pyamf.TypedObjectClassAlias(name)
class_def = ClassDefinition(alias)
# low 2 bits: encoding flavour; remaining bits: static attribute count
class_def.encoding = ref & 0x03
class_def.attr_len = ref >> 2
class_def.static_properties = []
if class_def.attr_len > 0:
for i in xrange(class_def.attr_len):
key = self.readBytes()
class_def.static_properties.append(key)
self.context.addClass(class_def, alias.klass)
return class_def |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def readObject(self):
""" Reads an object from the stream. """ |
ref = self.readInteger(False)
# low bit clear -> reference into the object table
if ref & REFERENCE_BIT == 0:
obj = self.context.getObject(ref >> 1)
if obj is None:
raise pyamf.ReferenceError('Unknown reference %d' % (ref >> 1,))
if self.use_proxies is True:
obj = self.readProxy(obj)
return obj
ref >>= 1
class_def = self._getClassDefinition(ref)
alias = class_def.alias
obj = alias.createInstance(codec=self)
obj_attrs = dict()
# register before reading members so cyclic references resolve
self.context.addObject(obj)
if class_def.encoding in (ObjectEncoding.EXTERNAL, ObjectEncoding.PROXY):
# externalizable objects deserialise themselves
obj.__readamf__(DataInput(self))
if self.use_proxies is True:
obj = self.readProxy(obj)
return obj
elif class_def.encoding == ObjectEncoding.DYNAMIC:
# static (sealed) members first, then dynamic name/value pairs
self._readStatic(class_def, obj_attrs)
self._readDynamic(class_def, obj_attrs)
elif class_def.encoding == ObjectEncoding.STATIC:
self._readStatic(class_def, obj_attrs)
else:
raise pyamf.DecodeError("Unknown object encoding")
alias.applyAttributes(obj, obj_attrs, codec=self)
if self.use_proxies is True:
obj = self.readProxy(obj)
return obj |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def readXML(self):
""" Reads an xml object from the stream. @return: An etree interface compatible object @see: L{xml.set_default_interface} """ |
ref = self.readInteger(False)
# low bit clear -> reference to a previously decoded document
if ref & REFERENCE_BIT == 0:
return self.context.getObject(ref >> 1)
# remaining bits hold the byte length of the XML payload
xmlstring = self.stream.read(ref >> 1)
x = xml.fromstring(xmlstring)
self.context.addObject(x)
return x |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def readByteArray(self):
""" Reads a string of data from the stream. Detects if the L{ByteArray} was compressed using C{zlib}. @see: L{ByteArray} @note: This is not supported in ActionScript 1.0 and 2.0. """ |
ref = self.readInteger(False)
# low bit clear -> reference into the object table
if ref & REFERENCE_BIT == 0:
return self.context.getObject(ref >> 1)
buffer = self.stream.read(ref >> 1)
# sniff compression by simply attempting to inflate the payload
try:
buffer = zlib.decompress(buffer)
compressed = True
except zlib.error:
compressed = False
obj = ByteArray(buffer)
# remember so re-encoding can preserve the compression state
obj.compressed = compressed
self.context.addObject(obj)
return obj |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def writeBoolean(self, n):
    """Write a Boolean marker byte to the stream.

    Anything other than the exact ``False`` singleton is encoded as the
    true marker, mirroring the original identity check.
    """
    marker = TYPE_BOOL_FALSE if n is False else TYPE_BOOL_TRUE
    self.stream.write(marker)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def writeInteger(self, n):
""" Writes an integer to the stream. @type n: integer data @param n: The integer data to be encoded to the AMF3 data stream. """ |
# values outside the signed 29-bit range cannot be AMF3 integers;
# fall back to encoding them as an IEEE double
if n < MIN_29B_INT or n > MAX_29B_INT:
self.writeNumber(float(n))
return
self.stream.write(TYPE_INTEGER)
self.stream.write(encode_int(n)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def writeNumber(self, n):
""" Writes a float to the stream. @type n: C{float} """ |
# type marker followed by the 8-byte IEEE-754 double payload
self.stream.write(TYPE_NUMBER)
self.stream.write_double(n) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def writeProxy(self, obj):
""" Encodes a proxied object to the stream. @since: 0.6 """ |
# wrap the object in its proxy, then encode; is_proxy=True prevents
# writeObject from re-entering this method
proxy = self.context.getProxyForObject(obj)
self.writeObject(proxy, is_proxy=True) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def writeObject(self, obj, is_proxy=False):
""" Writes an object to the stream. """ |
# proxy mode: delegate once to writeProxy, which calls back with
# is_proxy=True to avoid infinite recursion
if self.use_proxies and not is_proxy:
self.writeProxy(obj)
return
self.stream.write(TYPE_OBJECT)
ref = self.context.getObjectReference(obj)
# already seen: emit an object reference (low bit clear)
if ref != -1:
self._writeInteger(ref << 1)
return
self.context.addObject(obj)
# object is not referenced, serialise it
kls = obj.__class__
definition = self.context.getClass(kls)
alias = None
class_ref = False # if the class definition is a reference
if definition:
class_ref = True
alias = definition.alias
else:
alias = self.context.getClassAlias(kls)
definition = ClassDefinition(alias)
self.context.addClass(definition, alias.klass)
if class_ref:
self.stream.write(definition.reference)
else:
# inline class definition: attr count, encoding bits and markers
ref = 0
if definition.encoding != ObjectEncoding.EXTERNAL:
ref += definition.attr_len << 4
final_reference = encode_int(ref | definition.encoding << 2 |
REFERENCE_BIT << 1 | REFERENCE_BIT)
self.stream.write(final_reference)
definition.reference = encode_int(
definition.reference << 2 | REFERENCE_BIT)
# '\x01' is the empty-string marker used for anonymous classes
if alias.anonymous:
self.stream.write('\x01')
else:
self.serialiseString(alias.alias)
# work out what the final reference for the class will be.
# this is okay because the next time an object of the same
# class is encoded, class_ref will be True and never get here
# again.
if alias.external:
# externalizable objects serialise themselves
obj.__writeamf__(DataOutput(self))
return
attrs = alias.getEncodableAttributes(obj, codec=self)
if alias.static_attrs:
# static attribute names go out once per class definition
if not class_ref:
[self.serialiseString(attr) for attr in alias.static_attrs]
for attr in alias.static_attrs:
value = attrs.pop(attr)
self.writeElement(value)
if definition.encoding == ObjectEncoding.STATIC:
return
if definition.encoding == ObjectEncoding.DYNAMIC:
# dynamic name/value pairs, closed by the empty-string marker
if attrs:
for attr, value in attrs.iteritems():
if type(attr) in python.int_types:
attr = str(attr)
self.serialiseString(attr)
self.writeElement(value)
self.stream.write('\x01') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def writeXML(self, n):
""" Writes a XML string to the data stream. @type n: L{ET<xml.ET>} @param n: The XML Document to be encoded to the AMF3 data stream. """ |
self.stream.write(TYPE_XMLSTRING)
ref = self.context.getObjectReference(n)
# already seen: emit an object reference (low bit clear)
if ref != -1:
self._writeInteger(ref << 1)
return
self.context.addObject(n)
# serialise the rendered document as a length-prefixed UTF-8 string
self.serialiseString(xml.tostring(n).encode('utf-8')) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def v1_tag_associate(request, tags, tag):
'''Associate an HTML element with a tag.
The association should be a JSON serialized object on the
request body. Here is an example association that should
make the object's structure clear:
.. code-block:: python
{
"url": "http://example.com/abc/xyz?foo=bar",
"text": "The text the user highlighted.",
"stream_id": "{unix timestamp}-{md5 of url}",
"hash": "{nilsimsa hash of the HTML}",
"timestamp": {unix timestamp},
"xpath": {
"start_node": "/html/body/p[1]/text()[2]",
"start_idx": 3,
"end_node": "/html/body/p[1]/text()[3]",
"end_idx": 9
}
}
All fields are required and cannot be empty or ``null``.
The tag of the association should be specified in the URL
and is delimited by ``//``.
'''
# URL path segments arrive as bytes (Python 2 web stack); normalise.
tag = tag.decode('utf-8').strip()
# Parse the JSON body and stamp the tag onto the association record.
assoc = dict(json.loads(request.body.read()), **{'tag': tag})
tags.add(assoc) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def v1_tag_list(tags, tag=''):
    """List all direct children tags of the given parent.

    With no parent given, top-level tags are listed.  The response is a
    JSON envelope of the form ``{'children': [...]}`` where each child
    record is produced by the tag store, e.g.
    ``{'name': 'baz', 'parent': 'bar', 'tag': 'foo/bar/baz'}``.
    """
    # URL path segments arrive as bytes (Python 2 web stack); normalise.
    parent = tag.decode('utf-8').strip()
    return {'children': tags.list(parent)}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def v1_tag_suggest(request, tags, prefix, parent=''):
    """Provide fast suggestions for tag components.

    Yields completions of ``prefix`` among the direct components under
    ``parent`` (empty parent means top level).  The response is wrapped
    in a JSON envelope: ``{'suggestions': [...]}``.  An optional query
    parameter ``limit`` controls the number of suggestions returned
    (capped at 10000, default 100).
    """
    # URL path segments arrive as bytes (Python 2 web stack); normalise.
    wanted_prefix = prefix.decode('utf-8').strip()
    wanted_parent = parent.decode('utf-8').strip()
    # Cap the caller-supplied limit so result sizes stay bounded.
    limit = min(10000, int(request.params.get('limit', 100)))
    suggestions = tags.suggest(wanted_parent, wanted_prefix, limit=limit)
    return {'suggestions': suggestions}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def v1_stream_id_associations(tags, stream_id):
    """Retrieve associations recorded for ``stream_id``.

    Each association has the structure documented on
    ``v1_tag_associate`` plus a ``tag`` field carrying the full tag
    name.  Returned wrapped as ``{'associations': [...]}``.
    """
    # URL path segments arrive as bytes (Python 2 web stack); normalise.
    sid = stream_id.decode('utf-8').strip()
    return {'associations': tags.assocs_by_stream_id(sid)}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def v1_url_associations(tags, url):
'''Retrieve associations for a given URL.
The associations returned have the exact same structure as defined
in the ``v1_tag_associate`` route with one addition: a ``tag``
field contains the full tag name for the association.
'''
# The URL arrives percent-encoded bytes in the route; decode to text
# then unquote ('+' becomes space) before the store lookup.
# NOTE(review): ``urllib.unquote_plus`` is Python 2 only (moved to
# urllib.parse in Python 3).
url = urllib.unquote_plus(url.decode('utf-8')).strip()
return {'associations': tags.assocs_by_url(url)} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def v1_tag_associations(tags, tag):
    """Retrieve associations recorded for ``tag``.

    Each association has the structure documented on
    ``v1_tag_associate`` plus a ``tag`` field carrying the full tag
    name.  Returned wrapped as ``{'associations': [...]}``.
    """
    # URL path segments arrive as bytes (Python 2 web stack); normalise.
    wanted = tag.decode('utf-8').strip()
    return {'associations': tags.assocs_by_tag(wanted)}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete_all(self):
    '''Deletes all tag data.

    This does not destroy the ES index, but instead only
    deletes all tags with the configured doc types.
    '''
    # Both mappings are removed independently; a TransportError means the
    # mapping is already gone, which is fine for an idempotent delete.
    for doc_type in (self.type_tag, self.type_assoc):
        try:
            self.conn.indices.delete_mapping(
                index=self.index, doc_type=doc_type)
        except TransportError:
            # BUG FIX: the format string is 'type %r in index %r' but the
            # original passed (self.index, doc_type) -- arguments swapped.
            logger.warn('type %r in index %r already deleted',
                        doc_type, self.index, exc_info=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _create_mappings(self):
'Create the field type mapping.'
created1 = self._create_tag_mapping()
created2 = self._create_assoc_mapping()
return created1 or created2 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_last_traceback():
"""Retrieve the last traceback as an `unicode` string.""" |
import traceback
# Python 2 only: the StringIO module was removed in Python 3
# (io.StringIO is the modern equivalent).
from StringIO import StringIO
tb = StringIO()
# print_exc renders the exception currently being handled, if any.
traceback.print_exc(file=tb)
return to_unicode(tb.getvalue()) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write(file_name, data):
    """Encode ``data`` in the Hip format and write it to ``file_name``.

    The target file is opened in text mode and truncated before
    ``encode`` runs, matching the original open-then-encode ordering.
    """
    with open(file_name, 'w') as handle:
        handle.write(encode(data))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, thread_uuid, uuid):
    """Return the member of *thread_uuid* whose userUuid equals *uuid*.

    The matching member is logged at debug level; None is returned when
    no member matches.
    """
    for member in self.list(thread_uuid):
        if member.get('userUuid') == uuid:
            self.log.debug(member)
            return member
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete(self, wg_uuid, uuid):
    """Delete one member of the given workgroup thread via the core API."""
    url = "{base}/{wg_uuid}/members/{uuid}".format(
        base=self.local_base_url,
        wg_uuid=wg_uuid,
        uuid=uuid,
    )
    return self.core.delete(url)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_from_cli():
""" Perform an update instigated from a CLI. """ |
# Build the CLI: --key is always required; --value/--set-true/--set-false
# choose between reading and the three ways of writing.
arg_parser = argparse.ArgumentParser(
    description='Read and write properties in a wp-config.php file. '
                'Include a --value argument to set the value, omit it to '
                'read the value of the specified key.',
    prog='python -m wpconfigr')
arg_parser.add_argument('--filename',
                        help='wp-config.php filename',
                        required=True)
arg_parser.add_argument('--key',
                        help='Property key',
                        required=True)
arg_parser.add_argument('--value',
                        help='New property value',
                        required=False)
arg_parser.add_argument('--log-level',
                        default='CRITICAL',
                        help='Log level',
                        required=False)
arg_parser.add_argument('--set-true',
                        action='store_true',
                        help='Set the value as boolean true')
arg_parser.add_argument('--set-false',
                        action='store_true',
                        help='Set the value as boolean false')
args = arg_parser.parse_args()
# The three write modes are mutually exclusive; parser.error() exits.
if args.set_true and args.set_false:
    arg_parser.error('Cannot set --set-true and --set-false.')
if args.value and args.set_true:
    arg_parser.error('Cannot set --value and --set-true.')
if args.value and args.set_false:
    arg_parser.error('Cannot set --value and --set-false.')
basicConfig(level=str(args.log_level).upper())
updater = WpConfigFile(filename=args.filename)
# Resolve the value to write, if any; booleans win over --value.
if args.set_true:
    value = True
elif args.set_false:
    value = False
else:
    value = args.value
if value is not None:
    updater.set(key=args.key, value=value)
else:
    # Read mode: print the current value (nothing printed when unset/falsy).
    got = updater.get(key=args.key)
    if got:
        print(got)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def manage_file_analysis(args: argparse.Namespace, filename: str, data: object) -> None: """ Take care of the analysis of a datafile """ |
# Files are keyed in the datastore by the SHA512 hash of their content.
key = DataStore.hashfile(filename)
print('Analyzing {} --> {}'.format(filename, key))
if data.check_key(key):  # if exists in database, prepopulate
    fit = LineFit(filename, data=data.get_data(key))
else:
    fit = LineFit(filename)
if args.time:
    # Single-column analysis: update only that row of the stored record.
    noise, curvature, rnge, domn = fit.analyze(time=args.time)
    newrow = [args.time, noise, curvature,
              rnge, domn, fit.accepts[args.time]]
    data.update1(key, newrow, len(fit.noises))
else:
    # Whole-file analysis: rebuild the complete record for this key.
    fit.analyze_full()
    newrows = np.array([range(len(fit.noises)), fit.noises,
                        fit.curves, fit.ranges, fit.domains, fit.accepts])
    data.update(key, newrows)
# Persist the datastore after every file.
data.save()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_args() -> argparse.Namespace: """ Get program arguments. """ |
# All flags are optional; the positional `files` argument supplies inputs.
parser = argparse.ArgumentParser(prog='python3 linefit.py',
                                 description=('Parameterize and analyze '
                                              'usability of conduit edge data'))
parser.add_argument('files', metavar='F', type=str, nargs='*',
                    help=('File(s) for processing. '
                          'Each file has a specific format: '
                          'See README (or header) for specification.'))
parser.add_argument('-p', '--plot', action='store_true', default=False,
                    help=('Create Plot of file(s)? Note, unless --time flag used, '
                          'will plot middle time.'))
parser.add_argument('-pd', '--plotdata', action='store_true', default=False,
                    help='Create plot of current datastore.')
parser.add_argument('-a', '--analyze', action='store_true', default=False,
                    help=('Analyze the file and determine Curvature/Noise parameters. '
                          'If --time not specified, will examine entire file. '
                          'This will add results to datastore with false flags '
                          'in accept field if not provided.'))
parser.add_argument('-mt', '--machinetest', action='store_true', default=False,
                    help=('Determine if the times from the file are usable based on '
                          'supervised learning model. If --time not specified, '
                          'will examine entire file.'))
parser.add_argument('-m', '--model', type=str, default='nn',
                    help=('Learning Model to use. Options are ["nn", "svm", "forest", "sgd"]'))
parser.add_argument('-nnk', '--nnk', type=int, default=10,
                    help=('k-Parameter for k nearest neighbors. Google it.'))
parser.add_argument('-t', '--time', type=int, default=None,
                    help=('Time (column) of data to use for analysis OR plotting. '
                          'Zero-Indexed'))
parser.add_argument('-d', '--datastore', type=str, default=DATASTORE,
                    help=("Datastore filename override. "
                          "Don't do this unless you know what you're doing"))
parser.add_argument('-pds', '--printdata', action='store_true', default=False,
                    help=("Print data"))
parser.add_argument('-pdss', '--printdatashort', action='store_true', default=False,
                    help=("Print data short"))
args = parser.parse_args()
return args
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_keys(self) -> typing.List[str]:
    """Return the SHA512 hash keys present in the datafile.

    The scipy .mat metadata entries (__header__ etc.) are excluded.

    :return: list of keys
    """
    reserved = ('__header__', '__version__', '__globals__')
    return [key for key in self.data.keys() if key not in reserved]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_key(self, key: str) -> bool:
    """Return True when *key* (a SHA512 hash) exists in the datastore."""
    return key in self.get_keys()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_traindata(self) -> np.ndarray:
    """Concatenate all labelled rows from every record for model training.

    Rows whose accept flag (column 4) is zero are unlabelled and skipped.

    :return: 2d array of points (None when the datastore is empty)
    """
    reserved = ('__header__', '__version__', '__globals__')
    labelled = [value[np.where(value[:, 4] != 0)]
                for key, value in self.data.items() if key not in reserved]
    if not labelled:
        return None
    return np.concatenate(labelled) if len(labelled) > 1 else labelled[0]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def printdata(self) -> None:
    """Print the full datastore to stdout without numpy truncation."""
    import sys
    # BUG FIX: modern numpy rejects threshold=np.nan ("threshold must be
    # numeric and non-NAN"); sys.maxsize is the documented way to disable
    # summarization.
    np.set_printoptions(threshold=sys.maxsize)
    print(self.data)
    # Restore numpy's default summarization threshold.
    np.set_printoptions(threshold=1000)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self, key: str, data: np.ndarray) -> None:
    """Store *data* under *key*, replacing any existing record."""
    self.data[key] = data
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update1(self, key: str, data: np.ndarray, size: int) -> None:
    """Update one row (indexed by data[0]) of the record stored under *key*.

    When the key is unknown, a new zero-filled (size, 6) record is created
    and the row written into it.
    """
    # BUG FIX: removed a leftover debug print(data) that polluted stdout
    # on every update.
    if key in self.get_keys():
        self.data[key][data[0]] = data
    else:
        newdata = np.zeros((size, 6))
        newdata[data[0]] = data
        self.data[key] = newdata
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hashfile(name: str) -> str:
    """Return the SHA512 hex digest of the file at *name*.

    The file is read in 4 KiB chunks so arbitrarily large files can be
    hashed without loading them into memory.
    """
    digest = hashlib.sha512()
    with open(name, 'rb') as handle:
        while True:
            block = handle.read(4096)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _loadedges(self) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray, float, np.ndarray]: """ Attempts to intelligently load the .mat file and take average of left and right edges :return: left and right averages :return: times for each column :return: accept/reject for each column :return: pixel-inch ratio """ |
data = sco.loadmat(self.filename)
datakeys = [k for k in data.keys()
if ('right' in k) or ('left' in k) or ('edge' in k)]
averagedata = ((data[datakeys[0]] + data[datakeys[1]]) / 2)
try:
times = (data['times'] - data['times'].min())[0]
except KeyError:
times = np.arange(len(data[datakeys[0]][0]))
try:
accept = data['accept']
except KeyError:
accept = np.zeros(len(times))
try:
ratio = data['ratio']
except KeyError:
ratio = 1
try:
viscosity = data['viscosity']
except KeyError:
viscosity = np.ones(len(times))
return averagedata, times, accept, ratio, viscosity |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_file(self, name: str=None, time: int=None) -> None:
    """Plot raw data and fitted curve for one time column of the file.

    :param name: output image path (defaults to ./img/<filename>.png)
    :param time: data column to plot (defaults to the middle column)
    """
    time = time or int(len(self.times) / 2)
    name = name or './img/' + self.filename + '.png'
    predicted, residuals, residual_mean, noise = self._get_fit(time)
    plt.figure()
    plt.scatter(self.domain, self.averagedata[:, time], alpha=0.2)
    plt.plot(predicted)
    plt.savefig(name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _gaussian_function(self, datalength: int, values: np.ndarray, height: int, index: int) -> np.ndarray: """ i'th Regression Model Gaussian :param: len(x) :param: x values :param: height of gaussian :param: position of gaussian :return: gaussian bumps over domain """ |
return height * np.exp(-(1 / (self.spread_number * datalength)) *
(values - ((datalength / self.function_number) * index)) ** 2) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_fit(self, time: int) -> typing.Tuple[np.ndarray, np.ndarray, float, float]: """ Fit regression model to data :param: time (column of data) :return: predicted points :return: residuals :return: mean residual :return: error """ |
# Design matrix: [1, x, g_0(x), ..., g_{k-1}(x)] -- an affine term plus
# self.function_number Gaussian bumps spread across the domain.
rawdata = self.averagedata[:, time]
domain = np.arange(len(rawdata))
datalength = len(domain)
coefficients = np.zeros((datalength, self.function_number + 2))
coefficients[:, 0] = 1
coefficients[:, 1] = domain
for i in range(self.function_number):
    coefficients[:, 2 + i] = self._gaussian_function(datalength, domain, 1, i)
# Ordinary least squares via the normal equations: (X'X)^-1 X'y.
betas = linalg.inv(coefficients.transpose().dot(coefficients)).dot(coefficients.transpose().dot(rawdata))
predicted_values = coefficients.dot(betas)
residuals = rawdata - predicted_values
# Residual standard error with n - p degrees of freedom,
# p = function_number + 2 fitted parameters.
error = np.sqrt(residuals.transpose().dot(residuals) / (datalength - (self.function_number + 2)))
return predicted_values, residuals, residuals.mean(), error
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_noise(self, residuals: np.ndarray) -> float: """ Determine Noise of Residuals. :param: residuals :return: noise """ |
return np.mean(np.abs(residuals)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def analyze(self, time: int=None) -> typing.Tuple[float, float, int, int]: """ Determine noise, curvature, range, and domain of specified array. :param: pixel to inch ratio :param: time (column) to use. :return: curvature :return: noise :return: range :return: domain """ |
if not time:
    # NOTE(review): 0 is falsy, so column 0 can never be requested
    # explicitly; the call falls back to the middle column.
    time = int(len(self.times) / 2)
# domains[time] == 0 acts as a "not yet computed" sentinel; results are
# cached in the per-column arrays below.
if self.domains[time] == 0:
    yhat, residuals, mean_residual, error = self._get_fit(time)
    # Second derivative of the fitted curve via repeated differencing.
    yhat_p = self.ddiff(yhat)
    yhat_pp = self.ddiff(yhat_p)
    noise = self._get_noise(residuals)
    # RMS of the second derivative, scaled by the pixel-inch ratio.
    curvature = (1 / self.ratio) * (1 / len(yhat_pp)) * np.sqrt(si.simps(yhat_pp ** 2))
    rng = (self.ratio * (np.max(self.averagedata[:, time]) -
                         np.min(self.averagedata[:, time])))
    dmn = self.ratio * len(self.averagedata[:, time])
    # All four metrics are stored (and returned) in log10 space.
    self.noises[time] = np.log10(noise)
    self.curves[time] = np.log10(curvature)
    self.ranges[time] = np.log10(rng)
    self.domains[time] = np.log10(dmn)
return self.noises[time], self.curves[time], self.ranges[time], self.domains[time]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def analyze_full(self) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Run analyze() over every time column of the file.

    Skipped entirely when noises[0] != 0 (results already computed).

    :return: the (noises, curves, ranges, domains) arrays
    """
    if self.noises[0] == 0:
        for column in tqdm(range(len(self.times))):
            self.analyze(time=column)
    return self.noises, self.curves, self.ranges, self.domains
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_algo(self, args: argparse.Namespace, algo: str) -> object:
    """Return the learning-model instance named by *algo*.

    Only 'nn' (k-nearest-neighbour) is implemented here; any other name
    yields None.
    """
    if algo == 'nn':
        return NearestNeighbor(args.nnk)
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_fitspace(self, name: str, X: np.ndarray, y: np.ndarray, clf: object) -> None: """ Plot 2dplane of fitspace """ |
# Background (light) and point (bold) colour maps for up to three classes.
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
h = 0.01  # Mesh step size
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))
# Classify every mesh point to colour the plane by predicted class.
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xlabel(r'$\log_{10}$ Noise')
plt.ylabel(r'$\log_{10}$ Curvature')
plt.savefig(name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def train(self, traindata: np.ndarray) -> None:
    """Fit the classifier on feature columns 1-4 with labels in column 5."""
    features = traindata[:, 1:5]
    labels = traindata[:, 5]
    self.clf.fit(features, labels)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def predict(self, predictdata: np.ndarray) -> np.ndarray:
    """Return the classifier's predictions for the given points."""
    return self.clf.predict(predictdata)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connect_to(self, service_name, **kwargs):
    """Instantiate the named service's ``Connection``, forwarding kwargs.

    :param service_name: service short name, e.g. ``sqs`` or ``dynamodb``.
    :type service_name: string
    :rtype: <kotocore.connection.Connection> instance
    """
    connection_class = self.get_connection(service_name)
    return connection_class.connect_to(**kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self, **kwargs):
""" Create a ``FormEntry`` instance and related ``FieldEntry`` instances for each form field. """ |
# commit=False: the entry needs its form and timestamp set before the
# first database write.
entry = super(FormForForm, self).save(commit=False)
entry.form = self.form
entry.entry_time = now()
entry.save()
# IDs of fields that already have a FieldEntry (the edit case).
entry_fields = entry.fields.values_list("field_id", flat=True)
new_entry_fields = []
for field in self.form_fields:
    field_key = "field_%s" % field.id
    value = self.cleaned_data[field_key]
    # Uploaded files are written to storage under a fresh UUID directory;
    # the saved path becomes the stored value.
    if value and self.fields[field_key].widget.needs_multipart_form:
        value = fs.save(join("forms", str(uuid4()), value.name), value)
    # Multi-select values are flattened to a comma-separated string.
    if isinstance(value, list):
        value = ", ".join([v.strip() for v in value])
    if field.id in entry_fields:
        # Existing FieldEntry: update in place.
        field_entry = entry.fields.get(field_id=field.id)
        field_entry.value = value
        field_entry.save()
    else:
        # New FieldEntry: collected and bulk-created in one query below.
        new = {"entry": entry, "field_id": field.id, "value": value}
        new_entry_fields.append(FieldEntry(**new))
if new_entry_fields:
    FieldEntry.objects.bulk_create(new_entry_fields)
return entry
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def email_to(self):
    """Return the value entered for the first ``forms.EmailField`` field.

    None is returned when the form has no email field.
    """
    for field in self.form_fields:
        field_class = fields.CLASSES[field.field_type]
        if issubclass(field_class, forms.EmailField):
            return self.cleaned_data["field_%s" % field.id]
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def columns(self):
    """Return the labels of the columns selected for export.

    The pseudo-field 0 selects the entry-time column.
    """
    selected = []
    for field in self.form_fields:
        if self.cleaned_data["field_%s_export" % field.id]:
            selected.append(field.label)
    if self.cleaned_data["field_0_export"]:
        selected.append(self.entry_time_name)
    return selected
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def close(self):
"""Close the Marquise context, ensuring data is flushed and spool files are closed. This should always be closed explicitly, as there's no guarantees that it will happen when the instance is deleted. """ |
# Idempotent: closing an already-closed handle is a harmless no-op.
if self.marquise_ctx is None:
    self.__debug("Marquise handle is already closed, will do nothing.")
    # Multiple close() calls are okay.
    return
self.__debug("Shutting down Marquise handle spooling to %s and %s" % (self.spool_path_points, self.spool_path_contents))
# At the time of writing this always succeeds (returns 0).
MARQUISE_SHUTDOWN(self.marquise_ctx)
# Signal that our context is no longer valid.
self.marquise_ctx = None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_source(self, address, metadata_dict):
"""Pack the `metadata_dict` for an `address` into a data structure and ship it to the spool file. Arguments: address -- the address for which this metadata_dict applies. metadata_dict -- a Python dict of arbitrary string key-value pairs. """ |
# Guard: writing through a closed handle is a programming error.
if self.marquise_ctx is None:
    raise ValueError("Attempted to write to a closed Marquise handle.")
self.__debug("Supplied address: %s" % address)
# Sanity check the input, everything must be UTF8 strings (not
# yet confirmed), no Nonetypes or anything stupid like that.
#
# The keys of the key-value pairs are unique, by virtue of
# taking a dict as input.
if any([ x is None for x in metadata_dict.keys() ]):
    raise TypeError("One of your metadata_dict keys is a Nonetype")
# Values are allowed to be None, coerce to empty strings.
metadata_dict = dict([ (x[0],"" if x[1] is None else x[1]) for x in metadata_dict.items() ])
# Cast each string to a C-string. This may have unusual results if your
# keys/vals aren't particularly stringy, such as Python classes,
# Exceptions, etc. They will get str()'d, and they may look stupid.
# pylint: disable=multiple-statements
try: c_fields = [ cstring(str(x)) for x in metadata_dict.keys() ]
except Exception as exc: raise TypeError("One of your metadata_dict keys couldn't be cast to a Cstring, %s" % exc)
try: c_values = [ cstring(str(x)) for x in metadata_dict.values() ]
except Exception as exc: raise TypeError("One of your metadata_dict values couldn't be cast to a Cstring, %s" % exc)
# pylint: enable=multiple-statements
# Get our source_dict data structure
source_dict = MARQUISE_NEW_SOURCE(c_fields, c_values, len(metadata_dict))
if is_cnull(source_dict):
    raise ValueError("errno is set to EINVAL on invalid input, our errno is %d" % FFI.errno)
# If you do something stupid, like passing a string where an
# int (address) is meant to go, CFFI will explode. Which is
# fine, but that causes memory leaks. The explosion still
# occurs, but we cleanup after (before?) ourselves.
try:
    success = MARQUISE_UPDATE_SOURCE(self.marquise_ctx, address, source_dict)
except TypeError as exc:
    MARQUISE_FREE_SOURCE(source_dict)
    raise
# NOTE(review): from here on, source_dict is freed on every path —
# success, failure, and exception — to avoid leaking the C struct.
self.__debug("marquise_update_source returned %d" % success)
if success != 0:
    MARQUISE_FREE_SOURCE(source_dict)
    raise RuntimeError("marquise_update_source was unsuccessful, errno is %d" % FFI.errno)
MARQUISE_FREE_SOURCE(source_dict)
return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_db_fields(self, obj):
""" Return list of database dictionaries, which are used as indexes for each attributes. Args: cached (bool, default True):
Use cached connection to database. Returns: list: List of OOBTree's for each item in :attr:`.COMMON_FIELDS`. """ |
for field in obj.indexes:
yield field, self._zeo_key(field) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _check_obj_properties(self, pub, name="pub"):
""" Make sure, that `pub` has the right interface. Args: pub (obj):
Instance which will be checked. name (str):
Name of the instance. Used in exception. Default `pub`. Raises: InvalidType: When the `pub` is not instance of `obj_type`. """ |
if not hasattr(pub, "indexes"):
raise InvalidType("`%s` doesn't have .indexes property!" % name)
if not pub.indexes:
raise InvalidType("`%s.indexes` is not set!" % name)
if not hasattr(pub, "project_key"):
raise InvalidType(
"`%s` doesn't have .project_key property!" % name
)
if not pub.project_key:
raise InvalidType("`%s.project_key` is not set!" % name) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _put_into_indexes(self, obj):
""" Put publication into all indexes. Attr: obj (obj):
Indexable object. Raises: UnindexableObject: When there is no index (property) which can be used to index `obj` in database. """ |
no_of_used_indexes = 0
for field_name, db_index in list(self._get_db_fields(obj)):
attr_value = getattr(obj, field_name)
if attr_value is None: # index only by set attributes
continue
container = db_index.get(attr_value, None)
if container is None:
container = OOTreeSet()
db_index[attr_value] = container
container.insert(obj)
no_of_used_indexes += 1
# make sure that atleast one `attr_value` was used
if no_of_used_indexes <= 0:
raise UnindexableObject(
"You have to use atleast one of the identificators!"
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def store_object(self, obj):
""" Save `obj` into database and into proper indexes. Attr: obj (obj):
Indexable object. Raises: InvalidType: When the `obj` doesn't have right properties. UnindexableObject: When there are no indexes defined. """ |
# Validate the object's interface before touching the database.
self._check_obj_properties(obj)
# transaction.manager commits on clean exit and aborts on exception.
with transaction.manager:
    self._put_into_indexes(obj)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_subset_matches(self, query):
    """Yield stored publications matching each set attribute of *query*.

    Args:
        query (obj): object implementing the proper index interface.

    Yields:
        list: matching publications per index.
    """
    for field_name, db_index in self._get_db_fields(query):
        value = getattr(query, field_name)
        # Unset attributes do not constrain the query.
        if value is None:
            continue
        matches = db_index.get(value, OOTreeSet())
        if matches:
            yield matches
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _wrap_measure(individual_group_measure_process, group_measure, loaded_processes):
""" Creates a function on a state_collection, which creates analysis_collections for each group in the collection. """ |
def wrapped_measure(state_collection, overriding_parameters=None, loggers=None):
    # Fall back to the framework's default loggers when none are supplied.
    if loggers == None:
        loggers = funtool.logger.set_default_loggers()
    if loaded_processes != None:
        if group_measure.grouping_selectors != None:
            for grouping_selector_name in group_measure.grouping_selectors:
                # Apply the grouping to the whole collection first ...
                state_collection = funtool.state_collection.add_grouping(state_collection, grouping_selector_name, loaded_processes)
                for group in funtool.state_collection.groups_in_grouping(state_collection, grouping_selector_name):
                    # ... then build a fresh AnalysisCollection per group.
                    analysis_collection = funtool.analysis.AnalysisCollection(None, group, {}, {})
                    if group_measure.analysis_selectors != None:
                        # Selectors may filter the collection down to None.
                        for analysis_selector in group_measure.analysis_selectors:
                            analysis_collection = loaded_processes["analysis_selector"][analysis_selector].process_function(analysis_collection, state_collection)
                    if analysis_collection != None:
                        individual_group_measure_process(analysis_collection, state_collection)
    return state_collection
return wrapped_measure
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def getAWSAccountID():
'''
Print an instance's AWS account number or 0 when not in EC2
'''
# The instance-identity document is only reachable from inside EC2.
link = "http://169.254.169.254/latest/dynamic/instance-identity/document"
try:
    conn = urllib2.urlopen(url=link, timeout=5)
except urllib2.URLError:
    # Metadata service unreachable: not running on EC2, report '0'.
    return '0'
jsonData = json.loads(conn.read())
return jsonData['accountId']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def readInstanceTag(instanceID, tagName="Name", connection=None):
""" Load a tag from EC2 :param str instanceID: Instance ID to read the tag on :param str tagName: Name of tag to load :param connection: optional boto connection to use :returns: the tag's value :rtype: str """ |
# Python 2 code (basestring, old-style raise below).
assert isinstance(instanceID, basestring), ("instanceID must be a string but is %r" % instanceID)
assert isinstance(tagName, basestring), ("tagName must be a string but is %r" % tagName)
if not connection:
    # Assume AWS credentials are in the environment or the instance is using an IAM role
    connection = boto.ec2.connect_to_region(myRegion())
# Filter the tag values for our instance_id
# http://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ApiReference-cmd-DescribeTags.html
tagData = connection.get_all_tags(filters={"resource-id": instanceID, "key": tagName})
if tagData:
    tagValue = tagData[0].value
else:
    # The tag genuinely does not exist on this instance.
    raise RuntimeError, "%s: No such tag on %s" % (tagName, instanceID)
return tagValue
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def readMyEC2Tag(tagName, connection=None):
    """Load an EC2 tag for the running instance.

    :param str tagName: name of the tag to read
    :param connection: optional boto connection; one is created from the
        instance's own region when omitted
    """
    assert isinstance(tagName, basestring), ("tagName must be a string but is %r" % tagName)
    # Load metadata. if == {} we are on localhost
    # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html
    if not connection:
        # Assume AWS credentials are in the environment or the instance is using an IAM role
        connection = boto.ec2.connect_to_region(myRegion())
    return readInstanceTag(tagName=tagName,
                           instanceID=myInstanceID(),
                           connection=connection)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ver_to_tuple(value):
    """Convert a version-like string into a tuple of its integer parts.

    Non-digit runs act as separators; empty parts are dropped.
    """
    parts = re.split(r'\D+', value)
    return tuple(int(part) for part in parts if part)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def increment_name(self, name, i):
    """Return *name* with *i* inserted before its final extension.

    e.g. ``test.txt`` with i=1 becomes ``test1.txt``; names without a dot
    get the number appended; i == 0 returns the name unchanged.
    """
    if i == 0:
        return name
    if '.' not in name:
        return name + str(i)
    parts = name.split('.')
    parts[-2] += str(i)
    return '.'.join(parts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_local_metadata(self):
    """Serialize the exposed variables to '<filepath>.json'."""
    metadata = self._prep_metadata()
    # Write next to the configured local path, with a .json suffix.
    with open(self.filepath + '.json', 'w+') as outfile:
        json.dump(metadata, outfile, indent=4)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def move_forward(num_steps):
"""Moves the pen forward a few steps in the direction that its "turtle" is facing. Arguments: num_steps - a number like 20. A bigger number makes the pen move farther. """ |
# Reject non-integer distances up front so the CNC request stays well-formed.
assert int(num_steps) == num_steps, "move_forward() only accepts integers, but you gave it " + str(num_steps)
# Send the move to the plotter, then mirror it on the on-screen turtle.
_make_cnc_request("move.forward./" + str(num_steps))
state['turtle'].forward(num_steps)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add(self, v):
    """Fold a new value into the running (streaming) mean."""
    self._vals_added += 1
    if self._mean is None:
        # First value: seed the mean (the update below is then a no-op).
        self._mean = v
    delta = (v - self._mean) / float(self._vals_added)
    self._mean = self._mean + delta
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _build_trees_by_chrom(blocks, verbose=False):
    """Build one interval tree per chromosome from alignment blocks.

    :return: dictionary indexed by chromosome name where each entry is an
        IntervalTree over that chromosome's blocks.
    """
    if verbose:
        sys.stderr.write("separating blocks by chromosome... ")
    by_chrom = {}
    for block in blocks:
        by_chrom.setdefault(block.chrom, []).append(block)
    if verbose:
        sys.stderr.write("done\n")
    if verbose:
        sys.stderr.write("building interval trees by chromosome... ")
    trees = {chrom: IntervalTree(chrom_blocks, openEnded=True)
             for chrom, chrom_blocks in by_chrom.items()}
    if verbose:
        sys.stderr.write("done\n")
    return trees
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_column_absolute(self, position, miss_seqs=MissingSequenceHandler.TREAT_AS_ALL_GAPS, species=None):
""" return a column from the block as dictionary indexed by seq. name. :param position: the index to extract from the block; must be absolute coordinates (i.e. between self.start and self.end, not inclusive of the end). :param miss_seqs: how to treat sequence with no actual sequence data for the column. :return: dictionary where keys are sequence names and values are nucleotides (raw strings). """ |
# Bounds check: `position` must be an absolute genomic coordinate inside
# [self.start, self.end).
if position < self.start or position >= self.end:
    raise ValueError("getting column at genomic locus " + self._chrom + " " +
                     str(position) + " failed; locus is outside of genome " +
                     "alignment block")
# Translate the absolute locus into alignment-relative coordinates
# (gaps in the reference sequence shift the two apart).
rel_coord = self.sequence_to_alignment_coords(self.reference_sequence_name,
                                              position, position + 1)
# A single reference base must map to exactly one alignment column.
assert(len(rel_coord) == 1)
rel_start, rel_end = rel_coord[0]
assert(rel_end == rel_start + 1)
raw_col = self.get_column(rel_start, miss_seqs)
if species is None:
    return raw_col
# Restrict the column to sequences whose name prefix (the part before
# the first '.') is one of the requested species.
res = {}
for k in raw_col:
    name_parts = k.split(".")
    if name_parts[0] in species:
        res[k] = raw_col[k]
return res |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_column(self, chrom, position, missing_seqs=MissingSequenceHandler.TREAT_AS_ALL_GAPS, species=None):
"""Get the alignment column at the specified chromosome and position.""" |
blocks = self.get_blocks(chrom, position, position + 1)
if len(blocks) == 0:
raise NoSuchAlignmentColumnError("Request for column on chrom " +
chrom + " at position " +
str(position) + " not possible; " +
"genome alignment not defined at " +
"that locus.")
if len(blocks) > 1:
raise NoUniqueColumnError("Request for column on chrom " + chrom +
" at position " + str(position) + "not " +
"possible; ambiguous alignment of that locus.")
return blocks[0].get_column_absolute(position, missing_seqs, species) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decorate_instance_methods(obj, decorator, includes=None, excludes=None):
"""Decorator instance methods of an object. :param obj: Python object whose instance methods have to be decorated :param decorator: instance method decorator. :param string list includes: restrict wrapped instance methods. Default is `None` meaning that all instance method are wrapped. :param string list excludes: used to prevent some instance methods to be wrapped. Default is `None` :return: new class that inherits the `clazz` specified in parameter. """ |
class InstanceMethodDecorator(object):
def __getattribute__(self, name):
value = obj.__getattribute__(name)
if excludes and name in excludes:
return value
if includes and name not in includes:
return value
if inspect.ismethod(value):
value = decorator(name, value)
return value
return InstanceMethodDecorator() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reraise(clazz):
""" Decorator catching every exception that might be raised by wrapped function and raise another exception instead. Exception initially raised is passed in first argument of the raised exception. :param: Exception class: clazz: Python exception class to raise """ |
def _decorator(f):
    @functools.wraps(f)
    def _wrap(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception as e:
            # Re-raise as `clazz`, passing the original exception as the
            # constructor's first argument while keeping the original
            # traceback.
            # NOTE(review): this three-expression raise form is Python 2
            # only; under Python 3 it is a SyntaxError (the equivalent is
            # `raise clazz(e).with_traceback(sys.exc_info()[2])`).
            raise clazz(e), None, sys.exc_info()[2]
    return _wrap
return _decorator |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getListOfBases():
""" This function is here mainly for purposes of unittest Returns: list of str: Valid bases as they are used as URL parameters in links at Aleph main page. """ |
downer = Downloader()
# Fetch Aleph's own database listing; lowercased so the href matching
# below is case-insensitive.
data = downer.download(ALEPH_URL + "/F/?func=file&file_name=base-list")
dom = dhtmlparser.parseString(data.lower())
# from default aleph page filter links containing local_base in their href
base_links = filter(
    lambda x: "href" in x.params and "local_base" in x.params["href"],
    dom.find("a")
)
# split links by & - we will need only XXX from link.tld/..&local_base=XXX
# (the first '?' is normalized to '&' so a plain split("&") works)
base_links = map(
    lambda x: x.params["href"].replace("?", "&", 1).split("&"),
    base_links
)
# filter only sections containing bases
# NOTE(review): indexing `filter(...)[0]` and re-consuming `map` results
# relies on Python 2's list-returning filter/map; under Python 3 these
# are lazy iterators and this code would break.
bases = map(
    lambda link: filter(lambda base: "local_base=" in base, link)[0],
    base_links
)
# filter bases from base sections
bases = map(lambda x: x.split("=")[1].strip(), bases)
# deduplicate before returning
return list(set(bases)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def searchInAleph(base, phrase, considerSimilar, field):
""" Send request to the aleph search engine. Request itself is pretty useless, but it can be later used as parameter for :func:`getDocumentIDs`, which can fetch records from Aleph. Args: base (str):
which database you want to use phrase (str):
what do you want to search considerSimilar (bool):
fuzzy search, which is not working at all, so don't use it field (str):
where you want to look (see: :attr:`VALID_ALEPH_FIELDS`) Returns: dictionary: consisting from following fields: | error (optional):
present if there was some form of error | no_entries (int):
number of entries that can be fetch from aleph | no_records (int):
no idea what is this, but it is always >= than `no_entries` | set_number (int):
important - something like ID of your request | session-id (str):
used to count users for licensing purposes Example: Returned dict:: { 'session-id': 'YLI54HBQJESUTS678YYUNKEU4BNAUJDKA914GMF39J6K89VSCB', 'set_number': 36520, 'no_records': 1, 'no_entries': 1 } Raises: AlephException: if Aleph doesn't return any information InvalidAlephFieldException: if specified field is not valid """ |
downer = Downloader()
# Validate the requested search field before hitting the network.
if field.lower() not in VALID_ALEPH_FIELDS:
    raise InvalidAlephFieldException("Unknown field '" + field + "'!")
# Build the search URL from the template.
param_url = Template(SEARCH_URL_TEMPLATE).substitute(
    PHRASE=quote_plus(phrase),  # urlencode phrase
    BASE=base,
    FIELD=field,
    SIMILAR="Y" if considerSimilar else "N"
)
result = downer.download(ALEPH_URL + param_url)
dom = dhtmlparser.parseString(result)
find = dom.find("find")  # find <find> element :)
if len(find) <= 0:
    raise AlephException("Aleph didn't returned any information.")
find = find[0]
# convert aleph result into dictionary
result = _alephResultToDict(find)
# add informations about base into result
result["base"] = base
if "error" not in result:
    return result
# handle errors
if result["error"] == "empty set":
    # "empty set" is not a real failure -- report zero entries instead.
    result["no_entries"] = 0  # empty set have 0 entries
    return result
else:
    raise AlephException(result["error"]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def downloadRecords(search_result, from_doc=1):
""" Download `MAX_RECORDS` documents from `search_result` starting from `from_doc`. Attr: search_result (dict):
returned from :func:`searchInAleph`. from_doc (int, default 1):
Start from document number `from_doc`. Returns: list: List of XML strings with documents in MARC OAI. """ |
downer = Downloader()
if "set_number" not in search_result:
return []
# set numbers should be probably aligned to some length
set_number = str(search_result["set_number"])
if len(set_number) < 6:
set_number = (6 - len(set_number)) * "0" + set_number
# download all no_records
records = []
for cnt in range(search_result["no_records"]):
doc_number = from_doc + cnt
if cnt >= MAX_RECORDS or doc_number > search_result["no_records"]:
break
set_data = downer.download(
ALEPH_URL + Template(RECORD_URL_TEMPLATE).substitute(
SET_NUM=set_number,
RECORD_NUM=doc_number,
)
)
records.append(set_data)
return records |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getDocumentIDs(aleph_search_result, number_of_docs=-1):
""" Get IDs, which can be used as parameters for other functions. Args: aleph_search_result (dict):
returned from :func:`searchInAleph` number_of_docs (int, optional):
how many :class:`DocumentID` from set given by `aleph_search_result` should be returned. Default -1 for all of them. Returns: list: :class:`DocumentID` named tuples to given `aleph_search_result`. Raises: AlephException: If Aleph returns unknown format of data. Note: Returned :class:`DocumentID` can be used as parameters to :func:`downloadMARCXML`. """ |
downer = Downloader()
# No result set means there is nothing to enumerate.
if "set_number" not in aleph_search_result:
    return []
# set numbers should be probably aligned to some length
set_number = str(aleph_search_result["set_number"])
if len(set_number) < 6:
    set_number = (6 - len(set_number)) * "0" + set_number
# limit number of fetched documents, if -1, download all
if number_of_docs <= 0:
    number_of_docs = aleph_search_result["no_entries"]
# download data about given set
set_data = downer.download(
    ALEPH_URL + Template(SET_URL_TEMPLATE).substitute(
        SET_NUMBER=set_number,
        NUMBER_OF_DOCS=number_of_docs,
    )
)
# parse data
dom = dhtmlparser.parseString(set_data)
set_data = dom.find("ill-get-set")
# there should be at least one <ill-get-set> field
if len(set_data) <= 0:
    raise AlephException("Aleph didn't returned set data.")
ids = []
for library in set_data:
    documents = _alephResultToDict(library)
    if "error" in documents:
        raise AlephException("getDocumentIDs: " + documents["error"])
    # convert all document records to DocumentID named tuple and extend
    # them to 'ids' array
    # (Aleph returns a list for several documents, a scalar for one)
    if isinstance(documents["doc-number"], list):
        ids.extend(
            map(
                lambda x: DocumentID(
                    x,
                    documents["set-library"],
                    aleph_search_result["base"]
                ),
                set(documents["doc-number"])
            )
        )
    else:
        ids.append(
            DocumentID(
                documents["doc-number"],
                documents["set-library"],
                aleph_search_result["base"]
            )
        )
return ids |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _index_item(self, uri, num, batch_num):
""" queries the triplestore for an item sends it to elasticsearch """ |
data = RdfDataset(get_all_item_data(uri, self.namespace),
uri).base_class.es_json()
self.batch_data[batch_num].append(data)
self.count += 1 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def share_with_org(project_ids, org, access_level, suppress_email_notification=False):
""" Shares one or more DNAnexus projects with an organization. Args: project_ids: `list`. One or more DNAnexus project identifiers, where each project ID is in the form "project-FXq6B809p5jKzp2vJkjkKvg3". org: `str`. The name of the DNAnexus org with which to share the projects. access_level: The permission level to give to members of the org - one of ["VIEW","UPLOAD","CONTRIBUTE","ADMINISTER"]. suppress_email_notification: `bool`. True means to allow the DNAnexus platform to send an email notification for each shared project. """ |
for p in project_ids:
dxpy.api.project_invite(object_id=p,input_params={"invitee": org,"level": access_level,"suppressEmailNotification": suppress_email_notification}) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_url(url):
""" Takes a URL string and returns its protocol and server """ |
# Verify that the protocol makes sense. We shouldn't guess!
if not RE_PROTOCOL_SERVER.match(url):
raise Exception("URL should begin with `protocol://domain`")
protocol, server, path, _, _, _ = urlparse.urlparse(url)
return protocol, server |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def record_event(self, event):
"""Records the ``KindleEvent`` `event` in the store """ |
with open(self._path, 'a') as file_:
file_.write(str(event) + '\n') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_events(self):
"""Returns a list of all ``KindleEvent``s held in the store """ |
with open(self._path, 'r') as file_:
file_lines = file_.read().splitlines()
event_lines = [line for line in file_lines if line]
events = []
for event_line in event_lines:
for event_cls in (AddEvent, SetReadingEvent, ReadEvent,
SetFinishedEvent):
try:
event = event_cls.from_str(event_line)
except EventParseError:
pass
else:
events.append(event)
return events |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def name(self, value):
"""Set parameter name. :param str value: name value. """ |
if isinstance(value, string_types):
match = Parameter._PARAM_NAME_COMPILER_MATCHER(value)
if match is None or match.group() != value:
value = re_compile(value)
self._name = value |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.