repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
wummel/dosage
dosagelib/plugins/s.py
https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/dosagelib/plugins/s.py#L332-L336
def namer(cls, imageUrl, pageUrl): """Use strip index number for image name.""" index = int(compile(r'id=(\d+)').search(pageUrl).group(1)) ext = imageUrl.rsplit('.', 1)[1] return "SnowFlakes-%d.%s" % (index, ext)
[ "def", "namer", "(", "cls", ",", "imageUrl", ",", "pageUrl", ")", ":", "index", "=", "int", "(", "compile", "(", "r'id=(\\d+)'", ")", ".", "search", "(", "pageUrl", ")", ".", "group", "(", "1", ")", ")", "ext", "=", "imageUrl", ".", "rsplit", "(", ...
Use strip index number for image name.
[ "Use", "strip", "index", "number", "for", "image", "name", "." ]
python
train
guaix-ucm/numina
numina/array/display/iofunctions.py
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/display/iofunctions.py#L140-L256
def read_value(ftype, prompt, default=None, minval=None, maxval=None, allowed_single_chars=None, question_mark=True): """Return value read from keyboard Parameters ---------- ftype : int() or float() Function defining the expected type. prompt : str Prompt string. default : int or None Default value. minval : int or None Mininum allowed value. maxval : int or None Maximum allowed value. allowed_single_chars : str String containing allowed valid characters. question_mark : bool If True, display question mark after prompt. Returns ------- result : integer, float or str Integer, float of single character. """ # avoid PyCharm warning 'might be referenced before assignment' result = None # question mark if question_mark: cquestion_mark = ' ? ' else: cquestion_mark = '' # check minimum value if minval is not None: try: iminval = ftype(minval) except ValueError: raise ValueError("'" + str(minval) + "' cannot " + "be used as an minval in readi()") else: iminval = None # check maximum value if maxval is not None: try: imaxval = ftype(maxval) except ValueError: raise ValueError("'" + str(maxval) + "' cannot " + "be used as an maxval in readi()") else: imaxval = None # minimum and maximum values if minval is None and maxval is None: cminmax = '' elif minval is None: cminmax = ' (number <= ' + str(imaxval) + ')' elif maxval is None: cminmax = ' (number >= ' + str(iminval) + ')' else: cminmax = ' (' + str(minval) + ' <= number <= ' + str(maxval) + ')' # main loop loop = True while loop: # display prompt if default is None: print(prompt + cminmax + cquestion_mark, end='') sys.stdout.flush() else: print(prompt + cminmax + ' [' + str(default) + ']' + cquestion_mark, end='') sys.stdout.flush() # read user's input cresult = sys.stdin.readline().strip() if cresult == '' and default is not None: cresult = str(default) # if valid allowed single character, return character if len(cresult) == 1: if allowed_single_chars is not None: if cresult in allowed_single_chars: return cresult # 
convert to ftype value try: result = ftype(cresult) except ValueError: print("*** Error: invalid " + str(ftype) + " value. Try again!") else: # check number is within expected range if minval is None and maxval is None: loop = False elif minval is None: if result <= imaxval: loop = False else: print("*** Error: number out of range. Try again!") elif maxval is None: if result >= iminval: loop = False else: print("*** Error: number out of range. Try again!") else: if iminval <= result <= imaxval: loop = False else: print("*** Error: number out of range. Try again!") return result
[ "def", "read_value", "(", "ftype", ",", "prompt", ",", "default", "=", "None", ",", "minval", "=", "None", ",", "maxval", "=", "None", ",", "allowed_single_chars", "=", "None", ",", "question_mark", "=", "True", ")", ":", "# avoid PyCharm warning 'might be ref...
Return value read from keyboard Parameters ---------- ftype : int() or float() Function defining the expected type. prompt : str Prompt string. default : int or None Default value. minval : int or None Mininum allowed value. maxval : int or None Maximum allowed value. allowed_single_chars : str String containing allowed valid characters. question_mark : bool If True, display question mark after prompt. Returns ------- result : integer, float or str Integer, float of single character.
[ "Return", "value", "read", "from", "keyboard" ]
python
train
opencobra/memote
memote/support/helpers.py
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L664-L764
def find_met_in_model(model, mnx_id, compartment_id=None): """ Return specific metabolites by looking up IDs in METANETX_SHORTLIST. Parameters ---------- model : cobra.Model The metabolic model under investigation. mnx_id : string Memote internal MetaNetX metabolite identifier used to map between cross-references in the METANETX_SHORTLIST. compartment_id : string, optional ID of the specific compartment where the metabolites should be found. Defaults to returning matching metabolites from all compartments. Returns ------- list cobra.Metabolite(s) matching the mnx_id. """ def compare_annotation(annotation): """ Return annotation IDs that match to METANETX_SHORTLIST references. Compares the set of METANETX_SHORTLIST references for a given mnx_id and the annotation IDs stored in a given annotation dictionary. """ query_values = set(utils.flatten(annotation.values())) ref_values = set(utils.flatten(METANETX_SHORTLIST[mnx_id])) return query_values & ref_values # Make sure that the MNX ID we're looking up exists in the metabolite # shortlist. if mnx_id not in METANETX_SHORTLIST.columns: raise ValueError( "{} is not in the MetaNetX Shortlist! Make sure " "you typed the ID correctly, if yes, update the " "shortlist by updating and re-running the script " "generate_mnx_shortlists.py.".format(mnx_id) ) candidates = [] # The MNX ID used in the model may or may not be tagged with a compartment # tag e.g. `MNXM23141_c` vs. `MNXM23141`, which is tested with the # following regex. # If the MNX ID itself cannot be found as an ID, we try all other # identifiers that are provided by our shortlist of MetaNetX' mapping # table. 
regex = re.compile('^{}(_[a-zA-Z0-9]+)?$'.format(mnx_id)) if model.metabolites.query(regex): candidates = model.metabolites.query(regex) elif model.metabolites.query(compare_annotation, attribute='annotation'): candidates = model.metabolites.query( compare_annotation, attribute='annotation' ) else: for value in METANETX_SHORTLIST[mnx_id]: if value: for ident in value: regex = re.compile('^{}(_[a-zA-Z0-9]+)?$'.format(ident)) if model.metabolites.query(regex, attribute='id'): candidates.extend( model.metabolites.query(regex, attribute='id')) # Return a list of all possible candidates if no specific compartment ID # is provided. # Otherwise, just return the candidate in one specific compartment. Raise # an exception if there are more than one possible candidates for a given # compartment. if compartment_id is None: print("compartment_id = None?") return candidates else: candidates_in_compartment = \ [cand for cand in candidates if cand.compartment == compartment_id] if len(candidates_in_compartment) == 0: raise RuntimeError("It was not possible to identify " "any metabolite in compartment {} corresponding to " "the following MetaNetX identifier: {}." "Make sure that a cross-reference to this ID in " "the MetaNetX Database exists for your " "identifier " "namespace.".format(compartment_id, mnx_id)) elif len(candidates_in_compartment) > 1: raise RuntimeError("It was not possible to uniquely identify " "a single metabolite in compartment {} that " "corresponds to the following MetaNetX " "identifier: {}." "Instead these candidates were found: {}." "Check that metabolite compartment tags are " "correct. Consider switching to a namespace scheme " "where identifiers are truly " "unique.".format(compartment_id, mnx_id, utils.get_ids( candidates_in_compartment )) ) else: return candidates_in_compartment
[ "def", "find_met_in_model", "(", "model", ",", "mnx_id", ",", "compartment_id", "=", "None", ")", ":", "def", "compare_annotation", "(", "annotation", ")", ":", "\"\"\"\n Return annotation IDs that match to METANETX_SHORTLIST references.\n\n Compares the set of META...
Return specific metabolites by looking up IDs in METANETX_SHORTLIST. Parameters ---------- model : cobra.Model The metabolic model under investigation. mnx_id : string Memote internal MetaNetX metabolite identifier used to map between cross-references in the METANETX_SHORTLIST. compartment_id : string, optional ID of the specific compartment where the metabolites should be found. Defaults to returning matching metabolites from all compartments. Returns ------- list cobra.Metabolite(s) matching the mnx_id.
[ "Return", "specific", "metabolites", "by", "looking", "up", "IDs", "in", "METANETX_SHORTLIST", "." ]
python
train
Gorialis/jishaku
jishaku/cog.py
https://github.com/Gorialis/jishaku/blob/fc7c479b9d510ede189a929c8aa6f7c8ef7f9a6e/jishaku/cog.py#L175-L184
async def jsk_hide(self, ctx: commands.Context): """ Hides Jishaku from the help command. """ if self.jsk.hidden: return await ctx.send("Jishaku is already hidden.") self.jsk.hidden = True await ctx.send("Jishaku is now hidden.")
[ "async", "def", "jsk_hide", "(", "self", ",", "ctx", ":", "commands", ".", "Context", ")", ":", "if", "self", ".", "jsk", ".", "hidden", ":", "return", "await", "ctx", ".", "send", "(", "\"Jishaku is already hidden.\"", ")", "self", ".", "jsk", ".", "h...
Hides Jishaku from the help command.
[ "Hides", "Jishaku", "from", "the", "help", "command", "." ]
python
train
JdeRobot/base
src/libs/comm_py/comm/ros/listenerLaser.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/libs/comm_py/comm/ros/listenerLaser.py#L79-L84
def start (self): ''' Starts (Subscribes) the client. ''' self.sub = rospy.Subscriber(self.topic, LaserScan, self.__callback)
[ "def", "start", "(", "self", ")", ":", "self", ".", "sub", "=", "rospy", ".", "Subscriber", "(", "self", ".", "topic", ",", "LaserScan", ",", "self", ".", "__callback", ")" ]
Starts (Subscribes) the client.
[ "Starts", "(", "Subscribes", ")", "the", "client", "." ]
python
train
richardchien/python-cqhttp
cqhttp_helper.py
https://github.com/richardchien/python-cqhttp/blob/1869819a8f89001e3f70668e31afc6c78f7f5bc2/cqhttp_helper.py#L212-L225
def send_private_msg(self, *, user_id, message, auto_escape=False): ''' 发送私聊消息 ------------ :param int user_id: 对方 QQ 号 :param str | list[ dict[ str, unknown ] ] message: 要发送的内容 :param bool auto_escape: 消息内容是否作为纯文本发送(即不解析 CQ 码),`message` 数据类型为 `list` 时无效 :return: {"message_id": int 消息ID} :rtype: dict[string, int] ''' return super().__getattr__('send_private_msg') \ (user_id=user_id, message=message, auto_escape=auto_escape)
[ "def", "send_private_msg", "(", "self", ",", "*", ",", "user_id", ",", "message", ",", "auto_escape", "=", "False", ")", ":", "return", "super", "(", ")", ".", "__getattr__", "(", "'send_private_msg'", ")", "(", "user_id", "=", "user_id", ",", "message", ...
发送私聊消息 ------------ :param int user_id: 对方 QQ 号 :param str | list[ dict[ str, unknown ] ] message: 要发送的内容 :param bool auto_escape: 消息内容是否作为纯文本发送(即不解析 CQ 码),`message` 数据类型为 `list` 时无效 :return: {"message_id": int 消息ID} :rtype: dict[string, int]
[ "发送私聊消息" ]
python
valid
msiemens/tinydb
tinydb/database.py
https://github.com/msiemens/tinydb/blob/10052cb1ae6a3682d26eb4272c44e3b020aa5877/tinydb/database.py#L179-L200
def table(self, name=DEFAULT_TABLE, **options): """ Get access to a specific table. Creates a new table, if it hasn't been created before, otherwise it returns the cached :class:`~tinydb.Table` object. :param name: The name of the table. :type name: str :param cache_size: How many query results to cache. :param table_class: Which table class to use. """ if name in self._table_cache: return self._table_cache[name] table_class = options.pop('table_class', self._cls_table) table = table_class(self._cls_storage_proxy(self._storage, name), name, **options) self._table_cache[name] = table return table
[ "def", "table", "(", "self", ",", "name", "=", "DEFAULT_TABLE", ",", "*", "*", "options", ")", ":", "if", "name", "in", "self", ".", "_table_cache", ":", "return", "self", ".", "_table_cache", "[", "name", "]", "table_class", "=", "options", ".", "pop"...
Get access to a specific table. Creates a new table, if it hasn't been created before, otherwise it returns the cached :class:`~tinydb.Table` object. :param name: The name of the table. :type name: str :param cache_size: How many query results to cache. :param table_class: Which table class to use.
[ "Get", "access", "to", "a", "specific", "table", "." ]
python
train
corydolphin/flask-cors
flask_cors/core.py
https://github.com/corydolphin/flask-cors/blob/13fbb1ea4c1bb422de91a726c3c7f1038d3743a3/flask_cors/core.py#L283-L295
def get_cors_options(appInstance, *dicts): """ Compute CORS options for an application by combining the DEFAULT_OPTIONS, the app's configuration-specified options and any dictionaries passed. The last specified option wins. """ options = DEFAULT_OPTIONS.copy() options.update(get_app_kwarg_dict(appInstance)) if dicts: for d in dicts: options.update(d) return serialize_options(options)
[ "def", "get_cors_options", "(", "appInstance", ",", "*", "dicts", ")", ":", "options", "=", "DEFAULT_OPTIONS", ".", "copy", "(", ")", "options", ".", "update", "(", "get_app_kwarg_dict", "(", "appInstance", ")", ")", "if", "dicts", ":", "for", "d", "in", ...
Compute CORS options for an application by combining the DEFAULT_OPTIONS, the app's configuration-specified options and any dictionaries passed. The last specified option wins.
[ "Compute", "CORS", "options", "for", "an", "application", "by", "combining", "the", "DEFAULT_OPTIONS", "the", "app", "s", "configuration", "-", "specified", "options", "and", "any", "dictionaries", "passed", ".", "The", "last", "specified", "option", "wins", "."...
python
valid
GNS3/gns3-server
gns3server/compute/dynamips/nios/nio.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/dynamips/nios/nio.py#L95-L119
def bind_filter(self, direction, filter_name): """ Adds a packet filter to this NIO. Filter "freq_drop" drops packets. Filter "capture" captures packets. :param direction: "in", "out" or "both" :param filter_name: name of the filter to apply """ if direction not in self._dynamips_direction: raise DynamipsError("Unknown direction {} to bind filter {}:".format(direction, filter_name)) dynamips_direction = self._dynamips_direction[direction] yield from self._hypervisor.send("nio bind_filter {name} {direction} {filter}".format(name=self._name, direction=dynamips_direction, filter=filter_name)) if direction == "in": self._input_filter = filter_name elif direction == "out": self._output_filter = filter_name elif direction == "both": self._input_filter = filter_name self._output_filter = filter_name
[ "def", "bind_filter", "(", "self", ",", "direction", ",", "filter_name", ")", ":", "if", "direction", "not", "in", "self", ".", "_dynamips_direction", ":", "raise", "DynamipsError", "(", "\"Unknown direction {} to bind filter {}:\"", ".", "format", "(", "direction",...
Adds a packet filter to this NIO. Filter "freq_drop" drops packets. Filter "capture" captures packets. :param direction: "in", "out" or "both" :param filter_name: name of the filter to apply
[ "Adds", "a", "packet", "filter", "to", "this", "NIO", ".", "Filter", "freq_drop", "drops", "packets", ".", "Filter", "capture", "captures", "packets", "." ]
python
train
huyingxi/Synonyms
synonyms/utils.py
https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/utils.py#L255-L259
def is_digit(obj): ''' Check if an object is Number ''' return isinstance(obj, (numbers.Integral, numbers.Complex, numbers.Real))
[ "def", "is_digit", "(", "obj", ")", ":", "return", "isinstance", "(", "obj", ",", "(", "numbers", ".", "Integral", ",", "numbers", ".", "Complex", ",", "numbers", ".", "Real", ")", ")" ]
Check if an object is Number
[ "Check", "if", "an", "object", "is", "Number" ]
python
train
JosuaKrause/quick_server
quick_server/quick_server.py
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L2768-L2785
def can_ignore_error(self, reqhnd=None): """Tests if the error is worth reporting. """ value = sys.exc_info()[1] try: if isinstance(value, BrokenPipeError) or \ isinstance(value, ConnectionResetError): return True except NameError: pass if not self.done: return False if not isinstance(value, socket.error): return False need_close = value.errno == 9 if need_close and reqhnd is not None: reqhnd.close_connection = 1 return need_close
[ "def", "can_ignore_error", "(", "self", ",", "reqhnd", "=", "None", ")", ":", "value", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "try", ":", "if", "isinstance", "(", "value", ",", "BrokenPipeError", ")", "or", "isinstance", "(", "value", ...
Tests if the error is worth reporting.
[ "Tests", "if", "the", "error", "is", "worth", "reporting", "." ]
python
train
tango-controls/pytango
tango/databaseds/database.py
https://github.com/tango-controls/pytango/blob/9cf78c517c9cdc1081ff6d080a9646a740cc1d36/tango/databaseds/database.py#L416-L444
def DbDeleteDeviceAttributeProperty(self, argin): """ delete a device attribute property from the database :param argin: Str[0] = Device name Str[1] = Attribute name Str[2] = Property name Str[n] = Property name :type: tango.DevVarStringArray :return: :rtype: tango.DevVoid """ self._log.debug("In DbDeleteDeviceAttributeProperty()") if len(argin) < 3: self.warn_stream("DataBase::db_delete_device_attribute_property(): insufficient number of arguments ") th_exc(DB_IncorrectArguments, "insufficient number of arguments to delete device attribute property", "DataBase::DeleteDeviceAttributeProperty()") dev_name, attr_name = argin[:2] ret, dev_name, dfm = check_device_name(argin) if not ret: self.warn_stream("DataBase::db_delete_device_attribute_property(): device name " + argin + " incorrect ") th_exc(DB_IncorrectDeviceName, "failed to delete device attribute property, device name incorrect", "DataBase::DeleteDeviceAttributeProperty()") for prop_name in argin[2:]: self.db.delete_device_attribute_property(dev_name, attr_name, prop_name)
[ "def", "DbDeleteDeviceAttributeProperty", "(", "self", ",", "argin", ")", ":", "self", ".", "_log", ".", "debug", "(", "\"In DbDeleteDeviceAttributeProperty()\"", ")", "if", "len", "(", "argin", ")", "<", "3", ":", "self", ".", "warn_stream", "(", "\"DataBase:...
delete a device attribute property from the database :param argin: Str[0] = Device name Str[1] = Attribute name Str[2] = Property name Str[n] = Property name :type: tango.DevVarStringArray :return: :rtype: tango.DevVoid
[ "delete", "a", "device", "attribute", "property", "from", "the", "database" ]
python
train
ihmeuw/vivarium
src/vivarium/framework/randomness.py
https://github.com/ihmeuw/vivarium/blob/c5f5d50f775c8bf337d3aae1ff7c57c025a8e258/src/vivarium/framework/randomness.py#L659-L675
def register_simulants(self, simulants: pd.DataFrame): """Adds new simulants to the randomness mapping. Parameters ---------- simulants : A table with state data representing the new simulants. Each simulant should pass through this function exactly once. Raises ------ RandomnessError : If the provided table does not contain all key columns specified in the configuration. """ if not all(k in simulants.columns for k in self._key_columns): raise RandomnessError("The simulants dataframe does not have all specified key_columns.") self._key_mapping.update(simulants.set_index(self._key_columns).index)
[ "def", "register_simulants", "(", "self", ",", "simulants", ":", "pd", ".", "DataFrame", ")", ":", "if", "not", "all", "(", "k", "in", "simulants", ".", "columns", "for", "k", "in", "self", ".", "_key_columns", ")", ":", "raise", "RandomnessError", "(", ...
Adds new simulants to the randomness mapping. Parameters ---------- simulants : A table with state data representing the new simulants. Each simulant should pass through this function exactly once. Raises ------ RandomnessError : If the provided table does not contain all key columns specified in the configuration.
[ "Adds", "new", "simulants", "to", "the", "randomness", "mapping", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xganttwidget/xganttwidgetitem.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xganttwidget/xganttwidgetitem.py#L402-L419
def setItemStyle(self, itemStyle): """ Sets the item style that will be used for this widget. If you are trying to set a style on an item that has children, make sure to turn off the useGroupStyleWithChildren option, or it will always display as a group. :param itemStyle | <XGanttWidgetItem.ItemStyle> """ self._itemStyle = itemStyle # initialize the group icon for group style if itemStyle == XGanttWidgetItem.ItemStyle.Group and \ self.icon(0).isNull(): ico = projexui.resources.find('img/folder_close.png') expand_ico = projexui.resources.find('img/folder_open.png') self.setIcon(0, QIcon(ico)) self.setExpandedIcon(0, QIcon(expand_ico))
[ "def", "setItemStyle", "(", "self", ",", "itemStyle", ")", ":", "self", ".", "_itemStyle", "=", "itemStyle", "# initialize the group icon for group style\r", "if", "itemStyle", "==", "XGanttWidgetItem", ".", "ItemStyle", ".", "Group", "and", "self", ".", "icon", "...
Sets the item style that will be used for this widget. If you are trying to set a style on an item that has children, make sure to turn off the useGroupStyleWithChildren option, or it will always display as a group. :param itemStyle | <XGanttWidgetItem.ItemStyle>
[ "Sets", "the", "item", "style", "that", "will", "be", "used", "for", "this", "widget", ".", "If", "you", "are", "trying", "to", "set", "a", "style", "on", "an", "item", "that", "has", "children", "make", "sure", "to", "turn", "off", "the", "useGroupSty...
python
train
saltstack/salt
salt/modules/neutron.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/neutron.py#L348-L366
def update_port(port, name, admin_state_up=True, profile=None): ''' Updates a port CLI Example: .. code-block:: bash salt '*' neutron.update_port port-name network-name new-port-name :param port: Port name or ID :param name: Name of this port :param admin_state_up: Set admin state up to true or false, default: true (Optional) :param profile: Profile to build on (Optional) :return: Value of updated port information ''' conn = _auth(profile) return conn.update_port(port, name, admin_state_up)
[ "def", "update_port", "(", "port", ",", "name", ",", "admin_state_up", "=", "True", ",", "profile", "=", "None", ")", ":", "conn", "=", "_auth", "(", "profile", ")", "return", "conn", ".", "update_port", "(", "port", ",", "name", ",", "admin_state_up", ...
Updates a port CLI Example: .. code-block:: bash salt '*' neutron.update_port port-name network-name new-port-name :param port: Port name or ID :param name: Name of this port :param admin_state_up: Set admin state up to true or false, default: true (Optional) :param profile: Profile to build on (Optional) :return: Value of updated port information
[ "Updates", "a", "port" ]
python
train
ronhanson/python-tbx
tbx/text.py
https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/tbx/text.py#L310-L318
def dict_to_xml(xml_dict): """ Converts a dictionary to an XML ElementTree Element """ import lxml.etree as etree root_tag = list(xml_dict.keys())[0] root = etree.Element(root_tag) _dict_to_xml_recurse(root, xml_dict[root_tag]) return root
[ "def", "dict_to_xml", "(", "xml_dict", ")", ":", "import", "lxml", ".", "etree", "as", "etree", "root_tag", "=", "list", "(", "xml_dict", ".", "keys", "(", ")", ")", "[", "0", "]", "root", "=", "etree", ".", "Element", "(", "root_tag", ")", "_dict_to...
Converts a dictionary to an XML ElementTree Element
[ "Converts", "a", "dictionary", "to", "an", "XML", "ElementTree", "Element" ]
python
train
pandas-dev/pandas
pandas/core/computation/expr.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/expr.py#L94-L116
def _replace_locals(tok): """Replace local variables with a syntactically valid name. Parameters ---------- tok : tuple of int, str ints correspond to the all caps constants in the tokenize module Returns ------- t : tuple of int, str Either the input or token or the replacement values Notes ----- This is somewhat of a hack in that we rewrite a string such as ``'@a'`` as ``'__pd_eval_local_a'`` by telling the tokenizer that ``__pd_eval_local_`` is a ``tokenize.OP`` and to replace the ``'@'`` symbol with it. """ toknum, tokval = tok if toknum == tokenize.OP and tokval == '@': return tokenize.OP, _LOCAL_TAG return toknum, tokval
[ "def", "_replace_locals", "(", "tok", ")", ":", "toknum", ",", "tokval", "=", "tok", "if", "toknum", "==", "tokenize", ".", "OP", "and", "tokval", "==", "'@'", ":", "return", "tokenize", ".", "OP", ",", "_LOCAL_TAG", "return", "toknum", ",", "tokval" ]
Replace local variables with a syntactically valid name. Parameters ---------- tok : tuple of int, str ints correspond to the all caps constants in the tokenize module Returns ------- t : tuple of int, str Either the input or token or the replacement values Notes ----- This is somewhat of a hack in that we rewrite a string such as ``'@a'`` as ``'__pd_eval_local_a'`` by telling the tokenizer that ``__pd_eval_local_`` is a ``tokenize.OP`` and to replace the ``'@'`` symbol with it.
[ "Replace", "local", "variables", "with", "a", "syntactically", "valid", "name", "." ]
python
train
bwhite/hadoopy
hadoopy/thirdparty/pyinstaller/PyInstaller/build.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/build.py#L105-L152
def compile_pycos(toc): """Given a TOC or equivalent list of tuples, generates all the required pyc/pyo files, writing in a local directory if required, and returns the list of tuples with the updated pathnames. """ global BUILDPATH # For those modules that need to be rebuilt, use the build directory # PyInstaller creates during the build process. basepath = os.path.join(BUILDPATH, "localpycos") new_toc = [] for (nm, fnm, typ) in toc: # Trim the terminal "c" or "o" source_fnm = fnm[:-1] # If the source is newer than the compiled, or the compiled doesn't # exist, we need to perform a build ourselves. if mtime(source_fnm) > mtime(fnm): try: py_compile.compile(source_fnm) except IOError: # If we're compiling on a system directory, probably we don't # have write permissions; thus we compile to a local directory # and change the TOC entry accordingly. ext = os.path.splitext(fnm)[1] if "__init__" not in fnm: # If it's a normal module, use last part of the qualified # name as module name and the first as leading path leading, mod_name = nm.split(".")[:-1], nm.split(".")[-1] else: # In case of a __init__ module, use all the qualified name # as leading path and use "__init__" as the module name leading, mod_name = nm.split("."), "__init__" leading = os.path.join(basepath, *leading) if not os.path.exists(leading): os.makedirs(leading) fnm = os.path.join(leading, mod_name + ext) py_compile.compile(source_fnm, fnm) new_toc.append((nm, fnm, typ)) return new_toc
[ "def", "compile_pycos", "(", "toc", ")", ":", "global", "BUILDPATH", "# For those modules that need to be rebuilt, use the build directory", "# PyInstaller creates during the build process.", "basepath", "=", "os", ".", "path", ".", "join", "(", "BUILDPATH", ",", "\"localpyco...
Given a TOC or equivalent list of tuples, generates all the required pyc/pyo files, writing in a local directory if required, and returns the list of tuples with the updated pathnames.
[ "Given", "a", "TOC", "or", "equivalent", "list", "of", "tuples", "generates", "all", "the", "required", "pyc", "/", "pyo", "files", "writing", "in", "a", "local", "directory", "if", "required", "and", "returns", "the", "list", "of", "tuples", "with", "the"...
python
train
saltstack/salt
salt/cloud/clouds/profitbricks.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/profitbricks.py#L679-L689
def get_node(conn, name): ''' Return a node for the named VM ''' datacenter_id = get_datacenter_id() for item in conn.list_servers(datacenter_id)['items']: if item['properties']['name'] == name: node = {'id': item['id']} node.update(item['properties']) return node
[ "def", "get_node", "(", "conn", ",", "name", ")", ":", "datacenter_id", "=", "get_datacenter_id", "(", ")", "for", "item", "in", "conn", ".", "list_servers", "(", "datacenter_id", ")", "[", "'items'", "]", ":", "if", "item", "[", "'properties'", "]", "["...
Return a node for the named VM
[ "Return", "a", "node", "for", "the", "named", "VM" ]
python
train
tensorflow/probability
tensorflow_probability/python/bijectors/bijector.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/bijector.py#L112-L120
def _merge(self, old, new, use_equals=False): """Helper to merge which handles merging one value.""" if old is None: return new if new is None: return old if (old == new) if use_equals else (old is new): return old raise ValueError("Incompatible values: %s != %s" % (old, new))
[ "def", "_merge", "(", "self", ",", "old", ",", "new", ",", "use_equals", "=", "False", ")", ":", "if", "old", "is", "None", ":", "return", "new", "if", "new", "is", "None", ":", "return", "old", "if", "(", "old", "==", "new", ")", "if", "use_equa...
Helper to merge which handles merging one value.
[ "Helper", "to", "merge", "which", "handles", "merging", "one", "value", "." ]
python
test
kristianfoerster/melodist
melodist/stationstatistics.py
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/stationstatistics.py#L188-L223
def to_json(self, filename=None): """ Exports statistical data to a JSON formatted file Parameters ---------- filename: output file that holds statistics data """ def json_encoder(obj): if isinstance(obj, pd.DataFrame) or isinstance(obj, pd.Series): if isinstance(obj.index, pd.core.index.MultiIndex): obj = obj.reset_index() # convert MultiIndex to columns return json.loads(obj.to_json(date_format='iso')) elif isinstance(obj, melodist.cascade.CascadeStatistics): return obj.__dict__ elif isinstance(obj, np.ndarray): return obj.tolist() else: raise TypeError('%s not supported' % type(obj)) d = dict( temp=self.temp, wind=self.wind, precip=self.precip, hum=self.hum, glob=self.glob ) j = json.dumps(d, default=json_encoder, indent=4) if filename is None: return j else: with open(filename, 'w') as f: f.write(j)
[ "def", "to_json", "(", "self", ",", "filename", "=", "None", ")", ":", "def", "json_encoder", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "pd", ".", "DataFrame", ")", "or", "isinstance", "(", "obj", ",", "pd", ".", "Series", ")", ":"...
Exports statistical data to a JSON formatted file Parameters ---------- filename: output file that holds statistics data
[ "Exports", "statistical", "data", "to", "a", "JSON", "formatted", "file" ]
python
train
knaperek/djangosaml2
djangosaml2/views.py
https://github.com/knaperek/djangosaml2/blob/643969701d3b4257a8d64c5c577602ebaa61de70/djangosaml2/views.py#L362-L408
def logout(request, config_loader_path=None): """SAML Logout Request initiator This view initiates the SAML2 Logout request using the pysaml2 library to create the LogoutRequest. """ state = StateCache(request.session) conf = get_config(config_loader_path, request) client = Saml2Client(conf, state_cache=state, identity_cache=IdentityCache(request.session)) subject_id = _get_subject_id(request.session) if subject_id is None: logger.warning( 'The session does not contain the subject id for user %s', request.user) result = client.global_logout(subject_id) state.sync() if not result: logger.error("Looks like the user %s is not logged in any IdP/AA", subject_id) return HttpResponseBadRequest("You are not logged in any IdP/AA") if len(result) > 1: logger.error('Sorry, I do not know how to logout from several sources. I will logout just from the first one') for entityid, logout_info in result.items(): if isinstance(logout_info, tuple): binding, http_info = logout_info if binding == BINDING_HTTP_POST: logger.debug('Returning form to the IdP to continue the logout process') body = ''.join(http_info['data']) return HttpResponse(body) elif binding == BINDING_HTTP_REDIRECT: logger.debug('Redirecting to the IdP to continue the logout process') return HttpResponseRedirect(get_location(http_info)) else: logger.error('Unknown binding: %s', binding) return HttpResponseServerError('Failed to log out') else: # We must have had a soap logout return finish_logout(request, logout_info) logger.error('Could not logout because there only the HTTP_REDIRECT is supported') return HttpResponseServerError('Logout Binding not supported')
[ "def", "logout", "(", "request", ",", "config_loader_path", "=", "None", ")", ":", "state", "=", "StateCache", "(", "request", ".", "session", ")", "conf", "=", "get_config", "(", "config_loader_path", ",", "request", ")", "client", "=", "Saml2Client", "(", ...
SAML Logout Request initiator This view initiates the SAML2 Logout request using the pysaml2 library to create the LogoutRequest.
[ "SAML", "Logout", "Request", "initiator" ]
python
train
krinj/k-util
k_util/region.py
https://github.com/krinj/k-util/blob/b118826b1d6f49ca4e1ca7327d5b171db332ac23/k_util/region.py#L78-L85
def is_in_bounds(self, width, height) -> bool: """ Check if this entire region is contained within the bounds of a given stage size.""" if self._top < 0 \ or self._bottom > height \ or self._left < 0 \ or self._right > width: return False return True
[ "def", "is_in_bounds", "(", "self", ",", "width", ",", "height", ")", "->", "bool", ":", "if", "self", ".", "_top", "<", "0", "or", "self", ".", "_bottom", ">", "height", "or", "self", ".", "_left", "<", "0", "or", "self", ".", "_right", ">", "wi...
Check if this entire region is contained within the bounds of a given stage size.
[ "Check", "if", "this", "entire", "region", "is", "contained", "within", "the", "bounds", "of", "a", "given", "stage", "size", "." ]
python
train
coldfix/udiskie
udiskie/config.py
https://github.com/coldfix/udiskie/blob/804c9d27df6f7361fec3097c432398f2d702f911/udiskie/config.py#L126-L138
def value(self, kind, device): """ Get the value for the device object associated with this filter. If :meth:`match` is False for the device, the return value of this method is undefined. """ self._log.debug(_('{0}(match={1!r}, {2}={3!r}) used for {4}', self.__class__.__name__, self._match, kind, self._values[kind], device.object_path)) return self._values[kind]
[ "def", "value", "(", "self", ",", "kind", ",", "device", ")", ":", "self", ".", "_log", ".", "debug", "(", "_", "(", "'{0}(match={1!r}, {2}={3!r}) used for {4}'", ",", "self", ".", "__class__", ".", "__name__", ",", "self", ".", "_match", ",", "kind", ",...
Get the value for the device object associated with this filter. If :meth:`match` is False for the device, the return value of this method is undefined.
[ "Get", "the", "value", "for", "the", "device", "object", "associated", "with", "this", "filter", "." ]
python
train
dropbox/stone
stone/backends/python_rsrc/stone_serializers.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/backends/python_rsrc/stone_serializers.py#L582-L612
def decode_struct(self, data_type, obj): """ The data_type argument must be a Struct. See json_compat_obj_decode() for argument descriptions. """ if obj is None and data_type.has_default(): return data_type.get_default() elif not isinstance(obj, dict): raise bv.ValidationError('expected object, got %s' % bv.generic_type_name(obj)) all_fields = data_type.definition._all_fields_ for extra_permission in self.caller_permissions.permissions: all_extra_fields = '_all_{}_fields_'.format(extra_permission) all_fields = all_fields + getattr(data_type.definition, all_extra_fields, []) if self.strict: all_field_names = data_type.definition._all_field_names_ for extra_permission in self.caller_permissions.permissions: all_extra_field_names = '_all_{}_field_names_'.format(extra_permission) all_field_names = all_field_names.union( getattr(data_type.definition, all_extra_field_names, {})) for key in obj: if (key not in all_field_names and not key.startswith('.tag')): raise bv.ValidationError("unknown field '%s'" % key) ins = data_type.definition() self.decode_struct_fields(ins, all_fields, obj) # Check that all required fields have been set. data_type.validate_fields_only_with_permissions(ins, self.caller_permissions) return ins
[ "def", "decode_struct", "(", "self", ",", "data_type", ",", "obj", ")", ":", "if", "obj", "is", "None", "and", "data_type", ".", "has_default", "(", ")", ":", "return", "data_type", ".", "get_default", "(", ")", "elif", "not", "isinstance", "(", "obj", ...
The data_type argument must be a Struct. See json_compat_obj_decode() for argument descriptions.
[ "The", "data_type", "argument", "must", "be", "a", "Struct", ".", "See", "json_compat_obj_decode", "()", "for", "argument", "descriptions", "." ]
python
train
projectshift/shift-boiler
boiler/user/models.py
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/models.py#L215-L218
def lock_account(self, minutes=30): """ Lock user account for a period """ period = datetime.timedelta(minutes=minutes) self.locked_until = datetime.datetime.utcnow() + period
[ "def", "lock_account", "(", "self", ",", "minutes", "=", "30", ")", ":", "period", "=", "datetime", ".", "timedelta", "(", "minutes", "=", "minutes", ")", "self", ".", "locked_until", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "+", "per...
Lock user account for a period
[ "Lock", "user", "account", "for", "a", "period" ]
python
train
Netflix-Skunkworks/swag-client
swag_client/backends/file.py
https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/backends/file.py#L105-L111
def get_all(self): """Gets all items in file.""" logger.debug('Fetching items. Path: {data_file}'.format( data_file=self.data_file )) return load_file(self.data_file)
[ "def", "get_all", "(", "self", ")", ":", "logger", ".", "debug", "(", "'Fetching items. Path: {data_file}'", ".", "format", "(", "data_file", "=", "self", ".", "data_file", ")", ")", "return", "load_file", "(", "self", ".", "data_file", ")" ]
Gets all items in file.
[ "Gets", "all", "items", "in", "file", "." ]
python
train
GGiecold/Cluster_Ensembles
src/Cluster_Ensembles/Cluster_Ensembles.py
https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L186-L215
def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name): """Write an hypergraph adjacency to disk to disk in an HDF5 data structure. Parameters ---------- hypergraph_adjacency : compressed sparse row matrix hdf5_file_name : file handle or string """ assert(hypergraph_adjacency.__class__ == scipy.sparse.csr.csr_matrix) byte_counts = hypergraph_adjacency.data.nbytes + hypergraph_adjacency.indices.nbytes + hypergraph_adjacency.indptr.nbytes FILTERS = get_compression_filter(byte_counts) with tables.open_file(hdf5_file_name, 'r+') as fileh: for par in ('data', 'indices', 'indptr', 'shape'): try: n = getattr(fileh.root.consensus_group, par) n._f_remove() except AttributeError: pass array = np.array(getattr(hypergraph_adjacency, par)) atom = tables.Atom.from_dtype(array.dtype) ds = fileh.create_carray(fileh.root.consensus_group, par, atom, array.shape, filters = FILTERS) ds[:] = array
[ "def", "store_hypergraph_adjacency", "(", "hypergraph_adjacency", ",", "hdf5_file_name", ")", ":", "assert", "(", "hypergraph_adjacency", ".", "__class__", "==", "scipy", ".", "sparse", ".", "csr", ".", "csr_matrix", ")", "byte_counts", "=", "hypergraph_adjacency", ...
Write an hypergraph adjacency to disk to disk in an HDF5 data structure. Parameters ---------- hypergraph_adjacency : compressed sparse row matrix hdf5_file_name : file handle or string
[ "Write", "an", "hypergraph", "adjacency", "to", "disk", "to", "disk", "in", "an", "HDF5", "data", "structure", ".", "Parameters", "----------", "hypergraph_adjacency", ":", "compressed", "sparse", "row", "matrix", "hdf5_file_name", ":", "file", "handle", "or", "...
python
train
unitedstack/steth
stetho/agent/api.py
https://github.com/unitedstack/steth/blob/955884ceebf3bdc474c93cc5cf555e67d16458f1/stetho/agent/api.py#L74-L88
def add_vlan_to_interface(self, interface, vlan_id): """Add vlan interface. ip link add link eth0 name eth0.10 type vlan id 10 """ subif = '%s.%s' % (interface, vlan_id) vlan_id = '%s' % vlan_id cmd = ['ip', 'link', 'add', 'link', interface, 'name', subif, 'type', 'vlan', 'id', vlan_id] stdcode, stdout = agent_utils.execute(cmd, root=True) if stdcode == 0: return agent_utils.make_response(code=stdcode) # execute failed. message = stdout.pop(0) return agent_utils.make_response(code=stdcode, message=message)
[ "def", "add_vlan_to_interface", "(", "self", ",", "interface", ",", "vlan_id", ")", ":", "subif", "=", "'%s.%s'", "%", "(", "interface", ",", "vlan_id", ")", "vlan_id", "=", "'%s'", "%", "vlan_id", "cmd", "=", "[", "'ip'", ",", "'link'", ",", "'add'", ...
Add vlan interface. ip link add link eth0 name eth0.10 type vlan id 10
[ "Add", "vlan", "interface", "." ]
python
train
abilian/abilian-core
abilian/web/views/object.py
https://github.com/abilian/abilian-core/blob/0a71275bf108c3d51e13ca9e093c0249235351e3/abilian/web/views/object.py#L625-L631
def get_item(self, obj): """Return a result item. :param obj: Instance object :returns: a dictionnary with at least `id` and `text` values """ return {"id": obj.id, "text": self.get_label(obj), "name": obj.name}
[ "def", "get_item", "(", "self", ",", "obj", ")", ":", "return", "{", "\"id\"", ":", "obj", ".", "id", ",", "\"text\"", ":", "self", ".", "get_label", "(", "obj", ")", ",", "\"name\"", ":", "obj", ".", "name", "}" ]
Return a result item. :param obj: Instance object :returns: a dictionnary with at least `id` and `text` values
[ "Return", "a", "result", "item", "." ]
python
train
onnx/onnxmltools
onnxutils/onnxconverter_common/optimizer.py
https://github.com/onnx/onnxmltools/blob/d4e4c31990fc2d9fd1f92139f497d360914c9df2/onnxutils/onnxconverter_common/optimizer.py#L60-L65
def in_single_path_and_inner(self): """ Test if a node is not linking to any fan in or out node. """ return len(self.successor) == 1 and self.successor[0] is not None and not self.successor[0].in_or_out and \ len(self.precedence) == 1 and self.precedence[0] is not None and not self.successor[0].in_or_out
[ "def", "in_single_path_and_inner", "(", "self", ")", ":", "return", "len", "(", "self", ".", "successor", ")", "==", "1", "and", "self", ".", "successor", "[", "0", "]", "is", "not", "None", "and", "not", "self", ".", "successor", "[", "0", "]", ".",...
Test if a node is not linking to any fan in or out node.
[ "Test", "if", "a", "node", "is", "not", "linking", "to", "any", "fan", "in", "or", "out", "node", "." ]
python
train
CiscoTestAutomation/yang
ncdiff/src/yang/ncdiff/restconf.py
https://github.com/CiscoTestAutomation/yang/blob/c70ec5ac5a91f276c4060009203770ece92e76b4/ncdiff/src/yang/ncdiff/restconf.py#L273-L326
def get_json(self, instance=True): '''get_json High-level api: get_json returns json_val of the config node. Parameters ---------- instance : `bool` True if only one instance of list or leaf-list is required. False if all instances of list or leaf-list are needed. Returns ------- str A string in JSON format. ''' def get_json_instance(node): pk = Parker(xml_fromstring=_fromstring, dict_type=OrderedDict) default_ns = {} nodes = [node] + node.findall('.//') for item in nodes: parents = [p for p in node.findall('.//{}/..'.format(item.tag)) if item in p.findall('*')] if parents and id(parents[0]) in default_ns: default_url = default_ns[id(parents[0])] ns, tag = self.device.convert_tag(default_url, item.tag, dst=Tag.JSON_NAME) else: ns, tag = self.device.convert_tag('', item.tag, dst=Tag.JSON_NAME) default_ns[id(item)] = ns item.tag = tag return pk.data(node, preserve_root=True) def convert_node(node): # lxml.etree does not allow tag name like oc-if:enable # so it is converted to xml.etree.ElementTree string = etree.tostring(node, encoding='unicode', pretty_print=False) return ElementTree.fromstring(string) if instance: return json.dumps(get_json_instance(convert_node(self.node))) else: nodes = [n for n in self.node.getparent() \ .iterchildren(tag=self.node.tag)] if len(nodes) > 1: return json.dumps([get_json_instance(convert_node(n)) for n in nodes]) else: return json.dumps(get_json_instance(convert_node(nodes[0])))
[ "def", "get_json", "(", "self", ",", "instance", "=", "True", ")", ":", "def", "get_json_instance", "(", "node", ")", ":", "pk", "=", "Parker", "(", "xml_fromstring", "=", "_fromstring", ",", "dict_type", "=", "OrderedDict", ")", "default_ns", "=", "{", ...
get_json High-level api: get_json returns json_val of the config node. Parameters ---------- instance : `bool` True if only one instance of list or leaf-list is required. False if all instances of list or leaf-list are needed. Returns ------- str A string in JSON format.
[ "get_json" ]
python
train
wavycloud/pyboto3
pyboto3/dynamodb.py
https://github.com/wavycloud/pyboto3/blob/924957ccf994303713a4eed90b775ff2ab95b2e5/pyboto3/dynamodb.py#L2657-L3215
def query(TableName=None, IndexName=None, Select=None, AttributesToGet=None, Limit=None, ConsistentRead=None, KeyConditions=None, QueryFilter=None, ConditionalOperator=None, ScanIndexForward=None, ExclusiveStartKey=None, ReturnConsumedCapacity=None, ProjectionExpression=None, FilterExpression=None, KeyConditionExpression=None, ExpressionAttributeNames=None, ExpressionAttributeValues=None): """ A Query operation uses the primary key of a table or a secondary index to directly access items from that table or index. Use the KeyConditionExpression parameter to provide a specific value for the partition key. The Query operation will return all of the items from the table or index with that partition key value. You can optionally narrow the scope of the Query operation by specifying a sort key value and a comparison operator in KeyConditionExpression . You can use the ScanIndexForward parameter to get results in forward or reverse order, by sort key. Queries that do not return results consume the minimum number of read capacity units for that type of read operation. If the total number of items meeting the query criteria exceeds the result set size limit of 1 MB, the query stops and results are returned to the user with the LastEvaluatedKey element to continue the query in a subsequent operation. Unlike a Scan operation, a Query operation never returns both an empty result set and a LastEvaluatedKey value. LastEvaluatedKey is only provided if you have used the Limit parameter, or if the result set exceeds 1 MB (prior to applying a filter). You can query a table, a local secondary index, or a global secondary index. For a query on a table or on a local secondary index, you can set the ConsistentRead parameter to true and obtain a strongly consistent result. Global secondary indexes support eventually consistent reads only, so do not specify ConsistentRead when querying a global secondary index. 
See also: AWS API Documentation Examples This example queries items in the Music table. The table has a partition key and sort key (Artist and SongTitle), but this query only specifies the partition key value. It returns song titles by the artist named "No One You Know". Expected Output: :example: response = client.query( TableName='string', IndexName='string', Select='ALL_ATTRIBUTES'|'ALL_PROJECTED_ATTRIBUTES'|'SPECIFIC_ATTRIBUTES'|'COUNT', AttributesToGet=[ 'string', ], Limit=123, ConsistentRead=True|False, KeyConditions={ 'string': { 'AttributeValueList': [ { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False }, ], 'ComparisonOperator': 'EQ'|'NE'|'IN'|'LE'|'LT'|'GE'|'GT'|'BETWEEN'|'NOT_NULL'|'NULL'|'CONTAINS'|'NOT_CONTAINS'|'BEGINS_WITH' } }, QueryFilter={ 'string': { 'AttributeValueList': [ { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False }, ], 'ComparisonOperator': 'EQ'|'NE'|'IN'|'LE'|'LT'|'GE'|'GT'|'BETWEEN'|'NOT_NULL'|'NULL'|'CONTAINS'|'NOT_CONTAINS'|'BEGINS_WITH' } }, ConditionalOperator='AND'|'OR', ScanIndexForward=True|False, ExclusiveStartKey={ 'string': { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... 
recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False } }, ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE', ProjectionExpression='string', FilterExpression='string', KeyConditionExpression='string', ExpressionAttributeNames={ 'string': 'string' }, ExpressionAttributeValues={ 'string': { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False } } ) :type TableName: string :param TableName: [REQUIRED] The name of the table containing the requested items. :type IndexName: string :param IndexName: The name of an index to query. This index can be any local secondary index or global secondary index on the table. Note that if you use the IndexName parameter, you must also provide TableName. :type Select: string :param Select: The attributes to be returned in the result. You can retrieve all item attributes, specific item attributes, the count of matching items, or in the case of an index, some or all of the attributes projected into the index. ALL_ATTRIBUTES - Returns all of the item attributes from the specified table or index. If you query a local secondary index, then for each matching item in the index DynamoDB will fetch the entire item from the parent table. If the index is configured to project all item attributes, then all of the data can be obtained from the local secondary index, and no fetching is required. ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves all attributes that have been projected into the index. If the index is configured to project all attributes, this return value is equivalent to specifying ALL_ATTRIBUTES . COUNT - Returns the number of matching items, rather than the matching items themselves. SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet . 
This return value is equivalent to specifying AttributesToGet without specifying any value for Select . If you query or scan a local secondary index and request only attributes that are projected into that index, the operation will read only the index and not the table. If any of the requested attributes are not projected into the local secondary index, DynamoDB will fetch each of these attributes from the parent table. This extra fetching incurs additional throughput cost and latency. If you query or scan a global secondary index, you can only request attributes that are projected into the index. Global secondary index queries cannot fetch attributes from the parent table. If neither Select nor AttributesToGet are specified, DynamoDB defaults to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when accessing an index. You cannot use both Select and AttributesToGet together in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES . (This usage is equivalent to specifying AttributesToGet without any value for Select .) Note If you use the ProjectionExpression parameter, then the value for Select can only be SPECIFIC_ATTRIBUTES . Any other value for Select will return an error. :type AttributesToGet: list :param AttributesToGet: This is a legacy parameter. Use ProjectionExpression instead. For more information, see AttributesToGet in the Amazon DynamoDB Developer Guide . (string) -- :type Limit: integer :param Limit: The maximum number of items to evaluate (not necessarily the number of matching items). If DynamoDB processes the number of items up to the limit while processing the results, it stops the operation and returns the matching values up to that point, and a key in LastEvaluatedKey to apply in a subsequent operation, so that you can pick up where you left off. 
Also, if the processed data set size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation and returns the matching values up to the limit, and a key in LastEvaluatedKey to apply in a subsequent operation to continue the operation. For more information, see Query and Scan in the Amazon DynamoDB Developer Guide . :type ConsistentRead: boolean :param ConsistentRead: Determines the read consistency model: If set to true , then the operation uses strongly consistent reads; otherwise, the operation uses eventually consistent reads. Strongly consistent reads are not supported on global secondary indexes. If you query a global secondary index with ConsistentRead set to true , you will receive a ValidationException . :type KeyConditions: dict :param KeyConditions: This is a legacy parameter. Use KeyConditionExpression instead. For more information, see KeyConditions in the Amazon DynamoDB Developer Guide . (string) -- (dict) --Represents the selection criteria for a Query or Scan operation: For a Query operation, Condition is used for specifying the KeyConditions to use when querying a table or an index. For KeyConditions , only the following comparison operators are supported: EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN Condition is also used in a QueryFilter , which evaluates the query results and returns only the desired values. For a Scan operation, Condition is used in a ScanFilter , which evaluates the scan results and returns only the desired values. AttributeValueList (list) --One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used. For type Number, value comparisons are numeric. String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A , and a is greater than B . For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters . 
For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values. (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . S (string) --An attribute of type String. For example: 'S': 'Hello' N (string) --An attribute of type Number. For example: 'N': '123.45' Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. B (bytes) --An attribute of type Binary. For example: 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk' SS (list) --An attribute of type String Set. For example: 'SS': ['Giraffe', 'Hippo' ,'Zebra'] (string) -- NS (list) --An attribute of type Number Set. For example: 'NS': ['42.2', '-19', '7.5', '3.14'] Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. (string) -- BS (list) --An attribute of type Binary Set. For example: 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k='] (bytes) -- M (dict) --An attribute of type Map. For example: 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}} (string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . L (list) --An attribute of type List. For example: 'L': ['Cookies', 'Coffee', 3.14159] (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . 
NULL (boolean) --An attribute of type Null. For example: 'NULL': true BOOL (boolean) --An attribute of type Boolean. For example: 'BOOL': true ComparisonOperator (string) -- [REQUIRED]A comparator for evaluating attributes. For example, equals, greater than, less than, etc. The following comparison operators are available: EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN The following are descriptions of each comparison operator. EQ : Equal. EQ is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} . NE : Not equal. NE is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} . LE : Less than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} . LT : Less than. AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. 
For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} . GE : Greater than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} . GT : Greater than. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} . NOT_NULL : The attribute exists. NOT_NULL is supported for all data types, including lists and maps. Note This operator tests for the existence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NOT_NULL , the result is a Boolean true . This result is because the attribute 'a ' exists; its data type is not relevant to the NOT_NULL comparison operator. NULL : The attribute does not exist. NULL is supported for all data types, including lists and maps. Note This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NULL , the result is a Boolean false . This is because the attribute 'a ' exists; its data type is not relevant to the NULL comparison operator. CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. 
If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it finds an exact match with any member of the set. CONTAINS is supported for lists: When evaluating 'a CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list. NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it does not find an exact match with any member of the set. NOT_CONTAINS is supported for lists: When evaluating 'a NOT CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list. BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type). IN : Checks for matching elements in a list. AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary. These attributes are compared against an existing attribute of an item. If any elements of the input are equal to the item attribute, the expression evaluates to true. BETWEEN : Greater than or equal to the first value, and less than or equal to the second value. 
AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not compare to {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} For usage examples of AttributeValueList and ComparisonOperator , see Legacy Conditional Parameters in the Amazon DynamoDB Developer Guide . :type QueryFilter: dict :param QueryFilter: This is a legacy parameter. Use FilterExpression instead. For more information, see QueryFilter in the Amazon DynamoDB Developer Guide . (string) -- (dict) --Represents the selection criteria for a Query or Scan operation: For a Query operation, Condition is used for specifying the KeyConditions to use when querying a table or an index. For KeyConditions , only the following comparison operators are supported: EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN Condition is also used in a QueryFilter , which evaluates the query results and returns only the desired values. For a Scan operation, Condition is used in a ScanFilter , which evaluates the scan results and returns only the desired values. AttributeValueList (list) --One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used. For type Number, value comparisons are numeric. String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A , and a is greater than B . For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters . For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values. 
(dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . S (string) --An attribute of type String. For example: 'S': 'Hello' N (string) --An attribute of type Number. For example: 'N': '123.45' Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. B (bytes) --An attribute of type Binary. For example: 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk' SS (list) --An attribute of type String Set. For example: 'SS': ['Giraffe', 'Hippo' ,'Zebra'] (string) -- NS (list) --An attribute of type Number Set. For example: 'NS': ['42.2', '-19', '7.5', '3.14'] Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. (string) -- BS (list) --An attribute of type Binary Set. For example: 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k='] (bytes) -- M (dict) --An attribute of type Map. For example: 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}} (string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . L (list) --An attribute of type List. For example: 'L': ['Cookies', 'Coffee', 3.14159] (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . NULL (boolean) --An attribute of type Null. 
For example: 'NULL': true BOOL (boolean) --An attribute of type Boolean. For example: 'BOOL': true ComparisonOperator (string) -- [REQUIRED]A comparator for evaluating attributes. For example, equals, greater than, less than, etc. The following comparison operators are available: EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN The following are descriptions of each comparison operator. EQ : Equal. EQ is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} . NE : Not equal. NE is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} . LE : Less than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} . LT : Less than. AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. 
For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} . GE : Greater than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} . GT : Greater than. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} . NOT_NULL : The attribute exists. NOT_NULL is supported for all data types, including lists and maps. Note This operator tests for the existence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NOT_NULL , the result is a Boolean true . This result is because the attribute 'a ' exists; its data type is not relevant to the NOT_NULL comparison operator. NULL : The attribute does not exist. NULL is supported for all data types, including lists and maps. Note This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NULL , the result is a Boolean false . This is because the attribute 'a ' exists; its data type is not relevant to the NULL comparison operator. CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. 
If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it finds an exact match with any member of the set. CONTAINS is supported for lists: When evaluating 'a CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list. NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it does not find an exact match with any member of the set. NOT_CONTAINS is supported for lists: When evaluating 'a NOT CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list. BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type). IN : Checks for matching elements in a list. AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary. These attributes are compared against an existing attribute of an item. If any elements of the input are equal to the item attribute, the expression evaluates to true. BETWEEN : Greater than or equal to the first value, and less than or equal to the second value. 
AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not compare to {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} For usage examples of AttributeValueList and ComparisonOperator , see Legacy Conditional Parameters in the Amazon DynamoDB Developer Guide . :type ConditionalOperator: string :param ConditionalOperator: This is a legacy parameter. Use FilterExpression instead. For more information, see ConditionalOperator in the Amazon DynamoDB Developer Guide . :type ScanIndexForward: boolean :param ScanIndexForward: Specifies the order for index traversal: If true (default), the traversal is performed in ascending order; if false , the traversal is performed in descending order. Items with the same partition key value are stored in sorted order by sort key. If the sort key data type is Number, the results are stored in numeric order. For type String, the results are stored in order of ASCII character code values. For type Binary, DynamoDB treats each byte of the binary data as unsigned. If ScanIndexForward is true , DynamoDB returns the results in the order in which they are stored (by sort key value). This is the default behavior. If ScanIndexForward is false , DynamoDB reads the results in reverse order by sort key value, and then returns the results to the client. :type ExclusiveStartKey: dict :param ExclusiveStartKey: The primary key of the first item that this operation will evaluate. Use the value that was returned for LastEvaluatedKey in the previous operation. The data type for ExclusiveStartKey must be String, Number or Binary. No set data types are allowed. 
(string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . S (string) --An attribute of type String. For example: 'S': 'Hello' N (string) --An attribute of type Number. For example: 'N': '123.45' Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. B (bytes) --An attribute of type Binary. For example: 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk' SS (list) --An attribute of type String Set. For example: 'SS': ['Giraffe', 'Hippo' ,'Zebra'] (string) -- NS (list) --An attribute of type Number Set. For example: 'NS': ['42.2', '-19', '7.5', '3.14'] Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. (string) -- BS (list) --An attribute of type Binary Set. For example: 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k='] (bytes) -- M (dict) --An attribute of type Map. For example: 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}} (string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . L (list) --An attribute of type List. For example: 'L': ['Cookies', 'Coffee', 3.14159] (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . NULL (boolean) --An attribute of type Null. 
For example: 'NULL': true BOOL (boolean) --An attribute of type Boolean. For example: 'BOOL': true :type ReturnConsumedCapacity: string :param ReturnConsumedCapacity: Determines the level of detail about provisioned throughput consumption that is returned in the response: INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed. Note that some operations, such as GetItem and BatchGetItem , do not access any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s). TOTAL - The response includes only the aggregate ConsumedCapacity for the operation. NONE - No ConsumedCapacity details are included in the response. :type ProjectionExpression: string :param ProjectionExpression: A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas. If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result. For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide . :type FilterExpression: string :param FilterExpression: A string that contains conditions that DynamoDB applies after the Query operation, but before the data is returned to you. Items that do not satisfy the FilterExpression criteria are not returned. A FilterExpression does not allow key attributes. You cannot define a filter expression based on a partition key or a sort key. Note A FilterExpression is applied after the items have already been read; the process of filtering does not consume any additional read capacity units. For more information, see Filter Expressions in the Amazon DynamoDB Developer Guide . 
:type KeyConditionExpression: string :param KeyConditionExpression: The condition that specifies the key value(s) for items to be retrieved by the Query action. The condition must perform an equality test on a single partition key value. The condition can also perform one of several comparison tests on a single sort key value. Query can use KeyConditionExpression to retrieve one item with a given partition key value and sort key value, or several items that have the same partition key value but different sort key values. The partition key equality test is required, and must be specified in the following format: partitionKeyName = :partitionkeyval If you also want to provide a condition for the sort key, it must be combined using AND with the condition for the sort key. Following is an example, using the = comparison operator for the sort key: partitionKeyName = :partitionkeyval AND sortKeyName = :sortkeyval Valid comparisons for the sort key condition are as follows: sortKeyName = :sortkeyval - true if the sort key value is equal to :sortkeyval . sortKeyName ```` :sortkeyval - true if the sort key value is less than :sortkeyval . sortKeyName = :sortkeyval - true if the sort key value is less than or equal to :sortkeyval . sortKeyName ```` :sortkeyval - true if the sort key value is greater than :sortkeyval . sortKeyName = :sortkeyval - true if the sort key value is greater than or equal to :sortkeyval . sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 - true if the sort key value is greater than or equal to :sortkeyval1 , and less than or equal to :sortkeyval2 . begins_with ( sortKeyName , :sortkeyval ) - true if the sort key value begins with a particular operand. (You cannot use this function with a sort key that is of type Number.) Note that the function name begins_with is case-sensitive. Use the ExpressionAttributeValues parameter to replace tokens such as :partitionval and :sortval with actual values at runtime. 
You can optionally use the ExpressionAttributeNames parameter to replace the names of the partition key and sort key with placeholder tokens. This option might be necessary if an attribute name conflicts with a DynamoDB reserved word. For example, the following KeyConditionExpression parameter causes an error because Size is a reserved word: Size = :myval To work around this, define a placeholder (such a #S ) to represent the attribute name Size . KeyConditionExpression then is as follows: #S = :myval For a list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide . For more information on ExpressionAttributeNames and ExpressionAttributeValues , see Using Placeholders for Attribute Names and Values in the Amazon DynamoDB Developer Guide . :type ExpressionAttributeNames: dict :param ExpressionAttributeNames: One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames : To access an attribute whose name conflicts with a DynamoDB reserved word. To create a placeholder for repeating occurrences of an attribute name in an expression. To prevent special characters in an attribute name from being misinterpreted in an expression. Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name: Percentile The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide ). To work around this, you could specify the following for ExpressionAttributeNames : {'#P':'Percentile'} You could then use this substitution in an expression, as in this example: #P = :val Note Tokens that begin with the : character are expression attribute values , which are placeholders for the actual value at runtime. 
For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide . (string) -- (string) -- :type ExpressionAttributeValues: dict :param ExpressionAttributeValues: One or more values that can be substituted in an expression. Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following: Available | Backordered | Discontinued You would first need to specify ExpressionAttributeValues as follows: { ':avail':{'S':'Available'}, ':back':{'S':'Backordered'}, ':disc':{'S':'Discontinued'} } You could then use these values in an expression, such as this: ProductStatus IN (:avail, :back, :disc) For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide . (string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . S (string) --An attribute of type String. For example: 'S': 'Hello' N (string) --An attribute of type Number. For example: 'N': '123.45' Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. B (bytes) --An attribute of type Binary. For example: 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk' SS (list) --An attribute of type String Set. For example: 'SS': ['Giraffe', 'Hippo' ,'Zebra'] (string) -- NS (list) --An attribute of type Number Set. For example: 'NS': ['42.2', '-19', '7.5', '3.14'] Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. 
(string) -- BS (list) --An attribute of type Binary Set. For example: 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k='] (bytes) -- M (dict) --An attribute of type Map. For example: 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}} (string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . L (list) --An attribute of type List. For example: 'L': ['Cookies', 'Coffee', 3.14159] (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . NULL (boolean) --An attribute of type Null. For example: 'NULL': true BOOL (boolean) --An attribute of type Boolean. For example: 'BOOL': true :rtype: dict :return: { 'Items': [ { 'string': { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False } }, ], 'Count': 123, 'ScannedCount': 123, 'LastEvaluatedKey': { 'string': { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False } }, 'ConsumedCapacity': { 'TableName': 'string', 'CapacityUnits': 123.0, 'Table': { 'CapacityUnits': 123.0 }, 'LocalSecondaryIndexes': { 'string': { 'CapacityUnits': 123.0 } }, 'GlobalSecondaryIndexes': { 'string': { 'CapacityUnits': 123.0 } } } } :returns: (string) -- """ pass
[ "def", "query", "(", "TableName", "=", "None", ",", "IndexName", "=", "None", ",", "Select", "=", "None", ",", "AttributesToGet", "=", "None", ",", "Limit", "=", "None", ",", "ConsistentRead", "=", "None", ",", "KeyConditions", "=", "None", ",", "QueryFi...
A Query operation uses the primary key of a table or a secondary index to directly access items from that table or index. Use the KeyConditionExpression parameter to provide a specific value for the partition key. The Query operation will return all of the items from the table or index with that partition key value. You can optionally narrow the scope of the Query operation by specifying a sort key value and a comparison operator in KeyConditionExpression . You can use the ScanIndexForward parameter to get results in forward or reverse order, by sort key. Queries that do not return results consume the minimum number of read capacity units for that type of read operation. If the total number of items meeting the query criteria exceeds the result set size limit of 1 MB, the query stops and results are returned to the user with the LastEvaluatedKey element to continue the query in a subsequent operation. Unlike a Scan operation, a Query operation never returns both an empty result set and a LastEvaluatedKey value. LastEvaluatedKey is only provided if you have used the Limit parameter, or if the result set exceeds 1 MB (prior to applying a filter). You can query a table, a local secondary index, or a global secondary index. For a query on a table or on a local secondary index, you can set the ConsistentRead parameter to true and obtain a strongly consistent result. Global secondary indexes support eventually consistent reads only, so do not specify ConsistentRead when querying a global secondary index. See also: AWS API Documentation Examples This example queries items in the Music table. The table has a partition key and sort key (Artist and SongTitle), but this query only specifies the partition key value. It returns song titles by the artist named "No One You Know". 
Expected Output: :example: response = client.query( TableName='string', IndexName='string', Select='ALL_ATTRIBUTES'|'ALL_PROJECTED_ATTRIBUTES'|'SPECIFIC_ATTRIBUTES'|'COUNT', AttributesToGet=[ 'string', ], Limit=123, ConsistentRead=True|False, KeyConditions={ 'string': { 'AttributeValueList': [ { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False }, ], 'ComparisonOperator': 'EQ'|'NE'|'IN'|'LE'|'LT'|'GE'|'GT'|'BETWEEN'|'NOT_NULL'|'NULL'|'CONTAINS'|'NOT_CONTAINS'|'BEGINS_WITH' } }, QueryFilter={ 'string': { 'AttributeValueList': [ { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False }, ], 'ComparisonOperator': 'EQ'|'NE'|'IN'|'LE'|'LT'|'GE'|'GT'|'BETWEEN'|'NOT_NULL'|'NULL'|'CONTAINS'|'NOT_CONTAINS'|'BEGINS_WITH' } }, ConditionalOperator='AND'|'OR', ScanIndexForward=True|False, ExclusiveStartKey={ 'string': { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False } }, ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE', ProjectionExpression='string', FilterExpression='string', KeyConditionExpression='string', ExpressionAttributeNames={ 'string': 'string' }, ExpressionAttributeValues={ 'string': { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False } } ) :type TableName: string :param TableName: [REQUIRED] The name of the table containing the requested items. 
:type IndexName: string :param IndexName: The name of an index to query. This index can be any local secondary index or global secondary index on the table. Note that if you use the IndexName parameter, you must also provide TableName. :type Select: string :param Select: The attributes to be returned in the result. You can retrieve all item attributes, specific item attributes, the count of matching items, or in the case of an index, some or all of the attributes projected into the index. ALL_ATTRIBUTES - Returns all of the item attributes from the specified table or index. If you query a local secondary index, then for each matching item in the index DynamoDB will fetch the entire item from the parent table. If the index is configured to project all item attributes, then all of the data can be obtained from the local secondary index, and no fetching is required. ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves all attributes that have been projected into the index. If the index is configured to project all attributes, this return value is equivalent to specifying ALL_ATTRIBUTES . COUNT - Returns the number of matching items, rather than the matching items themselves. SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet . This return value is equivalent to specifying AttributesToGet without specifying any value for Select . If you query or scan a local secondary index and request only attributes that are projected into that index, the operation will read only the index and not the table. If any of the requested attributes are not projected into the local secondary index, DynamoDB will fetch each of these attributes from the parent table. This extra fetching incurs additional throughput cost and latency. If you query or scan a global secondary index, you can only request attributes that are projected into the index. Global secondary index queries cannot fetch attributes from the parent table. 
If neither Select nor AttributesToGet are specified, DynamoDB defaults to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when accessing an index. You cannot use both Select and AttributesToGet together in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES . (This usage is equivalent to specifying AttributesToGet without any value for Select .) Note If you use the ProjectionExpression parameter, then the value for Select can only be SPECIFIC_ATTRIBUTES . Any other value for Select will return an error. :type AttributesToGet: list :param AttributesToGet: This is a legacy parameter. Use ProjectionExpression instead. For more information, see AttributesToGet in the Amazon DynamoDB Developer Guide . (string) -- :type Limit: integer :param Limit: The maximum number of items to evaluate (not necessarily the number of matching items). If DynamoDB processes the number of items up to the limit while processing the results, it stops the operation and returns the matching values up to that point, and a key in LastEvaluatedKey to apply in a subsequent operation, so that you can pick up where you left off. Also, if the processed data set size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation and returns the matching values up to the limit, and a key in LastEvaluatedKey to apply in a subsequent operation to continue the operation. For more information, see Query and Scan in the Amazon DynamoDB Developer Guide . :type ConsistentRead: boolean :param ConsistentRead: Determines the read consistency model: If set to true , then the operation uses strongly consistent reads; otherwise, the operation uses eventually consistent reads. Strongly consistent reads are not supported on global secondary indexes. If you query a global secondary index with ConsistentRead set to true , you will receive a ValidationException . :type KeyConditions: dict :param KeyConditions: This is a legacy parameter. Use KeyConditionExpression instead. 
For more information, see KeyConditions in the Amazon DynamoDB Developer Guide . (string) -- (dict) --Represents the selection criteria for a Query or Scan operation: For a Query operation, Condition is used for specifying the KeyConditions to use when querying a table or an index. For KeyConditions , only the following comparison operators are supported: EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN Condition is also used in a QueryFilter , which evaluates the query results and returns only the desired values. For a Scan operation, Condition is used in a ScanFilter , which evaluates the scan results and returns only the desired values. AttributeValueList (list) --One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used. For type Number, value comparisons are numeric. String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A , and a is greater than B . For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters . For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values. (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . S (string) --An attribute of type String. For example: 'S': 'Hello' N (string) --An attribute of type Number. For example: 'N': '123.45' Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. B (bytes) --An attribute of type Binary. For example: 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk' SS (list) --An attribute of type String Set. 
For example: 'SS': ['Giraffe', 'Hippo' ,'Zebra'] (string) -- NS (list) --An attribute of type Number Set. For example: 'NS': ['42.2', '-19', '7.5', '3.14'] Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. (string) -- BS (list) --An attribute of type Binary Set. For example: 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k='] (bytes) -- M (dict) --An attribute of type Map. For example: 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}} (string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . L (list) --An attribute of type List. For example: 'L': ['Cookies', 'Coffee', 3.14159] (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . NULL (boolean) --An attribute of type Null. For example: 'NULL': true BOOL (boolean) --An attribute of type Boolean. For example: 'BOOL': true ComparisonOperator (string) -- [REQUIRED]A comparator for evaluating attributes. For example, equals, greater than, less than, etc. The following comparison operators are available: EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN The following are descriptions of each comparison operator. EQ : Equal. EQ is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. 
For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} . NE : Not equal. NE is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} . LE : Less than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} . LT : Less than. AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} . GE : Greater than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} . GT : Greater than. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . 
Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} . NOT_NULL : The attribute exists. NOT_NULL is supported for all data types, including lists and maps. Note This operator tests for the existence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NOT_NULL , the result is a Boolean true . This result is because the attribute 'a ' exists; its data type is not relevant to the NOT_NULL comparison operator. NULL : The attribute does not exist. NULL is supported for all data types, including lists and maps. Note This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NULL , the result is a Boolean false . This is because the attribute 'a ' exists; its data type is not relevant to the NULL comparison operator. CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it finds an exact match with any member of the set. CONTAINS is supported for lists: When evaluating 'a CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list. NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. 
If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it does not find an exact match with any member of the set. NOT_CONTAINS is supported for lists: When evaluating 'a NOT CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list. BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type). IN : Checks for matching elements in a list. AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary. These attributes are compared against an existing attribute of an item. If any elements of the input are equal to the item attribute, the expression evaluates to true. BETWEEN : Greater than or equal to the first value, and less than or equal to the second value. AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not compare to {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} For usage examples of AttributeValueList and ComparisonOperator , see Legacy Conditional Parameters in the Amazon DynamoDB Developer Guide . :type QueryFilter: dict :param QueryFilter: This is a legacy parameter. Use FilterExpression instead. For more information, see QueryFilter in the Amazon DynamoDB Developer Guide . 
(string) -- (dict) --Represents the selection criteria for a Query or Scan operation: For a Query operation, Condition is used for specifying the KeyConditions to use when querying a table or an index. For KeyConditions , only the following comparison operators are supported: EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN Condition is also used in a QueryFilter , which evaluates the query results and returns only the desired values. For a Scan operation, Condition is used in a ScanFilter , which evaluates the scan results and returns only the desired values. AttributeValueList (list) --One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used. For type Number, value comparisons are numeric. String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A , and a is greater than B . For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters . For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values. (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . S (string) --An attribute of type String. For example: 'S': 'Hello' N (string) --An attribute of type Number. For example: 'N': '123.45' Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. B (bytes) --An attribute of type Binary. For example: 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk' SS (list) --An attribute of type String Set. For example: 'SS': ['Giraffe', 'Hippo' ,'Zebra'] (string) -- NS (list) --An attribute of type Number Set. 
For example: 'NS': ['42.2', '-19', '7.5', '3.14'] Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. (string) -- BS (list) --An attribute of type Binary Set. For example: 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k='] (bytes) -- M (dict) --An attribute of type Map. For example: 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}} (string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . L (list) --An attribute of type List. For example: 'L': ['Cookies', 'Coffee', 3.14159] (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . NULL (boolean) --An attribute of type Null. For example: 'NULL': true BOOL (boolean) --An attribute of type Boolean. For example: 'BOOL': true ComparisonOperator (string) -- [REQUIRED]A comparator for evaluating attributes. For example, equals, greater than, less than, etc. The following comparison operators are available: EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN The following are descriptions of each comparison operator. EQ : Equal. EQ is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} . 
NE : Not equal. NE is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} . LE : Less than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} . LT : Less than. AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} . GE : Greater than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} . GT : Greater than. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} . NOT_NULL : The attribute exists. 
NOT_NULL is supported for all data types, including lists and maps. Note This operator tests for the existence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NOT_NULL , the result is a Boolean true . This result is because the attribute 'a ' exists; its data type is not relevant to the NOT_NULL comparison operator. NULL : The attribute does not exist. NULL is supported for all data types, including lists and maps. Note This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NULL , the result is a Boolean false . This is because the attribute 'a ' exists; its data type is not relevant to the NULL comparison operator. CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it finds an exact match with any member of the set. CONTAINS is supported for lists: When evaluating 'a CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list. NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. 
If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it does not find an exact match with any member of the set. NOT_CONTAINS is supported for lists: When evaluating 'a NOT CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list. BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type). IN : Checks for matching elements in a list. AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary. These attributes are compared against an existing attribute of an item. If any elements of the input are equal to the item attribute, the expression evaluates to true. BETWEEN : Greater than or equal to the first value, and less than or equal to the second value. AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not compare to {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} For usage examples of AttributeValueList and ComparisonOperator , see Legacy Conditional Parameters in the Amazon DynamoDB Developer Guide . :type ConditionalOperator: string :param ConditionalOperator: This is a legacy parameter. Use FilterExpression instead. For more information, see ConditionalOperator in the Amazon DynamoDB Developer Guide . 
:type ScanIndexForward: boolean :param ScanIndexForward: Specifies the order for index traversal: If true (default), the traversal is performed in ascending order; if false , the traversal is performed in descending order. Items with the same partition key value are stored in sorted order by sort key. If the sort key data type is Number, the results are stored in numeric order. For type String, the results are stored in order of ASCII character code values. For type Binary, DynamoDB treats each byte of the binary data as unsigned. If ScanIndexForward is true , DynamoDB returns the results in the order in which they are stored (by sort key value). This is the default behavior. If ScanIndexForward is false , DynamoDB reads the results in reverse order by sort key value, and then returns the results to the client. :type ExclusiveStartKey: dict :param ExclusiveStartKey: The primary key of the first item that this operation will evaluate. Use the value that was returned for LastEvaluatedKey in the previous operation. The data type for ExclusiveStartKey must be String, Number or Binary. No set data types are allowed. (string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . S (string) --An attribute of type String. For example: 'S': 'Hello' N (string) --An attribute of type Number. For example: 'N': '123.45' Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. B (bytes) --An attribute of type Binary. For example: 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk' SS (list) --An attribute of type String Set. For example: 'SS': ['Giraffe', 'Hippo' ,'Zebra'] (string) -- NS (list) --An attribute of type Number Set. 
For example: 'NS': ['42.2', '-19', '7.5', '3.14'] Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. (string) -- BS (list) --An attribute of type Binary Set. For example: 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k='] (bytes) -- M (dict) --An attribute of type Map. For example: 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}} (string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . L (list) --An attribute of type List. For example: 'L': ['Cookies', 'Coffee', 3.14159] (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . NULL (boolean) --An attribute of type Null. For example: 'NULL': true BOOL (boolean) --An attribute of type Boolean. For example: 'BOOL': true :type ReturnConsumedCapacity: string :param ReturnConsumedCapacity: Determines the level of detail about provisioned throughput consumption that is returned in the response: INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed. Note that some operations, such as GetItem and BatchGetItem , do not access any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s). TOTAL - The response includes only the aggregate ConsumedCapacity for the operation. NONE - No ConsumedCapacity details are included in the response. 
:type ProjectionExpression: string :param ProjectionExpression: A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas. If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result. For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide . :type FilterExpression: string :param FilterExpression: A string that contains conditions that DynamoDB applies after the Query operation, but before the data is returned to you. Items that do not satisfy the FilterExpression criteria are not returned. A FilterExpression does not allow key attributes. You cannot define a filter expression based on a partition key or a sort key. Note A FilterExpression is applied after the items have already been read; the process of filtering does not consume any additional read capacity units. For more information, see Filter Expressions in the Amazon DynamoDB Developer Guide . :type KeyConditionExpression: string :param KeyConditionExpression: The condition that specifies the key value(s) for items to be retrieved by the Query action. The condition must perform an equality test on a single partition key value. The condition can also perform one of several comparison tests on a single sort key value. Query can use KeyConditionExpression to retrieve one item with a given partition key value and sort key value, or several items that have the same partition key value but different sort key values. The partition key equality test is required, and must be specified in the following format: partitionKeyName = :partitionkeyval If you also want to provide a condition for the sort key, it must be combined using AND with the condition for the sort key. 
Following is an example, using the = comparison operator for the sort key: partitionKeyName = :partitionkeyval AND sortKeyName = :sortkeyval Valid comparisons for the sort key condition are as follows: sortKeyName = :sortkeyval - true if the sort key value is equal to :sortkeyval . sortKeyName < :sortkeyval - true if the sort key value is less than :sortkeyval . sortKeyName <= :sortkeyval - true if the sort key value is less than or equal to :sortkeyval . sortKeyName > :sortkeyval - true if the sort key value is greater than :sortkeyval . sortKeyName >= :sortkeyval - true if the sort key value is greater than or equal to :sortkeyval . sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 - true if the sort key value is greater than or equal to :sortkeyval1 , and less than or equal to :sortkeyval2 . begins_with ( sortKeyName , :sortkeyval ) - true if the sort key value begins with a particular operand. (You cannot use this function with a sort key that is of type Number.) Note that the function name begins_with is case-sensitive. Use the ExpressionAttributeValues parameter to replace tokens such as :partitionval and :sortval with actual values at runtime. You can optionally use the ExpressionAttributeNames parameter to replace the names of the partition key and sort key with placeholder tokens. This option might be necessary if an attribute name conflicts with a DynamoDB reserved word. For example, the following KeyConditionExpression parameter causes an error because Size is a reserved word: Size = :myval To work around this, define a placeholder (such as #S ) to represent the attribute name Size . KeyConditionExpression then is as follows: #S = :myval For a list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide . For more information on ExpressionAttributeNames and ExpressionAttributeValues , see Using Placeholders for Attribute Names and Values in the Amazon DynamoDB Developer Guide .
:type ExpressionAttributeNames: dict :param ExpressionAttributeNames: One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames : To access an attribute whose name conflicts with a DynamoDB reserved word. To create a placeholder for repeating occurrences of an attribute name in an expression. To prevent special characters in an attribute name from being misinterpreted in an expression. Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name: Percentile The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide ). To work around this, you could specify the following for ExpressionAttributeNames : {'#P':'Percentile'} You could then use this substitution in an expression, as in this example: #P = :val Note Tokens that begin with the : character are expression attribute values , which are placeholders for the actual value at runtime. For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide . (string) -- (string) -- :type ExpressionAttributeValues: dict :param ExpressionAttributeValues: One or more values that can be substituted in an expression. Use the : (colon) character in an expression to dereference an attribute value. 
For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following: Available | Backordered | Discontinued You would first need to specify ExpressionAttributeValues as follows: { ':avail':{'S':'Available'}, ':back':{'S':'Backordered'}, ':disc':{'S':'Discontinued'} } You could then use these values in an expression, such as this: ProductStatus IN (:avail, :back, :disc) For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide . (string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . S (string) --An attribute of type String. For example: 'S': 'Hello' N (string) --An attribute of type Number. For example: 'N': '123.45' Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. B (bytes) --An attribute of type Binary. For example: 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk' SS (list) --An attribute of type String Set. For example: 'SS': ['Giraffe', 'Hippo' ,'Zebra'] (string) -- NS (list) --An attribute of type Number Set. For example: 'NS': ['42.2', '-19', '7.5', '3.14'] Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. (string) -- BS (list) --An attribute of type Binary Set. For example: 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k='] (bytes) -- M (dict) --An attribute of type Map. For example: 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}} (string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. 
The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . L (list) --An attribute of type List. For example: 'L': ['Cookies', 'Coffee', 3.14159] (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . NULL (boolean) --An attribute of type Null. For example: 'NULL': true BOOL (boolean) --An attribute of type Boolean. For example: 'BOOL': true :rtype: dict :return: { 'Items': [ { 'string': { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False } }, ], 'Count': 123, 'ScannedCount': 123, 'LastEvaluatedKey': { 'string': { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False } }, 'ConsumedCapacity': { 'TableName': 'string', 'CapacityUnits': 123.0, 'Table': { 'CapacityUnits': 123.0 }, 'LocalSecondaryIndexes': { 'string': { 'CapacityUnits': 123.0 } }, 'GlobalSecondaryIndexes': { 'string': { 'CapacityUnits': 123.0 } } } } :returns: (string) --
[ "A", "Query", "operation", "uses", "the", "primary", "key", "of", "a", "table", "or", "a", "secondary", "index", "to", "directly", "access", "items", "from", "that", "table", "or", "index", ".", "Use", "the", "KeyConditionExpression", "parameter", "to", "pro...
python
train
justanr/flask-allows
src/flask_allows/overrides.py
https://github.com/justanr/flask-allows/blob/39fa5c8692836a33646ea43b4081e7c2181ec7c4/src/flask_allows/overrides.py#L180-L187
def override(self, override, use_parent=False):
    """
    Temporarily push an override context and yield the new current
    context into the managed block.

    :param override: the override object to push onto the context stack
    :param use_parent: forwarded to ``push``; whether the new context
        should build on the parent context (default ``False``)
    """
    self.push(override, use_parent)
    try:
        yield self.current
    finally:
        # Guarantee the pushed context is removed even when the body of
        # the ``with`` block raises, so overrides cannot leak into
        # subsequent requests.
        self.pop()
[ "def", "override", "(", "self", ",", "override", ",", "use_parent", "=", "False", ")", ":", "self", ".", "push", "(", "override", ",", "use_parent", ")", "yield", "self", ".", "current", "self", ".", "pop", "(", ")" ]
Allows temporarily pushing an override context, yields the new context into the following block.
[ "Allows", "temporarily", "pushing", "an", "override", "context", "yields", "the", "new", "context", "into", "the", "following", "block", "." ]
python
test
fake-name/ChromeController
ChromeController/transport.py
https://github.com/fake-name/ChromeController/blob/914dd136184e8f1165c7aa6ef30418aaf10c61f0/ChromeController/transport.py#L630-L664
def recv(self, tab_key, message_id=None, timeout=30):
    '''
    Receive a message for the tab identified by ``tab_key``, optionally
    filtering for a specific message id.

    If ``message_id`` is None, the first command in the receive queue is
    returned. Otherwise, wait until a message with the requested id
    arrives, or until ``timeout`` seconds elapse with no response (in
    which case ``None`` is returned).
    '''
    self.__check_open_socket(tab_key)

    # Fast path: the reply may already be sitting in the local queue.
    queue = self.messages[tab_key]
    for idx, message in enumerate(queue):
        if not message:
            continue
        if message_id and "id" in message and message["id"] == message_id:
            return queue.pop(idx)

    # Slow path: block (up to the timeout) until a matching message
    # arrives on the wire.
    def matches(message):
        if message_id is None:
            return True
        if not message:
            self.log.debug("Message is not true (%s)!", message)
            return False
        if "id" in message:
            return message['id'] == message_id
        return False

    return self.recv_filtered(matches, tab_key, timeout)
[ "def", "recv", "(", "self", ",", "tab_key", ",", "message_id", "=", "None", ",", "timeout", "=", "30", ")", ":", "self", ".", "__check_open_socket", "(", "tab_key", ")", "# First, check if the message has already been received.", "for", "idx", "in", "range", "("...
Receive a message, optionally filtering for a specified message id. If `message_id` is None, the first command in the receive queue is returned. If `message_id` is not None, the call waits until a message is received with the specified id, or it times out. Timeout is the number of seconds to wait for a response; `None` is returned if the timeout expires with no response.
[ "Recieve", "a", "message", "optionally", "filtering", "for", "a", "specified", "message", "id", "." ]
python
train
dschien/PyExcelModelingHelper
excel_helper/__init__.py
https://github.com/dschien/PyExcelModelingHelper/blob/d00d98ae2f28ad71cfcd2a365c3045e439517df2/excel_helper/__init__.py#L379-L422
def growth_coefficients(start_date, end_date, ref_date, alpha, samples):
    """
    Build a matrix of growth factors following the CAGR formula
    y' = y0 * (1 + a)^(t' - t0), one row per month and one column per sample.

    alpha  - growth rate
    t0     - reference date, y0 the value at the reference date
    start_date / end_date bound the returned rows.
    """
    def months_between(later, earlier):
        # Whole-month distance between two dates.
        delta = rdelta.relativedelta(later, earlier)
        return delta.months + 12 * delta.years

    # If the reference point lies outside [start_date, end_date], widen the
    # computed range to include it and remember how many rows to trim off.
    start_offset = 0
    if ref_date < start_date:
        start_offset = months_between(start_date, ref_date)
        start_date = ref_date

    end_offset = 0
    if ref_date > end_date:
        end_offset = months_between(ref_date, end_date)
        end_date = ref_date

    ar = months_between(ref_date, start_date)
    br = months_between(end_date, ref_date)

    # The reference month sits at exponent zero in the lower block (hence
    # ar + 1 rows starting from 0); the upper block starts from exponent 1.
    g = np.fromfunction(
        lambda i, j: np.power(1 - alpha, np.abs(i) / 12),
        (ar + 1, samples), dtype=float)
    h = np.fromfunction(
        lambda i, j: np.power(1 + alpha, np.abs(i + 1) / 12),
        (br, samples), dtype=float)

    # Orient the lower block so time increases downwards, then stack the
    # two halves into a single matrix.
    a = np.vstack((np.flipud(g), h))

    # Trim any months that were only added to reach the reference point.
    return a[start_offset:a.shape[0] - end_offset]
[ "def", "growth_coefficients", "(", "start_date", ",", "end_date", ",", "ref_date", ",", "alpha", ",", "samples", ")", ":", "start_offset", "=", "0", "if", "ref_date", "<", "start_date", ":", "offset_delta", "=", "rdelta", ".", "relativedelta", "(", "start_date...
Build a matrix of growth factors according to the CAGR formula y'=y0 (1+a)^(t'-t0). a growth rate alpha t0 start date t' end date y' output y0 start value
[ "Build", "a", "matrix", "of", "growth", "factors", "according", "to", "the", "CAGR", "formula", "y", "=", "y0", "(", "1", "+", "a", ")", "^", "(", "t", "-", "t0", ")", "." ]
python
train
elmotec/massedit
massedit.py
https://github.com/elmotec/massedit/blob/57e22787354896d63a8850312314b19aa0308906/massedit.py#L170-L193
def edit_content(self, original_lines, file_name):
    """Process a file's contents.

    The content is first transformed line by line with the registered
    expressions, then the resulting list of lines is passed through each
    registered function in turn.

    Arguments:
        original_lines (list of str): file content.
        file_name (str): name of the file.
    """
    lines = list(map(self.edit_line, original_lines))
    for function in self._functions:
        try:
            lines = list(function(lines, file_name))
        except UnicodeDecodeError as err:
            log.error('failed to process %s: %s', file_name, err)
            return lines
        except Exception as err:
            log.error("failed to process %s with code %s: %s",
                      file_name, function, err)
            raise  # anything but a decode error is handled higher up
    return lines
[ "def", "edit_content", "(", "self", ",", "original_lines", ",", "file_name", ")", ":", "lines", "=", "[", "self", ".", "edit_line", "(", "line", ")", "for", "line", "in", "original_lines", "]", "for", "function", "in", "self", ".", "_functions", ":", "tr...
Processes a file contents. First processes the contents line by line applying the registered expressions, then process the resulting contents using the registered functions. Arguments: original_lines (list of str): file content. file_name (str): name of the file.
[ "Processes", "a", "file", "contents", "." ]
python
train
biolink/biolink-model
metamodel/generators/jsonldgen.py
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/jsonldgen.py#L102-L104
def cli(yamlfile, format, context):
    """Generate a JSONLD file from a biolink schema.

    Serializes the parsed schema and writes the JSON-LD text to stdout.
    """
    generator = JSONLDGenerator(yamlfile, format)
    print(generator.serialize(context=context))
[ "def", "cli", "(", "yamlfile", ",", "format", ",", "context", ")", ":", "print", "(", "JSONLDGenerator", "(", "yamlfile", ",", "format", ")", ".", "serialize", "(", "context", "=", "context", ")", ")" ]
Generate JSONLD file from biolink schema
[ "Generate", "JSONLD", "file", "from", "biolink", "schema" ]
python
train
etcher-be/epab
epab/utils/_repo.py
https://github.com/etcher-be/epab/blob/024cde74d058281aa66e6e4b7b71dccbe803b1c1/epab/utils/_repo.py#L175-L183
def ensure():
    """
    Verify that the current working directory is a Git repository;
    exit the process with a non-zero status otherwise.
    """
    LOGGER.debug('checking repository')
    if os.path.exists('.git'):
        LOGGER.debug('repository OK')
        return
    LOGGER.error('This command is meant to be ran in a Git repository.')
    sys.exit(-1)
[ "def", "ensure", "(", ")", ":", "LOGGER", ".", "debug", "(", "'checking repository'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "'.git'", ")", ":", "LOGGER", ".", "error", "(", "'This command is meant to be ran in a Git repository.'", ")", "sys"...
Makes sure the current working directory is a Git repository.
[ "Makes", "sure", "the", "current", "working", "directory", "is", "a", "Git", "repository", "." ]
python
train
blockchain/api-v1-client-python
blockchain/createwallet.py
https://github.com/blockchain/api-v1-client-python/blob/52ea562f824f04303e75239364e06722bec8620f/blockchain/createwallet.py#L10-L35
def create_wallet(password, api_code, service_url, priv=None, label=None, email=None):
    """Create a new Blockchain.info wallet.

    The wallet can be seeded with a pre-generated private key; otherwise a
    new private key is generated server-side.

    :param str password: password for the new wallet. At least 10 characters.
    :param str api_code: API code with create wallets permission
    :param str service_url: URL to an instance of service-my-wallet-v3
        (with trailing slash)
    :param str priv: private key to add to the wallet (optional)
    :param str label: label for the first address in the wallet (optional)
    :param str email: email to associate with the new wallet (optional)
    :return: an instance of :class:`WalletResponse` class
    """
    params = {'password': password, 'api_code': api_code}

    # Only forward the optional fields the caller actually supplied.
    optional = {'priv': priv, 'label': label, 'email': email}
    params.update({k: v for k, v in optional.items() if v is not None})

    response = util.call_api("api/v2/create", params, base_url=service_url)
    payload = json.loads(response)
    return CreateWalletResponse(payload['guid'],
                                payload['address'],
                                payload['label'])
[ "def", "create_wallet", "(", "password", ",", "api_code", ",", "service_url", ",", "priv", "=", "None", ",", "label", "=", "None", ",", "email", "=", "None", ")", ":", "params", "=", "{", "'password'", ":", "password", ",", "'api_code'", ":", "api_code",...
Create a new Blockchain.info wallet. It can be created containing a pre-generated private key or will otherwise generate a new private key. :param str password: password for the new wallet. At least 10 characters. :param str api_code: API code with create wallets permission :param str service_url: URL to an instance of service-my-wallet-v3 (with trailing slash) :param str priv: private key to add to the wallet (optional) :param str label: label for the first address in the wallet (optional) :param str email: email to associate with the new wallet (optional) :return: an instance of :class:`WalletResponse` class
[ "Create", "a", "new", "Blockchain", ".", "info", "wallet", ".", "It", "can", "be", "created", "containing", "a", "pre", "-", "generated", "private", "key", "or", "will", "otherwise", "generate", "a", "new", "private", "key", "." ]
python
train
inspirehep/refextract
refextract/references/regexs.py
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/regexs.py#L733-L772
def get_reference_line_numeration_marker_patterns(prefix=u''):
    """Return a list of compiled regex patterns used to search for the marker
    of a reference line in a full-text document.
    @param prefix: (string) the possible prefix to a reference line
    @return: (list) of compiled regex patterns.
    """
    title = u""
    # NOTE(review): `unicode` and the ur'' literals below exist only in
    # Python 2 -- this module presumably targets py2; confirm before porting.
    if type(prefix) in (str, unicode):
        title = prefix
    # Every pattern wraps the whole marker in a named group <mark>; most
    # also capture the bare number in a nested group <marknum>.
    g_name = u'(?P<mark>'
    g_close = u')'
    space = ur'\s*'
    patterns = [
        # [1]
        space + title + g_name + ur'\[\s*(?P<marknum>\d+)\s*\]' + g_close,
        # [letters-and-optional-numbers], e.g. an author/year style key
        space + title + g_name + ur'\[\s*[a-zA-Z:-]+\+?\s?(\d{1,4}[A-Za-z:-]?)?\s*\]' + g_close,
        # {1}
        space + title + g_name + ur'\{\s*(?P<marknum>\d+)\s*\}' + g_close,
        # <1>
        space + title + g_name + ur'\<\s*(?P<marknum>\d+)\s*\>' + g_close,
        # (1)
        space + title + g_name + ur'\(\s*(?P<marknum>\d+)\s*\)' + g_close,
        # 1.  -- the lookahead rejects a digit after the dot, so not 1.1
        space + title + g_name + ur'(?P<marknum>\d+)\s*\.(?!\d)' + g_close,
        # 1 followed by whitespace
        space + title + g_name + ur'(?P<marknum>\d+)\s+' + g_close,
        # 1]
        space + title + g_name + ur'(?P<marknum>\d+)\s*\]' + g_close,
        # 1}
        space + title + g_name + ur'(?P<marknum>\d+)\s*\}' + g_close,
        # 1)
        space + title + g_name + ur'(?P<marknum>\d+)\s*\)' + g_close,
        # 1>
        space + title + g_name + ur'(?P<marknum>\d+)\s*\>' + g_close,
        # [1.1]
        space + title + g_name + ur'\[\s*\d+\.\d+\s*\]' + g_close,
        # [ ]  (empty square brackets)
        space + title + g_name + ur'\[\s*\]' + g_close,
        # *   (a bare asterisk used as the marker)
        space + title + g_name + ur'\*' + g_close,
    ]
    # Case-insensitive, unicode-aware matching for every marker style.
    return [re.compile(p, re.I | re.UNICODE) for p in patterns]
[ "def", "get_reference_line_numeration_marker_patterns", "(", "prefix", "=", "u''", ")", ":", "title", "=", "u\"\"", "if", "type", "(", "prefix", ")", "in", "(", "str", ",", "unicode", ")", ":", "title", "=", "prefix", "g_name", "=", "u'(?P<mark>'", "g_close"...
Return a list of compiled regex patterns used to search for the marker of a reference line in a full-text document. @param prefix: (string) the possible prefix to a reference line @return: (list) of compiled regex patterns.
[ "Return", "a", "list", "of", "compiled", "regex", "patterns", "used", "to", "search", "for", "the", "marker", "of", "a", "reference", "line", "in", "a", "full", "-", "text", "document", "." ]
python
train
zomux/deepy
deepy/networks/network.py
https://github.com/zomux/deepy/blob/090fbad22a08a809b12951cd0d4984f5bd432698/deepy/networks/network.py#L284-L291
def report(self):
    """
    Log a summary of the network: its inputs, targets, parameters and
    total parameter count.
    """
    def joined(items):
        # Render a sequence of symbolic variables as one space-separated string.
        return " ".join(str(item) for item in items)

    logging.info("network inputs: %s", joined(self.input_variables))
    logging.info("network targets: %s", joined(self.target_variables))
    logging.info("network parameters: %s", joined(self.all_parameters))
    logging.info("parameter count: %d", self.parameter_count)
[ "def", "report", "(", "self", ")", ":", "logging", ".", "info", "(", "\"network inputs: %s\"", ",", "\" \"", ".", "join", "(", "map", "(", "str", ",", "self", ".", "input_variables", ")", ")", ")", "logging", ".", "info", "(", "\"network targets: %s\"", ...
Print network statistics.
[ "Print", "network", "statistics", "." ]
python
test
jaywink/federation
federation/protocols/diaspora/encrypted.py
https://github.com/jaywink/federation/blob/59d31bb37e662891dbea72c1dee05dc53146c78b/federation/protocols/diaspora/encrypted.py#L69-L88
def encrypt(payload, public_key):
    """
    Wrap ``payload`` in Diaspora's encrypted JSON envelope.

    See: https://diaspora.github.io/diaspora_federation/federation/encryption.html

    :param payload: Payload document as a string.
    :param public_key: Public key of recipient as an RSA object.
    :return: Encrypted JSON wrapper as dict.
    """
    # Fresh AES key/IV pair for this envelope, serialized as JSON.
    iv, key, encrypter = EncryptedPayload.get_iv_key_encrypter()
    aes_key_json = EncryptedPayload.get_aes_key_json(iv, key)

    # The AES key document travels RSA-encrypted under the recipient's key.
    rsa_cipher = PKCS1_v1_5.new(public_key)
    encrypted_key = b64encode(rsa_cipher.encrypt(aes_key_json))

    # The payload itself is AES-encrypted, PKCS#7-padded to the block size.
    padded = pkcs7_pad(payload.encode("utf-8"), AES.block_size)
    encrypted_envelope = b64encode(encrypter.encrypt(padded))

    return {
        "aes_key": encrypted_key.decode("utf-8"),
        "encrypted_magic_envelope": encrypted_envelope.decode("utf8"),
    }
[ "def", "encrypt", "(", "payload", ",", "public_key", ")", ":", "iv", ",", "key", ",", "encrypter", "=", "EncryptedPayload", ".", "get_iv_key_encrypter", "(", ")", "aes_key_json", "=", "EncryptedPayload", ".", "get_aes_key_json", "(", "iv", ",", "key", ")", "...
Encrypt a payload using an encrypted JSON wrapper. See: https://diaspora.github.io/diaspora_federation/federation/encryption.html :param payload: Payload document as a string. :param public_key: Public key of recipient as an RSA object. :return: Encrypted JSON wrapper as dict.
[ "Encrypt", "a", "payload", "using", "an", "encrypted", "JSON", "wrapper", "." ]
python
train
bitesofcode/projexui
projexui/dialogs/xshortcutdialog/xshortcutdialog.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/dialogs/xshortcutdialog/xshortcutdialog.py#L77-L90
def accept(self):
    """
    Applies the edited shortcuts to their actions and closes the dialog.
    Aborts without closing when the current settings cannot be saved.
    """
    if not self.save():
        return

    tree = self.uiActionTREE
    for row in range(tree.topLevelItemCount()):
        item = tree.topLevelItem(row)
        # Column 1 holds the (possibly edited) key sequence text.
        item.action().setShortcut(QKeySequence(item.text(1)))

    super(XShortcutDialog, self).accept()
[ "def", "accept", "(", "self", ")", ":", "if", "(", "not", "self", ".", "save", "(", ")", ")", ":", "return", "for", "i", "in", "range", "(", "self", ".", "uiActionTREE", ".", "topLevelItemCount", "(", ")", ")", ":", "item", "=", "self", ".", "uiA...
Saves the current settings for the actions in the list and exits the dialog.
[ "Saves", "the", "current", "settings", "for", "the", "actions", "in", "the", "list", "and", "exits", "the", "dialog", "." ]
python
train
CalebBell/fluids
fluids/particle_size_distribution.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/particle_size_distribution.py#L851-L887
def pdf_Gates_Gaudin_Schuhman_basis_integral(d, d_characteristic, m, n): r'''Calculates the integral of the multiplication of d^n by the Gates, Gaudin and Schuhman (GGS) model given a particle diameter `d`, characteristic (maximum) particle diameter `d_characteristic`, and exponent `m`. .. math:: \int d^n\cdot q(d)\; dd =\frac{m}{m+n} d^n \left(\frac{d} {d_{characteristic}}\right)^m Parameters ---------- d : float Specified particle diameter, [m] d_characteristic : float Characteristic particle diameter; in this model, it is the largest particle size diameter in the distribution, [m] m : float Particle size distribution exponent, [-] n : int Exponent of the multiplied n, [-] Returns ------- pdf_basis_integral : float Integral of Rosin Rammler pdf multiplied by d^n, [-] Notes ----- This integral does not have any numerical issues as `d` approaches 0. Examples -------- >>> pdf_Gates_Gaudin_Schuhman_basis_integral(d=2E-4, d_characteristic=1E-3, m=2.3, n=-3) -10136984887.543015 ''' return m/(m+n)*d**n*(d/d_characteristic)**m
[ "def", "pdf_Gates_Gaudin_Schuhman_basis_integral", "(", "d", ",", "d_characteristic", ",", "m", ",", "n", ")", ":", "return", "m", "/", "(", "m", "+", "n", ")", "*", "d", "**", "n", "*", "(", "d", "/", "d_characteristic", ")", "**", "m" ]
r'''Calculates the integral of the multiplication of d^n by the Gates, Gaudin and Schuhman (GGS) model given a particle diameter `d`, characteristic (maximum) particle diameter `d_characteristic`, and exponent `m`. .. math:: \int d^n\cdot q(d)\; dd =\frac{m}{m+n} d^n \left(\frac{d} {d_{characteristic}}\right)^m Parameters ---------- d : float Specified particle diameter, [m] d_characteristic : float Characteristic particle diameter; in this model, it is the largest particle size diameter in the distribution, [m] m : float Particle size distribution exponent, [-] n : int Exponent of the multiplied n, [-] Returns ------- pdf_basis_integral : float Integral of Rosin Rammler pdf multiplied by d^n, [-] Notes ----- This integral does not have any numerical issues as `d` approaches 0. Examples -------- >>> pdf_Gates_Gaudin_Schuhman_basis_integral(d=2E-4, d_characteristic=1E-3, m=2.3, n=-3) -10136984887.543015
[ "r", "Calculates", "the", "integral", "of", "the", "multiplication", "of", "d^n", "by", "the", "Gates", "Gaudin", "and", "Schuhman", "(", "GGS", ")", "model", "given", "a", "particle", "diameter", "d", "characteristic", "(", "maximum", ")", "particle", "diam...
python
train
UCSBarchlab/PyRTL
pyrtl/transform.py
https://github.com/UCSBarchlab/PyRTL/blob/0988e5c9c10ededd5e1f58d5306603f9edf4b3e2/pyrtl/transform.py#L166-L182
def clone_wire(old_wire, name=None): """ Makes a copy of any existing wire :param old_wire: The wire to clone :param name: a name fo rhte new wire Note that this function is mainly intended to be used when the two wires are from different blocks. Making two wires with the same name in the same block is not allowed """ if isinstance(old_wire, Const): return Const(old_wire.val, old_wire.bitwidth) else: if name is None: return old_wire.__class__(old_wire.bitwidth, name=old_wire.name) return old_wire.__class__(old_wire.bitwidth, name=name)
[ "def", "clone_wire", "(", "old_wire", ",", "name", "=", "None", ")", ":", "if", "isinstance", "(", "old_wire", ",", "Const", ")", ":", "return", "Const", "(", "old_wire", ".", "val", ",", "old_wire", ".", "bitwidth", ")", "else", ":", "if", "name", "...
Makes a copy of any existing wire :param old_wire: The wire to clone :param name: a name fo rhte new wire Note that this function is mainly intended to be used when the two wires are from different blocks. Making two wires with the same name in the same block is not allowed
[ "Makes", "a", "copy", "of", "any", "existing", "wire" ]
python
train
devassistant/devassistant
devassistant/excepthook.py
https://github.com/devassistant/devassistant/blob/2dbfeaa666a64127263664d18969c55d19ecc83e/devassistant/excepthook.py#L24-L31
def is_local_subsection(command_dict): """Returns True if command dict is "local subsection", meaning that it is "if", "else" or "for" (not a real call, but calls run_section recursively.""" for local_com in ['if ', 'for ', 'else ']: if list(command_dict.keys())[0].startswith(local_com): return True return False
[ "def", "is_local_subsection", "(", "command_dict", ")", ":", "for", "local_com", "in", "[", "'if '", ",", "'for '", ",", "'else '", "]", ":", "if", "list", "(", "command_dict", ".", "keys", "(", ")", ")", "[", "0", "]", ".", "startswith", "(", "local_c...
Returns True if command dict is "local subsection", meaning that it is "if", "else" or "for" (not a real call, but calls run_section recursively.
[ "Returns", "True", "if", "command", "dict", "is", "local", "subsection", "meaning", "that", "it", "is", "if", "else", "or", "for", "(", "not", "a", "real", "call", "but", "calls", "run_section", "recursively", "." ]
python
train
saltstack/salt
salt/executors/sudo.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/executors/sudo.py#L25-L79
def execute(opts, data, func, args, kwargs): ''' Allow for the calling of execution modules via sudo. This module is invoked by the minion if the ``sudo_user`` minion config is present. Example minion config: .. code-block:: yaml sudo_user: saltdev Once this setting is made, any execution module call done by the minion will be run under ``sudo -u <sudo_user> salt-call``. For example, with the above minion config, .. code-block:: bash salt sudo_minion cmd.run 'cat /etc/sudoers' is equivalent to .. code-block:: bash sudo -u saltdev salt-call cmd.run 'cat /etc/sudoers' being run on ``sudo_minion``. ''' cmd = ['sudo', '-u', opts.get('sudo_user'), 'salt-call', '--out', 'json', '--metadata', '-c', opts.get('config_dir'), '--', data.get('fun')] if data['fun'] in ('state.sls', 'state.highstate', 'state.apply'): kwargs['concurrent'] = True for arg in args: cmd.append(_cmd_quote(six.text_type(arg))) for key in kwargs: cmd.append(_cmd_quote('{0}={1}'.format(key, kwargs[key]))) cmd_ret = __salt__['cmd.run_all'](cmd, use_vt=True, python_shell=False) if cmd_ret['retcode'] == 0: cmd_meta = salt.utils.json.loads(cmd_ret['stdout'])['local'] ret = cmd_meta['return'] __context__['retcode'] = cmd_meta.get('retcode', 0) else: ret = cmd_ret['stderr'] __context__['retcode'] = cmd_ret['retcode'] return ret
[ "def", "execute", "(", "opts", ",", "data", ",", "func", ",", "args", ",", "kwargs", ")", ":", "cmd", "=", "[", "'sudo'", ",", "'-u'", ",", "opts", ".", "get", "(", "'sudo_user'", ")", ",", "'salt-call'", ",", "'--out'", ",", "'json'", ",", "'--met...
Allow for the calling of execution modules via sudo. This module is invoked by the minion if the ``sudo_user`` minion config is present. Example minion config: .. code-block:: yaml sudo_user: saltdev Once this setting is made, any execution module call done by the minion will be run under ``sudo -u <sudo_user> salt-call``. For example, with the above minion config, .. code-block:: bash salt sudo_minion cmd.run 'cat /etc/sudoers' is equivalent to .. code-block:: bash sudo -u saltdev salt-call cmd.run 'cat /etc/sudoers' being run on ``sudo_minion``.
[ "Allow", "for", "the", "calling", "of", "execution", "modules", "via", "sudo", "." ]
python
train
MacHu-GWU/angora-project
angora/algorithm/iterable.py
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/algorithm/iterable.py#L194-L209
def running_windows(iterable, size): """Generate n-size running windows. Usage:: >>> for i in running_windows([1, 2, 3, 4, 5], size=3): ... print(i) [1, 2, 3] [2, 3, 4] [3, 4, 5] """ fifo = collections.deque(maxlen=size) for i in iterable: fifo.append(i) if len(fifo) == size: yield list(fifo)
[ "def", "running_windows", "(", "iterable", ",", "size", ")", ":", "fifo", "=", "collections", ".", "deque", "(", "maxlen", "=", "size", ")", "for", "i", "in", "iterable", ":", "fifo", ".", "append", "(", "i", ")", "if", "len", "(", "fifo", ")", "==...
Generate n-size running windows. Usage:: >>> for i in running_windows([1, 2, 3, 4, 5], size=3): ... print(i) [1, 2, 3] [2, 3, 4] [3, 4, 5]
[ "Generate", "n", "-", "size", "running", "windows", "." ]
python
train
openid/python-openid
openid/extensions/draft/pape2.py
https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/extensions/draft/pape2.py#L100-L129
def parseExtensionArgs(self, args): """Set the state of this request to be that expressed in these PAPE arguments @param args: The PAPE arguments without a namespace @rtype: None @raises ValueError: When the max_auth_age is not parseable as an integer """ # preferred_auth_policies is a space-separated list of policy URIs self.preferred_auth_policies = [] policies_str = args.get('preferred_auth_policies') if policies_str: for uri in policies_str.split(' '): if uri not in self.preferred_auth_policies: self.preferred_auth_policies.append(uri) # max_auth_age is base-10 integer number of seconds max_auth_age_str = args.get('max_auth_age') self.max_auth_age = None if max_auth_age_str: try: self.max_auth_age = int(max_auth_age_str) except ValueError: pass
[ "def", "parseExtensionArgs", "(", "self", ",", "args", ")", ":", "# preferred_auth_policies is a space-separated list of policy URIs", "self", ".", "preferred_auth_policies", "=", "[", "]", "policies_str", "=", "args", ".", "get", "(", "'preferred_auth_policies'", ")", ...
Set the state of this request to be that expressed in these PAPE arguments @param args: The PAPE arguments without a namespace @rtype: None @raises ValueError: When the max_auth_age is not parseable as an integer
[ "Set", "the", "state", "of", "this", "request", "to", "be", "that", "expressed", "in", "these", "PAPE", "arguments" ]
python
train
Alignak-monitoring/alignak
alignak/objects/servicedependency.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/servicedependency.py#L206-L303
def explode(self, hostgroups): # pylint: disable=too-many-locals, too-many-branches """Explode all service dependency for each member of hostgroups Each member of dependent hostgroup or hostgroup in dependency have to get a copy of service dependencies (quite complex to parse) :param hostgroups: used to look for hostgroup :type hostgroups: alignak.objects.hostgroup.Hostgroups :return: None """ # The "old" services will be removed. All services with # more than one host or a host group will be in it srvdep_to_remove = [] # Then for every host create a copy of the service with just the host # because we are adding services, we can't just loop in it servicedeps = list(self.items.keys()) for s_id in servicedeps: servicedep = self.items[s_id] # First case: we only have to propagate the services dependencies to all the hosts # of some hostgroups # Either a specific property is defined (Shinken) or no dependent hosts groups # is defined if bool(getattr(servicedep, 'explode_hostgroup', 0)) or \ (hasattr(servicedep, 'hostgroup_name') and not hasattr(servicedep, 'dependent_hostgroup_name')): self.explode_hostgroup(servicedep, hostgroups) srvdep_to_remove.append(s_id) continue # Get the list of all FATHER hosts and service dependenciess hnames = [] if hasattr(servicedep, 'hostgroup_name'): hg_names = [n.strip() for n in servicedep.hostgroup_name.split(',')] hg_names = [hg_name.strip() for hg_name in hg_names] for hg_name in hg_names: hostgroup = hostgroups.find_by_name(hg_name) if hostgroup is None: err = "ERROR: the servicedependecy got an" \ " unknown hostgroup_name '%s'" % hg_name hostgroup.add_error(err) continue hnames.extend([m.strip() for m in hostgroup.get_hosts()]) if not hasattr(servicedep, 'host_name'): servicedep.host_name = '' if servicedep.host_name != '': hnames.extend([n.strip() for n in servicedep.host_name.split(',')]) snames = [d.strip() for d in servicedep.service_description.split(',')] couples = [] for hname in hnames: for sname in snames: 
couples.append((hname.strip(), sname.strip())) if not hasattr(servicedep, 'dependent_hostgroup_name') \ and hasattr(servicedep, 'hostgroup_name'): servicedep.dependent_hostgroup_name = servicedep.hostgroup_name # Now the dependent part (the sons) dep_hnames = [] if hasattr(servicedep, 'dependent_hostgroup_name'): hg_names = [n.strip() for n in servicedep.dependent_hostgroup_name.split(',')] hg_names = [hg_name.strip() for hg_name in hg_names] for hg_name in hg_names: hostgroup = hostgroups.find_by_name(hg_name) if hostgroup is None: err = "ERROR: the servicedependecy got an " \ "unknown dependent_hostgroup_name '%s'" % hg_name hostgroup.add_error(err) continue dep_hnames.extend([m.strip() for m in hostgroup.get_hosts()]) if not hasattr(servicedep, 'dependent_host_name'): servicedep.dependent_host_name = getattr(servicedep, 'host_name', '') if servicedep.dependent_host_name != '': dep_hnames.extend([n.strip() for n in servicedep.dependent_host_name.split(',')]) dep_snames = [d.strip() for d in servicedep.dependent_service_description.split(',')] dep_couples = [] for dep_hname in dep_hnames: for dep_sname in dep_snames: dep_couples.append((dep_hname.strip(), dep_sname.strip())) # Create the new service deps from all of this. for (dep_hname, dep_sname) in dep_couples: # the sons, like HTTP for (hname, sname) in couples: # the fathers, like MySQL new_sd = servicedep.copy() new_sd.host_name = hname new_sd.service_description = sname new_sd.dependent_host_name = dep_hname new_sd.dependent_service_description = dep_sname self.add_item(new_sd) # Ok so we can remove the old one srvdep_to_remove.append(s_id) self.delete_servicesdep_by_id(srvdep_to_remove)
[ "def", "explode", "(", "self", ",", "hostgroups", ")", ":", "# pylint: disable=too-many-locals, too-many-branches", "# The \"old\" services will be removed. All services with", "# more than one host or a host group will be in it", "srvdep_to_remove", "=", "[", "]", "# Then for every ho...
Explode all service dependency for each member of hostgroups Each member of dependent hostgroup or hostgroup in dependency have to get a copy of service dependencies (quite complex to parse) :param hostgroups: used to look for hostgroup :type hostgroups: alignak.objects.hostgroup.Hostgroups :return: None
[ "Explode", "all", "service", "dependency", "for", "each", "member", "of", "hostgroups", "Each", "member", "of", "dependent", "hostgroup", "or", "hostgroup", "in", "dependency", "have", "to", "get", "a", "copy", "of", "service", "dependencies", "(", "quite", "c...
python
train
michal-stuglik/django-blastplus
blastplus/features/record.py
https://github.com/michal-stuglik/django-blastplus/blob/4f5e15fb9f8069c3bed5f8fd941c4b9891daad4b/blastplus/features/record.py#L119-L121
def get_id(self): """Returns unique id of an alignment. """ return hash(str(self.title) + str(self.best_score()) + str(self.hit_def))
[ "def", "get_id", "(", "self", ")", ":", "return", "hash", "(", "str", "(", "self", ".", "title", ")", "+", "str", "(", "self", ".", "best_score", "(", ")", ")", "+", "str", "(", "self", ".", "hit_def", ")", ")" ]
Returns unique id of an alignment.
[ "Returns", "unique", "id", "of", "an", "alignment", "." ]
python
train
odlgroup/odl
odl/trafos/wavelet.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/trafos/wavelet.py#L424-L432
def _call(self, x): """Return wavelet transform of ``x``.""" if self.impl == 'pywt': coeffs = pywt.wavedecn( x, wavelet=self.pywt_wavelet, level=self.nlevels, mode=self.pywt_pad_mode, axes=self.axes) return pywt.ravel_coeffs(coeffs, axes=self.axes)[0] else: raise RuntimeError("bad `impl` '{}'".format(self.impl))
[ "def", "_call", "(", "self", ",", "x", ")", ":", "if", "self", ".", "impl", "==", "'pywt'", ":", "coeffs", "=", "pywt", ".", "wavedecn", "(", "x", ",", "wavelet", "=", "self", ".", "pywt_wavelet", ",", "level", "=", "self", ".", "nlevels", ",", "...
Return wavelet transform of ``x``.
[ "Return", "wavelet", "transform", "of", "x", "." ]
python
train
getgauge/gauge-python
getgauge/parser_redbaron.py
https://github.com/getgauge/gauge-python/blob/90f3547dcfd2d16d51f116cdd4e53527eeab1a57/getgauge/parser_redbaron.py#L62-L80
def _step_decorator_args(self, decorator): """ Get arguments passed to step decorators converted to python objects. """ args = decorator.call.value step = None if len(args) == 1: try: step = args[0].value.to_python() except (ValueError, SyntaxError): pass if isinstance(step, six.string_types + (list,)): return step logging.error("Decorator step accepts either a string or a list of \ strings - %s", self.file_path) else: logging.error("Decorator step accepts only one argument - %s", self.file_path)
[ "def", "_step_decorator_args", "(", "self", ",", "decorator", ")", ":", "args", "=", "decorator", ".", "call", ".", "value", "step", "=", "None", "if", "len", "(", "args", ")", "==", "1", ":", "try", ":", "step", "=", "args", "[", "0", "]", ".", ...
Get arguments passed to step decorators converted to python objects.
[ "Get", "arguments", "passed", "to", "step", "decorators", "converted", "to", "python", "objects", "." ]
python
test
openpaperwork/paperwork-backend
paperwork_backend/index.py
https://github.com/openpaperwork/paperwork-backend/blob/114b831e94e039e68b339751fd18250877abad76/paperwork_backend/index.py#L387-L401
def add_doc(self, doc, index_update=True, label_guesser_update=True): """ Add a document to the index """ if not self.index_writer and index_update: self.index_writer = self.index.writer() if not self.label_guesser_updater and label_guesser_update: self.label_guesser_updater = self.label_guesser.get_updater() logger.info("Indexing new doc: %s" % doc) if index_update: self._update_doc_in_index(self.index_writer, doc) if label_guesser_update: self.label_guesser_updater.add_doc(doc) if doc.docid not in self._docs_by_id: self._docs_by_id[doc.docid] = doc
[ "def", "add_doc", "(", "self", ",", "doc", ",", "index_update", "=", "True", ",", "label_guesser_update", "=", "True", ")", ":", "if", "not", "self", ".", "index_writer", "and", "index_update", ":", "self", ".", "index_writer", "=", "self", ".", "index", ...
Add a document to the index
[ "Add", "a", "document", "to", "the", "index" ]
python
train
sludgedesk/metoffer
metoffer.py
https://github.com/sludgedesk/metoffer/blob/449748d31f913d961d6f0406542bb784e931a95b/metoffer.py#L323-L340
def parse_sitelist(sitelist): """Return list of Site instances from retrieved sitelist data""" sites = [] for site in sitelist["Locations"]["Location"]: try: ident = site["id"] name = site["name"] except KeyError: ident = site["@id"] # Difference between loc-spec and text for some reason name = site["@name"] if "latitude" in site: lat = float(site["latitude"]) lon = float(site["longitude"]) else: lat = lon = None s = Site(ident, name, lat, lon) sites.append(s) return sites
[ "def", "parse_sitelist", "(", "sitelist", ")", ":", "sites", "=", "[", "]", "for", "site", "in", "sitelist", "[", "\"Locations\"", "]", "[", "\"Location\"", "]", ":", "try", ":", "ident", "=", "site", "[", "\"id\"", "]", "name", "=", "site", "[", "\"...
Return list of Site instances from retrieved sitelist data
[ "Return", "list", "of", "Site", "instances", "from", "retrieved", "sitelist", "data" ]
python
train
rosenbrockc/fortpy
fortpy/scripts/analyze.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/scripts/analyze.py#L246-L251
def _complete_cases(self, text, line, istart, iend): """Returns the completion list of possible test cases for the active unit test.""" if text == "": return list(self.live.keys()) else: return [c for c in self.live if c.startswith(text)]
[ "def", "_complete_cases", "(", "self", ",", "text", ",", "line", ",", "istart", ",", "iend", ")", ":", "if", "text", "==", "\"\"", ":", "return", "list", "(", "self", ".", "live", ".", "keys", "(", ")", ")", "else", ":", "return", "[", "c", "for"...
Returns the completion list of possible test cases for the active unit test.
[ "Returns", "the", "completion", "list", "of", "possible", "test", "cases", "for", "the", "active", "unit", "test", "." ]
python
train
carter-j-h/iterable-python-wrapper
iterablepythonwrapper/client.py
https://github.com/carter-j-h/iterable-python-wrapper/blob/10d5db034ddfdfc3333efeee07fc9228b6a998c4/iterablepythonwrapper/client.py#L1435-L1454
def disable_device(self, token, email=None, user_id=None): """ This request manually disable pushes to a device until it comes online again. """ call = "/api/users/disableDevice" payload ={} payload["token"] = str(token) if email is not None: payload["email"] = str(email) if user_id is not None: payload["userId"] = str(user_id) return self.api_call(call= call, method="POST", json=payload)
[ "def", "disable_device", "(", "self", ",", "token", ",", "email", "=", "None", ",", "user_id", "=", "None", ")", ":", "call", "=", "\"/api/users/disableDevice\"", "payload", "=", "{", "}", "payload", "[", "\"token\"", "]", "=", "str", "(", "token", ")", ...
This request manually disable pushes to a device until it comes online again.
[ "This", "request", "manually", "disable", "pushes", "to", "a", "device", "until", "it", "comes", "online", "again", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/models/video/epva.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/epva.py#L53-L124
def van_image_enc_2d(x, first_depth, reuse=False, hparams=None): """The image encoder for the VAN. Similar architecture as Ruben's paper (http://proceedings.mlr.press/v70/villegas17a/villegas17a.pdf). Args: x: The image to encode. first_depth: The depth of the first layer. Depth is increased in subsequent layers. reuse: To reuse in variable scope or not. hparams: The python hparams. Returns: The encoded image. """ with tf.variable_scope('van_image_enc', reuse=reuse): enc_history = [x] enc = tf.layers.conv2d( x, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1) enc = tf.contrib.layers.layer_norm(enc) enc = tf.layers.conv2d( enc, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1) enc = tf.nn.max_pool(enc, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME') enc = tf.nn.dropout(enc, hparams.van_keep_prob) enc = tf.contrib.layers.layer_norm(enc) enc_history.append(enc) enc = tf.layers.conv2d( enc, first_depth * 2, 3, padding='same', activation=tf.nn.relu, strides=1) enc = tf.layers.conv2d( enc, first_depth * 2, 3, padding='same', activation=tf.nn.relu, strides=1) enc = tf.nn.max_pool(enc, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME') enc = tf.nn.dropout(enc, hparams.van_keep_prob) enc = tf.contrib.layers.layer_norm(enc) enc_history.append(enc) enc = tf.layers.conv2d( enc, first_depth * 4, 3, padding='same', activation=tf.nn.relu, strides=1) enc = tf.layers.conv2d( enc, first_depth * 4, 3, padding='same', activation=tf.nn.relu, strides=1) enc = tf.layers.conv2d( enc, first_depth * 4, 3, padding='same', activation=tf.nn.relu, strides=1) enc = tf.nn.max_pool(enc, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME') return enc, enc_history
[ "def", "van_image_enc_2d", "(", "x", ",", "first_depth", ",", "reuse", "=", "False", ",", "hparams", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "'van_image_enc'", ",", "reuse", "=", "reuse", ")", ":", "enc_history", "=", "[", "x", ...
The image encoder for the VAN. Similar architecture as Ruben's paper (http://proceedings.mlr.press/v70/villegas17a/villegas17a.pdf). Args: x: The image to encode. first_depth: The depth of the first layer. Depth is increased in subsequent layers. reuse: To reuse in variable scope or not. hparams: The python hparams. Returns: The encoded image.
[ "The", "image", "encoder", "for", "the", "VAN", "." ]
python
train
alorence/django-modern-rpc
modernrpc/system_methods.py
https://github.com/alorence/django-modern-rpc/blob/6dc42857d35764b24e2c09334f4b578629a75f9e/modernrpc/system_methods.py#L7-L12
def __system_listMethods(**kwargs): """Returns a list of all methods available in the current entry point""" entry_point = kwargs.get(ENTRY_POINT_KEY) protocol = kwargs.get(PROTOCOL_KEY) return registry.get_all_method_names(entry_point, protocol, sort_methods=True)
[ "def", "__system_listMethods", "(", "*", "*", "kwargs", ")", ":", "entry_point", "=", "kwargs", ".", "get", "(", "ENTRY_POINT_KEY", ")", "protocol", "=", "kwargs", ".", "get", "(", "PROTOCOL_KEY", ")", "return", "registry", ".", "get_all_method_names", "(", ...
Returns a list of all methods available in the current entry point
[ "Returns", "a", "list", "of", "all", "methods", "available", "in", "the", "current", "entry", "point" ]
python
train
ekiro/haps
haps/container.py
https://github.com/ekiro/haps/blob/64b6746187e44dadb23b842607d294e03c30a0be/haps/container.py#L96-L145
def configure(config: List[Egg], subclass: 'Container' = None) -> None: """ Configure haps manually, an alternative to :func:`~haps.Container.autodiscover` :param config: List of configured Eggs :param subclass: Optional Container subclass that should be used """ profiles = Configuration().get_var(PROFILES, tuple) assert isinstance(profiles, (list, tuple)) profiles = tuple(profiles) + (None,) seen = set() registered = set() filtered_config: List[Egg] = [] for profile in profiles: for egg_ in (e for e in config if e.profile == profile): ident = (egg_.base_, egg_.qualifier, egg_.profile) if ident in seen: raise ConfigurationError( "Ambiguous implementation %s" % repr(egg_.base_)) dep_ident = (egg_.base_, egg_.qualifier) if dep_ident in registered: continue filtered_config.append(egg_) registered.add(dep_ident) seen.add(ident) config = filtered_config with Container._lock: if Container.__configured: raise AlreadyConfigured if subclass is None: subclass = Container Container.__subclass = subclass Container.__configured = True container = Container() if not all(isinstance(o, Egg) for o in config): raise ConfigurationError('All config items should be the eggs') container.config = config container.register_scope(INSTANCE_SCOPE, InstanceScope) container.register_scope(SINGLETON_SCOPE, SingletonScope)
[ "def", "configure", "(", "config", ":", "List", "[", "Egg", "]", ",", "subclass", ":", "'Container'", "=", "None", ")", "->", "None", ":", "profiles", "=", "Configuration", "(", ")", ".", "get_var", "(", "PROFILES", ",", "tuple", ")", "assert", "isinst...
Configure haps manually, an alternative to :func:`~haps.Container.autodiscover` :param config: List of configured Eggs :param subclass: Optional Container subclass that should be used
[ "Configure", "haps", "manually", "an", "alternative", "to", ":", "func", ":", "~haps", ".", "Container", ".", "autodiscover" ]
python
train
DecBayComp/RWA-python
rwa/generic.py
https://github.com/DecBayComp/RWA-python/blob/734a52e15a0e8c244d84d74acf3fd64721074732/rwa/generic.py#L241-L274
def pokeStorable(self, storable, objname, obj, container, visited=None, _stack=None, **kwargs): """ Arguments: storable (StorableHandler): storable instance. objname (any): record reference. obj (any): object to be serialized. container (any): container. visited (dict): map of the previously serialized objects that are passed by references; keys are the objects' IDs. _stack (CallStack): stack of parent object names. Trailing keyword arguments are passed to the :class:`Storable` instance's :attr:`~Storable.poke`. """ #print((objname, storable.storable_type)) # debug storable.poke(self, objname, obj, container, visited=visited, _stack=_stack, **kwargs) try: record = self.getRecord(objname, container) except KeyError: # fake storable; silently skip if self.verbose: print("skipping `{}` (type: {})".format(objname, storable.storable_type)) if 1 < self.verbose: print(traceback.format_exc()) else: self.setRecordAttr('type', storable.storable_type, record) if storable.version is not None: self.setRecordAttr('version', from_version(storable.version), record)
[ "def", "pokeStorable", "(", "self", ",", "storable", ",", "objname", ",", "obj", ",", "container", ",", "visited", "=", "None", ",", "_stack", "=", "None", ",", "*", "*", "kwargs", ")", ":", "#print((objname, storable.storable_type)) # debug", "storable", ".",...
Arguments: storable (StorableHandler): storable instance. objname (any): record reference. obj (any): object to be serialized. container (any): container. visited (dict): map of the previously serialized objects that are passed by references; keys are the objects' IDs. _stack (CallStack): stack of parent object names. Trailing keyword arguments are passed to the :class:`Storable` instance's :attr:`~Storable.poke`.
[ "Arguments", ":" ]
python
train
ktbyers/netmiko
netmiko/linux/linux_ssh.py
https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/linux/linux_ssh.py#L135-L139
def remote_file_size(self, remote_cmd="", remote_file=None): """Get the file size of the remote file.""" return self._remote_file_size_unix( remote_cmd=remote_cmd, remote_file=remote_file )
[ "def", "remote_file_size", "(", "self", ",", "remote_cmd", "=", "\"\"", ",", "remote_file", "=", "None", ")", ":", "return", "self", ".", "_remote_file_size_unix", "(", "remote_cmd", "=", "remote_cmd", ",", "remote_file", "=", "remote_file", ")" ]
Get the file size of the remote file.
[ "Get", "the", "file", "size", "of", "the", "remote", "file", "." ]
python
train
cocoakekeyu/cancan
cancan/ability.py
https://github.com/cocoakekeyu/cancan/blob/f198d560e6e008e6c5580ba55581a939a5d544ed/cancan/ability.py#L62-L70
def relevant_rules_for_match(self, action, subject): """retrive match action and subject""" matches = [] for rule in self.rules: rule.expanded_actions = self.expand_actions(rule.actions) if rule.is_relevant(action, subject): matches.append(rule) return self.optimize(matches[::-1])
[ "def", "relevant_rules_for_match", "(", "self", ",", "action", ",", "subject", ")", ":", "matches", "=", "[", "]", "for", "rule", "in", "self", ".", "rules", ":", "rule", ".", "expanded_actions", "=", "self", ".", "expand_actions", "(", "rule", ".", "act...
retrive match action and subject
[ "retrive", "match", "action", "and", "subject" ]
python
train
Kensuke-Mitsuzawa/JapaneseTokenizers
JapaneseTokenizer/common/text_preprocess.py
https://github.com/Kensuke-Mitsuzawa/JapaneseTokenizers/blob/3bdfb6be73de0f78e5c08f3a51376ad3efa00b6c/JapaneseTokenizer/common/text_preprocess.py#L50-L74
def normalize_text(input_text, dictionary_mode='ipadic', new_line_replaced='。', is_replace_eos=True, is_kana=True, is_ascii=True, is_digit=True): # type: (text_type,text_type,text_type,bool,bool,bool,bool)->text_type """* What you can do - It converts input-text into normalized-text which is good for tokenizer input. * Params - new_line_replaced: a string which replaces from \n string. """ if is_replace_eos: without_new_line = input_text.replace('\n', new_line_replaced) else: without_new_line = new_line_replaced if dictionary_mode=='neologd' and is_neologdn_valid: return neologdn.normalize(normalize_text_normal_ipadic(without_new_line)) elif dictionary_mode=='neologd' and is_neologdn_valid == False: raise Exception("You could not call neologd dictionary bacause you do NOT install the package neologdn.") else: return normalize_text_normal_ipadic(without_new_line, kana=is_kana, ascii=is_ascii, digit=is_digit)
[ "def", "normalize_text", "(", "input_text", ",", "dictionary_mode", "=", "'ipadic'", ",", "new_line_replaced", "=", "'。',", "", "is_replace_eos", "=", "True", ",", "is_kana", "=", "True", ",", "is_ascii", "=", "True", ",", "is_digit", "=", "True", ")", ":", ...
* What you can do - It converts input-text into normalized-text which is good for tokenizer input. * Params - new_line_replaced: a string which replaces from \n string.
[ "*", "What", "you", "can", "do", "-", "It", "converts", "input", "-", "text", "into", "normalized", "-", "text", "which", "is", "good", "for", "tokenizer", "input", "." ]
python
train
biolink/ontobio
ontobio/golr/golr_query.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/golr/golr_query.py#L241-L269
def goassoc_fieldmap(relationship_type=ACTS_UPSTREAM_OF_OR_WITHIN): """ Returns a mapping of canonical monarch fields to amigo-golr. See: https://github.com/geneontology/amigo/blob/master/metadata/ann-config.yaml """ return { M.SUBJECT: 'bioentity', M.SUBJECT_CLOSURE: 'bioentity', ## In the GO AmiGO instance, the type field is not correctly populated ## See above in the code for hack that restores this for planteome instance ## M.SUBJECT_CATEGORY: 'type', M.SUBJECT_CATEGORY: None, M.SUBJECT_LABEL: 'bioentity_label', M.SUBJECT_TAXON: 'taxon', M.SUBJECT_TAXON_LABEL: 'taxon_label', M.SUBJECT_TAXON_CLOSURE: 'taxon_closure', M.RELATION: 'qualifier', M.OBJECT: 'annotation_class', M.OBJECT_CLOSURE: REGULATES_CLOSURE if relationship_type == ACTS_UPSTREAM_OF_OR_WITHIN else ISA_PARTOF_CLOSURE, M.OBJECT_LABEL: 'annotation_class_label', M.OBJECT_TAXON: 'object_taxon', M.OBJECT_TAXON_LABEL: 'object_taxon_label', M.OBJECT_TAXON_CLOSURE: 'object_taxon_closure', M.OBJECT_CATEGORY: None, M.EVIDENCE_OBJECT_CLOSURE: 'evidence_subset_closure', M.IS_DEFINED_BY: 'assigned_by' }
[ "def", "goassoc_fieldmap", "(", "relationship_type", "=", "ACTS_UPSTREAM_OF_OR_WITHIN", ")", ":", "return", "{", "M", ".", "SUBJECT", ":", "'bioentity'", ",", "M", ".", "SUBJECT_CLOSURE", ":", "'bioentity'", ",", "## In the GO AmiGO instance, the type field is not correct...
Returns a mapping of canonical monarch fields to amigo-golr. See: https://github.com/geneontology/amigo/blob/master/metadata/ann-config.yaml
[ "Returns", "a", "mapping", "of", "canonical", "monarch", "fields", "to", "amigo", "-", "golr", "." ]
python
train
RetailMeNotSandbox/acky
acky/ec2.py
https://github.com/RetailMeNotSandbox/acky/blob/fcd4d092c42892ede7c924cafc41e9cf4be3fb9f/acky/ec2.py#L473-L483
def get(self, volume_ids=None, filters=None): """List EBS Volume info.""" params = {} if filters: params["filters"] = make_filters(filters) if isinstance(volume_ids, str): volume_ids = [volume_ids] return self.call("DescribeVolumes", VolumeIds=volume_ids, response_data_key="Volumes", **params)
[ "def", "get", "(", "self", ",", "volume_ids", "=", "None", ",", "filters", "=", "None", ")", ":", "params", "=", "{", "}", "if", "filters", ":", "params", "[", "\"filters\"", "]", "=", "make_filters", "(", "filters", ")", "if", "isinstance", "(", "vo...
List EBS Volume info.
[ "List", "EBS", "Volume", "info", "." ]
python
train
SmokinCaterpillar/pypet
pypet/naturalnaming.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/naturalnaming.py#L3115-L3170
def f_get(self, name, fast_access=False, with_links=True, shortcuts=True, max_depth=None, auto_load=False): """Searches and returns an item (parameter/result/group node) with the given `name`. :param name: Name of the item (full name or parts of the full name) :param fast_access: Whether fast access should be applied. :param with_links: If links are considered. Cannot be set to ``False`` if ``auto_load`` is ``True``. :param shortcuts: If shortcuts are allowed and the trajectory can *hop* over nodes in the path. :param max_depth: Maximum depth relative to starting node (inclusive). `None` means no depth limit. :param auto_load: If data should be loaded from the storage service if it cannot be found in the current trajectory tree. Auto-loading will load group and leaf nodes currently not in memory and it will load data into empty leaves. Be aware that auto-loading does not work with shortcuts. :return: The found instance (result/parameter/group node) or if fast access is True and you found a parameter or result that supports fast access, the contained value is returned. :raises: AttributeError: If no node with the given name can be found NotUniqueNodeError In case of forward search if more than one candidate node is found within a particular depth of the tree. In case of backwards search if more than one candidate is found regardless of the depth. DataNotInStorageError: In case auto-loading fails Any exception raised by the StorageService in case auto-loading is enabled """ return self._nn_interface._get(self, name, fast_access=fast_access, shortcuts=shortcuts, max_depth=max_depth, auto_load=auto_load, with_links=with_links)
[ "def", "f_get", "(", "self", ",", "name", ",", "fast_access", "=", "False", ",", "with_links", "=", "True", ",", "shortcuts", "=", "True", ",", "max_depth", "=", "None", ",", "auto_load", "=", "False", ")", ":", "return", "self", ".", "_nn_interface", ...
Searches and returns an item (parameter/result/group node) with the given `name`. :param name: Name of the item (full name or parts of the full name) :param fast_access: Whether fast access should be applied. :param with_links: If links are considered. Cannot be set to ``False`` if ``auto_load`` is ``True``. :param shortcuts: If shortcuts are allowed and the trajectory can *hop* over nodes in the path. :param max_depth: Maximum depth relative to starting node (inclusive). `None` means no depth limit. :param auto_load: If data should be loaded from the storage service if it cannot be found in the current trajectory tree. Auto-loading will load group and leaf nodes currently not in memory and it will load data into empty leaves. Be aware that auto-loading does not work with shortcuts. :return: The found instance (result/parameter/group node) or if fast access is True and you found a parameter or result that supports fast access, the contained value is returned. :raises: AttributeError: If no node with the given name can be found NotUniqueNodeError In case of forward search if more than one candidate node is found within a particular depth of the tree. In case of backwards search if more than one candidate is found regardless of the depth. DataNotInStorageError: In case auto-loading fails Any exception raised by the StorageService in case auto-loading is enabled
[ "Searches", "and", "returns", "an", "item", "(", "parameter", "/", "result", "/", "group", "node", ")", "with", "the", "given", "name", "." ]
python
test
IdentityPython/pysaml2
src/saml2/assertion.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/assertion.py#L69-L141
def filter_on_attributes(ava, required=None, optional=None, acs=None, fail_on_unfulfilled_requirements=True): """ Filter :param ava: An attribute value assertion as a dictionary :param required: list of RequestedAttribute instances defined to be required :param optional: list of RequestedAttribute instances defined to be optional :param fail_on_unfulfilled_requirements: If required attributes are missing fail or fail not depending on this parameter. :return: The modified attribute value assertion """ def _match_attr_name(attr, ava): local_name = None for a in ['name_format', 'friendly_name']: _val = attr.get(a) if _val: if a == 'name_format': local_name = get_local_name(acs, attr['name'], _val) else: local_name = _val break if local_name: _fn = _match(local_name, ava) else: _fn = None if not _fn: # In the unlikely case that someone has provided us with # URIs as attribute names _fn = _match(attr["name"], ava) return _fn def _apply_attr_value_restrictions(attr, res, must=False): try: values = [av["text"] for av in attr["attribute_value"]] except KeyError: values = [] try: res[_fn].extend(_filter_values(ava[_fn], values)) except KeyError: res[_fn] = _filter_values(ava[_fn], values) return _filter_values(ava[_fn], values, must) res = {} if required is None: required = [] for attr in required: _fn = _match_attr_name(attr, ava) if _fn: _apply_attr_value_restrictions(attr, res, True) elif fail_on_unfulfilled_requirements: desc = "Required attribute missing: '%s'" % (attr["name"]) raise MissingValue(desc) if optional is None: optional = [] for attr in optional: _fn = _match_attr_name(attr, ava) if _fn: _apply_attr_value_restrictions(attr, res, False) return res
[ "def", "filter_on_attributes", "(", "ava", ",", "required", "=", "None", ",", "optional", "=", "None", ",", "acs", "=", "None", ",", "fail_on_unfulfilled_requirements", "=", "True", ")", ":", "def", "_match_attr_name", "(", "attr", ",", "ava", ")", ":", "l...
Filter :param ava: An attribute value assertion as a dictionary :param required: list of RequestedAttribute instances defined to be required :param optional: list of RequestedAttribute instances defined to be optional :param fail_on_unfulfilled_requirements: If required attributes are missing fail or fail not depending on this parameter. :return: The modified attribute value assertion
[ "Filter" ]
python
train
classner/pymp
pymp/__init__.py
https://github.com/classner/pymp/blob/9895ec2ec01ad2778a400449cbfa17f162491180/pymp/__init__.py#L186-L207
def range(self, start, stop=None, step=1): """ Get the correctly distributed parallel chunks. This corresponds to using the OpenMP 'static' schedule. """ self._assert_active() if stop is None: start, stop = 0, start full_list = range(start, stop, step) per_worker = len(full_list) // self._num_threads rem = len(full_list) % self._num_threads schedule = [ per_worker + 1 if thread_idx < rem else per_worker for thread_idx in range(self._num_threads) ] # pylint: disable=undefined-variable start_idx = _functools.reduce( lambda x, y: x + y, schedule[: self.thread_num], 0 ) end_idx = start_idx + schedule[self._thread_num] return full_list[start_idx:end_idx]
[ "def", "range", "(", "self", ",", "start", ",", "stop", "=", "None", ",", "step", "=", "1", ")", ":", "self", ".", "_assert_active", "(", ")", "if", "stop", "is", "None", ":", "start", ",", "stop", "=", "0", ",", "start", "full_list", "=", "range...
Get the correctly distributed parallel chunks. This corresponds to using the OpenMP 'static' schedule.
[ "Get", "the", "correctly", "distributed", "parallel", "chunks", "." ]
python
train
HttpRunner/HttpRunner
httprunner/validator.py
https://github.com/HttpRunner/HttpRunner/blob/f259551bf9c8ba905eae5c1afcf2efea20ae0871/httprunner/validator.py#L141-L171
def get_uniform_comparator(comparator): """ convert comparator alias to uniform name """ if comparator in ["eq", "equals", "==", "is"]: return "equals" elif comparator in ["lt", "less_than"]: return "less_than" elif comparator in ["le", "less_than_or_equals"]: return "less_than_or_equals" elif comparator in ["gt", "greater_than"]: return "greater_than" elif comparator in ["ge", "greater_than_or_equals"]: return "greater_than_or_equals" elif comparator in ["ne", "not_equals"]: return "not_equals" elif comparator in ["str_eq", "string_equals"]: return "string_equals" elif comparator in ["len_eq", "length_equals", "count_eq"]: return "length_equals" elif comparator in ["len_gt", "count_gt", "length_greater_than", "count_greater_than"]: return "length_greater_than" elif comparator in ["len_ge", "count_ge", "length_greater_than_or_equals", \ "count_greater_than_or_equals"]: return "length_greater_than_or_equals" elif comparator in ["len_lt", "count_lt", "length_less_than", "count_less_than"]: return "length_less_than" elif comparator in ["len_le", "count_le", "length_less_than_or_equals", \ "count_less_than_or_equals"]: return "length_less_than_or_equals" else: return comparator
[ "def", "get_uniform_comparator", "(", "comparator", ")", ":", "if", "comparator", "in", "[", "\"eq\"", ",", "\"equals\"", ",", "\"==\"", ",", "\"is\"", "]", ":", "return", "\"equals\"", "elif", "comparator", "in", "[", "\"lt\"", ",", "\"less_than\"", "]", ":...
convert comparator alias to uniform name
[ "convert", "comparator", "alias", "to", "uniform", "name" ]
python
train
jpscaletti/solution
solution/fields/file/helpers.py
https://github.com/jpscaletti/solution/blob/eabafd8e695bbb0209242e002dbcc05ffb327f43/solution/fields/file/helpers.py#L247-L324
def save(self, filesto, upload_to=None, name=None, secret=None, prefix=None, allowed=None, denied=None, max_size=None, **kwargs): """ Except for `filesto`, all of these parameters are optional, so only bother setting the ones relevant to *this upload*. filesto : A `werkzeug.FileUploader`. upload_to : Relative path to where to upload secret : If True, instead of the original filename, a random one'll be used. prefix : To avoid race-conditions between users uploading files with the same name at the same time. If `secret` is True, this will be ignored. name : If set, it'll be used as the name of the uploaded file. Instead of a string, this can also be a callable. allowed : List of allowed file extensions. `None` to allow all of them. If the uploaded file doesn't have one of these extensions, an `UnsupportedMediaType` exception will be raised. denied : List of forbidden extensions. Set to `None` to disable. If the uploaded file *does* have one of these extensions, a `UnsupportedMediaType` exception will be raised. max_size : Maximum file size, in bytes, that file can have. Note: The attribute `max_content_length` defined in the `request` object has higher priority. 
""" if not filesto: return None upload_to = upload_to or self.upload_to secret = secret or self.secret prefix = prefix or self.prefix original_filename = filesto.filename allowed = allowed or self.allowed denied = denied or self.denied self.validate(filesto, allowed, denied, max_size) if callable(upload_to): filepath = upload_to(original_filename) else: filepath = upload_to oname, ext = os.path.splitext(original_filename) if name: new_name = name(original_filename) if callable(name) else name else: new_name = get_random_filename() if secret else prefix + oname filename = get_unique_filename(self.base_path, filepath, new_name, ext=ext) fullpath = os.path.join( make_dirs(self.base_path, filepath), filename ) filesto.save(fullpath) filesize = os.path.getsize(fullpath) # Post validation if max_size and filesize > max_size: self.delete_file(fullpath) raise RequestEntityTooLarge return os.path.join(filepath, filename)
[ "def", "save", "(", "self", ",", "filesto", ",", "upload_to", "=", "None", ",", "name", "=", "None", ",", "secret", "=", "None", ",", "prefix", "=", "None", ",", "allowed", "=", "None", ",", "denied", "=", "None", ",", "max_size", "=", "None", ",",...
Except for `filesto`, all of these parameters are optional, so only bother setting the ones relevant to *this upload*. filesto : A `werkzeug.FileUploader`. upload_to : Relative path to where to upload secret : If True, instead of the original filename, a random one'll be used. prefix : To avoid race-conditions between users uploading files with the same name at the same time. If `secret` is True, this will be ignored. name : If set, it'll be used as the name of the uploaded file. Instead of a string, this can also be a callable. allowed : List of allowed file extensions. `None` to allow all of them. If the uploaded file doesn't have one of these extensions, an `UnsupportedMediaType` exception will be raised. denied : List of forbidden extensions. Set to `None` to disable. If the uploaded file *does* have one of these extensions, a `UnsupportedMediaType` exception will be raised. max_size : Maximum file size, in bytes, that file can have. Note: The attribute `max_content_length` defined in the `request` object has higher priority.
[ "Except", "for", "filesto", "all", "of", "these", "parameters", "are", "optional", "so", "only", "bother", "setting", "the", "ones", "relevant", "to", "*", "this", "upload", "*", "." ]
python
train
pandas-dev/pandas
pandas/io/parquet.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parquet.py#L254-L282
def read_parquet(path, engine='auto', columns=None, **kwargs): """ Load a parquet object from the file path, returning a DataFrame. .. versionadded 0.21.0 Parameters ---------- path : string File path engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. columns : list, default=None If not None, only these columns will be read from the file. .. versionadded 0.21.1 **kwargs Any additional kwargs are passed to the engine. Returns ------- DataFrame """ impl = get_engine(engine) return impl.read(path, columns=columns, **kwargs)
[ "def", "read_parquet", "(", "path", ",", "engine", "=", "'auto'", ",", "columns", "=", "None", ",", "*", "*", "kwargs", ")", ":", "impl", "=", "get_engine", "(", "engine", ")", "return", "impl", ".", "read", "(", "path", ",", "columns", "=", "columns...
Load a parquet object from the file path, returning a DataFrame. .. versionadded 0.21.0 Parameters ---------- path : string File path engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. columns : list, default=None If not None, only these columns will be read from the file. .. versionadded 0.21.1 **kwargs Any additional kwargs are passed to the engine. Returns ------- DataFrame
[ "Load", "a", "parquet", "object", "from", "the", "file", "path", "returning", "a", "DataFrame", "." ]
python
train
openvax/mhcflurry
mhcflurry/class1_affinity_predictor.py
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_affinity_predictor.py#L456-L472
def model_name(allele, num): """ Generate a model name Parameters ---------- allele : string num : int Returns ------- string """ random_string = hashlib.sha1( str(time.time()).encode()).hexdigest()[:16] return "%s-%d-%s" % (allele.upper(), num, random_string)
[ "def", "model_name", "(", "allele", ",", "num", ")", ":", "random_string", "=", "hashlib", ".", "sha1", "(", "str", "(", "time", ".", "time", "(", ")", ")", ".", "encode", "(", ")", ")", ".", "hexdigest", "(", ")", "[", ":", "16", "]", "return", ...
Generate a model name Parameters ---------- allele : string num : int Returns ------- string
[ "Generate", "a", "model", "name", "Parameters", "----------", "allele", ":", "string", "num", ":", "int" ]
python
train
glyph/automat
automat/_visualize.py
https://github.com/glyph/automat/blob/80c6eb925eeef120443f4f9c81398bea629884b8/automat/_visualize.py#L18-L28
def elementMaker(name, *children, **attrs): """ Construct a string from the HTML element description. """ formattedAttrs = ' '.join('{}={}'.format(key, _gvquote(str(value))) for key, value in sorted(attrs.items())) formattedChildren = ''.join(children) return u'<{name} {attrs}>{children}</{name}>'.format( name=name, attrs=formattedAttrs, children=formattedChildren)
[ "def", "elementMaker", "(", "name", ",", "*", "children", ",", "*", "*", "attrs", ")", ":", "formattedAttrs", "=", "' '", ".", "join", "(", "'{}={}'", ".", "format", "(", "key", ",", "_gvquote", "(", "str", "(", "value", ")", ")", ")", "for", "key"...
Construct a string from the HTML element description.
[ "Construct", "a", "string", "from", "the", "HTML", "element", "description", "." ]
python
train
ThreatResponse/margaritashotgun
margaritashotgun/repository.py
https://github.com/ThreatResponse/margaritashotgun/blob/6dee53ef267959b214953439968244cc46a19690/margaritashotgun/repository.py#L290-L308
def fetch_module(self, module): """ Download and verify kernel module :type module: str :param module: kernel module path """ tm = int(time.time()) datestamp = datetime.utcfromtimestamp(tm).isoformat() filename = "lime-{0}-{1}.ko".format(datestamp, module['version']) url = "{0}/{1}".format(self.url, module['location']) logger.info("downloading {0} as {1}".format(url, filename)) req = requests.get(url, stream=True) with open(filename, 'wb') as f: f.write(req.raw.read()) self.verify_module(filename, module, self.gpg_verify) return filename
[ "def", "fetch_module", "(", "self", ",", "module", ")", ":", "tm", "=", "int", "(", "time", ".", "time", "(", ")", ")", "datestamp", "=", "datetime", ".", "utcfromtimestamp", "(", "tm", ")", ".", "isoformat", "(", ")", "filename", "=", "\"lime-{0}-{1}....
Download and verify kernel module :type module: str :param module: kernel module path
[ "Download", "and", "verify", "kernel", "module" ]
python
train
wummel/linkchecker
linkcheck/checker/urlbase.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/checker/urlbase.py#L306-L311
def add_info (self, s): """ Add an info string. """ if s not in self.info: self.info.append(s)
[ "def", "add_info", "(", "self", ",", "s", ")", ":", "if", "s", "not", "in", "self", ".", "info", ":", "self", ".", "info", ".", "append", "(", "s", ")" ]
Add an info string.
[ "Add", "an", "info", "string", "." ]
python
train
lemieuxl/pyGenClean
pyGenClean/Misc/compare_gold_standard.py
https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/Misc/compare_gold_standard.py#L635-L729
def findOverlappingSNPsWithGoldStandard(prefix, gold_prefixe, out_prefix, use_marker_names=False): """Find the overlapping SNPs in 4 different data sets.""" # Reading the main file sourceSnpToExtract = {} if use_marker_names: sourceSnpToExtract = set() duplicates = set() try: with open(prefix + ".bim", "r") as inputFile: for line in inputFile: row = line.rstrip("\r\n").split("\t") chromosome = row[0] position = row[3] snpName = row[1] if use_marker_names: sourceSnpToExtract.add(snpName) else: if (chromosome, position) not in sourceSnpToExtract: sourceSnpToExtract[(chromosome, position)] = snpName else: # It's a duplicate duplicates.add((chromosome, position)) except IOError: msg = "%s.bim: no such file" % prefix raise ProgramError(msg) # Removing duplicates from the list if not use_marker_names: logger.info(" - There are {} duplicated markers " "in {};".format(len(duplicates), prefix + ".bim")) logger.info(" - removing them for simplicity...") for snpID in duplicates: del sourceSnpToExtract[snpID] # Reading the Gold standard files goldSnpToExtract = {} if use_marker_names: goldSnpToExtract = set() with open(gold_prefixe + ".bim", "r") as inputFile: for line in inputFile: row = line.rstrip("\r\n").split("\t") chromosome = row[0] position = row[3] snpName = row[1] if use_marker_names: if snpName in sourceSnpToExtract: goldSnpToExtract.add(snpName) else: if (chromosome, position) in sourceSnpToExtract: # We want this SNP goldSnpToExtract[(chromosome, position)] = snpName # Printing the names of the SNPs to extract goldOutputFile = None try: goldOutputFile = open(out_prefix + ".gold_snp_to_extract", "w") except IOError: msg = "%(out_prefix)s.goldSnpToExtract: can't write file" % locals() raise ProgramError(msg) sourceOutputFile = None try: sourceOutputFile = open(out_prefix + ".source_snp_to_extract", "w") except IOError: msg = "%(out_prefix)s.sourceSnpToExtract: can't write file" % locals() raise ProgramError(msg) changeNameOutputFile = None try: changeNameOutputFile 
= open(out_prefix + ".update_names", "w") except IOError: msg = "%(out_prefix)s.updateNames: can't write file" % locals() raise ProgramError(msg) # Writing the file if use_marker_names: for snpName in goldSnpToExtract: print >>sourceOutputFile, snpName print >>goldOutputFile, snpName else: for snpID in goldSnpToExtract.iterkeys(): print >>sourceOutputFile, sourceSnpToExtract[snpID] print >>goldOutputFile, goldSnpToExtract[snpID] print >>changeNameOutputFile, "\t".join([ goldSnpToExtract[snpID], sourceSnpToExtract[snpID], ]) # Closing the output file goldOutputFile.close() sourceOutputFile.close() changeNameOutputFile.close()
[ "def", "findOverlappingSNPsWithGoldStandard", "(", "prefix", ",", "gold_prefixe", ",", "out_prefix", ",", "use_marker_names", "=", "False", ")", ":", "# Reading the main file", "sourceSnpToExtract", "=", "{", "}", "if", "use_marker_names", ":", "sourceSnpToExtract", "="...
Find the overlapping SNPs in 4 different data sets.
[ "Find", "the", "overlapping", "SNPs", "in", "4", "different", "data", "sets", "." ]
python
train
gem/oq-engine
openquake/hmtk/comparison/rate_grids.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/comparison/rate_grids.py#L114-L144
def from_model_files(cls, limits, input_model, investigation_time=1.0, simple_mesh_spacing=1.0, complex_mesh_spacing=5.0, mfd_width=0.1, area_discretisation=10.0): """ Reads the hazard model from a file :param list limits: Grid configuration [west, east, xspc, south, north, yspc, upper, lower, zspc] :param str input_model: Path to input source model :param float investigation_time: Investigation time of Poisson model :param float simple_mesh_spacing: Rupture mesh spacing of simple fault (km) :param float complex_mesh_spacing: Rupture mesh spacing of complex fault (km) :param float mfd_width: Spacing (in magnitude units) of MFD :param float area_discretisation: Spacing of discretisation of area source (km) """ converter = SourceConverter(investigation_time, simple_mesh_spacing, complex_mesh_spacing, mfd_width, area_discretisation) sources = [] for grp in nrml.to_python(input_model, converter): sources.extend(grp.sources) return cls(limits, sources, area_discretisation)
[ "def", "from_model_files", "(", "cls", ",", "limits", ",", "input_model", ",", "investigation_time", "=", "1.0", ",", "simple_mesh_spacing", "=", "1.0", ",", "complex_mesh_spacing", "=", "5.0", ",", "mfd_width", "=", "0.1", ",", "area_discretisation", "=", "10.0...
Reads the hazard model from a file :param list limits: Grid configuration [west, east, xspc, south, north, yspc, upper, lower, zspc] :param str input_model: Path to input source model :param float investigation_time: Investigation time of Poisson model :param float simple_mesh_spacing: Rupture mesh spacing of simple fault (km) :param float complex_mesh_spacing: Rupture mesh spacing of complex fault (km) :param float mfd_width: Spacing (in magnitude units) of MFD :param float area_discretisation: Spacing of discretisation of area source (km)
[ "Reads", "the", "hazard", "model", "from", "a", "file" ]
python
train
westurner/pyrpo
pyrpo/pyrpo.py
https://github.com/westurner/pyrpo/blob/2a910af055dc405b761571a52ef87842397ddadf/pyrpo/pyrpo.py#L666-L686
def lately(self, count=15): """ Show ``count`` most-recently modified files by mtime Yields: tuple: (strftime-formatted mtime, self.fpath-relative file path) """ excludes = '|'.join(('*.pyc', '*.swp', '*.bak', '*~')) cmd = ('''find . -printf "%%T@ %%p\\n" ''' '''| egrep -v '%s' ''' '''| sort -n ''' '''| tail -n %d''') % (excludes, count) op = self.sh(cmd, shell=True) for l in op.split('\n'): l = l.strip() if not l: continue mtime, fname = l.split(' ', 1) mtime = datetime.datetime.fromtimestamp(float(mtime)) mtimestr = dtformat(mtime) yield mtimestr, fname
[ "def", "lately", "(", "self", ",", "count", "=", "15", ")", ":", "excludes", "=", "'|'", ".", "join", "(", "(", "'*.pyc'", ",", "'*.swp'", ",", "'*.bak'", ",", "'*~'", ")", ")", "cmd", "=", "(", "'''find . -printf \"%%T@ %%p\\\\n\" '''", "'''| egrep -v '%s...
Show ``count`` most-recently modified files by mtime Yields: tuple: (strftime-formatted mtime, self.fpath-relative file path)
[ "Show", "count", "most", "-", "recently", "modified", "files", "by", "mtime" ]
python
train
uber/tchannel-python
tchannel/tornado/connection.py
https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/tornado/connection.py#L359-L402
def initiate_handshake(self, headers, timeout=None): """Initiate a handshake with the remote host. :param headers: A dictionary of headers to send. :returns: A future that resolves (with a value of None) when the handshake is complete. """ io_loop = IOLoop.current() timeout = timeout or DEFAULT_INIT_TIMEOUT_SECS self.writer.put(messages.InitRequestMessage( version=PROTOCOL_VERSION, headers=headers )) init_res_future = self.reader.get() timeout_handle = io_loop.call_later(timeout, ( lambda: init_res_future.set_exception(errors.TimeoutError( 'Handshake with %s:%d timed out. Did not receive an INIT_RES ' 'after %s seconds' % ( self.remote_host, self.remote_host_port, str(timeout) ) )) )) io_loop.add_future( init_res_future, (lambda _: io_loop.remove_timeout(timeout_handle)), ) init_res = yield init_res_future if init_res.message_type != Types.INIT_RES: raise errors.UnexpectedError( "Expected handshake response, got %s" % repr(init_res) ) self._extract_handshake_headers(init_res) self._handshake_performed = True # The receive loop is started only after the handshake has been # completed. self._loop() raise tornado.gen.Return(init_res)
[ "def", "initiate_handshake", "(", "self", ",", "headers", ",", "timeout", "=", "None", ")", ":", "io_loop", "=", "IOLoop", ".", "current", "(", ")", "timeout", "=", "timeout", "or", "DEFAULT_INIT_TIMEOUT_SECS", "self", ".", "writer", ".", "put", "(", "mess...
Initiate a handshake with the remote host. :param headers: A dictionary of headers to send. :returns: A future that resolves (with a value of None) when the handshake is complete.
[ "Initiate", "a", "handshake", "with", "the", "remote", "host", "." ]
python
train
Equitable/trump
trump/orm.py
https://github.com/Equitable/trump/blob/a2802692bc642fa32096374159eea7ceca2947b4/trump/orm.py#L212-L236
def delete(self, symbol): """ Deletes a Symbol. Parameters ---------- symbol : str or Symbol """ if isinstance(symbol, (str, unicode)): sym = self.get(symbol) elif isinstance(symbol, Symbol): sym = symbol else: raise Exception("Invalid symbol {}".format((repr(symbol)))) # Has to handle the case where the table would exist already # and where it wouldn't. try: sym.datatable = Table(sym.name, Base.metadata, autoload=True) sym.datatable.drop(self.eng, checkfirst=True) except NoSuchTableError: print "No worries, {} never existed to begin with.".format(sym.name) self.ses.delete(sym) self.ses.commit()
[ "def", "delete", "(", "self", ",", "symbol", ")", ":", "if", "isinstance", "(", "symbol", ",", "(", "str", ",", "unicode", ")", ")", ":", "sym", "=", "self", ".", "get", "(", "symbol", ")", "elif", "isinstance", "(", "symbol", ",", "Symbol", ")", ...
Deletes a Symbol. Parameters ---------- symbol : str or Symbol
[ "Deletes", "a", "Symbol", ".", "Parameters", "----------", "symbol", ":", "str", "or", "Symbol" ]
python
train
intuition-io/intuition
intuition/core/analyzes.py
https://github.com/intuition-io/intuition/blob/cd517e6b3b315a743eb4d0d0dc294e264ab913ce/intuition/core/analyzes.py#L111-L133
def overall_metrics(self, timestamp='one_month', metrics=None): ''' Use zipline results to compute some performance indicators ''' perfs = dict() # If no rolling perfs provided, computes it if metrics is None: metrics = self.rolling_performances(timestamp=timestamp) riskfree = np.mean(metrics['treasury_period_return']) perfs['sharpe'] = qstk_get_sharpe_ratio( metrics['algorithm_period_return'].values, risk_free=riskfree) perfs['algorithm_period_return'] = ( ((metrics['algorithm_period_return'] + 1).cumprod()) - 1)[-1] perfs['max_drawdown'] = max(metrics['max_drawdown']) perfs['algo_volatility'] = np.mean(metrics['algo_volatility']) perfs['beta'] = np.mean(metrics['beta']) perfs['alpha'] = np.mean(metrics['alpha']) perfs['benchmark_period_return'] = ( ((metrics['benchmark_period_return'] + 1).cumprod()) - 1)[-1] return perfs
[ "def", "overall_metrics", "(", "self", ",", "timestamp", "=", "'one_month'", ",", "metrics", "=", "None", ")", ":", "perfs", "=", "dict", "(", ")", "# If no rolling perfs provided, computes it", "if", "metrics", "is", "None", ":", "metrics", "=", "self", ".", ...
Use zipline results to compute some performance indicators
[ "Use", "zipline", "results", "to", "compute", "some", "performance", "indicators" ]
python
train
bitprophet/releases
releases/models.py
https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/models.py#L82-L125
def default_spec(self, manager): """ Given the current release-lines structure, return a default Spec. Specifics: * For feature-like issues, only the highest major release is used, so given a ``manager`` with top level keys of ``[1, 2]``, this would return ``Spec(">=2")``. * When ``releases_always_forwardport_features`` is ``True``, that behavior is nullified, and this function always returns the empty ``Spec`` (which matches any and all versions/lines). * For bugfix-like issues, we only consider major release families which have actual releases already. * Thus the core difference here is that features are 'consumed' by upcoming major releases, and bugfixes are not. * When the ``unstable_prehistory`` setting is ``True``, the default spec starts at the oldest non-zero release line. (Otherwise, issues posted after prehistory ends would try being added to the 0.x part of the tree, which makes no sense in unstable-prehistory mode.) """ # TODO: I feel like this + the surrounding bits in add_to_manager() # could be consolidated & simplified... specstr = "" # Make sure truly-default spec skips 0.x if prehistory was unstable. stable_families = manager.stable_families if manager.config.releases_unstable_prehistory and stable_families: specstr = ">={}".format(min(stable_families)) if self.is_featurelike: # TODO: if app->config-><releases_always_forwardport_features or # w/e if True: specstr = ">={}".format(max(manager.keys())) else: # Can only meaningfully limit to minor release buckets if they # actually exist yet. buckets = self.minor_releases(manager) if buckets: specstr = ">={}".format(max(buckets)) return Spec(specstr) if specstr else Spec()
[ "def", "default_spec", "(", "self", ",", "manager", ")", ":", "# TODO: I feel like this + the surrounding bits in add_to_manager()", "# could be consolidated & simplified...", "specstr", "=", "\"\"", "# Make sure truly-default spec skips 0.x if prehistory was unstable.", "stable_families...
Given the current release-lines structure, return a default Spec. Specifics: * For feature-like issues, only the highest major release is used, so given a ``manager`` with top level keys of ``[1, 2]``, this would return ``Spec(">=2")``. * When ``releases_always_forwardport_features`` is ``True``, that behavior is nullified, and this function always returns the empty ``Spec`` (which matches any and all versions/lines). * For bugfix-like issues, we only consider major release families which have actual releases already. * Thus the core difference here is that features are 'consumed' by upcoming major releases, and bugfixes are not. * When the ``unstable_prehistory`` setting is ``True``, the default spec starts at the oldest non-zero release line. (Otherwise, issues posted after prehistory ends would try being added to the 0.x part of the tree, which makes no sense in unstable-prehistory mode.)
[ "Given", "the", "current", "release", "-", "lines", "structure", "return", "a", "default", "Spec", "." ]
python
train
pepkit/peppy
peppy/utils.py
https://github.com/pepkit/peppy/blob/f0f725e1557936b81c86573a77400e6f8da78f05/peppy/utils.py#L540-L557
def is_command_callable(command, name=""): """ Check if command can be called. :param str command: actual command to call :param str name: nickname/alias by which to reference the command, optional :return bool: whether given command's call succeeded """ # Use `command` to see if command is callable, store exit code code = os.system( "command -v {0} >/dev/null 2>&1 || {{ exit 1; }}".format(command)) if code != 0: alias_value = " ('{}') ".format(name) if name else " " _LOGGER.debug("Command '{0}' is not callable: {1}". format(alias_value, command)) return not bool(code)
[ "def", "is_command_callable", "(", "command", ",", "name", "=", "\"\"", ")", ":", "# Use `command` to see if command is callable, store exit code", "code", "=", "os", ".", "system", "(", "\"command -v {0} >/dev/null 2>&1 || {{ exit 1; }}\"", ".", "format", "(", "command", ...
Check if command can be called. :param str command: actual command to call :param str name: nickname/alias by which to reference the command, optional :return bool: whether given command's call succeeded
[ "Check", "if", "command", "can", "be", "called", "." ]
python
train
apache/spark
python/pyspark/streaming/context.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L350-L356
def addStreamingListener(self, streamingListener): """ Add a [[org.apache.spark.streaming.scheduler.StreamingListener]] object for receiving system events related to streaming. """ self._jssc.addStreamingListener(self._jvm.JavaStreamingListenerWrapper( self._jvm.PythonStreamingListenerWrapper(streamingListener)))
[ "def", "addStreamingListener", "(", "self", ",", "streamingListener", ")", ":", "self", ".", "_jssc", ".", "addStreamingListener", "(", "self", ".", "_jvm", ".", "JavaStreamingListenerWrapper", "(", "self", ".", "_jvm", ".", "PythonStreamingListenerWrapper", "(", ...
Add a [[org.apache.spark.streaming.scheduler.StreamingListener]] object for receiving system events related to streaming.
[ "Add", "a", "[[", "org", ".", "apache", ".", "spark", ".", "streaming", ".", "scheduler", ".", "StreamingListener", "]]", "object", "for", "receiving", "system", "events", "related", "to", "streaming", "." ]
python
train
kislyuk/aegea
aegea/packages/github3/session.py
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/session.py#L108-L121
def token_auth(self, token): """Use an application token for authentication. :param str token: Application token retrieved from GitHub's /authorizations endpoint """ if not token: return self.headers.update({ 'Authorization': 'token {0}'.format(token) }) # Unset username/password so we stop sending them self.auth = None
[ "def", "token_auth", "(", "self", ",", "token", ")", ":", "if", "not", "token", ":", "return", "self", ".", "headers", ".", "update", "(", "{", "'Authorization'", ":", "'token {0}'", ".", "format", "(", "token", ")", "}", ")", "# Unset username/password so...
Use an application token for authentication. :param str token: Application token retrieved from GitHub's /authorizations endpoint
[ "Use", "an", "application", "token", "for", "authentication", "." ]
python
train
QualiSystems/vCenterShell
package/cloudshell/cp/vcenter/commands/save_sandbox.py
https://github.com/QualiSystems/vCenterShell/blob/e2e24cd938a92a68f4a8e6a860810d3ef72aae6d/package/cloudshell/cp/vcenter/commands/save_sandbox.py#L38-L91
def save_app(self, si, logger, vcenter_data_model, reservation_id, save_app_actions, cancellation_context): """ Cretaes an artifact of an app, that can later be restored :param vcenter_data_model: VMwarevCenterResourceModel :param vim.ServiceInstance si: py_vmomi service instance :type si: vim.ServiceInstance :param logger: Logger :type logger: cloudshell.core.logger.qs_logger.get_qs_logger :param list[SaveApp] save_app_actions: :param cancellation_context: """ results = [] logger.info('Save Sandbox command starting on ' + vcenter_data_model.default_datacenter) if not save_app_actions: raise Exception('Failed to save app, missing data in request.') actions_grouped_by_save_types = groupby(save_app_actions, lambda x: x.actionParams.saveDeploymentModel) # artifactSaver or artifactHandler are different ways to save artifacts. For example, currently # we clone a vm, thenk take a snapshot. restore will be to deploy from linked snapshot # a future artifact handler we might develop is save vm to OVF file and restore from file. artifactSaversToActions = {ArtifactHandler.factory(k, self.pyvmomi_service, vcenter_data_model, si, logger, self.deployer, reservation_id, self.resource_model_parser, self.snapshot_saver, self.task_waiter, self.folder_manager, self.port_group_configurer, self.cs) : list(g) for k, g in actions_grouped_by_save_types} self.validate_requested_save_types_supported(artifactSaversToActions, logger, results) error_results = [r for r in results if not r.success] if not error_results: logger.info('Handling Save App requests') results = self._execute_save_actions_using_pool(artifactSaversToActions, cancellation_context, logger, results) logger.info('Completed Save Sandbox command') else: logger.error('Some save app requests were not valid, Save Sandbox command failed.') return results
[ "def", "save_app", "(", "self", ",", "si", ",", "logger", ",", "vcenter_data_model", ",", "reservation_id", ",", "save_app_actions", ",", "cancellation_context", ")", ":", "results", "=", "[", "]", "logger", ".", "info", "(", "'Save Sandbox command starting on '",...
Cretaes an artifact of an app, that can later be restored :param vcenter_data_model: VMwarevCenterResourceModel :param vim.ServiceInstance si: py_vmomi service instance :type si: vim.ServiceInstance :param logger: Logger :type logger: cloudshell.core.logger.qs_logger.get_qs_logger :param list[SaveApp] save_app_actions: :param cancellation_context:
[ "Cretaes", "an", "artifact", "of", "an", "app", "that", "can", "later", "be", "restored" ]
python
train
pandas-dev/pandas
pandas/io/pytables.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L2518-L2549
def read_array(self, key, start=None, stop=None): """ read an array for the specified node (off of group """ import tables node = getattr(self.group, key) attrs = node._v_attrs transposed = getattr(attrs, 'transposed', False) if isinstance(node, tables.VLArray): ret = node[0][start:stop] else: dtype = getattr(attrs, 'value_type', None) shape = getattr(attrs, 'shape', None) if shape is not None: # length 0 axis ret = np.empty(shape, dtype=dtype) else: ret = node[start:stop] if dtype == 'datetime64': # reconstruct a timezone if indicated ret = _set_tz(ret, getattr(attrs, 'tz', None), coerce=True) elif dtype == 'timedelta64': ret = np.asarray(ret, dtype='m8[ns]') if transposed: return ret.T else: return ret
[ "def", "read_array", "(", "self", ",", "key", ",", "start", "=", "None", ",", "stop", "=", "None", ")", ":", "import", "tables", "node", "=", "getattr", "(", "self", ".", "group", ",", "key", ")", "attrs", "=", "node", ".", "_v_attrs", "transposed", ...
read an array for the specified node (off of group
[ "read", "an", "array", "for", "the", "specified", "node", "(", "off", "of", "group" ]
python
train
watson-developer-cloud/python-sdk
ibm_watson/language_translator_v3.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/language_translator_v3.py#L568-L573
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'languages') and self.languages is not None: _dict['languages'] = [x._to_dict() for x in self.languages] return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'languages'", ")", "and", "self", ".", "languages", "is", "not", "None", ":", "_dict", "[", "'languages'", "]", "=", "[", "x", ".", "_to_dict", "(...
Return a json dictionary representing this model.
[ "Return", "a", "json", "dictionary", "representing", "this", "model", "." ]
python
train
log2timeline/dfdatetime
dfdatetime/precisions.py
https://github.com/log2timeline/dfdatetime/blob/141ca4ef1eff3d354b5deaac3d81cb08506f98d6/dfdatetime/precisions.py#L78-L101
def CopyToDateTimeString(cls, time_elements_tuple, fraction_of_second): """Copies the time elements and fraction of second to a string. Args: time_elements_tuple (tuple[int, int, int, int, int, int]): time elements, contains year, month, day of month, hours, minutes and seconds. fraction_of_second (decimal.Decimal): fraction of second, which must be a value between 0.0 and 1.0. Returns: str: date and time value formatted as: YYYY-MM-DD hh:mm:ss Raises: ValueError: if the fraction of second is out of bounds. """ if fraction_of_second < 0.0 or fraction_of_second >= 1.0: raise ValueError('Fraction of second value: {0:f} out of bounds.'.format( fraction_of_second)) return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}'.format( time_elements_tuple[0], time_elements_tuple[1], time_elements_tuple[2], time_elements_tuple[3], time_elements_tuple[4], time_elements_tuple[5])
[ "def", "CopyToDateTimeString", "(", "cls", ",", "time_elements_tuple", ",", "fraction_of_second", ")", ":", "if", "fraction_of_second", "<", "0.0", "or", "fraction_of_second", ">=", "1.0", ":", "raise", "ValueError", "(", "'Fraction of second value: {0:f} out of bounds.'"...
Copies the time elements and fraction of second to a string. Args: time_elements_tuple (tuple[int, int, int, int, int, int]): time elements, contains year, month, day of month, hours, minutes and seconds. fraction_of_second (decimal.Decimal): fraction of second, which must be a value between 0.0 and 1.0. Returns: str: date and time value formatted as: YYYY-MM-DD hh:mm:ss Raises: ValueError: if the fraction of second is out of bounds.
[ "Copies", "the", "time", "elements", "and", "fraction", "of", "second", "to", "a", "string", "." ]
python
train
Gandi/gandi.cli
gandi/cli/modules/paas.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/paas.py#L193-L234
def update(cls, id, name, size, quantity, password, sshkey, upgrade, console, snapshot_profile, reset_mysql_password, background): """Update a PaaS instance.""" if not background and not cls.intty(): background = True paas_params = {} if name: paas_params['name'] = name if size: paas_params['size'] = size if quantity: paas_params['quantity'] = quantity if password: paas_params['password'] = password paas_params.update(cls.convert_sshkey(sshkey)) if upgrade: paas_params['upgrade'] = upgrade if console: paas_params['console'] = console # XXX to delete a snapshot_profile the value has to be an empty string if snapshot_profile is not None: paas_params['snapshot_profile'] = snapshot_profile if reset_mysql_password: paas_params['reset_mysql_password'] = reset_mysql_password result = cls.call('paas.update', cls.usable_id(id), paas_params) if background: return result # interactive mode, run a progress bar cls.echo('Updating your PaaS instance.') cls.display_progress(result)
[ "def", "update", "(", "cls", ",", "id", ",", "name", ",", "size", ",", "quantity", ",", "password", ",", "sshkey", ",", "upgrade", ",", "console", ",", "snapshot_profile", ",", "reset_mysql_password", ",", "background", ")", ":", "if", "not", "background",...
Update a PaaS instance.
[ "Update", "a", "PaaS", "instance", "." ]
python
train
mikedh/trimesh
trimesh/path/entities.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/entities.py#L43-L50
def closed(self): """ If the first point is the same as the end point the entity is closed """ closed = (len(self.points) > 2 and self.points[0] == self.points[-1]) return closed
[ "def", "closed", "(", "self", ")", ":", "closed", "=", "(", "len", "(", "self", ".", "points", ")", ">", "2", "and", "self", ".", "points", "[", "0", "]", "==", "self", ".", "points", "[", "-", "1", "]", ")", "return", "closed" ]
If the first point is the same as the end point the entity is closed
[ "If", "the", "first", "point", "is", "the", "same", "as", "the", "end", "point", "the", "entity", "is", "closed" ]
python
train
bwohlberg/sporco
sporco/dictlrn/onlinecdl.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/dictlrn/onlinecdl.py#L209-L238
def solve(self, S, dimK=None): """Compute sparse coding and dictionary update for training data `S`.""" # Use dimK specified in __init__ as default if dimK is None and self.dimK is not None: dimK = self.dimK # Start solve timer self.timer.start(['solve', 'solve_wo_eval']) # Solve CSC problem on S and do dictionary step self.init_vars(S, dimK) self.xstep(S, self.lmbda, dimK) self.dstep() # Stop solve timer self.timer.stop('solve_wo_eval') # Extract and record iteration stats self.manage_itstat() # Increment iteration count self.j += 1 # Stop solve timer self.timer.stop('solve') # Return current dictionary return self.getdict()
[ "def", "solve", "(", "self", ",", "S", ",", "dimK", "=", "None", ")", ":", "# Use dimK specified in __init__ as default", "if", "dimK", "is", "None", "and", "self", ".", "dimK", "is", "not", "None", ":", "dimK", "=", "self", ".", "dimK", "# Start solve tim...
Compute sparse coding and dictionary update for training data `S`.
[ "Compute", "sparse", "coding", "and", "dictionary", "update", "for", "training", "data", "S", "." ]
python
train
rapidpro/expressions
python/temba_expressions/utils.py
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/utils.py#L22-L31
def decimal_round(number, num_digits, rounding=ROUND_HALF_UP): """ Rounding for decimals with support for negative digits """ exp = Decimal(10) ** -num_digits if num_digits >= 0: return number.quantize(exp, rounding) else: return exp * (number / exp).to_integral_value(rounding)
[ "def", "decimal_round", "(", "number", ",", "num_digits", ",", "rounding", "=", "ROUND_HALF_UP", ")", ":", "exp", "=", "Decimal", "(", "10", ")", "**", "-", "num_digits", "if", "num_digits", ">=", "0", ":", "return", "number", ".", "quantize", "(", "exp"...
Rounding for decimals with support for negative digits
[ "Rounding", "for", "decimals", "with", "support", "for", "negative", "digits" ]
python
train
vmalyi/adb_android
adb_android/adb_android.py
https://github.com/vmalyi/adb_android/blob/de53dc54f27b14dc8c2ae64b136a60a59e1a1cb1/adb_android/adb_android.py#L103-L111
def uninstall(app, opts=[]): """ Uninstall app from target :param app: app name to uninstall from target (e.g. "com.example.android.valid") :param opts: list command options (e.g. ["-r", "-a"]) :return: result of _exec_command() execution """ adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_UNINSTALL, _convert_opts(opts), app] return _exec_command(adb_full_cmd)
[ "def", "uninstall", "(", "app", ",", "opts", "=", "[", "]", ")", ":", "adb_full_cmd", "=", "[", "v", ".", "ADB_COMMAND_PREFIX", ",", "v", ".", "ADB_COMMAND_UNINSTALL", ",", "_convert_opts", "(", "opts", ")", ",", "app", "]", "return", "_exec_command", "(...
Uninstall app from target :param app: app name to uninstall from target (e.g. "com.example.android.valid") :param opts: list command options (e.g. ["-r", "-a"]) :return: result of _exec_command() execution
[ "Uninstall", "app", "from", "target", ":", "param", "app", ":", "app", "name", "to", "uninstall", "from", "target", "(", "e", ".", "g", ".", "com", ".", "example", ".", "android", ".", "valid", ")", ":", "param", "opts", ":", "list", "command", "opti...
python
train
yyuu/botornado
boto/dynamodb/layer2.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/dynamodb/layer2.py#L258-L290
def build_key_from_values(self, schema, hash_key, range_key=None): """ Build a Key structure to be used for accessing items in Amazon DynamoDB. This method takes the supplied hash_key and optional range_key and validates them against the schema. If there is a mismatch, a TypeError is raised. Otherwise, a Python dict version of a Amazon DynamoDB Key data structure is returned. :type hash_key: int, float, str, or unicode :param hash_key: The hash key of the item you are looking for. The type of the hash key should match the type defined in the schema. :type range_key: int, float, str or unicode :param range_key: The range key of the item your are looking for. This should be supplied only if the schema requires a range key. The type of the range key should match the type defined in the schema. """ dynamodb_key = {} dynamodb_value = self.dynamize_value(hash_key) if dynamodb_value.keys()[0] != schema.hash_key_type: msg = 'Hashkey must be of type: %s' % schema.hash_key_type raise TypeError(msg) dynamodb_key['HashKeyElement'] = dynamodb_value if range_key is not None: dynamodb_value = self.dynamize_value(range_key) if dynamodb_value.keys()[0] != schema.range_key_type: msg = 'RangeKey must be of type: %s' % schema.range_key_type raise TypeError(msg) dynamodb_key['RangeKeyElement'] = dynamodb_value return dynamodb_key
[ "def", "build_key_from_values", "(", "self", ",", "schema", ",", "hash_key", ",", "range_key", "=", "None", ")", ":", "dynamodb_key", "=", "{", "}", "dynamodb_value", "=", "self", ".", "dynamize_value", "(", "hash_key", ")", "if", "dynamodb_value", ".", "key...
Build a Key structure to be used for accessing items in Amazon DynamoDB. This method takes the supplied hash_key and optional range_key and validates them against the schema. If there is a mismatch, a TypeError is raised. Otherwise, a Python dict version of a Amazon DynamoDB Key data structure is returned. :type hash_key: int, float, str, or unicode :param hash_key: The hash key of the item you are looking for. The type of the hash key should match the type defined in the schema. :type range_key: int, float, str or unicode :param range_key: The range key of the item your are looking for. This should be supplied only if the schema requires a range key. The type of the range key should match the type defined in the schema.
[ "Build", "a", "Key", "structure", "to", "be", "used", "for", "accessing", "items", "in", "Amazon", "DynamoDB", ".", "This", "method", "takes", "the", "supplied", "hash_key", "and", "optional", "range_key", "and", "validates", "them", "against", "the", "schema"...
python
train
go-macaroon-bakery/py-macaroon-bakery
macaroonbakery/checkers/_checkers.py
https://github.com/go-macaroon-bakery/py-macaroon-bakery/blob/63ce1ef1dabe816eb8aaec48fbb46761c34ddf77/macaroonbakery/checkers/_checkers.py#L67-L83
def check_first_party_caveat(self, ctx, cav): ''' Checks the caveat against all registered caveat conditions. :return: error message string if any or None ''' try: cond, arg = parse_caveat(cav) except ValueError as ex: # If we can't parse it, perhaps it's in some other format, # return a not-recognised error. return 'cannot parse caveat "{}": {}'.format(cav, ex.args[0]) checker = self._checkers.get(cond) if checker is None: return 'caveat "{}" not satisfied: caveat not recognized'.format( cav) err = checker.check(ctx, cond, arg) if err is not None: return 'caveat "{}" not satisfied: {}'.format(cav, err)
[ "def", "check_first_party_caveat", "(", "self", ",", "ctx", ",", "cav", ")", ":", "try", ":", "cond", ",", "arg", "=", "parse_caveat", "(", "cav", ")", "except", "ValueError", "as", "ex", ":", "# If we can't parse it, perhaps it's in some other format,", "# return...
Checks the caveat against all registered caveat conditions. :return: error message string if any or None
[ "Checks", "the", "caveat", "against", "all", "registered", "caveat", "conditions", ".", ":", "return", ":", "error", "message", "string", "if", "any", "or", "None" ]
python
train
Alignak-monitoring/alignak
alignak/external_command.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L676-L710
def get_unknown_check_result_brok(cmd_line): """Create unknown check result brok and fill it with command data :param cmd_line: command line to extract data :type cmd_line: str :return: unknown check result brok :rtype: alignak.objects.brok.Brok """ match = re.match( r'^\[([0-9]{10})] PROCESS_(SERVICE)_CHECK_RESULT;' r'([^\;]*);([^\;]*);([^\;]*);([^\|]*)(?:\|(.*))?', cmd_line) if not match: match = re.match( r'^\[([0-9]{10})] PROCESS_(HOST)_CHECK_RESULT;' r'([^\;]*);([^\;]*);([^\|]*)(?:\|(.*))?', cmd_line) if not match: return None data = { 'time_stamp': int(match.group(1)), 'host_name': match.group(3), } if match.group(2) == 'SERVICE': data['service_description'] = match.group(4) data['return_code'] = match.group(5) data['output'] = match.group(6) data['perf_data'] = match.group(7) else: data['return_code'] = match.group(4) data['output'] = match.group(5) data['perf_data'] = match.group(6) return Brok({'type': 'unknown_%s_check_result' % match.group(2).lower(), 'data': data})
[ "def", "get_unknown_check_result_brok", "(", "cmd_line", ")", ":", "match", "=", "re", ".", "match", "(", "r'^\\[([0-9]{10})] PROCESS_(SERVICE)_CHECK_RESULT;'", "r'([^\\;]*);([^\\;]*);([^\\;]*);([^\\|]*)(?:\\|(.*))?'", ",", "cmd_line", ")", "if", "not", "match", ":", "match...
Create unknown check result brok and fill it with command data :param cmd_line: command line to extract data :type cmd_line: str :return: unknown check result brok :rtype: alignak.objects.brok.Brok
[ "Create", "unknown", "check", "result", "brok", "and", "fill", "it", "with", "command", "data" ]
python
train
ladybug-tools/ladybug
ladybug/skymodel.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/skymodel.py#L411-L462
def _dirint_bins(ktp, alt, w, dktp): """ Determine the bins for the DIRINT coefficients. Args: ktp : Altitude-independent clearness index alt : Solar altitude angle w : precipitable water estimated from surface dew-point temperature dktp : stability index Returns: tuple of ktp_bin, alt_bin, w_bin, dktp_bin """ it = range(len(ktp)) # Create kt_prime bins ktp_bin = [-1] * len(ktp) ktp_bin = [0 if ktp[i] >= 0 and ktp[i] < 0.24 else ktp_bin[i] for i in it] ktp_bin = [1 if ktp[i] >= 0.24 and ktp[i] < 0.4 else ktp_bin[i] for i in it] ktp_bin = [2 if ktp[i] >= 0.4 and ktp[i] < 0.56 else ktp_bin[i] for i in it] ktp_bin = [3 if ktp[i] >= 0.56 and ktp[i] < 0.7 else ktp_bin[i] for i in it] ktp_bin = [4 if ktp[i] >= 0.7 and ktp[i] < 0.8 else ktp_bin[i] for i in it] ktp_bin = [5 if ktp[i] >= 0.8 and ktp[i] <= 1 else ktp_bin[i] for i in it] # Create altitude angle bins alt_bin = [-1] * len(alt) alt_bin = [0 if alt[i] <= 90 and alt[i] > 65 else alt_bin[i] for i in it] alt_bin = [1 if alt[i] <= 65 and alt[i] > 50 else alt_bin[i] for i in it] alt_bin = [2 if alt[i] <= 50 and alt[i] > 35 else alt_bin[i] for i in it] alt_bin = [3 if alt[i] <= 35 and alt[i] > 20 else alt_bin[i] for i in it] alt_bin = [4 if alt[i] <= 20 and alt[i] > 10 else alt_bin[i] for i in it] alt_bin = [5 if alt[i] <= 10 else alt_bin[i] for i in it] # Create the bins for w based on dew point temperature w_bin = [-1] * len(w) w_bin = [0 if w[i] >= 0 and w[i] < 1 else w_bin[i] for i in it] w_bin = [1 if w[i] >= 1 and w[i] < 2 else w_bin[i] for i in it] w_bin = [2 if w[i] >= 2 and w[i] < 3 else w_bin[i] for i in it] w_bin = [3 if w[i] >= 3 else w_bin[i] for i in it] w_bin = [4 if w[i] == -1 else w_bin[i] for i in it] # Create delta_kt_prime binning. 
dktp_bin = [-1] * len(dktp) dktp_bin = [0 if dktp[i] >= 0 and dktp[i] < 0.015 else dktp_bin[i] for i in it] dktp_bin = [1 if dktp[i] >= 0.015 and dktp[i] < 0.035 else dktp_bin[i] for i in it] dktp_bin = [2 if dktp[i] >= 0.035 and dktp[i] < 0.07 else dktp_bin[i] for i in it] dktp_bin = [3 if dktp[i] >= 0.07 and dktp[i] < 0.15 else dktp_bin[i] for i in it] dktp_bin = [4 if dktp[i] >= 0.15 and dktp[i] < 0.3 else dktp_bin[i] for i in it] dktp_bin = [5 if dktp[i] >= 0.3 and dktp[i] <= 1 else dktp_bin[i] for i in it] dktp_bin = [6 if dktp[i] == -1 else dktp_bin[i] for i in it] return ktp_bin, alt_bin, w_bin, dktp_bin
[ "def", "_dirint_bins", "(", "ktp", ",", "alt", ",", "w", ",", "dktp", ")", ":", "it", "=", "range", "(", "len", "(", "ktp", ")", ")", "# Create kt_prime bins", "ktp_bin", "=", "[", "-", "1", "]", "*", "len", "(", "ktp", ")", "ktp_bin", "=", "[", ...
Determine the bins for the DIRINT coefficients. Args: ktp : Altitude-independent clearness index alt : Solar altitude angle w : precipitable water estimated from surface dew-point temperature dktp : stability index Returns: tuple of ktp_bin, alt_bin, w_bin, dktp_bin
[ "Determine", "the", "bins", "for", "the", "DIRINT", "coefficients", "." ]
python
train