repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
edeposit/marcxml_parser
src/marcxml_parser/query.py
https://github.com/edeposit/marcxml_parser/blob/6d1c77c61fc2827b71f1b3d5aa3332d7f5807820/src/marcxml_parser/query.py#L40-L93
def _parse_corporations(self, datafield, subfield, roles=["any"]): """ Parse informations about corporations from given field identified by `datafield` parameter. Args: datafield (str): MARC field ID ("``110``", "``610``", etc..) subfield (str): MARC subfield ID with name, which is typically stored in "``a``" subfield. roles (str): specify which roles you need. Set to ``["any"]`` for any role, ``["dst"]`` for distributors, etc.. For details, see http://www.loc.gov/marc/relators/relaterm.html Returns: list: :class:`Corporation` objects. """ if len(datafield) != 3: raise ValueError( "datafield parameter have to be exactly 3 chars long!" ) if len(subfield) != 1: raise ValueError( "Bad subfield specification - subield have to be 3 chars long!" ) parsed_corporations = [] for corporation in self.get_subfields(datafield, subfield): other_subfields = corporation.other_subfields # check if corporation have at least one of the roles specified in # 'roles' parameter of function if "4" in other_subfields and roles != ["any"]: corp_roles = other_subfields["4"] # list of role parameters relevant = any(map(lambda role: role in roles, corp_roles)) # skip non-relevant corporations if not relevant: continue name = "" place = "" date = "" name = corporation if "c" in other_subfields: place = ",".join(other_subfields["c"]) if "d" in other_subfields: date = ",".join(other_subfields["d"]) parsed_corporations.append(Corporation(name, place, date)) return parsed_corporations
[ "def", "_parse_corporations", "(", "self", ",", "datafield", ",", "subfield", ",", "roles", "=", "[", "\"any\"", "]", ")", ":", "if", "len", "(", "datafield", ")", "!=", "3", ":", "raise", "ValueError", "(", "\"datafield parameter have to be exactly 3 chars long...
Parse informations about corporations from given field identified by `datafield` parameter. Args: datafield (str): MARC field ID ("``110``", "``610``", etc..) subfield (str): MARC subfield ID with name, which is typically stored in "``a``" subfield. roles (str): specify which roles you need. Set to ``["any"]`` for any role, ``["dst"]`` for distributors, etc.. For details, see http://www.loc.gov/marc/relators/relaterm.html Returns: list: :class:`Corporation` objects.
[ "Parse", "informations", "about", "corporations", "from", "given", "field", "identified", "by", "datafield", "parameter", "." ]
python
valid
Tsjerk/Insane
insane/structure.py
https://github.com/Tsjerk/Insane/blob/b73f08910ddb0b66597b20ff75ecee7f65f4ecf6/insane/structure.py#L332-L366
def write_gro(outfile, title, atoms, box): """ Write a GRO file. Parameters ---------- outfile The stream to write in. title The title of the GRO file. Must be a single line. atoms An instance of Structure containing the atoms to write. box The periodic box as a 3x3 matrix. """ # Print the title print(title, file=outfile) # Print the number of atoms print("{:5d}".format(len(atoms)), file=outfile) # Print the atoms atom_template = "{:5d}{:<5s}{:>5s}{:5d}{:8.3f}{:8.3f}{:8.3f}" for idx, atname, resname, resid, x, y, z in atoms: print(atom_template .format(int(resid % 1e5), resname, atname, int(idx % 1e5), x, y, z), file=outfile) # Print the box grobox = (box[0][0], box[1][1], box[2][2], box[0][1], box[0][2], box[1][0], box[1][2], box[2][0], box[2][1]) box_template = '{:10.5f}' * 9 print(box_template.format(*grobox), file=outfile)
[ "def", "write_gro", "(", "outfile", ",", "title", ",", "atoms", ",", "box", ")", ":", "# Print the title", "print", "(", "title", ",", "file", "=", "outfile", ")", "# Print the number of atoms", "print", "(", "\"{:5d}\"", ".", "format", "(", "len", "(", "a...
Write a GRO file. Parameters ---------- outfile The stream to write in. title The title of the GRO file. Must be a single line. atoms An instance of Structure containing the atoms to write. box The periodic box as a 3x3 matrix.
[ "Write", "a", "GRO", "file", "." ]
python
test
sernst/cauldron
cauldron/session/reloading.py
https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/session/reloading.py#L70-L93
def reload_children(parent_module: types.ModuleType, newer_than: int) -> bool: """ Reloads all imported children of the specified parent module object :param parent_module: A module object whose children should be refreshed if their currently loaded versions are out of date. :param newer_than: An integer time in seconds for comparison. Any children modules that were modified more recently than this time will be reloaded. :return: Whether or not any children were reloaded """ if not hasattr(parent_module, '__path__'): return False parent_name = get_module_name(parent_module) children = filter( lambda item: item[0].startswith(parent_name), sys.modules.items() ) return any([do_reload(item[1], newer_than) for item in children])
[ "def", "reload_children", "(", "parent_module", ":", "types", ".", "ModuleType", ",", "newer_than", ":", "int", ")", "->", "bool", ":", "if", "not", "hasattr", "(", "parent_module", ",", "'__path__'", ")", ":", "return", "False", "parent_name", "=", "get_mod...
Reloads all imported children of the specified parent module object :param parent_module: A module object whose children should be refreshed if their currently loaded versions are out of date. :param newer_than: An integer time in seconds for comparison. Any children modules that were modified more recently than this time will be reloaded. :return: Whether or not any children were reloaded
[ "Reloads", "all", "imported", "children", "of", "the", "specified", "parent", "module", "object" ]
python
train
Azure/azure-sdk-for-python
azure-servicebus/azure/servicebus/aio/async_receive_handler.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicebus/azure/servicebus/aio/async_receive_handler.py#L246-L277
async def close(self, exception=None): """Close down the receiver connection. If the receiver has already closed, this operation will do nothing. An optional exception can be passed in to indicate that the handler was shutdown due to error. It is recommended to open a handler within a context manager as opposed to calling the method directly. The receiver will be implicitly closed on completion of the message iterator, however this method will need to be called explicitly if the message iterator is not run to completion. .. note:: This operation is not thread-safe. :param exception: An optional exception if the handler is closing due to an error. :type exception: Exception Example: .. literalinclude:: ../examples/async_examples/test_examples_async.py :start-after: [START open_close_receiver_directly] :end-before: [END open_close_receiver_directly] :language: python :dedent: 4 :caption: Iterate then explicitly close a Receiver. """ if not self.running: return self.running = False self.receiver_shutdown = True self._used.set() await super(Receiver, self).close(exception=exception)
[ "async", "def", "close", "(", "self", ",", "exception", "=", "None", ")", ":", "if", "not", "self", ".", "running", ":", "return", "self", ".", "running", "=", "False", "self", ".", "receiver_shutdown", "=", "True", "self", ".", "_used", ".", "set", ...
Close down the receiver connection. If the receiver has already closed, this operation will do nothing. An optional exception can be passed in to indicate that the handler was shutdown due to error. It is recommended to open a handler within a context manager as opposed to calling the method directly. The receiver will be implicitly closed on completion of the message iterator, however this method will need to be called explicitly if the message iterator is not run to completion. .. note:: This operation is not thread-safe. :param exception: An optional exception if the handler is closing due to an error. :type exception: Exception Example: .. literalinclude:: ../examples/async_examples/test_examples_async.py :start-after: [START open_close_receiver_directly] :end-before: [END open_close_receiver_directly] :language: python :dedent: 4 :caption: Iterate then explicitly close a Receiver.
[ "Close", "down", "the", "receiver", "connection", "." ]
python
test
allenai/allennlp
allennlp/data/dataset_readers/dataset_utils/span_utils.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/dataset_readers/dataset_utils/span_utils.py#L217-L260
def bioul_tags_to_spans(tag_sequence: List[str], classes_to_ignore: List[str] = None) -> List[TypedStringSpan]: """ Given a sequence corresponding to BIOUL tags, extracts spans. Spans are inclusive and can be of zero length, representing a single word span. Ill-formed spans are not allowed and will raise ``InvalidTagSequence``. This function works properly when the spans are unlabeled (i.e., your labels are simply "B", "I", "O", "U", and "L"). Parameters ---------- tag_sequence : ``List[str]``, required. The tag sequence encoded in BIOUL, e.g. ["B-PER", "L-PER", "O"]. classes_to_ignore : ``List[str]``, optional (default = None). A list of string class labels `excluding` the bio tag which should be ignored when extracting spans. Returns ------- spans : ``List[TypedStringSpan]`` The typed, extracted spans from the sequence, in the format (label, (span_start, span_end)). """ spans = [] classes_to_ignore = classes_to_ignore or [] index = 0 while index < len(tag_sequence): label = tag_sequence[index] if label[0] == 'U': spans.append((label.partition('-')[2], (index, index))) elif label[0] == 'B': start = index while label[0] != 'L': index += 1 if index >= len(tag_sequence): raise InvalidTagSequence(tag_sequence) label = tag_sequence[index] if not (label[0] == 'I' or label[0] == 'L'): raise InvalidTagSequence(tag_sequence) spans.append((label.partition('-')[2], (start, index))) else: if label != 'O': raise InvalidTagSequence(tag_sequence) index += 1 return [span for span in spans if span[0] not in classes_to_ignore]
[ "def", "bioul_tags_to_spans", "(", "tag_sequence", ":", "List", "[", "str", "]", ",", "classes_to_ignore", ":", "List", "[", "str", "]", "=", "None", ")", "->", "List", "[", "TypedStringSpan", "]", ":", "spans", "=", "[", "]", "classes_to_ignore", "=", "...
Given a sequence corresponding to BIOUL tags, extracts spans. Spans are inclusive and can be of zero length, representing a single word span. Ill-formed spans are not allowed and will raise ``InvalidTagSequence``. This function works properly when the spans are unlabeled (i.e., your labels are simply "B", "I", "O", "U", and "L"). Parameters ---------- tag_sequence : ``List[str]``, required. The tag sequence encoded in BIOUL, e.g. ["B-PER", "L-PER", "O"]. classes_to_ignore : ``List[str]``, optional (default = None). A list of string class labels `excluding` the bio tag which should be ignored when extracting spans. Returns ------- spans : ``List[TypedStringSpan]`` The typed, extracted spans from the sequence, in the format (label, (span_start, span_end)).
[ "Given", "a", "sequence", "corresponding", "to", "BIOUL", "tags", "extracts", "spans", ".", "Spans", "are", "inclusive", "and", "can", "be", "of", "zero", "length", "representing", "a", "single", "word", "span", ".", "Ill", "-", "formed", "spans", "are", "...
python
train
IAMconsortium/pyam
pyam/core.py
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L1162-L1169
def stack_plot(self, *args, **kwargs): """Plot timeseries stacks of existing data see pyam.plotting.stack_plot() for all available options """ df = self.as_pandas(with_metadata=True) ax = plotting.stack_plot(df, *args, **kwargs) return ax
[ "def", "stack_plot", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "df", "=", "self", ".", "as_pandas", "(", "with_metadata", "=", "True", ")", "ax", "=", "plotting", ".", "stack_plot", "(", "df", ",", "*", "args", ",", "*", "...
Plot timeseries stacks of existing data see pyam.plotting.stack_plot() for all available options
[ "Plot", "timeseries", "stacks", "of", "existing", "data" ]
python
train
Dullage/starlingbank
starlingbank/__init__.py
https://github.com/Dullage/starlingbank/blob/9495456980d5d6d85c4e999a17dc69481067af09/starlingbank/__init__.py#L111-L129
def get_image(self, filename: str=None) -> None: """Download the photo associated with a Savings Goal.""" if filename is None: filename = "{0}.png".format(self.name) endpoint = "/account/{0}/savings-goals/{1}/photo".format( self._account_uid, self.uid ) response = get( _url(endpoint, self._sandbox), headers=self._auth_headers ) response.raise_for_status() base64_image = response.json()['base64EncodedPhoto'] with open(filename, 'wb') as file: file.write(b64decode(base64_image))
[ "def", "get_image", "(", "self", ",", "filename", ":", "str", "=", "None", ")", "->", "None", ":", "if", "filename", "is", "None", ":", "filename", "=", "\"{0}.png\"", ".", "format", "(", "self", ".", "name", ")", "endpoint", "=", "\"/account/{0}/savings...
Download the photo associated with a Savings Goal.
[ "Download", "the", "photo", "associated", "with", "a", "Savings", "Goal", "." ]
python
train
BlueBrain/hpcbench
hpcbench/benchmark/ior.py
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/benchmark/ior.py#L220-L225
def apis(self): """List of API to test""" value = self.attributes['apis'] if isinstance(value, six.string_types): value = shlex.split(value) return value
[ "def", "apis", "(", "self", ")", ":", "value", "=", "self", ".", "attributes", "[", "'apis'", "]", "if", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", ":", "value", "=", "shlex", ".", "split", "(", "value", ")", "return", "value" ...
List of API to test
[ "List", "of", "API", "to", "test" ]
python
train
PredixDev/predixpy
predix/admin/eventhub.py
https://github.com/PredixDev/predixpy/blob/a0cb34cf40f716229351bb6d90d6ecace958c81f/predix/admin/eventhub.py#L28-L38
def create(self): """ Create an instance of the Time Series Service with the typical starting settings. """ self.service.create() os.environ[predix.config.get_env_key(self.use_class, 'host')] = self.get_eventhub_host() os.environ[predix.config.get_env_key(self.use_class, 'port')] = self.get_eventhub_grpc_port() os.environ[predix.config.get_env_key(self.use_class, 'wss_publish_uri')] = self.get_publish_wss_uri() os.environ[predix.config.get_env_key(self.use_class, 'zone_id')] = self.get_zone_id()
[ "def", "create", "(", "self", ")", ":", "self", ".", "service", ".", "create", "(", ")", "os", ".", "environ", "[", "predix", ".", "config", ".", "get_env_key", "(", "self", ".", "use_class", ",", "'host'", ")", "]", "=", "self", ".", "get_eventhub_h...
Create an instance of the Time Series Service with the typical starting settings.
[ "Create", "an", "instance", "of", "the", "Time", "Series", "Service", "with", "the", "typical", "starting", "settings", "." ]
python
train
mitsei/dlkit
dlkit/json_/commenting/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/commenting/sessions.py#L2404-L2421
def has_parent_books(self, book_id): """Tests if the ``Book`` has any parents. arg: book_id (osid.id.Id): a book ``Id`` return: (boolean) - ``true`` if the book has parents, f ``alse`` otherwise raise: NotFound - ``book_id`` is not found raise: NullArgument - ``book_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinHierarchySession.has_parent_bins if self._catalog_session is not None: return self._catalog_session.has_parent_catalogs(catalog_id=book_id) return self._hierarchy_session.has_parents(id_=book_id)
[ "def", "has_parent_books", "(", "self", ",", "book_id", ")", ":", "# Implemented from template for", "# osid.resource.BinHierarchySession.has_parent_bins", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_session", ".", "...
Tests if the ``Book`` has any parents. arg: book_id (osid.id.Id): a book ``Id`` return: (boolean) - ``true`` if the book has parents, f ``alse`` otherwise raise: NotFound - ``book_id`` is not found raise: NullArgument - ``book_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Tests", "if", "the", "Book", "has", "any", "parents", "." ]
python
train
modin-project/modin
modin/engines/ray/pandas_on_ray/io.py
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/pandas_on_ray/io.py#L100-L119
def _read_hdf_columns(path_or_buf, columns, num_splits, kwargs): # pragma: no cover """Use a Ray task to read columns from HDF5 into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: path_or_buf: The path of the HDF5 file. columns: The list of column names to read. num_splits: The number of partitions to split the column into. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index. """ df = pandas.read_hdf(path_or_buf, columns=columns, **kwargs) # Append the length of the index here to build it externally return _split_result_for_readers(0, num_splits, df) + [len(df.index)]
[ "def", "_read_hdf_columns", "(", "path_or_buf", ",", "columns", ",", "num_splits", ",", "kwargs", ")", ":", "# pragma: no cover", "df", "=", "pandas", ".", "read_hdf", "(", "path_or_buf", ",", "columns", "=", "columns", ",", "*", "*", "kwargs", ")", "# Appen...
Use a Ray task to read columns from HDF5 into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: path_or_buf: The path of the HDF5 file. columns: The list of column names to read. num_splits: The number of partitions to split the column into. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index.
[ "Use", "a", "Ray", "task", "to", "read", "columns", "from", "HDF5", "into", "a", "Pandas", "DataFrame", "." ]
python
train
jbaiter/hidapi-cffi
hidapi.py
https://github.com/jbaiter/hidapi-cffi/blob/227116a2a2ba25ff5a56bf5205ae2bd58ee98c82/hidapi.py#L310-L326
def get_indexed_string(self, idx): """ Get a string from the device, based on its string index. :param idx: The index of the string to get :type idx: int :return: The string at the index :rtype: unicode """ self._check_device_status() bufp = ffi.new("wchar_t*") rv = hidapi.hid_get_indexed_string(self._device, idx, bufp, 65536) if rv == -1: raise IOError("Failed to read string with index {0} from HID " "device: {0}" .format(idx, self._get_last_error_string())) return ffi.buffer(bufp, 65536)[:].strip()
[ "def", "get_indexed_string", "(", "self", ",", "idx", ")", ":", "self", ".", "_check_device_status", "(", ")", "bufp", "=", "ffi", ".", "new", "(", "\"wchar_t*\"", ")", "rv", "=", "hidapi", ".", "hid_get_indexed_string", "(", "self", ".", "_device", ",", ...
Get a string from the device, based on its string index. :param idx: The index of the string to get :type idx: int :return: The string at the index :rtype: unicode
[ "Get", "a", "string", "from", "the", "device", "based", "on", "its", "string", "index", "." ]
python
train
luckydonald/pytgbot
code_generation/output/pytgbot/api_types/sendable/reply_markup.py
https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/code_generation/output/pytgbot/api_types/sendable/reply_markup.py#L201-L215
def to_array(self): """ Serializes this KeyboardButton to a dictionary. :return: dictionary representation of this object. :rtype: dict """ array = super(KeyboardButton, self).to_array() array['text'] = u(self.text) # py2: type unicode, py3: type str if self.request_contact is not None: array['request_contact'] = bool(self.request_contact) # type bool if self.request_location is not None: array['request_location'] = bool(self.request_location) # type bool return array
[ "def", "to_array", "(", "self", ")", ":", "array", "=", "super", "(", "KeyboardButton", ",", "self", ")", ".", "to_array", "(", ")", "array", "[", "'text'", "]", "=", "u", "(", "self", ".", "text", ")", "# py2: type unicode, py3: type str", "if", "self",...
Serializes this KeyboardButton to a dictionary. :return: dictionary representation of this object. :rtype: dict
[ "Serializes", "this", "KeyboardButton", "to", "a", "dictionary", "." ]
python
train
saltstack/salt
salt/cloud/clouds/vultrpy.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/vultrpy.py#L100-L122
def _cache_provider_details(conn=None): ''' Provide a place to hang onto results of --list-[locations|sizes|images] so we don't have to go out to the API and get them every time. ''' DETAILS['avail_locations'] = {} DETAILS['avail_sizes'] = {} DETAILS['avail_images'] = {} locations = avail_locations(conn) images = avail_images(conn) sizes = avail_sizes(conn) for key, location in six.iteritems(locations): DETAILS['avail_locations'][location['name']] = location DETAILS['avail_locations'][key] = location for key, image in six.iteritems(images): DETAILS['avail_images'][image['name']] = image DETAILS['avail_images'][key] = image for key, vm_size in six.iteritems(sizes): DETAILS['avail_sizes'][vm_size['name']] = vm_size DETAILS['avail_sizes'][key] = vm_size
[ "def", "_cache_provider_details", "(", "conn", "=", "None", ")", ":", "DETAILS", "[", "'avail_locations'", "]", "=", "{", "}", "DETAILS", "[", "'avail_sizes'", "]", "=", "{", "}", "DETAILS", "[", "'avail_images'", "]", "=", "{", "}", "locations", "=", "a...
Provide a place to hang onto results of --list-[locations|sizes|images] so we don't have to go out to the API and get them every time.
[ "Provide", "a", "place", "to", "hang", "onto", "results", "of", "--", "list", "-", "[", "locations|sizes|images", "]", "so", "we", "don", "t", "have", "to", "go", "out", "to", "the", "API", "and", "get", "them", "every", "time", "." ]
python
train
OpenKMIP/PyKMIP
kmip/core/primitives.py
https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/core/primitives.py#L754-L770
def write_value(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0): """ Write the value of the Boolean object to the output stream. Args: ostream (Stream): A buffer to contain the encoded bytes of the value of a Boolean object. Usually a BytearrayStream object. Required. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0. """ try: ostream.write(pack('!Q', self.value)) except Exception: self.logger.error("Error writing boolean value to buffer") raise
[ "def", "write_value", "(", "self", ",", "ostream", ",", "kmip_version", "=", "enums", ".", "KMIPVersion", ".", "KMIP_1_0", ")", ":", "try", ":", "ostream", ".", "write", "(", "pack", "(", "'!Q'", ",", "self", ".", "value", ")", ")", "except", "Exceptio...
Write the value of the Boolean object to the output stream. Args: ostream (Stream): A buffer to contain the encoded bytes of the value of a Boolean object. Usually a BytearrayStream object. Required. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0.
[ "Write", "the", "value", "of", "the", "Boolean", "object", "to", "the", "output", "stream", "." ]
python
test
googledatalab/pydatalab
google/datalab/ml/_feature_slice_view.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_feature_slice_view.py#L46-L88
def plot(self, data): """ Plots a featire slice view on given data. Args: data: Can be one of: A string of sql query. A sql query module defined by "%%sql --module module_name". A pandas DataFrame. Regardless of data type, it must include the following columns: "feature": identifies a slice of features. For example: "petal_length:4.0-4.2". "count": number of instances in that slice of features. All other columns are viewed as metrics for its feature slice. At least one is required. """ import IPython if ((sys.version_info.major > 2 and isinstance(data, str)) or (sys.version_info.major <= 2 and isinstance(data, basestring))): data = bq.Query(data) if isinstance(data, bq.Query): df = data.execute().result().to_dataframe() data = self._get_lantern_format(df) elif isinstance(data, pd.core.frame.DataFrame): data = self._get_lantern_format(data) else: raise Exception('data needs to be a sql query, or a pandas DataFrame.') HTML_TEMPLATE = """<link rel="import" href="/nbextensions/gcpdatalab/extern/lantern-browser.html" > <lantern-browser id="{html_id}"></lantern-browser> <script> var browser = document.querySelector('#{html_id}'); browser.metrics = {metrics}; browser.data = {data}; browser.sourceType = 'colab'; browser.weightedExamplesColumn = 'count'; browser.calibrationPlotUriFn = function(s) {{ return '/' + s; }} </script>""" # Serialize the data and list of metrics names to JSON string. metrics_str = str(map(str, data[0]['metricValues'].keys())) data_str = str([{str(k): json.dumps(v) for k, v in elem.iteritems()} for elem in data]) html_id = 'l' + datalab.utils.commands.Html.next_id() html = HTML_TEMPLATE.format(html_id=html_id, metrics=metrics_str, data=data_str) IPython.display.display(IPython.display.HTML(html))
[ "def", "plot", "(", "self", ",", "data", ")", ":", "import", "IPython", "if", "(", "(", "sys", ".", "version_info", ".", "major", ">", "2", "and", "isinstance", "(", "data", ",", "str", ")", ")", "or", "(", "sys", ".", "version_info", ".", "major",...
Plots a featire slice view on given data. Args: data: Can be one of: A string of sql query. A sql query module defined by "%%sql --module module_name". A pandas DataFrame. Regardless of data type, it must include the following columns: "feature": identifies a slice of features. For example: "petal_length:4.0-4.2". "count": number of instances in that slice of features. All other columns are viewed as metrics for its feature slice. At least one is required.
[ "Plots", "a", "featire", "slice", "view", "on", "given", "data", "." ]
python
train
estnltk/estnltk
estnltk/textcleaner.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/textcleaner.py#L36-L38
def clean(self, text): """Remove all unwanted characters from text.""" return ''.join([c for c in text if c in self.alphabet])
[ "def", "clean", "(", "self", ",", "text", ")", ":", "return", "''", ".", "join", "(", "[", "c", "for", "c", "in", "text", "if", "c", "in", "self", ".", "alphabet", "]", ")" ]
Remove all unwanted characters from text.
[ "Remove", "all", "unwanted", "characters", "from", "text", "." ]
python
train
project-ncl/pnc-cli
pnc_cli/buildrecords.py
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/buildrecords.py#L91-L97
def list_built_artifacts(id, page_size=200, page_index=0, sort="", q=""): """ List Artifacts associated with a BuildRecord """ data = list_built_artifacts_raw(id, page_size, page_index, sort, q) if data: return utils.format_json_list(data)
[ "def", "list_built_artifacts", "(", "id", ",", "page_size", "=", "200", ",", "page_index", "=", "0", ",", "sort", "=", "\"\"", ",", "q", "=", "\"\"", ")", ":", "data", "=", "list_built_artifacts_raw", "(", "id", ",", "page_size", ",", "page_index", ",", ...
List Artifacts associated with a BuildRecord
[ "List", "Artifacts", "associated", "with", "a", "BuildRecord" ]
python
train
gwastro/pycbc-glue
pycbc_glue/lal.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/lal.py#L652-L657
def fromfile(cls, fileobj, coltype=LIGOTimeGPS): """ Return a Cache object whose entries are read from an open file. """ c = [cls.entry_class(line, coltype=coltype) for line in fileobj] return cls(c)
[ "def", "fromfile", "(", "cls", ",", "fileobj", ",", "coltype", "=", "LIGOTimeGPS", ")", ":", "c", "=", "[", "cls", ".", "entry_class", "(", "line", ",", "coltype", "=", "coltype", ")", "for", "line", "in", "fileobj", "]", "return", "cls", "(", "c", ...
Return a Cache object whose entries are read from an open file.
[ "Return", "a", "Cache", "object", "whose", "entries", "are", "read", "from", "an", "open", "file", "." ]
python
train
adewes/blitzdb
blitzdb/backends/file/index.py
https://github.com/adewes/blitzdb/blob/4b459e0bcde9e1f6224dd4e3bea74194586864b0/blitzdb/backends/file/index.py#L300-L331
def add_key(self, attributes, store_key): """Add key to the index. :param attributes: Attributes to be added to the index :type attributes: dict(str) :param store_key: The key for the document in the store :type store_key: str """ undefined = False try: value = self.get_value(attributes) except (KeyError, IndexError): undefined = True # We remove old values in _reverse_index self.remove_key(store_key) if not undefined: if isinstance(value, (list,tuple)): # We add an extra hash value for the list itself # (this allows for querying the whole list) values = value hash_value = self.get_hash_for(value) self.add_hashed_value(hash_value, store_key) else: values = [value] for value in values: hash_value = self.get_hash_for(value) self.add_hashed_value(hash_value, store_key) else: self.add_undefined(store_key)
[ "def", "add_key", "(", "self", ",", "attributes", ",", "store_key", ")", ":", "undefined", "=", "False", "try", ":", "value", "=", "self", ".", "get_value", "(", "attributes", ")", "except", "(", "KeyError", ",", "IndexError", ")", ":", "undefined", "=",...
Add key to the index. :param attributes: Attributes to be added to the index :type attributes: dict(str) :param store_key: The key for the document in the store :type store_key: str
[ "Add", "key", "to", "the", "index", "." ]
python
train
great-expectations/great_expectations
great_expectations/dataset/dataset.py
https://github.com/great-expectations/great_expectations/blob/08385c40529d4f14a1c46916788aecc47f33ee9d/great_expectations/dataset/dataset.py#L1135-L1177
def expect_column_values_to_match_strftime_format(self, column, strftime_format, mostly=None, result_format=None, include_config=False, catch_exceptions=None, meta=None ): """Expect column entries to be strings representing a date or time with a given format. expect_column_values_to_match_strftime_format is a :func:`column_map_expectation <great_expectations.data_asset.dataset.Dataset.column_map_expectation>`. Args: column (str): \ The column name. strftime_format (str): \ A strftime format string to use for matching Keyword Args: mostly (None or a float between 0 and 1): \ Return `"success": True` if at least mostly percent of values match the expectation. \ For more detail, see :ref:`mostly`. Other Parameters: result_format (str or None): \ Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. For more detail, see :ref:`result_format <result_format>`. include_config (boolean): \ If True, then include the expectation config as part of the result object. \ For more detail, see :ref:`include_config`. catch_exceptions (boolean or None): \ If True, then catch exceptions and include them as part of the result object. \ For more detail, see :ref:`catch_exceptions`. meta (dict or None): \ A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \ For more detail, see :ref:`meta`. Returns: A JSON-serializable expectation result object. Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`. """ raise NotImplementedError
[ "def", "expect_column_values_to_match_strftime_format", "(", "self", ",", "column", ",", "strftime_format", ",", "mostly", "=", "None", ",", "result_format", "=", "None", ",", "include_config", "=", "False", ",", "catch_exceptions", "=", "None", ",", "meta", "=", ...
Expect column entries to be strings representing a date or time with a given format. expect_column_values_to_match_strftime_format is a :func:`column_map_expectation <great_expectations.data_asset.dataset.Dataset.column_map_expectation>`. Args: column (str): \ The column name. strftime_format (str): \ A strftime format string to use for matching Keyword Args: mostly (None or a float between 0 and 1): \ Return `"success": True` if at least mostly percent of values match the expectation. \ For more detail, see :ref:`mostly`. Other Parameters: result_format (str or None): \ Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. For more detail, see :ref:`result_format <result_format>`. include_config (boolean): \ If True, then include the expectation config as part of the result object. \ For more detail, see :ref:`include_config`. catch_exceptions (boolean or None): \ If True, then catch exceptions and include them as part of the result object. \ For more detail, see :ref:`catch_exceptions`. meta (dict or None): \ A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \ For more detail, see :ref:`meta`. Returns: A JSON-serializable expectation result object. Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
[ "Expect", "column", "entries", "to", "be", "strings", "representing", "a", "date", "or", "time", "with", "a", "given", "format", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/rl/trainer_model_based_params.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/trainer_model_based_params.py#L294-L300
def rlmb_base_stochastic(): """Base setting with a stochastic next-frame model.""" hparams = rlmb_base() hparams.initial_epoch_train_steps_multiplier = 5 hparams.generative_model = "next_frame_basic_stochastic" hparams.generative_model_params = "next_frame_basic_stochastic" return hparams
[ "def", "rlmb_base_stochastic", "(", ")", ":", "hparams", "=", "rlmb_base", "(", ")", "hparams", ".", "initial_epoch_train_steps_multiplier", "=", "5", "hparams", ".", "generative_model", "=", "\"next_frame_basic_stochastic\"", "hparams", ".", "generative_model_params", ...
Base setting with a stochastic next-frame model.
[ "Base", "setting", "with", "a", "stochastic", "next", "-", "frame", "model", "." ]
python
train
yyuu/botornado
boto/sts/credentials.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/sts/credentials.py#L63-L75
def load(cls, file_path): """ Create and return a new Session Token based on the contents of a previously saved JSON-format file. :type file_path: str :param file_path: The fully qualified path to the JSON-format file containing the previously saved Session Token information. """ fp = open(file_path) json_doc = fp.read() fp.close() return cls.from_json(json_doc)
[ "def", "load", "(", "cls", ",", "file_path", ")", ":", "fp", "=", "open", "(", "file_path", ")", "json_doc", "=", "fp", ".", "read", "(", ")", "fp", ".", "close", "(", ")", "return", "cls", ".", "from_json", "(", "json_doc", ")" ]
Create and return a new Session Token based on the contents of a previously saved JSON-format file. :type file_path: str :param file_path: The fully qualified path to the JSON-format file containing the previously saved Session Token information.
[ "Create", "and", "return", "a", "new", "Session", "Token", "based", "on", "the", "contents", "of", "a", "previously", "saved", "JSON", "-", "format", "file", "." ]
python
train
googledatalab/pydatalab
datalab/bigquery/_api.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L300-L309
def datasets_update(self, dataset_name, dataset_info): """Updates the Dataset info. Args: dataset_name: the name of the dataset to update as a tuple of components. dataset_info: the Dataset resource with updated fields. """ url = Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name) return datalab.utils.Http.request(url, method='PUT', data=dataset_info, credentials=self._credentials)
[ "def", "datasets_update", "(", "self", ",", "dataset_name", ",", "dataset_info", ")", ":", "url", "=", "Api", ".", "_ENDPOINT", "+", "(", "Api", ".", "_DATASETS_PATH", "%", "dataset_name", ")", "return", "datalab", ".", "utils", ".", "Http", ".", "request"...
Updates the Dataset info. Args: dataset_name: the name of the dataset to update as a tuple of components. dataset_info: the Dataset resource with updated fields.
[ "Updates", "the", "Dataset", "info", "." ]
python
train
frictionlessdata/tableschema-py
tableschema/schema.py
https://github.com/frictionlessdata/tableschema-py/blob/9c5fa930319e7c5b10351f794091c5f9de5e8684/tableschema/schema.py#L72-L85
def foreign_keys(self): """https://github.com/frictionlessdata/tableschema-py#schema """ foreign_keys = self.__current_descriptor.get('foreignKeys', []) for key in foreign_keys: key.setdefault('fields', []) key.setdefault('reference', {}) key['reference'].setdefault('resource', '') key['reference'].setdefault('fields', []) if not isinstance(key['fields'], list): key['fields'] = [key['fields']] if not isinstance(key['reference']['fields'], list): key['reference']['fields'] = [key['reference']['fields']] return foreign_keys
[ "def", "foreign_keys", "(", "self", ")", ":", "foreign_keys", "=", "self", ".", "__current_descriptor", ".", "get", "(", "'foreignKeys'", ",", "[", "]", ")", "for", "key", "in", "foreign_keys", ":", "key", ".", "setdefault", "(", "'fields'", ",", "[", "]...
https://github.com/frictionlessdata/tableschema-py#schema
[ "https", ":", "//", "github", ".", "com", "/", "frictionlessdata", "/", "tableschema", "-", "py#schema" ]
python
train
datalib/StatsCounter
statscounter/_stats.py
https://github.com/datalib/StatsCounter/blob/5386c967808bbe451025af1d550f060cd7f86669/statscounter/_stats.py#L64-L85
def mean(data): """Return the sample arithmetic mean of data. >>> mean([1, 2, 3, 4, 4]) 2.8 >>> from fractions import Fraction as F >>> mean([F(3, 7), F(1, 21), F(5, 3), F(1, 3)]) Fraction(13, 21) >>> from decimal import Decimal as D >>> mean([D("0.5"), D("0.75"), D("0.625"), D("0.375")]) Decimal('0.5625') If ``data`` is empty, StatisticsError will be raised. """ if iter(data) is data: data = list(data) n = len(data) if n < 1: raise StatisticsError('mean requires at least one data point') return _sum(data)/n
[ "def", "mean", "(", "data", ")", ":", "if", "iter", "(", "data", ")", "is", "data", ":", "data", "=", "list", "(", "data", ")", "n", "=", "len", "(", "data", ")", "if", "n", "<", "1", ":", "raise", "StatisticsError", "(", "'mean requires at least o...
Return the sample arithmetic mean of data. >>> mean([1, 2, 3, 4, 4]) 2.8 >>> from fractions import Fraction as F >>> mean([F(3, 7), F(1, 21), F(5, 3), F(1, 3)]) Fraction(13, 21) >>> from decimal import Decimal as D >>> mean([D("0.5"), D("0.75"), D("0.625"), D("0.375")]) Decimal('0.5625') If ``data`` is empty, StatisticsError will be raised.
[ "Return", "the", "sample", "arithmetic", "mean", "of", "data", "." ]
python
train
ethereum/eth-abi
eth_abi/codec.py
https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/codec.py#L50-L65
def encode_single(self, typ: TypeStr, arg: Any) -> bytes: """ Encodes the python value ``arg`` as a binary value of the ABI type ``typ``. :param typ: The string representation of the ABI type that will be used for encoding e.g. ``'uint256'``, ``'bytes[]'``, ``'(int,int)'``, etc. :param arg: The python value to be encoded. :returns: The binary representation of the python value ``arg`` as a value of the ABI type ``typ``. """ encoder = self._registry.get_encoder(typ) return encoder(arg)
[ "def", "encode_single", "(", "self", ",", "typ", ":", "TypeStr", ",", "arg", ":", "Any", ")", "->", "bytes", ":", "encoder", "=", "self", ".", "_registry", ".", "get_encoder", "(", "typ", ")", "return", "encoder", "(", "arg", ")" ]
Encodes the python value ``arg`` as a binary value of the ABI type ``typ``. :param typ: The string representation of the ABI type that will be used for encoding e.g. ``'uint256'``, ``'bytes[]'``, ``'(int,int)'``, etc. :param arg: The python value to be encoded. :returns: The binary representation of the python value ``arg`` as a value of the ABI type ``typ``.
[ "Encodes", "the", "python", "value", "arg", "as", "a", "binary", "value", "of", "the", "ABI", "type", "typ", "." ]
python
train
atlassian-api/atlassian-python-api
atlassian/jira.py
https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/jira.py#L752-L756
def get_issue_link_type(self, issue_link_type_id): """Returns for a given issue link type id all information about this issue link type. """ url = 'rest/api/2/issueLinkType/{issueLinkTypeId}'.format(issueLinkTypeId=issue_link_type_id) return self.get(url)
[ "def", "get_issue_link_type", "(", "self", ",", "issue_link_type_id", ")", ":", "url", "=", "'rest/api/2/issueLinkType/{issueLinkTypeId}'", ".", "format", "(", "issueLinkTypeId", "=", "issue_link_type_id", ")", "return", "self", ".", "get", "(", "url", ")" ]
Returns for a given issue link type id all information about this issue link type.
[ "Returns", "for", "a", "given", "issue", "link", "type", "id", "all", "information", "about", "this", "issue", "link", "type", "." ]
python
train
jtpaasch/simplygithub
simplygithub/internals/blobs.py
https://github.com/jtpaasch/simplygithub/blob/b77506275ec276ce90879bf1ea9299a79448b903/simplygithub/internals/blobs.py#L16-L35
def get_blob(profile, sha): """Fetch a blob. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. sha The SHA of the blob to fetch. Returns: A dict with data about the blob. """ resource = "/blobs/" + sha data = api.get_request(profile, resource) return prepare(data)
[ "def", "get_blob", "(", "profile", ",", "sha", ")", ":", "resource", "=", "\"/blobs/\"", "+", "sha", "data", "=", "api", ".", "get_request", "(", "profile", ",", "resource", ")", "return", "prepare", "(", "data", ")" ]
Fetch a blob. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. sha The SHA of the blob to fetch. Returns: A dict with data about the blob.
[ "Fetch", "a", "blob", "." ]
python
train
urbn/Caesium
caesium/handler.py
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L559-L574
def put(self, id): """ Update a revision by ID :param id: BSON id :return: """ collection_name = self.request.headers.get("collection") if not collection_name: self.raise_error(400, "Missing a collection name header") self.client = BaseAsyncMotorDocument("%s_revisions" % collection_name) super(self.__class__, self).put(id)
[ "def", "put", "(", "self", ",", "id", ")", ":", "collection_name", "=", "self", ".", "request", ".", "headers", ".", "get", "(", "\"collection\"", ")", "if", "not", "collection_name", ":", "self", ".", "raise_error", "(", "400", ",", "\"Missing a collectio...
Update a revision by ID :param id: BSON id :return:
[ "Update", "a", "revision", "by", "ID" ]
python
train
javipalanca/spade
spade/message.py
https://github.com/javipalanca/spade/blob/59942bd1a1edae4c807d06cabb178d5630cbf61b/spade/message.py#L100-L110
def sender(self, jid: str): """ Set jid of the sender Args: jid (str): jid of the sender """ if jid is not None and not isinstance(jid, str): raise TypeError("'sender' MUST be a string") self._sender = aioxmpp.JID.fromstr(jid) if jid is not None else None
[ "def", "sender", "(", "self", ",", "jid", ":", "str", ")", ":", "if", "jid", "is", "not", "None", "and", "not", "isinstance", "(", "jid", ",", "str", ")", ":", "raise", "TypeError", "(", "\"'sender' MUST be a string\"", ")", "self", ".", "_sender", "="...
Set jid of the sender Args: jid (str): jid of the sender
[ "Set", "jid", "of", "the", "sender" ]
python
train
radujica/baloo
baloo/core/series.py
https://github.com/radujica/baloo/blob/f6e05e35b73a75e8a300754c6bdc575e5f2d53b9/baloo/core/series.py#L551-L572
def from_pandas(cls, series): """Create baloo Series from pandas Series. Parameters ---------- series : pandas.series.Series Returns ------- Series """ from pandas import Index as PandasIndex, MultiIndex as PandasMultiIndex if isinstance(series.index, PandasIndex): baloo_index = Index.from_pandas(series.index) elif isinstance(series.index, PandasMultiIndex): baloo_index = MultiIndex.from_pandas(series.index) else: raise TypeError('Cannot convert pandas index of type={} to baloo'.format(type(series.index))) return _series_from_pandas(series, baloo_index)
[ "def", "from_pandas", "(", "cls", ",", "series", ")", ":", "from", "pandas", "import", "Index", "as", "PandasIndex", ",", "MultiIndex", "as", "PandasMultiIndex", "if", "isinstance", "(", "series", ".", "index", ",", "PandasIndex", ")", ":", "baloo_index", "=...
Create baloo Series from pandas Series. Parameters ---------- series : pandas.series.Series Returns ------- Series
[ "Create", "baloo", "Series", "from", "pandas", "Series", "." ]
python
train
insomnia-lab/libreant
libreantdb/api.py
https://github.com/insomnia-lab/libreant/blob/55d529435baf4c05a86b8341899e9f5e14e50245/libreantdb/api.py#L211-L222
def clone_index(self, new_indexname, index_conf=None): '''Clone current index All entries of the current index will be copied into the newly created one named `new_indexname` :param index_conf: Configuration to be used in the new index creation. This param will be passed directly to :py:func:`DB.create_index` ''' log.debug("Cloning index '{}' into '{}'".format(self.index_name, new_indexname)) self.create_index(indexname=new_indexname, index_conf=index_conf) reindex(self.es, self.index_name, new_indexname)
[ "def", "clone_index", "(", "self", ",", "new_indexname", ",", "index_conf", "=", "None", ")", ":", "log", ".", "debug", "(", "\"Cloning index '{}' into '{}'\"", ".", "format", "(", "self", ".", "index_name", ",", "new_indexname", ")", ")", "self", ".", "crea...
Clone current index All entries of the current index will be copied into the newly created one named `new_indexname` :param index_conf: Configuration to be used in the new index creation. This param will be passed directly to :py:func:`DB.create_index`
[ "Clone", "current", "index" ]
python
train
quantopian/zipline
zipline/algorithm.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L408-L421
def init_engine(self, get_loader): """ Construct and store a PipelineEngine from loader. If get_loader is None, constructs an ExplodingPipelineEngine """ if get_loader is not None: self.engine = SimplePipelineEngine( get_loader, self.asset_finder, self.default_pipeline_domain(self.trading_calendar), ) else: self.engine = ExplodingPipelineEngine()
[ "def", "init_engine", "(", "self", ",", "get_loader", ")", ":", "if", "get_loader", "is", "not", "None", ":", "self", ".", "engine", "=", "SimplePipelineEngine", "(", "get_loader", ",", "self", ".", "asset_finder", ",", "self", ".", "default_pipeline_domain", ...
Construct and store a PipelineEngine from loader. If get_loader is None, constructs an ExplodingPipelineEngine
[ "Construct", "and", "store", "a", "PipelineEngine", "from", "loader", "." ]
python
train
ChristopherRabotin/bungiesearch
bungiesearch/__init__.py
https://github.com/ChristopherRabotin/bungiesearch/blob/13768342bc2698b214eb0003c2d113b6e273c30d/bungiesearch/__init__.py#L155-L210
def map_raw_results(cls, raw_results, instance=None): ''' Maps raw results to database model objects. :param raw_results: list raw results as returned from elasticsearch-dsl-py. :param instance: Bungiesearch instance if you want to make use of `.only()` or `optmize_queries` as defined in the ModelIndex. :return: list of mapped results in the *same* order as returned by elasticsearch. ''' # Let's iterate over the results and determine the appropriate mapping. model_results = defaultdict(list) # Initializing the list to the number of returned results. This allows us to restore each item in its position. if hasattr(raw_results, 'hits'): results = [None] * len(raw_results.hits) else: results = [None] * len(raw_results) found_results = {} for pos, result in enumerate(raw_results): model_name = result.meta.doc_type if model_name not in Bungiesearch._model_name_to_index or result.meta.index not in Bungiesearch._model_name_to_index[model_name]: logger.warning('Returned object of type {} ({}) is not defined in the settings, or is not associated to the same index as in the settings.'.format(model_name, result)) results[pos] = result else: meta = Bungiesearch.get_model_index(model_name).Meta model_results['{}.{}'.format(result.meta.index, model_name)].append(result.meta.id) found_results['{1.meta.index}.{0}.{1.meta.id}'.format(model_name, result)] = (pos, result.meta) # Now that we have model ids per model name, let's fetch everything at once. 
for ref_name, ids in iteritems(model_results): index_name, model_name = ref_name.split('.') model_idx = Bungiesearch._idx_name_to_mdl_to_mdlidx[index_name][model_name] model_obj = model_idx.get_model() items = model_obj.objects.filter(pk__in=ids) if instance: if instance._only == '__model' or model_idx.optimize_queries: desired_fields = model_idx.fields_to_fetch elif instance._only == '__fields': desired_fields = instance._fields else: desired_fields = instance._only if desired_fields: # Prevents setting the database fetch to __fields but not having specified any field to elasticsearch. items = items.only( *[field.name for field in model_obj._meta.get_fields() # For complete backwards compatibility, you may want to exclude # GenericForeignKey from the results. if field.name in desired_fields and \ not (field.many_to_one and field.related_model is None) ] ) # Let's reposition each item in the results and set the _searchmeta meta information. for item in items: pos, meta = found_results['{}.{}.{}'.format(index_name, model_name, item.pk)] item._searchmeta = meta results[pos] = item return results
[ "def", "map_raw_results", "(", "cls", ",", "raw_results", ",", "instance", "=", "None", ")", ":", "# Let's iterate over the results and determine the appropriate mapping.", "model_results", "=", "defaultdict", "(", "list", ")", "# Initializing the list to the number of returned...
Maps raw results to database model objects. :param raw_results: list raw results as returned from elasticsearch-dsl-py. :param instance: Bungiesearch instance if you want to make use of `.only()` or `optmize_queries` as defined in the ModelIndex. :return: list of mapped results in the *same* order as returned by elasticsearch.
[ "Maps", "raw", "results", "to", "database", "model", "objects", ".", ":", "param", "raw_results", ":", "list", "raw", "results", "as", "returned", "from", "elasticsearch", "-", "dsl", "-", "py", ".", ":", "param", "instance", ":", "Bungiesearch", "instance",...
python
train
bitshares/python-bitshares
bitshares/bitshares.py
https://github.com/bitshares/python-bitshares/blob/8a3b5954a6abcaaff7c6a5c41d910e58eea3142f/bitshares/bitshares.py#L541-L570
def update_memo_key(self, key, account=None, **kwargs): """ Update an account's memo public key This method does **not** add any private keys to your wallet but merely changes the memo public key. :param str key: New memo public key :param str account: (optional) the account to allow access to (defaults to ``default_account``) """ if not account: if "default_account" in self.config: account = self.config["default_account"] if not account: raise ValueError("You need to provide an account") PublicKey(key, prefix=self.prefix) account = Account(account, blockchain_instance=self) account["options"]["memo_key"] = key op = operations.Account_update( **{ "fee": {"amount": 0, "asset_id": "1.3.0"}, "account": account["id"], "new_options": account["options"], "extensions": {}, "prefix": self.prefix, } ) return self.finalizeOp(op, account["name"], "active", **kwargs)
[ "def", "update_memo_key", "(", "self", ",", "key", ",", "account", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "account", ":", "if", "\"default_account\"", "in", "self", ".", "config", ":", "account", "=", "self", ".", "config", "[", ...
Update an account's memo public key This method does **not** add any private keys to your wallet but merely changes the memo public key. :param str key: New memo public key :param str account: (optional) the account to allow access to (defaults to ``default_account``)
[ "Update", "an", "account", "s", "memo", "public", "key" ]
python
train
juju/charm-helpers
charmhelpers/contrib/storage/linux/utils.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/storage/linux/utils.py#L78-L95
def zap_disk(block_device): ''' Clear a block device of partition table. Relies on sgdisk, which is installed as pat of the 'gdisk' package in Ubuntu. :param block_device: str: Full path of block device to clean. ''' # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b # sometimes sgdisk exits non-zero; this is OK, dd will clean up call(['sgdisk', '--zap-all', '--', block_device]) call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device]) dev_end = check_output(['blockdev', '--getsz', block_device]).decode('UTF-8') gpt_end = int(dev_end.split()[0]) - 100 check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), 'bs=1M', 'count=1']) check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), 'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
[ "def", "zap_disk", "(", "block_device", ")", ":", "# https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b", "# sometimes sgdisk exits non-zero; this is OK, dd will clean up", "call", "(", "[", "'sgdisk'", ",", "'--zap-all'", ",", "'--'", ",", "block_device",...
Clear a block device of partition table. Relies on sgdisk, which is installed as pat of the 'gdisk' package in Ubuntu. :param block_device: str: Full path of block device to clean.
[ "Clear", "a", "block", "device", "of", "partition", "table", ".", "Relies", "on", "sgdisk", "which", "is", "installed", "as", "pat", "of", "the", "gdisk", "package", "in", "Ubuntu", "." ]
python
train
presslabs/z3
z3/pput.py
https://github.com/presslabs/z3/blob/965898cccddd351ce4c56402a215c3bda9f37b5e/z3/pput.py#L32-L46
def multipart_etag(digests): """ Computes etag for multipart uploads :type digests: list of hex-encoded md5 sums (string) :param digests: The list of digests for each individual chunk. :rtype: string :returns: The etag computed from the individual chunks. """ etag = hashlib.md5() count = 0 for dig in digests: count += 1 etag.update(binascii.a2b_hex(dig)) return '"{}-{}"'.format(etag.hexdigest(), count)
[ "def", "multipart_etag", "(", "digests", ")", ":", "etag", "=", "hashlib", ".", "md5", "(", ")", "count", "=", "0", "for", "dig", "in", "digests", ":", "count", "+=", "1", "etag", ".", "update", "(", "binascii", ".", "a2b_hex", "(", "dig", ")", ")"...
Computes etag for multipart uploads :type digests: list of hex-encoded md5 sums (string) :param digests: The list of digests for each individual chunk. :rtype: string :returns: The etag computed from the individual chunks.
[ "Computes", "etag", "for", "multipart", "uploads", ":", "type", "digests", ":", "list", "of", "hex", "-", "encoded", "md5", "sums", "(", "string", ")", ":", "param", "digests", ":", "The", "list", "of", "digests", "for", "each", "individual", "chunk", "....
python
train
IBM/ibm-cos-sdk-python-s3transfer
ibm_s3transfer/aspera/manager.py
https://github.com/IBM/ibm-cos-sdk-python-s3transfer/blob/24ba53137213e26e6b8fc2c3ec1e8198d507d22b/ibm_s3transfer/aspera/manager.py#L581-L620
def _process_waiting_queue(self): ''' thread to processes the waiting queue fetches transfer spec then calls start transfer ensures that max ascp is not exceeded ''' logger.info("Queue processing thread started") while not self.is_stop(): self._processing_event.wait(3) self._processing_event.clear() if self.is_stop(): break while self.waiting_coordinator_count() > 0: if self.is_stop(): break _used_slots = self.tracked_coordinator_count(True) _free_slots = self._config.ascp_max_concurrent - _used_slots if _free_slots <= 0: break with self._lockw: # check are there enough free slots _req_slots = self._waiting_transfer_coordinators[0].session_count if _req_slots > _free_slots: break _coordinator = self._waiting_transfer_coordinators.popleft() self.add_transfer_coordinator(_coordinator) if not _coordinator.set_transfer_spec(): self.remove_aspera_coordinator(_coordinator) else: logger.info("ASCP process queue - Max(%d) InUse(%d) Free(%d) New(%d)" % (self._config.ascp_max_concurrent, _used_slots, _free_slots, _req_slots)) _coordinator.start_transfer() logger.info("Queue processing thread stopped") self._processing_stopped_event.set()
[ "def", "_process_waiting_queue", "(", "self", ")", ":", "logger", ".", "info", "(", "\"Queue processing thread started\"", ")", "while", "not", "self", ".", "is_stop", "(", ")", ":", "self", ".", "_processing_event", ".", "wait", "(", "3", ")", "self", ".", ...
thread to processes the waiting queue fetches transfer spec then calls start transfer ensures that max ascp is not exceeded
[ "thread", "to", "processes", "the", "waiting", "queue", "fetches", "transfer", "spec", "then", "calls", "start", "transfer", "ensures", "that", "max", "ascp", "is", "not", "exceeded" ]
python
train
bcbio/bcbio-nextgen
bcbio/provenance/system.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/system.py#L159-L186
def _sge_get_mem(xmlstring, queue_name): """ Get memory information from qhost """ rootxml = ET.fromstring(xmlstring) my_machine_dict = {} # on some machines rootxml.tag looks like "{...}qhost" where the "{...}" gets prepended to all attributes rootTag = rootxml.tag.rstrip("qhost") for host in rootxml.findall(rootTag + 'host'): # find all hosts supporting queues for queues in host.findall(rootTag + 'queue'): # if the user specified queue matches that in the xml: if not queue_name or any(q in queues.attrib['name'] for q in queue_name.split(",")): my_machine_dict[host.attrib['name']] = {} # values from xml for number of processors and mem_total on each machine for hostvalues in host.findall(rootTag + 'hostvalue'): if('mem_total' == hostvalues.attrib['name']): if hostvalues.text.lower().endswith('g'): multip = 1 elif hostvalues.text.lower().endswith('m'): multip = 1 / float(1024) elif hostvalues.text.lower().endswith('t'): multip = 1024 else: raise Exception("Unrecognized suffix in mem_tot from SGE") my_machine_dict[host.attrib['name']]['mem_total'] = \ float(hostvalues.text[:-1]) * float(multip) break return my_machine_dict
[ "def", "_sge_get_mem", "(", "xmlstring", ",", "queue_name", ")", ":", "rootxml", "=", "ET", ".", "fromstring", "(", "xmlstring", ")", "my_machine_dict", "=", "{", "}", "# on some machines rootxml.tag looks like \"{...}qhost\" where the \"{...}\" gets prepended to all attribut...
Get memory information from qhost
[ "Get", "memory", "information", "from", "qhost" ]
python
train
KelSolaar/Foundations
foundations/trace.py
https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/trace.py#L311-L324
def get_method_name(method): """ Returns given method name. :param method: Method to retrieve the name. :type method: object :return: Method name. :rtype: unicode """ name = get_object_name(method) if name.startswith("__") and not name.endswith("__"): name = "_{0}{1}".format(get_object_name(method.im_class), name) return name
[ "def", "get_method_name", "(", "method", ")", ":", "name", "=", "get_object_name", "(", "method", ")", "if", "name", ".", "startswith", "(", "\"__\"", ")", "and", "not", "name", ".", "endswith", "(", "\"__\"", ")", ":", "name", "=", "\"_{0}{1}\"", ".", ...
Returns given method name. :param method: Method to retrieve the name. :type method: object :return: Method name. :rtype: unicode
[ "Returns", "given", "method", "name", "." ]
python
train
MillionIntegrals/vel
vel/storage/classic.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/storage/classic.py#L161-L168
def create(model_config, backend, checkpoint_strategy, streaming=None): """ Vel factory function """ return ClassicStorage( model_config=model_config, backend=backend, checkpoint_strategy=checkpoint_strategy, streaming=streaming )
[ "def", "create", "(", "model_config", ",", "backend", ",", "checkpoint_strategy", ",", "streaming", "=", "None", ")", ":", "return", "ClassicStorage", "(", "model_config", "=", "model_config", ",", "backend", "=", "backend", ",", "checkpoint_strategy", "=", "che...
Vel factory function
[ "Vel", "factory", "function" ]
python
train
dcaune/perseus-lib-python-common
exifread/tags/makernote/olympus.py
https://github.com/dcaune/perseus-lib-python-common/blob/ba48fe0fd9bb4a75b53e7d10c41ada36a72d4496/exifread/tags/makernote/olympus.py#L4-L21
def special_mode(v): """decode Olympus SpecialMode tag in MakerNote""" mode1 = { 0: 'Normal', 1: 'Unknown', 2: 'Fast', 3: 'Panorama', } mode2 = { 0: 'Non-panoramic', 1: 'Left to right', 2: 'Right to left', 3: 'Bottom to top', 4: 'Top to bottom', } if not v or (v[0] not in mode1 or v[2] not in mode2): return v return '%s - sequence %d - %s' % (mode1[v[0]], v[1], mode2[v[2]])
[ "def", "special_mode", "(", "v", ")", ":", "mode1", "=", "{", "0", ":", "'Normal'", ",", "1", ":", "'Unknown'", ",", "2", ":", "'Fast'", ",", "3", ":", "'Panorama'", ",", "}", "mode2", "=", "{", "0", ":", "'Non-panoramic'", ",", "1", ":", "'Left ...
decode Olympus SpecialMode tag in MakerNote
[ "decode", "Olympus", "SpecialMode", "tag", "in", "MakerNote" ]
python
train
NoviceLive/intellicoder
intellicoder/sources.py
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/sources.py#L77-L93
def make_c_header(name, front, body): """ Build a C header from the front and body. """ return """ {0} # ifndef _GU_ZHENGXIONG_{1}_H # define _GU_ZHENGXIONG_{1}_H {2} # endif /* {3}.h */ """.strip().format(front, name.upper(), body, name) + '\n'
[ "def", "make_c_header", "(", "name", ",", "front", ",", "body", ")", ":", "return", "\"\"\"\n{0}\n\n\n# ifndef _GU_ZHENGXIONG_{1}_H\n# define _GU_ZHENGXIONG_{1}_H\n\n\n{2}\n\n\n# endif /* {3}.h */\n \"\"\"", ".", "strip", "(", ")", ".", "format", "(", "front", ",", "name...
Build a C header from the front and body.
[ "Build", "a", "C", "header", "from", "the", "front", "and", "body", "." ]
python
train
cackharot/suds-py3
suds/xsd/schema.py
https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/xsd/schema.py#L313-L334
def dereference(self): """ Instruct all children to perform dereferencing. """ all = [] indexes = {} for child in self.children: child.content(all) deplist = DepList() for x in all: x.qualify() midx, deps = x.dependencies() item = (x, tuple(deps)) deplist.add(item) indexes[x] = midx for x, deps in deplist.sort(): midx = indexes.get(x) if midx is None: continue d = deps[midx] log.debug('(%s) merging %s <== %s', self.tns[1], Repr(x), Repr(d)) x.merge(d)
[ "def", "dereference", "(", "self", ")", ":", "all", "=", "[", "]", "indexes", "=", "{", "}", "for", "child", "in", "self", ".", "children", ":", "child", ".", "content", "(", "all", ")", "deplist", "=", "DepList", "(", ")", "for", "x", "in", "all...
Instruct all children to perform dereferencing.
[ "Instruct", "all", "children", "to", "perform", "dereferencing", "." ]
python
train
savoirfairelinux/num2words
num2words/lang_ID.py
https://github.com/savoirfairelinux/num2words/blob/f4b2bac098ae8e4850cf2f185f6ff52a5979641f/num2words/lang_ID.py#L53-L76
def split_by_3(self, number): """ starting here, it groups the number by three from the tail '1234567' -> (('1',),('234',),('567',)) :param number:str :rtype:tuple """ blocks = () length = len(number) if length < 3: blocks += ((number,),) else: len_of_first_block = length % 3 if len_of_first_block > 0: first_block = number[0:len_of_first_block], blocks += first_block, for i in range(len_of_first_block, length, 3): next_block = (number[i:i + 3],), blocks += next_block return blocks
[ "def", "split_by_3", "(", "self", ",", "number", ")", ":", "blocks", "=", "(", ")", "length", "=", "len", "(", "number", ")", "if", "length", "<", "3", ":", "blocks", "+=", "(", "(", "number", ",", ")", ",", ")", "else", ":", "len_of_first_block", ...
starting here, it groups the number by three from the tail '1234567' -> (('1',),('234',),('567',)) :param number:str :rtype:tuple
[ "starting", "here", "it", "groups", "the", "number", "by", "three", "from", "the", "tail", "1234567", "-", ">", "((", "1", ")", "(", "234", ")", "(", "567", "))", ":", "param", "number", ":", "str", ":", "rtype", ":", "tuple" ]
python
test
ctuning/ck
ck/repo/module/web/module.py
https://github.com/ctuning/ck/blob/7e009814e975f8742790d3106340088a46223714/ck/repo/module/web/module.py#L193-L226
def web_err(i): """ Input: { http - http object type - content type bin - bytes to output } Output: { return - 0 } """ http=i['http'] tp=i['type'] bin=i['bin'] try: bin=bin.decode('utf-8') except Exception as e: pass if tp=='json': rx=ck.dumps_json({'dict':{'return':1, 'error':bin}}) if rx['return']>0: bin2=rx['error'].encode('utf8') else: bin2=rx['string'].encode('utf-8') elif tp=='con': bin2=bin.encode('utf8') else: bin2=b'<html><body><pre>'+bin.encode('utf8')+b'</pre></body></html>' i['bin']=bin2 return web_out(i)
[ "def", "web_err", "(", "i", ")", ":", "http", "=", "i", "[", "'http'", "]", "tp", "=", "i", "[", "'type'", "]", "bin", "=", "i", "[", "'bin'", "]", "try", ":", "bin", "=", "bin", ".", "decode", "(", "'utf-8'", ")", "except", "Exception", "as", ...
Input: { http - http object type - content type bin - bytes to output } Output: { return - 0 }
[ "Input", ":", "{", "http", "-", "http", "object", "type", "-", "content", "type", "bin", "-", "bytes", "to", "output", "}" ]
python
train
prompt-toolkit/pyvim
pyvim/commands/handler.py
https://github.com/prompt-toolkit/pyvim/blob/5928b53b9d700863c1a06d2181a034a955f94594/pyvim/commands/handler.py#L48-L53
def _go_to_line(editor, line): """ Move cursor to this line in the current buffer. """ b = editor.application.current_buffer b.cursor_position = b.document.translate_row_col_to_index(max(0, int(line) - 1), 0)
[ "def", "_go_to_line", "(", "editor", ",", "line", ")", ":", "b", "=", "editor", ".", "application", ".", "current_buffer", "b", ".", "cursor_position", "=", "b", ".", "document", ".", "translate_row_col_to_index", "(", "max", "(", "0", ",", "int", "(", "...
Move cursor to this line in the current buffer.
[ "Move", "cursor", "to", "this", "line", "in", "the", "current", "buffer", "." ]
python
train
Kopachris/seshet
seshet/config.py
https://github.com/Kopachris/seshet/blob/d55bae01cff56762c5467138474145a2c17d1932/seshet/config.py#L156-L222
def build_bot(config_file=None): """Parse a config and return a SeshetBot instance. After, the bot can be run simply by calling .connect() and then .start() Optional arguments: config_file - valid file path or ConfigParser instance If config_file is None, will read default config defined in this module. """ from . import bot config = ConfigParser(interpolation=None) if config_file is None: config.read_string(default_config) elif isinstance(config_file, ConfigParser): config = config_file else: config.read(config_file) # shorter names db_conf = config['database'] conn_conf = config['connection'] client_conf = config['client'] log_conf = config['logging'] verbosity = config['debug']['verbosity'].lower() or 'notset' debug_file = config['debug']['file'] or None # add more as they're used if db_conf.getboolean('use_db'): db = DAL(db_conf['db_string']) build_db_tables(db) log_file = None log_fmts = {} else: db = None log_file = log_conf.pop('file') log_fmts = dict(log_conf) # debug logging debug_lvls = {'notset': 0, 'debug': 10, 'info': 20, 'warning': 30, 'error': 40, 'critical': 50, } lvl = int(debug_lvls[verbosity]) seshetbot = bot.SeshetBot(client_conf['nickname'], db, debug_file, lvl) # connection info for connect() seshetbot.default_host = conn_conf['server'] seshetbot.default_port = int(conn_conf['port']) seshetbot.default_channel = conn_conf['channels'].split(',') seshetbot.default_use_ssl = conn_conf.getboolean('ssl') # client info seshetbot.user = client_conf['user'] seshetbot.real_name = client_conf['realname'] # logging info seshetbot.log_file = log_file seshetbot.log_formats = log_fmts seshetbot.locale = dict(config['locale']) return seshetbot
[ "def", "build_bot", "(", "config_file", "=", "None", ")", ":", "from", ".", "import", "bot", "config", "=", "ConfigParser", "(", "interpolation", "=", "None", ")", "if", "config_file", "is", "None", ":", "config", ".", "read_string", "(", "default_config", ...
Parse a config and return a SeshetBot instance. After, the bot can be run simply by calling .connect() and then .start() Optional arguments: config_file - valid file path or ConfigParser instance If config_file is None, will read default config defined in this module.
[ "Parse", "a", "config", "and", "return", "a", "SeshetBot", "instance", ".", "After", "the", "bot", "can", "be", "run", "simply", "by", "calling", ".", "connect", "()", "and", "then", ".", "start", "()", "Optional", "arguments", ":", "config_file", "-", "...
python
train
ibm-watson-iot/iot-python
src/wiotp/sdk/client.py
https://github.com/ibm-watson-iot/iot-python/blob/195f05adce3fba4ec997017e41e02ebd85c0c4cc/src/wiotp/sdk/client.py#L323-L343
def _onDisconnect(self, mqttc, obj, rc): """ Called when the client disconnects from IBM Watson IoT Platform. See [paho.mqtt.python#on_disconnect](https://github.com/eclipse/paho.mqtt.python#on_disconnect) for more information # Parameters mqttc (paho.mqtt.client.Client): The client instance for this callback obj (object): The private user data as set in Client() or user_data_set() rc (int): indicates the disconnection state. If `MQTT_ERR_SUCCESS` (0), the callback was called in response to a `disconnect()` call. If any other value the disconnection was unexpected, such as might be caused by a network error. """ # Clear the event to indicate we're no longer connected self.connectEvent.clear() if rc != 0: self.logger.error("Unexpected disconnect from IBM Watson IoT Platform: %d" % (rc)) else: self.logger.info("Disconnected from IBM Watson IoT Platform")
[ "def", "_onDisconnect", "(", "self", ",", "mqttc", ",", "obj", ",", "rc", ")", ":", "# Clear the event to indicate we're no longer connected", "self", ".", "connectEvent", ".", "clear", "(", ")", "if", "rc", "!=", "0", ":", "self", ".", "logger", ".", "error...
Called when the client disconnects from IBM Watson IoT Platform. See [paho.mqtt.python#on_disconnect](https://github.com/eclipse/paho.mqtt.python#on_disconnect) for more information # Parameters mqttc (paho.mqtt.client.Client): The client instance for this callback obj (object): The private user data as set in Client() or user_data_set() rc (int): indicates the disconnection state. If `MQTT_ERR_SUCCESS` (0), the callback was called in response to a `disconnect()` call. If any other value the disconnection was unexpected, such as might be caused by a network error.
[ "Called", "when", "the", "client", "disconnects", "from", "IBM", "Watson", "IoT", "Platform", ".", "See", "[", "paho", ".", "mqtt", ".", "python#on_disconnect", "]", "(", "https", ":", "//", "github", ".", "com", "/", "eclipse", "/", "paho", ".", "mqtt",...
python
test
bitesofcode/projexui
projexui/widgets/xviewwidget/xviewwidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xviewwidget/xviewwidget.py#L271-L284
def registerViewType(self, cls, window=None): """ Registers the inputed widget class as a potential view class. If the \ optional window argument is supplied, then the registerToWindow method \ will be called for the class. :param cls | <subclass of XView> window | <QMainWindow> || <QDialog> || None """ if ( not cls in self._viewTypes ): self._viewTypes.append(cls) if ( window ): cls.registerToWindow(window)
[ "def", "registerViewType", "(", "self", ",", "cls", ",", "window", "=", "None", ")", ":", "if", "(", "not", "cls", "in", "self", ".", "_viewTypes", ")", ":", "self", ".", "_viewTypes", ".", "append", "(", "cls", ")", "if", "(", "window", ")", ":", ...
Registers the inputed widget class as a potential view class. If the \ optional window argument is supplied, then the registerToWindow method \ will be called for the class. :param cls | <subclass of XView> window | <QMainWindow> || <QDialog> || None
[ "Registers", "the", "inputed", "widget", "class", "as", "a", "potential", "view", "class", ".", "If", "the", "\\", "optional", "window", "argument", "is", "supplied", "then", "the", "registerToWindow", "method", "\\", "will", "be", "called", "for", "the", "c...
python
train
alerta/alerta
alerta/database/backends/mongodb/base.py
https://github.com/alerta/alerta/blob/6478d6addc217c96a4a6688fab841035bef134e1/alerta/database/backends/mongodb/base.py#L171-L220
def dedup_alert(self, alert, history): """ Update alert status, service, value, text, timeout and rawData, increment duplicate count and set repeat=True, and keep track of last receive id and time but don't append to history unless status changes. """ query = { 'environment': alert.environment, 'resource': alert.resource, 'event': alert.event, 'severity': alert.severity, 'customer': alert.customer } now = datetime.utcnow() update = { '$set': { 'status': alert.status, 'service': alert.service, 'value': alert.value, 'text': alert.text, 'timeout': alert.timeout, 'rawData': alert.raw_data, 'repeat': True, 'lastReceiveId': alert.id, 'lastReceiveTime': now }, '$addToSet': {'tags': {'$each': alert.tags}}, '$inc': {'duplicateCount': 1} } # only update those attributes that are specifically defined attributes = {'attributes.' + k: v for k, v in alert.attributes.items()} update['$set'].update(attributes) if alert.update_time: update['$set']['updateTime'] = alert.update_time if history: update['$push'] = { 'history': { '$each': [history.serialize], '$slice': -abs(current_app.config['HISTORY_LIMIT']) } } return self.get_db().alerts.find_one_and_update( query, update=update, return_document=ReturnDocument.AFTER )
[ "def", "dedup_alert", "(", "self", ",", "alert", ",", "history", ")", ":", "query", "=", "{", "'environment'", ":", "alert", ".", "environment", ",", "'resource'", ":", "alert", ".", "resource", ",", "'event'", ":", "alert", ".", "event", ",", "'severity...
Update alert status, service, value, text, timeout and rawData, increment duplicate count and set repeat=True, and keep track of last receive id and time but don't append to history unless status changes.
[ "Update", "alert", "status", "service", "value", "text", "timeout", "and", "rawData", "increment", "duplicate", "count", "and", "set", "repeat", "=", "True", "and", "keep", "track", "of", "last", "receive", "id", "and", "time", "but", "don", "t", "append", ...
python
train
reingart/gui2py
gui/controls/listbox.py
https://github.com/reingart/gui2py/blob/aca0a05f6fcde55c94ad7cc058671a06608b01a4/gui/controls/listbox.py#L101-L128
def _set_items(self, a_iter): "Clear and set the strings (and data if any) in the control from a list" self._items_dict = {} if not a_iter: string_list = [] data_list = [] elif not isinstance(a_iter, (tuple, list, dict)): raise ValueError("items must be an iterable") elif isinstance(a_iter, dict): # use keys as data, values as label strings self._items_dict = a_iter string_list = a_iter.values() data_list = a_iter.keys() elif isinstance(a_iter[0], (tuple, list)) and len(a_iter[0]) == 2: # like the dict, but ordered self._items_dict = dict(a_iter) data_list, string_list = zip(*a_iter) else: # use the same strings as data string_list = a_iter data_list = a_iter # set the strings self.wx_obj.SetItems(string_list) # set the associated data for i, data in enumerate(data_list): self.set_data(i, data)
[ "def", "_set_items", "(", "self", ",", "a_iter", ")", ":", "self", ".", "_items_dict", "=", "{", "}", "if", "not", "a_iter", ":", "string_list", "=", "[", "]", "data_list", "=", "[", "]", "elif", "not", "isinstance", "(", "a_iter", ",", "(", "tuple",...
Clear and set the strings (and data if any) in the control from a list
[ "Clear", "and", "set", "the", "strings", "(", "and", "data", "if", "any", ")", "in", "the", "control", "from", "a", "list" ]
python
test
drdoctr/doctr
doctr/local.py
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/local.py#L227-L264
def get_travis_token(*, GitHub_token=None, **login_kwargs): """ Generate a temporary token for authenticating with Travis The GitHub token can be passed in to the ``GitHub_token`` keyword argument. If no token is passed in, a GitHub token is generated temporarily, and then immediately deleted. This is needed to activate a private repo Returns the secret token. It should be added to the headers like headers['Authorization'] = "token {}".format(token) """ _headers = { 'Content-Type': 'application/json', 'User-Agent': 'MyClient/1.0.0', } headersv2 = {**_headers, **Travis_APIv2} token_id = None try: if not GitHub_token: print(green("I need to generate a temporary token with GitHub to authenticate with Travis. You may get a warning email from GitHub about this.")) print(green("It will be deleted immediately. If you still see it after this at https://github.com/settings/tokens after please delete it manually.")) # /auth/github doesn't seem to exist in the Travis API v3. tok_dict = generate_GitHub_token(scopes=["read:org", "user:email", "repo"], note="temporary token for doctr to auth against travis (delete me)", **login_kwargs) GitHub_token = tok_dict['token'] token_id = tok_dict['id'] data = {'github_token': GitHub_token} res = requests.post('https://api.travis-ci.com/auth/github', data=json.dumps(data), headers=headersv2) return res.json()['access_token'] finally: if token_id: delete_GitHub_token(token_id, **login_kwargs)
[ "def", "get_travis_token", "(", "*", ",", "GitHub_token", "=", "None", ",", "*", "*", "login_kwargs", ")", ":", "_headers", "=", "{", "'Content-Type'", ":", "'application/json'", ",", "'User-Agent'", ":", "'MyClient/1.0.0'", ",", "}", "headersv2", "=", "{", ...
Generate a temporary token for authenticating with Travis The GitHub token can be passed in to the ``GitHub_token`` keyword argument. If no token is passed in, a GitHub token is generated temporarily, and then immediately deleted. This is needed to activate a private repo Returns the secret token. It should be added to the headers like headers['Authorization'] = "token {}".format(token)
[ "Generate", "a", "temporary", "token", "for", "authenticating", "with", "Travis" ]
python
train
johnnoone/facts
facts/grafts/system_grafts.py
https://github.com/johnnoone/facts/blob/82d38a46c15d9c01200445526f4c0d1825fc1e51/facts/grafts/system_grafts.py#L173-L193
def devices_data(): """Returns devices data. """ response = {} for part in psutil.disk_partitions(): device = part.device response[device] = { 'device': device, 'mountpoint': part.mountpoint, 'fstype': part.fstype, 'opts': part.opts, } if part.mountpoint: usage = psutil.disk_usage(part.mountpoint) response[device]['usage'] = { 'size': mark(usage.total, 'bytes'), 'used': mark(usage.used, 'bytes'), 'free': mark(usage.free, 'bytes'), 'percent': mark(usage.percent, 'percentage') } return response
[ "def", "devices_data", "(", ")", ":", "response", "=", "{", "}", "for", "part", "in", "psutil", ".", "disk_partitions", "(", ")", ":", "device", "=", "part", ".", "device", "response", "[", "device", "]", "=", "{", "'device'", ":", "device", ",", "'m...
Returns devices data.
[ "Returns", "devices", "data", "." ]
python
train
materialsproject/pymatgen
pymatgen/analysis/gb/grain.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/gb/grain.py#L153-L165
def sigma_from_site_prop(self): """ This method returns the sigma value of the gb from site properties. If the GB structure merge some atoms due to the atoms too closer with each other, this property will not work. """ num_coi = 0 if None in self.site_properties['grain_label']: raise RuntimeError('Site were merged, this property do not work') for tag in self.site_properties['grain_label']: if 'incident' in tag: num_coi += 1 return int(round(self.num_sites / num_coi))
[ "def", "sigma_from_site_prop", "(", "self", ")", ":", "num_coi", "=", "0", "if", "None", "in", "self", ".", "site_properties", "[", "'grain_label'", "]", ":", "raise", "RuntimeError", "(", "'Site were merged, this property do not work'", ")", "for", "tag", "in", ...
This method returns the sigma value of the gb from site properties. If the GB structure merge some atoms due to the atoms too closer with each other, this property will not work.
[ "This", "method", "returns", "the", "sigma", "value", "of", "the", "gb", "from", "site", "properties", ".", "If", "the", "GB", "structure", "merge", "some", "atoms", "due", "to", "the", "atoms", "too", "closer", "with", "each", "other", "this", "property",...
python
train
edx/edx-enterprise
integrated_channels/degreed/exporters/learner_data.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/integrated_channels/degreed/exporters/learner_data.py#L25-L68
def get_learner_data_records( self, enterprise_enrollment, completed_date=None, is_passing=False, **kwargs ): # pylint: disable=arguments-differ,unused-argument """ Return a DegreedLearnerDataTransmissionAudit with the given enrollment and course completion data. If completed_date is None, then course completion has not been met. If no remote ID can be found, return None. """ # Degreed expects completion dates of the form 'yyyy-mm-dd'. completed_timestamp = completed_date.strftime("%F") if isinstance(completed_date, datetime) else None if enterprise_enrollment.enterprise_customer_user.get_remote_id() is not None: DegreedLearnerDataTransmissionAudit = apps.get_model( # pylint: disable=invalid-name 'degreed', 'DegreedLearnerDataTransmissionAudit' ) # We return two records here, one with the course key and one with the course run id, to account for # uncertainty about the type of content (course vs. course run) that was sent to the integrated channel. return [ DegreedLearnerDataTransmissionAudit( enterprise_course_enrollment_id=enterprise_enrollment.id, degreed_user_email=enterprise_enrollment.enterprise_customer_user.user_email, course_id=parse_course_key(enterprise_enrollment.course_id), course_completed=completed_date is not None and is_passing, completed_timestamp=completed_timestamp, ), DegreedLearnerDataTransmissionAudit( enterprise_course_enrollment_id=enterprise_enrollment.id, degreed_user_email=enterprise_enrollment.enterprise_customer_user.user_email, course_id=enterprise_enrollment.course_id, course_completed=completed_date is not None and is_passing, completed_timestamp=completed_timestamp, ) ] else: LOGGER.debug( 'No learner data was sent for user [%s] because a Degreed user ID could not be found.', enterprise_enrollment.enterprise_customer_user.username )
[ "def", "get_learner_data_records", "(", "self", ",", "enterprise_enrollment", ",", "completed_date", "=", "None", ",", "is_passing", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=arguments-differ,unused-argument", "# Degreed expects completion dates of...
Return a DegreedLearnerDataTransmissionAudit with the given enrollment and course completion data. If completed_date is None, then course completion has not been met. If no remote ID can be found, return None.
[ "Return", "a", "DegreedLearnerDataTransmissionAudit", "with", "the", "given", "enrollment", "and", "course", "completion", "data", "." ]
python
valid
rmorshea/dstruct
dstruct/dstruct.py
https://github.com/rmorshea/dstruct/blob/c5eec8ac659c0846835e35ce1f59e7c3f9c9f25c/dstruct/dstruct.py#L364-L373
def del_fields(self, *names): """Delete data fields from this struct instance""" cls = type(self) self.__class__ = cls for n in names: # don't raise error if a field is absent if isinstance(getattr(cls, n, None), DataField): if n in self._field_values: del self._field_values[n] delattr(cls, n)
[ "def", "del_fields", "(", "self", ",", "*", "names", ")", ":", "cls", "=", "type", "(", "self", ")", "self", ".", "__class__", "=", "cls", "for", "n", "in", "names", ":", "# don't raise error if a field is absent", "if", "isinstance", "(", "getattr", "(", ...
Delete data fields from this struct instance
[ "Delete", "data", "fields", "from", "this", "struct", "instance" ]
python
train
edx/edx-sphinx-theme
edx_theme/__init__.py
https://github.com/edx/edx-sphinx-theme/blob/0abdc8c64ca1453f571a45f4603a6b2907a34378/edx_theme/__init__.py#L38-L42
def update_context(app, pagename, templatename, context, doctree): # pylint: disable=unused-argument """ Update the page rendering context to include ``feedback_form_url``. """ context['feedback_form_url'] = feedback_form_url(app.config.project, pagename)
[ "def", "update_context", "(", "app", ",", "pagename", ",", "templatename", ",", "context", ",", "doctree", ")", ":", "# pylint: disable=unused-argument", "context", "[", "'feedback_form_url'", "]", "=", "feedback_form_url", "(", "app", ".", "config", ".", "project...
Update the page rendering context to include ``feedback_form_url``.
[ "Update", "the", "page", "rendering", "context", "to", "include", "feedback_form_url", "." ]
python
train
LionelAuroux/pyrser
pyrser/parsing/base.py
https://github.com/LionelAuroux/pyrser/blob/f153a97ef2b6bf915a1ed468c0252a9a59b754d5/pyrser/parsing/base.py#L151-L155
def begin_tag(self, name: str) -> Node: """Save the current index under the given name.""" # Check if we could attach tag cache to current rule_nodes scope self.tag_cache[name] = Tag(self._stream, self._stream.index) return True
[ "def", "begin_tag", "(", "self", ",", "name", ":", "str", ")", "->", "Node", ":", "# Check if we could attach tag cache to current rule_nodes scope", "self", ".", "tag_cache", "[", "name", "]", "=", "Tag", "(", "self", ".", "_stream", ",", "self", ".", "_strea...
Save the current index under the given name.
[ "Save", "the", "current", "index", "under", "the", "given", "name", "." ]
python
test
allenai/allennlp
allennlp/data/instance.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/instance.py#L59-L72
def index_fields(self, vocab: Vocabulary) -> None: """ Indexes all fields in this ``Instance`` using the provided ``Vocabulary``. This `mutates` the current object, it does not return a new ``Instance``. A ``DataIterator`` will call this on each pass through a dataset; we use the ``indexed`` flag to make sure that indexing only happens once. This means that if for some reason you modify your vocabulary after you've indexed your instances, you might get unexpected behavior. """ if not self.indexed: self.indexed = True for field in self.fields.values(): field.index(vocab)
[ "def", "index_fields", "(", "self", ",", "vocab", ":", "Vocabulary", ")", "->", "None", ":", "if", "not", "self", ".", "indexed", ":", "self", ".", "indexed", "=", "True", "for", "field", "in", "self", ".", "fields", ".", "values", "(", ")", ":", "...
Indexes all fields in this ``Instance`` using the provided ``Vocabulary``. This `mutates` the current object, it does not return a new ``Instance``. A ``DataIterator`` will call this on each pass through a dataset; we use the ``indexed`` flag to make sure that indexing only happens once. This means that if for some reason you modify your vocabulary after you've indexed your instances, you might get unexpected behavior.
[ "Indexes", "all", "fields", "in", "this", "Instance", "using", "the", "provided", "Vocabulary", ".", "This", "mutates", "the", "current", "object", "it", "does", "not", "return", "a", "new", "Instance", ".", "A", "DataIterator", "will", "call", "this", "on",...
python
train
SiLab-Bonn/pixel_clusterizer
pixel_clusterizer/cluster_functions.py
https://github.com/SiLab-Bonn/pixel_clusterizer/blob/d2c8c3072fb03ebb7c6a3e8c57350fbbe38efd4d/pixel_clusterizer/cluster_functions.py#L125-L133
def _set_1d_array(array, value, size=-1): ''' Set array elemets to value for given number of elements (if size is negative number set all elements to value). ''' if size >= 0: for i in range(size): array[i] = value else: for i in range(array.shape[0]): array[i] = value
[ "def", "_set_1d_array", "(", "array", ",", "value", ",", "size", "=", "-", "1", ")", ":", "if", "size", ">=", "0", ":", "for", "i", "in", "range", "(", "size", ")", ":", "array", "[", "i", "]", "=", "value", "else", ":", "for", "i", "in", "ra...
Set array elemets to value for given number of elements (if size is negative number set all elements to value).
[ "Set", "array", "elemets", "to", "value", "for", "given", "number", "of", "elements", "(", "if", "size", "is", "negative", "number", "set", "all", "elements", "to", "value", ")", "." ]
python
test
walkr/nanoservice
nanoservice/reqrep.py
https://github.com/walkr/nanoservice/blob/e2098986b1baa5f283167ae487d14f3c6c21961a/nanoservice/reqrep.py#L144-L147
def build_payload(cls, method, args): """ Build the payload to be sent to a `Responder` """ ref = str(uuid.uuid4()) return (method, args, ref)
[ "def", "build_payload", "(", "cls", ",", "method", ",", "args", ")", ":", "ref", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "return", "(", "method", ",", "args", ",", "ref", ")" ]
Build the payload to be sent to a `Responder`
[ "Build", "the", "payload", "to", "be", "sent", "to", "a", "Responder" ]
python
train
loads/molotov
molotov/slave.py
https://github.com/loads/molotov/blob/bd2c94e7f250e1fbb21940f02c68b4437655bc11/molotov/slave.py#L62-L122
def main(): """Moloslave clones a git repo and runs a molotov test """ parser = argparse.ArgumentParser(description='Github-based load test') parser.add_argument('--version', action='store_true', default=False, help='Displays version and exits.') parser.add_argument('--virtualenv', type=str, default='virtualenv', help='Virtualenv executable.') parser.add_argument('--python', type=str, default=sys.executable, help='Python executable.') parser.add_argument('--config', type=str, default='molotov.json', help='Path of the configuration file.') parser.add_argument('repo', help='Github repo', type=str, nargs="?") parser.add_argument('run', help='Test to run', nargs="?") args = parser.parse_args() if args.version: print(__version__) sys.exit(0) tempdir = tempfile.mkdtemp() curdir = os.getcwd() os.chdir(tempdir) print('Working directory is %s' % tempdir) try: clone_repo(args.repo) config_file = os.path.join(tempdir, args.config) with open(config_file) as f: config = json.loads(f.read()) # creating the virtualenv create_virtualenv(args.virtualenv, args.python) # install deps if 'requirements' in config['molotov']: install_reqs(config['molotov']['requirements']) # load deps into sys.path pyver = '%d.%d' % (sys.version_info.major, sys.version_info.minor) site_pkg = os.path.join(tempdir, 'venv', 'lib', 'python' + pyver, 'site-packages') site.addsitedir(site_pkg) pkg_resources.working_set.add_entry(site_pkg) # environment if 'env' in config['molotov']: for key, value in config['molotov']['env'].items(): os.environ[key] = value run_test(**config['molotov']['tests'][args.run]) except Exception: os.chdir(curdir) shutil.rmtree(tempdir, ignore_errors=True) raise
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Github-based load test'", ")", "parser", ".", "add_argument", "(", "'--version'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", ...
Moloslave clones a git repo and runs a molotov test
[ "Moloslave", "clones", "a", "git", "repo", "and", "runs", "a", "molotov", "test" ]
python
train
pantsbuild/pants
pants-plugins/src/python/internal_backend/sitegen/tasks/sitegen.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/pants-plugins/src/python/internal_backend/sitegen/tasks/sitegen.py#L70-L76
def load_config(json_path): """Load config info from a .json file and return it.""" with open(json_path, 'r') as json_file: config = json.loads(json_file.read()) # sanity-test the config: assert(config['tree'][0]['page'] == 'index') return config
[ "def", "load_config", "(", "json_path", ")", ":", "with", "open", "(", "json_path", ",", "'r'", ")", "as", "json_file", ":", "config", "=", "json", ".", "loads", "(", "json_file", ".", "read", "(", ")", ")", "# sanity-test the config:", "assert", "(", "c...
Load config info from a .json file and return it.
[ "Load", "config", "info", "from", "a", ".", "json", "file", "and", "return", "it", "." ]
python
train
Synerty/pytmpdir
pytmpdir/Directory.py
https://github.com/Synerty/pytmpdir/blob/8f21d7a0b28d4f5c3a0ed91f9660ac5310773605/pytmpdir/Directory.py#L264-L283
def scan(self) -> ['File']: """ Scan Scan the directory for files and folders and update the file dictionary. @return: List of files """ self._files = {} output = self._listFilesWin() if isWindows else self._listFilesPosix() output = [line for line in output if "__MACOSX" not in line] for pathName in output: if not pathName: # Sometimes we get empty lines continue pathName = pathName[len(self._path) + 1:] file = File(self, pathName=pathName, exists=True) self._files[file.pathName] = file return self.files
[ "def", "scan", "(", "self", ")", "->", "[", "'File'", "]", ":", "self", ".", "_files", "=", "{", "}", "output", "=", "self", ".", "_listFilesWin", "(", ")", "if", "isWindows", "else", "self", ".", "_listFilesPosix", "(", ")", "output", "=", "[", "l...
Scan Scan the directory for files and folders and update the file dictionary. @return: List of files
[ "Scan" ]
python
train
euske/pdfminer
pdfminer/encodingdb.py
https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/encodingdb.py#L13-L20
def name2unicode(name): """Converts Adobe glyph names to Unicode numbers.""" if name in glyphname2unicode: return glyphname2unicode[name] m = STRIP_NAME.search(name) if not m: raise KeyError(name) return unichr(int(m.group(0)))
[ "def", "name2unicode", "(", "name", ")", ":", "if", "name", "in", "glyphname2unicode", ":", "return", "glyphname2unicode", "[", "name", "]", "m", "=", "STRIP_NAME", ".", "search", "(", "name", ")", "if", "not", "m", ":", "raise", "KeyError", "(", "name",...
Converts Adobe glyph names to Unicode numbers.
[ "Converts", "Adobe", "glyph", "names", "to", "Unicode", "numbers", "." ]
python
train
quintusdias/glymur
glymur/lib/openjp2.py
https://github.com/quintusdias/glymur/blob/8b8fb091130fff00f1028dc82219e69e3f9baf6d/glymur/lib/openjp2.py#L859-L885
def image_tile_create(comptparms, clrspc): """Creates a new image structure. Wraps the openjp2 library function opj_image_tile_create. Parameters ---------- cmptparms : comptparms_t The component parameters. clrspc : int Specifies the color space. Returns ------- image : ImageType Reference to ImageType instance. """ ARGTYPES = [ctypes.c_uint32, ctypes.POINTER(ImageComptParmType), COLOR_SPACE_TYPE] OPENJP2.opj_image_tile_create.argtypes = ARGTYPES OPENJP2.opj_image_tile_create.restype = ctypes.POINTER(ImageType) image = OPENJP2.opj_image_tile_create(len(comptparms), comptparms, clrspc) return image
[ "def", "image_tile_create", "(", "comptparms", ",", "clrspc", ")", ":", "ARGTYPES", "=", "[", "ctypes", ".", "c_uint32", ",", "ctypes", ".", "POINTER", "(", "ImageComptParmType", ")", ",", "COLOR_SPACE_TYPE", "]", "OPENJP2", ".", "opj_image_tile_create", ".", ...
Creates a new image structure. Wraps the openjp2 library function opj_image_tile_create. Parameters ---------- cmptparms : comptparms_t The component parameters. clrspc : int Specifies the color space. Returns ------- image : ImageType Reference to ImageType instance.
[ "Creates", "a", "new", "image", "structure", "." ]
python
train
Jajcus/pyxmpp2
pyxmpp2/roster.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/roster.py#L906-L930
def _roster_set(self, item, callback, error_callback): """Send a 'roster set' to the server. :Parameters: - `item`: the requested change :Types: - `item`: `RosterItem` """ stanza = Iq(to_jid = self.server, stanza_type = "set") payload = RosterPayload([item]) stanza.set_payload(payload) def success_cb(result_stanza): """Success callback for roster set.""" if callback: callback(item) def error_cb(error_stanza): """Error callback for roster set.""" if error_callback: error_callback(error_stanza) else: logger.error("Roster change of '{0}' failed".format(item.jid)) processor = self.stanza_processor processor.set_response_handlers(stanza, success_cb, error_cb) processor.send(stanza)
[ "def", "_roster_set", "(", "self", ",", "item", ",", "callback", ",", "error_callback", ")", ":", "stanza", "=", "Iq", "(", "to_jid", "=", "self", ".", "server", ",", "stanza_type", "=", "\"set\"", ")", "payload", "=", "RosterPayload", "(", "[", "item", ...
Send a 'roster set' to the server. :Parameters: - `item`: the requested change :Types: - `item`: `RosterItem`
[ "Send", "a", "roster", "set", "to", "the", "server", "." ]
python
valid
hufman/flask_rdf
flask_rdf/format.py
https://github.com/hufman/flask_rdf/blob/9bf86023288171eb0665c15fb28070250f80310c/flask_rdf/format.py#L135-L147
def add_format(mimetype, format, requires_context=False): """ Registers a new format to be used in a graph's serialize call If you've installed an rdflib serializer plugin, use this to add it to the content negotiation system Set requires_context=True if this format requires a context-aware graph """ global formats global ctxless_mimetypes global all_mimetypes formats[mimetype] = format if not requires_context: ctxless_mimetypes.append(mimetype) all_mimetypes.append(mimetype)
[ "def", "add_format", "(", "mimetype", ",", "format", ",", "requires_context", "=", "False", ")", ":", "global", "formats", "global", "ctxless_mimetypes", "global", "all_mimetypes", "formats", "[", "mimetype", "]", "=", "format", "if", "not", "requires_context", ...
Registers a new format to be used in a graph's serialize call If you've installed an rdflib serializer plugin, use this to add it to the content negotiation system Set requires_context=True if this format requires a context-aware graph
[ "Registers", "a", "new", "format", "to", "be", "used", "in", "a", "graph", "s", "serialize", "call", "If", "you", "ve", "installed", "an", "rdflib", "serializer", "plugin", "use", "this", "to", "add", "it", "to", "the", "content", "negotiation", "system", ...
python
train
glitchassassin/lackey
lackey/InputEmulation.py
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/InputEmulation.py#L87-L94
def buttonDown(self, button=mouse.LEFT): """ Holds down the specified mouse button. Use Mouse.LEFT, Mouse.MIDDLE, Mouse.RIGHT """ self._lock.acquire() mouse.press(button) self._lock.release()
[ "def", "buttonDown", "(", "self", ",", "button", "=", "mouse", ".", "LEFT", ")", ":", "self", ".", "_lock", ".", "acquire", "(", ")", "mouse", ".", "press", "(", "button", ")", "self", ".", "_lock", ".", "release", "(", ")" ]
Holds down the specified mouse button. Use Mouse.LEFT, Mouse.MIDDLE, Mouse.RIGHT
[ "Holds", "down", "the", "specified", "mouse", "button", "." ]
python
train
wavycloud/pyboto3
pyboto3/dynamodb.py
https://github.com/wavycloud/pyboto3/blob/924957ccf994303713a4eed90b775ff2ab95b2e5/pyboto3/dynamodb.py#L3728-L4306
def update_item(TableName=None, Key=None, AttributeUpdates=None, Expected=None, ConditionalOperator=None, ReturnValues=None, ReturnConsumedCapacity=None, ReturnItemCollectionMetrics=None, UpdateExpression=None, ConditionExpression=None, ExpressionAttributeNames=None, ExpressionAttributeValues=None): """ Edits an existing item's attributes, or adds a new item to the table if it does not already exist. You can put, delete, or add attribute values. You can also perform a conditional update on an existing item (insert a new attribute name-value pair if it doesn't exist, or replace an existing name-value pair if it has certain expected attribute values). You can also return the item's attribute values in the same UpdateItem operation using the ReturnValues parameter. See also: AWS API Documentation Examples This example updates an item in the Music table. It adds a new attribute (Year) and modifies the AlbumTitle attribute. All of the attributes in the item, as they appear after the update, are returned in the response. Expected Output: :example: response = client.update_item( TableName='string', Key={ 'string': { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False } }, AttributeUpdates={ 'string': { 'Value': { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False }, 'Action': 'ADD'|'PUT'|'DELETE' } }, Expected={ 'string': { 'Value': { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... 
recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False }, 'Exists': True|False, 'ComparisonOperator': 'EQ'|'NE'|'IN'|'LE'|'LT'|'GE'|'GT'|'BETWEEN'|'NOT_NULL'|'NULL'|'CONTAINS'|'NOT_CONTAINS'|'BEGINS_WITH', 'AttributeValueList': [ { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False }, ] } }, ConditionalOperator='AND'|'OR', ReturnValues='NONE'|'ALL_OLD'|'UPDATED_OLD'|'ALL_NEW'|'UPDATED_NEW', ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE', ReturnItemCollectionMetrics='SIZE'|'NONE', UpdateExpression='string', ConditionExpression='string', ExpressionAttributeNames={ 'string': 'string' }, ExpressionAttributeValues={ 'string': { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False } } ) :type TableName: string :param TableName: [REQUIRED] The name of the table containing the item to update. :type Key: dict :param Key: [REQUIRED] The primary key of the item to be updated. Each element consists of an attribute name and a value for that attribute. For the primary key, you must provide all of the attributes. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key. (string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . S (string) --An attribute of type String. For example: 'S': 'Hello' N (string) --An attribute of type Number. 
For example: 'N': '123.45' Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. B (bytes) --An attribute of type Binary. For example: 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk' SS (list) --An attribute of type String Set. For example: 'SS': ['Giraffe', 'Hippo' ,'Zebra'] (string) -- NS (list) --An attribute of type Number Set. For example: 'NS': ['42.2', '-19', '7.5', '3.14'] Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. (string) -- BS (list) --An attribute of type Binary Set. For example: 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k='] (bytes) -- M (dict) --An attribute of type Map. For example: 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}} (string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . L (list) --An attribute of type List. For example: 'L': ['Cookies', 'Coffee', 3.14159] (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . NULL (boolean) --An attribute of type Null. For example: 'NULL': true BOOL (boolean) --An attribute of type Boolean. For example: 'BOOL': true :type AttributeUpdates: dict :param AttributeUpdates: This is a legacy parameter. Use UpdateExpression instead. For more information, see AttributeUpdates in the Amazon DynamoDB Developer Guide . 
(string) -- (dict) --For the UpdateItem operation, represents the attributes to be modified, the action to perform on each, and the new value for each. Note You cannot use UpdateItem to update any primary key attributes. Instead, you will need to delete the item, and then use PutItem to create a new item with new attributes. Attribute values cannot be null; string and binary type attributes must have lengths greater than zero; and set type attributes must not be empty. Requests with empty values will be rejected with a ValidationException exception. Value (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data TYpes in the Amazon DynamoDB Developer Guide . S (string) --An attribute of type String. For example: 'S': 'Hello' N (string) --An attribute of type Number. For example: 'N': '123.45' Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. B (bytes) --An attribute of type Binary. For example: 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk' SS (list) --An attribute of type String Set. For example: 'SS': ['Giraffe', 'Hippo' ,'Zebra'] (string) -- NS (list) --An attribute of type Number Set. For example: 'NS': ['42.2', '-19', '7.5', '3.14'] Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. (string) -- BS (list) --An attribute of type Binary Set. For example: 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k='] (bytes) -- M (dict) --An attribute of type Map. For example: 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}} (string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. 
The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . L (list) --An attribute of type List. For example: 'L': ['Cookies', 'Coffee', 3.14159] (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . NULL (boolean) --An attribute of type Null. For example: 'NULL': true BOOL (boolean) --An attribute of type Boolean. For example: 'BOOL': true Action (string) --Specifies how to perform the update. Valid values are PUT (default), DELETE , and ADD . The behavior depends on whether the specified primary key already exists in the table. If an item with the specified *Key* is found in the table: PUT - Adds the specified attribute to the item. If the attribute already exists, it is replaced by the new value. DELETE - If no value is specified, the attribute and its value are removed from the item. The data type of the specified value must match the existing value's data type. If a set of values is specified, then those values are subtracted from the old set. For example, if the attribute value was the set [a,b,c] and the DELETE action specified [a,c] , then the final attribute value would be [b] . Specifying an empty set is an error. ADD - If the attribute does not already exist, then the attribute and its values are added to the item. If the attribute does exist, then the behavior of ADD depends on the data type of the attribute: If the existing attribute is a number, and if Value is also a number, then the Value is mathematically added to the existing attribute. If Value is a negative number, then it is subtracted from the existing attribute. Note If you use ADD to increment or decrement a number value for an item that doesn't exist before the update, DynamoDB uses 0 as the initial value. 
In addition, if you use ADD to update an existing item, and intend to increment or decrement an attribute value which does not yet exist, DynamoDB uses 0 as the initial value. For example, suppose that the item you want to update does not yet have an attribute named itemcount , but you decide to ADD the number 3 to this attribute anyway, even though it currently does not exist. DynamoDB will create the itemcount attribute, set its initial value to 0 , and finally add 3 to it. The result will be a new itemcount attribute in the item, with a value of 3 . If the existing data type is a set, and if the Value is also a set, then the Value is added to the existing set. (This is a set operation, not mathematical addition.) For example, if the attribute value was the set [1,2] , and the ADD action specified [3] , then the final attribute value would be [1,2,3] . An error occurs if an Add action is specified for a set attribute and the attribute type specified does not match the existing set type. Both sets must have the same primitive data type. For example, if the existing data type is a set of strings, the Value must also be a set of strings. The same holds true for number sets and binary sets. This action is only valid for an existing attribute whose data type is number or is a set. Do not use ADD for any other data types. If no item with the specified *Key* is found: PUT - DynamoDB creates a new item with the specified primary key, and then adds the attribute. DELETE - Nothing happens; there is no attribute to delete. ADD - DynamoDB creates an item with the supplied primary key and number (or set of numbers) for the attribute value. The only data types allowed are number and number set; no other data types can be specified. :type Expected: dict :param Expected: This is a legacy parameter. Use ConditionExpresssion instead. For more information, see Expected in the Amazon DynamoDB Developer Guide . 
(string) -- (dict) --Represents a condition to be compared with an attribute value. This condition can be used with DeleteItem , PutItem or UpdateItem operations; if the comparison evaluates to true, the operation succeeds; if not, the operation fails. You can use ExpectedAttributeValue in one of two different ways: Use AttributeValueList to specify one or more values to compare against an attribute. Use ComparisonOperator to specify how you want to perform the comparison. If the comparison evaluates to true, then the conditional operation succeeds. Use Value to specify a value that DynamoDB will compare against an attribute. If the values match, then ExpectedAttributeValue evaluates to true and the conditional operation succeeds. Optionally, you can also set Exists to false, indicating that you do not expect to find the attribute value in the table. In this case, the conditional operation succeeds only if the comparison evaluates to false. Value and Exists are incompatible with AttributeValueList and ComparisonOperator . Note that if you use both sets of parameters at once, DynamoDB will return a ValidationException exception. Value (dict) --Represents the data for the expected attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . S (string) --An attribute of type String. For example: 'S': 'Hello' N (string) --An attribute of type Number. For example: 'N': '123.45' Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. B (bytes) --An attribute of type Binary. For example: 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk' SS (list) --An attribute of type String Set. For example: 'SS': ['Giraffe', 'Hippo' ,'Zebra'] (string) -- NS (list) --An attribute of type Number Set. 
For example: 'NS': ['42.2', '-19', '7.5', '3.14'] Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. (string) -- BS (list) --An attribute of type Binary Set. For example: 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k='] (bytes) -- M (dict) --An attribute of type Map. For example: 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}} (string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . L (list) --An attribute of type List. For example: 'L': ['Cookies', 'Coffee', 3.14159] (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . NULL (boolean) --An attribute of type Null. For example: 'NULL': true BOOL (boolean) --An attribute of type Boolean. For example: 'BOOL': true Exists (boolean) --Causes DynamoDB to evaluate the value before attempting a conditional operation: If Exists is true , DynamoDB will check to see if that attribute value already exists in the table. If it is found, then the operation succeeds. If it is not found, the operation fails with a ConditionalCheckFailedException . If Exists is false , DynamoDB assumes that the attribute value does not exist in the table. If in fact the value does not exist, then the assumption is valid and the operation succeeds. If the value is found, despite the assumption that it does not exist, the operation fails with a ConditionalCheckFailedException . The default setting for Exists is true . 
If you supply a Value all by itself, DynamoDB assumes the attribute exists: You don't have to set Exists to true , because it is implied. DynamoDB returns a ValidationException if: Exists is true but there is no Value to check. (You expect a value to exist, but don't specify what that value is.) Exists is false but you also provide a Value . (You cannot expect an attribute to have a value, while also expecting it not to exist.) ComparisonOperator (string) --A comparator for evaluating attributes in the AttributeValueList . For example, equals, greater than, less than, etc. The following comparison operators are available: EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN The following are descriptions of each comparison operator. EQ : Equal. EQ is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} . NE : Not equal. NE is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} . LE : Less than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . 
Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} . LT : Less than. AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} . GE : Greater than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} . GT : Greater than. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} . NOT_NULL : The attribute exists. NOT_NULL is supported for all data types, including lists and maps. Note This operator tests for the existence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NOT_NULL , the result is a Boolean true . This result is because the attribute 'a ' exists; its data type is not relevant to the NOT_NULL comparison operator. NULL : The attribute does not exist. NULL is supported for all data types, including lists and maps. Note This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NULL , the result is a Boolean false . This is because the attribute 'a ' exists; its data type is not relevant to the NULL comparison operator. 
CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it finds an exact match with any member of the set. CONTAINS is supported for lists: When evaluating 'a CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list. NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it does not find an exact match with any member of the set. NOT_CONTAINS is supported for lists: When evaluating 'a NOT CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list. BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type). IN : Checks for matching elements in a list. AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary. These attributes are compared against an existing attribute of an item. 
If any elements of the input are equal to the item attribute, the expression evaluates to true. BETWEEN : Greater than or equal to the first value, and less than or equal to the second value. AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not compare to {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} AttributeValueList (list) --One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used. For type Number, value comparisons are numeric. String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A , and a is greater than B . For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters . For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values. For information on specifying data types in JSON, see JSON Data Format in the Amazon DynamoDB Developer Guide . (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . S (string) --An attribute of type String. For example: 'S': 'Hello' N (string) --An attribute of type Number. For example: 'N': '123.45' Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. 
B (bytes) --An attribute of type Binary. For example: 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk' SS (list) --An attribute of type String Set. For example: 'SS': ['Giraffe', 'Hippo' ,'Zebra'] (string) -- NS (list) --An attribute of type Number Set. For example: 'NS': ['42.2', '-19', '7.5', '3.14'] Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. (string) -- BS (list) --An attribute of type Binary Set. For example: 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k='] (bytes) -- M (dict) --An attribute of type Map. For example: 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}} (string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . L (list) --An attribute of type List. For example: 'L': ['Cookies', 'Coffee', 3.14159] (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . NULL (boolean) --An attribute of type Null. For example: 'NULL': true BOOL (boolean) --An attribute of type Boolean. For example: 'BOOL': true :type ConditionalOperator: string :param ConditionalOperator: This is a legacy parameter. Use ConditionExpression instead. For more information, see ConditionalOperator in the Amazon DynamoDB Developer Guide . :type ReturnValues: string :param ReturnValues: Use ReturnValues if you want to get the item attributes as they appeared either before or after they were updated. For UpdateItem , the valid values are: NONE - If ReturnValues is not specified, or if its value is NONE , then nothing is returned. 
(This setting is the default for ReturnValues .) ALL_OLD - Returns all of the attributes of the item, as they appeared before the UpdateItem operation. UPDATED_OLD - Returns only the updated attributes, as they appeared before the UpdateItem operation. ALL_NEW - Returns all of the attributes of the item, as they appear after the UpdateItem operation. UPDATED_NEW - Returns only the updated attributes, as they appear after the UpdateItem operation. There is no additional cost associated with requesting a return value aside from the small network and processing overhead of receiving a larger response. No Read Capacity Units are consumed. Values returned are strongly consistent :type ReturnConsumedCapacity: string :param ReturnConsumedCapacity: Determines the level of detail about provisioned throughput consumption that is returned in the response: INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed. Note that some operations, such as GetItem and BatchGetItem , do not access any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s). TOTAL - The response includes only the aggregate ConsumedCapacity for the operation. NONE - No ConsumedCapacity details are included in the response. :type ReturnItemCollectionMetrics: string :param ReturnItemCollectionMetrics: Determines whether item collection metrics are returned. If set to SIZE , the response includes statistics about item collections, if any, that were modified during the operation are returned in the response. If set to NONE (the default), no statistics are returned. :type UpdateExpression: string :param UpdateExpression: An expression that defines one or more attributes to be updated, the action to be performed on them, and new value(s) for them. The following action values are available for UpdateExpression . 
SET - Adds one or more attributes and values to an item. If any of these attribute already exist, they are replaced by the new values. You can also use SET to add or subtract from an attribute that is of type Number. For example: SET myNum = myNum + :val SET supports the following functions: if_not_exists (path, operand) - if the item does not contain an attribute at the specified path, then if_not_exists evaluates to operand; otherwise, it evaluates to path. You can use this function to avoid overwriting an attribute that may already be present in the item. list_append (operand, operand) - evaluates to a list with a new element added to it. You can append the new element to the start or the end of the list by reversing the order of the operands. These function names are case-sensitive. REMOVE - Removes one or more attributes from an item. ADD - Adds the specified value to the item, if the attribute does not already exist. If the attribute does exist, then the behavior of ADD depends on the data type of the attribute: If the existing attribute is a number, and if Value is also a number, then Value is mathematically added to the existing attribute. If Value is a negative number, then it is subtracted from the existing attribute. Note If you use ADD to increment or decrement a number value for an item that doesn't exist before the update, DynamoDB uses 0 as the initial value. Similarly, if you use ADD for an existing item to increment or decrement an attribute value that doesn't exist before the update, DynamoDB uses 0 as the initial value. For example, suppose that the item you want to update doesn't have an attribute named itemcount , but you decide to ADD the number 3 to this attribute anyway. DynamoDB will create the itemcount attribute, set its initial value to 0 , and finally add 3 to it. The result will be a new itemcount attribute in the item, with a value of 3 . 
If the existing data type is a set and if Value is also a set, then Value is added to the existing set. For example, if the attribute value is the set [1,2] , and the ADD action specified [3] , then the final attribute value is [1,2,3] . An error occurs if an ADD action is specified for a set attribute and the attribute type specified does not match the existing set type. Both sets must have the same primitive data type. For example, if the existing data type is a set of strings, the Value must also be a set of strings. Warning The ADD action only supports Number and set data types. In addition, ADD can only be used on top-level attributes, not nested attributes. DELETE - Deletes an element from a set. If a set of values is specified, then those values are subtracted from the old set. For example, if the attribute value was the set [a,b,c] and the DELETE action specifies [a,c] , then the final attribute value is [b] . Specifying an empty set is an error. Warning The DELETE action only supports set data types. In addition, DELETE can only be used on top-level attributes, not nested attributes. You can have many actions in a single expression, such as the following: SET a=:value1, b=:value2 DELETE :value3, :value4, :value5 For more information on update expressions, see Modifying Items and Attributes in the Amazon DynamoDB Developer Guide . :type ConditionExpression: string :param ConditionExpression: A condition that must be satisfied in order for a conditional update to succeed. An expression can contain any of the following: Functions: attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size These function names are case-sensitive. Comparison operators: = | | | | = | = | BETWEEN | IN Logical operators: AND | OR | NOT For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide . 
:type ExpressionAttributeNames: dict :param ExpressionAttributeNames: One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames : To access an attribute whose name conflicts with a DynamoDB reserved word. To create a placeholder for repeating occurrences of an attribute name in an expression. To prevent special characters in an attribute name from being misinterpreted in an expression. Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name: Percentile The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide ). To work around this, you could specify the following for ExpressionAttributeNames : {'#P':'Percentile'} You could then use this substitution in an expression, as in this example: #P = :val Note Tokens that begin with the : character are expression attribute values , which are placeholders for the actual value at runtime. For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide . (string) -- (string) -- :type ExpressionAttributeValues: dict :param ExpressionAttributeValues: One or more values that can be substituted in an expression. Use the : (colon) character in an expression to dereference an attribute value. 
For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following: Available | Backordered | Discontinued You would first need to specify ExpressionAttributeValues as follows: { ':avail':{'S':'Available'}, ':back':{'S':'Backordered'}, ':disc':{'S':'Discontinued'} } You could then use these values in an expression, such as this: ProductStatus IN (:avail, :back, :disc) For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide . (string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . S (string) --An attribute of type String. For example: 'S': 'Hello' N (string) --An attribute of type Number. For example: 'N': '123.45' Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. B (bytes) --An attribute of type Binary. For example: 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk' SS (list) --An attribute of type String Set. For example: 'SS': ['Giraffe', 'Hippo' ,'Zebra'] (string) -- NS (list) --An attribute of type Number Set. For example: 'NS': ['42.2', '-19', '7.5', '3.14'] Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. (string) -- BS (list) --An attribute of type Binary Set. For example: 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k='] (bytes) -- M (dict) --An attribute of type Map. For example: 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}} (string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. 
The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . L (list) --An attribute of type List. For example: 'L': ['Cookies', 'Coffee', 3.14159] (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . NULL (boolean) --An attribute of type Null. For example: 'NULL': true BOOL (boolean) --An attribute of type Boolean. For example: 'BOOL': true :rtype: dict :return: { 'Attributes': { 'string': { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False } }, 'ConsumedCapacity': { 'TableName': 'string', 'CapacityUnits': 123.0, 'Table': { 'CapacityUnits': 123.0 }, 'LocalSecondaryIndexes': { 'string': { 'CapacityUnits': 123.0 } }, 'GlobalSecondaryIndexes': { 'string': { 'CapacityUnits': 123.0 } } }, 'ItemCollectionMetrics': { 'ItemCollectionKey': { 'string': { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False } }, 'SizeEstimateRangeGB': [ 123.0, ] } } :returns: (string) -- """ pass
[ "def", "update_item", "(", "TableName", "=", "None", ",", "Key", "=", "None", ",", "AttributeUpdates", "=", "None", ",", "Expected", "=", "None", ",", "ConditionalOperator", "=", "None", ",", "ReturnValues", "=", "None", ",", "ReturnConsumedCapacity", "=", "...
Edits an existing item's attributes, or adds a new item to the table if it does not already exist. You can put, delete, or add attribute values. You can also perform a conditional update on an existing item (insert a new attribute name-value pair if it doesn't exist, or replace an existing name-value pair if it has certain expected attribute values). You can also return the item's attribute values in the same UpdateItem operation using the ReturnValues parameter. See also: AWS API Documentation Examples This example updates an item in the Music table. It adds a new attribute (Year) and modifies the AlbumTitle attribute. All of the attributes in the item, as they appear after the update, are returned in the response. Expected Output: :example: response = client.update_item( TableName='string', Key={ 'string': { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False } }, AttributeUpdates={ 'string': { 'Value': { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False }, 'Action': 'ADD'|'PUT'|'DELETE' } }, Expected={ 'string': { 'Value': { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False }, 'Exists': True|False, 'ComparisonOperator': 'EQ'|'NE'|'IN'|'LE'|'LT'|'GE'|'GT'|'BETWEEN'|'NOT_NULL'|'NULL'|'CONTAINS'|'NOT_CONTAINS'|'BEGINS_WITH', 'AttributeValueList': [ { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... 
recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False }, ] } }, ConditionalOperator='AND'|'OR', ReturnValues='NONE'|'ALL_OLD'|'UPDATED_OLD'|'ALL_NEW'|'UPDATED_NEW', ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE', ReturnItemCollectionMetrics='SIZE'|'NONE', UpdateExpression='string', ConditionExpression='string', ExpressionAttributeNames={ 'string': 'string' }, ExpressionAttributeValues={ 'string': { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False } } ) :type TableName: string :param TableName: [REQUIRED] The name of the table containing the item to update. :type Key: dict :param Key: [REQUIRED] The primary key of the item to be updated. Each element consists of an attribute name and a value for that attribute. For the primary key, you must provide all of the attributes. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key. (string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . S (string) --An attribute of type String. For example: 'S': 'Hello' N (string) --An attribute of type Number. For example: 'N': '123.45' Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. B (bytes) --An attribute of type Binary. For example: 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk' SS (list) --An attribute of type String Set. For example: 'SS': ['Giraffe', 'Hippo' ,'Zebra'] (string) -- NS (list) --An attribute of type Number Set. 
For example: 'NS': ['42.2', '-19', '7.5', '3.14'] Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. (string) -- BS (list) --An attribute of type Binary Set. For example: 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k='] (bytes) -- M (dict) --An attribute of type Map. For example: 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}} (string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . L (list) --An attribute of type List. For example: 'L': ['Cookies', 'Coffee', 3.14159] (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . NULL (boolean) --An attribute of type Null. For example: 'NULL': true BOOL (boolean) --An attribute of type Boolean. For example: 'BOOL': true :type AttributeUpdates: dict :param AttributeUpdates: This is a legacy parameter. Use UpdateExpression instead. For more information, see AttributeUpdates in the Amazon DynamoDB Developer Guide . (string) -- (dict) --For the UpdateItem operation, represents the attributes to be modified, the action to perform on each, and the new value for each. Note You cannot use UpdateItem to update any primary key attributes. Instead, you will need to delete the item, and then use PutItem to create a new item with new attributes. Attribute values cannot be null; string and binary type attributes must have lengths greater than zero; and set type attributes must not be empty. Requests with empty values will be rejected with a ValidationException exception. 
Value (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . S (string) --An attribute of type String. For example: 'S': 'Hello' N (string) --An attribute of type Number. For example: 'N': '123.45' Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. B (bytes) --An attribute of type Binary. For example: 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk' SS (list) --An attribute of type String Set. For example: 'SS': ['Giraffe', 'Hippo' ,'Zebra'] (string) -- NS (list) --An attribute of type Number Set. For example: 'NS': ['42.2', '-19', '7.5', '3.14'] Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. (string) -- BS (list) --An attribute of type Binary Set. For example: 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k='] (bytes) -- M (dict) --An attribute of type Map. For example: 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}} (string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . L (list) --An attribute of type List. For example: 'L': ['Cookies', 'Coffee', 3.14159] (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . NULL (boolean) --An attribute of type Null. 
For example: 'NULL': true BOOL (boolean) --An attribute of type Boolean. For example: 'BOOL': true Action (string) --Specifies how to perform the update. Valid values are PUT (default), DELETE , and ADD . The behavior depends on whether the specified primary key already exists in the table. If an item with the specified *Key* is found in the table: PUT - Adds the specified attribute to the item. If the attribute already exists, it is replaced by the new value. DELETE - If no value is specified, the attribute and its value are removed from the item. The data type of the specified value must match the existing value's data type. If a set of values is specified, then those values are subtracted from the old set. For example, if the attribute value was the set [a,b,c] and the DELETE action specified [a,c] , then the final attribute value would be [b] . Specifying an empty set is an error. ADD - If the attribute does not already exist, then the attribute and its values are added to the item. If the attribute does exist, then the behavior of ADD depends on the data type of the attribute: If the existing attribute is a number, and if Value is also a number, then the Value is mathematically added to the existing attribute. If Value is a negative number, then it is subtracted from the existing attribute. Note If you use ADD to increment or decrement a number value for an item that doesn't exist before the update, DynamoDB uses 0 as the initial value. In addition, if you use ADD to update an existing item, and intend to increment or decrement an attribute value which does not yet exist, DynamoDB uses 0 as the initial value. For example, suppose that the item you want to update does not yet have an attribute named itemcount , but you decide to ADD the number 3 to this attribute anyway, even though it currently does not exist. DynamoDB will create the itemcount attribute, set its initial value to 0 , and finally add 3 to it. 
The result will be a new itemcount attribute in the item, with a value of 3 . If the existing data type is a set, and if the Value is also a set, then the Value is added to the existing set. (This is a set operation, not mathematical addition.) For example, if the attribute value was the set [1,2] , and the ADD action specified [3] , then the final attribute value would be [1,2,3] . An error occurs if an ADD action is specified for a set attribute and the attribute type specified does not match the existing set type. Both sets must have the same primitive data type. For example, if the existing data type is a set of strings, the Value must also be a set of strings. The same holds true for number sets and binary sets. This action is only valid for an existing attribute whose data type is number or is a set. Do not use ADD for any other data types. If no item with the specified *Key* is found: PUT - DynamoDB creates a new item with the specified primary key, and then adds the attribute. DELETE - Nothing happens; there is no attribute to delete. ADD - DynamoDB creates an item with the supplied primary key and number (or set of numbers) for the attribute value. The only data types allowed are number and number set; no other data types can be specified. :type Expected: dict :param Expected: This is a legacy parameter. Use ConditionExpression instead. For more information, see Expected in the Amazon DynamoDB Developer Guide . (string) -- (dict) --Represents a condition to be compared with an attribute value. This condition can be used with DeleteItem , PutItem or UpdateItem operations; if the comparison evaluates to true, the operation succeeds; if not, the operation fails. You can use ExpectedAttributeValue in one of two different ways: Use AttributeValueList to specify one or more values to compare against an attribute. Use ComparisonOperator to specify how you want to perform the comparison. 
If the comparison evaluates to true, then the conditional operation succeeds. Use Value to specify a value that DynamoDB will compare against an attribute. If the values match, then ExpectedAttributeValue evaluates to true and the conditional operation succeeds. Optionally, you can also set Exists to false, indicating that you do not expect to find the attribute value in the table. In this case, the conditional operation succeeds only if the comparison evaluates to false. Value and Exists are incompatible with AttributeValueList and ComparisonOperator . Note that if you use both sets of parameters at once, DynamoDB will return a ValidationException exception. Value (dict) --Represents the data for the expected attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . S (string) --An attribute of type String. For example: 'S': 'Hello' N (string) --An attribute of type Number. For example: 'N': '123.45' Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. B (bytes) --An attribute of type Binary. For example: 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk' SS (list) --An attribute of type String Set. For example: 'SS': ['Giraffe', 'Hippo' ,'Zebra'] (string) -- NS (list) --An attribute of type Number Set. For example: 'NS': ['42.2', '-19', '7.5', '3.14'] Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. (string) -- BS (list) --An attribute of type Binary Set. For example: 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k='] (bytes) -- M (dict) --An attribute of type Map. 
For example: 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}} (string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . L (list) --An attribute of type List. For example: 'L': ['Cookies', 'Coffee', 3.14159] (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . NULL (boolean) --An attribute of type Null. For example: 'NULL': true BOOL (boolean) --An attribute of type Boolean. For example: 'BOOL': true Exists (boolean) --Causes DynamoDB to evaluate the value before attempting a conditional operation: If Exists is true , DynamoDB will check to see if that attribute value already exists in the table. If it is found, then the operation succeeds. If it is not found, the operation fails with a ConditionalCheckFailedException . If Exists is false , DynamoDB assumes that the attribute value does not exist in the table. If in fact the value does not exist, then the assumption is valid and the operation succeeds. If the value is found, despite the assumption that it does not exist, the operation fails with a ConditionalCheckFailedException . The default setting for Exists is true . If you supply a Value all by itself, DynamoDB assumes the attribute exists: You don't have to set Exists to true , because it is implied. DynamoDB returns a ValidationException if: Exists is true but there is no Value to check. (You expect a value to exist, but don't specify what that value is.) Exists is false but you also provide a Value . (You cannot expect an attribute to have a value, while also expecting it not to exist.) ComparisonOperator (string) --A comparator for evaluating attributes in the AttributeValueList . 
For example, equals, greater than, less than, etc. The following comparison operators are available: EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN The following are descriptions of each comparison operator. EQ : Equal. EQ is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} . NE : Not equal. NE is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} . LE : Less than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} . LT : Less than. AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} . GE : Greater than or equal. 
AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} . GT : Greater than. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} . NOT_NULL : The attribute exists. NOT_NULL is supported for all data types, including lists and maps. Note This operator tests for the existence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NOT_NULL , the result is a Boolean true . This result is because the attribute 'a ' exists; its data type is not relevant to the NOT_NULL comparison operator. NULL : The attribute does not exist. NULL is supported for all data types, including lists and maps. Note This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NULL , the result is a Boolean false . This is because the attribute 'a ' exists; its data type is not relevant to the NULL comparison operator. CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. 
If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it finds an exact match with any member of the set. CONTAINS is supported for lists: When evaluating 'a CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list. NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it does not find an exact match with any member of the set. NOT_CONTAINS is supported for lists: When evaluating 'a NOT CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list. BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type). IN : Checks for matching elements in a list. AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary. These attributes are compared against an existing attribute of an item. If any elements of the input are equal to the item attribute, the expression evaluates to true. BETWEEN : Greater than or equal to the first value, and less than or equal to the second value. AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). 
A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not compare to {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} AttributeValueList (list) --One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used. For type Number, value comparisons are numeric. String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A , and a is greater than B . For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters . For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values. For information on specifying data types in JSON, see JSON Data Format in the Amazon DynamoDB Developer Guide . (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . S (string) --An attribute of type String. For example: 'S': 'Hello' N (string) --An attribute of type Number. For example: 'N': '123.45' Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. B (bytes) --An attribute of type Binary. For example: 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk' SS (list) --An attribute of type String Set. For example: 'SS': ['Giraffe', 'Hippo' ,'Zebra'] (string) -- NS (list) --An attribute of type Number Set. 
For example: 'NS': ['42.2', '-19', '7.5', '3.14'] Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. (string) -- BS (list) --An attribute of type Binary Set. For example: 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k='] (bytes) -- M (dict) --An attribute of type Map. For example: 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}} (string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . L (list) --An attribute of type List. For example: 'L': ['Cookies', 'Coffee', 3.14159] (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . NULL (boolean) --An attribute of type Null. For example: 'NULL': true BOOL (boolean) --An attribute of type Boolean. For example: 'BOOL': true :type ConditionalOperator: string :param ConditionalOperator: This is a legacy parameter. Use ConditionExpression instead. For more information, see ConditionalOperator in the Amazon DynamoDB Developer Guide . :type ReturnValues: string :param ReturnValues: Use ReturnValues if you want to get the item attributes as they appeared either before or after they were updated. For UpdateItem , the valid values are: NONE - If ReturnValues is not specified, or if its value is NONE , then nothing is returned. (This setting is the default for ReturnValues .) ALL_OLD - Returns all of the attributes of the item, as they appeared before the UpdateItem operation. UPDATED_OLD - Returns only the updated attributes, as they appeared before the UpdateItem operation. 
ALL_NEW - Returns all of the attributes of the item, as they appear after the UpdateItem operation. UPDATED_NEW - Returns only the updated attributes, as they appear after the UpdateItem operation. There is no additional cost associated with requesting a return value aside from the small network and processing overhead of receiving a larger response. No Read Capacity Units are consumed. Values returned are strongly consistent :type ReturnConsumedCapacity: string :param ReturnConsumedCapacity: Determines the level of detail about provisioned throughput consumption that is returned in the response: INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed. Note that some operations, such as GetItem and BatchGetItem , do not access any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s). TOTAL - The response includes only the aggregate ConsumedCapacity for the operation. NONE - No ConsumedCapacity details are included in the response. :type ReturnItemCollectionMetrics: string :param ReturnItemCollectionMetrics: Determines whether item collection metrics are returned. If set to SIZE , the response includes statistics about item collections, if any, that were modified during the operation are returned in the response. If set to NONE (the default), no statistics are returned. :type UpdateExpression: string :param UpdateExpression: An expression that defines one or more attributes to be updated, the action to be performed on them, and new value(s) for them. The following action values are available for UpdateExpression . SET - Adds one or more attributes and values to an item. If any of these attribute already exist, they are replaced by the new values. You can also use SET to add or subtract from an attribute that is of type Number. 
For example: SET myNum = myNum + :val SET supports the following functions: if_not_exists (path, operand) - if the item does not contain an attribute at the specified path, then if_not_exists evaluates to operand; otherwise, it evaluates to path. You can use this function to avoid overwriting an attribute that may already be present in the item. list_append (operand, operand) - evaluates to a list with a new element added to it. You can append the new element to the start or the end of the list by reversing the order of the operands. These function names are case-sensitive. REMOVE - Removes one or more attributes from an item. ADD - Adds the specified value to the item, if the attribute does not already exist. If the attribute does exist, then the behavior of ADD depends on the data type of the attribute: If the existing attribute is a number, and if Value is also a number, then Value is mathematically added to the existing attribute. If Value is a negative number, then it is subtracted from the existing attribute. Note If you use ADD to increment or decrement a number value for an item that doesn't exist before the update, DynamoDB uses 0 as the initial value. Similarly, if you use ADD for an existing item to increment or decrement an attribute value that doesn't exist before the update, DynamoDB uses 0 as the initial value. For example, suppose that the item you want to update doesn't have an attribute named itemcount , but you decide to ADD the number 3 to this attribute anyway. DynamoDB will create the itemcount attribute, set its initial value to 0 , and finally add 3 to it. The result will be a new itemcount attribute in the item, with a value of 3 . If the existing data type is a set and if Value is also a set, then Value is added to the existing set. For example, if the attribute value is the set [1,2] , and the ADD action specified [3] , then the final attribute value is [1,2,3] . 
An error occurs if an ADD action is specified for a set attribute and the attribute type specified does not match the existing set type. Both sets must have the same primitive data type. For example, if the existing data type is a set of strings, the Value must also be a set of strings. Warning The ADD action only supports Number and set data types. In addition, ADD can only be used on top-level attributes, not nested attributes. DELETE - Deletes an element from a set. If a set of values is specified, then those values are subtracted from the old set. For example, if the attribute value was the set [a,b,c] and the DELETE action specifies [a,c] , then the final attribute value is [b] . Specifying an empty set is an error. Warning The DELETE action only supports set data types. In addition, DELETE can only be used on top-level attributes, not nested attributes. You can have many actions in a single expression, such as the following: SET a=:value1, b=:value2 DELETE :value3, :value4, :value5 For more information on update expressions, see Modifying Items and Attributes in the Amazon DynamoDB Developer Guide . :type ConditionExpression: string :param ConditionExpression: A condition that must be satisfied in order for a conditional update to succeed. An expression can contain any of the following: Functions: attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size These function names are case-sensitive. Comparison operators: = | | | | = | = | BETWEEN | IN Logical operators: AND | OR | NOT For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide . :type ExpressionAttributeNames: dict :param ExpressionAttributeNames: One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames : To access an attribute whose name conflicts with a DynamoDB reserved word. 
To create a placeholder for repeating occurrences of an attribute name in an expression. To prevent special characters in an attribute name from being misinterpreted in an expression. Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name: Percentile The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide ). To work around this, you could specify the following for ExpressionAttributeNames : {'#P':'Percentile'} You could then use this substitution in an expression, as in this example: #P = :val Note Tokens that begin with the : character are expression attribute values , which are placeholders for the actual value at runtime. For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide . (string) -- (string) -- :type ExpressionAttributeValues: dict :param ExpressionAttributeValues: One or more values that can be substituted in an expression. Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following: Available | Backordered | Discontinued You would first need to specify ExpressionAttributeValues as follows: { ':avail':{'S':'Available'}, ':back':{'S':'Backordered'}, ':disc':{'S':'Discontinued'} } You could then use these values in an expression, such as this: ProductStatus IN (:avail, :back, :disc) For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide . (string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. 
For more information, see Data Types in the Amazon DynamoDB Developer Guide . S (string) --An attribute of type String. For example: 'S': 'Hello' N (string) --An attribute of type Number. For example: 'N': '123.45' Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. B (bytes) --An attribute of type Binary. For example: 'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk' SS (list) --An attribute of type String Set. For example: 'SS': ['Giraffe', 'Hippo' ,'Zebra'] (string) -- NS (list) --An attribute of type Number Set. For example: 'NS': ['42.2', '-19', '7.5', '3.14'] Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations. (string) -- BS (list) --An attribute of type Binary Set. For example: 'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k='] (bytes) -- M (dict) --An attribute of type Map. For example: 'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}} (string) -- (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . L (list) --An attribute of type List. For example: 'L': ['Cookies', 'Coffee', 3.14159] (dict) --Represents the data for an attribute. Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself. For more information, see Data Types in the Amazon DynamoDB Developer Guide . NULL (boolean) --An attribute of type Null. For example: 'NULL': true BOOL (boolean) --An attribute of type Boolean. 
For example: 'BOOL': true :rtype: dict :return: { 'Attributes': { 'string': { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False } }, 'ConsumedCapacity': { 'TableName': 'string', 'CapacityUnits': 123.0, 'Table': { 'CapacityUnits': 123.0 }, 'LocalSecondaryIndexes': { 'string': { 'CapacityUnits': 123.0 } }, 'GlobalSecondaryIndexes': { 'string': { 'CapacityUnits': 123.0 } } }, 'ItemCollectionMetrics': { 'ItemCollectionKey': { 'string': { 'S': 'string', 'N': 'string', 'B': b'bytes', 'SS': [ 'string', ], 'NS': [ 'string', ], 'BS': [ b'bytes', ], 'M': { 'string': {'... recursive ...'} }, 'L': [ {'... recursive ...'}, ], 'NULL': True|False, 'BOOL': True|False } }, 'SizeEstimateRangeGB': [ 123.0, ] } } :returns: (string) --
[ "Edits", "an", "existing", "item", "s", "attributes", "or", "adds", "a", "new", "item", "to", "the", "table", "if", "it", "does", "not", "already", "exist", ".", "You", "can", "put", "delete", "or", "add", "attribute", "values", ".", "You", "can", "als...
python
train
markokr/rarfile
rarfile.py
https://github.com/markokr/rarfile/blob/2704344e8d7a1658c96c8ed8f449d7ba01bedea3/rarfile.py#L2109-L2118
def _skip(self, cnt): """Read and discard data""" while cnt > 0: if cnt > 8192: buf = self.read(8192) else: buf = self.read(cnt) if not buf: break cnt -= len(buf)
[ "def", "_skip", "(", "self", ",", "cnt", ")", ":", "while", "cnt", ">", "0", ":", "if", "cnt", ">", "8192", ":", "buf", "=", "self", ".", "read", "(", "8192", ")", "else", ":", "buf", "=", "self", ".", "read", "(", "cnt", ")", "if", "not", ...
Read and discard data
[ "Read", "and", "discard", "data" ]
python
train
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/FortranCommon.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/FortranCommon.py#L88-L98
def ComputeFortranSuffixes(suffixes, ppsuffixes): """suffixes are fortran source files, and ppsuffixes the ones to be pre-processed. Both should be sequences, not strings.""" assert len(suffixes) > 0 s = suffixes[0] sup = s.upper() upper_suffixes = [_.upper() for _ in suffixes] if SCons.Util.case_sensitive_suffixes(s, sup): ppsuffixes.extend(upper_suffixes) else: suffixes.extend(upper_suffixes)
[ "def", "ComputeFortranSuffixes", "(", "suffixes", ",", "ppsuffixes", ")", ":", "assert", "len", "(", "suffixes", ")", ">", "0", "s", "=", "suffixes", "[", "0", "]", "sup", "=", "s", ".", "upper", "(", ")", "upper_suffixes", "=", "[", "_", ".", "upper...
suffixes are fortran source files, and ppsuffixes the ones to be pre-processed. Both should be sequences, not strings.
[ "suffixes", "are", "fortran", "source", "files", "and", "ppsuffixes", "the", "ones", "to", "be", "pre", "-", "processed", ".", "Both", "should", "be", "sequences", "not", "strings", "." ]
python
train
lorien/grab
grab/spider/cache_backend/mongodb.py
https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/spider/cache_backend/mongodb.py#L45-L52
def get_item(self, url): """ Returned item should have specific interface. See module docstring. """ _hash = self.build_hash(url) query = {'_id': _hash} return self.dbase.cache.find_one(query)
[ "def", "get_item", "(", "self", ",", "url", ")", ":", "_hash", "=", "self", ".", "build_hash", "(", "url", ")", "query", "=", "{", "'_id'", ":", "_hash", "}", "return", "self", ".", "dbase", ".", "cache", ".", "find_one", "(", "query", ")" ]
Returned item should have specific interface. See module docstring.
[ "Returned", "item", "should", "have", "specific", "interface", ".", "See", "module", "docstring", "." ]
python
train
angr/angr
angr/knowledge_plugins/functions/function.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/knowledge_plugins/functions/function.py#L820-L846
def _call_to(self, from_node, to_func, ret_node, stmt_idx=None, ins_addr=None, return_to_outside=False): """ Registers an edge between the caller basic block and callee function. :param from_addr: The basic block that control flow leaves during the transition. :type from_addr: angr.knowledge.CodeNode :param to_func: The function that we are calling :type to_func: Function :param ret_node The basic block that control flow should return to after the function call. :type to_func: angr.knowledge.CodeNode or None :param stmt_idx: Statement ID of this call. :type stmt_idx: int, str or None :param ins_addr: Instruction address of this call. :type ins_addr: int or None """ self._register_nodes(True, from_node) if to_func.is_syscall: self.transition_graph.add_edge(from_node, to_func, type='syscall', stmt_idx=stmt_idx, ins_addr=ins_addr) else: self.transition_graph.add_edge(from_node, to_func, type='call', stmt_idx=stmt_idx, ins_addr=ins_addr) if ret_node is not None: self._fakeret_to(from_node, ret_node, to_outside=return_to_outside) self._local_transition_graph = None
[ "def", "_call_to", "(", "self", ",", "from_node", ",", "to_func", ",", "ret_node", ",", "stmt_idx", "=", "None", ",", "ins_addr", "=", "None", ",", "return_to_outside", "=", "False", ")", ":", "self", ".", "_register_nodes", "(", "True", ",", "from_node", ...
Registers an edge between the caller basic block and callee function. :param from_addr: The basic block that control flow leaves during the transition. :type from_addr: angr.knowledge.CodeNode :param to_func: The function that we are calling :type to_func: Function :param ret_node The basic block that control flow should return to after the function call. :type to_func: angr.knowledge.CodeNode or None :param stmt_idx: Statement ID of this call. :type stmt_idx: int, str or None :param ins_addr: Instruction address of this call. :type ins_addr: int or None
[ "Registers", "an", "edge", "between", "the", "caller", "basic", "block", "and", "callee", "function", "." ]
python
train
cloudnull/cloudlib
cloudlib/http.py
https://github.com/cloudnull/cloudlib/blob/5038111ce02521caa2558117e3bae9e1e806d315/cloudlib/http.py#L104-L116
def _get_url(url): """Returns a URL string. If the ``url`` parameter is a ParsedResult from `urlparse` the full url will be unparsed and made into a string. Otherwise the ``url`` parameter is returned as is. :param url: ``str`` || ``object`` """ if isinstance(url, urlparse.ParseResult): return urlparse.urlunparse(url) else: return url
[ "def", "_get_url", "(", "url", ")", ":", "if", "isinstance", "(", "url", ",", "urlparse", ".", "ParseResult", ")", ":", "return", "urlparse", ".", "urlunparse", "(", "url", ")", "else", ":", "return", "url" ]
Returns a URL string. If the ``url`` parameter is a ParsedResult from `urlparse` the full url will be unparsed and made into a string. Otherwise the ``url`` parameter is returned as is. :param url: ``str`` || ``object``
[ "Returns", "a", "URL", "string", "." ]
python
train
oceanprotocol/squid-py
squid_py/keeper/keeper.py
https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/keeper/keeper.py#L68-L81
def get_network_name(network_id): """ Return the keeper network name based on the current ethereum network id. Return `development` for every network id that is not mapped. :param network_id: Network id, int :return: Network name, str """ if os.environ.get('KEEPER_NETWORK_NAME'): logging.debug('keeper network name overridden by an environment variable: {}'.format( os.environ.get('KEEPER_NETWORK_NAME'))) return os.environ.get('KEEPER_NETWORK_NAME') return Keeper._network_name_map.get(network_id, Keeper.DEFAULT_NETWORK_NAME)
[ "def", "get_network_name", "(", "network_id", ")", ":", "if", "os", ".", "environ", ".", "get", "(", "'KEEPER_NETWORK_NAME'", ")", ":", "logging", ".", "debug", "(", "'keeper network name overridden by an environment variable: {}'", ".", "format", "(", "os", ".", ...
Return the keeper network name based on the current ethereum network id. Return `development` for every network id that is not mapped. :param network_id: Network id, int :return: Network name, str
[ "Return", "the", "keeper", "network", "name", "based", "on", "the", "current", "ethereum", "network", "id", ".", "Return", "development", "for", "every", "network", "id", "that", "is", "not", "mapped", "." ]
python
train
choderalab/pymbar
pymbar/mbar.py
https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/mbar.py#L407-L459
def computeOverlap(self): """ Compute estimate of overlap matrix between the states. Returns ------- result_vals : dictonary Possible keys in the result_vals dictionary: 'scalar' : np.ndarray, float, shape=(K, K) One minus the largest nontrival eigenvalue (largest is 1 or -1) 'eigenvalues' : np.ndarray, float, shape=(K) The sorted (descending) eigenvalues of the overlap matrix. 'O' : np.ndarray, float, shape=(K, K) Estimated state overlap matrix: O[i,j] is an estimate of the probability of observing a sample from state i in state j Notes ----- .. code-block:: none W.T * W \approx \int (p_i p_j /\sum_k N_k p_k)^2 \sum_k N_k p_k dq^N = \int (p_i p_j /\sum_k N_k p_k) dq^N Multiplying elementwise by N_i, the elements of row i give the probability for a sample from state i being observed in state j. Examples -------- >>> from pymbar import testsystems >>> (x_kn, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn') >>> mbar = MBAR(u_kn, N_k) >>> results = mbar.computeOverlap() """ W = np.matrix(self.getWeights(), np.float64) O = np.multiply(self.N_k, W.T * W) (eigenvals, eigevec) = linalg.eig(O) # sort in descending order eigenvals = np.sort(eigenvals)[::-1] overlap_scalar = 1 - eigenvals[1] # 1 minus the second largest eigenvalue results_vals = dict() results_vals['scalar'] = overlap_scalar results_vals['eigenvalues'] = eigenvals results_vals['matrix'] = O return results_vals
[ "def", "computeOverlap", "(", "self", ")", ":", "W", "=", "np", ".", "matrix", "(", "self", ".", "getWeights", "(", ")", ",", "np", ".", "float64", ")", "O", "=", "np", ".", "multiply", "(", "self", ".", "N_k", ",", "W", ".", "T", "*", "W", "...
Compute estimate of overlap matrix between the states. Returns ------- result_vals : dictonary Possible keys in the result_vals dictionary: 'scalar' : np.ndarray, float, shape=(K, K) One minus the largest nontrival eigenvalue (largest is 1 or -1) 'eigenvalues' : np.ndarray, float, shape=(K) The sorted (descending) eigenvalues of the overlap matrix. 'O' : np.ndarray, float, shape=(K, K) Estimated state overlap matrix: O[i,j] is an estimate of the probability of observing a sample from state i in state j Notes ----- .. code-block:: none W.T * W \approx \int (p_i p_j /\sum_k N_k p_k)^2 \sum_k N_k p_k dq^N = \int (p_i p_j /\sum_k N_k p_k) dq^N Multiplying elementwise by N_i, the elements of row i give the probability for a sample from state i being observed in state j. Examples -------- >>> from pymbar import testsystems >>> (x_kn, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn') >>> mbar = MBAR(u_kn, N_k) >>> results = mbar.computeOverlap()
[ "Compute", "estimate", "of", "overlap", "matrix", "between", "the", "states", "." ]
python
train
Miserlou/Zappa
zappa/handler.py
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/handler.py#L343-L598
def handler(self, event, context): """ An AWS Lambda function which parses specific API Gateway input into a WSGI request, feeds it to our WSGI app, procceses the response, and returns that back to the API Gateway. """ settings = self.settings # If in DEBUG mode, log all raw incoming events. if settings.DEBUG: logger.debug('Zappa Event: {}'.format(event)) # Set any API Gateway defined Stage Variables # as env vars if event.get('stageVariables'): for key in event['stageVariables'].keys(): os.environ[str(key)] = event['stageVariables'][key] # This is the result of a keep alive, recertify # or scheduled event. if event.get('detail-type') == u'Scheduled Event': whole_function = event['resources'][0].split('/')[-1].split('-')[-1] # This is a scheduled function. if '.' in whole_function: app_function = self.import_module_and_get_function(whole_function) # Execute the function! return self.run_function(app_function, event, context) # Else, let this execute as it were. # This is a direct command invocation. elif event.get('command', None): whole_function = event['command'] app_function = self.import_module_and_get_function(whole_function) result = self.run_function(app_function, event, context) print("Result of %s:" % whole_function) print(result) return result # This is a direct, raw python invocation. # It's _extremely_ important we don't allow this event source # to be overridden by unsanitized, non-admin user input. elif event.get('raw_command', None): raw_command = event['raw_command'] exec(raw_command) return # This is a Django management command invocation. elif event.get('manage', None): from django.core import management try: # Support both for tests from zappa.ext.django_zappa import get_django_wsgi except ImportError as e: # pragma: no cover from django_zappa_app import get_django_wsgi # Get the Django WSGI app from our extension # We don't actually need the function, # but we do need to do all of the required setup for it. 
app_function = get_django_wsgi(self.settings.DJANGO_SETTINGS) # Couldn't figure out how to get the value into stdout with StringIO.. # Read the log for now. :[] management.call_command(*event['manage'].split(' ')) return {} # This is an AWS-event triggered invocation. elif event.get('Records', None): records = event.get('Records') result = None whole_function = self.get_function_for_aws_event(records[0]) if whole_function: app_function = self.import_module_and_get_function(whole_function) result = self.run_function(app_function, event, context) logger.debug(result) else: logger.error("Cannot find a function to process the triggered event.") return result # this is an AWS-event triggered from Lex bot's intent elif event.get('bot'): result = None whole_function = self.get_function_from_bot_intent_trigger(event) if whole_function: app_function = self.import_module_and_get_function(whole_function) result = self.run_function(app_function, event, context) logger.debug(result) else: logger.error("Cannot find a function to process the triggered event.") return result # This is an API Gateway authorizer event elif event.get('type') == u'TOKEN': whole_function = self.settings.AUTHORIZER_FUNCTION if whole_function: app_function = self.import_module_and_get_function(whole_function) policy = self.run_function(app_function, event, context) return policy else: logger.error("Cannot find a function to process the authorization request.") raise Exception('Unauthorized') # This is an AWS Cognito Trigger Event elif event.get('triggerSource', None): triggerSource = event.get('triggerSource') whole_function = self.get_function_for_cognito_trigger(triggerSource) result = event if whole_function: app_function = self.import_module_and_get_function(whole_function) result = self.run_function(app_function, event, context) logger.debug(result) else: logger.error("Cannot find a function to handle cognito trigger {}".format(triggerSource)) return result # Normal web app flow try: # Timing 
time_start = datetime.datetime.now() # This is a normal HTTP request if event.get('httpMethod', None): script_name = '' is_elb_context = False headers = merge_headers(event) if event.get('requestContext', None) and event['requestContext'].get('elb', None): # Related: https://github.com/Miserlou/Zappa/issues/1715 # inputs/outputs for lambda loadbalancer # https://docs.aws.amazon.com/elasticloadbalancing/latest/application/lambda-functions.html is_elb_context = True # host is lower-case when forwarded from ELB host = headers.get('host') # TODO: pathParameters is a first-class citizen in apigateway but not available without # some parsing work for ELB (is this parameter used for anything?) event['pathParameters'] = '' else: if headers: host = headers.get('Host') else: host = None logger.debug('host found: [{}]'.format(host)) if host: if 'amazonaws.com' in host: logger.debug('amazonaws found in host') # The path provided in th event doesn't include the # stage, so we must tell Flask to include the API # stage in the url it calculates. See https://github.com/Miserlou/Zappa/issues/1014 script_name = '/' + settings.API_STAGE else: # This is a test request sent from the AWS console if settings.DOMAIN: # Assume the requests received will be on the specified # domain. No special handling is required pass else: # Assume the requests received will be to the # amazonaws.com endpoint, so tell Flask to include the # API stage script_name = '/' + settings.API_STAGE base_path = getattr(settings, 'BASE_PATH', None) # Create the environment for WSGI and handle the request environ = create_wsgi_request( event, script_name=script_name, base_path=base_path, trailing_slash=self.trailing_slash, binary_support=settings.BINARY_SUPPORT, context_header_mappings=settings.CONTEXT_HEADER_MAPPINGS ) # We are always on https on Lambda, so tell our wsgi app that. 
environ['HTTPS'] = 'on' environ['wsgi.url_scheme'] = 'https' environ['lambda.context'] = context environ['lambda.event'] = event # Execute the application with Response.from_app(self.wsgi_app, environ) as response: # This is the object we're going to return. # Pack the WSGI response into our special dictionary. zappa_returndict = dict() # Issue #1715: ALB support. ALB responses must always include # base64 encoding and status description if is_elb_context: zappa_returndict.setdefault('isBase64Encoded', False) zappa_returndict.setdefault('statusDescription', response.status) if response.data: if settings.BINARY_SUPPORT: if not response.mimetype.startswith("text/") \ or response.mimetype != "application/json": zappa_returndict['body'] = base64.b64encode(response.data).decode('utf-8') zappa_returndict["isBase64Encoded"] = True else: zappa_returndict['body'] = response.data else: zappa_returndict['body'] = response.get_data(as_text=True) zappa_returndict['statusCode'] = response.status_code if 'headers' in event: zappa_returndict['headers'] = {} for key, value in response.headers: zappa_returndict['headers'][key] = value if 'multiValueHeaders' in event: zappa_returndict['multiValueHeaders'] = {} for key, value in response.headers: zappa_returndict['multiValueHeaders'][key] = response.headers.getlist(key) # Calculate the total response time, # and log it in the Common Log format. time_end = datetime.datetime.now() delta = time_end - time_start response_time_ms = delta.total_seconds() * 1000 response.content = response.data common_log(environ, response, response_time=response_time_ms) return zappa_returndict except Exception as e: # pragma: no cover # Print statements are visible in the logs either way print(e) exc_info = sys.exc_info() message = ('An uncaught exception happened while servicing this request. ' 'You can investigate this with the `zappa tail` command.') # If we didn't even build an app_module, just raise. 
if not settings.DJANGO_SETTINGS: try: self.app_module except NameError as ne: message = 'Failed to import module: {}'.format(ne.message) # Call exception handler for unhandled exceptions exception_handler = self.settings.EXCEPTION_HANDLER self._process_exception(exception_handler=exception_handler, event=event, context=context, exception=e) # Return this unspecified exception as a 500, using template that API Gateway expects. content = collections.OrderedDict() content['statusCode'] = 500 body = {'message': message} if settings.DEBUG: # only include traceback if debug is on. body['traceback'] = traceback.format_exception(*exc_info) # traceback as a list for readability. content['body'] = json.dumps(str(body), sort_keys=True, indent=4) return content
[ "def", "handler", "(", "self", ",", "event", ",", "context", ")", ":", "settings", "=", "self", ".", "settings", "# If in DEBUG mode, log all raw incoming events.", "if", "settings", ".", "DEBUG", ":", "logger", ".", "debug", "(", "'Zappa Event: {}'", ".", "form...
An AWS Lambda function which parses specific API Gateway input into a WSGI request, feeds it to our WSGI app, procceses the response, and returns that back to the API Gateway.
[ "An", "AWS", "Lambda", "function", "which", "parses", "specific", "API", "Gateway", "input", "into", "a", "WSGI", "request", "feeds", "it", "to", "our", "WSGI", "app", "procceses", "the", "response", "and", "returns", "that", "back", "to", "the", "API", "G...
python
train
ssato/python-anyconfig
src/anyconfig/processors.py
https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/processors.py#L243-L249
def register(self, *pclss): """ :param pclss: A list of :class:`Processor` or its children classes """ for pcls in pclss: if pcls.cid() not in self._processors: self._processors[pcls.cid()] = pcls
[ "def", "register", "(", "self", ",", "*", "pclss", ")", ":", "for", "pcls", "in", "pclss", ":", "if", "pcls", ".", "cid", "(", ")", "not", "in", "self", ".", "_processors", ":", "self", ".", "_processors", "[", "pcls", ".", "cid", "(", ")", "]", ...
:param pclss: A list of :class:`Processor` or its children classes
[ ":", "param", "pclss", ":", "A", "list", "of", ":", "class", ":", "Processor", "or", "its", "children", "classes" ]
python
train
gwpy/gwpy
gwpy/io/hdf5.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/hdf5.py#L46-L62
def with_read_hdf5(func): """Decorate an HDF5-reading function to open a filepath if needed ``func`` should be written to presume an `h5py.Group` as the first positional argument. """ @wraps(func) def decorated_func(fobj, *args, **kwargs): # pylint: disable=missing-docstring if not isinstance(fobj, h5py.HLObject): if isinstance(fobj, FILE_LIKE): fobj = fobj.name with h5py.File(fobj, 'r') as h5f: return func(h5f, *args, **kwargs) return func(fobj, *args, **kwargs) return decorated_func
[ "def", "with_read_hdf5", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "decorated_func", "(", "fobj", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=missing-docstring", "if", "not", "isinstance", "(", "fobj", ",", ...
Decorate an HDF5-reading function to open a filepath if needed ``func`` should be written to presume an `h5py.Group` as the first positional argument.
[ "Decorate", "an", "HDF5", "-", "reading", "function", "to", "open", "a", "filepath", "if", "needed" ]
python
train
igorcoding/asynctnt-queue
asynctnt_queue/tube.py
https://github.com/igorcoding/asynctnt-queue/blob/75719b2dd27e8314ae924aea6a7a85be8f48ecc5/asynctnt_queue/tube.py#L156-L166
async def peek(self, task_id): """ Get task without changing its state :param task_id: Task id :return: Task instance """ args = (task_id,) res = await self.conn.call(self.__funcs['peek'], args) return self._create_task(res.body)
[ "async", "def", "peek", "(", "self", ",", "task_id", ")", ":", "args", "=", "(", "task_id", ",", ")", "res", "=", "await", "self", ".", "conn", ".", "call", "(", "self", ".", "__funcs", "[", "'peek'", "]", ",", "args", ")", "return", "self", ".",...
Get task without changing its state :param task_id: Task id :return: Task instance
[ "Get", "task", "without", "changing", "its", "state" ]
python
train
pywbem/pywbem
pywbem/tupletree.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/tupletree.py#L281-L426
def check_invalid_utf8_sequences(utf8_string, meaning, conn_id=None): """ Examine a UTF-8 encoded string and raise a `pywbem.XMLParseError` exception if the string contains invalid UTF-8 sequences (incorrectly encoded or ill-formed). This function works in both "wide" and "narrow" Unicode builds of Python and supports the full range of Unicode characters from U+0000 to U+10FFFF. This function is used to improve the error information raised from Python's `xml.dom.minidom` and `xml.sax` packages and should be called only after having catched an `ExpatError` from `xml.dom.minidom` or a `SAXParseException` from `xml.sax` . Parameters: utf8_string (:term:`byte string`): The UTF-8 encoded XML string to be examined. meaning (:term:`string`): Short text with meaning of the XML string, for messages in exceptions. conn_id (:term:`connection id`): Connection ID to be used in any exceptions that may be raised. Returns: :term:`unicode string`: The input string, converted to Unicode. Raises: TypeError: Invoked with incorrect Python object type for `utf8_xml`. pywbem.XMLParseError: `utf8_xml` contains invalid UTF-8 sequences. Notes on Unicode support in Python: (1) For internally representing Unicode characters in the unicode type, a "wide" Unicode build of Python uses UTF-32, while a "narrow" Unicode build uses UTF-16. The difference is visible to Python programs for Unicode characters assigned to code points above U+FFFF: The "narrow" build uses 2 characters (a surrogate pair) for them, while the "wide" build uses just 1 character. This affects all position- and length-oriented functions, such as `len()` or string slicing. 
(2) In a "wide" Unicode build of Python, the Unicode characters assigned to code points U+10000 to U+10FFFF are represented directly (using code points U+10000 to U+10FFFF) and the surrogate code points U+D800...U+DFFF are never used; in a "narrow" Unicode build of Python, the Unicode characters assigned to code points U+10000 to U+10FFFF are represented using pairs of the surrogate code points U+D800...U+DFFF. Notes on the Unicode code points U+D800...U+DFFF ("surrogate code points"): (1) These code points have no corresponding Unicode characters assigned, because they are reserved for surrogates in the UTF-16 encoding. (2) The UTF-8 encoding can technically represent the surrogate code points. ISO/IEC 10646 defines that a UTF-8 sequence containing the surrogate code points is ill-formed, but it is technically possible that such a sequence is in a UTF-8 encoded XML string. (3) The Python escapes ``\\u`` and ``\\U`` used in literal strings can represent the surrogate code points (as well as all other code points, regardless of whether they are assigned to Unicode characters). (4) The Python `encode()` and `decode()` functions successfully translate the surrogate code points back and forth for encoding UTF-8. For example, ``b'\\xed\\xb0\\x80'.decode("utf-8") = u'\\udc00'``. (5) Because Python supports the encoding and decoding of UTF-8 sequences also for the surrogate code points, the "narrow" Unicode build of Python can be (mis-)used to transport each surrogate unit separately encoded in (ill-formed) UTF-8. 
For example, code point U+10122 can be (illegally) created from a sequence of code points U+D800,U+DD22 represented in UTF-8: ``b'\\xED\\xA0\\x80\\xED\\xB4\\xA2'.decode("utf-8") = u'\\U00010122'`` while the correct UTF-8 sequence for this code point is: ``u'\\U00010122'.encode("utf-8") = b'\\xf0\\x90\\x84\\xa2'`` """ context_before = 16 # number of chars to print before any bad chars context_after = 16 # number of chars to print after any bad chars try: assert isinstance(utf8_string, six.binary_type) except AssertionError: raise TypeError( _format("utf8_string parameter is not a byte string, but has " "type {0}", type(utf8_string))) # Check for ill-formed UTF-8 sequences. This needs to be done # before the str type gets decoded to unicode, because afterwards # surrogates produced from ill-formed UTF-8 cannot be distinguished from # legally produced surrogates (for code points above U+FFFF). ifs_list = list() for m in _ILL_FORMED_UTF8_RE.finditer(utf8_string): ifs_pos = m.start(1) ifs_seq = m.group(1) ifs_list.append((ifs_pos, ifs_seq)) if ifs_list: exc_txt = _format("Ill-formed (surrogate) UTF-8 Byte sequences found " "in {0}:", meaning) for (ifs_pos, ifs_seq) in ifs_list: exc_txt += "\n At offset {0}:".format(ifs_pos) for ifs_ord in six.iterbytes(ifs_seq): exc_txt += " 0x{0:02X}".format(ifs_ord) cpos1 = max(ifs_pos - context_before, 0) cpos2 = min(ifs_pos + context_after, len(utf8_string)) exc_txt += _format(", CIM-XML snippet: {0!A}", utf8_string[cpos1:cpos2]) raise XMLParseError(exc_txt, conn_id=conn_id) # Check for incorrectly encoded UTF-8 sequences. # @ibm.13@ Simplified logic (removed loop). try: utf8_string_u = utf8_string.decode("utf-8") except UnicodeDecodeError as exc: # Only raised for incorrectly encoded UTF-8 sequences; technically # correct sequences that are ill-formed (e.g. representing surrogates) # do not cause this exception to be raised. 
# If more than one incorrectly encoded sequence is present, only # information about the first one is returned in the exception object. # Also, the stated reason (in _msg) is not always correct. # pylint: disable=unbalanced-tuple-unpacking unused_codec, unused_str, _p1, _p2, unused_msg = exc.args exc_txt = "Incorrectly encoded UTF-8 Byte sequences found in {0}". \ format(meaning) exc_txt += "\n At offset {0}:".format(_p1) ies_seq = utf8_string[_p1:_p2 + 1] for ies_ord in six.iterbytes(ies_seq): exc_txt += " 0x{0:02X}".format(ies_ord) cpos1 = max(_p1 - context_before, 0) cpos2 = min(_p2 + context_after, len(utf8_string)) exc_txt += _format(", CIM-XML snippet: {0!A}", utf8_string[cpos1:cpos2]) raise XMLParseError(exc_txt, conn_id=conn_id) return utf8_string_u
[ "def", "check_invalid_utf8_sequences", "(", "utf8_string", ",", "meaning", ",", "conn_id", "=", "None", ")", ":", "context_before", "=", "16", "# number of chars to print before any bad chars", "context_after", "=", "16", "# number of chars to print after any bad chars", "try...
Examine a UTF-8 encoded string and raise a `pywbem.XMLParseError` exception if the string contains invalid UTF-8 sequences (incorrectly encoded or ill-formed). This function works in both "wide" and "narrow" Unicode builds of Python and supports the full range of Unicode characters from U+0000 to U+10FFFF. This function is used to improve the error information raised from Python's `xml.dom.minidom` and `xml.sax` packages and should be called only after having catched an `ExpatError` from `xml.dom.minidom` or a `SAXParseException` from `xml.sax` . Parameters: utf8_string (:term:`byte string`): The UTF-8 encoded XML string to be examined. meaning (:term:`string`): Short text with meaning of the XML string, for messages in exceptions. conn_id (:term:`connection id`): Connection ID to be used in any exceptions that may be raised. Returns: :term:`unicode string`: The input string, converted to Unicode. Raises: TypeError: Invoked with incorrect Python object type for `utf8_xml`. pywbem.XMLParseError: `utf8_xml` contains invalid UTF-8 sequences. Notes on Unicode support in Python: (1) For internally representing Unicode characters in the unicode type, a "wide" Unicode build of Python uses UTF-32, while a "narrow" Unicode build uses UTF-16. The difference is visible to Python programs for Unicode characters assigned to code points above U+FFFF: The "narrow" build uses 2 characters (a surrogate pair) for them, while the "wide" build uses just 1 character. This affects all position- and length-oriented functions, such as `len()` or string slicing. (2) In a "wide" Unicode build of Python, the Unicode characters assigned to code points U+10000 to U+10FFFF are represented directly (using code points U+10000 to U+10FFFF) and the surrogate code points U+D800...U+DFFF are never used; in a "narrow" Unicode build of Python, the Unicode characters assigned to code points U+10000 to U+10FFFF are represented using pairs of the surrogate code points U+D800...U+DFFF. 
Notes on the Unicode code points U+D800...U+DFFF ("surrogate code points"): (1) These code points have no corresponding Unicode characters assigned, because they are reserved for surrogates in the UTF-16 encoding. (2) The UTF-8 encoding can technically represent the surrogate code points. ISO/IEC 10646 defines that a UTF-8 sequence containing the surrogate code points is ill-formed, but it is technically possible that such a sequence is in a UTF-8 encoded XML string. (3) The Python escapes ``\\u`` and ``\\U`` used in literal strings can represent the surrogate code points (as well as all other code points, regardless of whether they are assigned to Unicode characters). (4) The Python `encode()` and `decode()` functions successfully translate the surrogate code points back and forth for encoding UTF-8. For example, ``b'\\xed\\xb0\\x80'.decode("utf-8") = u'\\udc00'``. (5) Because Python supports the encoding and decoding of UTF-8 sequences also for the surrogate code points, the "narrow" Unicode build of Python can be (mis-)used to transport each surrogate unit separately encoded in (ill-formed) UTF-8. For example, code point U+10122 can be (illegally) created from a sequence of code points U+D800,U+DD22 represented in UTF-8: ``b'\\xED\\xA0\\x80\\xED\\xB4\\xA2'.decode("utf-8") = u'\\U00010122'`` while the correct UTF-8 sequence for this code point is: ``u'\\U00010122'.encode("utf-8") = b'\\xf0\\x90\\x84\\xa2'``
[ "Examine", "a", "UTF", "-", "8", "encoded", "string", "and", "raise", "a", "pywbem", ".", "XMLParseError", "exception", "if", "the", "string", "contains", "invalid", "UTF", "-", "8", "sequences", "(", "incorrectly", "encoded", "or", "ill", "-", "formed", "...
python
train
csirtgadgets/csirtgsdk-py
csirtgsdk/feed.py
https://github.com/csirtgadgets/csirtgsdk-py/blob/5a7ed9c5e6fa27170366ecbdef710dc80d537dc2/csirtgsdk/feed.py#L53-L65
def delete(self, user, name): """ Removes a feed :param user: feed username :param name: feed name :return: true/false """ uri = self.client.remote + '/users/{}/feeds/{}'.format(user, name) resp = self.client.session.delete(uri) return resp.status_code
[ "def", "delete", "(", "self", ",", "user", ",", "name", ")", ":", "uri", "=", "self", ".", "client", ".", "remote", "+", "'/users/{}/feeds/{}'", ".", "format", "(", "user", ",", "name", ")", "resp", "=", "self", ".", "client", ".", "session", ".", ...
Removes a feed :param user: feed username :param name: feed name :return: true/false
[ "Removes", "a", "feed" ]
python
train
ratt-ru/PyMORESANE
pymoresane/beam_fit.py
https://github.com/ratt-ru/PyMORESANE/blob/b024591ad0bbb69320d08841f28a2c27f62ae1af/pymoresane/beam_fit.py#L6-L69
def beam_fit(psf, cdelt1, cdelt2): """ The following contructs a restoring beam from the psf. This is accoplished by fitting an elliptical Gaussian to the central lobe of the PSF. INPUTS: psf (no default): Array containing the psf for the image in question. cdelt1, cdelt2 (no default): Header of the psf. """ if psf.shape>512: psf_slice = tuple([slice(psf.shape[0]/2-256, psf.shape[0]/2+256),slice(psf.shape[1]/2-256, psf.shape[1]/2+256)]) else: psf_slice = tuple([slice(0, psf.shape[0]),slice(0, psf.shape[1])]) psf_centre = psf[psf_slice]/np.max(psf[psf_slice]) max_location = np.unravel_index(np.argmax(psf_centre), psf_centre.shape) threshold_psf = np.where(psf_centre>0.5 , psf_centre, 0) labelled_psf, labels = ndimage.label(threshold_psf) extracted_primary_beam = np.where(labelled_psf==labelled_psf[max_location], psf_centre, 0) # Following creates row and column values of interest for the central PSF lobe and then selects those values # from the PSF using np.where. Additionally, the inputs for the fitting are created by reshaping the x,y, # and z data into columns. x = np.arange(-max_location[1],-max_location[1]+psf_centre.shape[1],1) y = np.arange(-max_location[0],-max_location[0]+psf_centre.shape[0],1) z = extracted_primary_beam gridx, gridy = np.meshgrid(x,-y) xyz = np.column_stack((gridx.reshape(-1,1),gridy.reshape(-1,1),z.reshape(-1,1,order="C"))) # Elliptical gaussian which can be fit to the central lobe of the PSF. xy must be an Nx2 array consisting of # pairs of row and column values for the region of interest. def ellipgauss(xy,A,xsigma,ysigma,theta): return A*np.exp(-1*(((xy[:,0]*np.cos(theta)-xy[:,1]*np.sin(theta))**2)/(2*(xsigma**2)) + ((xy[:,0]*np.sin(theta)+xy[:,1]*np.cos(theta))**2)/(2*(ysigma**2)))) # This command from scipy performs the fitting of the 2D gaussian, and returns the optimal coefficients in opt. opt = curve_fit(ellipgauss, xyz[:,0:2],xyz[:,2],(1,1,1,0))[0] # Following create the data for the new images. 
The cleanbeam has to be reshaped to reclaim it in 2D. clean_beam = np.zeros_like(psf) clean_beam[psf_slice] = ellipgauss(xyz[:,0:2],opt[0],opt[1],opt[2],opt[3]).reshape(psf_centre.shape,order="C") # Experimental - forces the beam to be normalised. This should be redundant, but helps when the PSF is bad. clean_beam = clean_beam/np.max(clean_beam) bmaj = 2*np.sqrt(2*np.log(2))*max(opt[1],opt[2])*cdelt1 bmin = 2*np.sqrt(2*np.log(2))*min(opt[1],opt[2])*cdelt2 bpa = np.degrees(opt[3])%360 - 90 beam_params = [abs(bmaj), abs(bmin), bpa] return clean_beam, beam_params
[ "def", "beam_fit", "(", "psf", ",", "cdelt1", ",", "cdelt2", ")", ":", "if", "psf", ".", "shape", ">", "512", ":", "psf_slice", "=", "tuple", "(", "[", "slice", "(", "psf", ".", "shape", "[", "0", "]", "/", "2", "-", "256", ",", "psf", ".", "...
The following contructs a restoring beam from the psf. This is accoplished by fitting an elliptical Gaussian to the central lobe of the PSF. INPUTS: psf (no default): Array containing the psf for the image in question. cdelt1, cdelt2 (no default): Header of the psf.
[ "The", "following", "contructs", "a", "restoring", "beam", "from", "the", "psf", ".", "This", "is", "accoplished", "by", "fitting", "an", "elliptical", "Gaussian", "to", "the", "central", "lobe", "of", "the", "PSF", "." ]
python
train
lambdalisue/django-permission
src/permission/utils/field_lookup.py
https://github.com/lambdalisue/django-permission/blob/580f7a1f857701d06ccf41163f188ac04fbc4fac/src/permission/utils/field_lookup.py#L6-L40
def field_lookup(obj, field_path): """ Lookup django model field in similar way of django query lookup. Args: obj (instance): Django Model instance field_path (str): '__' separated field path Example: >>> from django.db import model >>> from django.contrib.auth.models import User >>> class Article(models.Model): >>> title = models.CharField('title', max_length=200) >>> author = models.ForeignKey(User, null=True, >>> related_name='permission_test_articles_author') >>> editors = models.ManyToManyField(User, >>> related_name='permission_test_articles_editors') >>> user = User.objects.create_user('test_user', 'password') >>> article = Article.objects.create(title='test_article', ... author=user) >>> article.editors.add(user) >>> assert 'test_article' == field_lookup(article, 'title') >>> assert 'test_user' == field_lookup(article, 'user__username') >>> assert ['test_user'] == list(field_lookup(article, ... 'editors__username')) """ if hasattr(obj, 'iterator'): return (field_lookup(x, field_path) for x in obj.iterator()) elif isinstance(obj, Iterable): return (field_lookup(x, field_path) for x in iter(obj)) # split the path field_path = field_path.split('__', 1) if len(field_path) == 1: return getattr(obj, field_path[0], None) return field_lookup(field_lookup(obj, field_path[0]), field_path[1])
[ "def", "field_lookup", "(", "obj", ",", "field_path", ")", ":", "if", "hasattr", "(", "obj", ",", "'iterator'", ")", ":", "return", "(", "field_lookup", "(", "x", ",", "field_path", ")", "for", "x", "in", "obj", ".", "iterator", "(", ")", ")", "elif"...
Lookup django model field in similar way of django query lookup. Args: obj (instance): Django Model instance field_path (str): '__' separated field path Example: >>> from django.db import model >>> from django.contrib.auth.models import User >>> class Article(models.Model): >>> title = models.CharField('title', max_length=200) >>> author = models.ForeignKey(User, null=True, >>> related_name='permission_test_articles_author') >>> editors = models.ManyToManyField(User, >>> related_name='permission_test_articles_editors') >>> user = User.objects.create_user('test_user', 'password') >>> article = Article.objects.create(title='test_article', ... author=user) >>> article.editors.add(user) >>> assert 'test_article' == field_lookup(article, 'title') >>> assert 'test_user' == field_lookup(article, 'user__username') >>> assert ['test_user'] == list(field_lookup(article, ... 'editors__username'))
[ "Lookup", "django", "model", "field", "in", "similar", "way", "of", "django", "query", "lookup", "." ]
python
train
quantopian/pyfolio
pyfolio/perf_attrib.py
https://github.com/quantopian/pyfolio/blob/712716ab0cdebbec9fabb25eea3bf40e4354749d/pyfolio/perf_attrib.py#L471-L501
def plot_risk_exposures(exposures, ax=None, title='Daily risk factor exposures'): """ Parameters ---------- exposures : pd.DataFrame df indexed by datetime, with factors as columns - Example: momentum reversal dt 2017-01-01 -0.238655 0.077123 2017-01-02 0.821872 1.520515 ax : matplotlib.axes.Axes axes on which plots are made. if None, current axes will be used Returns ------- ax : matplotlib.axes.Axes """ if ax is None: ax = plt.gca() for col in exposures: ax.plot(exposures[col]) configure_legend(ax, change_colors=True) ax.set_ylabel('Factor exposures') ax.set_title(title) return ax
[ "def", "plot_risk_exposures", "(", "exposures", ",", "ax", "=", "None", ",", "title", "=", "'Daily risk factor exposures'", ")", ":", "if", "ax", "is", "None", ":", "ax", "=", "plt", ".", "gca", "(", ")", "for", "col", "in", "exposures", ":", "ax", "."...
Parameters ---------- exposures : pd.DataFrame df indexed by datetime, with factors as columns - Example: momentum reversal dt 2017-01-01 -0.238655 0.077123 2017-01-02 0.821872 1.520515 ax : matplotlib.axes.Axes axes on which plots are made. if None, current axes will be used Returns ------- ax : matplotlib.axes.Axes
[ "Parameters", "----------", "exposures", ":", "pd", ".", "DataFrame", "df", "indexed", "by", "datetime", "with", "factors", "as", "columns", "-", "Example", ":", "momentum", "reversal", "dt", "2017", "-", "01", "-", "01", "-", "0", ".", "238655", "0", "....
python
valid
knipknap/exscript
Exscript/emulators/command.py
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/emulators/command.py#L73-L96
def add_from_file(self, filename, handler_decorator=None): """ Wrapper around add() that reads the handlers from the file with the given name. The file is a Python script containing a list named 'commands' of tuples that map command names to handlers. :type filename: str :param filename: The name of the file containing the tuples. :type handler_decorator: function :param handler_decorator: A function that is used to decorate each of the handlers in the file. """ args = {} execfile(filename, args) commands = args.get('commands') if commands is None: raise Exception(filename + ' has no variable named "commands"') elif not hasattr(commands, '__iter__'): raise Exception(filename + ': "commands" is not iterable') for key, handler in commands: if handler_decorator: handler = handler_decorator(handler) self.add(key, handler)
[ "def", "add_from_file", "(", "self", ",", "filename", ",", "handler_decorator", "=", "None", ")", ":", "args", "=", "{", "}", "execfile", "(", "filename", ",", "args", ")", "commands", "=", "args", ".", "get", "(", "'commands'", ")", "if", "commands", ...
Wrapper around add() that reads the handlers from the file with the given name. The file is a Python script containing a list named 'commands' of tuples that map command names to handlers. :type filename: str :param filename: The name of the file containing the tuples. :type handler_decorator: function :param handler_decorator: A function that is used to decorate each of the handlers in the file.
[ "Wrapper", "around", "add", "()", "that", "reads", "the", "handlers", "from", "the", "file", "with", "the", "given", "name", ".", "The", "file", "is", "a", "Python", "script", "containing", "a", "list", "named", "commands", "of", "tuples", "that", "map", ...
python
train
daviddrysdale/python-phonenumbers
python/phonenumbers/shortnumberinfo.py
https://github.com/daviddrysdale/python-phonenumbers/blob/9cc5bb4ab5e661e70789b4c64bf7a9383c7bdc20/python/phonenumbers/shortnumberinfo.py#L295-L322
def _example_short_number_for_cost(region_code, cost): """Gets a valid short number for the specified cost category. Arguments: region_code -- the region for which an example short number is needed. cost -- the cost category of number that is needed. Returns a valid short number for the specified region and cost category. Returns an empty string when the metadata does not contain such information, or the cost is UNKNOWN_COST. """ metadata = PhoneMetadata.short_metadata_for_region(region_code) if metadata is None: return U_EMPTY_STRING desc = None if cost == ShortNumberCost.TOLL_FREE: desc = metadata.toll_free elif cost == ShortNumberCost.STANDARD_RATE: desc = metadata.standard_rate elif cost == ShortNumberCost.PREMIUM_RATE: desc = metadata.premium_rate else: # ShortNumberCost.UNKNOWN_COST numbers are computed by the process of # elimination from the other cost categoried. pass if desc is not None and desc.example_number is not None: return desc.example_number return U_EMPTY_STRING
[ "def", "_example_short_number_for_cost", "(", "region_code", ",", "cost", ")", ":", "metadata", "=", "PhoneMetadata", ".", "short_metadata_for_region", "(", "region_code", ")", "if", "metadata", "is", "None", ":", "return", "U_EMPTY_STRING", "desc", "=", "None", "...
Gets a valid short number for the specified cost category. Arguments: region_code -- the region for which an example short number is needed. cost -- the cost category of number that is needed. Returns a valid short number for the specified region and cost category. Returns an empty string when the metadata does not contain such information, or the cost is UNKNOWN_COST.
[ "Gets", "a", "valid", "short", "number", "for", "the", "specified", "cost", "category", "." ]
python
train
KennethWilke/PingdomLib
pingdomlib/pingdom.py
https://github.com/KennethWilke/PingdomLib/blob/3ed1e481f9c9d16b032558d62fb05c2166e162ed/pingdomlib/pingdom.py#L221-L226
def getCheck(self, checkid): """Returns a detailed description of a specified check.""" check = PingdomCheck(self, {'id': checkid}) check.getDetails() return check
[ "def", "getCheck", "(", "self", ",", "checkid", ")", ":", "check", "=", "PingdomCheck", "(", "self", ",", "{", "'id'", ":", "checkid", "}", ")", "check", ".", "getDetails", "(", ")", "return", "check" ]
Returns a detailed description of a specified check.
[ "Returns", "a", "detailed", "description", "of", "a", "specified", "check", "." ]
python
train
proteanhq/protean
src/protean/core/queryset.py
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/queryset.py#L243-L264
def update_all(self, *args, **kwargs): """Updates all objects with details given if they match a set of conditions supplied. This method forwards filters and updates directly to the repository. It does not instantiate entities and it does not trigger Entity callbacks or validations. Update values can be specified either as a dict, or keyword arguments. Returns the number of objects matched (which may not be equal to the number of objects updated if objects rows already have the new value). """ updated_item_count = 0 repository = repo_factory.get_repository(self._entity_cls) try: updated_item_count = repository.update_all(self._criteria, *args, **kwargs) except Exception: # FIXME Log Exception raise return updated_item_count
[ "def", "update_all", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "updated_item_count", "=", "0", "repository", "=", "repo_factory", ".", "get_repository", "(", "self", ".", "_entity_cls", ")", "try", ":", "updated_item_count", "=", "r...
Updates all objects with details given if they match a set of conditions supplied. This method forwards filters and updates directly to the repository. It does not instantiate entities and it does not trigger Entity callbacks or validations. Update values can be specified either as a dict, or keyword arguments. Returns the number of objects matched (which may not be equal to the number of objects updated if objects rows already have the new value).
[ "Updates", "all", "objects", "with", "details", "given", "if", "they", "match", "a", "set", "of", "conditions", "supplied", "." ]
python
train
ofa/django-bouncy
django_bouncy/views.py
https://github.com/ofa/django-bouncy/blob/a386dfa8c4ce59bd18978a3537c03cd6ad07bf06/django_bouncy/views.py#L38-L128
def endpoint(request): """Endpoint that SNS accesses. Includes logic verifying request""" # pylint: disable=too-many-return-statements,too-many-branches # In order to 'hide' the endpoint, all non-POST requests should return # the site's default HTTP404 if request.method != 'POST': raise Http404 # If necessary, check that the topic is correct if hasattr(settings, 'BOUNCY_TOPIC_ARN'): # Confirm that the proper topic header was sent if 'HTTP_X_AMZ_SNS_TOPIC_ARN' not in request.META: return HttpResponseBadRequest('No TopicArn Header') # Check to see if the topic is in the settings # Because you can have bounces and complaints coming from multiple # topics, BOUNCY_TOPIC_ARN is a list if (not request.META['HTTP_X_AMZ_SNS_TOPIC_ARN'] in settings.BOUNCY_TOPIC_ARN): return HttpResponseBadRequest('Bad Topic') # Load the JSON POST Body if isinstance(request.body, str): # requests return str in python 2.7 request_body = request.body else: # and return bytes in python 3.4 request_body = request.body.decode() try: data = json.loads(request_body) except ValueError: logger.warning('Notification Not Valid JSON: {}'.format(request_body)) return HttpResponseBadRequest('Not Valid JSON') # Ensure that the JSON we're provided contains all the keys we expect # Comparison code from http://stackoverflow.com/questions/1285911/ if not set(VITAL_NOTIFICATION_FIELDS) <= set(data): logger.warning('Request Missing Necessary Keys') return HttpResponseBadRequest('Request Missing Necessary Keys') # Ensure that the type of notification is one we'll accept if not data['Type'] in ALLOWED_TYPES: logger.info('Notification Type Not Known %s', data['Type']) return HttpResponseBadRequest('Unknown Notification Type') # Confirm that the signing certificate is hosted on a correct domain # AWS by default uses sns.{region}.amazonaws.com # On the off chance you need this to be a different domain, allow the # regex to be overridden in settings domain = urlparse(data['SigningCertURL']).netloc pattern = getattr( 
settings, 'BOUNCY_CERT_DOMAIN_REGEX', r"sns.[a-z0-9\-]+.amazonaws.com$" ) if not re.search(pattern, domain): logger.warning( 'Improper Certificate Location %s', data['SigningCertURL']) return HttpResponseBadRequest('Improper Certificate Location') # Verify that the notification is signed by Amazon if (getattr(settings, 'BOUNCY_VERIFY_CERTIFICATE', True) and not verify_notification(data)): logger.error('Verification Failure %s', ) return HttpResponseBadRequest('Improper Signature') # Send a signal to say a valid notification has been received signals.notification.send( sender='bouncy_endpoint', notification=data, request=request) # Handle subscription-based messages. if data['Type'] == 'SubscriptionConfirmation': # Allow the disabling of the auto-subscription feature if not getattr(settings, 'BOUNCY_AUTO_SUBSCRIBE', True): raise Http404 return approve_subscription(data) elif data['Type'] == 'UnsubscribeConfirmation': # We won't handle unsubscribe requests here. Return a 200 status code # so Amazon won't redeliver the request. If you want to remove this # endpoint, remove it either via the API or the AWS Console logger.info('UnsubscribeConfirmation Not Handled') return HttpResponse('UnsubscribeConfirmation Not Handled') try: message = json.loads(data['Message']) except ValueError: # This message is not JSON. But we need to return a 200 status code # so that Amazon doesn't attempt to deliver the message again logger.info('Non-Valid JSON Message Received') return HttpResponse('Message is not valid JSON') return process_message(message, data)
[ "def", "endpoint", "(", "request", ")", ":", "# pylint: disable=too-many-return-statements,too-many-branches", "# In order to 'hide' the endpoint, all non-POST requests should return", "# the site's default HTTP404", "if", "request", ".", "method", "!=", "'POST'", ":", "raise", "Ht...
Endpoint that SNS accesses. Includes logic verifying request
[ "Endpoint", "that", "SNS", "accesses", ".", "Includes", "logic", "verifying", "request" ]
python
train
ipazc/mtcnn
mtcnn/layer_factory.py
https://github.com/ipazc/mtcnn/blob/17029fe453a435f50c472ae2fd1c493341b5ede3/mtcnn/layer_factory.py#L179-L200
def new_fully_connected(self, name: str, output_count: int, relu=True, input_layer_name: str=None): """ Creates a new fully connected layer. :param name: name for the layer. :param output_count: number of outputs of the fully connected layer. :param relu: boolean flag to set if ReLu should be applied at the end of this layer. :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network. """ with tf.variable_scope(name): input_layer = self.__network.get_layer(input_layer_name) vectorized_input, dimension = self.vectorize_input(input_layer) weights = self.__make_var('weights', shape=[dimension, output_count]) biases = self.__make_var('biases', shape=[output_count]) operation = tf.nn.relu_layer if relu else tf.nn.xw_plus_b fc = operation(vectorized_input, weights, biases, name=name) self.__network.add_layer(name, layer_output=fc)
[ "def", "new_fully_connected", "(", "self", ",", "name", ":", "str", ",", "output_count", ":", "int", ",", "relu", "=", "True", ",", "input_layer_name", ":", "str", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ")", ":", "inpu...
Creates a new fully connected layer. :param name: name for the layer. :param output_count: number of outputs of the fully connected layer. :param relu: boolean flag to set if ReLu should be applied at the end of this layer. :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network.
[ "Creates", "a", "new", "fully", "connected", "layer", "." ]
python
train
craft-ai/craft-ai-client-python
craftai/client.py
https://github.com/craft-ai/craft-ai-client-python/blob/8bc1a9038511540930371aacfdde0f4040e08f24/craftai/client.py#L516-L552
def get_decision_tree(self, agent_id, timestamp=None, version=DEFAULT_DECISION_TREE_VERSION):
    """Get decision tree.

    :param str agent_id: the id of the agent to get the tree. It must
      be an str containing only characters in "a-zA-Z0-9_-" and must be
      between 1 and 36 characters.
    :param int timestamp: Optional. The decision tree is computed at this
      timestamp.
    :default timestamp: None, means that we get the tree computed with all
      its context history.
    :param version: version of the tree to get.
    :type version: str or int.
    :default version: default version of the tree.

    :return: decision tree.
    :rtype: dict.

    :raises CraftAiLongRequestTimeOutError: if the API doesn't get the tree
      in the time given by the configuration.
    """
    # Raises an error when agent_id is invalid
    self._check_agent_id(agent_id)

    retrieval_timeout = self._config["decisionTreeRetrievalTimeout"]
    if retrieval_timeout is False:
        # Retries disabled: a single attempt, server-side timeouts propagate.
        return self._get_decision_tree(agent_id, timestamp, version)

    start = current_time_ms()
    # Keep retrying until the client-side deadline elapses.
    while current_time_ms() - start <= retrieval_timeout:
        try:
            return self._get_decision_tree(agent_id, timestamp, version)
        except CraftAiLongRequestTimeOutError:
            # Server-side timeout: swallow it and try again.
            continue
    raise CraftAiLongRequestTimeOutError()
[ "def", "get_decision_tree", "(", "self", ",", "agent_id", ",", "timestamp", "=", "None", ",", "version", "=", "DEFAULT_DECISION_TREE_VERSION", ")", ":", "# Raises an error when agent_id is invalid", "self", ".", "_check_agent_id", "(", "agent_id", ")", "if", "self", ...
Get decision tree. :param str agent_id: the id of the agent to get the tree. It must be an str containing only characters in "a-zA-Z0-9_-" and must be between 1 and 36 characters. :param int timestamp: Optional. The decision tree is computed at this timestamp. :default timestamp: None, means that we get the tree computed with all its context history. :param version: version of the tree to get. :type version: str or int. :default version: default version of the tree. :return: decision tree. :rtype: dict. :raises CraftAiLongRequestTimeOutError: if the API doesn't get the tree in the time given by the configuration.
[ "Get", "decision", "tree", "." ]
python
train
jxtech/wechatpy
wechatpy/client/api/invoice.py
https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/client/api/invoice.py#L195-L210
def upload_pdf(self, pdf):
    """
    Upload the consumption voucher PDF of an electronic invoice.

    See https://mp.weixin.qq.com/wiki?id=mp1497082828_r1cI2 for details.

    :param pdf: the PDF file to upload, a file-like object
    :return: a 64-bit integer media id, used to associate the PDF with the
        invoice card when inserting it into the user's card package;
        valid for 3 days.
    """
    upload = {'pdf': pdf}
    # The API wraps the media id in a JSON payload; unwrap it for callers.
    return self._post(
        'platform/setpdf',
        files=upload,
        result_processor=lambda res: res['s_media_id'],
    )
[ "def", "upload_pdf", "(", "self", ",", "pdf", ")", ":", "return", "self", ".", "_post", "(", "'platform/setpdf'", ",", "files", "=", "{", "'pdf'", ":", "pdf", ",", "}", ",", "result_processor", "=", "lambda", "x", ":", "x", "[", "'s_media_id'", "]", ...
上传电子发票中的消费凭证 PDF 详情请参考 https://mp.weixin.qq.com/wiki?id=mp1497082828_r1cI2 :param pdf: 要上传的 PDF 文件,一个 File-object :return: 64位整数,在将发票卡券插入用户卡包时使用用于关联pdf和发票卡券。有效期为3天。
[ "上传电子发票中的消费凭证", "PDF", "详情请参考", "https", ":", "//", "mp", ".", "weixin", ".", "qq", ".", "com", "/", "wiki?id", "=", "mp1497082828_r1cI2" ]
python
train
bykof/billomapy
billomapy/billomapy.py
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L389-L402
def get_all_client_tags(self, params=None):
    """
    Retrieve every client tag by walking all result pages.

    Iterates until all elements have been fetched, so if the rate limit is
    exceeded an Exception is thrown and you will get nothing.

    :param params: search params
    :return: list
    """
    # Delegate the pagination loop to the shared page iterator.
    return self._iterate_through_pages(
        get_function=self.get_client_tags_per_page,
        resource=CLIENT_TAGS,
        params=params,
    )
[ "def", "get_all_client_tags", "(", "self", ",", "params", "=", "None", ")", ":", "return", "self", ".", "_iterate_through_pages", "(", "get_function", "=", "self", ".", "get_client_tags_per_page", ",", "resource", "=", "CLIENT_TAGS", ",", "*", "*", "{", "'para...
Get all client tags This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param params: search params :return: list
[ "Get", "all", "client", "tags", "This", "will", "iterate", "over", "all", "pages", "until", "it", "gets", "all", "elements", ".", "So", "if", "the", "rate", "limit", "exceeded", "it", "will", "throw", "an", "Exception", "and", "you", "will", "get", "noth...
python
train
google/apitools
apitools/base/protorpclite/util.py
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/util.py#L197-L201
def total_seconds(offset):
    """Backport of offset.total_seconds() from python 2.7+.

    :param offset: a ``datetime.timedelta`` instance.
    :return: the duration as a float number of seconds.
    """
    # Collapse the timedelta into whole microseconds first so a single
    # float division at the end preserves sub-second precision.
    whole_seconds = offset.days * 86400 + offset.seconds
    as_microseconds = whole_seconds * 1000000 + offset.microseconds
    return as_microseconds / 1000000.0
[ "def", "total_seconds", "(", "offset", ")", ":", "seconds", "=", "offset", ".", "days", "*", "24", "*", "60", "*", "60", "+", "offset", ".", "seconds", "microseconds", "=", "seconds", "*", "10", "**", "6", "+", "offset", ".", "microseconds", "return", ...
Backport of offset.total_seconds() from python 2.7+.
[ "Backport", "of", "offset", ".", "total_seconds", "()", "from", "python", "2", ".", "7", "+", "." ]
python
train
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/targets.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L500-L506
def has_main_target(self, name):
    """Tells if a main target with the specified name exists."""
    assert isinstance(name, basestring)
    # The main-target table is populated lazily; build it on first access.
    if not self.built_main_targets_:
        self.build_main_targets()

    known_targets = self.main_target_
    return name in known_targets
[ "def", "has_main_target", "(", "self", ",", "name", ")", ":", "assert", "isinstance", "(", "name", ",", "basestring", ")", "if", "not", "self", ".", "built_main_targets_", ":", "self", ".", "build_main_targets", "(", ")", "return", "name", "in", "self", "....
Tells if a main target with the specified name exists.
[ "Tells", "if", "a", "main", "target", "with", "the", "specified", "name", "exists", "." ]
python
train
kkroening/ffmpeg-python
ffmpeg/_filters.py
https://github.com/kkroening/ffmpeg-python/blob/ac111dc3a976ddbb872bc7d6d4fe24a267c1a956/ffmpeg/_filters.py#L191-L212
def drawbox(stream, x, y, width, height, color, thickness=None, **kwargs):
    """Draw a colored box on the input image.

    Args:
        x: The expression which specifies the top left corner x coordinate of the box. It defaults to 0.
        y: The expression which specifies the top left corner y coordinate of the box. It defaults to 0.
        width: Specify the width of the box; if 0 interpreted as the input width. It defaults to 0.
        height: Specify the height of the box; if 0 interpreted as the input height. It defaults to 0.
        color: Specify the color of the box to write. For the general syntax of this option, check the "Color" section
            in the ffmpeg-utils manual. If the special value invert is used, the box edge color is the same as the
            video with inverted luma.
        thickness: The expression which sets the thickness of the box edge. Default value is 3.
        w: Alias for ``width``.
        h: Alias for ``height``.
        c: Alias for ``color``.
        t: Alias for ``thickness``.

    Official documentation: `drawbox <https://ffmpeg.org/ffmpeg-filters.html#drawbox>`__
    """
    # Compare against None explicitly so an explicit ``thickness=0`` is still
    # forwarded to ffmpeg; a bare truthiness test would silently drop it and
    # fall back to ffmpeg's default thickness of 3.
    if thickness is not None:
        kwargs['t'] = thickness
    return FilterNode(stream, drawbox.__name__, args=[x, y, width, height, color], kwargs=kwargs).stream()
[ "def", "drawbox", "(", "stream", ",", "x", ",", "y", ",", "width", ",", "height", ",", "color", ",", "thickness", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "thickness", ":", "kwargs", "[", "'t'", "]", "=", "thickness", "return", "Filter...
Draw a colored box on the input image. Args: x: The expression which specifies the top left corner x coordinate of the box. It defaults to 0. y: The expression which specifies the top left corner y coordinate of the box. It defaults to 0. width: Specify the width of the box; if 0 interpreted as the input width. It defaults to 0. height: Specify the height of the box; if 0 interpreted as the input height. It defaults to 0. color: Specify the color of the box to write. For the general syntax of this option, check the "Color" section in the ffmpeg-utils manual. If the special value invert is used, the box edge color is the same as the video with inverted luma. thickness: The expression which sets the thickness of the box edge. Default value is 3. w: Alias for ``width``. h: Alias for ``height``. c: Alias for ``color``. t: Alias for ``thickness``. Official documentation: `drawbox <https://ffmpeg.org/ffmpeg-filters.html#drawbox>`__
[ "Draw", "a", "colored", "box", "on", "the", "input", "image", "." ]
python
train