repo
stringlengths
7
55
path
stringlengths
4
223
url
stringlengths
87
315
code
stringlengths
75
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
avg_line_len
float64
7.91
980
SheffieldML/GPyOpt
GPyOpt/optimization/optimizer.py
https://github.com/SheffieldML/GPyOpt/blob/255539dc5927819ca701e44fe3d76cd4864222fa/GPyOpt/optimization/optimizer.py#L226-L235
def f_df_nc(self, x):
    """Evaluate *f* and its gradient on the non-context dimensions.

    Takes an input ``x`` sized to the non-fixed (non-context) dimensions,
    expands it to the full input space, evaluates ``self.f_df`` there, and
    keeps only the gradient entries belonging to the non-context dimensions.
    """
    point = np.atleast_2d(x)
    expanded = self.context_manager._expand_vector(point)
    value, gradient = self.f_df(expanded)
    # Slice the gradient down to the free (non-fixed) dimensions only.
    free_idx = np.array(self.context_manager.noncontext_index)
    return value, gradient[:, free_idx]
[ "def", "f_df_nc", "(", "self", ",", "x", ")", ":", "x", "=", "np", ".", "atleast_2d", "(", "x", ")", "xx", "=", "self", ".", "context_manager", ".", "_expand_vector", "(", "x", ")", "f_nocontext_xx", ",", "df_nocontext_xx", "=", "self", ".", "f_df", ...
Wrapper of the derivative of *f*: takes an input x with size of the not fixed dimensions expands it and evaluates the gradient of the entire function.
[ "Wrapper", "of", "the", "derivative", "of", "*", "f", "*", ":", "takes", "an", "input", "x", "with", "size", "of", "the", "not", "fixed", "dimensions", "expands", "it", "and", "evaluates", "the", "gradient", "of", "the", "entire", "function", "." ]
python
train
48
edx/edx-enterprise
integrated_channels/sap_success_factors/transmitters/content_metadata.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/integrated_channels/sap_success_factors/transmitters/content_metadata.py#L32-L75
def transmit(self, payload, **kwargs):
    """
    Transmit content metadata items to the integrated channel.

    Partitions ``payload`` into create/update/delete buckets, pushes all
    items to the SAP SuccessFactors API in chunks, and then synchronizes
    the local ``ContentMetadataItemTransmission`` records for whatever was
    sent successfully.  After the first failed chunk, the remaining chunks
    are skipped (see the throttling note below) and their items are removed
    from the buckets so no local records are written for them.
    """
    items_to_create, items_to_update, items_to_delete, transmission_map = self._partition_items(payload)
    # Items marked for deletion need channel-specific preprocessing first.
    self._prepare_items_for_delete(items_to_delete)
    # All three buckets go through the same "update" API call below.
    prepared_items = {}
    prepared_items.update(items_to_create)
    prepared_items.update(items_to_update)
    prepared_items.update(items_to_delete)
    skip_metadata_transmission = False
    for chunk in chunks(prepared_items, self.enterprise_configuration.transmission_chunk_size):
        chunked_items = list(chunk.values())
        if skip_metadata_transmission:
            # Remove the failed items from the create/update/delete dictionaries,
            # so ContentMetadataItemTransmission objects are not synchronized for
            # these items below.
            self._remove_failed_items(chunked_items, items_to_create, items_to_update, items_to_delete)
        else:
            try:
                self.client.update_content_metadata(self._serialize_items(chunked_items))
            except ClientError as exc:
                LOGGER.error(
                    'Failed to update [%s] content metadata items for integrated channel [%s] [%s]',
                    len(chunked_items),
                    self.enterprise_configuration.enterprise_customer.name,
                    self.enterprise_configuration.channel_code,
                )
                LOGGER.error(exc)
                # Remove the failed items from the create/update/delete dictionaries,
                # so ContentMetadataItemTransmission objects are not synchronized for
                # these items below.
                self._remove_failed_items(chunked_items, items_to_create, items_to_update, items_to_delete)
                # SAP servers throttle incoming traffic, If a request fails than the subsequent would fail too,
                # So, no need to keep trying and failing. We should stop here and retry later.
                skip_metadata_transmission = True
    # Record what was actually transmitted for each bucket.
    self._create_transmissions(items_to_create)
    self._update_transmissions(items_to_update, transmission_map)
    self._delete_transmissions(items_to_delete.keys())
[ "def", "transmit", "(", "self", ",", "payload", ",", "*", "*", "kwargs", ")", ":", "items_to_create", ",", "items_to_update", ",", "items_to_delete", ",", "transmission_map", "=", "self", ".", "_partition_items", "(", "payload", ")", "self", ".", "_prepare_ite...
Transmit content metadata items to the integrated channel.
[ "Transmit", "content", "metadata", "items", "to", "the", "integrated", "channel", "." ]
python
valid
52.886364
requests/requests-oauthlib
requests_oauthlib/oauth1_session.py
https://github.com/requests/requests-oauthlib/blob/800976faab3b827a42fa1cb80f13fcc03961d2c9/requests_oauthlib/oauth1_session.py#L390-L400
def rebuild_auth(self, prepared_request, response):
    """Re-sign a redirected request.

    On redirect the "Authorization" header is always stripped, because an
    OAuth nonce must not be reused; the request is then re-prepared with
    this session's auth so a fresh signature is generated.
    """
    headers = prepared_request.headers
    if "Authorization" not in headers:
        return
    # Drop the stale signature before signing for the new location.
    headers.pop("Authorization", True)
    prepared_request.prepare_auth(self.auth)
[ "def", "rebuild_auth", "(", "self", ",", "prepared_request", ",", "response", ")", ":", "if", "\"Authorization\"", "in", "prepared_request", ".", "headers", ":", "# If we get redirected to a new host, we should strip out", "# any authentication headers.", "prepared_request", ...
When being redirected we should always strip Authorization header, since nonce may not be reused as per OAuth spec.
[ "When", "being", "redirected", "we", "should", "always", "strip", "Authorization", "header", "since", "nonce", "may", "not", "be", "reused", "as", "per", "OAuth", "spec", "." ]
python
valid
45.181818
sdispater/cachy
cachy/redis_tagged_cache.py
https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/redis_tagged_cache.py#L61-L70
def _delete_forever_values(self, forever_key): """ Delete all of the keys that have been stored forever. :type forever_key: str """ forever = self._store.connection().lrange(forever_key, 0, -1) if len(forever) > 0: self._store.connection().delete(*forever)
[ "def", "_delete_forever_values", "(", "self", ",", "forever_key", ")", ":", "forever", "=", "self", ".", "_store", ".", "connection", "(", ")", ".", "lrange", "(", "forever_key", ",", "0", ",", "-", "1", ")", "if", "len", "(", "forever", ")", ">", "0...
Delete all of the keys that have been stored forever. :type forever_key: str
[ "Delete", "all", "of", "the", "keys", "that", "have", "been", "stored", "forever", "." ]
python
train
30.9
marcinmiklitz/pywindow
pywindow/molecular.py
https://github.com/marcinmiklitz/pywindow/blob/e5264812157224f22a691741ca2e0aefdc9bd2eb/pywindow/molecular.py#L450-L462
def calculate_average_diameter(self, **kwargs):
    """Return the average dimension of a molecule.

    The value is also cached on ``self.average_diameter``.

    Returns
    -------
    :class:`float`
        The average dimension of the molecule.
    """
    result = find_average_diameter(self.elements, self.coordinates, **kwargs)
    self.average_diameter = result
    return result
[ "def", "calculate_average_diameter", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "average_diameter", "=", "find_average_diameter", "(", "self", ".", "elements", ",", "self", ".", "coordinates", ",", "*", "*", "kwargs", ")", "return", "self",...
Return the average diamension of a molecule. Returns ------- :class:`float` The average dimension of the molecule.
[ "Return", "the", "average", "diamension", "of", "a", "molecule", "." ]
python
train
28.230769
ciena/afkak
afkak/kafkacodec.py
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/kafkacodec.py#L580-L592
def create_message(payload, key=None):
    """Construct a :class:`Message`.

    :param payload: The payload to send to Kafka.
    :type payload: :class:`bytes` or ``None``
    :param key:
        A key used to route the message when partitioning and to determine
        message identity on a compacted topic.
    :type key: :class:`bytes` or ``None``
    """
    # Both arguments share the same bytes-or-None contract; validate them
    # uniformly (the produced message text matches the original asserts).
    for name, value in (("payload", payload), ("key", key)):
        assert value is None or isinstance(value, bytes), \
            "%s=%r should be bytes or None" % (name, value)
    return Message(0, 0, key, payload)
[ "def", "create_message", "(", "payload", ",", "key", "=", "None", ")", ":", "assert", "payload", "is", "None", "or", "isinstance", "(", "payload", ",", "bytes", ")", ",", "'payload={!r} should be bytes or None'", ".", "format", "(", "payload", ")", "assert", ...
Construct a :class:`Message` :param payload: The payload to send to Kafka. :type payload: :class:`bytes` or ``None`` :param key: A key used to route the message when partitioning and to determine message identity on a compacted topic. :type key: :class:`bytes` or ``None``
[ "Construct", "a", ":", "class", ":", "Message" ]
python
train
45.615385
vimalkvn/riboplot
riboplot/ribocore.py
https://github.com/vimalkvn/riboplot/blob/914515df54eccc2e726ba71e751c3260f2066d97/riboplot/ribocore.py#L227-L257
def filter_ribo_counts(counts, orf_start=None, orf_stop=None):
    """Filter read counts, keeping only positions upstream of orf_start or
    downstream of orf_stop.

    Keyword arguments:
    counts -- Ribo-Seq read counts obtained from get_ribo_counts.
    orf_start -- Start position of the longest ORF.
    orf_stop -- Stop position of the longest ORF.

    Returns the filtered counts and the total number of remaining reads.
    """
    def _keep(position):
        # Both boundaries known: drop positions strictly inside the ORF.
        if orf_start and orf_stop:
            return not (orf_start < position < orf_stop)
        # Only the start known: keep strictly upstream positions.
        if orf_start:
            return position < orf_start
        # Only the stop known: keep strictly downstream positions.
        if orf_stop:
            return position > orf_stop
        # No ORF boundaries given: keep everything.
        return True

    filtered_counts = {pos: reads for pos, reads in counts.items() if _keep(pos)}
    # Total reads remaining for this transcript after filtering.
    total_reads = sum(sum(reads.values()) for reads in filtered_counts.values())
    return filtered_counts, total_reads
[ "def", "filter_ribo_counts", "(", "counts", ",", "orf_start", "=", "None", ",", "orf_stop", "=", "None", ")", ":", "filtered_counts", "=", "dict", ".", "copy", "(", "counts", ")", "for", "position", "in", "counts", ":", "if", "orf_start", "and", "orf_stop"...
Filter read counts and return only upstream of orf_start or downstream of orf_stop. Keyword arguments: counts -- Ribo-Seq read counts obtained from get_ribo_counts. orf_start -- Start position of the longest ORF. orf_stop -- Stop position of the longest ORF.
[ "Filter", "read", "counts", "and", "return", "only", "upstream", "of", "orf_start", "or", "downstream", "of", "orf_stop", "." ]
python
train
41.774194
fastai/fastai
fastai/core.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/core.py#L252-L255
def func_args(func) -> tuple:
    "Return the tuple of argument names of `func`."
    # Fix: the original annotated the return type as `bool`, but the
    # function returns a tuple of strings (the positional argument names
    # sliced out of the code object's local-variable names).
    code = func.__code__
    return code.co_varnames[:code.co_argcount]
[ "def", "func_args", "(", "func", ")", "->", "bool", ":", "code", "=", "func", ".", "__code__", "return", "code", ".", "co_varnames", "[", ":", "code", ".", "co_argcount", "]" ]
Return the arguments of `func`.
[ "Return", "the", "arguments", "of", "func", "." ]
python
train
33.25
ph4r05/monero-serialize
monero_serialize/xmrserialize.py
https://github.com/ph4r05/monero-serialize/blob/cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42/monero_serialize/xmrserialize.py#L619-L657
async def _load_tuple(self, reader, elem_type, params=None, elem=None):
    """
    Loads tuple of elements from the reader. Supports the tuple ref.
    Returns loaded tuple.

    :param reader: source to read serialized data from
    :param elem_type: tuple type descriptor; its ``f_specs()`` define the
        per-element field specs and the expected tuple length
    :param params: optional; ``params[0]`` overrides the field specs and the
        remainder is forwarded to each element load
    :param elem: optional existing tuple to load into (in-place ref);
        when given, elements are written through ``eref`` instead of appended
    :return: the loaded tuple (``elem`` if it was provided)
    """
    # Serialized tuples are prefixed with their element count.
    c_len = await load_uvarint(reader)
    if elem and c_len != len(elem):
        raise ValueError("Size mismatch")
    if c_len != len(elem_type.f_specs()):
        raise ValueError("Tuple size mismatch")

    # params[0], when present, substitutes the element field specs.
    elem_fields = params[0] if params else None
    if elem_fields is None:
        elem_fields = elem_type.f_specs()

    res = elem if elem else []
    for i in range(c_len):
        try:
            # Track the current index so errors can report their position.
            self.tracker.push_index(i)
            fvalue = await self.load_field(
                reader,
                elem_fields[i],
                params[1:] if params else None,
                # Write in place when loading into an existing tuple ref.
                eref(res, i) if elem else None,
            )
            self.tracker.pop()
        except Exception as e:
            # Wrap any failure with the tracker state for diagnostics.
            raise helpers.ArchiveException(e, tracker=self.tracker) from e

        if not elem:
            res.append(fvalue)
    return res
[ "async", "def", "_load_tuple", "(", "self", ",", "reader", ",", "elem_type", ",", "params", "=", "None", ",", "elem", "=", "None", ")", ":", "c_len", "=", "await", "load_uvarint", "(", "reader", ")", "if", "elem", "and", "c_len", "!=", "len", "(", "e...
Loads tuple of elements from the reader. Supports the tuple ref. Returns loaded tuple. :param reader: :param elem_type: :param params: :param container: :return:
[ "Loads", "tuple", "of", "elements", "from", "the", "reader", ".", "Supports", "the", "tuple", "ref", ".", "Returns", "loaded", "tuple", "." ]
python
train
31.410256
sdispater/poetry
poetry/masonry/builders/sdist.py
https://github.com/sdispater/poetry/blob/2d27acd76c165dd49f11934520a7973de7a3762a/poetry/masonry/builders/sdist.py#L321-L337
def clean_tarinfo(cls, tar_info):
    """
    Clean metadata from a TarInfo object to make it more reproducible.

    - Set uid & gid to 0
    - Set uname and gname to ""
    - Normalise permissions to 644 or 755
    """
    # Work on a copy so the caller's TarInfo is left untouched.
    ti = copy(tar_info)
    # Drop all ownership information.
    ti.uid = ti.gid = 0
    ti.uname = ti.gname = ""
    # NOTE(review): the original docstring also claimed "Set mtime if not
    # None", but no mtime handling is present in this block — confirm intent.
    ti.mode = normalize_file_permissions(ti.mode)
    return ti
[ "def", "clean_tarinfo", "(", "cls", ",", "tar_info", ")", ":", "ti", "=", "copy", "(", "tar_info", ")", "ti", ".", "uid", "=", "0", "ti", ".", "gid", "=", "0", "ti", ".", "uname", "=", "\"\"", "ti", ".", "gname", "=", "\"\"", "ti", ".", "mode",...
Clean metadata from a TarInfo object to make it more reproducible. - Set uid & gid to 0 - Set uname and gname to "" - Normalise permissions to 644 or 755 - Set mtime if not None
[ "Clean", "metadata", "from", "a", "TarInfo", "object", "to", "make", "it", "more", "reproducible", "." ]
python
train
27
materialsproject/pymatgen-db
matgendb/vv/report.py
https://github.com/materialsproject/pymatgen-db/blob/02e4351c2cea431407644f49193e8bf43ed39b9a/matgendb/vv/report.py#L478-L492
def sort_rows(self, rows, section):
    """Sort the rows, as appropriate for the section.

    :param rows: List of tuples (all same length, same values in each position)
    :param section: Name of section, should match const in Differ class
    :return: None; rows are sorted in-place
    """
    # Only the "changed" section defines a sort key; every other section
    # keeps its original order.
    if section.lower() != Differ.CHANGED.lower():
        return
    rows.sort(key=itemgetter(Differ.CHANGED_DELTA))
[ "def", "sort_rows", "(", "self", ",", "rows", ",", "section", ")", ":", "#print(\"@@ SORT ROWS:\\n{}\".format(rows))", "# Section-specific determination of sort key", "if", "section", ".", "lower", "(", ")", "==", "Differ", ".", "CHANGED", ".", "lower", "(", ")", ...
Sort the rows, as appropriate for the section. :param rows: List of tuples (all same length, same values in each position) :param section: Name of section, should match const in Differ class :return: None; rows are sorted in-place
[ "Sort", "the", "rows", "as", "appropriate", "for", "the", "section", "." ]
python
train
41.533333
pinax/pinax-badges
pinax/badges/base.py
https://github.com/pinax/pinax-badges/blob/0921c388088e7c7a77098dc7d0eea393b4707ce5/pinax/badges/base.py#L39-L69
def actually_possibly_award(self, **state):
    """
    Does the actual work of possibly awarding a badge.

    Asks ``self.award(**state)`` whether the badge applies; returns early
    when it does not, or when a non-repeatable badge at this level was
    already awarded to the user.  Otherwise creates the ``BadgeAward``
    record, sends badge messages and fires the ``badge_awarded`` signal.
    """
    user = state["user"]
    # Optional override for the award timestamp; must not reach award().
    force_timestamp = state.pop("force_timestamp", None)

    awarded = self.award(**state)
    if awarded is None:
        # The badge's own logic decided not to award anything.
        return
    if awarded.level is None:
        # A badge without an explicit level must be single-level.
        assert len(self.levels) == 1
        awarded.level = 1
    # awarded levels are 1 indexed, for convenience
    awarded = awarded.level - 1
    assert awarded < len(self.levels)
    # Non-repeatable badges: skip if this user already holds this level.
    if (
        not self.multiple and
        BadgeAward.objects.filter(user=user, slug=self.slug, level=awarded)
    ):
        return
    extra_kwargs = {}
    if force_timestamp is not None:
        extra_kwargs["awarded_at"] = force_timestamp
    badge = BadgeAward.objects.create(
        user=user,
        slug=self.slug,
        level=awarded,
        **extra_kwargs
    )
    self.send_badge_messages(badge)
    badge_awarded.send(sender=self, badge_award=badge)
[ "def", "actually_possibly_award", "(", "self", ",", "*", "*", "state", ")", ":", "user", "=", "state", "[", "\"user\"", "]", "force_timestamp", "=", "state", ".", "pop", "(", "\"force_timestamp\"", ",", "None", ")", "awarded", "=", "self", ".", "award", ...
Does the actual work of possibly awarding a badge.
[ "Does", "the", "actual", "work", "of", "possibly", "awarding", "a", "badge", "." ]
python
train
33.806452
mrstephenneal/dirutility
dirutility/walk/walk.py
https://github.com/mrstephenneal/dirutility/blob/339378659e2d7e09c53acfc51c5df745bb0cd517/dirutility/walk/walk.py#L204-L214
def walk(self):
    """Default file path retrieval function.

    sprinter() - Generates file path list using pool processing and Queues
    crawler() - Generates file path list using os.walk() in sequence
    """
    # Pick the parallel or the sequential walker based on configuration.
    if self.parallelize:
        walker = Sprinter(self.directory, self.filters, self.full_paths,
                          self.pool_size, self._printer)
        self.filepaths = walker.sprinter()
    else:
        walker = Crawler(self.directory, self.filters, self.full_paths,
                         self.topdown, self._printer)
        self.filepaths = walker.crawler()
    return self._get_filepaths()
[ "def", "walk", "(", "self", ")", ":", "if", "self", ".", "parallelize", ":", "self", ".", "filepaths", "=", "Sprinter", "(", "self", ".", "directory", ",", "self", ".", "filters", ",", "self", ".", "full_paths", ",", "self", ".", "pool_size", ",", "s...
Default file path retrieval function. sprinter() - Generates file path list using pool processing and Queues crawler() - Generates file path list using os.walk() in sequence
[ "Default", "file", "path", "retrieval", "function", ".", "sprinter", "()", "-", "Generates", "file", "path", "list", "using", "pool", "processing", "and", "Queues", "crawler", "()", "-", "Generates", "file", "path", "list", "using", "os", ".", "walk", "()", ...
python
train
50.636364
apache/incubator-heron
third_party/python/cpplint/cpplint.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/third_party/python/cpplint/cpplint.py#L4142-L4243
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
  """Look for empty loop/conditional body with only a single semicolon.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """

  # Search for loop keywords at the beginning of the line. Because only
  # whitespaces are allowed before the keywords, this will also ignore most
  # do-while-loops, since those lines should start with closing brace.
  #
  # We also check "if" blocks here, since an empty conditional block
  # is likely an error.
  line = clean_lines.elided[linenum]
  matched = Match(r'\s*(for|while|if)\s*\(', line)
  if matched:
    # Find the end of the conditional expression.
    (end_line, end_linenum, end_pos) = CloseExpression(
        clean_lines, linenum, line.find('('))

    # Output warning if what follows the condition expression is a semicolon.
    # No warning for all other cases, including whitespace or newline, since we
    # have a separate check for semicolons preceded by whitespace.
    if end_pos >= 0 and Match(r';', end_line[end_pos:]):
      if matched.group(1) == 'if':
        error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
              'Empty conditional bodies should use {}')
      else:
        error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
              'Empty loop bodies should use {} or continue')

    # Check for if statements that have completely empty bodies (no comments)
    # and no else clauses.
    if end_pos >= 0 and matched.group(1) == 'if':
      # Find the position of the opening { for the if statement.
      # Return without logging an error if it has no brackets.
      opening_linenum = end_linenum
      opening_line_fragment = end_line[end_pos:]
      # Loop until EOF or find anything that's not whitespace or opening {.
      while not Search(r'^\s*\{', opening_line_fragment):
        if Search(r'^(?!\s*$)', opening_line_fragment):
          # Conditional has no brackets.
          return
        opening_linenum += 1
        if opening_linenum == len(clean_lines.elided):
          # Couldn't find conditional's opening { or any code before EOF.
          return
        opening_line_fragment = clean_lines.elided[opening_linenum]
      # Set opening_line (opening_line_fragment may not be entire opening line).
      opening_line = clean_lines.elided[opening_linenum]

      # Find the position of the closing }.
      opening_pos = opening_line_fragment.find('{')
      if opening_linenum == end_linenum:
        # We need to make opening_pos relative to the start of the entire line.
        opening_pos += end_pos
      (closing_line, closing_linenum, closing_pos) = CloseExpression(
          clean_lines, opening_linenum, opening_pos)
      if closing_pos < 0:
        # Unbalanced braces; nothing safe to report.
        return

      # Now construct the body of the conditional. This consists of the portion
      # of the opening line after the {, all lines until the closing line,
      # and the portion of the closing line before the }.
      if (clean_lines.raw_lines[opening_linenum] !=
          CleanseComments(clean_lines.raw_lines[opening_linenum])):
        # Opening line ends with a comment, so conditional isn't empty.
        return
      if closing_linenum > opening_linenum:
        # Opening line after the {. Ignore comments here since we checked above.
        bodylist = list(opening_line[opening_pos+1:])
        # All lines until closing line, excluding closing line, with comments.
        bodylist.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])
        # Closing line before the }. Won't (and can't) have comments.
        bodylist.append(clean_lines.elided[closing_linenum][:closing_pos-1])
        body = '\n'.join(bodylist)
      else:
        # If statement has brackets and fits on a single line.
        body = opening_line[opening_pos+1:closing_pos-1]

      # Check if the body is empty
      if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body):
        return
      # The body is empty. Now make sure there's not an else clause.
      current_linenum = closing_linenum
      current_line_fragment = closing_line[closing_pos:]
      # Loop until EOF or find anything that's not whitespace or else clause.
      while Search(r'^\s*$|^(?=\s*else)', current_line_fragment):
        if Search(r'^(?=\s*else)', current_line_fragment):
          # Found an else clause, so don't log an error.
          return
        current_linenum += 1
        if current_linenum == len(clean_lines.elided):
          break
        current_line_fragment = clean_lines.elided[current_linenum]

      # The body is empty and there's no else clause until EOF or other code.
      error(filename, end_linenum, 'whitespace/empty_if_body', 4,
            ('If statement had no body and no else clause'))
[ "def", "CheckEmptyBlockBody", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", ":", "# Search for loop keywords at the beginning of the line. Because only", "# whitespaces are allowed before the keywords, this will also ignore most", "# do-while-loops, since those...
Look for empty loop/conditional body with only a single semicolon. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
[ "Look", "for", "empty", "loop", "/", "conditional", "body", "with", "only", "a", "single", "semicolon", "." ]
python
valid
47.411765
toumorokoshi/transmute-core
transmute_core/frameworks/flask/swagger.py
https://github.com/toumorokoshi/transmute-core/blob/a2c26625d5d8bab37e00038f9d615a26167fc7f4/transmute_core/frameworks/flask/swagger.py#L23-L44
def add_swagger_api_route(app, target_route, swagger_json_route):
    """Mount a swagger statics page on a Flask app.

    app: the flask app object
    target_route: the path to mount the statics page.
    swagger_json_route: the path where the swagger json
        definitions is expected to be.
    """
    static_folder = get_swagger_static_root()
    # Render the UI page once, up front; it never changes per request.
    # NOTE(review): the page is rendered against the STATIC_ROOT constant
    # while the blueprint serves files from get_swagger_static_root() —
    # presumably both point at the same assets; confirm in context.
    page = generate_swagger_html(
        STATIC_ROOT, swagger_json_route
    ).encode("utf-8")

    def swagger_ui():
        return Response(page, content_type="text/html")

    blueprint = Blueprint('swagger', __name__,
                          static_url_path=STATIC_ROOT,
                          static_folder=static_folder)
    app.route(target_route)(swagger_ui)
    app.register_blueprint(blueprint)
[ "def", "add_swagger_api_route", "(", "app", ",", "target_route", ",", "swagger_json_route", ")", ":", "static_root", "=", "get_swagger_static_root", "(", ")", "swagger_body", "=", "generate_swagger_html", "(", "STATIC_ROOT", ",", "swagger_json_route", ")", ".", "encod...
mount a swagger statics page. app: the flask app object target_route: the path to mount the statics page. swagger_json_route: the path where the swagger json definitions is expected to be.
[ "mount", "a", "swagger", "statics", "page", "." ]
python
train
33.772727
alex-kostirin/pyatomac
atomac/ooldtp/__init__.py
https://github.com/alex-kostirin/pyatomac/blob/3f46f6feb4504315eec07abb18bb41be4d257aeb/atomac/ooldtp/__init__.py#L322-L336
def log(self, message, level=logging.DEBUG):
    """Log *message* on this object's logger with the given log level.

    @param message: Message to be logged
    @type message: string
    @param level: Log level, default DEBUG
    @type level: integer

    @return: 1 on success and 0 on error
    @rtype: integer
    """
    # Echo to stdout as well when the module-level debug flag is on.
    if _ldtp_debug:
        print(message)
    self.logger.log(level, str(message))
    return 1
[ "def", "log", "(", "self", ",", "message", ",", "level", "=", "logging", ".", "DEBUG", ")", ":", "if", "_ldtp_debug", ":", "print", "(", "message", ")", "self", ".", "logger", ".", "log", "(", "level", ",", "str", "(", "message", ")", ")", "return"...
Logs the message in the root logger with the log level @param message: Message to be logged @type message: string @param level: Log level, defaul DEBUG @type level: integer @return: 1 on success and 0 on error @rtype: integer
[ "Logs", "the", "message", "in", "the", "root", "logger", "with", "the", "log", "level" ]
python
valid
30.266667
mar10/wsgidav
wsgidav/samples/mysql_dav_provider.py
https://github.com/mar10/wsgidav/blob/cec0d84222fc24bea01be1cea91729001963f172/wsgidav/samples/mysql_dav_provider.py#L295-L302
def set_property_value(self, name, value, dry_run=False):
    """Set or remove property value.

    See DAVResource.set_property_value()

    All live properties of this resource are read-only, so any attempt to
    change one is rejected outright — including dry runs.

    :param name: property name to set
    :param value: new property value (ignored; the call always fails)
    :param dry_run: ignored; the property is protected either way
    :raises DAVError: always, with HTTP 403 and the protected-property
        precondition code
    """
    raise DAVError(
        HTTP_FORBIDDEN, err_condition=PRECONDITION_CODE_ProtectedProperty
    )
[ "def", "set_property_value", "(", "self", ",", "name", ",", "value", ",", "dry_run", "=", "False", ")", ":", "raise", "DAVError", "(", "HTTP_FORBIDDEN", ",", "err_condition", "=", "PRECONDITION_CODE_ProtectedProperty", ")" ]
Set or remove property value. See DAVResource.set_property_value()
[ "Set", "or", "remove", "property", "value", "." ]
python
valid
32.625
django-auth-ldap/django-auth-ldap
django_auth_ldap/backend.py
https://github.com/django-auth-ldap/django-auth-ldap/blob/9ce3c2825527f8faa1793958b041816e63d839af/django_auth_ldap/backend.py#L835-L863
def _get_connection(self):
    """
    Returns our cached LDAPObject, which may or may not be bound.

    On first use, resolves ``SERVER_URI`` (calling it with the current
    request when it is a callable), initializes the LDAP connection,
    applies the configured connection options and, if enabled, starts TLS.
    Subsequent calls return the same connection object.
    """
    if self._connection is None:
        uri = self.settings.SERVER_URI
        if callable(uri):
            # Newer-style callables take the request; zero-argument
            # callables still work but are deprecated.
            if func_supports_parameter(uri, "request"):
                uri = uri(self._request)
            else:
                warnings.warn(
                    "Update AUTH_LDAP_SERVER_URI callable %s.%s to accept "
                    "a positional `request` argument. Support for callables "
                    "accepting no arguments will be removed in a future "
                    "version." % (uri.__module__, uri.__name__),
                    DeprecationWarning,
                )
                uri = uri()

        self._connection = self.backend.ldap.initialize(uri, bytes_mode=False)

        # Apply per-connection LDAP options from settings.
        for opt, value in self.settings.CONNECTION_OPTIONS.items():
            self._connection.set_option(opt, value)

        if self.settings.START_TLS:
            logger.debug("Initiating TLS")
            self._connection.start_tls_s()

    return self._connection
[ "def", "_get_connection", "(", "self", ")", ":", "if", "self", ".", "_connection", "is", "None", ":", "uri", "=", "self", ".", "settings", ".", "SERVER_URI", "if", "callable", "(", "uri", ")", ":", "if", "func_supports_parameter", "(", "uri", ",", "\"req...
Returns our cached LDAPObject, which may or may not be bound.
[ "Returns", "our", "cached", "LDAPObject", "which", "may", "or", "may", "not", "be", "bound", "." ]
python
train
39.724138
ThreatResponse/margaritashotgun
margaritashotgun/remote_host.py
https://github.com/ThreatResponse/margaritashotgun/blob/6dee53ef267959b214953439968244cc46a19690/margaritashotgun/remote_host.py#L160-L172
def start_tunnel(self, local_port, remote_address, remote_port):
    """Start an ssh forward tunnel and remember its local port.

    :type local_port: int
    :param local_port: local port binding for ssh tunnel
    :type remote_address: str
    :param remote_address: remote tunnel endpoint bind address
    :type remote_port: int
    :param remote_port: remote tunnel endpoint bind port
    """
    self.tunnel.start(local_port, remote_address, remote_port)
    # Record the local binding so later operations can find the tunnel.
    self.tunnel_port = local_port
[ "def", "start_tunnel", "(", "self", ",", "local_port", ",", "remote_address", ",", "remote_port", ")", ":", "self", ".", "tunnel", ".", "start", "(", "local_port", ",", "remote_address", ",", "remote_port", ")", "self", ".", "tunnel_port", "=", "local_port" ]
Start ssh forward tunnel :type local_port: int :param local_port: local port binding for ssh tunnel :type remote_address: str :param remote_address: remote tunnel endpoint bind address :type remote_port: int :param remote_port: remote tunnel endpoint bind port
[ "Start", "ssh", "forward", "tunnel" ]
python
train
38.384615
OSSOS/MOP
src/jjk/preproc/MOPplot.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/jjk/preproc/MOPplot.py#L442-L452
def reset(self):
    """Expand the plot back to the full scale."""
    import ephem
    import MOPcoord

    # Recompute the Sun position for the currently selected date.
    sun = ephem.Sun()
    sun.compute(self.date.get())
    self.sun = MOPcoord.coord((sun.ra, sun.dec))
    # Redraw the full plot, then overlay the pointings.
    doplot(kbos)
    self.plot_pointings()
[ "def", "reset", "(", "self", ")", ":", "import", "ephem", ",", "MOPcoord", "sun", "=", "ephem", ".", "Sun", "(", ")", "sun", ".", "compute", "(", "self", ".", "date", ".", "get", "(", ")", ")", "self", ".", "sun", "=", "MOPcoord", ".", "coord", ...
Expand to the full scale
[ "Expand", "to", "the", "full", "scale" ]
python
train
20.818182
saltstack/salt
salt/modules/pagerduty_util.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pagerduty_util.py#L70-L85
def get_schedules(profile='pagerduty', subdomain=None, api_key=None):
    '''
    List schedules belonging to this account

    CLI Example:

        salt myminion pagerduty.get_schedules
    '''
    # Delegate to the generic listing helper; schedules are keyed by 'id'.
    return _list_items(
        'schedules', 'id',
        profile=profile, subdomain=subdomain, api_key=api_key,
    )
[ "def", "get_schedules", "(", "profile", "=", "'pagerduty'", ",", "subdomain", "=", "None", ",", "api_key", "=", "None", ")", ":", "return", "_list_items", "(", "'schedules'", ",", "'id'", ",", "profile", "=", "profile", ",", "subdomain", "=", "subdomain", ...
List schedules belonging to this account CLI Example: salt myminion pagerduty.get_schedules
[ "List", "schedules", "belonging", "to", "this", "account" ]
python
train
20.3125
pytroll/satpy
satpy/readers/nucaps.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/nucaps.py#L113-L126
def get_shape(self, ds_id, ds_info):
    """Return the data array shape for the item specified.

    Falls back to the dataset name when ``ds_info`` has no ``file_key``.
    A missing ``<var>/shape`` entry means the variable is a scalar.
    """
    key = ds_info.get('file_key', '{}'.format(ds_id.name))
    shape_key = key + '/shape'
    if shape_key not in self:
        # loading a scalar value
        return 1
    shape = self[shape_key]
    # Selecting by index drops the leading dimension ...
    if "index" in ds_info:
        shape = shape[1:]
    # ... and a pressure-level selection drops the trailing one.
    if "pressure_index" in ds_info:
        shape = shape[:-1]
    return shape
[ "def", "get_shape", "(", "self", ",", "ds_id", ",", "ds_info", ")", ":", "var_path", "=", "ds_info", ".", "get", "(", "'file_key'", ",", "'{}'", ".", "format", "(", "ds_id", ".", "name", ")", ")", "if", "var_path", "+", "'/shape'", "not", "in", "self...
Return data array shape for item specified.
[ "Return", "data", "array", "shape", "for", "item", "specified", "." ]
python
train
35
Becksteinlab/GromacsWrapper
gromacs/utilities.py
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/utilities.py#L138-L155
def autoconvert(s): """Convert input to a numerical type if possible. 1. A non-string object is returned as it is 2. Try conversion to int, float, str. """ if type(s) is not str: return s for converter in int, float, str: # try them in increasing order of lenience try: s = [converter(i) for i in s.split()] if len(s) == 1: return s[0] else: return numpy.array(s) except (ValueError, AttributeError): pass raise ValueError("Failed to autoconvert {0!r}".format(s))
[ "def", "autoconvert", "(", "s", ")", ":", "if", "type", "(", "s", ")", "is", "not", "str", ":", "return", "s", "for", "converter", "in", "int", ",", "float", ",", "str", ":", "# try them in increasing order of lenience", "try", ":", "s", "=", "[", "con...
Convert input to a numerical type if possible. 1. A non-string object is returned as it is 2. Try conversion to int, float, str.
[ "Convert", "input", "to", "a", "numerical", "type", "if", "possible", "." ]
python
valid
32.277778
bitesofcode/projexui
projexui/widgets/xtimedeltaedit.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtimedeltaedit.py#L92-L119
def delta(self): """ Returns a delta based on this widget's information. :return <datetime.timedelta> """ number = self._numberSpinner.value() unit = self._unitCombo.currentText() direction = self._directionCombo.currentText() # use past tense if direction == 'ago': number = -number if unit == 'year(s)': return datetime.timedelta(number * 365) elif unit == 'month(s)': return datetime.timedelta(number * 30) elif unit == 'week(s)': return datetime.timedelta(number * 7) elif unit == 'day(s)': return datetime.timedelta(number) elif unit == 'hour(s)': return datetime.timedelta(0, number * 3600) elif unit == 'minute(s)': return datetime.timedelta(0, number * 60) else: return datetime.timedelta(0, number)
[ "def", "delta", "(", "self", ")", ":", "number", "=", "self", ".", "_numberSpinner", ".", "value", "(", ")", "unit", "=", "self", ".", "_unitCombo", ".", "currentText", "(", ")", "direction", "=", "self", ".", "_directionCombo", ".", "currentText", "(", ...
Returns a delta based on this widget's information. :return <datetime.timedelta>
[ "Returns", "a", "delta", "based", "on", "this", "widget", "s", "information", ".", ":", "return", "<datetime", ".", "timedelta", ">" ]
python
train
34.857143
spdx/tools-python
spdx/parsers/lexers/tagvalue.py
https://github.com/spdx/tools-python/blob/301d72f6ae57c832c1da7f6402fa49b192de6810/spdx/parsers/lexers/tagvalue.py#L99-L102
def t_text(self, t): r':\s*<text>' t.lexer.text_start = t.lexer.lexpos - len('<text>') t.lexer.begin('text')
[ "def", "t_text", "(", "self", ",", "t", ")", ":", "t", ".", "lexer", ".", "text_start", "=", "t", ".", "lexer", ".", "lexpos", "-", "len", "(", "'<text>'", ")", "t", ".", "lexer", ".", "begin", "(", "'text'", ")" ]
r':\s*<text>
[ "r", ":", "\\", "s", "*", "<text", ">" ]
python
valid
32.25
PMEAL/OpenPNM
openpnm/network/GenericNetwork.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/network/GenericNetwork.py#L360-L440
def create_incidence_matrix(self, weights=None, fmt='coo', drop_zeros=False): r""" Creates a weighted incidence matrix in the desired sparse format Parameters ---------- weights : array_like, optional An array containing the throat values to enter into the matrix (In graph theory these are known as the 'weights'). If omitted, ones are used to create a standard incidence matrix representing connectivity only. fmt : string, optional The sparse storage format to return. Options are: **'coo'** : (default) This is the native format of OpenPNMs data **'lil'** : Enables row-wise slice of the matrix **'csr'** : Favored by most linear algebra routines **'dok'** : Enables subscript access of locations drop_zeros : boolean (default is ``False``) If ``True``, applies the ``eliminate_zeros`` method of the sparse array to remove all zero locations. Returns ------- An incidence matrix in the specified sparse format Notes ----- The incidence matrix is a cousin to the adjacency matrix, and used by OpenPNM for finding the throats connected to a give pore or set of pores. Specifically, an incidence matrix has Np rows and Nt columns, and each row represents a pore, containing non-zero values at the locations corresponding to the indices of the throats connected to that pore. The ``weights`` argument indicates what value to place at each location, with the default being 1's to simply indicate connections. Another useful option is throat indices, such that the data values on each row indicate which throats are connected to the pore, though this is redundant as it is identical to the locations of non-zeros. 
Examples -------- >>> import openpnm as op >>> pn = op.network.Cubic(shape=[5, 5, 5]) >>> weights = sp.rand(pn.num_throats(), ) < 0.5 >>> im = pn.create_incidence_matrix(weights=weights, fmt='csr') """ # Check if provided data is valid if weights is None: weights = sp.ones((self.Nt,), dtype=int) elif sp.shape(weights)[0] != self.Nt: raise Exception('Received dataset of incorrect length') conn = self['throat.conns'] row = conn[:, 0] row = sp.append(row, conn[:, 1]) col = sp.arange(self.Nt) col = sp.append(col, col) weights = sp.append(weights, weights) temp = sprs.coo.coo_matrix((weights, (row, col)), (self.Np, self.Nt)) if drop_zeros: temp.eliminate_zeros() # Convert to requested format if fmt == 'coo': pass # temp is already in coo format elif fmt == 'csr': temp = temp.tocsr() elif fmt == 'lil': temp = temp.tolil() elif fmt == 'dok': temp = temp.todok() return temp
[ "def", "create_incidence_matrix", "(", "self", ",", "weights", "=", "None", ",", "fmt", "=", "'coo'", ",", "drop_zeros", "=", "False", ")", ":", "# Check if provided data is valid", "if", "weights", "is", "None", ":", "weights", "=", "sp", ".", "ones", "(", ...
r""" Creates a weighted incidence matrix in the desired sparse format Parameters ---------- weights : array_like, optional An array containing the throat values to enter into the matrix (In graph theory these are known as the 'weights'). If omitted, ones are used to create a standard incidence matrix representing connectivity only. fmt : string, optional The sparse storage format to return. Options are: **'coo'** : (default) This is the native format of OpenPNMs data **'lil'** : Enables row-wise slice of the matrix **'csr'** : Favored by most linear algebra routines **'dok'** : Enables subscript access of locations drop_zeros : boolean (default is ``False``) If ``True``, applies the ``eliminate_zeros`` method of the sparse array to remove all zero locations. Returns ------- An incidence matrix in the specified sparse format Notes ----- The incidence matrix is a cousin to the adjacency matrix, and used by OpenPNM for finding the throats connected to a give pore or set of pores. Specifically, an incidence matrix has Np rows and Nt columns, and each row represents a pore, containing non-zero values at the locations corresponding to the indices of the throats connected to that pore. The ``weights`` argument indicates what value to place at each location, with the default being 1's to simply indicate connections. Another useful option is throat indices, such that the data values on each row indicate which throats are connected to the pore, though this is redundant as it is identical to the locations of non-zeros. Examples -------- >>> import openpnm as op >>> pn = op.network.Cubic(shape=[5, 5, 5]) >>> weights = sp.rand(pn.num_throats(), ) < 0.5 >>> im = pn.create_incidence_matrix(weights=weights, fmt='csr')
[ "r", "Creates", "a", "weighted", "incidence", "matrix", "in", "the", "desired", "sparse", "format" ]
python
train
37.604938
bitesofcode/projexui
projexui/widgets/xlineedit.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xlineedit.py#L433-L486
def paintEvent(self, event): """ Overloads the paint event to paint additional \ hint information if no text is set on the \ editor. :param event | <QPaintEvent> """ super(XLineEdit, self).paintEvent(event) # paint the hint text if not text is set if self.text() and not (self.icon() and not self.icon().isNull()): return # paint the hint text with XPainter(self) as painter: painter.setPen(self.hintColor()) icon = self.icon() left, top, right, bottom = self.getTextMargins() w = self.width() h = self.height() - 2 w -= (right + left) h -= (bottom + top) if icon and not icon.isNull(): size = icon.actualSize(self.iconSize()) x = self.cornerRadius() + 2 y = (self.height() - size.height()) / 2.0 painter.drawPixmap(x, y, icon.pixmap(size.width(), size.height())) w -= size.width() - 2 else: x = 6 + left w -= self._buttonWidth y = 2 + top # create the elided hint if not self.text() and self.hint(): rect = self.cursorRect() metrics = QFontMetrics(self.font()) hint = metrics.elidedText(self.hint(), Qt.ElideRight, w) align = self.alignment() if align & Qt.AlignHCenter: x = 0 else: x = rect.center().x() painter.drawText(x, y, w, h, align, hint)
[ "def", "paintEvent", "(", "self", ",", "event", ")", ":", "super", "(", "XLineEdit", ",", "self", ")", ".", "paintEvent", "(", "event", ")", "# paint the hint text if not text is set", "if", "self", ".", "text", "(", ")", "and", "not", "(", "self", ".", ...
Overloads the paint event to paint additional \ hint information if no text is set on the \ editor. :param event | <QPaintEvent>
[ "Overloads", "the", "paint", "event", "to", "paint", "additional", "\\", "hint", "information", "if", "no", "text", "is", "set", "on", "the", "\\", "editor", ".", ":", "param", "event", "|", "<QPaintEvent", ">" ]
python
train
33.796296
sixty-north/asq
asq/queryables.py
https://github.com/sixty-north/asq/blob/db0c4cbcf2118435136d4b63c62a12711441088e/asq/queryables.py#L2818-L2846
def to_dictionary( self, key_selector=None, value_selector=None): """Build a dictionary from the source sequence. Args: key_selector: A unary callable to extract a key from each item or None. If None, the default key selector produces a single dictionary key, which if the key of this Grouping. value_selector: A unary callable to extract a value from each item. If None, the default value selector produces a list, which contains all elements from this Grouping. Note: This method uses immediate execution. Raises: ValueError: If the Queryable is closed. TypeError: If key_selector is not callable. TypeError: If value_selector is not callable. """ if key_selector is None: key_selector = lambda _: self.key if value_selector is None: value_selector = lambda _: self.to_list() return super(Grouping, self).to_dictionary(key_selector, value_selector)
[ "def", "to_dictionary", "(", "self", ",", "key_selector", "=", "None", ",", "value_selector", "=", "None", ")", ":", "if", "key_selector", "is", "None", ":", "key_selector", "=", "lambda", "_", ":", "self", ".", "key", "if", "value_selector", "is", "None",...
Build a dictionary from the source sequence. Args: key_selector: A unary callable to extract a key from each item or None. If None, the default key selector produces a single dictionary key, which if the key of this Grouping. value_selector: A unary callable to extract a value from each item. If None, the default value selector produces a list, which contains all elements from this Grouping. Note: This method uses immediate execution. Raises: ValueError: If the Queryable is closed. TypeError: If key_selector is not callable. TypeError: If value_selector is not callable.
[ "Build", "a", "dictionary", "from", "the", "source", "sequence", "." ]
python
train
37.103448
icometrix/dicom2nifti
dicom2nifti/convert_ge.py
https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_ge.py#L139-L165
def _get_full_block(grouped_dicoms): """ Generate a full datablock containing all timepoints """ # For each slice / mosaic create a data volume block data_blocks = [] for index in range(0, len(grouped_dicoms)): logger.info('Creating block %s of %s' % (index + 1, len(grouped_dicoms))) data_blocks.append(_timepoint_to_block(grouped_dicoms[index])) # Add the data_blocks together to one 4d block size_x = numpy.shape(data_blocks[0])[0] size_y = numpy.shape(data_blocks[0])[1] size_z = numpy.shape(data_blocks[0])[2] size_t = len(data_blocks) full_block = numpy.zeros((size_x, size_y, size_z, size_t), dtype=data_blocks[0].dtype) for index in range(0, size_t): if full_block[:, :, :, index].shape != data_blocks[index].shape: logger.warning('Missing slices (slice count mismatch between timepoint %s and %s)' % (index - 1, index)) logger.warning('---------------------------------------------------------') logger.warning(full_block[:, :, :, index].shape) logger.warning(data_blocks[index].shape) logger.warning('---------------------------------------------------------') raise ConversionError("MISSING_DICOM_FILES") full_block[:, :, :, index] = data_blocks[index] return full_block
[ "def", "_get_full_block", "(", "grouped_dicoms", ")", ":", "# For each slice / mosaic create a data volume block", "data_blocks", "=", "[", "]", "for", "index", "in", "range", "(", "0", ",", "len", "(", "grouped_dicoms", ")", ")", ":", "logger", ".", "info", "("...
Generate a full datablock containing all timepoints
[ "Generate", "a", "full", "datablock", "containing", "all", "timepoints" ]
python
train
48.777778
linode/linode_api4-python
linode_api4/objects/account.py
https://github.com/linode/linode_api4-python/blob/1dd7318d2aed014c746d48c7957464c57af883ca/linode_api4/objects/account.py#L163-L173
def reset_secret(self): """ Resets the client secret for this client. """ result = self._client.post("{}/reset_secret".format(OAuthClient.api_endpoint), model=self) if not 'id' in result: raise UnexpectedResponseError('Unexpected response when resetting secret!', json=result) self._populate(result) return self.secret
[ "def", "reset_secret", "(", "self", ")", ":", "result", "=", "self", ".", "_client", ".", "post", "(", "\"{}/reset_secret\"", ".", "format", "(", "OAuthClient", ".", "api_endpoint", ")", ",", "model", "=", "self", ")", "if", "not", "'id'", "in", "result"...
Resets the client secret for this client.
[ "Resets", "the", "client", "secret", "for", "this", "client", "." ]
python
train
34.363636
abseil/abseil-py
absl/flags/_flagvalues.py
https://github.com/abseil/abseil-py/blob/9d73fdaa23a6b6726aa5731390f388c0c6250ee5/absl/flags/_flagvalues.py#L512-L531
def _assert_validators(self, validators): """Asserts if all validators in the list are satisfied. It asserts validators in the order they were created. Args: validators: Iterable(validators.Validator), validators to be verified. Raises: AttributeError: Raised if validators work with a non-existing flag. IllegalFlagValueError: Raised if validation fails for at least one validator. """ for validator in sorted( validators, key=lambda validator: validator.insertion_index): try: validator.verify(self) except _exceptions.ValidationError as e: message = validator.print_flags_with_values(self) raise _exceptions.IllegalFlagValueError('%s: %s' % (message, str(e)))
[ "def", "_assert_validators", "(", "self", ",", "validators", ")", ":", "for", "validator", "in", "sorted", "(", "validators", ",", "key", "=", "lambda", "validator", ":", "validator", ".", "insertion_index", ")", ":", "try", ":", "validator", ".", "verify", ...
Asserts if all validators in the list are satisfied. It asserts validators in the order they were created. Args: validators: Iterable(validators.Validator), validators to be verified. Raises: AttributeError: Raised if validators work with a non-existing flag. IllegalFlagValueError: Raised if validation fails for at least one validator.
[ "Asserts", "if", "all", "validators", "in", "the", "list", "are", "satisfied", "." ]
python
train
37.55
softlayer/softlayer-python
SoftLayer/CLI/virt/create.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/virt/create.py#L273-L304
def _validate_args(env, args): """Raises an ArgumentError if the given arguments are not valid.""" if all([args['cpu'], args['flavor']]): raise exceptions.ArgumentError( '[-c | --cpu] not allowed with [-f | --flavor]') if all([args['memory'], args['flavor']]): raise exceptions.ArgumentError( '[-m | --memory] not allowed with [-f | --flavor]') if all([args['dedicated'], args['flavor']]): raise exceptions.ArgumentError( '[-d | --dedicated] not allowed with [-f | --flavor]') if all([args['host_id'], args['flavor']]): raise exceptions.ArgumentError( '[-h | --host-id] not allowed with [-f | --flavor]') if all([args['userdata'], args['userfile']]): raise exceptions.ArgumentError( '[-u | --userdata] not allowed with [-F | --userfile]') image_args = [args['os'], args['image']] if all(image_args): raise exceptions.ArgumentError( '[-o | --os] not allowed with [--image]') while not any([args['os'], args['image']]): args['os'] = env.input("Operating System Code", default="", show_default=False) if not args['os']: args['image'] = env.input("Image", default="", show_default=False)
[ "def", "_validate_args", "(", "env", ",", "args", ")", ":", "if", "all", "(", "[", "args", "[", "'cpu'", "]", ",", "args", "[", "'flavor'", "]", "]", ")", ":", "raise", "exceptions", ".", "ArgumentError", "(", "'[-c | --cpu] not allowed with [-f | --flavor]'...
Raises an ArgumentError if the given arguments are not valid.
[ "Raises", "an", "ArgumentError", "if", "the", "given", "arguments", "are", "not", "valid", "." ]
python
train
38.84375
aio-libs/aioredis
aioredis/commands/set.py
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/commands/set.py#L38-L40
def smembers(self, key, *, encoding=_NOTSET): """Get all the members in a set.""" return self.execute(b'SMEMBERS', key, encoding=encoding)
[ "def", "smembers", "(", "self", ",", "key", ",", "*", ",", "encoding", "=", "_NOTSET", ")", ":", "return", "self", ".", "execute", "(", "b'SMEMBERS'", ",", "key", ",", "encoding", "=", "encoding", ")" ]
Get all the members in a set.
[ "Get", "all", "the", "members", "in", "a", "set", "." ]
python
train
50.666667
spacetelescope/drizzlepac
drizzlepac/staticMask.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/staticMask.py#L255-L282
def saveToFile(self,imageObjectList): """ Saves the static mask to a file it uses the signatures associated with each mask to contruct the filename for the output mask image. """ virtual = imageObjectList[0].inmemory for key in self.masklist.keys(): #check to see if the file already exists on disk filename = self.masknames[key] #create a new fits image with the mask array and a standard header #open a new header and data unit newHDU = fits.PrimaryHDU() newHDU.data = self.masklist[key] if virtual: for img in imageObjectList: img.saveVirtualOutputs({filename:newHDU}) else: try: newHDU.writeto(filename, overwrite=True) log.info("Saving static mask to disk: %s" % filename) except IOError: log.error("Problem saving static mask file: %s to " "disk!\n" % filename) raise IOError
[ "def", "saveToFile", "(", "self", ",", "imageObjectList", ")", ":", "virtual", "=", "imageObjectList", "[", "0", "]", ".", "inmemory", "for", "key", "in", "self", ".", "masklist", ".", "keys", "(", ")", ":", "#check to see if the file already exists on disk", ...
Saves the static mask to a file it uses the signatures associated with each mask to contruct the filename for the output mask image.
[ "Saves", "the", "static", "mask", "to", "a", "file", "it", "uses", "the", "signatures", "associated", "with", "each", "mask", "to", "contruct", "the", "filename", "for", "the", "output", "mask", "image", "." ]
python
train
38.857143
RJT1990/pyflux
pyflux/ensembles/mixture_of_experts.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/ensembles/mixture_of_experts.py#L76-L111
def add_model(self, model): """ Adds a PyFlux model to the aggregating algorithm Parameters ---------- model : pf.[MODEL] A PyFlux univariate model Returns ---------- - Void (changes self.model_list) """ if model.model_type not in self.supported_models: raise ValueError('Model type not supported for Aggregate! Apologies') if not self.model_list: self.model_list.append(model) if model.model_type in ['EGARCH', 'EGARCHM', 'EGARCHMReg', 'GARCH', 'LMEGARCH', 'LMSEGARCH', 'SEGARCH', 'SEGARCHM']: self.data = np.abs(model.data) else: self.data = model.data self.index = model.index else: if model.model_type in ['EGARCH', 'EGARCHM', 'EGARCHMReg', 'GARCH', 'LMEGARCH', 'LMSEGARCH', 'SEGARCH', 'SEGARCHM']: if np.isclose(np.abs(np.abs(model.data[-self.match_window:])-self.data[-self.match_window:]).sum(),0.0) or model.model_type=='GPNARX': self.model_list.append(model) else: raise ValueError('Data entered is deemed different based on %s last values!' % (s)) else: if np.isclose(np.abs(model.data[-self.match_window:]-self.data[-self.match_window:]).sum(),0.0) or model.model_type=='GPNARX': self.model_list.append(model) else: raise ValueError('Data entered is deemed different based on %s last values!' % (s)) self.model_names = [i.model_name for i in self.model_list]
[ "def", "add_model", "(", "self", ",", "model", ")", ":", "if", "model", ".", "model_type", "not", "in", "self", ".", "supported_models", ":", "raise", "ValueError", "(", "'Model type not supported for Aggregate! Apologies'", ")", "if", "not", "self", ".", "model...
Adds a PyFlux model to the aggregating algorithm Parameters ---------- model : pf.[MODEL] A PyFlux univariate model Returns ---------- - Void (changes self.model_list)
[ "Adds", "a", "PyFlux", "model", "to", "the", "aggregating", "algorithm", "Parameters", "----------", "model", ":", "pf", ".", "[", "MODEL", "]", "A", "PyFlux", "univariate", "model" ]
python
train
44.861111
cstockton/py-gensend
gensend/providers/common.py
https://github.com/cstockton/py-gensend/blob/8c8e911f8e8c386bea42967350beb4636fc19240/gensend/providers/common.py#L196-L204
def shuffle(self, *args): """Shuffles all arguments and returns them. ARG_1,...,ARG_N %{SHUFFLE:A, B ,...,F} -> 'CDA B FE' """ call_args = list(args) self.random.shuffle(call_args) return ''.join(call_args)
[ "def", "shuffle", "(", "self", ",", "*", "args", ")", ":", "call_args", "=", "list", "(", "args", ")", "self", ".", "random", ".", "shuffle", "(", "call_args", ")", "return", "''", ".", "join", "(", "call_args", ")" ]
Shuffles all arguments and returns them. ARG_1,...,ARG_N %{SHUFFLE:A, B ,...,F} -> 'CDA B FE'
[ "Shuffles", "all", "arguments", "and", "returns", "them", ".", "ARG_1", "...", "ARG_N" ]
python
train
28.777778
inveniosoftware-contrib/json-merger
json_merger/stats.py
https://github.com/inveniosoftware-contrib/json-merger/blob/adc6d372da018427e1db7b92424d3471e01a4118/json_merger/stats.py#L72-L77
def move_to_result(self, lst_idx): """Moves element from lst available at lst_idx.""" self.in_result_idx.add(lst_idx) if lst_idx in self.not_in_result_root_match_idx: self.not_in_result_root_match_idx.remove(lst_idx)
[ "def", "move_to_result", "(", "self", ",", "lst_idx", ")", ":", "self", ".", "in_result_idx", ".", "add", "(", "lst_idx", ")", "if", "lst_idx", "in", "self", ".", "not_in_result_root_match_idx", ":", "self", ".", "not_in_result_root_match_idx", ".", "remove", ...
Moves element from lst available at lst_idx.
[ "Moves", "element", "from", "lst", "available", "at", "lst_idx", "." ]
python
train
41.333333
rigetti/pyquil
pyquil/wavefunction.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/wavefunction.py#L148-L170
def pretty_print(self, decimal_digits=2): """ Returns a string repr of the wavefunction, ignoring all outcomes with approximately zero amplitude (up to a certain number of decimal digits) and rounding the amplitudes to decimal_digits. :param int decimal_digits: The number of digits to truncate to. :return: A dict with outcomes as keys and complex amplitudes as values. :rtype: str """ outcome_dict = {} qubit_num = len(self) pp_string = "" for index, amplitude in enumerate(self.amplitudes): outcome = get_bitstring_from_index(index, qubit_num) amplitude = round(amplitude.real, decimal_digits) + \ round(amplitude.imag, decimal_digits) * 1.j if amplitude != 0.: outcome_dict[outcome] = amplitude pp_string += str(amplitude) + "|{}> + ".format(outcome) if len(pp_string) >= 3: pp_string = pp_string[:-3] # remove the dangling + if it is there return pp_string
[ "def", "pretty_print", "(", "self", ",", "decimal_digits", "=", "2", ")", ":", "outcome_dict", "=", "{", "}", "qubit_num", "=", "len", "(", "self", ")", "pp_string", "=", "\"\"", "for", "index", ",", "amplitude", "in", "enumerate", "(", "self", ".", "a...
Returns a string repr of the wavefunction, ignoring all outcomes with approximately zero amplitude (up to a certain number of decimal digits) and rounding the amplitudes to decimal_digits. :param int decimal_digits: The number of digits to truncate to. :return: A dict with outcomes as keys and complex amplitudes as values. :rtype: str
[ "Returns", "a", "string", "repr", "of", "the", "wavefunction", "ignoring", "all", "outcomes", "with", "approximately", "zero", "amplitude", "(", "up", "to", "a", "certain", "number", "of", "decimal", "digits", ")", "and", "rounding", "the", "amplitudes", "to",...
python
train
45.608696
YuriyGuts/pygoose
pygoose/kg/io.py
https://github.com/YuriyGuts/pygoose/blob/4d9b8827c6d6c4b79949d1cd653393498c0bb3c2/pygoose/kg/io.py#L50-L61
def save_json(obj, filename, **kwargs): """ Save an object as a JSON file. Args: obj: The object to save. Must be JSON-serializable. filename: Path to the output file. **kwargs: Additional arguments to `json.dump`. """ with open(filename, 'w', encoding='utf-8') as f: json.dump(obj, f, **kwargs)
[ "def", "save_json", "(", "obj", ",", "filename", ",", "*", "*", "kwargs", ")", ":", "with", "open", "(", "filename", ",", "'w'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "json", ".", "dump", "(", "obj", ",", "f", ",", "*", "*", "kw...
Save an object as a JSON file. Args: obj: The object to save. Must be JSON-serializable. filename: Path to the output file. **kwargs: Additional arguments to `json.dump`.
[ "Save", "an", "object", "as", "a", "JSON", "file", "." ]
python
train
28.166667
NatLibFi/Skosify
skosify/skosify.py
https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L221-L235
def transform_sparql_construct(rdf, construct_query): """Perform a SPARQL CONSTRUCT query on the RDF data and return a new graph.""" logging.debug("performing SPARQL CONSTRUCT transformation") if construct_query[0] == '@': # actual query should be read from file construct_query = file(construct_query[1:]).read() logging.debug("CONSTRUCT query: %s", construct_query) newgraph = Graph() for triple in rdf.query(construct_query): newgraph.add(triple) return newgraph
[ "def", "transform_sparql_construct", "(", "rdf", ",", "construct_query", ")", ":", "logging", ".", "debug", "(", "\"performing SPARQL CONSTRUCT transformation\"", ")", "if", "construct_query", "[", "0", "]", "==", "'@'", ":", "# actual query should be read from file", "...
Perform a SPARQL CONSTRUCT query on the RDF data and return a new graph.
[ "Perform", "a", "SPARQL", "CONSTRUCT", "query", "on", "the", "RDF", "data", "and", "return", "a", "new", "graph", "." ]
python
train
33.4
wbond/oscrypto
oscrypto/_openssl/tls.py
https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/_openssl/tls.py#L696-L729
def _raw_write(self): """ Takes ciphertext from the memory bio and writes it to the socket. :return: A byte string of ciphertext going to the socket. Used for debugging the handshake only. """ data_available = libssl.BIO_ctrl_pending(self._wbio) if data_available == 0: return b'' to_read = min(self._buffer_size, data_available) read = libssl.BIO_read(self._wbio, self._bio_write_buffer, to_read) to_write = bytes_from_buffer(self._bio_write_buffer, read) output = to_write while len(to_write): raise_disconnect = False try: sent = self._socket.send(to_write) except (socket_.error) as e: # Handle ECONNRESET and EPIPE if e.errno == 104 or e.errno == 32: raise_disconnect = True else: raise if raise_disconnect: raise_disconnection() to_write = to_write[sent:] if len(to_write): self.select_write() return output
[ "def", "_raw_write", "(", "self", ")", ":", "data_available", "=", "libssl", ".", "BIO_ctrl_pending", "(", "self", ".", "_wbio", ")", "if", "data_available", "==", "0", ":", "return", "b''", "to_read", "=", "min", "(", "self", ".", "_buffer_size", ",", "...
Takes ciphertext from the memory bio and writes it to the socket. :return: A byte string of ciphertext going to the socket. Used for debugging the handshake only.
[ "Takes", "ciphertext", "from", "the", "memory", "bio", "and", "writes", "it", "to", "the", "socket", "." ]
python
valid
33.264706
PyHDI/Pyverilog
pyverilog/vparser/parser.py
https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L1027-L1030
def p_expression_lessthan(self, p): 'expression : expression LT expression' p[0] = LessThan(p[1], p[3], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
[ "def", "p_expression_lessthan", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "LessThan", "(", "p", "[", "1", "]", ",", "p", "[", "3", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "p", ".", "set_lineno", "(", ...
expression : expression LT expression
[ "expression", ":", "expression", "LT", "expression" ]
python
train
43.25
nion-software/nionswift
nion/swift/Facade.py
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/Facade.py#L994-L1003
def data(self, data: numpy.ndarray) -> None: """Set the data. :param data: A numpy ndarray. .. versionadded:: 1.0 Scriptable: Yes """ self.__data_item.set_data(numpy.copy(data))
[ "def", "data", "(", "self", ",", "data", ":", "numpy", ".", "ndarray", ")", "->", "None", ":", "self", ".", "__data_item", ".", "set_data", "(", "numpy", ".", "copy", "(", "data", ")", ")" ]
Set the data. :param data: A numpy ndarray. .. versionadded:: 1.0 Scriptable: Yes
[ "Set", "the", "data", "." ]
python
train
21.9
qualisys/qualisys_python_sdk
qtm/qrt.py
https://github.com/qualisys/qualisys_python_sdk/blob/127d7eeebc2b38b5cafdfa5d1d0198437fedd274/qtm/qrt.py#L15-L34
def validate_response(expected_responses): """ Decorator to validate responses from QTM """ def internal_decorator(function): @wraps(function) async def wrapper(*args, **kwargs): response = await function(*args, **kwargs) for expected_response in expected_responses: if response.startswith(expected_response): return response raise QRTCommandException( "Expected %s but got %s" % (expected_responses, response) ) return wrapper return internal_decorator
[ "def", "validate_response", "(", "expected_responses", ")", ":", "def", "internal_decorator", "(", "function", ")", ":", "@", "wraps", "(", "function", ")", "async", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "response", "=", ...
Decorator to validate responses from QTM
[ "Decorator", "to", "validate", "responses", "from", "QTM" ]
python
valid
28.8
openstack/proliantutils
proliantutils/redfish/redfish.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/redfish.py#L212-L219
def get_host_power_status(self): """Request the power state of the server. :returns: Power State of the server, 'ON' or 'OFF' :raises: IloError, on an error from iLO. """ sushy_system = self._get_sushy_system(PROLIANT_SYSTEM_ID) return GET_POWER_STATE_MAP.get(sushy_system.power_state)
[ "def", "get_host_power_status", "(", "self", ")", ":", "sushy_system", "=", "self", ".", "_get_sushy_system", "(", "PROLIANT_SYSTEM_ID", ")", "return", "GET_POWER_STATE_MAP", ".", "get", "(", "sushy_system", ".", "power_state", ")" ]
Request the power state of the server. :returns: Power State of the server, 'ON' or 'OFF' :raises: IloError, on an error from iLO.
[ "Request", "the", "power", "state", "of", "the", "server", "." ]
python
train
40.875
asyncdef/apyio
apyio/__init__.py
https://github.com/asyncdef/apyio/blob/d6b914929269b8795ca4d6b1ede8a393841cbc29/apyio/__init__.py#L614-L626
async def seek(self, pos, whence=sync_io.SEEK_SET): """Move to new file position. Argument offset is a byte count. Optional argument whence defaults to SEEK_SET or 0 (offset from start of file, offset should be >= 0); other values are SEEK_CUR or 1 (move relative to current position, positive or negative), and SEEK_END or 2 (move relative to end of file, usually negative, although many platforms allow seeking beyond the end of a file). Note that not all file objects are seekable. """ return self._stream.seek(pos, whence)
[ "async", "def", "seek", "(", "self", ",", "pos", ",", "whence", "=", "sync_io", ".", "SEEK_SET", ")", ":", "return", "self", ".", "_stream", ".", "seek", "(", "pos", ",", "whence", ")" ]
Move to new file position. Argument offset is a byte count. Optional argument whence defaults to SEEK_SET or 0 (offset from start of file, offset should be >= 0); other values are SEEK_CUR or 1 (move relative to current position, positive or negative), and SEEK_END or 2 (move relative to end of file, usually negative, although many platforms allow seeking beyond the end of a file). Note that not all file objects are seekable.
[ "Move", "to", "new", "file", "position", "." ]
python
train
45.923077
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/core/core_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/core/core_client.py#L474-L493
def update_team(self, team_data, project_id, team_id): """UpdateTeam. [Preview API] Update a team's name and/or description. :param :class:`<WebApiTeam> <azure.devops.v5_1.core.models.WebApiTeam>` team_data: :param str project_id: The name or ID (GUID) of the team project containing the team to update. :param str team_id: The name of ID of the team to update. :rtype: :class:`<WebApiTeam> <azure.devops.v5_1.core.models.WebApiTeam>` """ route_values = {} if project_id is not None: route_values['projectId'] = self._serialize.url('project_id', project_id, 'str') if team_id is not None: route_values['teamId'] = self._serialize.url('team_id', team_id, 'str') content = self._serialize.body(team_data, 'WebApiTeam') response = self._send(http_method='PATCH', location_id='d30a3dd1-f8ba-442a-b86a-bd0c0c383e59', version='5.1-preview.2', route_values=route_values, content=content) return self._deserialize('WebApiTeam', response)
[ "def", "update_team", "(", "self", ",", "team_data", ",", "project_id", ",", "team_id", ")", ":", "route_values", "=", "{", "}", "if", "project_id", "is", "not", "None", ":", "route_values", "[", "'projectId'", "]", "=", "self", ".", "_serialize", ".", "...
UpdateTeam. [Preview API] Update a team's name and/or description. :param :class:`<WebApiTeam> <azure.devops.v5_1.core.models.WebApiTeam>` team_data: :param str project_id: The name or ID (GUID) of the team project containing the team to update. :param str team_id: The name of ID of the team to update. :rtype: :class:`<WebApiTeam> <azure.devops.v5_1.core.models.WebApiTeam>`
[ "UpdateTeam", ".", "[", "Preview", "API", "]", "Update", "a", "team", "s", "name", "and", "/", "or", "description", ".", ":", "param", ":", "class", ":", "<WebApiTeam", ">", "<azure", ".", "devops", ".", "v5_1", ".", "core", ".", "models", ".", "WebA...
python
train
57.9
bethgelab/foolbox
foolbox/models/base.py
https://github.com/bethgelab/foolbox/blob/8ab54248c70e45d8580a7d9ee44c9c0fb5755c4a/foolbox/models/base.py#L223-L250
def gradient(self, image, label): """Calculates the gradient of the cross-entropy loss w.r.t. the image. The default implementation calls predictions_and_gradient. Subclasses can provide more efficient implementations that only calculate the gradient. Parameters ---------- image : `numpy.ndarray` Single input with shape as expected by the model (without the batch dimension). label : int Reference label used to calculate the gradient. Returns ------- gradient : `numpy.ndarray` The gradient of the cross-entropy loss w.r.t. the image. Will have the same shape as the image. See Also -------- :meth:`gradient` """ _, gradient = self.predictions_and_gradient(image, label) return gradient
[ "def", "gradient", "(", "self", ",", "image", ",", "label", ")", ":", "_", ",", "gradient", "=", "self", ".", "predictions_and_gradient", "(", "image", ",", "label", ")", "return", "gradient" ]
Calculates the gradient of the cross-entropy loss w.r.t. the image. The default implementation calls predictions_and_gradient. Subclasses can provide more efficient implementations that only calculate the gradient. Parameters ---------- image : `numpy.ndarray` Single input with shape as expected by the model (without the batch dimension). label : int Reference label used to calculate the gradient. Returns ------- gradient : `numpy.ndarray` The gradient of the cross-entropy loss w.r.t. the image. Will have the same shape as the image. See Also -------- :meth:`gradient`
[ "Calculates", "the", "gradient", "of", "the", "cross", "-", "entropy", "loss", "w", ".", "r", ".", "t", ".", "the", "image", "." ]
python
valid
30.821429
dreipol/djangocms-spa
djangocms_spa/content_helpers.py
https://github.com/dreipol/djangocms-spa/blob/eb0048eb29aef6314431c3c1800b363c40619818/djangocms_spa/content_helpers.py#L48-L102
def get_frontend_data_dict_for_placeholders(placeholders, request, editable=False): """ Takes a list of placeholder instances and returns the data that is used by the frontend to render all contents. The returned dict is grouped by placeholder slots. """ data_dict = {} for placeholder in placeholders: if placeholder: plugins = [] # We don't use the helper method `placeholder.get_plugins()` because of the wrong order by path. placeholder_plugins = placeholder.cmsplugin_set.filter(language=request.LANGUAGE_CODE).order_by( settings.DJANGOCMS_SPA_PLUGIN_ORDER_FIELD) for plugin in placeholder_plugins: # We need the complete cascading structure of the plugins in the frontend. This is why we ignore the # children here and add them later in the loop. if not plugin.parent: plugins.append(get_frontend_data_dict_for_plugin( request=request, plugin=plugin, editable=editable) ) if plugins or editable: data_dict[placeholder.slot] = { 'type': 'cmp-%s' % placeholder.slot, 'plugins': plugins, } if editable: # This is the structure of the template `cms/toolbar/placeholder.html` that is used to register # the frontend editing. from cms.plugin_pool import plugin_pool plugin_types = [cls.__name__ for cls in plugin_pool.get_all_plugins(placeholder.slot, placeholder.page)] allowed_plugins = plugin_types + plugin_pool.get_system_plugins() data_dict[placeholder.slot]['cms'] = [ 'cms-placeholder-{}'.format(placeholder.pk), { 'type': 'placeholder', 'name': str(placeholder.get_label()), 'page_language': request.LANGUAGE_CODE, 'placeholder_id': placeholder.pk, 'plugin_language': request.LANGUAGE_CODE, 'plugin_restriction': [module for module in allowed_plugins], 'addPluginHelpTitle': 'Add plugin to placeholder {}'.format(placeholder.get_label()), 'urls': { 'add_plugin': placeholder.get_add_url(), 'copy_plugin': placeholder.get_copy_url() } } ] return data_dict
[ "def", "get_frontend_data_dict_for_placeholders", "(", "placeholders", ",", "request", ",", "editable", "=", "False", ")", ":", "data_dict", "=", "{", "}", "for", "placeholder", "in", "placeholders", ":", "if", "placeholder", ":", "plugins", "=", "[", "]", "# ...
Takes a list of placeholder instances and returns the data that is used by the frontend to render all contents. The returned dict is grouped by placeholder slots.
[ "Takes", "a", "list", "of", "placeholder", "instances", "and", "returns", "the", "data", "that", "is", "used", "by", "the", "frontend", "to", "render", "all", "contents", ".", "The", "returned", "dict", "is", "grouped", "by", "placeholder", "slots", "." ]
python
train
47.545455
spencerahill/aospy
aospy/data_loader.py
https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/data_loader.py#L617-L636
def _maybe_apply_time_shift(da, time_offset=None, **DataAttrs): """Correct off-by-one error in GFDL instantaneous model data. Instantaneous data that is outputted by GFDL models is generally off by one timestep. For example, a netCDF file that is supposed to correspond to 6 hourly data for the month of January, will have its last time value be in February. """ if time_offset is not None: time = times.apply_time_offset(da[TIME_STR], **time_offset) da[TIME_STR] = time else: if DataAttrs['dtype_in_time'] == 'inst': if DataAttrs['intvl_in'].endswith('hr'): offset = -1 * int(DataAttrs['intvl_in'][0]) else: offset = 0 time = times.apply_time_offset(da[TIME_STR], hours=offset) da[TIME_STR] = time return da
[ "def", "_maybe_apply_time_shift", "(", "da", ",", "time_offset", "=", "None", ",", "*", "*", "DataAttrs", ")", ":", "if", "time_offset", "is", "not", "None", ":", "time", "=", "times", ".", "apply_time_offset", "(", "da", "[", "TIME_STR", "]", ",", "*", ...
Correct off-by-one error in GFDL instantaneous model data. Instantaneous data that is outputted by GFDL models is generally off by one timestep. For example, a netCDF file that is supposed to correspond to 6 hourly data for the month of January, will have its last time value be in February.
[ "Correct", "off", "-", "by", "-", "one", "error", "in", "GFDL", "instantaneous", "model", "data", "." ]
python
train
45.15
geophysics-ubonn/reda
lib/reda/plotters/histograms.py
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/plotters/histograms.py#L29-L130
def plot_histograms(ertobj, keys, **kwargs): """Generate histograms for one or more keys in the given container. Parameters ---------- ertobj : container instance or :class:`pandas.DataFrame` data object which contains the data. keys : str or list of strings which keys (column names) to plot merge : bool, optional if True, then generate only one figure with all key-plots as columns (default True) log10plot : bool, optional default: True extra_dims : list, optional Examples -------- >>> from reda.plotters import plot_histograms >>> from reda.testing import ERTContainer >>> figs_dict = plot_histograms(ERTContainer, "r", merge=False) Generating histogram plot for key: r Returns ------- figures : dict dictionary with the generated histogram figures """ # you can either provide a DataFrame or an ERT object if isinstance(ertobj, pd.DataFrame): df = ertobj else: df = ertobj.data if df.shape[0] == 0: raise Exception('No data present, cannot plot') if isinstance(keys, str): keys = [keys, ] figures = {} merge_figs = kwargs.get('merge', True) if merge_figs: nr_x = 2 nr_y = len(keys) size_x = 15 / 2.54 size_y = 5 * nr_y / 2.54 fig, axes_all = plt.subplots(nr_y, nr_x, figsize=(size_x, size_y)) axes_all = np.atleast_2d(axes_all) for row_nr, key in enumerate(keys): print('Generating histogram plot for key: {0}'.format(key)) subdata_raw = df[key].values subdata = subdata_raw[~np.isnan(subdata_raw)] subdata = subdata[np.isfinite(subdata)] subdata_log10_with_nan = np.log10(subdata[subdata > 0]) subdata_log10 = subdata_log10_with_nan[~np.isnan( subdata_log10_with_nan) ] subdata_log10 = subdata_log10[np.isfinite(subdata_log10)] if merge_figs: axes = axes_all[row_nr].squeeze() else: fig, axes = plt.subplots(1, 2, figsize=(10 / 2.54, 5 / 2.54)) ax = axes[0] ax.hist( subdata, _get_nr_bins(subdata.size), ) ax.set_xlabel( units.get_label(key) ) ax.set_ylabel('count') ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(5)) ax.tick_params(axis='both', which='major', labelsize=6) 
ax.tick_params(axis='both', which='minor', labelsize=6) if subdata_log10.size > 0: ax = axes[1] ax.hist( subdata_log10, _get_nr_bins(subdata.size), ) ax.set_xlabel(r'$log_{10}($' + units.get_label(key) + ')') ax.set_ylabel('count') ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(5)) else: pass # del(axes[1]) fig.tight_layout() if not merge_figs: figures[key] = fig if merge_figs: figures['all'] = fig return figures
[ "def", "plot_histograms", "(", "ertobj", ",", "keys", ",", "*", "*", "kwargs", ")", ":", "# you can either provide a DataFrame or an ERT object", "if", "isinstance", "(", "ertobj", ",", "pd", ".", "DataFrame", ")", ":", "df", "=", "ertobj", "else", ":", "df", ...
Generate histograms for one or more keys in the given container. Parameters ---------- ertobj : container instance or :class:`pandas.DataFrame` data object which contains the data. keys : str or list of strings which keys (column names) to plot merge : bool, optional if True, then generate only one figure with all key-plots as columns (default True) log10plot : bool, optional default: True extra_dims : list, optional Examples -------- >>> from reda.plotters import plot_histograms >>> from reda.testing import ERTContainer >>> figs_dict = plot_histograms(ERTContainer, "r", merge=False) Generating histogram plot for key: r Returns ------- figures : dict dictionary with the generated histogram figures
[ "Generate", "histograms", "for", "one", "or", "more", "keys", "in", "the", "given", "container", "." ]
python
train
29.078431
GoogleCloudPlatform/appengine-gcs-client
python/src/cloudstorage/api_utils.py
https://github.com/GoogleCloudPlatform/appengine-gcs-client/blob/d11078331ecd915d753c886e96a80133599f3f98/python/src/cloudstorage/api_utils.py#L279-L308
def _check(cls, name, val, can_be_zero=False, val_type=float): """Check init arguments. Args: name: name of the argument. For logging purpose. val: value. Value has to be non negative number. can_be_zero: whether value can be zero. val_type: Python type of the value. Returns: The value. Raises: ValueError: when invalid value is passed in. TypeError: when invalid value type is passed in. """ valid_types = [val_type] if val_type is float: valid_types.append(int) if type(val) not in valid_types: raise TypeError( 'Expect type %s for parameter %s' % (val_type.__name__, name)) if val < 0: raise ValueError( 'Value for parameter %s has to be greater than 0' % name) if not can_be_zero and val == 0: raise ValueError( 'Value for parameter %s can not be 0' % name) return val
[ "def", "_check", "(", "cls", ",", "name", ",", "val", ",", "can_be_zero", "=", "False", ",", "val_type", "=", "float", ")", ":", "valid_types", "=", "[", "val_type", "]", "if", "val_type", "is", "float", ":", "valid_types", ".", "append", "(", "int", ...
Check init arguments. Args: name: name of the argument. For logging purpose. val: value. Value has to be non negative number. can_be_zero: whether value can be zero. val_type: Python type of the value. Returns: The value. Raises: ValueError: when invalid value is passed in. TypeError: when invalid value type is passed in.
[ "Check", "init", "arguments", "." ]
python
train
29.6
tijme/not-your-average-web-crawler
nyawc/Routing.py
https://github.com/tijme/not-your-average-web-crawler/blob/d77c14e1616c541bb3980f649a7e6f8ed02761fb/nyawc/Routing.py#L66-L85
def is_treshold_reached(self, scraped_request): """Check if similar requests to the given requests have already been crawled X times. Where X is the minimum treshold amount from the options. Args: scraped_request (:class:`nyawc.http.Request`): The request that possibly reached the minimum treshold. Returns: bool: True if treshold reached, false otherwise. """ for route in self.__routing_options.routes: if re.compile(route).match(scraped_request.url): count_key = str(route) + scraped_request.method if count_key in self.__routing_count.keys(): return self.__routing_count[count_key] >= self.__routing_options.minimum_threshold return False
[ "def", "is_treshold_reached", "(", "self", ",", "scraped_request", ")", ":", "for", "route", "in", "self", ".", "__routing_options", ".", "routes", ":", "if", "re", ".", "compile", "(", "route", ")", ".", "match", "(", "scraped_request", ".", "url", ")", ...
Check if similar requests to the given requests have already been crawled X times. Where X is the minimum treshold amount from the options. Args: scraped_request (:class:`nyawc.http.Request`): The request that possibly reached the minimum treshold. Returns: bool: True if treshold reached, false otherwise.
[ "Check", "if", "similar", "requests", "to", "the", "given", "requests", "have", "already", "been", "crawled", "X", "times", ".", "Where", "X", "is", "the", "minimum", "treshold", "amount", "from", "the", "options", "." ]
python
train
39.55
ga4gh/ga4gh-server
ga4gh/server/datamodel/variants.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/variants.py#L766-L771
def getMetadataId(self, metadata): """ Returns the id of a metadata """ return str(datamodel.VariantSetMetadataCompoundId( self.getCompoundId(), 'metadata:' + metadata.key))
[ "def", "getMetadataId", "(", "self", ",", "metadata", ")", ":", "return", "str", "(", "datamodel", ".", "VariantSetMetadataCompoundId", "(", "self", ".", "getCompoundId", "(", ")", ",", "'metadata:'", "+", "metadata", ".", "key", ")", ")" ]
Returns the id of a metadata
[ "Returns", "the", "id", "of", "a", "metadata" ]
python
train
35.333333
iotile/coretools
transport_plugins/awsiot/iotile_transport_awsiot/connection_manager.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/awsiot/iotile_transport_awsiot/connection_manager.py#L174-L203
def get_connection_id(self, conn_or_int_id): """Get the connection id. Args: conn_or_int_id (int, string): The external integer connection id or and internal string connection id Returns: dict: The context data associated with that connection or None if it cannot be found. Raises: ArgumentError: When the key is not found in the list of active connections or is invalid. """ key = conn_or_int_id if isinstance(key, str): table = self._int_connections elif isinstance(key, int): table = self._connections else: raise ArgumentError("You must supply either an int connection id or a string internal id to _get_connection_state", id=key) try: data = table[key] except KeyError: raise ArgumentError("Could not find connection by id", id=key) return data['conn_id']
[ "def", "get_connection_id", "(", "self", ",", "conn_or_int_id", ")", ":", "key", "=", "conn_or_int_id", "if", "isinstance", "(", "key", ",", "str", ")", ":", "table", "=", "self", ".", "_int_connections", "elif", "isinstance", "(", "key", ",", "int", ")", ...
Get the connection id. Args: conn_or_int_id (int, string): The external integer connection id or and internal string connection id Returns: dict: The context data associated with that connection or None if it cannot be found. Raises: ArgumentError: When the key is not found in the list of active connections or is invalid.
[ "Get", "the", "connection", "id", "." ]
python
train
32.566667
hannes-brt/cudnn-python-wrappers
libcudnn.py
https://github.com/hannes-brt/cudnn-python-wrappers/blob/55aab1242924c2fd43db150cf2ccc2a3df958dd5/libcudnn.py#L712-L738
def cudnnSetFilter4dDescriptor(wDesc, dataType, format, k, c, h, w): """" Initialize a filter descriptor. This function initializes a previously created filter descriptor object into a 4D filter. Filters layout must be contiguous in memory. Parameters ---------- wDesc : cudnnFilterDescriptor Handle to a previously created filter descriptor. dataType : cudnnDataType Data type. format: cudnnTensorFormat Tensor format k : int Number of output feature maps. c : int Number of input feature maps. h : int Height of each filter. w : int Width of each filter. """ status = _libcudnn.cudnnSetFilter4dDescriptor(wDesc, dataType, format, k, c, h, w) cudnnCheckStatus(status)
[ "def", "cudnnSetFilter4dDescriptor", "(", "wDesc", ",", "dataType", ",", "format", ",", "k", ",", "c", ",", "h", ",", "w", ")", ":", "status", "=", "_libcudnn", ".", "cudnnSetFilter4dDescriptor", "(", "wDesc", ",", "dataType", ",", "format", ",", "k", ",...
Initialize a filter descriptor. This function initializes a previously created filter descriptor object into a 4D filter. Filters layout must be contiguous in memory. Parameters ---------- wDesc : cudnnFilterDescriptor Handle to a previously created filter descriptor. dataType : cudnnDataType Data type. format: cudnnTensorFormat Tensor format k : int Number of output feature maps. c : int Number of input feature maps. h : int Height of each filter. w : int Width of each filter.
[ "Initialize", "a", "filter", "descriptor", "." ]
python
train
28.333333
Kronuz/pyScss
scss/namespace.py
https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/namespace.py#L160-L168
def declare_internal(self, function): """Like declare(), but the registered function will also receive the current namespace as its first argument. Useful for functions that inspect the state of the compilation, like ``variable-exists()``. Probably not so useful for anything else. """ function._pyscss_needs_namespace = True self._auto_register_function(function, function.__name__, 1) return function
[ "def", "declare_internal", "(", "self", ",", "function", ")", ":", "function", ".", "_pyscss_needs_namespace", "=", "True", "self", ".", "_auto_register_function", "(", "function", ",", "function", ".", "__name__", ",", "1", ")", "return", "function" ]
Like declare(), but the registered function will also receive the current namespace as its first argument. Useful for functions that inspect the state of the compilation, like ``variable-exists()``. Probably not so useful for anything else.
[ "Like", "declare", "()", "but", "the", "registered", "function", "will", "also", "receive", "the", "current", "namespace", "as", "its", "first", "argument", ".", "Useful", "for", "functions", "that", "inspect", "the", "state", "of", "the", "compilation", "like...
python
train
51
astropy/photutils
photutils/extern/sigma_clipping.py
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/extern/sigma_clipping.py#L60-L66
def _nanmedian(array, axis=None): """Bottleneck nanmedian function that handle tuple axis.""" if isinstance(axis, tuple): array = _move_tuple_axes_first(array, axis=axis) axis = 0 return bottleneck.nanmedian(array, axis=axis)
[ "def", "_nanmedian", "(", "array", ",", "axis", "=", "None", ")", ":", "if", "isinstance", "(", "axis", ",", "tuple", ")", ":", "array", "=", "_move_tuple_axes_first", "(", "array", ",", "axis", "=", "axis", ")", "axis", "=", "0", "return", "bottleneck...
Bottleneck nanmedian function that handle tuple axis.
[ "Bottleneck", "nanmedian", "function", "that", "handle", "tuple", "axis", "." ]
python
train
35.428571
rjw57/throw
throw/identity.py
https://github.com/rjw57/throw/blob/74a7116362ba5b45635ab247472b25cfbdece4ee/throw/identity.py#L38-L101
def input_identity(interface = TerminalInterface()): """Get the full name, email address and SMTP information from the user.""" while True: identity = interface.input_fields(""" In order to send your files via email, I need to get your name and email address you will be using to send the files.""", ( 'name', 'Your full name', 'string' ), ( 'email', 'Your email address', 'string' )) try: (localpart, hostname) = identity['email'].split('@') break except ValueError: interface.error(""" I couldn't understand the email address you entered, please try again.""") while True: # Configure the SMTP information smtp_details = interface.input_fields(""" I need details of the SMTP server used to send email for your email address '%s'. These values can be obtained from the administrators of your email account. Most of the time, the default options should suffice if you are using a free email provider such as GMail.""" % identity['email'], ( 'host', 'The SMTP server hostname', 'string', 'smtp.' + hostname), ( 'port', 'The SMTP server port', 'integer', 465), ( 'use_ssl', 'Use SSL to connect', 'boolean', True), ( 'use_tls', 'Use TLS after connecting', 'boolean', False), ( 'use_auth', 'Use a username/password to log in', 'boolean', True) ) if smtp_details['use_auth']: credentials = interface.input_fields(""" I need the username and password you use to log into the SMTP server, if you provide a blank password, I'll assume you want me to ask you each time I try to send an email for your password. This is a more secure option but may be tiresome.""", ( 'username', 'Your username', 'string', localpart), ( 'password', 'Your password', 'password' )) if credentials['password'] == '': credentials['password'] = None smtp_details['username'] = credentials['username'] smtp_details['password'] = credentials['password'] new_identity = Identity(identity['name'], identity['email'], **smtp_details) # Ask if we want to send a test email. 
interface.new_section() interface.message("""I can try sending a test email to yourself with all the SMTP settings you've given me. This is generally a good idea because if we correct any mistakes now, you don't need to correct them when you want to send a file.""") if interface.input_boolean('Try sending a test email?', default=True): if new_identity.send_test_email(): return new_identity interface.message("""Sending the test email failed. You can go back and try re-entering your SMTP server details now if you wish.""") if not interface.input_boolean('Re-enter SMTP server details', default=True): return new_identity
[ "def", "input_identity", "(", "interface", "=", "TerminalInterface", "(", ")", ")", ":", "while", "True", ":", "identity", "=", "interface", ".", "input_fields", "(", "\"\"\"\n In order to send your files via email, I need to get your name and\n email addres...
Get the full name, email address and SMTP information from the user.
[ "Get", "the", "full", "name", "email", "address", "and", "SMTP", "information", "from", "the", "user", "." ]
python
train
48.875
projectatomic/atomic-reactor
atomic_reactor/inner.py
https://github.com/projectatomic/atomic-reactor/blob/fd31c01b964097210bf169960d051e5f04019a80/atomic_reactor/inner.py#L531-L578
def build_inside(input_method, input_args=None, substitutions=None): """ use requested input plugin to load configuration and then initiate build """ def process_keyvals(keyvals): """ ["key=val", "x=y"] -> {"key": "val", "x": "y"} """ keyvals = keyvals or [] processed_keyvals = {} for arg in keyvals: key, value = arg.split("=", 1) processed_keyvals[key] = value return processed_keyvals main = __name__.split('.', 1)[0] log_encoding = get_logging_encoding(main) logger.info("log encoding: %s", log_encoding) if not input_method: raise RuntimeError("No input method specified!") logger.debug("getting build json from input %s", input_method) cleaned_input_args = process_keyvals(input_args) cleaned_input_args['substitutions'] = process_keyvals(substitutions) input_runner = InputPluginsRunner([{'name': input_method, 'args': cleaned_input_args}]) build_json = input_runner.run()[input_method] if isinstance(build_json, Exception): raise RuntimeError("Input plugin raised exception: {}".format(build_json)) logger.debug("build json: %s", build_json) if not build_json: raise RuntimeError("No valid build json!") if not isinstance(build_json, dict): raise RuntimeError("Input plugin did not return valid build json: {}".format(build_json)) dbw = DockerBuildWorkflow(**build_json) try: build_result = dbw.build_docker_image() except Exception as e: logger.error('image build failed: %s', e) raise else: if not build_result or build_result.is_failed(): raise RuntimeError("no image built") else: logger.info("build has finished successfully \\o/")
[ "def", "build_inside", "(", "input_method", ",", "input_args", "=", "None", ",", "substitutions", "=", "None", ")", ":", "def", "process_keyvals", "(", "keyvals", ")", ":", "\"\"\" [\"key=val\", \"x=y\"] -> {\"key\": \"val\", \"x\": \"y\"} \"\"\"", "keyvals", "=", "keyv...
use requested input plugin to load configuration and then initiate build
[ "use", "requested", "input", "plugin", "to", "load", "configuration", "and", "then", "initiate", "build" ]
python
train
37.270833
pbrisk/unicum
unicum/visibleobject.py
https://github.com/pbrisk/unicum/blob/24bfa7355f36847a06646c58e9fd75bd3b689bfe/unicum/visibleobject.py#L68-L72
def from_range(cls, range_list, register_flag=True): """ core class method to create visible objects from a range (nested list) """ s = dict_from_range(range_list) obj = cls.from_serializable(s, register_flag) return obj
[ "def", "from_range", "(", "cls", ",", "range_list", ",", "register_flag", "=", "True", ")", ":", "s", "=", "dict_from_range", "(", "range_list", ")", "obj", "=", "cls", ".", "from_serializable", "(", "s", ",", "register_flag", ")", "return", "obj" ]
core class method to create visible objects from a range (nested list)
[ "core", "class", "method", "to", "create", "visible", "objects", "from", "a", "range", "(", "nested", "list", ")" ]
python
train
49.6
svetlyak40wt/python-cl-conditions
example/example.py
https://github.com/svetlyak40wt/python-cl-conditions/blob/709dfd55f2b8cf7eb9b7d86a6b70c8a3feed4b10/example/example.py#L93-L102
def log_analyzer(path): """This procedure replaces every line which can't be parsed with special object MalformedLogEntry. """ with handle(MalformedLogEntryError, lambda (c): invoke_restart('use_value', MalformedLogEntry(c.text))): for filename in find_all_logs(path): analyze_log(filename)
[ "def", "log_analyzer", "(", "path", ")", ":", "with", "handle", "(", "MalformedLogEntryError", ",", "lambda", "(", "c", ")", ":", "invoke_restart", "(", "'use_value'", ",", "MalformedLogEntry", "(", "c", ".", "text", ")", ")", ")", ":", "for", "filename", ...
This procedure replaces every line which can't be parsed with special object MalformedLogEntry.
[ "This", "procedure", "replaces", "every", "line", "which", "can", "t", "be", "parsed", "with", "special", "object", "MalformedLogEntry", "." ]
python
train
39.4
worstcase/blockade
blockade/state.py
https://github.com/worstcase/blockade/blob/3dc6ad803f0b0d56586dec9542a6a06aa06cf569/blockade/state.py#L127-L137
def _get_blockade_id_from_cwd(self, cwd=None): '''Generate a new blockade ID based on the CWD''' if not cwd: cwd = os.getcwd() # this follows a similar pattern as docker-compose uses parent_dir = os.path.abspath(cwd) basename = os.path.basename(parent_dir).lower() blockade_id = re.sub(r"[^a-z0-9]", "", basename) if not blockade_id: # if we can't get a valid name from CWD, use "default" blockade_id = "default" return blockade_id
[ "def", "_get_blockade_id_from_cwd", "(", "self", ",", "cwd", "=", "None", ")", ":", "if", "not", "cwd", ":", "cwd", "=", "os", ".", "getcwd", "(", ")", "# this follows a similar pattern as docker-compose uses", "parent_dir", "=", "os", ".", "path", ".", "abspa...
Generate a new blockade ID based on the CWD
[ "Generate", "a", "new", "blockade", "ID", "based", "on", "the", "CWD" ]
python
valid
46.363636
diging/tethne
tethne/classes/corpus.py
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/classes/corpus.py#L624-L685
def feature_distribution(self, featureset_name, feature, mode='counts', **slice_kwargs): """ Calculates the distribution of a feature across slices of the corpus. Examples -------- .. code-block:: python >>> corpus.feature_distribution(featureset_name='citations', \ ... feature='DOLE RJ 1965 CELL', \ ... step_size=1, window_size=1) [2, 15, 25, 1] Parameters ---------- featureset_name : str Name of a :class:`.FeatureSet` in the :class:`.Corpus`\. feature : str Name of the specific feature of interest. E.g. if ``featureset_name='citations'``, then ``feature`` could be something like ``'DOLE RJ 1965 CELL'``. mode : str (default: ``'counts'``) If set to ``'counts'``, values will be the sum of all count values for the feature in each slice. If set to ``'documentCounts'``, values will be the number of papers in which the feature occurs in each slice. slice_kwargs : kwargs Keyword arguments to be passed to :meth:`.Corpus.slice`\. Returns ------- list """ values = [] keys = [] fset = self.features[featureset_name] for key, papers in self.slice(subcorpus=False, **slice_kwargs): allfeatures = [v for v in chain(*[fset.features[self._generate_index(p)] for p in papers if self._generate_index(p) in fset.features])] if len(allfeatures) < 1: keys.append(key) values.append(0.) continue count = 0. for elem, v in allfeatures: if elem != feature: continue if mode == 'counts': count += v else: count += 1. values.append(count) keys.append(key) return keys, values
[ "def", "feature_distribution", "(", "self", ",", "featureset_name", ",", "feature", ",", "mode", "=", "'counts'", ",", "*", "*", "slice_kwargs", ")", ":", "values", "=", "[", "]", "keys", "=", "[", "]", "fset", "=", "self", ".", "features", "[", "featu...
Calculates the distribution of a feature across slices of the corpus. Examples -------- .. code-block:: python >>> corpus.feature_distribution(featureset_name='citations', \ ... feature='DOLE RJ 1965 CELL', \ ... step_size=1, window_size=1) [2, 15, 25, 1] Parameters ---------- featureset_name : str Name of a :class:`.FeatureSet` in the :class:`.Corpus`\. feature : str Name of the specific feature of interest. E.g. if ``featureset_name='citations'``, then ``feature`` could be something like ``'DOLE RJ 1965 CELL'``. mode : str (default: ``'counts'``) If set to ``'counts'``, values will be the sum of all count values for the feature in each slice. If set to ``'documentCounts'``, values will be the number of papers in which the feature occurs in each slice. slice_kwargs : kwargs Keyword arguments to be passed to :meth:`.Corpus.slice`\. Returns ------- list
[ "Calculates", "the", "distribution", "of", "a", "feature", "across", "slices", "of", "the", "corpus", "." ]
python
train
35.145161
T-002/pycast
pycast/methods/basemethod.py
https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/methods/basemethod.py#L146-L166
def _get_value_error_message_for_invalid_prarameter(self, parameter, value): """Returns the ValueError message for the given parameter. :param string parameter: Name of the parameter the message has to be created for. :param numeric value: Value outside the parameters interval. :return: Returns a string containing hte message. :rtype: string """ # return if not interval is defined for the parameter if parameter not in self._parameterIntervals: return interval = self._parameterIntervals[parameter] return "%s has to be in %s%s, %s%s. Current value is %s." % ( parameter, BaseMethod._interval_definitions[interval[2]][0], interval[0], interval[1], BaseMethod._interval_definitions[interval[3]][1], value )
[ "def", "_get_value_error_message_for_invalid_prarameter", "(", "self", ",", "parameter", ",", "value", ")", ":", "# return if not interval is defined for the parameter", "if", "parameter", "not", "in", "self", ".", "_parameterIntervals", ":", "return", "interval", "=", "s...
Returns the ValueError message for the given parameter. :param string parameter: Name of the parameter the message has to be created for. :param numeric value: Value outside the parameters interval. :return: Returns a string containing hte message. :rtype: string
[ "Returns", "the", "ValueError", "message", "for", "the", "given", "parameter", "." ]
python
train
40.952381
pyviz/holoviews
holoviews/ipython/__init__.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/ipython/__init__.py#L242-L277
def load_hvjs(cls, logo=False, bokeh_logo=False, mpl_logo=False, plotly_logo=False, JS=True, message='HoloViewsJS successfully loaded.'): """ Displays javascript and CSS to initialize HoloViews widgets. """ import jinja2 # Evaluate load_notebook.html template with widgetjs code if JS: widgetjs, widgetcss = Renderer.html_assets(extras=False, backends=[], script=True) else: widgetjs, widgetcss = '', '' # Add classic notebook MIME renderer widgetjs += nb_mime_js templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__))) jinjaEnv = jinja2.Environment(loader=templateLoader) template = jinjaEnv.get_template('load_notebook.html') html = template.render({'widgetcss': widgetcss, 'logo': logo, 'bokeh_logo': bokeh_logo, 'mpl_logo': mpl_logo, 'plotly_logo': plotly_logo, 'message': message}) publish_display_data(data={'text/html': html}) # Vanilla JS mime type is only consumed by classic notebook # Custom mime type is only consumed by JupyterLab if JS: mimebundle = { MIME_TYPES['js'] : widgetjs, MIME_TYPES['jlab-hv-load'] : widgetjs } if os.environ.get('HV_DOC_HTML', False): mimebundle = {'text/html': mimebundle_to_html(mimebundle)} publish_display_data(data=mimebundle)
[ "def", "load_hvjs", "(", "cls", ",", "logo", "=", "False", ",", "bokeh_logo", "=", "False", ",", "mpl_logo", "=", "False", ",", "plotly_logo", "=", "False", ",", "JS", "=", "True", ",", "message", "=", "'HoloViewsJS successfully loaded.'", ")", ":", "impor...
Displays javascript and CSS to initialize HoloViews widgets.
[ "Displays", "javascript", "and", "CSS", "to", "initialize", "HoloViews", "widgets", "." ]
python
train
45.277778
titusjan/argos
argos/qt/togglecolumn.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/qt/togglecolumn.py#L115-L123
def saveProfile(self, key, settings=None): """ Writes the view settings to the persistent store :param key: key where the setting will be read from :param settings: optional QSettings object which can have a group already opened. """ #logger.debug("Writing view settings for: {}".format(key)) if settings is None: settings = QtCore.QSettings() settings.setValue(key, self.horizontalHeader().saveState())
[ "def", "saveProfile", "(", "self", ",", "key", ",", "settings", "=", "None", ")", ":", "#logger.debug(\"Writing view settings for: {}\".format(key))", "if", "settings", "is", "None", ":", "settings", "=", "QtCore", ".", "QSettings", "(", ")", "settings", ".", "s...
Writes the view settings to the persistent store :param key: key where the setting will be read from :param settings: optional QSettings object which can have a group already opened.
[ "Writes", "the", "view", "settings", "to", "the", "persistent", "store", ":", "param", "key", ":", "key", "where", "the", "setting", "will", "be", "read", "from", ":", "param", "settings", ":", "optional", "QSettings", "object", "which", "can", "have", "a"...
python
train
52.333333
inveniosoftware/invenio-base
invenio_base/app.py
https://github.com/inveniosoftware/invenio-base/blob/ed4b7a76516ab2675e19270844400f4e2308f52d/invenio_base/app.py#L216-L239
def _loader(app, init_func, entry_points=None, modules=None): """Run generic loader. Used to load and initialize entry points and modules using an custom initialization function. .. versionadded: 1.0.0 """ if entry_points: for entry_point in entry_points: for ep in pkg_resources.iter_entry_points(entry_point): try: init_func(ep.load()) except Exception: app.logger.error( 'Failed to initialize entry point: {0}'.format(ep)) raise if modules: for m in modules: try: init_func(m) except Exception: app.logger.error('Failed to initialize module: {0}'.format(m)) raise
[ "def", "_loader", "(", "app", ",", "init_func", ",", "entry_points", "=", "None", ",", "modules", "=", "None", ")", ":", "if", "entry_points", ":", "for", "entry_point", "in", "entry_points", ":", "for", "ep", "in", "pkg_resources", ".", "iter_entry_points",...
Run generic loader. Used to load and initialize entry points and modules using an custom initialization function. .. versionadded: 1.0.0
[ "Run", "generic", "loader", "." ]
python
train
32.875
datastax/python-driver
cassandra/metadata.py
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/metadata.py#L2358-L2360
def _build_table_options(self, row): """ Setup the mostly-non-schema table options, like caching settings """ return dict((o, row.get(o)) for o in self.recognized_table_options if o in row)
[ "def", "_build_table_options", "(", "self", ",", "row", ")", ":", "return", "dict", "(", "(", "o", ",", "row", ".", "get", "(", "o", ")", ")", "for", "o", "in", "self", ".", "recognized_table_options", "if", "o", "in", "row", ")" ]
Setup the mostly-non-schema table options, like caching settings
[ "Setup", "the", "mostly", "-", "non", "-", "schema", "table", "options", "like", "caching", "settings" ]
python
train
67.666667
jciskey/pygraph
pygraph/functions/planarity/kocay_algorithm.py
https://github.com/jciskey/pygraph/blob/037bb2f32503fecb60d62921f9766d54109f15e2/pygraph/functions/planarity/kocay_algorithm.py#L998-L1003
def wt(u, v, dfs_data): """The wt_u[v] function used in the paper.""" # Determine the edge_id edge_id = dfs_data['graph'].get_first_edge_id_by_node_ids(u, v) # Pull the weight of that edge return dfs_data['edge_weights'][edge_id]
[ "def", "wt", "(", "u", ",", "v", ",", "dfs_data", ")", ":", "# Determine the edge_id", "edge_id", "=", "dfs_data", "[", "'graph'", "]", ".", "get_first_edge_id_by_node_ids", "(", "u", ",", "v", ")", "# Pull the weight of that edge", "return", "dfs_data", "[", ...
The wt_u[v] function used in the paper.
[ "The", "wt_u", "[", "v", "]", "function", "used", "in", "the", "paper", "." ]
python
train
40.666667
mikicz/arca
arca/backend/docker.py
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/docker.py#L741-L803
def run(self, repo: str, branch: str, task: Task, git_repo: Repo, repo_path: Path) -> Result: """ Gets or builds an image for the repo, gets or starts a container for the image and runs the script. :param repo: Repository URL :param branch: Branch ane :param task: :class:`Task` to run. :param git_repo: :class:`Repo <git.repo.base.Repo>` of the cloned repository. :param repo_path: :class:`Path <pathlib.Path>` to the cloned location. """ self.check_docker_access() container_name = self.get_container_name(repo, branch, git_repo) container = self.container_running(container_name) if container is None: image = self.get_image_for_repo(repo, branch, git_repo, repo_path) container = self.start_container(image, container_name, repo_path) task_filename, task_json = self.serialized_task(task) container.put_archive("/srv/scripts", self.tar_task_definition(task_filename, task_json)) res = None try: command = ["timeout"] if self.inherit_image: if self.alpine_inherited or b"Alpine" in container.exec_run(["cat", "/etc/issue"], tty=True).output: self.alpine_inherited = True command = ["timeout", "-t"] command += [str(task.timeout), "python", "/srv/scripts/runner.py", f"/srv/scripts/{task_filename}"] logger.debug("Running command %s", " ".join(command)) res = container.exec_run(command, tty=True) # 124 is the standard, 143 on alpine if res.exit_code in {124, 143}: raise BuildTimeoutError(f"The task timeouted after {task.timeout} seconds.") return Result(res.output) except BuildError: # can be raised by :meth:`Result.__init__` raise except Exception as e: logger.exception(e) if res is not None: logger.warning(res.output) raise BuildError("The build failed", extra_info={ "exception": e, "output": res if res is None else res.output }) finally: if not self.keep_container_running: container.kill(signal.SIGKILL) else: self._containers.add(container)
[ "def", "run", "(", "self", ",", "repo", ":", "str", ",", "branch", ":", "str", ",", "task", ":", "Task", ",", "git_repo", ":", "Repo", ",", "repo_path", ":", "Path", ")", "->", "Result", ":", "self", ".", "check_docker_access", "(", ")", "container_n...
Gets or builds an image for the repo, gets or starts a container for the image and runs the script. :param repo: Repository URL :param branch: Branch ane :param task: :class:`Task` to run. :param git_repo: :class:`Repo <git.repo.base.Repo>` of the cloned repository. :param repo_path: :class:`Path <pathlib.Path>` to the cloned location.
[ "Gets", "or", "builds", "an", "image", "for", "the", "repo", "gets", "or", "starts", "a", "container", "for", "the", "image", "and", "runs", "the", "script", "." ]
python
train
37.84127
gem/oq-engine
openquake/hazardlib/gsim/abrahamson_silva_1997.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/abrahamson_silva_1997.py#L140-L154
def _get_fault_type_hanging_wall(self, rake): """ Return fault type (F) and hanging wall (HW) flags depending on rake angle. The method assumes 'reverse' (F = 1) if 45 <= rake <= 135, 'other' (F = 0) if otherwise. Hanging-wall flag is set to 1 if 'reverse', and 0 if 'other'. """ F, HW = 0, 0 if 45 <= rake <= 135: F, HW = 1, 1 return F, HW
[ "def", "_get_fault_type_hanging_wall", "(", "self", ",", "rake", ")", ":", "F", ",", "HW", "=", "0", ",", "0", "if", "45", "<=", "rake", "<=", "135", ":", "F", ",", "HW", "=", "1", ",", "1", "return", "F", ",", "HW" ]
Return fault type (F) and hanging wall (HW) flags depending on rake angle. The method assumes 'reverse' (F = 1) if 45 <= rake <= 135, 'other' (F = 0) if otherwise. Hanging-wall flag is set to 1 if 'reverse', and 0 if 'other'.
[ "Return", "fault", "type", "(", "F", ")", "and", "hanging", "wall", "(", "HW", ")", "flags", "depending", "on", "rake", "angle", "." ]
python
train
28.066667
kata198/python-subprocess2
subprocess2/__init__.py
https://github.com/kata198/python-subprocess2/blob/8544b0b651d8e14de9fdd597baa704182e248b01/subprocess2/__init__.py#L152-L172
def runInBackground(self, pollInterval=.1, encoding=False): ''' runInBackground - Create a background thread which will manage this process, automatically read from streams, and perform any cleanups The object returned is a "BackgroundTaskInfo" object, and represents the state of the process. It is updated automatically as the program runs, and if stdout or stderr are streams, they are automatically read from and populated into this object. @see BackgroundTaskInfo for more info or http://pythonhosted.org/python-subprocess2/subprocess2.BackgroundTask.html @param pollInterval - Amount of idle time between polling @param encoding - Default False. If provided, data will be decoded using the value of this field as the codec name (e.x. "utf-8"). Otherwise, data will be stored as bytes. ''' from .BackgroundTask import BackgroundTaskThread taskInfo = BackgroundTaskInfo(encoding) thread = BackgroundTaskThread(self, taskInfo, pollInterval, encoding) thread.start() #thread.run() # Uncomment to use pdb debug (will not run in background) return taskInfo
[ "def", "runInBackground", "(", "self", ",", "pollInterval", "=", ".1", ",", "encoding", "=", "False", ")", ":", "from", ".", "BackgroundTask", "import", "BackgroundTaskThread", "taskInfo", "=", "BackgroundTaskInfo", "(", "encoding", ")", "thread", "=", "Backgrou...
runInBackground - Create a background thread which will manage this process, automatically read from streams, and perform any cleanups The object returned is a "BackgroundTaskInfo" object, and represents the state of the process. It is updated automatically as the program runs, and if stdout or stderr are streams, they are automatically read from and populated into this object. @see BackgroundTaskInfo for more info or http://pythonhosted.org/python-subprocess2/subprocess2.BackgroundTask.html @param pollInterval - Amount of idle time between polling @param encoding - Default False. If provided, data will be decoded using the value of this field as the codec name (e.x. "utf-8"). Otherwise, data will be stored as bytes.
[ "runInBackground", "-", "Create", "a", "background", "thread", "which", "will", "manage", "this", "process", "automatically", "read", "from", "streams", "and", "perform", "any", "cleanups" ]
python
train
54.190476
NoMore201/googleplay-api
gpapi/googleplay.py
https://github.com/NoMore201/googleplay-api/blob/e5e60b83563055bd7e13778ad13a260d2547cbf2/gpapi/googleplay.py#L181-L200
def uploadDeviceConfig(self): """Upload the device configuration of the fake device selected in the __init__ methodi to the google account.""" upload = googleplay_pb2.UploadDeviceConfigRequest() upload.deviceConfiguration.CopyFrom(self.deviceBuilder.getDeviceConfig()) headers = self.getHeaders(upload_fields=True) stringRequest = upload.SerializeToString() response = requests.post(UPLOAD_URL, data=stringRequest, headers=headers, verify=ssl_verify, timeout=60, proxies=self.proxies_config) response = googleplay_pb2.ResponseWrapper.FromString(response.content) try: if response.payload.HasField('uploadDeviceConfigResponse'): self.device_config_token = response.payload.uploadDeviceConfigResponse self.device_config_token = self.device_config_token.uploadDeviceConfigToken except ValueError: pass
[ "def", "uploadDeviceConfig", "(", "self", ")", ":", "upload", "=", "googleplay_pb2", ".", "UploadDeviceConfigRequest", "(", ")", "upload", ".", "deviceConfiguration", ".", "CopyFrom", "(", "self", ".", "deviceBuilder", ".", "getDeviceConfig", "(", ")", ")", "hea...
Upload the device configuration of the fake device selected in the __init__ methodi to the google account.
[ "Upload", "the", "device", "configuration", "of", "the", "fake", "device", "selected", "in", "the", "__init__", "methodi", "to", "the", "google", "account", "." ]
python
valid
52.4
hyperledger/indy-plenum
plenum/server/replica.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L1350-L1359
def tryCommit(self, prepare: Prepare): """ Try to commit if the Prepare message is ready to be passed into the commit phase. """ rv, reason = self.canCommit(prepare) if rv: self.doCommit(prepare) else: self.logger.debug("{} cannot send COMMIT since {}".format(self, reason))
[ "def", "tryCommit", "(", "self", ",", "prepare", ":", "Prepare", ")", ":", "rv", ",", "reason", "=", "self", ".", "canCommit", "(", "prepare", ")", "if", "rv", ":", "self", ".", "doCommit", "(", "prepare", ")", "else", ":", "self", ".", "logger", "...
Try to commit if the Prepare message is ready to be passed into the commit phase.
[ "Try", "to", "commit", "if", "the", "Prepare", "message", "is", "ready", "to", "be", "passed", "into", "the", "commit", "phase", "." ]
python
train
34.5
flowersteam/explauto
explauto/sensorimotor_model/inverse/cma.py
https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/inverse/cma.py#L3252-L3258
def random_rescale_to_mahalanobis(self, x): """change `x` like for injection, all on genotypic level""" x -= self.mean if any(x): x *= sum(self.randn(len(x))**2)**0.5 / self.mahalanobis_norm(x) x += self.mean return x
[ "def", "random_rescale_to_mahalanobis", "(", "self", ",", "x", ")", ":", "x", "-=", "self", ".", "mean", "if", "any", "(", "x", ")", ":", "x", "*=", "sum", "(", "self", ".", "randn", "(", "len", "(", "x", ")", ")", "**", "2", ")", "**", "0.5", ...
change `x` like for injection, all on genotypic level
[ "change", "x", "like", "for", "injection", "all", "on", "genotypic", "level" ]
python
train
37.571429
freeman-lab/regional
regional/regional.py
https://github.com/freeman-lab/regional/blob/e3a29c58982e5cd3d5700131ac96e5e0b84fb981/regional/regional.py#L59-L71
def distance(self, other): """ Distance between the center of this region and another. Parameters ---------- other : one region, or array-like Either another region, or the center of another region. """ from numpy.linalg import norm if isinstance(other, one): other = other.center return norm(self.center - asarray(other), ord=2)
[ "def", "distance", "(", "self", ",", "other", ")", ":", "from", "numpy", ".", "linalg", "import", "norm", "if", "isinstance", "(", "other", ",", "one", ")", ":", "other", "=", "other", ".", "center", "return", "norm", "(", "self", ".", "center", "-",...
Distance between the center of this region and another. Parameters ---------- other : one region, or array-like Either another region, or the center of another region.
[ "Distance", "between", "the", "center", "of", "this", "region", "and", "another", "." ]
python
train
31.846154
CiscoTestAutomation/yang
ncdiff/src/yang/ncdiff/netconf.py
https://github.com/CiscoTestAutomation/yang/blob/c70ec5ac5a91f276c4060009203770ece92e76b4/ncdiff/src/yang/ncdiff/netconf.py#L870-L902
def _url_to_prefix(node, id): '''_url_to_prefix Low-level api: Convert an identifier from `{namespace}tagname` notation to `prefix:tagname` notation by looking at nsmap of the node. If the identifier does not have a namespace, the identifier is simply returned without modification. Parameters ---------- node : `str` A config node. Its identifier will be converted. id : `str` Identifier in `{namespace}tagname` notation. Returns ------- str Identifier in `prefix:tagname` notation. ''' prefixes = {v: k for k, v in node.nsmap.items()} ret = re.search('^{(.+)}(.+)$', id) if ret: if ret.group(1) in prefixes: if prefixes[ret.group(1)] is None: return ret.group(2) else: return prefixes[ret.group(1)] + ':' + ret.group(2) return id
[ "def", "_url_to_prefix", "(", "node", ",", "id", ")", ":", "prefixes", "=", "{", "v", ":", "k", "for", "k", ",", "v", "in", "node", ".", "nsmap", ".", "items", "(", ")", "}", "ret", "=", "re", ".", "search", "(", "'^{(.+)}(.+)$'", ",", "id", ")...
_url_to_prefix Low-level api: Convert an identifier from `{namespace}tagname` notation to `prefix:tagname` notation by looking at nsmap of the node. If the identifier does not have a namespace, the identifier is simply returned without modification. Parameters ---------- node : `str` A config node. Its identifier will be converted. id : `str` Identifier in `{namespace}tagname` notation. Returns ------- str Identifier in `prefix:tagname` notation.
[ "_url_to_prefix" ]
python
train
29.151515
IDSIA/sacred
sacred/dependencies.py
https://github.com/IDSIA/sacred/blob/72633776bed9b5bddf93ae7d215188e61970973a/sacred/dependencies.py#L261-L296
def splitall(path): """Split a path into a list of directory names (and optionally a filename). Parameters ---------- path: str The path (absolute or relative). Returns ------- allparts: list[str] List of directory names (and optionally a filename) Example ------- "foo/bar/baz.py" => ["foo", "bar", "baz.py"] "/absolute/path.py" => ["/", "absolute", "baz.py"] Notes ----- Credit to Trent Mick. Taken from https://www.safaribooksonline.com/library/view/python-cookbook/0596001673/ch04s16.html """ allparts = [] while True: parts = os.path.split(path) if parts[0] == path: # sentinel for absolute paths allparts.insert(0, parts[0]) break elif parts[1] == path: # sentinel for relative paths allparts.insert(0, parts[1]) break else: path = parts[0] allparts.insert(0, parts[1]) return allparts
[ "def", "splitall", "(", "path", ")", ":", "allparts", "=", "[", "]", "while", "True", ":", "parts", "=", "os", ".", "path", ".", "split", "(", "path", ")", "if", "parts", "[", "0", "]", "==", "path", ":", "# sentinel for absolute paths", "allparts", ...
Split a path into a list of directory names (and optionally a filename). Parameters ---------- path: str The path (absolute or relative). Returns ------- allparts: list[str] List of directory names (and optionally a filename) Example ------- "foo/bar/baz.py" => ["foo", "bar", "baz.py"] "/absolute/path.py" => ["/", "absolute", "baz.py"] Notes ----- Credit to Trent Mick. Taken from https://www.safaribooksonline.com/library/view/python-cookbook/0596001673/ch04s16.html
[ "Split", "a", "path", "into", "a", "list", "of", "directory", "names", "(", "and", "optionally", "a", "filename", ")", "." ]
python
train
26.611111
ray-project/ray
python/ray/tune/trial_runner.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/trial_runner.py#L544-L553
def _requeue_trial(self, trial): """Notification to TrialScheduler and requeue trial. This does not notify the SearchAlgorithm because the function evaluation is still in progress. """ self._scheduler_alg.on_trial_error(self, trial) self.trial_executor.set_status(trial, Trial.PENDING) with warn_if_slow("scheduler.on_trial_add"): self._scheduler_alg.on_trial_add(self, trial)
[ "def", "_requeue_trial", "(", "self", ",", "trial", ")", ":", "self", ".", "_scheduler_alg", ".", "on_trial_error", "(", "self", ",", "trial", ")", "self", ".", "trial_executor", ".", "set_status", "(", "trial", ",", "Trial", ".", "PENDING", ")", "with", ...
Notification to TrialScheduler and requeue trial. This does not notify the SearchAlgorithm because the function evaluation is still in progress.
[ "Notification", "to", "TrialScheduler", "and", "requeue", "trial", "." ]
python
train
43.6
RJT1990/pyflux
pyflux/ssm/ndynlin.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/ssm/ndynlin.py#L345-L368
def _ss_matrices(self,beta): """ Creates the state space matrices required Parameters ---------- beta : np.array Contains untransformed starting values for latent variables Returns ---------- T, Z, R, Q : np.array State space matrices used in KFS algorithm """ T = np.identity(self.state_no) Z = self.X R = np.identity(self.state_no) Q = np.identity(self.state_no) for i in range(0,self.state_no): Q[i][i] = self.latent_variables.z_list[i].prior.transform(beta[i]) return T, Z, R, Q
[ "def", "_ss_matrices", "(", "self", ",", "beta", ")", ":", "T", "=", "np", ".", "identity", "(", "self", ".", "state_no", ")", "Z", "=", "self", ".", "X", "R", "=", "np", ".", "identity", "(", "self", ".", "state_no", ")", "Q", "=", "np", ".", ...
Creates the state space matrices required Parameters ---------- beta : np.array Contains untransformed starting values for latent variables Returns ---------- T, Z, R, Q : np.array State space matrices used in KFS algorithm
[ "Creates", "the", "state", "space", "matrices", "required" ]
python
train
26.083333
nickstenning/honcho
honcho/manager.py
https://github.com/nickstenning/honcho/blob/f3b2e1e11868283f4c3463102b7ed3bd00f26b32/honcho/manager.py#L85-L140
def loop(self): """ Start all the added processes and multiplex their output onto the bound printer (which by default will print to STDOUT). If one process terminates, all the others will be terminated by Honcho, and :func:`~honcho.manager.Manager.loop` will return. This method will block until all the processes have terminated. """ def _terminate(signum, frame): self._system_print("%s received\n" % SIGNALS[signum]['name']) self.returncode = SIGNALS[signum]['rc'] self.terminate() signal.signal(signal.SIGTERM, _terminate) signal.signal(signal.SIGINT, _terminate) self._start() exit = False exit_start = None while 1: try: msg = self.events.get(timeout=0.1) except Empty: if exit: break else: if msg.type == 'line': self._printer.write(msg) elif msg.type == 'start': self._processes[msg.name]['pid'] = msg.data['pid'] self._system_print("%s started (pid=%s)\n" % (msg.name, msg.data['pid'])) elif msg.type == 'stop': self._processes[msg.name]['returncode'] = msg.data['returncode'] self._system_print("%s stopped (rc=%s)\n" % (msg.name, msg.data['returncode'])) if self.returncode is None: self.returncode = msg.data['returncode'] if self._all_started() and self._all_stopped(): exit = True if exit_start is None and self._all_started() and self._any_stopped(): exit_start = self._env.now() self.terminate() if exit_start is not None: # If we've been in this loop for more than KILL_WAIT seconds, # it's time to kill all remaining children. waiting = self._env.now() - exit_start if waiting > datetime.timedelta(seconds=KILL_WAIT): self.kill()
[ "def", "loop", "(", "self", ")", ":", "def", "_terminate", "(", "signum", ",", "frame", ")", ":", "self", ".", "_system_print", "(", "\"%s received\\n\"", "%", "SIGNALS", "[", "signum", "]", "[", "'name'", "]", ")", "self", ".", "returncode", "=", "SIG...
Start all the added processes and multiplex their output onto the bound printer (which by default will print to STDOUT). If one process terminates, all the others will be terminated by Honcho, and :func:`~honcho.manager.Manager.loop` will return. This method will block until all the processes have terminated.
[ "Start", "all", "the", "added", "processes", "and", "multiplex", "their", "output", "onto", "the", "bound", "printer", "(", "which", "by", "default", "will", "print", "to", "STDOUT", ")", "." ]
python
train
38.839286
pycontribs/pyrax
pyrax/clouddns.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/clouddns.py#L1391-L1404
def get_rate_limits(self): """ Returns a dict with the current rate limit information for domain and status requests. """ resp, body = self.method_get("/limits") rate_limits = body.get("limits", {}).get("rate") ret = [] for rate_limit in rate_limits: limits = rate_limit["limit"] uri_limits = {"uri": rate_limit["uri"], "limits": limits} ret.append(uri_limits) return ret
[ "def", "get_rate_limits", "(", "self", ")", ":", "resp", ",", "body", "=", "self", ".", "method_get", "(", "\"/limits\"", ")", "rate_limits", "=", "body", ".", "get", "(", "\"limits\"", ",", "{", "}", ")", ".", "get", "(", "\"rate\"", ")", "ret", "="...
Returns a dict with the current rate limit information for domain and status requests.
[ "Returns", "a", "dict", "with", "the", "current", "rate", "limit", "information", "for", "domain", "and", "status", "requests", "." ]
python
train
34.714286
mabuchilab/QNET
src/qnet/printing/latexprinter.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/printing/latexprinter.py#L442-L478
def render_latex_sub_super( name, subs=None, supers=None, translate_symbols=True, sep=','): r'''Assemble a string from the primary name and the given sub- and superscripts:: >>> render_latex_sub_super(name='alpha', subs=['mu', 'nu'], supers=[2]) '\\alpha_{\\mu,\\nu}^{2}' >>> render_latex_sub_super( ... name='alpha', subs=['1', '2'], supers=['(1)'], sep='') '\\alpha_{12}^{(1)}' Args: name (str): the string without the subscript/superscript subs (list or None): list of subscripts supers (list or None): list of superscripts translate_symbols (bool): If True, try to translate (Greek) symbols in `name, `subs`, and `supers` to unicode sep (str): Separator to use if there are multiple subscripts/superscripts ''' if subs is None: subs = [] if supers is None: supers = [] if translate_symbols: supers = [_translate_symbols(str(sup)) for sup in supers] subs = [_translate_symbols(str(sub)) for sub in subs] name = _translate_symbols(name) res = name sub = sep.join(subs) sup = sep.join(supers) if len(sub) > 0: res += "_{%s}" % sub if len(sup) > 0: res += "^{%s}" % sup return res
[ "def", "render_latex_sub_super", "(", "name", ",", "subs", "=", "None", ",", "supers", "=", "None", ",", "translate_symbols", "=", "True", ",", "sep", "=", "','", ")", ":", "if", "subs", "is", "None", ":", "subs", "=", "[", "]", "if", "supers", "is",...
r'''Assemble a string from the primary name and the given sub- and superscripts:: >>> render_latex_sub_super(name='alpha', subs=['mu', 'nu'], supers=[2]) '\\alpha_{\\mu,\\nu}^{2}' >>> render_latex_sub_super( ... name='alpha', subs=['1', '2'], supers=['(1)'], sep='') '\\alpha_{12}^{(1)}' Args: name (str): the string without the subscript/superscript subs (list or None): list of subscripts supers (list or None): list of superscripts translate_symbols (bool): If True, try to translate (Greek) symbols in `name, `subs`, and `supers` to unicode sep (str): Separator to use if there are multiple subscripts/superscripts
[ "r", "Assemble", "a", "string", "from", "the", "primary", "name", "and", "the", "given", "sub", "-", "and", "superscripts", "::" ]
python
train
34.378378
vaexio/vaex
packages/vaex-core/vaex/dataframe.py
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L1693-L1763
def healpix_plot(self, healpix_expression="source_id/34359738368", healpix_max_level=12, healpix_level=8, what="count(*)", selection=None, grid=None, healpix_input="equatorial", healpix_output="galactic", f=None, colormap="afmhot", grid_limits=None, image_size=800, nest=True, figsize=None, interactive=False, title="", smooth=None, show=False, colorbar=True, rotation=(0, 0, 0), **kwargs): """Viz data in 2d using a healpix column. :param healpix_expression: {healpix_max_level} :param healpix_max_level: {healpix_max_level} :param healpix_level: {healpix_level} :param what: {what} :param selection: {selection} :param grid: {grid} :param healpix_input: Specificy if the healpix index is in "equatorial", "galactic" or "ecliptic". :param healpix_output: Plot in "equatorial", "galactic" or "ecliptic". :param f: function to apply to the data :param colormap: matplotlib colormap :param grid_limits: Optional sequence [minvalue, maxvalue] that determine the min and max value that map to the colormap (values below and above these are clipped to the the min/max). (default is [min(f(grid)), max(f(grid))) :param image_size: size for the image that healpy uses for rendering :param nest: If the healpix data is in nested (True) or ring (False) :param figsize: If given, modify the matplotlib figure size. Example (14,9) :param interactive: (Experimental, uses healpy.mollzoom is True) :param title: Title of figure :param smooth: apply gaussian smoothing, in degrees :param show: Call matplotlib's show (True) or not (False, defaut) :param rotation: Rotatate the plot, in format (lon, lat, psi) such that (lon, lat) is the center, and rotate on the screen by angle psi. All angles are degrees. 
:return: """ # plot_level = healpix_level #healpix_max_level-reduce_level import healpy as hp import pylab as plt if grid is None: reduce_level = healpix_max_level - healpix_level NSIDE = 2**healpix_level nmax = hp.nside2npix(NSIDE) # print nmax, np.sqrt(nmax) scaling = 4**reduce_level # print nmax epsilon = 1. / scaling / 2 grid = self._stat(what=what, binby="%s/%s" % (healpix_expression, scaling), limits=[-epsilon, nmax - epsilon], shape=nmax, selection=selection) if grid_limits: grid_min, grid_max = grid_limits else: grid_min = grid_max = None f_org = f f = _parse_f(f) if smooth: if nest: grid = hp.reorder(grid, inp="NEST", out="RING") nest = False # grid[np.isnan(grid)] = np.nanmean(grid) grid = hp.smoothing(grid, sigma=np.radians(smooth)) fgrid = f(grid) coord_map = dict(equatorial='C', galactic='G', ecliptic="E") fig = plt.gcf() if figsize is not None: fig.set_size_inches(*figsize) what_label = what if f_org: what_label = f_org + " " + what_label f = hp.mollzoom if interactive else hp.mollview with warnings.catch_warnings(): warnings.simplefilter("ignore") coord = coord_map[healpix_input], coord_map[healpix_output] if coord_map[healpix_input] == coord_map[healpix_output]: coord = None f(fgrid, unit=what_label, rot=rotation, nest=nest, title=title, coord=coord, cmap=colormap, hold=True, xsize=image_size, min=grid_min, max=grid_max, cbar=colorbar, **kwargs) if show: plt.show()
[ "def", "healpix_plot", "(", "self", ",", "healpix_expression", "=", "\"source_id/34359738368\"", ",", "healpix_max_level", "=", "12", ",", "healpix_level", "=", "8", ",", "what", "=", "\"count(*)\"", ",", "selection", "=", "None", ",", "grid", "=", "None", ","...
Viz data in 2d using a healpix column. :param healpix_expression: {healpix_max_level} :param healpix_max_level: {healpix_max_level} :param healpix_level: {healpix_level} :param what: {what} :param selection: {selection} :param grid: {grid} :param healpix_input: Specificy if the healpix index is in "equatorial", "galactic" or "ecliptic". :param healpix_output: Plot in "equatorial", "galactic" or "ecliptic". :param f: function to apply to the data :param colormap: matplotlib colormap :param grid_limits: Optional sequence [minvalue, maxvalue] that determine the min and max value that map to the colormap (values below and above these are clipped to the the min/max). (default is [min(f(grid)), max(f(grid))) :param image_size: size for the image that healpy uses for rendering :param nest: If the healpix data is in nested (True) or ring (False) :param figsize: If given, modify the matplotlib figure size. Example (14,9) :param interactive: (Experimental, uses healpy.mollzoom is True) :param title: Title of figure :param smooth: apply gaussian smoothing, in degrees :param show: Call matplotlib's show (True) or not (False, defaut) :param rotation: Rotatate the plot, in format (lon, lat, psi) such that (lon, lat) is the center, and rotate on the screen by angle psi. All angles are degrees. :return:
[ "Viz", "data", "in", "2d", "using", "a", "healpix", "column", "." ]
python
test
52.887324
GNS3/gns3-server
gns3server/controller/compute.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/compute.py#L308-L321
def download_file(self, project, path): """ Read file of a project and download it :param project: A project object :param path: The path of the file in the project :returns: A file stream """ url = self._getUrl("/projects/{}/files/{}".format(project.id, path)) response = yield from self._session().request("GET", url, auth=self._auth) if response.status == 404: raise aiohttp.web.HTTPNotFound(text="{} not found on compute".format(path)) return response
[ "def", "download_file", "(", "self", ",", "project", ",", "path", ")", ":", "url", "=", "self", ".", "_getUrl", "(", "\"/projects/{}/files/{}\"", ".", "format", "(", "project", ".", "id", ",", "path", ")", ")", "response", "=", "yield", "from", "self", ...
Read file of a project and download it :param project: A project object :param path: The path of the file in the project :returns: A file stream
[ "Read", "file", "of", "a", "project", "and", "download", "it" ]
python
train
38.285714
padelt/temper-python
temperusb/temper.py
https://github.com/padelt/temper-python/blob/cbdbace7e6755b1d91a2603ab63c9cb778078f79/temperusb/temper.py#L38-L46
def readattr(path, name): """ Read attribute from sysfs and return as string """ try: f = open(USB_SYS_PREFIX + path + "/" + name) return f.readline().rstrip("\n") except IOError: return None
[ "def", "readattr", "(", "path", ",", "name", ")", ":", "try", ":", "f", "=", "open", "(", "USB_SYS_PREFIX", "+", "path", "+", "\"/\"", "+", "name", ")", "return", "f", ".", "readline", "(", ")", ".", "rstrip", "(", "\"\\n\"", ")", "except", "IOErro...
Read attribute from sysfs and return as string
[ "Read", "attribute", "from", "sysfs", "and", "return", "as", "string" ]
python
valid
25.222222
biolink/biolink-model
metamodel/generators/contextgen.py
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/contextgen.py#L117-L133
def add_mappings(self, defn: Definition, target: Dict) -> None: """ Process any mappings in defn, adding all of the mappings prefixes to the namespace map and add a link to the first mapping to the target @param defn: Class or Slot definition @param target: context target """ self.add_id_prefixes(defn) for mapping in defn.mappings: if '://' in mapping: target['@id'] = mapping else: if ':' not in mapping or len(mapping.split(':')) != 2: raise ValueError(f"Definition {defn.name} = unrecognized mapping: {mapping}") ns = mapping.split(':')[0] self.add_prefix(ns) target['@id'] = defn.mappings[0]
[ "def", "add_mappings", "(", "self", ",", "defn", ":", "Definition", ",", "target", ":", "Dict", ")", "->", "None", ":", "self", ".", "add_id_prefixes", "(", "defn", ")", "for", "mapping", "in", "defn", ".", "mappings", ":", "if", "'://'", "in", "mappin...
Process any mappings in defn, adding all of the mappings prefixes to the namespace map and add a link to the first mapping to the target @param defn: Class or Slot definition @param target: context target
[ "Process", "any", "mappings", "in", "defn", "adding", "all", "of", "the", "mappings", "prefixes", "to", "the", "namespace", "map", "and", "add", "a", "link", "to", "the", "first", "mapping", "to", "the", "target" ]
python
train
44.823529
geoadmin/lib-gatilegrid
gatilegrid/tilegrids.py
https://github.com/geoadmin/lib-gatilegrid/blob/28e39cba22451f6ef0ddcb93cbc0838f06815505/gatilegrid/tilegrids.py#L254-L265
def iterGrid(self, minZoom, maxZoom): "Yields the tileBounds, zoom, tileCol and tileRow" assert minZoom in range(0, len(self.RESOLUTIONS)) assert maxZoom in range(0, len(self.RESOLUTIONS)) assert minZoom <= maxZoom for zoom in xrange(minZoom, maxZoom + 1): [minRow, minCol, maxRow, maxCol] = self.getExtentAddress(zoom) for row in xrange(minRow, maxRow + 1): for col in xrange(minCol, maxCol + 1): tileBounds = self.tileBounds(zoom, col, row) yield (tileBounds, zoom, col, row)
[ "def", "iterGrid", "(", "self", ",", "minZoom", ",", "maxZoom", ")", ":", "assert", "minZoom", "in", "range", "(", "0", ",", "len", "(", "self", ".", "RESOLUTIONS", ")", ")", "assert", "maxZoom", "in", "range", "(", "0", ",", "len", "(", "self", "....
Yields the tileBounds, zoom, tileCol and tileRow
[ "Yields", "the", "tileBounds", "zoom", "tileCol", "and", "tileRow" ]
python
train
48.916667
cthorey/pdsimage
pdsimage/PDS_Extractor.py
https://github.com/cthorey/pdsimage/blob/f71de6dfddd3d538d76da229b4b9605c40f3fbac/pdsimage/PDS_Extractor.py#L717-L728
def _format_lat(self, lat): ''' Format latitude to fit the image name ''' if self.ppd in [4, 8, 16, 32, 64]: latcenter = '000N' elif self.ppd in [128]: if lat < 0: latcenter = '450S' else: latcenter = '450N' return latcenter
[ "def", "_format_lat", "(", "self", ",", "lat", ")", ":", "if", "self", ".", "ppd", "in", "[", "4", ",", "8", ",", "16", ",", "32", ",", "64", "]", ":", "latcenter", "=", "'000N'", "elif", "self", ".", "ppd", "in", "[", "128", "]", ":", "if", ...
Format latitude to fit the image name
[ "Format", "latitude", "to", "fit", "the", "image", "name" ]
python
train
26.25
TankerHQ/python-cli-ui
cli_ui/__init__.py
https://github.com/TankerHQ/python-cli-ui/blob/4c9928827cea06cf80e6a1f5bd86478d8566863f/cli_ui/__init__.py#L481-L487
def ask_password(*question: Token) -> str: """Ask the user to enter a password. """ tokens = get_ask_tokens(question) info(*tokens) answer = read_password() return answer
[ "def", "ask_password", "(", "*", "question", ":", "Token", ")", "->", "str", ":", "tokens", "=", "get_ask_tokens", "(", "question", ")", "info", "(", "*", "tokens", ")", "answer", "=", "read_password", "(", ")", "return", "answer" ]
Ask the user to enter a password.
[ "Ask", "the", "user", "to", "enter", "a", "password", "." ]
python
train
26.857143
uogbuji/versa
tools/py/writer/rdfs.py
https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/writer/rdfs.py#L86-L101
def write(models, base=None, graph=None, rdfsonly=False, prefixes=None, logger=logging): ''' See the command line help ''' prefixes = prefixes or {} g = graph or rdflib.Graph() #g.bind('bf', BFNS) #g.bind('bfc', BFCNS) #g.bind('bfd', BFDNS) g.bind('v', VNS) for k, v in prefixes.items(): g.bind(k, v) for m in models: base_out = m.base process(m, g, rdfsonly, base=base_out, logger=logger) return g
[ "def", "write", "(", "models", ",", "base", "=", "None", ",", "graph", "=", "None", ",", "rdfsonly", "=", "False", ",", "prefixes", "=", "None", ",", "logger", "=", "logging", ")", ":", "prefixes", "=", "prefixes", "or", "{", "}", "g", "=", "graph"...
See the command line help
[ "See", "the", "command", "line", "help" ]
python
train
28.4375
saltstack/salt
salt/utils/virtualbox.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/virtualbox.py#L418-L433
def _start_machine(machine, session): ''' Helper to try and start machines @param machine: @type machine: IMachine @param session: @type session: ISession @return: @rtype: IProgress or None ''' try: return machine.launchVMProcess(session, '', '') except Exception as e: log.debug(e.message, exc_info=True) return None
[ "def", "_start_machine", "(", "machine", ",", "session", ")", ":", "try", ":", "return", "machine", ".", "launchVMProcess", "(", "session", ",", "''", ",", "''", ")", "except", "Exception", "as", "e", ":", "log", ".", "debug", "(", "e", ".", "message",...
Helper to try and start machines @param machine: @type machine: IMachine @param session: @type session: ISession @return: @rtype: IProgress or None
[ "Helper", "to", "try", "and", "start", "machines" ]
python
train
23.1875
aouyar/PyMunin
pysysinfo/netiface.py
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/netiface.py#L27-L53
def getIfStats(self): """Return dictionary of Traffic Stats for Network Interfaces. @return: Nested dictionary of statistics for each interface. """ info_dict = {} try: fp = open(ifaceStatsFile, 'r') data = fp.read() fp.close() except: raise IOError('Failed reading interface stats from file: %s' % ifaceStatsFile) for line in data.splitlines(): mobj = re.match('^\s*([\w\d:]+):\s*(.*\S)\s*$', line) if mobj: iface = mobj.group(1) statline = mobj.group(2) info_dict[iface] = dict(zip( ('rxbytes', 'rxpackets', 'rxerrs', 'rxdrop', 'rxfifo', 'rxframe', 'rxcompressed', 'rxmulticast', 'txbytes', 'txpackets', 'txerrs', 'txdrop', 'txfifo', 'txcolls', 'txcarrier', 'txcompressed'), [int(x) for x in statline.split()])) return info_dict
[ "def", "getIfStats", "(", "self", ")", ":", "info_dict", "=", "{", "}", "try", ":", "fp", "=", "open", "(", "ifaceStatsFile", ",", "'r'", ")", "data", "=", "fp", ".", "read", "(", ")", "fp", ".", "close", "(", ")", "except", ":", "raise", "IOErro...
Return dictionary of Traffic Stats for Network Interfaces. @return: Nested dictionary of statistics for each interface.
[ "Return", "dictionary", "of", "Traffic", "Stats", "for", "Network", "Interfaces", "." ]
python
train
39.111111
apache/spark
python/pyspark/conf.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/conf.py#L123-L130
def set(self, key, value): """Set a configuration property.""" # Try to set self._jconf first if JVM is created, set self._conf if JVM is not created yet. if self._jconf is not None: self._jconf.set(key, unicode(value)) else: self._conf[key] = unicode(value) return self
[ "def", "set", "(", "self", ",", "key", ",", "value", ")", ":", "# Try to set self._jconf first if JVM is created, set self._conf if JVM is not created yet.", "if", "self", ".", "_jconf", "is", "not", "None", ":", "self", ".", "_jconf", ".", "set", "(", "key", ",",...
Set a configuration property.
[ "Set", "a", "configuration", "property", "." ]
python
train
40.875
cltl/KafNafParserPy
KafNafParserPy/markable_data.py
https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/markable_data.py#L247-L259
def add_external_reference(self,markable_id, external_ref): """ Adds an external reference for the given markable @type markable_id: string @param markable_id: the markable identifier @type external_ref: L{CexternalReference} @param external_ref: the external reference object """ if markable_id in self.idx: markable_obj = Cterm(self.idx[markable_id],self.type) markable_obj.add_external_reference(external_ref) else: print('{markable_id} not in self.idx'.format(**locals()))
[ "def", "add_external_reference", "(", "self", ",", "markable_id", ",", "external_ref", ")", ":", "if", "markable_id", "in", "self", ".", "idx", ":", "markable_obj", "=", "Cterm", "(", "self", ".", "idx", "[", "markable_id", "]", ",", "self", ".", "type", ...
Adds an external reference for the given markable @type markable_id: string @param markable_id: the markable identifier @type external_ref: L{CexternalReference} @param external_ref: the external reference object
[ "Adds", "an", "external", "reference", "for", "the", "given", "markable" ]
python
train
44
serge-sans-paille/pythran
pythran/analyses/cfg.py
https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/analyses/cfg.py#L65-L100
def visit_For(self, node): """ OUT = (node,) + last body statements RAISES = body's that are not break or continue """ currs = (node,) break_currs = tuple() raises = () # handle body for n in node.body: self.result.add_node(n) for curr in currs: self.result.add_edge(curr, n) currs, nraises = self.visit(n) for nraise in nraises: if isinstance(nraise, ast.Break): break_currs += (nraise,) elif isinstance(nraise, ast.Continue): self.result.add_edge(nraise, node) else: raises += (nraise,) # add the backward loop for curr in currs: self.result.add_edge(curr, node) # the else statement if needed if node.orelse: for n in node.orelse: self.result.add_node(n) for curr in currs: self.result.add_edge(curr, n) currs, nraises = self.visit(n) # while only if hasattr(node, 'test') and is_true_predicate(node.test): return break_currs, raises return break_currs + currs, raises
[ "def", "visit_For", "(", "self", ",", "node", ")", ":", "currs", "=", "(", "node", ",", ")", "break_currs", "=", "tuple", "(", ")", "raises", "=", "(", ")", "# handle body", "for", "n", "in", "node", ".", "body", ":", "self", ".", "result", ".", ...
OUT = (node,) + last body statements RAISES = body's that are not break or continue
[ "OUT", "=", "(", "node", ")", "+", "last", "body", "statements", "RAISES", "=", "body", "s", "that", "are", "not", "break", "or", "continue" ]
python
train
34.472222
casebeer/python-hkdf
hkdf.py
https://github.com/casebeer/python-hkdf/blob/cc3c9dbf0a271b27a7ac5cd04cc1485bbc3b4307/hkdf.py#L61-L71
def expand(self, info=b"", length=32): ''' Generate output key material based on an `info` value Arguments: - info - context to generate the OKM - length - length in bytes of the key to generate See the HKDF draft RFC for guidance. ''' return hkdf_expand(self._prk, info, length, self._hash)
[ "def", "expand", "(", "self", ",", "info", "=", "b\"\"", ",", "length", "=", "32", ")", ":", "return", "hkdf_expand", "(", "self", ".", "_prk", ",", "info", ",", "length", ",", "self", ".", "_hash", ")" ]
Generate output key material based on an `info` value Arguments: - info - context to generate the OKM - length - length in bytes of the key to generate See the HKDF draft RFC for guidance.
[ "Generate", "output", "key", "material", "based", "on", "an", "info", "value" ]
python
train
27.181818
KelSolaar/Umbra
umbra/engine.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/engine.py#L1087-L1099
def logging_active_formatter(self, value): """ Setter for **self.__logging_active_formatter** attribute. :param value: Attribute value. :type value: unicode or QString """ if value is not None: assert type(value) in ( unicode, QString), "'{0}' attribute: '{1}' type is not 'unicode' or 'QString'!".format( "logging_active_formatter", value) self.__logging_active_formatter = value
[ "def", "logging_active_formatter", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "assert", "type", "(", "value", ")", "in", "(", "unicode", ",", "QString", ")", ",", "\"'{0}' attribute: '{1}' type is not 'unicode' or 'QString'!\"", ...
Setter for **self.__logging_active_formatter** attribute. :param value: Attribute value. :type value: unicode or QString
[ "Setter", "for", "**", "self", ".", "__logging_active_formatter", "**", "attribute", "." ]
python
train
36.153846
elmotec/massedit
massedit.py
https://github.com/elmotec/massedit/blob/57e22787354896d63a8850312314b19aa0308906/massedit.py#L262-L276
def append_code_expr(self, code): """Compile argument and adds it to the list of code objects.""" # expects a string. if isinstance(code, str) and not isinstance(code, unicode): code = unicode(code) if not isinstance(code, unicode): raise TypeError("string expected") log.debug("compiling code %s...", code) try: code_obj = compile(code, '<string>', 'eval') self.code_objs[code] = code_obj except SyntaxError as syntax_err: log.error("cannot compile %s: %s", code, syntax_err) raise log.debug("compiled code %s", code)
[ "def", "append_code_expr", "(", "self", ",", "code", ")", ":", "# expects a string.", "if", "isinstance", "(", "code", ",", "str", ")", "and", "not", "isinstance", "(", "code", ",", "unicode", ")", ":", "code", "=", "unicode", "(", "code", ")", "if", "...
Compile argument and adds it to the list of code objects.
[ "Compile", "argument", "and", "adds", "it", "to", "the", "list", "of", "code", "objects", "." ]
python
train
42.666667