repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
HiPERCAM/hcam_widgets
hcam_widgets/gtc/headers.py
https://github.com/HiPERCAM/hcam_widgets/blob/7219f0d96dd3a8ebe3139c7f542a72c02d02fce8/hcam_widgets/gtc/headers.py#L46-L72
def create_header_from_telpars(telpars):
    """
    Create a list of fits header items from GTC telescope pars.

    The GTC telescope server gives a list of string describing FITS
    header items such as RA, DEC, etc.

    Arguments
    ---------
    telpars : list
        list returned by server call to getTelescopeParams
    """
    # Each entry in telpars bundles several ';'-separated FITS-style
    # items (weather, telescope, instrument, ...).  Flatten into one
    # stripped string per item, dropping empty fragments.
    merged = ';'.join(telpars)
    items = []
    for chunk in merged.split(';'):
        chunk = chunk.strip()
        if chunk:
            items.append(chunk)

    # Build the header while silencing FITS verification warnings.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', fits.verify.VerifyWarning)
        header = fits.Header(parse_hstring(item) for item in items)
    return header
[ "def", "create_header_from_telpars", "(", "telpars", ")", ":", "# pars is a list of strings describing tel info in FITS", "# style, each entry in the list is a different class of", "# thing (weather, telescope, instrument etc).", "# first, we munge it into a single list of strings, each one", "#...
Create a list of fits header items from GTC telescope pars. The GTC telescope server gives a list of string describing FITS header items such as RA, DEC, etc. Arguments --------- telpars : list list returned by server call to getTelescopeParams
[ "Create", "a", "list", "of", "fits", "header", "items", "from", "GTC", "telescope", "pars", "." ]
python
train
androguard/androguard
androguard/core/bytecodes/dvm.py
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/dvm.py#L7550-L7559
def show(self):
    """
    Print with a pretty display the MapList object
    """
    bytecode._Print("MAP_LIST SIZE", self.size)
    for entry in self.map_item:
        # Skip the map entry that refers back to this MapList itself.
        if entry.item == self:
            continue
        # FIXME this does not work for CodeItems!
        # as we do not have the method analysis here...
        entry.show()
[ "def", "show", "(", "self", ")", ":", "bytecode", ".", "_Print", "(", "\"MAP_LIST SIZE\"", ",", "self", ".", "size", ")", "for", "i", "in", "self", ".", "map_item", ":", "if", "i", ".", "item", "!=", "self", ":", "# FIXME this does not work for CodeItems!"...
Print with a pretty display the MapList object
[ "Print", "with", "a", "pretty", "display", "the", "MapList", "object" ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/gallery/gallery_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/gallery/gallery_client.py#L740-L757
def query_extensions(self, extension_query, account_token=None, account_token_header=None):
    """QueryExtensions.
    [Preview API]
    :param :class:`<ExtensionQuery> <azure.devops.v5_1.gallery.models.ExtensionQuery>` extension_query:
    :param str account_token:
    :param String account_token_header: Header to pass the account token
    :rtype: :class:`<ExtensionQueryResult> <azure.devops.v5_1.gallery.models.ExtensionQueryResult>`
    """
    # Optional account token travels as a query-string parameter.
    query_parameters = {}
    if account_token is not None:
        query_parameters['accountToken'] = self._serialize.query(
            'account_token', account_token, 'str')
    request_body = self._serialize.body(extension_query, 'ExtensionQuery')
    response = self._send(
        http_method='POST',
        location_id='eb9d5ee1-6d43-456b-b80e-8a96fbc014b6',
        version='5.1-preview.1',
        query_parameters=query_parameters,
        content=request_body,
    )
    return self._deserialize('ExtensionQueryResult', response)
[ "def", "query_extensions", "(", "self", ",", "extension_query", ",", "account_token", "=", "None", ",", "account_token_header", "=", "None", ")", ":", "query_parameters", "=", "{", "}", "if", "account_token", "is", "not", "None", ":", "query_parameters", "[", ...
QueryExtensions. [Preview API] :param :class:`<ExtensionQuery> <azure.devops.v5_1.gallery.models.ExtensionQuery>` extension_query: :param str account_token: :param String account_token_header: Header to pass the account token :rtype: :class:`<ExtensionQueryResult> <azure.devops.v5_1.gallery.models.ExtensionQueryResult>`
[ "QueryExtensions", ".", "[", "Preview", "API", "]", ":", "param", ":", "class", ":", "<ExtensionQuery", ">", "<azure", ".", "devops", ".", "v5_1", ".", "gallery", ".", "models", ".", "ExtensionQuery", ">", "extension_query", ":", ":", "param", "str", "acco...
python
train
bitly/asyncmongo
asyncmongo/message.py
https://github.com/bitly/asyncmongo/blob/3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b/asyncmongo/message.py#L60-L74
def insert(collection_name, docs, check_keys, safe, last_error_args):
    """Get an **insert** message.
    """
    # Message body: four zero bytes, then the C-string collection name,
    # then the concatenated BSON documents.
    data = __ZERO
    data += bson._make_c_string(collection_name)
    encoded_docs = [bson.BSON.encode(doc, check_keys) for doc in docs]
    bson_data = "".join(encoded_docs)
    if not bson_data:
        raise InvalidOperation("cannot do an empty bulk insert")
    data += bson_data
    if not safe:
        return __pack_message(2002, data)
    # Safe mode: append a getlasterror request and reuse its request id.
    (_, insert_message) = __pack_message(2002, data)
    (request_id, error_message) = __last_error(last_error_args)
    return (request_id, insert_message + error_message)
[ "def", "insert", "(", "collection_name", ",", "docs", ",", "check_keys", ",", "safe", ",", "last_error_args", ")", ":", "data", "=", "__ZERO", "data", "+=", "bson", ".", "_make_c_string", "(", "collection_name", ")", "bson_data", "=", "\"\"", ".", "join", ...
Get an **insert** message.
[ "Get", "an", "**", "insert", "**", "message", "." ]
python
train
neherlab/treetime
treetime/treetime.py
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/treetime.py#L655-L684
def print_lh(self, joint=True):
    """
    Print the total likelihood of the tree given the constrained leaves

    Parameters
    ----------
    joint : bool
        If true, print joint LH, else print marginal LH
    """
    try:
        u_lh = self.tree.unconstrained_sequence_LH
        if joint:
            s_lh = self.tree.sequence_joint_LH
            t_lh = self.tree.positional_joint_LH
            c_lh = self.tree.coalescent_joint_LH
        else:
            s_lh = self.tree.sequence_marginal_LH
            t_lh = self.tree.positional_marginal_LH
            # no coalescent LH is computed for the marginal case
            c_lh = 0

        print ("### Tree Log-Likelihood ###\n"
               " Sequence log-LH without constraints: \t%1.3f\n"
               " Sequence log-LH with constraints: \t%1.3f\n"
               " TreeTime sequence log-LH: \t%1.3f\n"
               " Coalescent log-LH: \t%1.3f\n"
               "#########################"%(u_lh, s_lh,t_lh, c_lh))
    except Exception:
        # Was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed while keeping the best-effort
        # behaviour when the requested LH attributes are missing.
        print("ERROR. Did you run the corresponding inference (joint/marginal)?")
[ "def", "print_lh", "(", "self", ",", "joint", "=", "True", ")", ":", "try", ":", "u_lh", "=", "self", ".", "tree", ".", "unconstrained_sequence_LH", "if", "joint", ":", "s_lh", "=", "self", ".", "tree", ".", "sequence_joint_LH", "t_lh", "=", "self", "....
Print the total likelihood of the tree given the constrained leaves Parameters ---------- joint : bool If true, print joint LH, else print marginal LH
[ "Print", "the", "total", "likelihood", "of", "the", "tree", "given", "the", "constrained", "leaves" ]
python
test
lablup/backend.ai-client-py
src/ai/backend/client/cli/vfolder.py
https://github.com/lablup/backend.ai-client-py/blob/a063d774fea6f4350b89498c40d3c837ec3029a7/src/ai/backend/client/cli/vfolder.py#L156-L171
def download(name, filenames):
    '''
    Download a file from the virtual folder to the current working directory.
    The files with the same names will be overwritten.

    \b
    NAME: Name of a virtual folder.
    FILENAMES: Paths of the files to be downloaded.
    '''
    with Session() as session:
        try:
            session.VFolder(name).download(filenames, show_progress=True)
            print_done('Done.')
        except Exception as e:
            # Report the failure and exit non-zero for CLI callers.
            print_error(e)
            sys.exit(1)
[ "def", "download", "(", "name", ",", "filenames", ")", ":", "with", "Session", "(", ")", "as", "session", ":", "try", ":", "session", ".", "VFolder", "(", "name", ")", ".", "download", "(", "filenames", ",", "show_progress", "=", "True", ")", "print_do...
Download a file from the virtual folder to the current working directory. The files with the same names will be overwritten. \b NAME: Name of a virtual folder. FILENAMES: Paths of the files to be downloaded.
[ "Download", "a", "file", "from", "the", "virtual", "folder", "to", "the", "current", "working", "directory", ".", "The", "files", "with", "the", "same", "names", "will", "be", "overwirtten", "." ]
python
train
robotools/fontParts
Lib/fontParts/base/normalizers.py
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/normalizers.py#L927-L937
def normalizeGlyphNote(value):
    """
    Normalizes Glyph Note.

    * **value** must be a :ref:`type-string`.
    * Returned value is an unencoded ``unicode`` string
    """
    if isinstance(value, basestring):
        return unicode(value)
    raise TypeError("Note must be a string, not %s." % type(value).__name__)
[ "def", "normalizeGlyphNote", "(", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"Note must be a string, not %s.\"", "%", "type", "(", "value", ")", ".", "__name__", ")", "return", "unicode...
Normalizes Glyph Note. * **value** must be a :ref:`type-string`. * Returned value is an unencoded ``unicode`` string
[ "Normalizes", "Glyph", "Note", "." ]
python
train
reanahub/reana-commons
reana_commons/publisher.py
https://github.com/reanahub/reana-commons/blob/abf31d9f495e0d93171c43fc4a414cd292091b11/reana_commons/publisher.py#L100-L118
def publish_workflow_status(self, workflow_uuid, status, logs='', message=None):
    """Publish workflow status using the configured publisher.

    :param workflow_uuid: String which represents the workflow UUID.
    :param status: Integer which represents the status of the workflow,
        this is defined in the `reana-db` `Workflow` models.
    :param logs: String which represents the logs which the workflow
        has produced as output.
    :param message: Dictionary which includes additional information
        can be attached such as the overall progress of the workflow.
    """
    msg = {
        "workflow_uuid": workflow_uuid,
        "logs": logs,
        "status": status,
        "message": message
    }
    self._publish(msg)
[ "def", "publish_workflow_status", "(", "self", ",", "workflow_uuid", ",", "status", ",", "logs", "=", "''", ",", "message", "=", "None", ")", ":", "msg", "=", "{", "\"workflow_uuid\"", ":", "workflow_uuid", ",", "\"logs\"", ":", "logs", ",", "\"status\"", ...
Publish workflow status using the configured publisher. :param workflow_uuid: String which represents the workflow UUID. :param status: Integer which represents the status of the workflow, this is defined in the `reana-db` `Workflow` models. :param logs: String which represents the logs which the workflow has produced as output. :param message: Dictionary which includes additional information can be attached such as the overall progress of the workflow.
[ "Publish", "workflow", "status", "using", "the", "configured", "." ]
python
train
Jaymon/prom
prom/interface/base.py
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/interface/base.py#L217-L227
def query(self, query_str, *query_args, **query_options):
    """
    run a raw query on the db

    query_str -- string -- the query to run
    *query_args -- if the query_str is a formatting string, pass the values in this
    **query_options -- any query options can be passed in by using key=val syntax
    """
    # Acquire a connection, expose it to the backend through the
    # options dict, and delegate to the concrete _query implementation.
    with self.connection(**query_options) as conn:
        query_options['connection'] = conn
        return self._query(query_str, query_args, **query_options)
[ "def", "query", "(", "self", ",", "query_str", ",", "*", "query_args", ",", "*", "*", "query_options", ")", ":", "with", "self", ".", "connection", "(", "*", "*", "query_options", ")", "as", "connection", ":", "query_options", "[", "'connection'", "]", "...
run a raw query on the db query_str -- string -- the query to run *query_args -- if the query_str is a formatting string, pass the values in this **query_options -- any query options can be passed in by using key=val syntax
[ "run", "a", "raw", "query", "on", "the", "db" ]
python
train
twilio/twilio-python
twilio/rest/verify/v2/service/verification.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/verify/v2/service/verification.py#L168-L190
def update(self, status):
    """
    Update the VerificationInstance

    :param VerificationInstance.Status status: The new status of the resource

    :returns: Updated VerificationInstance
    :rtype: twilio.rest.verify.v2.service.verification.VerificationInstance
    """
    # POST the new status and rebuild an instance from the response.
    payload = self._version.update(
        'POST',
        self._uri,
        data=values.of({'Status': status, }),
    )
    return VerificationInstance(
        self._version,
        payload,
        service_sid=self._solution['service_sid'],
        sid=self._solution['sid'],
    )
[ "def", "update", "(", "self", ",", "status", ")", ":", "data", "=", "values", ".", "of", "(", "{", "'Status'", ":", "status", ",", "}", ")", "payload", "=", "self", ".", "_version", ".", "update", "(", "'POST'", ",", "self", ".", "_uri", ",", "da...
Update the VerificationInstance :param VerificationInstance.Status status: The new status of the resource :returns: Updated VerificationInstance :rtype: twilio.rest.verify.v2.service.verification.VerificationInstance
[ "Update", "the", "VerificationInstance" ]
python
train
cloud-custodian/cloud-custodian
tools/c7n_gcp/c7n_gcp/mu.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/c7n_gcp/c7n_gcp/mu.py#L142-L151
def get(self, func_name, qualifier=None):
    """Get the details on a given function."""
    project = self.session.get_default_project()
    qualified_name = "projects/{}/locations/{}/functions/{}".format(
        project, self.region, func_name)
    try:
        return self.client.execute_query('get', {'name': qualified_name})
    except errors.HttpError as e:
        # A missing function yields None; anything else is re-raised.
        if e.resp.status != 404:
            raise
[ "def", "get", "(", "self", ",", "func_name", ",", "qualifier", "=", "None", ")", ":", "project", "=", "self", ".", "session", ".", "get_default_project", "(", ")", "func_name", "=", "\"projects/{}/locations/{}/functions/{}\"", ".", "format", "(", "project", ",...
Get the details on a given function.
[ "Get", "the", "details", "on", "a", "given", "function", "." ]
python
train
droope/droopescan
dscan/common/functions.py
https://github.com/droope/droopescan/blob/424c48a0f9d12b4536dbef5a786f0fbd4ce9519a/dscan/common/functions.py#L303-L331
def instances_get(opts, plugins, url_file_input, out):
    """
    Creates and returns an ordered dictionary containing instances
    for all available scanning plugins, sort of ordered by popularity.

    @param opts: options as returned by self._options.
    @param plugins: plugins as returned by plugins_util.plugins_base_get.
    @param url_file_input: boolean value which indicates whether we are
        scanning an individual URL or a file. This is used to determine
        kwargs required.
    @param out: self.out
    """
    instances = OrderedDict()
    preferred_order = ['wordpress', 'joomla', 'drupal']
    named = [(plugin.__name__.lower(), plugin) for plugin in plugins]

    # Popular CMSes first, in the preferred order...
    for wanted in preferred_order:
        for plugin_name, plugin in named:
            if plugin_name == wanted:
                instances[plugin_name] = instance_get(plugin, opts,
                                                      url_file_input, out)

    # ...then everything else in plugin-list order.
    for plugin_name, plugin in named:
        if plugin_name not in preferred_order:
            instances[plugin_name] = instance_get(plugin, opts,
                                                  url_file_input, out)

    return instances
[ "def", "instances_get", "(", "opts", ",", "plugins", ",", "url_file_input", ",", "out", ")", ":", "instances", "=", "OrderedDict", "(", ")", "preferred_order", "=", "[", "'wordpress'", ",", "'joomla'", ",", "'drupal'", "]", "for", "cms_name", "in", "preferre...
Creates and returns an ordered dictionary containing instances for all available scanning plugins, sort of ordered by popularity. @param opts: options as returned by self._options. @param plugins: plugins as returned by plugins_util.plugins_base_get. @param url_file_input: boolean value which indicates whether we are scanning an individual URL or a file. This is used to determine kwargs required. @param out: self.out
[ "Creates", "and", "returns", "an", "ordered", "dictionary", "containing", "instances", "for", "all", "available", "scanning", "plugins", "sort", "of", "ordered", "by", "popularity", "." ]
python
train
dead-beef/markovchain
markovchain/image/traversal.py
https://github.com/dead-beef/markovchain/blob/9bd10b2f01089341c4a875a0fa569d50caba22c7/markovchain/image/traversal.py#L70-L81
def save(self):
    """Convert to JSON.

    Returns
    -------
    `dict`
        JSON data.
    """
    # Extend the parent's JSON data with this traversal's settings.
    data = super().save()
    data.update(reverse=self.reverse, line_sentences=self.line_sentences)
    return data
[ "def", "save", "(", "self", ")", ":", "data", "=", "super", "(", ")", ".", "save", "(", ")", "data", "[", "'reverse'", "]", "=", "self", ".", "reverse", "data", "[", "'line_sentences'", "]", "=", "self", ".", "line_sentences", "return", "data" ]
Convert to JSON. Returns ------- `dict` JSON data.
[ "Convert", "to", "JSON", "." ]
python
train
swharden/PyOriginTools
PyOriginTools/workbook.py
https://github.com/swharden/PyOriginTools/blob/536fb8e11234ffdc27e26b1800e0358179ca7d26/PyOriginTools/workbook.py#L49-L68
def colAdd(self, name="", desc="", unit="", comment="", coltype=0, data=None, pos=None):
    """
    Insert a column into this workbook at *pos* (defaults to the end).

    column types:
        0: Y
        1: Disregard
        2: Y Error
        3: X
        4: Label
        5: Z
        6: X Error
    """
    # A fresh list per call: the previous mutable default (data=[]) was
    # shared across calls, so every column added without explicit data
    # aliased the SAME list object in colData.
    if data is None:
        data = []
    if pos is None:
        pos = len(self.colNames)
    self.colNames.insert(pos, name)
    self.colDesc.insert(pos, desc)
    self.colUnits.insert(pos, unit)
    self.colComments.insert(pos, comment)
    self.colTypes.insert(pos, coltype)
    self.colData.insert(pos, data)
    return
[ "def", "colAdd", "(", "self", ",", "name", "=", "\"\"", ",", "desc", "=", "\"\"", ",", "unit", "=", "\"\"", ",", "comment", "=", "\"\"", ",", "coltype", "=", "0", ",", "data", "=", "[", "]", ",", "pos", "=", "None", ")", ":", "if", "pos", "is...
column types: 0: Y 1: Disregard 2: Y Error 3: X 4: Label 5: Z 6: X Error
[ "column", "types", ":", "0", ":", "Y", "1", ":", "Disregard", "2", ":", "Y", "Error", "3", ":", "X", "4", ":", "Label", "5", ":", "Z", "6", ":", "X", "Error" ]
python
train
christophertbrown/bioscripts
ctbBio/rRNA_copies.py
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_copies.py#L31-L52
def parse_s2bins(s2bins):
    """
    parse ggKbase scaffold-to-bin mapping
    - scaffolds-to-bins and bins-to-scaffolds
    """
    s2b, b2s = {}, {}
    for line in s2bins:
        fields = line.strip().split()
        scaffold, bin_name = fields[0], fields[1]
        # ggKbase marks unbinned scaffolds with 'UNK'; skip those.
        if 'UNK' in bin_name:
            continue
        # Optional trailing fields are the genome name.
        genome = ' '.join(fields[2:]) if len(fields) > 2 else 'n/a'
        key = '%s\t%s' % (bin_name, genome)
        s2b[scaffold] = key
        b2s.setdefault(key, []).append(scaffold)
    return s2b, b2s
[ "def", "parse_s2bins", "(", "s2bins", ")", ":", "s2b", "=", "{", "}", "b2s", "=", "{", "}", "for", "line", "in", "s2bins", ":", "line", "=", "line", ".", "strip", "(", ")", ".", "split", "(", ")", "s", ",", "b", "=", "line", "[", "0", "]", ...
parse ggKbase scaffold-to-bin mapping - scaffolds-to-bins and bins-to-scaffolds
[ "parse", "ggKbase", "scaffold", "-", "to", "-", "bin", "mapping", "-", "scaffolds", "-", "to", "-", "bins", "and", "bins", "-", "to", "-", "scaffolds" ]
python
train
csirtgadgets/csirtgsdk-py
csirtgsdk/__init__.py
https://github.com/csirtgadgets/csirtgsdk-py/blob/5a7ed9c5e6fa27170366ecbdef710dc80d537dc2/csirtgsdk/__init__.py#L39-L61
def indicator_create(f, i):
    """
    Create an indicator in a feed
    :param f: feed name (eg: wes/test)
    :param i: indicator dict (eg: {'indicator': 'example.com', 'tags': ['ssh'],
    'description': 'this is a test'})
    :return: dict of indicator
    """
    # Validate arguments before touching the API.
    if '/' not in f:
        raise ValueError('feed name must be formatted like: '
                         'csirtgadgets/scanners')
    if not i:
        raise ValueError('missing indicator dict')

    user, feed = f.split('/')
    i['user'] = user
    i['feed'] = feed
    return Indicator(i).submit()
[ "def", "indicator_create", "(", "f", ",", "i", ")", ":", "if", "'/'", "not", "in", "f", ":", "raise", "ValueError", "(", "'feed name must be formatted like: '", "'csirtgadgets/scanners'", ")", "if", "not", "i", ":", "raise", "ValueError", "(", "'missing indicato...
Create an indicator in a feed :param f: feed name (eg: wes/test) :param i: indicator dict (eg: {'indicator': 'example.com', 'tags': ['ssh'], 'description': 'this is a test'}) :return: dict of indicator
[ "Create", "an", "indicator", "in", "a", "feed", ":", "param", "f", ":", "feed", "name", "(", "eg", ":", "wes", "/", "test", ")", ":", "param", "i", ":", "indicator", "dict", "(", "eg", ":", "{", "indicator", ":", "example", ".", "com", "tags", ":...
python
train
jwkvam/plotlywrapper
plotlywrapper.py
https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L551-L577
def horizontal(y, xmin=0, xmax=1, color=None, width=None, dash=None, opacity=None):
    """Draws a horizontal line from `xmin` to `xmax`.

    Parameters
    ----------
    xmin : int, optional
    xmax : int, optional
    color : str, optional
    width : number, optional

    Returns
    -------
    Chart

    """
    # Collect only the line attributes that were actually supplied.
    line_style = {}
    for key, val in (('color', color), ('width', width), ('dash', dash)):
        if val:
            line_style[key] = val
    shape = dict(type='line', x0=xmin, x1=xmax, y0=y, y1=y,
                 opacity=opacity, line=line_style)
    return Chart(layout=dict(shapes=[shape]))
[ "def", "horizontal", "(", "y", ",", "xmin", "=", "0", ",", "xmax", "=", "1", ",", "color", "=", "None", ",", "width", "=", "None", ",", "dash", "=", "None", ",", "opacity", "=", "None", ")", ":", "lineattr", "=", "{", "}", "if", "color", ":", ...
Draws a horizontal line from `xmin` to `xmax`. Parameters ---------- xmin : int, optional xmax : int, optional color : str, optional width : number, optional Returns ------- Chart
[ "Draws", "a", "horizontal", "line", "from", "xmin", "to", "xmax", "." ]
python
train
apache/spark
python/pyspark/sql/functions.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1525-L1531
def decode(col, charset):
    """
    Computes the first argument into a string from a binary using the provided character set
    (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
    """
    # Delegate to the JVM-side Spark SQL function of the same name.
    ctx = SparkContext._active_spark_context
    jcol = _to_java_column(col)
    return Column(ctx._jvm.functions.decode(jcol, charset))
[ "def", "decode", "(", "col", ",", "charset", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "decode", "(", "_to_java_column", "(", "col", ")", ",", "charset", ")", ")"...
Computes the first argument into a string from a binary using the provided character set (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
[ "Computes", "the", "first", "argument", "into", "a", "string", "from", "a", "binary", "using", "the", "provided", "character", "set", "(", "one", "of", "US", "-", "ASCII", "ISO", "-", "8859", "-", "1", "UTF", "-", "8", "UTF", "-", "16BE", "UTF", "-",...
python
train
ioos/compliance-checker
compliance_checker/acdd.py
https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/acdd.py#L505-L564
def check_time_extents(self, ds):
    """
    Check that the values of time_coverage_start/time_coverage_end approximately match the data.
    """
    if not (hasattr(ds, 'time_coverage_start') and hasattr(ds, 'time_coverage_end')):
        return

    # Parse the ISO 8601 formatted dates
    try:
        t_min = dateparse(ds.time_coverage_start)
        t_max = dateparse(ds.time_coverage_end)
    except Exception:
        # Was a bare ``except:``; narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed while keeping the graceful failure result.
        return Result(BaseCheck.MEDIUM,
                      False,
                      'time_coverage_extents_match',
                      ['time_coverage attributes are not formatted properly. Use the ISO 8601:2004 date format, preferably the extended format.'])

    timevar = cfutil.get_time_variable(ds)
    if not timevar:
        return Result(BaseCheck.MEDIUM,
                      False,
                      'time_coverage_extents_match',
                      ['Could not find time variable to test extent of time_coverage_start/time_coverage_end, see CF-1.6 spec chapter 4.4'])

    # Time should be monotonically increasing, so we make that assumption here so we don't have to download THE ENTIRE ARRAY
    try:
        # num2date returns as naive date, but with time adjusted to UTC
        # we need to attach timezone information here, or the date
        # subtraction from t_min/t_max will assume that a naive timestamp is
        # in the same time zone and cause erroneous results.
        # Pendulum uses UTC by default, but we are being explicit here
        time0 = pendulum.instance(num2date(ds.variables[timevar][0],
                                           ds.variables[timevar].units), 'UTC')
        time1 = pendulum.instance(num2date(ds.variables[timevar][-1],
                                           ds.variables[timevar].units), 'UTC')
    except Exception:
        # Was a bare ``except:``; same narrowing as above.
        return Result(BaseCheck.MEDIUM,
                      False,
                      'time_coverage_extents_match',
                      ['Failed to retrieve and convert times for variables %s.' % timevar])

    start_dt = abs(time0 - t_min)
    end_dt = abs(time1 - t_max)

    score = 2
    msgs = []
    if start_dt > timedelta(hours=1):
        msgs.append("Date time mismatch between time_coverage_start and actual "
                    "time values %s (time_coverage_start) != %s (time[0])"
                    % (t_min.isoformat(), time0.isoformat()))
        score -= 1
    if end_dt > timedelta(hours=1):
        msgs.append("Date time mismatch between time_coverage_end and actual "
                    "time values %s (time_coverage_end) != %s (time[N])"
                    % (t_max.isoformat(), time1.isoformat()))
        score -= 1

    return Result(BaseCheck.MEDIUM, (score, 2), 'time_coverage_extents_match', msgs)
[ "def", "check_time_extents", "(", "self", ",", "ds", ")", ":", "if", "not", "(", "hasattr", "(", "ds", ",", "'time_coverage_start'", ")", "and", "hasattr", "(", "ds", ",", "'time_coverage_end'", ")", ")", ":", "return", "# Parse the ISO 8601 formatted dates", ...
Check that the values of time_coverage_start/time_coverage_end approximately match the data.
[ "Check", "that", "the", "values", "of", "time_coverage_start", "/", "time_coverage_end", "approximately", "match", "the", "data", "." ]
python
train
rmohr/static3
static.py
https://github.com/rmohr/static3/blob/e5f88c5e91789bd4db7fde0cf59e4a15c3326f11/static.py#L302-L312
def _full_path(self, path_info): """Return the full path from which to read.""" full_path = self.root + path_info if path.exists(full_path): return full_path else: for magic in self.magics: if path.exists(magic.new_path(full_path)): return magic.new_path(full_path) else: return full_path
[ "def", "_full_path", "(", "self", ",", "path_info", ")", ":", "full_path", "=", "self", ".", "root", "+", "path_info", "if", "path", ".", "exists", "(", "full_path", ")", ":", "return", "full_path", "else", ":", "for", "magic", "in", "self", ".", "magi...
Return the full path from which to read.
[ "Return", "the", "full", "path", "from", "which", "to", "read", "." ]
python
train
inveniosoftware-contrib/invenio-classifier
invenio_classifier/engine.py
https://github.com/inveniosoftware-contrib/invenio-classifier/blob/3c758cf34dca6bf0548e7da5de34e5f72e3b255e/invenio_classifier/engine.py#L381-L391
def _get_acronyms(acronyms): """Return a formatted list of acronyms.""" acronyms_str = {} if acronyms: for acronym, expansions in iteritems(acronyms): expansions_str = ", ".join(["%s (%d)" % expansion for expansion in expansions]) acronyms_str[acronym] = expansions_str return [{'acronym': str(key), 'expansion': value.encode('utf8')} for key, value in acronyms_str.iteritems()]
[ "def", "_get_acronyms", "(", "acronyms", ")", ":", "acronyms_str", "=", "{", "}", "if", "acronyms", ":", "for", "acronym", ",", "expansions", "in", "iteritems", "(", "acronyms", ")", ":", "expansions_str", "=", "\", \"", ".", "join", "(", "[", "\"%s (%d)\"...
Return a formatted list of acronyms.
[ "Return", "a", "formatted", "list", "of", "acronyms", "." ]
python
train
PyCQA/pylint
pylint/checkers/python3.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/python3.py#L1001-L1014
def visit_name(self, node):
    """Detect when a "bad" built-in is referenced."""
    # Only names that resolve to actual builtins are of interest.
    found_node, _ = node.lookup(node.name)
    if not _is_builtin(found_node):
        return
    if node.name not in self._bad_builtins:
        return
    # Skip usages guarded by an exception handler.
    wrapper = find_try_except_wrapper_node(node)
    if node_ignores_exception(node) or isinstance(wrapper, astroid.ExceptHandler):
        return
    self.add_message(node.name.lower() + "-builtin", node=node)
[ "def", "visit_name", "(", "self", ",", "node", ")", ":", "found_node", ",", "_", "=", "node", ".", "lookup", "(", "node", ".", "name", ")", "if", "not", "_is_builtin", "(", "found_node", ")", ":", "return", "if", "node", ".", "name", "not", "in", "...
Detect when a "bad" built-in is referenced.
[ "Detect", "when", "a", "bad", "built", "-", "in", "is", "referenced", "." ]
python
test
wummel/linkchecker
linkcheck/director/status.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/director/status.py#L40-L51
def run_checked(self):
    """Print periodic status messages."""
    self.start_time = time.time()
    self.setName("Status")
    # The first status message is logged after one second; subsequent
    # ones use the configured interval (read once, on the first pass).
    interval = 1
    is_first = True
    while not self.stopped(interval):
        self.log_status()
        if is_first:
            interval = self.wait_seconds
            is_first = False
[ "def", "run_checked", "(", "self", ")", ":", "self", ".", "start_time", "=", "time", ".", "time", "(", ")", "self", ".", "setName", "(", "\"Status\"", ")", "# the first status should be after a second", "wait_seconds", "=", "1", "first_wait", "=", "True", "whi...
Print periodic status messages.
[ "Print", "periodic", "status", "messages", "." ]
python
train
riga/scinum
scinum.py
https://github.com/riga/scinum/blob/55eb6d8aa77beacee5a07443392954b8a0aad8cb/scinum.py#L1480-L1534
def round_uncertainty(unc, method="publication"): """ Rounds an uncertainty *unc* following a specific *method* and returns a 2-tuple containing the significant digits as a string, and the decimal magnitude that is required to recover the uncertainty. *unc* might also be a numpy array. Rounding methods: - ``"pdg"``: Rounding rules as defined by the `PDG <http://pdg.lbl.gov/2011/reviews/rpp2011-rev-rpp-intro.pdf#page=13>`_. - ``"publication"``, ``"pub``: Like ``"pdg"`` with an extra significant digit for results that need to be combined later. - ``"onedigit"``, ``"one"``: Forces one single significant digit. This is useful when there are multiple uncertainties that vary by more than a factor 10 among themselves. Example: .. code-block:: python round_uncertainty(0.123, "pub") # -> ("123", -3) round_uncertainty(0.123, "pdg") # -> ("12", -2) round_uncertainty(0.123, "one") # -> ("1", -1) round_uncertainty(0.456, "pub") # -> ("46", -2) round_uncertainty(0.456, "pdg") # -> ("5", -1) round_uncertainty(0.456, "one") # -> ("5", -1) round_uncertainty(0.987, "pub") # -> ("987", -3) round_uncertainty(0.987, "pdg") # -> ("10", -1) round_uncertainty(0.987, "one") # -> ("10", -1) a = np.array([0.123, 0.456, 0.987]) round_uncertainty(a, "pub") # -> (["123", "46", "987"], [-3, -2, -3]) """ # validate the method meth = method.lower() if meth not in ("pub", "publication", "pdg", "one", "onedigit"): raise ValueError("unknown rounding method: {}".format(method)) # split the uncertainty sig, mag = split_value(unc) # infer the precision based on the method and get updated significand and magnitude if not is_numpy(unc): prec, sig, mag = _infer_precision(unc, sig, mag, meth) replace_args = (".", "") else: prec = np.ones(unc.shape).astype(np.int) for p, u, s, m in np.nditer([prec, unc, sig, mag], op_flags=["readwrite"]): p[...], s[...], m[...] 
= _infer_precision(u, s, m, meth) replace_args = (b".", b"") # determine the significant digits and the decimal magnitude that would reconstruct the value digits = match_precision(sig, 10.**(1 - prec)).replace(*replace_args) mag -= prec - 1 return (digits, mag)
[ "def", "round_uncertainty", "(", "unc", ",", "method", "=", "\"publication\"", ")", ":", "# validate the method", "meth", "=", "method", ".", "lower", "(", ")", "if", "meth", "not", "in", "(", "\"pub\"", ",", "\"publication\"", ",", "\"pdg\"", ",", "\"one\""...
Rounds an uncertainty *unc* following a specific *method* and returns a 2-tuple containing the significant digits as a string, and the decimal magnitude that is required to recover the uncertainty. *unc* might also be a numpy array. Rounding methods: - ``"pdg"``: Rounding rules as defined by the `PDG <http://pdg.lbl.gov/2011/reviews/rpp2011-rev-rpp-intro.pdf#page=13>`_. - ``"publication"``, ``"pub``: Like ``"pdg"`` with an extra significant digit for results that need to be combined later. - ``"onedigit"``, ``"one"``: Forces one single significant digit. This is useful when there are multiple uncertainties that vary by more than a factor 10 among themselves. Example: .. code-block:: python round_uncertainty(0.123, "pub") # -> ("123", -3) round_uncertainty(0.123, "pdg") # -> ("12", -2) round_uncertainty(0.123, "one") # -> ("1", -1) round_uncertainty(0.456, "pub") # -> ("46", -2) round_uncertainty(0.456, "pdg") # -> ("5", -1) round_uncertainty(0.456, "one") # -> ("5", -1) round_uncertainty(0.987, "pub") # -> ("987", -3) round_uncertainty(0.987, "pdg") # -> ("10", -1) round_uncertainty(0.987, "one") # -> ("10", -1) a = np.array([0.123, 0.456, 0.987]) round_uncertainty(a, "pub") # -> (["123", "46", "987"], [-3, -2, -3])
[ "Rounds", "an", "uncertainty", "*", "unc", "*", "following", "a", "specific", "*", "method", "*", "and", "returns", "a", "2", "-", "tuple", "containing", "the", "significant", "digits", "as", "a", "string", "and", "the", "decimal", "magnitude", "that", "is...
python
train
OCHA-DAP/hdx-python-country
src/hdx/location/country.py
https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L170-L183
def set_ocha_url(cls, url=None): # type: (str) -> None """ Set World Bank url from which to retrieve countries data Args: url (str): World Bank url from which to retrieve countries data. Defaults to internal value. Returns: None """ if url is None: url = cls._ochaurl_int cls._ochaurl = url
[ "def", "set_ocha_url", "(", "cls", ",", "url", "=", "None", ")", ":", "# type: (str) -> None", "if", "url", "is", "None", ":", "url", "=", "cls", ".", "_ochaurl_int", "cls", ".", "_ochaurl", "=", "url" ]
Set World Bank url from which to retrieve countries data Args: url (str): World Bank url from which to retrieve countries data. Defaults to internal value. Returns: None
[ "Set", "World", "Bank", "url", "from", "which", "to", "retrieve", "countries", "data" ]
python
train
projectatomic/atomic-reactor
atomic_reactor/core.py
https://github.com/projectatomic/atomic-reactor/blob/fd31c01b964097210bf169960d051e5f04019a80/atomic_reactor/core.py#L128-L179
def build_image_dockerhost(self, build_image, json_args_path): """ Build docker image inside privileged container using docker from host (mount docker socket inside container). There are possible races here. Use wisely. This operation is asynchronous and you should wait for container to finish. :param build_image: str, name of image where build is performed :param json_args_path: str, this dir is mounted inside build container and used as a way to transport data between host and buildroot; there has to be a file inside this dir with name atomic_reactor.BUILD_JSON which is used to feed build :return: str, container id """ logger.info("building image '%s' in container using docker from host", build_image) self._check_build_input(build_image, json_args_path) self._obtain_source_from_path_if_needed(json_args_path, CONTAINER_SHARE_PATH) if not os.path.exists(DOCKER_SOCKET_PATH): logger.error("looks like docker is not running because there is no socket at: %s", DOCKER_SOCKET_PATH) raise RuntimeError("docker socket not found: %s" % DOCKER_SOCKET_PATH) volume_bindings = { DOCKER_SOCKET_PATH: { 'bind': DOCKER_SOCKET_PATH, }, json_args_path: { 'bind': CONTAINER_SHARE_PATH, }, } if self._volume_bind_understands_mode(): volume_bindings[DOCKER_SOCKET_PATH]['mode'] = 'ro' volume_bindings[json_args_path]['mode'] = 'rw,Z' else: volume_bindings[DOCKER_SOCKET_PATH]['ro'] = True volume_bindings[json_args_path]['rw'] = True with open(os.path.join(json_args_path, BUILD_JSON)) as fp: logger.debug('build json mounted in container: %s', fp.read()) container_id = self.tasker.run( ImageName.parse(build_image), create_kwargs={'volumes': [DOCKER_SOCKET_PATH, json_args_path]}, volume_bindings=volume_bindings, privileged=True, ) return container_id
[ "def", "build_image_dockerhost", "(", "self", ",", "build_image", ",", "json_args_path", ")", ":", "logger", ".", "info", "(", "\"building image '%s' in container using docker from host\"", ",", "build_image", ")", "self", ".", "_check_build_input", "(", "build_image", ...
Build docker image inside privileged container using docker from host (mount docker socket inside container). There are possible races here. Use wisely. This operation is asynchronous and you should wait for container to finish. :param build_image: str, name of image where build is performed :param json_args_path: str, this dir is mounted inside build container and used as a way to transport data between host and buildroot; there has to be a file inside this dir with name atomic_reactor.BUILD_JSON which is used to feed build :return: str, container id
[ "Build", "docker", "image", "inside", "privileged", "container", "using", "docker", "from", "host", "(", "mount", "docker", "socket", "inside", "container", ")", ".", "There", "are", "possible", "races", "here", ".", "Use", "wisely", "." ]
python
train
openego/eTraGo
etrago/tools/utilities.py
https://github.com/openego/eTraGo/blob/2a8fc6d4368d0e9abe6fe0d0c39baf66ea0126b9/etrago/tools/utilities.py#L557-L604
def data_manipulation_sh(network): """ Adds missing components to run calculations with SH scenarios. Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA """ from shapely.geometry import Point, LineString, MultiLineString from geoalchemy2.shape import from_shape, to_shape # add connection from Luebeck to Siems new_bus = str(network.buses.index.astype(np.int64).max() + 1) new_trafo = str(network.transformers.index.astype(np.int64).max() + 1) new_line = str(network.lines.index.astype(np.int64).max() + 1) network.add("Bus", new_bus, carrier='AC', v_nom=220, x=10.760835, y=53.909745) network.add("Transformer", new_trafo, bus0="25536", bus1=new_bus, x=1.29960, tap_ratio=1, s_nom=1600) network.add("Line", new_line, bus0="26387", bus1=new_bus, x=0.0001, s_nom=1600) network.lines.loc[new_line, 'cables'] = 3.0 # bus geom point_bus1 = Point(10.760835, 53.909745) network.buses.set_value(new_bus, 'geom', from_shape(point_bus1, 4326)) # line geom/topo network.lines.set_value(new_line, 'geom', from_shape(MultiLineString( [LineString([to_shape(network. buses.geom['26387']), point_bus1])]), 4326)) network.lines.set_value(new_line, 'topo', from_shape(LineString( [to_shape(network.buses.geom['26387']), point_bus1]), 4326)) # trafo geom/topo network.transformers.set_value(new_trafo, 'geom', from_shape(MultiLineString( [LineString( [to_shape(network .buses.geom['25536']), point_bus1])]), 4326)) network.transformers.set_value(new_trafo, 'topo', from_shape( LineString([to_shape(network.buses.geom['25536']), point_bus1]), 4326)) return
[ "def", "data_manipulation_sh", "(", "network", ")", ":", "from", "shapely", ".", "geometry", "import", "Point", ",", "LineString", ",", "MultiLineString", "from", "geoalchemy2", ".", "shape", "import", "from_shape", ",", "to_shape", "# add connection from Luebeck to S...
Adds missing components to run calculations with SH scenarios. Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA
[ "Adds", "missing", "components", "to", "run", "calculations", "with", "SH", "scenarios", "." ]
python
train
bitesofcode/projexui
projexui/xsettings.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/xsettings.py#L497-L506
def allKeys(self): """ Returns a list of all the keys for this settings instance. :return [<str>, ..] """ if self._customFormat: return self._customFormat.allKeys() else: return super(XSettings, self).allKeys()
[ "def", "allKeys", "(", "self", ")", ":", "if", "self", ".", "_customFormat", ":", "return", "self", ".", "_customFormat", ".", "allKeys", "(", ")", "else", ":", "return", "super", "(", "XSettings", ",", "self", ")", ".", "allKeys", "(", ")" ]
Returns a list of all the keys for this settings instance. :return [<str>, ..]
[ "Returns", "a", "list", "of", "all", "the", "keys", "for", "this", "settings", "instance", ".", ":", "return", "[", "<str", ">", "..", "]" ]
python
train
neo4j/neo4j-python-driver
neo4j/__init__.py
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/__init__.py#L102-L113
def _check_uri(cls, uri): """ Check whether a URI is compatible with a :class:`.Driver` subclass. When called from a subclass, execution simply passes through if the URI scheme is valid for that class. If invalid, a `ValueError` is raised. :param uri: URI to check for compatibility :raise: `ValueError` if URI scheme is incompatible """ parsed = urlparse(uri) if parsed.scheme != cls.uri_scheme: raise ValueError("%s objects require the %r URI scheme" % (cls.__name__, cls.uri_scheme))
[ "def", "_check_uri", "(", "cls", ",", "uri", ")", ":", "parsed", "=", "urlparse", "(", "uri", ")", "if", "parsed", ".", "scheme", "!=", "cls", ".", "uri_scheme", ":", "raise", "ValueError", "(", "\"%s objects require the %r URI scheme\"", "%", "(", "cls", ...
Check whether a URI is compatible with a :class:`.Driver` subclass. When called from a subclass, execution simply passes through if the URI scheme is valid for that class. If invalid, a `ValueError` is raised. :param uri: URI to check for compatibility :raise: `ValueError` if URI scheme is incompatible
[ "Check", "whether", "a", "URI", "is", "compatible", "with", "a", ":", "class", ":", ".", "Driver", "subclass", ".", "When", "called", "from", "a", "subclass", "execution", "simply", "passes", "through", "if", "the", "URI", "scheme", "is", "valid", "for", ...
python
train
jameslyons/pycipher
pycipher/delastelle.py
https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/delastelle.py#L42-L57
def decipher(self,string): """Decipher string using Delastelle cipher according to initialised key. Example:: plaintext = Delastelle('APCZ WRLFBDKOTYUQGENHXMIVS').decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string. The plaintext will be 1/3 the length of the ciphertext. """ string = self.remove_punctuation(string,filter='[^'+self.chars+']') ret = '' for i in range(0,len(string),3): ind = tuple([int(string[i+k]) for k in [0,1,2]]) ret += IND2L[ind] return ret
[ "def", "decipher", "(", "self", ",", "string", ")", ":", "string", "=", "self", ".", "remove_punctuation", "(", "string", ",", "filter", "=", "'[^'", "+", "self", ".", "chars", "+", "']'", ")", "ret", "=", "''", "for", "i", "in", "range", "(", "0",...
Decipher string using Delastelle cipher according to initialised key. Example:: plaintext = Delastelle('APCZ WRLFBDKOTYUQGENHXMIVS').decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string. The plaintext will be 1/3 the length of the ciphertext.
[ "Decipher", "string", "using", "Delastelle", "cipher", "according", "to", "initialised", "key", "." ]
python
train
googledatalab/pydatalab
google/datalab/storage/_api.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/storage/_api.py#L124-L146
def object_download(self, bucket, key, start_offset=0, byte_count=None): """Reads the contents of an object as text. Args: bucket: the name of the bucket containing the object. key: the key of the object to be read. start_offset: the start offset of bytes to read. byte_count: the number of bytes to read. If None, it reads to the end. Returns: The text content within the object. Raises: Exception if the object could not be read from. """ args = {'alt': 'media'} headers = {} if start_offset > 0 or byte_count is not None: header = 'bytes=%d-' % start_offset if byte_count is not None: header += '%d' % byte_count headers['Range'] = header url = Api._DOWNLOAD_ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key))) return google.datalab.utils.Http.request(url, args=args, headers=headers, credentials=self._credentials, raw_response=True)
[ "def", "object_download", "(", "self", ",", "bucket", ",", "key", ",", "start_offset", "=", "0", ",", "byte_count", "=", "None", ")", ":", "args", "=", "{", "'alt'", ":", "'media'", "}", "headers", "=", "{", "}", "if", "start_offset", ">", "0", "or",...
Reads the contents of an object as text. Args: bucket: the name of the bucket containing the object. key: the key of the object to be read. start_offset: the start offset of bytes to read. byte_count: the number of bytes to read. If None, it reads to the end. Returns: The text content within the object. Raises: Exception if the object could not be read from.
[ "Reads", "the", "contents", "of", "an", "object", "as", "text", "." ]
python
train
madeindjs/Super-Markdown
SuperMarkdown/SuperMarkdown.py
https://github.com/madeindjs/Super-Markdown/blob/fe2da746afa6a27aaaad27a2db1dca234f802eb0/SuperMarkdown/SuperMarkdown.py#L86-L93
def _text_file(self, url): """return the content of a file""" try: with open(url, 'r', encoding='utf-8') as file: return file.read() except FileNotFoundError: print('File `{}` not found'.format(url)) sys.exit(0)
[ "def", "_text_file", "(", "self", ",", "url", ")", ":", "try", ":", "with", "open", "(", "url", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "file", ":", "return", "file", ".", "read", "(", ")", "except", "FileNotFoundError", ":", "print",...
return the content of a file
[ "return", "the", "content", "of", "a", "file" ]
python
train
pydata/xarray
xarray/core/dataset.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/dataset.py#L676-L690
def _construct_direct(cls, variables, coord_names, dims, attrs=None, indexes=None, encoding=None, file_obj=None): """Shortcut around __init__ for internal use when we want to skip costly validation """ obj = object.__new__(cls) obj._variables = variables obj._coord_names = coord_names obj._dims = dims obj._indexes = indexes obj._attrs = attrs obj._file_obj = file_obj obj._encoding = encoding obj._initialized = True return obj
[ "def", "_construct_direct", "(", "cls", ",", "variables", ",", "coord_names", ",", "dims", ",", "attrs", "=", "None", ",", "indexes", "=", "None", ",", "encoding", "=", "None", ",", "file_obj", "=", "None", ")", ":", "obj", "=", "object", ".", "__new__...
Shortcut around __init__ for internal use when we want to skip costly validation
[ "Shortcut", "around", "__init__", "for", "internal", "use", "when", "we", "want", "to", "skip", "costly", "validation" ]
python
train
mitodl/django-server-status
server_status/views.py
https://github.com/mitodl/django-server-status/blob/99bd29343138f94a08718fdbd9285e551751777b/server_status/views.py#L64-L102
def get_redis_info(): """Check Redis connection.""" from kombu.utils.url import _parse_url as parse_redis_url from redis import ( StrictRedis, ConnectionError as RedisConnectionError, ResponseError as RedisResponseError, ) for conf_name in ('REDIS_URL', 'BROKER_URL', 'CELERY_BROKER_URL'): if hasattr(settings, conf_name): url = getattr(settings, conf_name) if url.startswith('redis://'): break else: log.error("No redis connection info found in settings.") return {"status": NO_CONFIG} _, host, port, _, password, database, _ = parse_redis_url(url) start = datetime.now() try: rdb = StrictRedis( host=host, port=port, db=database, password=password, socket_timeout=TIMEOUT_SECONDS, ) info = rdb.info() except (RedisConnectionError, TypeError) as ex: log.error("Error making Redis connection: %s", ex.args) return {"status": DOWN} except RedisResponseError as ex: log.error("Bad Redis response: %s", ex.args) return {"status": DOWN, "message": "auth error"} micro = (datetime.now() - start).microseconds del rdb # the redis package does not support Redis's QUIT. ret = { "status": UP, "response_microseconds": micro, } fields = ("uptime_in_seconds", "used_memory", "used_memory_peak") ret.update({x: info[x] for x in fields}) return ret
[ "def", "get_redis_info", "(", ")", ":", "from", "kombu", ".", "utils", ".", "url", "import", "_parse_url", "as", "parse_redis_url", "from", "redis", "import", "(", "StrictRedis", ",", "ConnectionError", "as", "RedisConnectionError", ",", "ResponseError", "as", "...
Check Redis connection.
[ "Check", "Redis", "connection", "." ]
python
train
mbedmicro/pyOCD
pyocd/probe/stlink_probe.py
https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/stlink_probe.py#L112-L115
def connect(self, protocol=None): """! @brief Initialize DAP IO pins for JTAG or SWD""" self._link.enter_debug(STLink.Protocol.SWD) self._is_connected = True
[ "def", "connect", "(", "self", ",", "protocol", "=", "None", ")", ":", "self", ".", "_link", ".", "enter_debug", "(", "STLink", ".", "Protocol", ".", "SWD", ")", "self", ".", "_is_connected", "=", "True" ]
! @brief Initialize DAP IO pins for JTAG or SWD
[ "!" ]
python
train
PMEAL/porespy
porespy/filters/__funcs__.py
https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/filters/__funcs__.py#L19-L80
def distance_transform_lin(im, axis=0, mode='both'): r""" Replaces each void voxel with the linear distance to the nearest solid voxel along the specified axis. Parameters ---------- im : ND-array The image of the porous material with ``True`` values indicating the void phase (or phase of interest) axis : int The direction along which the distance should be measured, the default is 0 (i.e. along the x-direction) mode : string Controls how the distance is measured. Options are: 'forward' - Distances are measured in the increasing direction along the specified axis 'reverse' - Distances are measured in the reverse direction. *'backward'* is also accepted. 'both' - Distances are calculated in both directions (by recursively calling itself), then reporting the minimum value of the two results. Returns ------- image : ND-array A copy of ``im`` with each foreground voxel containing the distance to the nearest background along the specified axis. """ if im.ndim != im.squeeze().ndim: warnings.warn('Input image conains a singleton axis:' + str(im.shape) + ' Reduce dimensionality with np.squeeze(im) to avoid' + ' unexpected behavior.') if mode in ['backward', 'reverse']: im = sp.flip(im, axis) im = distance_transform_lin(im=im, axis=axis, mode='forward') im = sp.flip(im, axis) return im elif mode in ['both']: im_f = distance_transform_lin(im=im, axis=axis, mode='forward') im_b = distance_transform_lin(im=im, axis=axis, mode='backward') return sp.minimum(im_f, im_b) else: b = sp.cumsum(im > 0, axis=axis) c = sp.diff(b*(im == 0), axis=axis) d = sp.minimum.accumulate(c, axis=axis) if im.ndim == 1: e = sp.pad(d, pad_width=[1, 0], mode='constant', constant_values=0) elif im.ndim == 2: ax = [[[1, 0], [0, 0]], [[0, 0], [1, 0]]] e = sp.pad(d, pad_width=ax[axis], mode='constant', constant_values=0) elif im.ndim == 3: ax = [[[1, 0], [0, 0], [0, 0]], [[0, 0], [1, 0], [0, 0]], [[0, 0], [0, 0], [1, 0]]] e = sp.pad(d, pad_width=ax[axis], mode='constant', constant_values=0) f = im*(b + e) 
return f
[ "def", "distance_transform_lin", "(", "im", ",", "axis", "=", "0", ",", "mode", "=", "'both'", ")", ":", "if", "im", ".", "ndim", "!=", "im", ".", "squeeze", "(", ")", ".", "ndim", ":", "warnings", ".", "warn", "(", "'Input image conains a singleton axis...
r""" Replaces each void voxel with the linear distance to the nearest solid voxel along the specified axis. Parameters ---------- im : ND-array The image of the porous material with ``True`` values indicating the void phase (or phase of interest) axis : int The direction along which the distance should be measured, the default is 0 (i.e. along the x-direction) mode : string Controls how the distance is measured. Options are: 'forward' - Distances are measured in the increasing direction along the specified axis 'reverse' - Distances are measured in the reverse direction. *'backward'* is also accepted. 'both' - Distances are calculated in both directions (by recursively calling itself), then reporting the minimum value of the two results. Returns ------- image : ND-array A copy of ``im`` with each foreground voxel containing the distance to the nearest background along the specified axis.
[ "r", "Replaces", "each", "void", "voxel", "with", "the", "linear", "distance", "to", "the", "nearest", "solid", "voxel", "along", "the", "specified", "axis", "." ]
python
train
pypa/pipenv
pipenv/vendor/urllib3/response.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/urllib3/response.py#L500-L526
def from_httplib(ResponseCls, r, **response_kw): """ Given an :class:`httplib.HTTPResponse` instance ``r``, return a corresponding :class:`urllib3.response.HTTPResponse` object. Remaining parameters are passed to the HTTPResponse constructor, along with ``original_response=r``. """ headers = r.msg if not isinstance(headers, HTTPHeaderDict): if PY3: # Python 3 headers = HTTPHeaderDict(headers.items()) else: # Python 2 headers = HTTPHeaderDict.from_httplib(headers) # HTTPResponse objects in Python 3 don't have a .strict attribute strict = getattr(r, 'strict', 0) resp = ResponseCls(body=r, headers=headers, status=r.status, version=r.version, reason=r.reason, strict=strict, original_response=r, **response_kw) return resp
[ "def", "from_httplib", "(", "ResponseCls", ",", "r", ",", "*", "*", "response_kw", ")", ":", "headers", "=", "r", ".", "msg", "if", "not", "isinstance", "(", "headers", ",", "HTTPHeaderDict", ")", ":", "if", "PY3", ":", "# Python 3", "headers", "=", "H...
Given an :class:`httplib.HTTPResponse` instance ``r``, return a corresponding :class:`urllib3.response.HTTPResponse` object. Remaining parameters are passed to the HTTPResponse constructor, along with ``original_response=r``.
[ "Given", "an", ":", "class", ":", "httplib", ".", "HTTPResponse", "instance", "r", "return", "a", "corresponding", ":", "class", ":", "urllib3", ".", "response", ".", "HTTPResponse", "object", "." ]
python
train
BlackEarth/bxml
bxml/docx.py
https://github.com/BlackEarth/bxml/blob/8fbea5dad7fadc7b854ddbeff6ecfb55aaceeb77/bxml/docx.py#L149-L162
def footnotemap(self, cache=True): """return the footnotes from the docx, keyed to string id.""" if self.__footnotemap is not None and cache==True: return self.__footnotemap else: x = self.xml(src='word/footnotes.xml') d = Dict() if x is None: return d for footnote in x.root.xpath("w:footnote", namespaces=self.NS): id = footnote.get("{%(w)s}id" % self.NS) typ = footnote.get("{%(w)s}type" % self.NS) d[id] = Dict(id=id, type=typ, elem=footnote) if cache==True: self.__footnotemap = d return d
[ "def", "footnotemap", "(", "self", ",", "cache", "=", "True", ")", ":", "if", "self", ".", "__footnotemap", "is", "not", "None", "and", "cache", "==", "True", ":", "return", "self", ".", "__footnotemap", "else", ":", "x", "=", "self", ".", "xml", "("...
return the footnotes from the docx, keyed to string id.
[ "return", "the", "footnotes", "from", "the", "docx", "keyed", "to", "string", "id", "." ]
python
train
BlueBrain/NeuroM
examples/end_to_end_distance.py
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/end_to_end_distance.py#L38-L42
def path_end_to_end_distance(neurite): '''Calculate and return end-to-end-distance of a given neurite.''' trunk = neurite.root_node.points[0] return max(morphmath.point_dist(l.points[-1], trunk) for l in neurite.root_node.ileaf())
[ "def", "path_end_to_end_distance", "(", "neurite", ")", ":", "trunk", "=", "neurite", ".", "root_node", ".", "points", "[", "0", "]", "return", "max", "(", "morphmath", ".", "point_dist", "(", "l", ".", "points", "[", "-", "1", "]", ",", "trunk", ")", ...
Calculate and return end-to-end-distance of a given neurite.
[ "Calculate", "and", "return", "end", "-", "to", "-", "end", "-", "distance", "of", "a", "given", "neurite", "." ]
python
train
olitheolix/qtmacs
qtmacs/base_applet.py
https://github.com/olitheolix/qtmacs/blob/36253b082b82590f183fe154b053eb3a1e741be2/qtmacs/base_applet.py#L641-L698
def qteMakeWidgetActive(self, widgetObj: QtGui.QWidget): """ Give keyboard focus to ``widgetObj``. If ``widgetObj`` is **None** then the internal focus state is reset, but the focus manger will automatically activate the first available widget again. |Args| * ``widgetObj`` (**QWidget**): the widget to focus on. |Returns| * **None** |Raises| * **QtmacsOtherError** if ``widgetObj`` was not added with ``qteAddWidget``. """ # Void the active widget information. if widgetObj is None: self._qteActiveWidget = None return # Ensure that this applet is an ancestor of ``widgetObj`` # inside the Qt hierarchy. if qteGetAppletFromWidget(widgetObj) is not self: msg = 'The specified widget is not inside the current applet.' raise QtmacsOtherError(msg) # If widgetObj is not registered with Qtmacs then simply declare # it active and return. if not hasattr(widgetObj, '_qteAdmin'): self._qteActiveWidget = widgetObj return # Do nothing if widgetObj refers to an applet. if widgetObj._qteAdmin.isQtmacsApplet: self._qteActiveWidget = None return # Housekeeping: remove non-existing widgets from the admin structure. self.qteAutoremoveDeletedWidgets() # Verify the widget is registered for this applet. if widgetObj not in self._qteAdmin.widgetList: msg = 'Widget is not registered for this applet.' self.qteLogger.error(msg, stack_info=True) self._qteActiveWidget = None return # The focus manager in QtmacsMain will hand the focus to # whatever the _qteActiveWidget variable of the active applet # points to. self.qteSetWidgetFocusOrder((self._qteActiveWidget, widgetObj)) self._qteActiveWidget = widgetObj
[ "def", "qteMakeWidgetActive", "(", "self", ",", "widgetObj", ":", "QtGui", ".", "QWidget", ")", ":", "# Void the active widget information.", "if", "widgetObj", "is", "None", ":", "self", ".", "_qteActiveWidget", "=", "None", "return", "# Ensure that this applet is an...
Give keyboard focus to ``widgetObj``. If ``widgetObj`` is **None** then the internal focus state is reset, but the focus manger will automatically activate the first available widget again. |Args| * ``widgetObj`` (**QWidget**): the widget to focus on. |Returns| * **None** |Raises| * **QtmacsOtherError** if ``widgetObj`` was not added with ``qteAddWidget``.
[ "Give", "keyboard", "focus", "to", "widgetObj", "." ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/slugs.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/slugs.py#L8120-L8129
def slugs_configuration_camera_send(self, target, idOrder, order, force_mavlink1=False): ''' Control for camara. target : The system setting the commands (uint8_t) idOrder : ID 0: brightness 1: aperture 2: iris 3: ICR 4: backlight (uint8_t) order : 1: up/on 2: down/off 3: auto/reset/no action (uint8_t) ''' return self.send(self.slugs_configuration_camera_encode(target, idOrder, order), force_mavlink1=force_mavlink1)
[ "def", "slugs_configuration_camera_send", "(", "self", ",", "target", ",", "idOrder", ",", "order", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", ".", "send", "(", "self", ".", "slugs_configuration_camera_encode", "(", "target", ",", "idOrder"...
Control for camara. target : The system setting the commands (uint8_t) idOrder : ID 0: brightness 1: aperture 2: iris 3: ICR 4: backlight (uint8_t) order : 1: up/on 2: down/off 3: auto/reset/no action (uint8_t)
[ "Control", "for", "camara", "." ]
python
train
econ-ark/HARK
HARK/ConsumptionSaving/ConsMarkovModel.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/ConsumptionSaving/ConsMarkovModel.py#L902-L920
def getControls(self): ''' Calculates consumption for each consumer of this type using the consumption functions. Parameters ---------- None Returns ------- None ''' cNrmNow = np.zeros(self.AgentCount) + np.nan for t in range(self.T_cycle): for j in range(self.MrkvArray[t].shape[0]): these = np.logical_and(t == self.t_cycle, j == self.MrkvNow) cNrmNow[these] = self.solution[t].cFunc[j](self.mNrmNow[these]) self.cNrmNow = cNrmNow return None
[ "def", "getControls", "(", "self", ")", ":", "cNrmNow", "=", "np", ".", "zeros", "(", "self", ".", "AgentCount", ")", "+", "np", ".", "nan", "for", "t", "in", "range", "(", "self", ".", "T_cycle", ")", ":", "for", "j", "in", "range", "(", "self",...
Calculates consumption for each consumer of this type using the consumption functions. Parameters ---------- None Returns ------- None
[ "Calculates", "consumption", "for", "each", "consumer", "of", "this", "type", "using", "the", "consumption", "functions", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xnodewidget/xnode.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xnodewidget/xnode.py#L667-L679
def drawHotspots(self, painter): """ Draws all the hotspots for the renderer. :param painter | <QPaint> """ # draw hotspots for hotspot in (self._hotspots + self._dropzones): hstyle = hotspot.style() if hstyle == XNode.HotspotStyle.Invisible: continue hotspot.render(painter, self)
[ "def", "drawHotspots", "(", "self", ",", "painter", ")", ":", "# draw hotspots", "for", "hotspot", "in", "(", "self", ".", "_hotspots", "+", "self", ".", "_dropzones", ")", ":", "hstyle", "=", "hotspot", ".", "style", "(", ")", "if", "hstyle", "==", "X...
Draws all the hotspots for the renderer. :param painter | <QPaint>
[ "Draws", "all", "the", "hotspots", "for", "the", "renderer", ".", ":", "param", "painter", "|", "<QPaint", ">" ]
python
train
influxdata/influxdb-python
influxdb/influxdb08/dataframe_client.py
https://github.com/influxdata/influxdb-python/blob/d5d12499f3755199d5eedd8b363450f1cf4073bd/influxdb/influxdb08/dataframe_client.py#L85-L108
def query(self, query, time_precision='s', chunked=False): """Query data into DataFrames. Returns a DataFrame for a single time series and a map for multiple time series with the time series as value and its name as key. :param time_precision: [Optional, default 's'] Either 's', 'm', 'ms' or 'u'. :param chunked: [Optional, default=False] True if the data shall be retrieved in chunks, False otherwise. """ result = InfluxDBClient.query(self, query=query, time_precision=time_precision, chunked=chunked) if len(result) == 0: return result elif len(result) == 1: return self._to_dataframe(result[0], time_precision) else: ret = {} for time_series in result: ret[time_series['name']] = self._to_dataframe(time_series, time_precision) return ret
[ "def", "query", "(", "self", ",", "query", ",", "time_precision", "=", "'s'", ",", "chunked", "=", "False", ")", ":", "result", "=", "InfluxDBClient", ".", "query", "(", "self", ",", "query", "=", "query", ",", "time_precision", "=", "time_precision", ",...
Query data into DataFrames. Returns a DataFrame for a single time series and a map for multiple time series with the time series as value and its name as key. :param time_precision: [Optional, default 's'] Either 's', 'm', 'ms' or 'u'. :param chunked: [Optional, default=False] True if the data shall be retrieved in chunks, False otherwise.
[ "Query", "data", "into", "DataFrames", "." ]
python
train
androguard/androguard
androguard/core/bytecode.py
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecode.py#L795-L809
def read(self, size): """ Read from the current offset a total number of `size` bytes and increment the offset by `size` :param int size: length of bytes to read :rtype: bytearray """ if isinstance(size, SV): size = size.value buff = self.__buff[self.__idx:self.__idx + size] self.__idx += size return buff
[ "def", "read", "(", "self", ",", "size", ")", ":", "if", "isinstance", "(", "size", ",", "SV", ")", ":", "size", "=", "size", ".", "value", "buff", "=", "self", ".", "__buff", "[", "self", ".", "__idx", ":", "self", ".", "__idx", "+", "size", "...
Read from the current offset a total number of `size` bytes and increment the offset by `size` :param int size: length of bytes to read :rtype: bytearray
[ "Read", "from", "the", "current", "offset", "a", "total", "number", "of", "size", "bytes", "and", "increment", "the", "offset", "by", "size" ]
python
train
MIT-LCP/wfdb-python
wfdb/io/_header.py
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_header.py#L675-L730
def _read_header_lines(base_record_name, dir_name, pb_dir): """ Read the lines in a local or remote header file. Parameters ---------- base_record_name : str The base name of the WFDB record to be read, without any file extensions. dir_name : str The local directory location of the header file. This parameter is ignored if `pb_dir` is set. pb_dir : str Option used to stream data from Physiobank. The Physiobank database directory from which to find the required record files. eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb' pb_dir='mitdb'. Returns ------- header_lines : list List of strings corresponding to the header lines. comment_lines : list List of strings corresponding to the comment lines. """ file_name = base_record_name + '.hea' # Read local file if pb_dir is None: with open(os.path.join(dir_name, file_name), 'r') as fp: # Record line followed by signal/segment lines if any header_lines = [] # Comment lines comment_lines = [] for line in fp: line = line.strip() # Comment line if line.startswith('#'): comment_lines.append(line) # Non-empty non-comment line = header line. elif line: # Look for a comment in the line ci = line.find('#') if ci > 0: header_lines.append(line[:ci]) # comment on same line as header line comment_lines.append(line[ci:]) else: header_lines.append(line) # Read online header file else: header_lines, comment_lines = download._stream_header(file_name, pb_dir) return header_lines, comment_lines
[ "def", "_read_header_lines", "(", "base_record_name", ",", "dir_name", ",", "pb_dir", ")", ":", "file_name", "=", "base_record_name", "+", "'.hea'", "# Read local file", "if", "pb_dir", "is", "None", ":", "with", "open", "(", "os", ".", "path", ".", "join", ...
Read the lines in a local or remote header file. Parameters ---------- base_record_name : str The base name of the WFDB record to be read, without any file extensions. dir_name : str The local directory location of the header file. This parameter is ignored if `pb_dir` is set. pb_dir : str Option used to stream data from Physiobank. The Physiobank database directory from which to find the required record files. eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb' pb_dir='mitdb'. Returns ------- header_lines : list List of strings corresponding to the header lines. comment_lines : list List of strings corresponding to the comment lines.
[ "Read", "the", "lines", "in", "a", "local", "or", "remote", "header", "file", "." ]
python
train
nya3jp/end
end.py
https://github.com/nya3jp/end/blob/c001590604e50ab78402420eba4f26ba3d0ed406/end.py#L235-L245
def install_import_hook(): """Installs __import__ hook.""" saved_import = builtins.__import__ @functools.wraps(saved_import) def import_hook(name, *args, **kwargs): if name == 'end': process_import() end return saved_import(name, *args, **kwargs) end builtins.__import__ = import_hook
[ "def", "install_import_hook", "(", ")", ":", "saved_import", "=", "builtins", ".", "__import__", "@", "functools", ".", "wraps", "(", "saved_import", ")", "def", "import_hook", "(", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "name...
Installs __import__ hook.
[ "Installs", "__import__", "hook", "." ]
python
train
openstack/networking-cisco
networking_cisco/plugins/cisco/db/l3/ha_db.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/db/l3/ha_db.py#L1020-L1062
def get_router_for_floatingip(self, context, internal_port, internal_subnet, external_network_id): """We need to over-load this function so that we only return the user visible router and never its redundancy routers (as they never have floatingips associated with them). """ gw_port = orm.aliased(models_v2.Port, name="gw_port") routerport_qry = context.session.query( RouterPort.router_id, models_v2.IPAllocation.ip_address).join( models_v2.Port, models_v2.IPAllocation).filter( models_v2.Port.network_id == internal_port['network_id'], RouterPort.port_type.in_(bc.constants.ROUTER_INTERFACE_OWNERS), models_v2.IPAllocation.subnet_id == internal_subnet['id'] ).join(gw_port, gw_port.device_id == RouterPort.router_id).filter( gw_port.network_id == external_network_id, gw_port.device_owner == bc.constants.DEVICE_OWNER_ROUTER_GW ).distinct() # Ensure that redundancy routers (in a ha group) are not returned, # since only the user visible router should have floatingips. # This can be done by checking that the id of routers does not # appear in the 'redundancy_router_id' column in the # 'cisco_router_redundancy_bindings' table. routerport_qry = routerport_qry.outerjoin( RouterRedundancyBinding, RouterRedundancyBinding.redundancy_router_id == RouterPort.router_id) routerport_qry = routerport_qry.filter( RouterRedundancyBinding.redundancy_router_id == expr.null()) first_router_id = None for router_id, interface_ip in routerport_qry: if interface_ip == internal_subnet['gateway_ip']: return router_id if not first_router_id: first_router_id = router_id if first_router_id: return first_router_id raise l3_exceptions.ExternalGatewayForFloatingIPNotFound( subnet_id=internal_subnet['id'], external_network_id=external_network_id, port_id=internal_port['id'])
[ "def", "get_router_for_floatingip", "(", "self", ",", "context", ",", "internal_port", ",", "internal_subnet", ",", "external_network_id", ")", ":", "gw_port", "=", "orm", ".", "aliased", "(", "models_v2", ".", "Port", ",", "name", "=", "\"gw_port\"", ")", "ro...
We need to over-load this function so that we only return the user visible router and never its redundancy routers (as they never have floatingips associated with them).
[ "We", "need", "to", "over", "-", "load", "this", "function", "so", "that", "we", "only", "return", "the", "user", "visible", "router", "and", "never", "its", "redundancy", "routers", "(", "as", "they", "never", "have", "floatingips", "associated", "with", ...
python
train
pybel/pybel
src/pybel/struct/summary/node_summary.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/summary/node_summary.py#L101-L110
def get_names(graph): """Get all names for each namespace. :type graph: pybel.BELGraph :rtype: dict[str,set[str]] """ rv = defaultdict(set) for namespace, name in _identifier_filtered_iterator(graph): rv[namespace].add(name) return dict(rv)
[ "def", "get_names", "(", "graph", ")", ":", "rv", "=", "defaultdict", "(", "set", ")", "for", "namespace", ",", "name", "in", "_identifier_filtered_iterator", "(", "graph", ")", ":", "rv", "[", "namespace", "]", ".", "add", "(", "name", ")", "return", ...
Get all names for each namespace. :type graph: pybel.BELGraph :rtype: dict[str,set[str]]
[ "Get", "all", "names", "for", "each", "namespace", "." ]
python
train
Azure/blobxfer
blobxfer/operations/synccopy.py
https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/operations/synccopy.py#L406-L472
def _process_synccopy_descriptor(self, sd): # type: (SyncCopy, blobxfer.models.synccopy.Descriptor) -> None """Process synccopy descriptor :param SyncCopy self: this :param blobxfer.models.synccopy.Descriptor sd: synccopy descriptor """ # update progress bar self._update_progress_bar() # get download offsets offsets, resume_bytes = sd.next_offsets() # add resume bytes to counter if resume_bytes is not None: with self._transfer_lock: self._synccopy_bytes_sofar += resume_bytes logger.debug('adding {} sofar {} from {}'.format( resume_bytes, self._synccopy_bytes_sofar, sd.dst_entity.name)) del resume_bytes # check if all operations completed if offsets is None and sd.all_operations_completed: # finalize upload for non-one shots if not sd.is_one_shot_block_blob: self._finalize_upload(sd) else: # set access tier for one shots if sd.requires_access_tier_set: blobxfer.operations.azure.blob.block.set_blob_access_tier( sd.dst_entity) # accounting with self._transfer_lock: self._transfer_set.remove( blobxfer.operations.synccopy.SyncCopy. 
create_unique_transfer_operation_id( sd.src_entity, sd.dst_entity)) self._synccopy_sofar += 1 return # re-enqueue for finalization if no offsets if offsets is None: self._transfer_queue.put(sd) return # prepare upload if offsets.chunk_num == 0: self._prepare_upload(sd.dst_entity) # prepare replica targets if blobxfer.util.is_not_empty(sd.dst_entity.replica_targets): for ase in sd.dst_entity.replica_targets: if offsets.chunk_num == 0: self._prepare_upload(ase) # re-enqueue for other threads to download next offset if not append if sd.src_entity.mode != blobxfer.models.azure.StorageModes.Append: self._transfer_queue.put(sd) # issue get range if sd.src_entity.mode == blobxfer.models.azure.StorageModes.File: data = blobxfer.operations.azure.file.get_file_range( sd.src_entity, offsets) else: data = blobxfer.operations.azure.blob.get_blob_range( sd.src_entity, offsets) # process data for upload self._process_data(sd, sd.dst_entity, offsets, data) # iterate replicas if blobxfer.util.is_not_empty(sd.dst_entity.replica_targets): for ase in sd.dst_entity.replica_targets: self._process_data(sd, ase, offsets, data) # re-enqueue for append blobs if sd.src_entity.mode == blobxfer.models.azure.StorageModes.Append: self._transfer_queue.put(sd)
[ "def", "_process_synccopy_descriptor", "(", "self", ",", "sd", ")", ":", "# type: (SyncCopy, blobxfer.models.synccopy.Descriptor) -> None", "# update progress bar", "self", ".", "_update_progress_bar", "(", ")", "# get download offsets", "offsets", ",", "resume_bytes", "=", "...
Process synccopy descriptor :param SyncCopy self: this :param blobxfer.models.synccopy.Descriptor sd: synccopy descriptor
[ "Process", "synccopy", "descriptor", ":", "param", "SyncCopy", "self", ":", "this", ":", "param", "blobxfer", ".", "models", ".", "synccopy", ".", "Descriptor", "sd", ":", "synccopy", "descriptor" ]
python
train
ethereum/py-trie
trie/smt.py
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/smt.py#L271-L297
def _get(self, key: bytes) -> Tuple[bytes, Tuple[Hash32]]: """ Returns db value and branch in root->leaf order """ validate_is_bytes(key) validate_length(key, self._key_size) branch = [] target_bit = 1 << (self.depth - 1) path = to_int(key) node_hash = self.root_hash # Append the sibling node to the branch # Iterate on the parent for _ in range(self.depth): node = self.db[node_hash] left, right = node[:32], node[32:] if path & target_bit: branch.append(left) node_hash = right else: branch.append(right) node_hash = left target_bit >>= 1 # Value is the last hash in the chain # NOTE: Didn't do exception here for testing purposes return self.db[node_hash], tuple(branch)
[ "def", "_get", "(", "self", ",", "key", ":", "bytes", ")", "->", "Tuple", "[", "bytes", ",", "Tuple", "[", "Hash32", "]", "]", ":", "validate_is_bytes", "(", "key", ")", "validate_length", "(", "key", ",", "self", ".", "_key_size", ")", "branch", "="...
Returns db value and branch in root->leaf order
[ "Returns", "db", "value", "and", "branch", "in", "root", "-", ">", "leaf", "order" ]
python
train
gabstopper/smc-python
smc/elements/netlink.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/elements/netlink.py#L152-L176
def update_or_create(cls, with_status=False, **kwargs): """ Update or create static netlink. DNS entry differences are not resolved, instead any entries provided will be the final state for this netlink. If the intent is to add/remove DNS entries you can use the :meth:`~domain_server_address` method to add or remove. :raises CreateElementFailed: failed creating element :return: element instance by type or 3-tuple if with_status set """ dns_address = kwargs.pop('domain_server_address', []) element, updated, created = super(StaticNetlink, cls).update_or_create( with_status=True, defer_update=True, **kwargs) if not created: if dns_address: new_entries = RankedDNSAddress([]) new_entries.add(dns_address) element.data.update(domain_server_address=new_entries.entries) updated = True if updated: element.update() if with_status: return element, updated, created return element
[ "def", "update_or_create", "(", "cls", ",", "with_status", "=", "False", ",", "*", "*", "kwargs", ")", ":", "dns_address", "=", "kwargs", ".", "pop", "(", "'domain_server_address'", ",", "[", "]", ")", "element", ",", "updated", ",", "created", "=", "sup...
Update or create static netlink. DNS entry differences are not resolved, instead any entries provided will be the final state for this netlink. If the intent is to add/remove DNS entries you can use the :meth:`~domain_server_address` method to add or remove. :raises CreateElementFailed: failed creating element :return: element instance by type or 3-tuple if with_status set
[ "Update", "or", "create", "static", "netlink", ".", "DNS", "entry", "differences", "are", "not", "resolved", "instead", "any", "entries", "provided", "will", "be", "the", "final", "state", "for", "this", "netlink", ".", "If", "the", "intent", "is", "to", "...
python
train
google/grr
grr/server/grr_response_server/gui/http_api.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/gui/http_api.py#L106-L117
def _SetField(self, args, type_info, value): """Sets fields on the arg rdfvalue object.""" if hasattr(type_info, "enum"): try: coerced_obj = type_info.enum[value.upper()] except KeyError: # A bool is an enum but serializes to "1" / "0" which are both not in # enum or reverse_enum. coerced_obj = type_info.type.FromHumanReadable(value) else: coerced_obj = type_info.type.FromHumanReadable(value) args.Set(type_info.name, coerced_obj)
[ "def", "_SetField", "(", "self", ",", "args", ",", "type_info", ",", "value", ")", ":", "if", "hasattr", "(", "type_info", ",", "\"enum\"", ")", ":", "try", ":", "coerced_obj", "=", "type_info", ".", "enum", "[", "value", ".", "upper", "(", ")", "]",...
Sets fields on the arg rdfvalue object.
[ "Sets", "fields", "on", "the", "arg", "rdfvalue", "object", "." ]
python
train
bschollnick/semantic_url
semantic_url/__init__.py
https://github.com/bschollnick/semantic_url/blob/3c9b9e24354c0d4c5a2ce82006e610c5980395ad/semantic_url/__init__.py#L416-L431
def return_current_uri_subpage(self): """ Args: * None Returns: String - Returns the full postpath & semantic components *NOTE* may not contain the server & port numbers. That depends on what was provided to the parser. """ uri = post_slash("%s%s/%s/%s" % (post_slash(self.current_dir()), self.slots['page'], self.slots['item'], self.slots['subpage'])) return uri
[ "def", "return_current_uri_subpage", "(", "self", ")", ":", "uri", "=", "post_slash", "(", "\"%s%s/%s/%s\"", "%", "(", "post_slash", "(", "self", ".", "current_dir", "(", ")", ")", ",", "self", ".", "slots", "[", "'page'", "]", ",", "self", ".", "slots",...
Args: * None Returns: String - Returns the full postpath & semantic components *NOTE* may not contain the server & port numbers. That depends on what was provided to the parser.
[ "Args", ":", "*", "None" ]
python
train
nkmathew/yasi-sexp-indenter
yasi.py
https://github.com/nkmathew/yasi-sexp-indenter/blob/6ec2a4675e79606c555bcb67494a0ba994b05805/yasi.py#L150-L164
def read_file(fname): """ read_file(fname : str) -> str >>> read_file(r'C:\\mine\\test.lisp') r'(print "No, no, there\'s \\r\\nlife in him!. ")\\r\\n\\r\\n' The file is read in binary mode in order to preserve original line endings. Line ending Binary mode Text mode CRLF CRLF LF CR CR LF """ assert os.path.exists(fname), "\n--%s-- Warning: File `%s' does not exist. . ." \ % (current_time(), fname) with open(fname, 'rb') as fp: return fp.read().decode('utf-8')
[ "def", "read_file", "(", "fname", ")", ":", "assert", "os", ".", "path", ".", "exists", "(", "fname", ")", ",", "\"\\n--%s-- Warning: File `%s' does not exist. . .\"", "%", "(", "current_time", "(", ")", ",", "fname", ")", "with", "open", "(", "fname", ",", ...
read_file(fname : str) -> str >>> read_file(r'C:\\mine\\test.lisp') r'(print "No, no, there\'s \\r\\nlife in him!. ")\\r\\n\\r\\n' The file is read in binary mode in order to preserve original line endings. Line ending Binary mode Text mode CRLF CRLF LF CR CR LF
[ "read_file", "(", "fname", ":", "str", ")", "-", ">", "str" ]
python
train
google/grr
grr/server/grr_response_server/flow_base.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/flow_base.py#L139-L165
def CallState(self, next_state="", start_time=None): """This method is used to schedule a new state on a different worker. This is basically the same as CallFlow() except we are calling ourselves. The state will be invoked at a later time. Args: next_state: The state in this flow to be invoked. start_time: Start the flow at this time. This delays notification for flow processing into the future. Note that the flow may still be processed earlier if there are client responses waiting. Raises: ValueError: The next state specified does not exist. """ if not getattr(self, next_state): raise ValueError("Next state %s is invalid." % next_state) flow_request = rdf_flow_objects.FlowRequest( client_id=self.rdf_flow.client_id, flow_id=self.rdf_flow.flow_id, request_id=self.GetNextOutboundId(), next_state=next_state, start_time=start_time, needs_processing=True) self.flow_requests.append(flow_request)
[ "def", "CallState", "(", "self", ",", "next_state", "=", "\"\"", ",", "start_time", "=", "None", ")", ":", "if", "not", "getattr", "(", "self", ",", "next_state", ")", ":", "raise", "ValueError", "(", "\"Next state %s is invalid.\"", "%", "next_state", ")", ...
This method is used to schedule a new state on a different worker. This is basically the same as CallFlow() except we are calling ourselves. The state will be invoked at a later time. Args: next_state: The state in this flow to be invoked. start_time: Start the flow at this time. This delays notification for flow processing into the future. Note that the flow may still be processed earlier if there are client responses waiting. Raises: ValueError: The next state specified does not exist.
[ "This", "method", "is", "used", "to", "schedule", "a", "new", "state", "on", "a", "different", "worker", "." ]
python
train
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/core_v1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/core_v1_api.py#L7823-L7852
def delete_collection_namespaced_limit_range(self, namespace, **kwargs): # noqa: E501 """delete_collection_namespaced_limit_range # noqa: E501 delete collection of LimitRange # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_limit_range(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. 
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_collection_namespaced_limit_range_with_http_info(namespace, **kwargs) # noqa: E501 else: (data) = self.delete_collection_namespaced_limit_range_with_http_info(namespace, **kwargs) # noqa: E501 return data
[ "def", "delete_collection_namespaced_limit_range", "(", "self", ",", "namespace", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", ...
delete_collection_namespaced_limit_range # noqa: E501 delete collection of LimitRange # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_limit_range(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. 
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. 
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread.
[ "delete_collection_namespaced_limit_range", "#", "noqa", ":", "E501" ]
python
train
erikrose/conway
bin/conway.py
https://github.com/erikrose/conway/blob/4c185d5f2d4b942a0710d7593926e7b749b16015/bin/conway.py#L98-L102
def draw(board, term, cells): """Draw a board to the terminal.""" for (x, y), state in board.iteritems(): with term.location(x, y): print cells[state],
[ "def", "draw", "(", "board", ",", "term", ",", "cells", ")", ":", "for", "(", "x", ",", "y", ")", ",", "state", "in", "board", ".", "iteritems", "(", ")", ":", "with", "term", ".", "location", "(", "x", ",", "y", ")", ":", "print", "cells", "...
Draw a board to the terminal.
[ "Draw", "a", "board", "to", "the", "terminal", "." ]
python
train
sentinel-hub/sentinelhub-py
sentinelhub/aws.py
https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/aws.py#L163-L175
def url_to_tile(url): """ Extracts tile name, date and AWS index from tile url on AWS. :param url: class input parameter 'metafiles' :type url: str :return: Name of tile, date and AWS index which uniquely identifies tile on AWS :rtype: (str, str, int) """ info = url.strip('/').split('/') name = ''.join(info[-7: -4]) date = '-'.join(info[-4: -1]) return name, date, int(info[-1])
[ "def", "url_to_tile", "(", "url", ")", ":", "info", "=", "url", ".", "strip", "(", "'/'", ")", ".", "split", "(", "'/'", ")", "name", "=", "''", ".", "join", "(", "info", "[", "-", "7", ":", "-", "4", "]", ")", "date", "=", "'-'", ".", "joi...
Extracts tile name, date and AWS index from tile url on AWS. :param url: class input parameter 'metafiles' :type url: str :return: Name of tile, date and AWS index which uniquely identifies tile on AWS :rtype: (str, str, int)
[ "Extracts", "tile", "name", "date", "and", "AWS", "index", "from", "tile", "url", "on", "AWS", "." ]
python
train
Kaggle/kaggle-api
kaggle/api/kaggle_api.py
https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api.py#L2245-L2264
def kernel_push(self, kernel_push_request, **kwargs): # noqa: E501 """Push a new kernel version. Can be used to create a new kernel and update an existing one. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.kernel_push(kernel_push_request, async_req=True) >>> result = thread.get() :param async_req bool :param KernelPushRequest kernel_push_request: Information for pushing a new kernel version (required) :return: Result If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.kernel_push_with_http_info(kernel_push_request, **kwargs) # noqa: E501 else: (data) = self.kernel_push_with_http_info(kernel_push_request, **kwargs) # noqa: E501 return data
[ "def", "kernel_push", "(", "self", ",", "kernel_push_request", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", ...
Push a new kernel version. Can be used to create a new kernel and update an existing one. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.kernel_push(kernel_push_request, async_req=True) >>> result = thread.get() :param async_req bool :param KernelPushRequest kernel_push_request: Information for pushing a new kernel version (required) :return: Result If the method is called asynchronously, returns the request thread.
[ "Push", "a", "new", "kernel", "version", ".", "Can", "be", "used", "to", "create", "a", "new", "kernel", "and", "update", "an", "existing", "one", ".", "#", "noqa", ":", "E501" ]
python
train
wglass/lighthouse
lighthouse/configs/handler.py
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/configs/handler.py#L45-L74
def on_created(self, event): """ Newly created config file handler. Parses the file's yaml contents and creates a new instance of the target_class with the results. Fires the on_add callback with the new instance. """ if os.path.isdir(event.src_path): return logger.debug("File created: %s", event.src_path) name = self.file_name(event) try: result = self.target_class.from_config( name, yaml.load(open(event.src_path)) ) except Exception as e: logger.exception( "Error when loading new config file %s: %s", event.src_path, str(e) ) return if not result: return self.on_add(self.target_class, name, result)
[ "def", "on_created", "(", "self", ",", "event", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "event", ".", "src_path", ")", ":", "return", "logger", ".", "debug", "(", "\"File created: %s\"", ",", "event", ".", "src_path", ")", "name", "=", ...
Newly created config file handler. Parses the file's yaml contents and creates a new instance of the target_class with the results. Fires the on_add callback with the new instance.
[ "Newly", "created", "config", "file", "handler", "." ]
python
train
ArchiveTeam/wpull
wpull/processor/coprocessor/youtubedl.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/processor/coprocessor/youtubedl.py#L124-L134
def _get_output_template(self): '''Return the path prefix and output template.''' path = self._file_writer_session.extra_resource_path('.youtube-dl') if not path: self._temp_dir = tempfile.TemporaryDirectory( dir=self._root_path, prefix='tmp-wpull-youtubedl' ) path = '{}/tmp'.format(self._temp_dir.name) return path, '{}.%(id)s.%(format_id)s.%(ext)s'.format(path)
[ "def", "_get_output_template", "(", "self", ")", ":", "path", "=", "self", ".", "_file_writer_session", ".", "extra_resource_path", "(", "'.youtube-dl'", ")", "if", "not", "path", ":", "self", ".", "_temp_dir", "=", "tempfile", ".", "TemporaryDirectory", "(", ...
Return the path prefix and output template.
[ "Return", "the", "path", "prefix", "and", "output", "template", "." ]
python
train
aws/sagemaker-python-sdk
src/sagemaker/model.py
https://github.com/aws/sagemaker-python-sdk/blob/a9e724c7d3f5572b68c3903548c792a59d99799a/src/sagemaker/model.py#L162-L209
def compile(self, target_instance_family, input_shape, output_path, role, tags=None, job_name=None, compile_max_run=5 * 60, framework=None, framework_version=None): """Compile this ``Model`` with SageMaker Neo. Args: target_instance_family (str): Identifies the device that you want to run your model after compilation, for example: ml_c5. Allowed strings are: ml_c5, ml_m5, ml_c4, ml_m4, jetsontx1, jetsontx2, ml_p2, ml_p3, deeplens, rasp3b input_shape (dict): Specifies the name and shape of the expected inputs for your trained model in json dictionary form, for example: {'data':[1,3,1024,1024]}, or {'var1': [1,1,28,28], 'var2':[1,1,28,28]} output_path (str): Specifies where to store the compiled model role (str): Execution role tags (list[dict]): List of tags for labeling a compilation job. For more, see https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html. job_name (str): The name of the compilation job compile_max_run (int): Timeout in seconds for compilation (default: 3 * 60). After this amount of time Amazon SageMaker Neo terminates the compilation job regardless of its current status. framework (str): The framework that is used to train the original model. Allowed values: 'mxnet', 'tensorflow', 'pytorch', 'onnx', 'xgboost' framework_version (str) Returns: sagemaker.model.Model: A SageMaker ``Model`` object. See :func:`~sagemaker.model.Model` for full details. 
""" framework = self._framework() or framework if framework is None: raise ValueError("You must specify framework, allowed values {}".format(NEO_ALLOWED_FRAMEWORKS)) if framework not in NEO_ALLOWED_FRAMEWORKS: raise ValueError("You must provide valid framework, allowed values {}".format(NEO_ALLOWED_FRAMEWORKS)) if job_name is None: raise ValueError("You must provide a compilation job name") framework = framework.upper() framework_version = self._get_framework_version() or framework_version config = self._compilation_job_config(target_instance_family, input_shape, output_path, role, compile_max_run, job_name, framework, tags) self.sagemaker_session.compile_model(**config) job_status = self.sagemaker_session.wait_for_compilation_job(job_name) self.model_data = job_status['ModelArtifacts']['S3ModelArtifacts'] if target_instance_family.startswith('ml_'): self.image = self._neo_image(self.sagemaker_session.boto_region_name, target_instance_family, framework, framework_version) self._is_compiled_model = True else: LOGGER.warning("The intance type {} is not supported to deploy via SageMaker," "please deploy the model on the device by yourself.".format(target_instance_family)) return self
[ "def", "compile", "(", "self", ",", "target_instance_family", ",", "input_shape", ",", "output_path", ",", "role", ",", "tags", "=", "None", ",", "job_name", "=", "None", ",", "compile_max_run", "=", "5", "*", "60", ",", "framework", "=", "None", ",", "f...
Compile this ``Model`` with SageMaker Neo. Args: target_instance_family (str): Identifies the device that you want to run your model after compilation, for example: ml_c5. Allowed strings are: ml_c5, ml_m5, ml_c4, ml_m4, jetsontx1, jetsontx2, ml_p2, ml_p3, deeplens, rasp3b input_shape (dict): Specifies the name and shape of the expected inputs for your trained model in json dictionary form, for example: {'data':[1,3,1024,1024]}, or {'var1': [1,1,28,28], 'var2':[1,1,28,28]} output_path (str): Specifies where to store the compiled model role (str): Execution role tags (list[dict]): List of tags for labeling a compilation job. For more, see https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html. job_name (str): The name of the compilation job compile_max_run (int): Timeout in seconds for compilation (default: 3 * 60). After this amount of time Amazon SageMaker Neo terminates the compilation job regardless of its current status. framework (str): The framework that is used to train the original model. Allowed values: 'mxnet', 'tensorflow', 'pytorch', 'onnx', 'xgboost' framework_version (str) Returns: sagemaker.model.Model: A SageMaker ``Model`` object. See :func:`~sagemaker.model.Model` for full details.
[ "Compile", "this", "Model", "with", "SageMaker", "Neo", "." ]
python
train
ioam/lancet
lancet/core.py
https://github.com/ioam/lancet/blob/1fbbf88fa0e8974ff9ed462e3cb11722ddebdd6e/lancet/core.py#L391-L404
def show(self, exclude=[]): """ Convenience method to inspect the available argument values in human-readable format. The ordering of keys is determined by how quickly they vary. The exclude list allows specific keys to be excluded for readability (e.g. to hide long, absolute filenames). """ ordering = self.constant_keys + self.varying_keys spec_lines = [', '.join(['%s=%s' % (k, s[k]) for k in ordering if (k in s) and (k not in exclude)]) for s in self.specs] print('\n'.join(['%d: %s' % (i,l) for (i,l) in enumerate(spec_lines)]))
[ "def", "show", "(", "self", ",", "exclude", "=", "[", "]", ")", ":", "ordering", "=", "self", ".", "constant_keys", "+", "self", ".", "varying_keys", "spec_lines", "=", "[", "', '", ".", "join", "(", "[", "'%s=%s'", "%", "(", "k", ",", "s", "[", ...
Convenience method to inspect the available argument values in human-readable format. The ordering of keys is determined by how quickly they vary. The exclude list allows specific keys to be excluded for readability (e.g. to hide long, absolute filenames).
[ "Convenience", "method", "to", "inspect", "the", "available", "argument", "values", "in", "human", "-", "readable", "format", ".", "The", "ordering", "of", "keys", "is", "determined", "by", "how", "quickly", "they", "vary", "." ]
python
valid
materialsproject/pymatgen
pymatgen/io/abinit/tasks.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/tasks.py#L1890-L2052
def check_status(self): """ This function checks the status of the task by inspecting the output and the error files produced by the application and by the queue manager. """ # 1) see it the job is blocked # 2) see if an error occured at submitting the job the job was submitted, TODO these problems can be solved # 3) see if there is output # 4) see if abinit reports problems # 5) see if both err files exist and are empty # 6) no output and no err files, the job must still be running # 7) try to find out what caused the problems # 8) there is a problem but we did not figure out what ... # 9) the only way of landing here is if there is a output file but no err files... # 1) A locked task can only be unlocked by calling set_status explicitly. # an errored task, should not end up here but just to be sure black_list = (self.S_LOCKED, self.S_ERROR) #if self.status in black_list: return self.status # 2) Check the returncode of the job script if self.returncode != 0: msg = "job.sh return code: %s\nPerhaps the job was not submitted properly?" % self.returncode return self.set_status(self.S_QCRITICAL, msg=msg) # If we have an abort file produced by Abinit if self.mpiabort_file.exists: return self.set_status(self.S_ABICRITICAL, msg="Found ABINIT abort file") # Analyze the stderr file for Fortran runtime errors. # getsize is 0 if the file is empty or it does not exist. err_msg = None if self.stderr_file.getsize() != 0: err_msg = self.stderr_file.read() # Analyze the stderr file of the resource manager runtime errors. # TODO: Why are we looking for errors in queue.qerr? qerr_info = None if self.qerr_file.getsize() != 0: qerr_info = self.qerr_file.read() # Analyze the stdout file of the resource manager (needed for PBS !) qout_info = None if self.qout_file.getsize(): qout_info = self.qout_file.read() # Start to check ABINIT status if the output file has been created. 
#if self.output_file.getsize() != 0: if self.output_file.exists: try: report = self.get_event_report() except Exception as exc: msg = "%s exception while parsing event_report:\n%s" % (self, exc) return self.set_status(self.S_ABICRITICAL, msg=msg) if report is None: return self.set_status(self.S_ERROR, msg="got None report!") if report.run_completed: # Here we set the correct timing data reported by Abinit self.datetimes.start = report.start_datetime self.datetimes.end = report.end_datetime # Check if the calculation converged. not_ok = report.filter_types(self.CRITICAL_EVENTS) if not_ok: return self.set_status(self.S_UNCONVERGED, msg='status set to unconverged based on abiout') else: return self.set_status(self.S_OK, msg="status set to ok based on abiout") # Calculation still running or errors? if report.errors: # Abinit reported problems logger.debug('Found errors in report') for error in report.errors: logger.debug(str(error)) try: self.abi_errors.append(error) except AttributeError: self.abi_errors = [error] # The job is unfixable due to ABINIT errors logger.debug("%s: Found Errors or Bugs in ABINIT main output!" % self) msg = "\n".join(map(repr, report.errors)) return self.set_status(self.S_ABICRITICAL, msg=msg) # 5) if self.stderr_file.exists and not err_msg: if self.qerr_file.exists and not qerr_info: # there is output and no errors # The job still seems to be running return self.set_status(self.S_RUN, msg='there is output and no errors: job still seems to be running') # 6) if not self.output_file.exists: logger.debug("output_file does not exists") if not self.stderr_file.exists and not self.qerr_file.exists: # No output at allThe job is still in the queue. return self.status # 7) Analyze the files of the resource manager and abinit and execution err (mvs) # MG: This section has been disabled: several portability issues # Need more robust logic in error_parser, perhaps logic provided by users via callbacks. 
if False and (qerr_info or qout_info): from pymatgen.io.abinit.scheduler_error_parsers import get_parser scheduler_parser = get_parser(self.manager.qadapter.QTYPE, err_file=self.qerr_file.path, out_file=self.qout_file.path, run_err_file=self.stderr_file.path) if scheduler_parser is None: return self.set_status(self.S_QCRITICAL, msg="Cannot find scheduler_parser for qtype %s" % self.manager.qadapter.QTYPE) scheduler_parser.parse() if scheduler_parser.errors: # Store the queue errors in the task self.queue_errors = scheduler_parser.errors # The job is killed or crashed and we know what happened msg = "scheduler errors found:\n%s" % str(scheduler_parser.errors) return self.set_status(self.S_QCRITICAL, msg=msg) elif lennone(qerr_info) > 0: # if only qout_info, we are not necessarily in QCRITICAL state, # since there will always be info in the qout file self.history.info('Found unknown message in the queue qerr file: %s' % str(qerr_info)) #try: # rt = self.datetimes.get_runtime().seconds #except: # rt = -1.0 #tl = self.manager.qadapter.timelimit #if rt > tl: # msg += 'set to error : runtime (%s) exceded walltime (%s)' % (rt, tl) # print(msg) # return self.set_status(self.S_ERROR, msg=msg) # The job may be killed or crashed but we don't know what happened # It may also be that an innocent message was written to qerr, so we wait for a while # it is set to QCritical, we will attempt to fix it by running on more resources # 8) analyzing the err files and abinit output did not identify a problem # but if the files are not empty we do have a problem but no way of solving it: # The job is killed or crashed but we don't know what happend # it is set to QCritical, we will attempt to fix it by running on more resources if err_msg: msg = 'Found error message:\n %s' % str(err_msg) self.history.warning(msg) #return self.set_status(self.S_QCRITICAL, msg=msg) # 9) if we still haven't returned there is no indication of any error and the job can only still be running # but we should 
actually never land here, or we have delays in the file system .... # print('the job still seems to be running maybe it is hanging without producing output... ') # Check time of last modification. if self.output_file.exists and \ (time.time() - self.output_file.get_stat().st_mtime > self.manager.policy.frozen_timeout): msg = "Task seems to be frozen, last change more than %s [s] ago" % self.manager.policy.frozen_timeout return self.set_status(self.S_ERROR, msg=msg) # Handle weird case in which either run.abo, or run.log have not been produced #if self.status not in (self.S_INIT, self.S_READY) and (not self.output.file.exists or not self.log_file.exits): # msg = "Task have been submitted but cannot find the log file or the output file" # return self.set_status(self.S_ERROR, msg) return self.set_status(self.S_RUN, msg='final option: nothing seems to be wrong, the job must still be running')
[ "def", "check_status", "(", "self", ")", ":", "# 1) see it the job is blocked", "# 2) see if an error occured at submitting the job the job was submitted, TODO these problems can be solved", "# 3) see if there is output", "# 4) see if abinit reports problems", "# 5) see if both err files exist a...
This function checks the status of the task by inspecting the output and the error files produced by the application and by the queue manager.
[ "This", "function", "checks", "the", "status", "of", "the", "task", "by", "inspecting", "the", "output", "and", "the", "error", "files", "produced", "by", "the", "application", "and", "by", "the", "queue", "manager", "." ]
python
train
summa-tx/riemann
riemann/tx/tx_builder.py
https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/tx_builder.py#L78-L87
def _make_output(value, output_script, version=None): ''' byte-like, byte-like -> TxOut ''' if 'decred' in riemann.get_current_network_name(): return tx.DecredTxOut( value=value, version=version, output_script=output_script) return tx.TxOut(value=value, output_script=output_script)
[ "def", "_make_output", "(", "value", ",", "output_script", ",", "version", "=", "None", ")", ":", "if", "'decred'", "in", "riemann", ".", "get_current_network_name", "(", ")", ":", "return", "tx", ".", "DecredTxOut", "(", "value", "=", "value", ",", "versi...
byte-like, byte-like -> TxOut
[ "byte", "-", "like", "byte", "-", "like", "-", ">", "TxOut" ]
python
train
edwards-lab/libGWAS
libgwas/pedigree_parser.py
https://github.com/edwards-lab/libGWAS/blob/d68c9a083d443dfa5d7c5112de29010909cfe23f/libgwas/pedigree_parser.py#L83-L90
def ReportConfiguration(self, file): """ Report configuration for logging purposes. :param file: Destination for report details :return: None """ print >> file, BuildReportLine("PED FILE", self.datasource) print >> file, BuildReportLine("MAP FILE", self.mapfile)
[ "def", "ReportConfiguration", "(", "self", ",", "file", ")", ":", "print", ">>", "file", ",", "BuildReportLine", "(", "\"PED FILE\"", ",", "self", ".", "datasource", ")", "print", ">>", "file", ",", "BuildReportLine", "(", "\"MAP FILE\"", ",", "self", ".", ...
Report configuration for logging purposes. :param file: Destination for report details :return: None
[ "Report", "configuration", "for", "logging", "purposes", "." ]
python
train
tus/tus-py-client
tusclient/uploader.py
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L243-L251
def verify_upload(self): """ Confirm that the last upload was sucessful. Raises TusUploadFailed exception if the upload was not sucessful. """ if self.request.status_code == 204: return True else: raise TusUploadFailed('', self.request.status_code, self.request.response_content)
[ "def", "verify_upload", "(", "self", ")", ":", "if", "self", ".", "request", ".", "status_code", "==", "204", ":", "return", "True", "else", ":", "raise", "TusUploadFailed", "(", "''", ",", "self", ".", "request", ".", "status_code", ",", "self", ".", ...
Confirm that the last upload was sucessful. Raises TusUploadFailed exception if the upload was not sucessful.
[ "Confirm", "that", "the", "last", "upload", "was", "sucessful", ".", "Raises", "TusUploadFailed", "exception", "if", "the", "upload", "was", "not", "sucessful", "." ]
python
train
lsst-sqre/lander
lander/config.py
https://github.com/lsst-sqre/lander/blob/5e4f6123e48b451ba21963724ace0dc59798618e/lander/config.py#L246-L296
def _get_docushare_url(handle, validate=True): """Get a docushare URL given document's handle. Parameters ---------- handle : `str` Handle name, such as ``'LDM-151'``. validate : `bool`, optional Set to `True` to request that the link resolves by performing a HEAD request over the network. `False` disables this testing. Default is `True`. Returns ------- docushare_url : `str` Shortened DocuShare URL for the document corresponding to the handle. Raises ------ lander.exceptions.DocuShareError Raised for any error related to validating the DocuShare URL. """ logger = structlog.get_logger(__name__) logger.debug('Using Configuration._get_docushare_url') # Make a short link to the DocuShare version page since # a) It doesn't immediately trigger a PDF download, # b) It gives the user extra information about the document before # downloading it. url = 'https://ls.st/{handle}*'.format(handle=handle.lower()) if validate: # Test that the short link successfully resolves to DocuShare logger.debug('Validating {0}'.format(url)) try: response = requests.head(url, allow_redirects=True, timeout=30) except requests.exceptions.RequestException as e: raise DocuShareError(str(e)) error_message = 'URL {0} does not resolve to DocuShare'.format(url) if response.status_code != 200: logger.warning('HEAD {0} status: {1:d}'.format( url, response.status_code)) raise DocuShareError(error_message) redirect_url_parts = urllib.parse.urlsplit(response.url) if redirect_url_parts.netloc != 'docushare.lsst.org': logger.warning('{0} resolved to {1}'.format(url, response.url)) raise DocuShareError(error_message) return url
[ "def", "_get_docushare_url", "(", "handle", ",", "validate", "=", "True", ")", ":", "logger", "=", "structlog", ".", "get_logger", "(", "__name__", ")", "logger", ".", "debug", "(", "'Using Configuration._get_docushare_url'", ")", "# Make a short link to the DocuShare...
Get a docushare URL given document's handle. Parameters ---------- handle : `str` Handle name, such as ``'LDM-151'``. validate : `bool`, optional Set to `True` to request that the link resolves by performing a HEAD request over the network. `False` disables this testing. Default is `True`. Returns ------- docushare_url : `str` Shortened DocuShare URL for the document corresponding to the handle. Raises ------ lander.exceptions.DocuShareError Raised for any error related to validating the DocuShare URL.
[ "Get", "a", "docushare", "URL", "given", "document", "s", "handle", "." ]
python
train
graphite-project/carbonate
carbonate/config.py
https://github.com/graphite-project/carbonate/blob/b876a85b321fbd7c18a6721bed2e7807b79b4929/carbonate/config.py#L21-L27
def destinations(self, cluster='main'): """Return a list of destinations for a cluster.""" if not self.config.has_section(cluster): raise SystemExit("Cluster '%s' not defined in %s" % (cluster, self.config_file)) destinations = self.config.get(cluster, 'destinations') return destinations.replace(' ', '').split(',')
[ "def", "destinations", "(", "self", ",", "cluster", "=", "'main'", ")", ":", "if", "not", "self", ".", "config", ".", "has_section", "(", "cluster", ")", ":", "raise", "SystemExit", "(", "\"Cluster '%s' not defined in %s\"", "%", "(", "cluster", ",", "self",...
Return a list of destinations for a cluster.
[ "Return", "a", "list", "of", "destinations", "for", "a", "cluster", "." ]
python
train
ellmetha/django-machina
machina/apps/forum_member/views.py
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_member/views.py#L120-L123
def get_object(self, queryset=None): """ Returns the considered object. """ profile, dummy = ForumProfile.objects.get_or_create(user=self.request.user) return profile
[ "def", "get_object", "(", "self", ",", "queryset", "=", "None", ")", ":", "profile", ",", "dummy", "=", "ForumProfile", ".", "objects", ".", "get_or_create", "(", "user", "=", "self", ".", "request", ".", "user", ")", "return", "profile" ]
Returns the considered object.
[ "Returns", "the", "considered", "object", "." ]
python
train
pantsbuild/pants
src/python/pants/help/build_dictionary_info_extracter.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/help/build_dictionary_info_extracter.py#L111-L133
def get_arg_descriptions_from_docstring(cls, obj): """Returns an ordered map of arg name -> arg description found in :param: stanzas.""" ret = OrderedDict() name = '' doc = obj.__doc__ or '' lines = [s.strip() for s in doc.split('\n')] stanza_first_line_re = cls._get_stanza_first_line_re() for line in lines: m = stanza_first_line_re.match(line) if m and m.group(1) == 'param': # If first line of a parameter description, set name and description. name, description = m.group(3, 4) ret[name] = description elif m and m.group(1) != 'param': # If first line of a description of an item other than a parameter, clear name. name = '' elif name and line: # If subsequent line of a parameter description, add to existing description (if any) for # that parameter. ret[name] += (' ' + line) if ret[name] else line # Ignore subsequent lines of descriptions of items other than parameters. return ret
[ "def", "get_arg_descriptions_from_docstring", "(", "cls", ",", "obj", ")", ":", "ret", "=", "OrderedDict", "(", ")", "name", "=", "''", "doc", "=", "obj", ".", "__doc__", "or", "''", "lines", "=", "[", "s", ".", "strip", "(", ")", "for", "s", "in", ...
Returns an ordered map of arg name -> arg description found in :param: stanzas.
[ "Returns", "an", "ordered", "map", "of", "arg", "name", "-", ">", "arg", "description", "found", "in", ":", "param", ":", "stanzas", "." ]
python
train
ckan/deadoralive
deadoralive/deadoralive.py
https://github.com/ckan/deadoralive/blob/82eed6c73e17b9884476311a7a8fae9d2b379600/deadoralive/deadoralive.py#L124-L131
def upsert_result(client_site_url, apikey, resource_id, result): """Post the given link check result to the client site.""" # TODO: Handle exceptions and unexpected results. url = client_site_url + u"deadoralive/upsert" params = result.copy() params["resource_id"] = resource_id requests.post(url, headers=dict(Authorization=apikey), params=params)
[ "def", "upsert_result", "(", "client_site_url", ",", "apikey", ",", "resource_id", ",", "result", ")", ":", "# TODO: Handle exceptions and unexpected results.", "url", "=", "client_site_url", "+", "u\"deadoralive/upsert\"", "params", "=", "result", ".", "copy", "(", "...
Post the given link check result to the client site.
[ "Post", "the", "given", "link", "check", "result", "to", "the", "client", "site", "." ]
python
train
istommao/django-simditor
simditor/utils.py
https://github.com/istommao/django-simditor/blob/1d9fe00481f463c67f88d73ec6593a721f5fb469/simditor/utils.py#L42-L46
def is_valid_image_extension(file_path): """is_valid_image_extension.""" valid_extensions = ['.jpeg', '.jpg', '.gif', '.png'] _, extension = os.path.splitext(file_path) return extension.lower() in valid_extensions
[ "def", "is_valid_image_extension", "(", "file_path", ")", ":", "valid_extensions", "=", "[", "'.jpeg'", ",", "'.jpg'", ",", "'.gif'", ",", "'.png'", "]", "_", ",", "extension", "=", "os", ".", "path", ".", "splitext", "(", "file_path", ")", "return", "exte...
is_valid_image_extension.
[ "is_valid_image_extension", "." ]
python
train
Sheeprider/BitBucket-api
bitbucket/issue_comment.py
https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/issue_comment.py#L44-L55
def create(self, issue_id=None, repo_slug=None, **kwargs): """ Add an issue comment to one of your repositories. Each issue comment require only the content data field the system autopopulate the rest. """ issue_id = issue_id or self.issue_id repo_slug = repo_slug or self.bitbucket.repo_slug or '' url = self.bitbucket.url('CREATE_COMMENT', username=self.bitbucket.username, repo_slug=repo_slug, issue_id=issue_id) return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth, **kwargs)
[ "def", "create", "(", "self", ",", "issue_id", "=", "None", ",", "repo_slug", "=", "None", ",", "*", "*", "kwargs", ")", ":", "issue_id", "=", "issue_id", "or", "self", ".", "issue_id", "repo_slug", "=", "repo_slug", "or", "self", ".", "bitbucket", "."...
Add an issue comment to one of your repositories. Each issue comment require only the content data field the system autopopulate the rest.
[ "Add", "an", "issue", "comment", "to", "one", "of", "your", "repositories", ".", "Each", "issue", "comment", "require", "only", "the", "content", "data", "field", "the", "system", "autopopulate", "the", "rest", "." ]
python
train
Qiskit/qiskit-terra
qiskit/quantum_info/operators/pauli.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/quantum_info/operators/pauli.py#L82-L110
def from_label(cls, label): r"""Take pauli string to construct pauli. The qubit index of pauli label is q_{n-1} ... q_0. E.g., a pauli is $P_{n-1} \otimes ... \otimes P_0$ Args: label (str): pauli label Returns: Pauli: the constructed pauli Raises: QiskitError: invalid character in the label """ z = np.zeros(len(label), dtype=np.bool) x = np.zeros(len(label), dtype=np.bool) for i, char in enumerate(label): if char == 'X': x[-i - 1] = True elif char == 'Z': z[-i - 1] = True elif char == 'Y': z[-i - 1] = True x[-i - 1] = True elif char != 'I': raise QiskitError("Pauli string must be only consisted of 'I', 'X', " "'Y' or 'Z' but you have {}.".format(char)) return cls(z=z, x=x)
[ "def", "from_label", "(", "cls", ",", "label", ")", ":", "z", "=", "np", ".", "zeros", "(", "len", "(", "label", ")", ",", "dtype", "=", "np", ".", "bool", ")", "x", "=", "np", ".", "zeros", "(", "len", "(", "label", ")", ",", "dtype", "=", ...
r"""Take pauli string to construct pauli. The qubit index of pauli label is q_{n-1} ... q_0. E.g., a pauli is $P_{n-1} \otimes ... \otimes P_0$ Args: label (str): pauli label Returns: Pauli: the constructed pauli Raises: QiskitError: invalid character in the label
[ "r", "Take", "pauli", "string", "to", "construct", "pauli", "." ]
python
test
sci-bots/dmf-device-ui
dmf_device_ui/view.py
https://github.com/sci-bots/dmf-device-ui/blob/05b480683c9fa43f91ce5a58de2fa90cdf363fc8/dmf_device_ui/view.py#L288-L308
def on_canvas_slave__route_electrode_added(self, slave, electrode_id): ''' .. versionchanged:: 0.11 Draw temporary route currently being formed. .. versionchanged:: 0.11.3 Update routes table by setting ``df_routes`` property of :attr:`canvas_slave`. ''' logger.debug('Route electrode added: %s', electrode_id) if slave._route.electrode_ids is None: return df_route = pd.DataFrame([[-1, e, i] for i, e in enumerate(slave._route.electrode_ids)], columns=['route_i', 'electrode_i', 'transition_i']) # XXX Negative `route_i` corresponds to temporary route being # drawn. Append row entries for temporary route to existing routes # table. df_routes = slave.df_routes.loc[slave.df_routes.route_i >= 0].copy() self.canvas_slave.df_routes = pd.concat([df_routes, df_route])
[ "def", "on_canvas_slave__route_electrode_added", "(", "self", ",", "slave", ",", "electrode_id", ")", ":", "logger", ".", "debug", "(", "'Route electrode added: %s'", ",", "electrode_id", ")", "if", "slave", ".", "_route", ".", "electrode_ids", "is", "None", ":", ...
.. versionchanged:: 0.11 Draw temporary route currently being formed. .. versionchanged:: 0.11.3 Update routes table by setting ``df_routes`` property of :attr:`canvas_slave`.
[ "..", "versionchanged", "::", "0", ".", "11", "Draw", "temporary", "route", "currently", "being", "formed", "." ]
python
train
devassistant/devassistant
devassistant/gui/gui_helper.py
https://github.com/devassistant/devassistant/blob/2dbfeaa666a64127263664d18969c55d19ecc83e/devassistant/gui/gui_helper.py#L418-L431
def create_textview(self, wrap_mode=Gtk.WrapMode.WORD_CHAR, justify=Gtk.Justification.LEFT, visible=True, editable=True): """ Function creates a text view with wrap_mode and justification """ text_view = Gtk.TextView() text_view.set_wrap_mode(wrap_mode) text_view.set_editable(editable) if not editable: text_view.set_cursor_visible(False) else: text_view.set_cursor_visible(visible) text_view.set_justification(justify) return text_view
[ "def", "create_textview", "(", "self", ",", "wrap_mode", "=", "Gtk", ".", "WrapMode", ".", "WORD_CHAR", ",", "justify", "=", "Gtk", ".", "Justification", ".", "LEFT", ",", "visible", "=", "True", ",", "editable", "=", "True", ")", ":", "text_view", "=", ...
Function creates a text view with wrap_mode and justification
[ "Function", "creates", "a", "text", "view", "with", "wrap_mode", "and", "justification" ]
python
train
apache/incubator-mxnet
example/ctc/captcha_generator.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ctc/captcha_generator.py#L126-L134
def _gen_sample(self): """Generate a random captcha image sample Returns ------- (numpy.ndarray, str) Tuple of image (numpy ndarray) and character string of digits used to generate the image """ num_str = self.get_rand(self.num_digit_min, self.num_digit_max) return self.captcha.image(num_str), num_str
[ "def", "_gen_sample", "(", "self", ")", ":", "num_str", "=", "self", ".", "get_rand", "(", "self", ".", "num_digit_min", ",", "self", ".", "num_digit_max", ")", "return", "self", ".", "captcha", ".", "image", "(", "num_str", ")", ",", "num_str" ]
Generate a random captcha image sample Returns ------- (numpy.ndarray, str) Tuple of image (numpy ndarray) and character string of digits used to generate the image
[ "Generate", "a", "random", "captcha", "image", "sample", "Returns", "-------", "(", "numpy", ".", "ndarray", "str", ")", "Tuple", "of", "image", "(", "numpy", "ndarray", ")", "and", "character", "string", "of", "digits", "used", "to", "generate", "the", "i...
python
train
mikicz/arca
arca/utils.py
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/utils.py#L96-L139
def get(self, *keys: str, default: Any = NOT_SET) -> Any: """ Returns values from the settings in the order of keys, the first value encountered is used. Example: >>> settings = Settings({"ARCA_ONE": 1, "ARCA_TWO": 2}) >>> settings.get("one") 1 >>> settings.get("one", "two") 1 >>> settings.get("two", "one") 2 >>> settings.get("three", "one") 1 >>> settings.get("three", default=3) 3 >>> settings.get("three") Traceback (most recent call last): ... KeyError: :param keys: One or more keys to get from settings. If multiple keys are provided, the value of the first key that has a value is returned. :param default: If none of the ``options`` aren't set, return this value. :return: A value from the settings or the default. :raise ValueError: If no keys are provided. :raise KeyError: If none of the keys are set and no default is provided. """ if not len(keys): raise ValueError("At least one key must be provided.") for option in keys: key = f"{self.PREFIX}_{option.upper()}" if key in self._data: return self._data[key] if default is NOT_SET: raise KeyError("None of the following key is present in settings and no default is set: {}".format( ", ".join(keys) )) return default
[ "def", "get", "(", "self", ",", "*", "keys", ":", "str", ",", "default", ":", "Any", "=", "NOT_SET", ")", "->", "Any", ":", "if", "not", "len", "(", "keys", ")", ":", "raise", "ValueError", "(", "\"At least one key must be provided.\"", ")", "for", "op...
Returns values from the settings in the order of keys, the first value encountered is used. Example: >>> settings = Settings({"ARCA_ONE": 1, "ARCA_TWO": 2}) >>> settings.get("one") 1 >>> settings.get("one", "two") 1 >>> settings.get("two", "one") 2 >>> settings.get("three", "one") 1 >>> settings.get("three", default=3) 3 >>> settings.get("three") Traceback (most recent call last): ... KeyError: :param keys: One or more keys to get from settings. If multiple keys are provided, the value of the first key that has a value is returned. :param default: If none of the ``options`` aren't set, return this value. :return: A value from the settings or the default. :raise ValueError: If no keys are provided. :raise KeyError: If none of the keys are set and no default is provided.
[ "Returns", "values", "from", "the", "settings", "in", "the", "order", "of", "keys", "the", "first", "value", "encountered", "is", "used", "." ]
python
train
IndicoDataSolutions/IndicoIo-python
indicoio/custom/custom.py
https://github.com/IndicoDataSolutions/IndicoIo-python/blob/6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa/indicoio/custom/custom.py#L262-L267
def info(self, cloud=None, api_key=None, version=None, **kwargs): """ Return the current state of the model associated with a given collection """ url_params = {"batch": False, "api_key": api_key, "version": version, "method": "info"} return self._api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs)
[ "def", "info", "(", "self", ",", "cloud", "=", "None", ",", "api_key", "=", "None", ",", "version", "=", "None", ",", "*", "*", "kwargs", ")", ":", "url_params", "=", "{", "\"batch\"", ":", "False", ",", "\"api_key\"", ":", "api_key", ",", "\"version...
Return the current state of the model associated with a given collection
[ "Return", "the", "current", "state", "of", "the", "model", "associated", "with", "a", "given", "collection" ]
python
train
jadolg/rocketchat_API
rocketchat_API/rocketchat.py
https://github.com/jadolg/rocketchat_API/blob/f220d094434991cb9892418245f054ea06f28aad/rocketchat_API/rocketchat.py#L372-L374
def channels_set_type(self, room_id, a_type, **kwargs): """Sets the type of room this channel should be. The type of room this channel should be, either c or p.""" return self.__call_api_post('channels.setType', roomId=room_id, type=a_type, kwargs=kwargs)
[ "def", "channels_set_type", "(", "self", ",", "room_id", ",", "a_type", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "__call_api_post", "(", "'channels.setType'", ",", "roomId", "=", "room_id", ",", "type", "=", "a_type", ",", "kwargs", "=", ...
Sets the type of room this channel should be. The type of room this channel should be, either c or p.
[ "Sets", "the", "type", "of", "room", "this", "channel", "should", "be", ".", "The", "type", "of", "room", "this", "channel", "should", "be", "either", "c", "or", "p", "." ]
python
train
jwhitlock/drf-cached-instances
sample_poll_app/cache.py
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/sample_poll_app/cache.py#L106-L114
def choice_default_loader(self, pk): """Load a Choice from the database.""" try: obj = Choice.objects.get(pk=pk) except Choice.DoesNotExist: return None else: self.choice_default_add_related_pks(obj) return obj
[ "def", "choice_default_loader", "(", "self", ",", "pk", ")", ":", "try", ":", "obj", "=", "Choice", ".", "objects", ".", "get", "(", "pk", "=", "pk", ")", "except", "Choice", ".", "DoesNotExist", ":", "return", "None", "else", ":", "self", ".", "choi...
Load a Choice from the database.
[ "Load", "a", "Choice", "from", "the", "database", "." ]
python
train
blubberdiblub/eztemplate
eztemplate/__main__.py
https://github.com/blubberdiblub/eztemplate/blob/ab5b2b4987c045116d130fd83e216704b8edfb5d/eztemplate/__main__.py#L220-L227
def dump_engines(target=sys.stderr): """Print successfully imported templating engines.""" print("Available templating engines:", file=target) width = max(len(engine) for engine in engines.engines) for handle, engine in sorted(engines.engines.items()): description = engine.__doc__.split('\n', 0)[0] print(" %-*s - %s" % (width, handle, description), file=target)
[ "def", "dump_engines", "(", "target", "=", "sys", ".", "stderr", ")", ":", "print", "(", "\"Available templating engines:\"", ",", "file", "=", "target", ")", "width", "=", "max", "(", "len", "(", "engine", ")", "for", "engine", "in", "engines", ".", "en...
Print successfully imported templating engines.
[ "Print", "successfully", "imported", "templating", "engines", "." ]
python
train
lokhman/pydbal
pydbal/connection.py
https://github.com/lokhman/pydbal/blob/53f396a2a18826e9fff178cd2c0636c1656cbaea/pydbal/connection.py#L504-L514
def release_savepoint(self, savepoint): """Releases the given savepoint. :param savepoint: the name of the savepoint to release :raise: pydbal.exception.DBALConnectionError """ if not self._platform.is_savepoints_supported(): raise DBALConnectionError.savepoints_not_supported() if self._platform.is_release_savepoints_supported(): self.ensure_connected() self._platform.release_savepoint(savepoint)
[ "def", "release_savepoint", "(", "self", ",", "savepoint", ")", ":", "if", "not", "self", ".", "_platform", ".", "is_savepoints_supported", "(", ")", ":", "raise", "DBALConnectionError", ".", "savepoints_not_supported", "(", ")", "if", "self", ".", "_platform", ...
Releases the given savepoint. :param savepoint: the name of the savepoint to release :raise: pydbal.exception.DBALConnectionError
[ "Releases", "the", "given", "savepoint", "." ]
python
train
Archived-Object/ligament
ligament/helpers.py
https://github.com/Archived-Object/ligament/blob/ff3d78130522676a20dc64086dc8a27b197cc20f/ligament/helpers.py#L65-L67
def compose(*funcs): """compose a list of functions""" return lambda x: reduce(lambda v, f: f(v), reversed(funcs), x)
[ "def", "compose", "(", "*", "funcs", ")", ":", "return", "lambda", "x", ":", "reduce", "(", "lambda", "v", ",", "f", ":", "f", "(", "v", ")", ",", "reversed", "(", "funcs", ")", ",", "x", ")" ]
compose a list of functions
[ "compose", "a", "list", "of", "functions" ]
python
train
timothydmorton/VESPA
vespa/stars/utils.py
https://github.com/timothydmorton/VESPA/blob/0446b54d48009f3655cfd1a3957ceea21d3adcaa/vespa/stars/utils.py#L176-L183
def fluxfrac(*mags): """Returns fraction of total flux in first argument, assuming all are magnitudes. """ Ftot = 0 for mag in mags: Ftot += 10**(-0.4*mag) F1 = 10**(-0.4*mags[0]) return F1/Ftot
[ "def", "fluxfrac", "(", "*", "mags", ")", ":", "Ftot", "=", "0", "for", "mag", "in", "mags", ":", "Ftot", "+=", "10", "**", "(", "-", "0.4", "*", "mag", ")", "F1", "=", "10", "**", "(", "-", "0.4", "*", "mags", "[", "0", "]", ")", "return",...
Returns fraction of total flux in first argument, assuming all are magnitudes.
[ "Returns", "fraction", "of", "total", "flux", "in", "first", "argument", "assuming", "all", "are", "magnitudes", "." ]
python
train
rsgalloway/grit
grit/repo/local.py
https://github.com/rsgalloway/grit/blob/e6434ad8a1f4ac5d0903ebad630c81f8a5164d78/grit/repo/local.py#L159-L169
def setVersion(self, version): """ Checkout a version of the repo. :param version: Version number. """ try: sha = self.versions(version).commit.sha self.git.reset("--hard", sha) except Exception, e: raise RepoError(e)
[ "def", "setVersion", "(", "self", ",", "version", ")", ":", "try", ":", "sha", "=", "self", ".", "versions", "(", "version", ")", ".", "commit", ".", "sha", "self", ".", "git", ".", "reset", "(", "\"--hard\"", ",", "sha", ")", "except", "Exception", ...
Checkout a version of the repo. :param version: Version number.
[ "Checkout", "a", "version", "of", "the", "repo", "." ]
python
train
iancmcc/ouimeaux
ouimeaux/pysignals/dispatcher.py
https://github.com/iancmcc/ouimeaux/blob/89f3d05e7ae0a356690f898a4e1801ea3c104200/ouimeaux/pysignals/dispatcher.py#L257-L294
def _live_receivers(self, sender): """ Filter sequence of receivers to get resolved, live receivers. This checks for weak references and resolves them, then returning only live receivers. """ receivers = None if self.use_caching and not self._dead_receivers: receivers = self.sender_receivers_cache.get(sender) # We could end up here with NO_RECEIVERS even if we do check this case in # .send() prior to calling _live_receivers() due to concurrent .send() call. if receivers is NO_RECEIVERS: return [] if receivers is None: with self.lock: self._clear_dead_receivers() senderkey = _make_id(sender) receivers = [] for (receiverkey, r_senderkey), receiver in self.receivers: if r_senderkey == NONE_ID or r_senderkey == senderkey: receivers.append(receiver) if self.use_caching: if not receivers: self.sender_receivers_cache[sender] = NO_RECEIVERS else: # Note, we must cache the weakref versions. self.sender_receivers_cache[sender] = receivers non_weak_receivers = [] for receiver in receivers: if isinstance(receiver, weakref.ReferenceType): # Dereference the weak reference. receiver = receiver() if receiver is not None: non_weak_receivers.append(receiver) else: non_weak_receivers.append(receiver) return non_weak_receivers
[ "def", "_live_receivers", "(", "self", ",", "sender", ")", ":", "receivers", "=", "None", "if", "self", ".", "use_caching", "and", "not", "self", ".", "_dead_receivers", ":", "receivers", "=", "self", ".", "sender_receivers_cache", ".", "get", "(", "sender",...
Filter sequence of receivers to get resolved, live receivers. This checks for weak references and resolves them, then returning only live receivers.
[ "Filter", "sequence", "of", "receivers", "to", "get", "resolved", "live", "receivers", "." ]
python
train
CalebBell/ht
ht/conv_plate.py
https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/conv_plate.py#L51-L147
def Nu_plate_Kumar(Re, Pr, chevron_angle, mu=None, mu_wall=None): r'''Calculates Nusselt number for single-phase flow in a **well-designed** Chevron-style plate heat exchanger according to [1]_. The data is believed to have been developed by APV International Limited, since acquired by SPX Corporation. This uses a curve fit of that data published in [2]_. .. math:: Nu = C_1 Re^m Pr^{0.33}\left(\frac{\mu}{\mu_{wall}}\right)^{0.17} `C1` and `m` are coefficients looked up in a table, with varying ranges of Re validity and chevron angle validity. See the source for their exact values. The wall fluid property correction is included only if the viscosity values are provided. Parameters ---------- Re : float Reynolds number with respect to the hydraulic diameter of the channels, [-] Pr : float Prandtl number calculated with bulk fluid properties, [-] chevron_angle : float Angle of the plate corrugations with respect to the vertical axis (the direction of flow if the plates were straight), between 0 and 90. Many plate exchangers use two alternating patterns; use their average angle for that situation [degrees] mu : float, optional Viscosity of the fluid at the bulk (inlet and outlet average) temperature, [Pa*s] mu_wall : float, optional Viscosity of fluid at wall temperature, [Pa*s] Returns ------- Nu : float Nusselt number with respect to `Dh`, [-] Notes ----- Data on graph from Re=0.1 to Re=10000, with chevron angles 30 to 65 degrees. See `PlateExchanger` for further clarification on the definitions. It is believed the constants used in this correlation were curve-fit to the actual graph in [1]_ by the author of [2]_ as there is no As the coefficients change, there are numerous small discontinuities, although the data on the graphs is continuous with sharp transitions of the slope. The author of [1]_ states clearly this correlation is "applicable only to well designed Chevron PHEs". 
Examples -------- >>> Nu_plate_Kumar(Re=2000, Pr=0.7, chevron_angle=30) 47.757818892853955 With the wall-correction factor included: >>> Nu_plate_Kumar(Re=2000, Pr=0.7, chevron_angle=30, mu=1E-3, mu_wall=8E-4) 49.604284135097544 References ---------- .. [1] Kumar, H. "The plate heat exchanger: construction and design." In First U.K. National Conference on Heat Transfer: Held at the University of Leeds, 3-5 July 1984, Institute of Chemical Engineering Symposium Series, vol. 86, pp. 1275-1288. 1984. .. [2] Ayub, Zahid H. "Plate Heat Exchanger Literature Survey and New Heat Transfer and Pressure Drop Correlations for Refrigerant Evaporators." Heat Transfer Engineering 24, no. 5 (September 1, 2003): 3-16. doi:10.1080/01457630304056. ''' # Uses the standard diameter as characteristic diameter beta_list_len = len(Kumar_beta_list) for i in range(beta_list_len): if chevron_angle <= Kumar_beta_list[i]: C1_options, m_options, Re_ranges = Kumar_C1s[i], Kumar_ms[i], Kumar_Nu_Res[i] break elif i == beta_list_len-1: C1_options, m_options, Re_ranges = Kumar_C1s[-1], Kumar_ms[-1], Kumar_Nu_Res[-1] Re_len = len(Re_ranges) for j in range(Re_len): if Re <= Re_ranges[j]: C1, m = C1_options[j], m_options[j] break elif j == Re_len-1: C1, m = C1_options[-1], m_options[-1] Nu = C1*Re**m*Pr**0.33 if mu_wall is not None and mu is not None: Nu *= (mu/mu_wall)**0.17 return Nu
[ "def", "Nu_plate_Kumar", "(", "Re", ",", "Pr", ",", "chevron_angle", ",", "mu", "=", "None", ",", "mu_wall", "=", "None", ")", ":", "# Uses the standard diameter as characteristic diameter", "beta_list_len", "=", "len", "(", "Kumar_beta_list", ")", "for", "i", "...
r'''Calculates Nusselt number for single-phase flow in a **well-designed** Chevron-style plate heat exchanger according to [1]_. The data is believed to have been developed by APV International Limited, since acquired by SPX Corporation. This uses a curve fit of that data published in [2]_. .. math:: Nu = C_1 Re^m Pr^{0.33}\left(\frac{\mu}{\mu_{wall}}\right)^{0.17} `C1` and `m` are coefficients looked up in a table, with varying ranges of Re validity and chevron angle validity. See the source for their exact values. The wall fluid property correction is included only if the viscosity values are provided. Parameters ---------- Re : float Reynolds number with respect to the hydraulic diameter of the channels, [-] Pr : float Prandtl number calculated with bulk fluid properties, [-] chevron_angle : float Angle of the plate corrugations with respect to the vertical axis (the direction of flow if the plates were straight), between 0 and 90. Many plate exchangers use two alternating patterns; use their average angle for that situation [degrees] mu : float, optional Viscosity of the fluid at the bulk (inlet and outlet average) temperature, [Pa*s] mu_wall : float, optional Viscosity of fluid at wall temperature, [Pa*s] Returns ------- Nu : float Nusselt number with respect to `Dh`, [-] Notes ----- Data on graph from Re=0.1 to Re=10000, with chevron angles 30 to 65 degrees. See `PlateExchanger` for further clarification on the definitions. It is believed the constants used in this correlation were curve-fit to the actual graph in [1]_ by the author of [2]_ as there is no As the coefficients change, there are numerous small discontinuities, although the data on the graphs is continuous with sharp transitions of the slope. The author of [1]_ states clearly this correlation is "applicable only to well designed Chevron PHEs". 
Examples -------- >>> Nu_plate_Kumar(Re=2000, Pr=0.7, chevron_angle=30) 47.757818892853955 With the wall-correction factor included: >>> Nu_plate_Kumar(Re=2000, Pr=0.7, chevron_angle=30, mu=1E-3, mu_wall=8E-4) 49.604284135097544 References ---------- .. [1] Kumar, H. "The plate heat exchanger: construction and design." In First U.K. National Conference on Heat Transfer: Held at the University of Leeds, 3-5 July 1984, Institute of Chemical Engineering Symposium Series, vol. 86, pp. 1275-1288. 1984. .. [2] Ayub, Zahid H. "Plate Heat Exchanger Literature Survey and New Heat Transfer and Pressure Drop Correlations for Refrigerant Evaporators." Heat Transfer Engineering 24, no. 5 (September 1, 2003): 3-16. doi:10.1080/01457630304056.
[ "r", "Calculates", "Nusselt", "number", "for", "single", "-", "phase", "flow", "in", "a", "**", "well", "-", "designed", "**", "Chevron", "-", "style", "plate", "heat", "exchanger", "according", "to", "[", "1", "]", "_", ".", "The", "data", "is", "beli...
python
train
saltstack/salt
salt/modules/vsphere.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/vsphere.py#L1189-L1283
def reset_syslog_config(host, username, password, protocol=None, port=None, syslog_config=None, esxi_hosts=None, credstore=None): ''' Reset the syslog service to its default settings. Valid syslog_config values are ``logdir``, ``loghost``, ``logdir-unique``, ``default-rotate``, ``default-size``, ``default-timeout``, or ``all`` for all of these. host The location of the host. username The username used to login to the host, such as ``root``. password The password used to login to the host. protocol Optionally set to alternate protocol if the host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the host is not using the default port. Default port is ``443``. syslog_config List of parameters to reset, provided as a comma-delimited string, or 'all' to reset all syslog configuration parameters. Required. esxi_hosts If ``host`` is a vCenter host, then use esxi_hosts to execute this function on a list of one or more ESXi machines. credstore Optionally set to path to the credential store file. :return: Dictionary with a top-level key of 'success' which indicates if all the parameters were reset, and individual keys for each parameter indicating which succeeded or failed, per host. CLI Example: ``syslog_config`` can be passed as a quoted, comma-separated string, e.g. .. 
code-block:: bash # Used for ESXi host connection information salt '*' vsphere.reset_syslog_config my.esxi.host root bad-password \ syslog_config='logdir,loghost' # Used for connecting to a vCenter Server salt '*' vsphere.reset_syslog_config my.vcenter.location root bad-password \ syslog_config='logdir,loghost' esxi_hosts='[esxi-1.host.com, esxi-2.host.com]' ''' if not syslog_config: raise CommandExecutionError('The \'reset_syslog_config\' function requires a ' '\'syslog_config\' setting.') valid_resets = ['logdir', 'loghost', 'default-rotate', 'default-size', 'default-timeout', 'logdir-unique'] cmd = 'system syslog config set --reset=' if ',' in syslog_config: resets = [ind_reset.strip() for ind_reset in syslog_config.split(',')] elif syslog_config == 'all': resets = valid_resets else: resets = [syslog_config] ret = {} if esxi_hosts: if not isinstance(esxi_hosts, list): raise CommandExecutionError('\'esxi_hosts\' must be a list.') for esxi_host in esxi_hosts: response_dict = _reset_syslog_config_params(host, username, password, cmd, resets, valid_resets, protocol=protocol, port=port, esxi_host=esxi_host, credstore=credstore) ret.update({esxi_host: response_dict}) else: # Handles a single host or a vCenter connection when no esxi_hosts are provided. response_dict = _reset_syslog_config_params(host, username, password, cmd, resets, valid_resets, protocol=protocol, port=port, credstore=credstore) ret.update({host: response_dict}) return ret
[ "def", "reset_syslog_config", "(", "host", ",", "username", ",", "password", ",", "protocol", "=", "None", ",", "port", "=", "None", ",", "syslog_config", "=", "None", ",", "esxi_hosts", "=", "None", ",", "credstore", "=", "None", ")", ":", "if", "not", ...
Reset the syslog service to its default settings. Valid syslog_config values are ``logdir``, ``loghost``, ``logdir-unique``, ``default-rotate``, ``default-size``, ``default-timeout``, or ``all`` for all of these. host The location of the host. username The username used to login to the host, such as ``root``. password The password used to login to the host. protocol Optionally set to alternate protocol if the host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the host is not using the default port. Default port is ``443``. syslog_config List of parameters to reset, provided as a comma-delimited string, or 'all' to reset all syslog configuration parameters. Required. esxi_hosts If ``host`` is a vCenter host, then use esxi_hosts to execute this function on a list of one or more ESXi machines. credstore Optionally set to path to the credential store file. :return: Dictionary with a top-level key of 'success' which indicates if all the parameters were reset, and individual keys for each parameter indicating which succeeded or failed, per host. CLI Example: ``syslog_config`` can be passed as a quoted, comma-separated string, e.g. .. code-block:: bash # Used for ESXi host connection information salt '*' vsphere.reset_syslog_config my.esxi.host root bad-password \ syslog_config='logdir,loghost' # Used for connecting to a vCenter Server salt '*' vsphere.reset_syslog_config my.vcenter.location root bad-password \ syslog_config='logdir,loghost' esxi_hosts='[esxi-1.host.com, esxi-2.host.com]'
[ "Reset", "the", "syslog", "service", "to", "its", "default", "settings", "." ]
python
train
MagicStack/asyncpg
asyncpg/connect_utils.py
https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/connect_utils.py#L105-L139
def _read_password_from_pgpass( *, passfile: typing.Optional[pathlib.Path], hosts: typing.List[str], ports: typing.List[int], database: str, user: str): """Parse the pgpass file and return the matching password. :return: Password string, if found, ``None`` otherwise. """ passtab = _read_password_file(passfile) if not passtab: return None for host, port in zip(hosts, ports): if host.startswith('/'): # Unix sockets get normalized into 'localhost' host = 'localhost' for phost, pport, pdatabase, puser, ppassword in passtab: if phost != '*' and phost != host: continue if pport != '*' and pport != str(port): continue if pdatabase != '*' and pdatabase != database: continue if puser != '*' and puser != user: continue # Found a match. return ppassword return None
[ "def", "_read_password_from_pgpass", "(", "*", ",", "passfile", ":", "typing", ".", "Optional", "[", "pathlib", ".", "Path", "]", ",", "hosts", ":", "typing", ".", "List", "[", "str", "]", ",", "ports", ":", "typing", ".", "List", "[", "int", "]", ",...
Parse the pgpass file and return the matching password. :return: Password string, if found, ``None`` otherwise.
[ "Parse", "the", "pgpass", "file", "and", "return", "the", "matching", "password", "." ]
python
train
treycucco/pyebnf
pyebnf/primitive.py
https://github.com/treycucco/pyebnf/blob/3634ddabbe5d73508bcc20f4a591f86a46634e1d/pyebnf/primitive.py#L162-L174
def merged(self, other): """Returns a new ParseNode whose type is this node's type, and whose children are all the children from this node and the other whose length is not 0. """ children = [c for c in itertools.chain(self.children, other.children) if len(c) > 0] # NOTE: Only terminals should have ignored text attached to them, and terminals shouldn't be # merged (probably) so it shouldn't be necessary to copy of ignored -- it should always # be None. But, we'll go ahead and copy it over anyway, recognizing that other's # ignored text will be lost. return ParseNode(self.node_type, children=children, consumed=self.consumed + other.consumed, ignored=self.ignored)
[ "def", "merged", "(", "self", ",", "other", ")", ":", "children", "=", "[", "c", "for", "c", "in", "itertools", ".", "chain", "(", "self", ".", "children", ",", "other", ".", "children", ")", "if", "len", "(", "c", ")", ">", "0", "]", "# NOTE: On...
Returns a new ParseNode whose type is this node's type, and whose children are all the children from this node and the other whose length is not 0.
[ "Returns", "a", "new", "ParseNode", "whose", "type", "is", "this", "node", "s", "type", "and", "whose", "children", "are", "all", "the", "children", "from", "this", "node", "and", "the", "other", "whose", "length", "is", "not", "0", "." ]
python
test
CI-WATER/mapkit
mapkit/RasterLoader.py
https://github.com/CI-WATER/mapkit/blob/ce5fbded6af7adabdf1eec85631c6811ef8ecc34/mapkit/RasterLoader.py#L35-L80
def load(self, tableName='rasters', rasters=[]): ''' Accepts a list of paths to raster files to load into the database. Returns the ids of the rasters loaded successfully in the same order as the list passed in. ''' # Create table if necessary Base.metadata.create_all(self._engine) # Create a session Session = sessionmaker(bind=self._engine) session = Session() for raster in rasters: # Must read in using the raster2pgsql commandline tool. rasterPath = raster['path'] if 'srid' in raster: srid = str(raster['srid']) else: srid = '4326' if 'no-data' in raster: noData = str(raster['no-data']) else: noData = '-1' wellKnownBinary = RasterLoader.rasterToWKB(rasterPath, srid, noData, self._raster2pgsql) rasterBinary = wellKnownBinary # Get the filename filename = os.path.split(rasterPath)[1] # Populate raster record mapKitRaster = MapKitRaster() mapKitRaster.filename = filename mapKitRaster.raster = rasterBinary if 'timestamp' in raster: mapKitRaster.timestamp = raster['timestamp'] # Add to session session.add(mapKitRaster) session.commit()
[ "def", "load", "(", "self", ",", "tableName", "=", "'rasters'", ",", "rasters", "=", "[", "]", ")", ":", "# Create table if necessary", "Base", ".", "metadata", ".", "create_all", "(", "self", ".", "_engine", ")", "# Create a session", "Session", "=", "sessi...
Accepts a list of paths to raster files to load into the database. Returns the ids of the rasters loaded successfully in the same order as the list passed in.
[ "Accepts", "a", "list", "of", "paths", "to", "raster", "files", "to", "load", "into", "the", "database", ".", "Returns", "the", "ids", "of", "the", "rasters", "loaded", "successfully", "in", "the", "same", "order", "as", "the", "list", "passed", "in", "....
python
train
mongolab/dex
dex/analyzer.py
https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/analyzer.py#L91-L112
def _ensure_index_cache(self, db_uri, db_name, collection_name):
    """Adds a collection's index entries to the cache if not present.

    Returns the cache entry for the collection: a dict of the form
    {'indexes': <index_information() result, or [] on connection failure>}.
    Returns {'indexes': None} when index checking is disabled or no db_uri
    is provided.
    """
    if not self._check_indexes or db_uri is None:
        return {'indexes': None}

    if db_name not in self.get_cache():
        self._internal_map[db_name] = {}

    if collection_name not in self._internal_map[db_name]:
        indexes = []
        try:
            # Lazily create a single shared connection for index lookups.
            if self._index_cache_connection is None:
                self._index_cache_connection = pymongo.MongoClient(db_uri,
                                                                   document_class=OrderedDict,
                                                                   read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
            db = self._index_cache_connection[db_name]
            indexes = db[collection_name].index_information()
        except Exception:
            # Fix: the original used a bare `except:` and built a warning
            # string that was never emitted; worse, it skipped caching on
            # failure, so the final lookup below raised KeyError. Emit the
            # warning and cache the empty index list instead.
            sys.stderr.write('Warning: unable to connect to ' + db_uri + "\n")

        # NOTE(review): membership is tested against self._internal_map but
        # the entry is written through self.get_cache(); these appear to be
        # the same mapping -- preserved as in the original. Confirm.
        self.get_cache()[db_name][collection_name] = {'indexes': indexes}

    return self.get_cache()[db_name][collection_name]
[ "def", "_ensure_index_cache", "(", "self", ",", "db_uri", ",", "db_name", ",", "collection_name", ")", ":", "if", "not", "self", ".", "_check_indexes", "or", "db_uri", "is", "None", ":", "return", "{", "'indexes'", ":", "None", "}", "if", "db_name", "not",...
Adds a collection's index entries to the cache if not present
[ "Adds", "a", "collections", "index", "entries", "to", "the", "cache", "if", "not", "present" ]
python
train
nion-software/nionswift
nion/swift/model/DocumentModel.py
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/DocumentModel.py#L2072-L2113
def __construct_data_item_reference(self, hardware_source: HardwareSource.HardwareSource, data_channel: HardwareSource.DataChannel):
    """Construct a data item reference.

    Construct a data item reference and assign a data item to it. Update data
    item session id and session metadata. Also connect the data channel
    processor.

    This method is thread safe.
    """
    session_id = self.session_id
    # Reference key is derived from the hardware source id plus channel id,
    # so each (source, channel) pair maps to one persistent data item slot.
    key = self.make_data_item_reference_key(hardware_source.hardware_source_id, data_channel.channel_id)
    data_item_reference = self.get_data_item_reference(key)
    # Thread safety comes from holding the reference's mutex for the whole
    # read-check-create sequence below.
    with data_item_reference.mutex:
        data_item = data_item_reference.data_item
        # if we still don't have a data item, create it.
        if data_item is None:
            data_item = DataItem.DataItem()
            data_item.ensure_data_source()
            # Title includes the channel name only when one is set.
            data_item.title = "%s (%s)" % (hardware_source.display_name, data_channel.name) if data_channel.name else hardware_source.display_name
            data_item.category = "temporary"
            data_item_reference.data_item = data_item

            def append_data_item():
                # Deferred: appends the new data item to the document and
                # records the key -> item mapping.
                self.append_data_item(data_item)
                self._update_data_item_reference(key, data_item)

            # NOTE(review): __call_soon presumably schedules the closure on
            # the document's main thread -- confirm; this keeps document
            # mutation off the caller's (acquisition) thread.
            self.__call_soon(append_data_item)

        def update_session():
            # update the session, but only if necessary (this is an optimization to prevent unnecessary display updates)
            if data_item.session_id != session_id:
                data_item.session_id = session_id
            session_metadata = ApplicationData.get_session_metadata_dict()
            if data_item.session_metadata != session_metadata:
                data_item.session_metadata = session_metadata
            # Connect the processor (if any) to the reference of its source
            # channel so processed output follows the source data item.
            if data_channel.processor:
                src_data_channel = hardware_source.data_channels[data_channel.src_channel_index]
                src_data_item_reference = self.get_data_item_reference(self.make_data_item_reference_key(hardware_source.hardware_source_id, src_data_channel.channel_id))
                data_channel.processor.connect_data_item_reference(src_data_item_reference)

        self.__call_soon(update_session)
    return data_item_reference
[ "def", "__construct_data_item_reference", "(", "self", ",", "hardware_source", ":", "HardwareSource", ".", "HardwareSource", ",", "data_channel", ":", "HardwareSource", ".", "DataChannel", ")", ":", "session_id", "=", "self", ".", "session_id", "key", "=", "self", ...
Construct a data item reference. Construct a data item reference and assign a data item to it. Update data item session id and session metadata. Also connect the data channel processor. This method is thread safe.
[ "Construct", "a", "data", "item", "reference", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/nbformat/v3/nbbase.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/nbformat/v3/nbbase.py#L194-L205
def new_author(name=None, email=None, affiliation=None, url=None):
    """Build a NotebookNode describing an author.

    Each keyword that is provided (not None) is coerced to unicode and set
    as an attribute on the returned node; omitted fields are left unset.
    """
    author = NotebookNode()
    for field, value in (('name', name),
                         ('email', email),
                         ('affiliation', affiliation),
                         ('url', url)):
        if value is not None:
            setattr(author, field, unicode(value))
    return author
[ "def", "new_author", "(", "name", "=", "None", ",", "email", "=", "None", ",", "affiliation", "=", "None", ",", "url", "=", "None", ")", ":", "author", "=", "NotebookNode", "(", ")", "if", "name", "is", "not", "None", ":", "author", ".", "name", "=...
Create a new author.
[ "Create", "a", "new", "author", "." ]
python
test
nesaro/pydsl
pydsl/parser/LR0.py
https://github.com/nesaro/pydsl/blob/00b4fffd72036b80335e1a44a888fac57917ab41/pydsl/parser/LR0.py#L97-L134
def _slr_build_parser_table(productionset):
    """SLR method to build parser table.

    Builds a ParserTable of shift/reduce/accept actions and goto entries
    from the LR(0) item sets of *productionset*, using next_lookup() for
    SLR lookahead on reductions. Raises Exception when more than one action
    is generated for the same (state, symbol) pair (an LR conflict).
    """
    result = ParserTable()
    # Canonical collection of LR(0) item sets; each index is a parser state.
    statesset = build_states_sets(productionset)
    for itemindex, itemset in enumerate(statesset):
        LOG.debug("_slr_build_parser_table: Evaluating itemset:" + str(itemset))
        for symbol in productionset.getSymbols() + [EndSymbol()]:
            # numberoptions counts actions (not gotos) added for this
            # (state, symbol) pair; >1 means a conflict.
            numberoptions = 0
            for lritem in itemset.itemlist:
                #if cursor is before a terminal, and there is a transition to another itemset with the following terminal, append shift rule
                if isinstance(symbol, TerminalSymbol) and lritem.next_symbol() == symbol and itemset.has_transition(symbol):
                    destinationstate = statesset.index(itemset.get_transition(symbol))
                    result.append(itemindex, symbol, "Shift", destinationstate)
                    numberoptions += 1
                # Goto entries are bookkeeping, not actions, so they do not
                # increment numberoptions.
                if isinstance(symbol, NonTerminalSymbol) and lritem.next_symbol() == symbol and itemset.has_transition(symbol):
                    destinationstate = statesset.index(itemset.get_transition(symbol))
                    result.append_goto(itemindex, symbol, destinationstate)
                #if cursor is at the end of the rule, then append reduce rule and go transition
                if lritem.previous_symbol() == symbol and lritem.is_last_position() and symbol != Extended_S:
                    # NOTE(review): next_lookup presumably yields the SLR
                    # follow set of *symbol*; entries may be Grammar or
                    # Symbol instances -- confirm against productionset.
                    for x in productionset.next_lookup(symbol):
                        if isinstance(x, Grammar):
                            result.append(itemindex, TerminalSymbol(x), "Reduce", None, lritem.rule)
                        elif isinstance(x, Symbol):
                            result.append(itemindex, x, "Reduce", None, lritem.rule)
                        else:
                            raise TypeError(x)
                    numberoptions += 1
                #if cursor is at the end of main rule, and current symbol is end, then append accept rule
                if symbol == EndSymbol() and lritem.previous_symbol() == productionset.initialsymbol and lritem.next_symbol() == EndSymbol():
                    result.append(itemindex, symbol, "Accept", None)
                    numberoptions += 1
            if not numberoptions:
                LOG.info("No rule found to generate a new parsertable entry ")
                LOG.debug("symbol: " + str(symbol))
                LOG.debug("itemset: " + str(itemset))
            elif numberoptions > 1: #FIXME can it count duplicated entries?
                raise Exception("LR Conflict %s" % symbol)
    return result
[ "def", "_slr_build_parser_table", "(", "productionset", ")", ":", "result", "=", "ParserTable", "(", ")", "statesset", "=", "build_states_sets", "(", "productionset", ")", "for", "itemindex", ",", "itemset", "in", "enumerate", "(", "statesset", ")", ":", "LOG", ...
SLR method to build parser table
[ "SLR", "method", "to", "build", "parser", "table" ]
python
train
bloomberg/bqplot
bqplot/pyplot.py
https://github.com/bloomberg/bqplot/blob/8eb8b163abe9ee6306f6918067e2f36c1caef2ef/bqplot/pyplot.py#L1268-L1278
def clear():
    """Clears the current context figure of all marks axes and grid lines."""
    figure = _context['figure']
    if figure is None:
        return
    figure.marks = []
    figure.axes = []
    setattr(figure, 'axis_registry', {})
    _context['scales'] = {}
    current_key = _context['current_key']
    if current_key is not None:
        _context['scale_registry'][current_key] = {}
[ "def", "clear", "(", ")", ":", "fig", "=", "_context", "[", "'figure'", "]", "if", "fig", "is", "not", "None", ":", "fig", ".", "marks", "=", "[", "]", "fig", ".", "axes", "=", "[", "]", "setattr", "(", "fig", ",", "'axis_registry'", ",", "{", ...
Clears the current context figure of all marks axes and grid lines.
[ "Clears", "the", "current", "context", "figure", "of", "all", "marks", "axes", "and", "grid", "lines", "." ]
python
train
inonit/drf-haystack
drf_haystack/serializers.py
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/serializers.py#L283-L318
def get_paginate_by_param(self):
    """
    Returns the ``paginate_by_param`` for the (root) view paginator class.
    This is needed in order to remove the query parameter from faceted
    narrow urls.

    If using a custom pagination class, this class attribute needs to be
    set manually.
    """
    # An explicit paginate_by_param on the root serializer wins.
    explicit = getattr(self.root, "paginate_by_param", None)
    if explicit:
        return explicit

    pagination_class = self.context["view"].pagination_class
    if not pagination_class:
        return None

    # Probe the known DRF pagination classes in their documented order:
    # PageNumberPagination, LimitOffsetPagination, CursorPagination.
    for attr in ("page_query_param", "offset_query_param", "cursor_query_param"):
        if hasattr(pagination_class, attr):
            return getattr(pagination_class, attr)

    raise AttributeError(
        "%(root_cls)s is missing a `paginate_by_param` attribute. "
        "Define a %(root_cls)s.paginate_by_param or override "
        "%(cls)s.get_paginate_by_param()." % {
            "root_cls": self.root.__class__.__name__,
            "cls": self.__class__.__name__
        })
[ "def", "get_paginate_by_param", "(", "self", ")", ":", "if", "hasattr", "(", "self", ".", "root", ",", "\"paginate_by_param\"", ")", "and", "self", ".", "root", ".", "paginate_by_param", ":", "return", "self", ".", "root", ".", "paginate_by_param", "pagination...
Returns the ``paginate_by_param`` for the (root) view paginator class. This is needed in order to remove the query parameter from faceted narrow urls. If using a custom pagination class, this class attribute needs to be set manually.
[ "Returns", "the", "paginate_by_param", "for", "the", "(", "root", ")", "view", "paginator", "class", ".", "This", "is", "needed", "in", "order", "to", "remove", "the", "query", "parameter", "from", "faceted", "narrow", "urls", "." ]
python
train