repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
cloud9ers/gurumate
environment/lib/python2.7/site-packages/nose/ext/dtcompat.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/nose/ext/dtcompat.py#L1823-L1844
def run_docstring_examples(f, globs, verbose=False, name="NoName", compileflags=None, optionflags=0): """ Test examples in the given object's docstring (`f`), using `globs` as globals. Optional argument `name` is used in failure messages. If the optional argument `verbose` is true, then generate output even if there are no failures. `compileflags` gives the set of flags that should be used by the Python compiler when running the examples. If not specified, then it will default to the set of future-import flags that apply to `globs`. Optional keyword arg `optionflags` specifies options for the testing and output. See the documentation for `testmod` for more information. """ # Find, parse, and run all tests in the given module. finder = DocTestFinder(verbose=verbose, recurse=False) runner = DocTestRunner(verbose=verbose, optionflags=optionflags) for test in finder.find(f, name, globs=globs): runner.run(test, compileflags=compileflags)
[ "def", "run_docstring_examples", "(", "f", ",", "globs", ",", "verbose", "=", "False", ",", "name", "=", "\"NoName\"", ",", "compileflags", "=", "None", ",", "optionflags", "=", "0", ")", ":", "# Find, parse, and run all tests in the given module.", "finder", "=",...
Test examples in the given object's docstring (`f`), using `globs` as globals. Optional argument `name` is used in failure messages. If the optional argument `verbose` is true, then generate output even if there are no failures. `compileflags` gives the set of flags that should be used by the Python compiler when running the examples. If not specified, then it will default to the set of future-import flags that apply to `globs`. Optional keyword arg `optionflags` specifies options for the testing and output. See the documentation for `testmod` for more information.
[ "Test", "examples", "in", "the", "given", "object", "s", "docstring", "(", "f", ")", "using", "globs", "as", "globals", ".", "Optional", "argument", "name", "is", "used", "in", "failure", "messages", ".", "If", "the", "optional", "argument", "verbose", "is...
python
test
twoolie/NBT
nbt/region.py
https://github.com/twoolie/NBT/blob/b06dd6cc8117d2788da1d8416e642d58bad45762/nbt/region.py#L311-L356
def _parse_header(self): """Read the region header and stores: offset, length and status.""" # update the file size, needed when parse_header is called after # we have unlinked a chunk or writed a new one self.size = self.get_size() if self.size == 0: # Some region files seems to have 0 bytes of size, and # Minecraft handle them without problems. Take them # as empty region files. return elif self.size < 2*SECTOR_LENGTH: raise NoRegionHeader('The region file is %d bytes, too small in size to have a header.' % self.size) for index in range(0, SECTOR_LENGTH, 4): x = int(index//4) % 32 z = int(index//4)//32 m = self.metadata[x, z] self.file.seek(index) offset, length = unpack(">IB", b"\0" + self.file.read(4)) m.blockstart, m.blocklength = offset, length self.file.seek(index + SECTOR_LENGTH) m.timestamp = unpack(">I", self.file.read(4))[0] if offset == 0 and length == 0: m.status = STATUS_CHUNK_NOT_CREATED elif length == 0: m.status = STATUS_CHUNK_ZERO_LENGTH elif offset < 2 and offset != 0: m.status = STATUS_CHUNK_IN_HEADER elif SECTOR_LENGTH * offset + 5 > self.size: # Chunk header can't be read. m.status = STATUS_CHUNK_OUT_OF_FILE else: m.status = STATUS_CHUNK_OK # Check for chunks overlapping in the file for chunks in self._sectors()[2:]: if len(chunks) > 1: # overlapping chunks for m in chunks: # Update status, unless these more severe errors take precedence if m.status not in (STATUS_CHUNK_ZERO_LENGTH, STATUS_CHUNK_IN_HEADER, STATUS_CHUNK_OUT_OF_FILE): m.status = STATUS_CHUNK_OVERLAPPING
[ "def", "_parse_header", "(", "self", ")", ":", "# update the file size, needed when parse_header is called after", "# we have unlinked a chunk or writed a new one", "self", ".", "size", "=", "self", ".", "get_size", "(", ")", "if", "self", ".", "size", "==", "0", ":", ...
Read the region header and stores: offset, length and status.
[ "Read", "the", "region", "header", "and", "stores", ":", "offset", "length", "and", "status", "." ]
python
train
tonybaloney/wily
wily/__main__.py
https://github.com/tonybaloney/wily/blob/bae259354a91b57d56603f0ca7403186f086a84c/wily/__main__.py#L107-L136
def build(ctx, max_revisions, targets, operators, archiver): """Build the wily cache.""" config = ctx.obj["CONFIG"] from wily.commands.build import build if max_revisions: logger.debug(f"Fixing revisions to {max_revisions}") config.max_revisions = max_revisions if operators: logger.debug(f"Fixing operators to {operators}") config.operators = operators.strip().split(",") if archiver: logger.debug(f"Fixing archiver to {archiver}") config.archiver = archiver if targets: logger.debug(f"Fixing targets to {targets}") config.targets = targets build( config=config, archiver=resolve_archiver(config.archiver), operators=resolve_operators(config.operators), ) logger.info( "Completed building wily history, run `wily report <file>` or `wily index` to see more." )
[ "def", "build", "(", "ctx", ",", "max_revisions", ",", "targets", ",", "operators", ",", "archiver", ")", ":", "config", "=", "ctx", ".", "obj", "[", "\"CONFIG\"", "]", "from", "wily", ".", "commands", ".", "build", "import", "build", "if", "max_revision...
Build the wily cache.
[ "Build", "the", "wily", "cache", "." ]
python
train
pyparsing/pyparsing
examples/pymicko.py
https://github.com/pyparsing/pyparsing/blob/f0264bd8d1a548a50b3e5f7d99cfefd577942d14/examples/pymicko.py#L679-L682
def global_var(self, name): """Inserts a new static (global) variable definition""" self.newline_label(name, False, True) self.newline_text("WORD\t1", True)
[ "def", "global_var", "(", "self", ",", "name", ")", ":", "self", ".", "newline_label", "(", "name", ",", "False", ",", "True", ")", "self", ".", "newline_text", "(", "\"WORD\\t1\"", ",", "True", ")" ]
Inserts a new static (global) variable definition
[ "Inserts", "a", "new", "static", "(", "global", ")", "variable", "definition" ]
python
train
rlisagor/freshen
examples/twisted/features/steps.py
https://github.com/rlisagor/freshen/blob/5578f7368e8d53b4cf51c589fb192090d3524968/examples/twisted/features/steps.py#L10-L22
def simulate_async_event(): """Simulate an asynchronous event.""" scc.state = 'executing' def async_event(result): """All other asynchronous events or function calls returned from later steps will wait until this callback fires.""" scc.state = result return 'some event result' deferred = Deferred() reactor.callLater(1, deferred.callback, 'done') # pylint: disable=E1101 deferred.addCallback(async_event) return deferred
[ "def", "simulate_async_event", "(", ")", ":", "scc", ".", "state", "=", "'executing'", "def", "async_event", "(", "result", ")", ":", "\"\"\"All other asynchronous events or function calls\n returned from later steps will wait until this\n callback fires.\"\"\"", "scc...
Simulate an asynchronous event.
[ "Simulate", "an", "asynchronous", "event", "." ]
python
train
senaite/senaite.core
bika/lims/exportimport/instruments/shimadzu/gcms/qp2010se.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/exportimport/instruments/shimadzu/gcms/qp2010se.py#L221-L292
def parse_quantitationesultsline(self, line): """ Parses quantitation result lines Please see samples/GC-MS output.txt [MS Quantitative Results] section """ # [MS Quantitative Results] if line.startswith(self.QUANTITATIONRESULTS_KEY) \ or line.startswith(self.QUANTITATIONRESULTS_NUMBEROFIDS) \ or line.startswith(self.SIMILARITYSEARCHRESULTS_KEY) \ or line.startswith(self.PEAK_TABLE_KEY): # Nothing to do, continue return 0 # # of IDs \t23 if line.startswith(self.QUANTITATIONRESULTS_HEADER_ID_NUMBER): self._quantitationresultsheader = [token.strip() for token in line.split('\t') if token.strip()] return 0 # 1 \talpha-Pinene \tTarget \t0 \t93.00 \t7.738 \t7.680 \t7.795 \t2.480 # \t344488 \t138926 \t0.02604 \tAuto \t2 \t7.812 \tLinear \t0 \t0 # \t4.44061e+008 \t278569 \t0 \t0 \t38.94 \t38.58 \t0.00 \t98 \t92.00 # \t0 \t0 \t38.94 \t38.58 \t91.00 \t0 \t0 \t38.93 \t40.02 \t0 \t0 \t0 # \t0 \t0 \t0 \t0 #\t0 \t0 \t0 \t0 \t0 \t0 \t0 \t0 \t75.27 \tmg \t0.000 splitted = [token.strip() for token in line.split('\t')] ar_id = self._header['Data File Name'].split('\\')[-1].split('.')[0] quantitation = {'DefaultResult': 'Conc.', 'AR': ar_id} for colname in self._quantitationresultsheader: quantitation[colname] = '' for i in range(len(splitted)): token = splitted[i] if i < len(self._quantitationresultsheader): colname = self._quantitationresultsheader[i] if colname in self.QUANTITATIONRESULTS_NUMERICHEADERS: try: quantitation[colname] = float(token) except ValueError: self.warn( "No valid number ${token} in column " "${index} (${column_name})", mapping={"token": token, "index": str(i + 1), "column_name": colname}, numline=self._numline, line=line) quantitation[colname] = token else: quantitation[colname] = token # val = re.sub(r"\W", "", splitted[1]) # self._addRawResult(quantitation['AR'], # values={val:quantitation}, # override=False) elif token: self.err("Orphan value in column ${index} (${token})", mapping={"index": str(i+1), "token": token}, numline=self._numline, line=line) 
result = quantitation[quantitation['DefaultResult']] column_name = quantitation['DefaultResult'] result = self.zeroValueDefaultInstrumentResults(column_name, result, line) quantitation[quantitation['DefaultResult']] = result val = re.sub(r"\W", "", splitted[1]) self._addRawResult(quantitation['AR'], values={val: quantitation}, override=False)
[ "def", "parse_quantitationesultsline", "(", "self", ",", "line", ")", ":", "# [MS Quantitative Results]", "if", "line", ".", "startswith", "(", "self", ".", "QUANTITATIONRESULTS_KEY", ")", "or", "line", ".", "startswith", "(", "self", ".", "QUANTITATIONRESULTS_NUMBE...
Parses quantitation result lines Please see samples/GC-MS output.txt [MS Quantitative Results] section
[ "Parses", "quantitation", "result", "lines", "Please", "see", "samples", "/", "GC", "-", "MS", "output", ".", "txt", "[", "MS", "Quantitative", "Results", "]", "section" ]
python
train
juju/charm-helpers
charmhelpers/core/hookenv.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/core/hookenv.py#L1215-L1221
def _run_atexit(): '''Hook frameworks must invoke this after the main hook body has successfully completed. Do not invoke it if the hook fails.''' global _atexit for callback, args, kwargs in reversed(_atexit): callback(*args, **kwargs) del _atexit[:]
[ "def", "_run_atexit", "(", ")", ":", "global", "_atexit", "for", "callback", ",", "args", ",", "kwargs", "in", "reversed", "(", "_atexit", ")", ":", "callback", "(", "*", "args", ",", "*", "*", "kwargs", ")", "del", "_atexit", "[", ":", "]" ]
Hook frameworks must invoke this after the main hook body has successfully completed. Do not invoke it if the hook fails.
[ "Hook", "frameworks", "must", "invoke", "this", "after", "the", "main", "hook", "body", "has", "successfully", "completed", ".", "Do", "not", "invoke", "it", "if", "the", "hook", "fails", "." ]
python
train
nickpandolfi/Cyther
cyther/commands.py
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/commands.py#L206-L220
def makeCommands(file): """ Given a high level preset, it will construct the basic args to pass over. 'ninja', 'beast', 'minimal', 'swift' """ commands = [['cython', '-a', '-p', '-o', file['c_name'], file['file_path']], ['gcc', '-DNDEBUG', '-g', '-fwrapv', '-O3', '-Wall', '-Wextra', '-pthread', '-fPIC', '-c', file['include'], '-o', file['object_file_name'], file['c_name']], ['gcc', '-g', '-Wall', '-Wextra', '-pthread', '-shared', RUNTIME_STRING, '-o', file['output_name'], file['object_file_name'], L_OPTION]] return commands
[ "def", "makeCommands", "(", "file", ")", ":", "commands", "=", "[", "[", "'cython'", ",", "'-a'", ",", "'-p'", ",", "'-o'", ",", "file", "[", "'c_name'", "]", ",", "file", "[", "'file_path'", "]", "]", ",", "[", "'gcc'", ",", "'-DNDEBUG'", ",", "'-...
Given a high level preset, it will construct the basic args to pass over. 'ninja', 'beast', 'minimal', 'swift'
[ "Given", "a", "high", "level", "preset", "it", "will", "construct", "the", "basic", "args", "to", "pass", "over", ".", "ninja", "beast", "minimal", "swift" ]
python
train
pandas-dev/pandas
pandas/core/panel.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L239-L279
def from_dict(cls, data, intersect=False, orient='items', dtype=None): """ Construct Panel from dict of DataFrame objects. Parameters ---------- data : dict {field : DataFrame} intersect : boolean Intersect indexes of input DataFrames orient : {'items', 'minor'}, default 'items' The "orientation" of the data. If the keys of the passed dict should be the items of the result panel, pass 'items' (default). Otherwise if the columns of the values of the passed DataFrame objects should be the items (which in the case of mixed-dtype data you should do), instead pass 'minor' dtype : dtype, default None Data type to force, otherwise infer Returns ------- Panel """ from collections import defaultdict orient = orient.lower() if orient == 'minor': new_data = defaultdict(OrderedDict) for col, df in data.items(): for item, s in df.items(): new_data[item][col] = s data = new_data elif orient != 'items': # pragma: no cover raise ValueError('Orientation must be one of {items, minor}.') d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype) ks = list(d['data'].keys()) if not isinstance(d['data'], OrderedDict): ks = list(sorted(ks)) d[cls._info_axis_name] = Index(ks) return cls(**d)
[ "def", "from_dict", "(", "cls", ",", "data", ",", "intersect", "=", "False", ",", "orient", "=", "'items'", ",", "dtype", "=", "None", ")", ":", "from", "collections", "import", "defaultdict", "orient", "=", "orient", ".", "lower", "(", ")", "if", "ori...
Construct Panel from dict of DataFrame objects. Parameters ---------- data : dict {field : DataFrame} intersect : boolean Intersect indexes of input DataFrames orient : {'items', 'minor'}, default 'items' The "orientation" of the data. If the keys of the passed dict should be the items of the result panel, pass 'items' (default). Otherwise if the columns of the values of the passed DataFrame objects should be the items (which in the case of mixed-dtype data you should do), instead pass 'minor' dtype : dtype, default None Data type to force, otherwise infer Returns ------- Panel
[ "Construct", "Panel", "from", "dict", "of", "DataFrame", "objects", "." ]
python
train
ToucanToco/toucan-data-sdk
toucan_data_sdk/utils/postprocess/converter.py
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/converter.py#L99-L154
def cast(df, column: str, type: str, new_column=None): """ Convert column's type into type --- ### Parameters *mandatory :* - `column` (*str*): name of the column to convert - `type` (*str*): output type. It can be : - `"int"` : integer type - `"float"` : general number type - `"str"` : text type *optional :* - `new_column` (*str*): name of the output column. By default the `column` arguments is modified. --- ### Example **Input** | Column 1 | Column 2 | Column 3 | |:-------:|:--------:|:--------:| | 'one' | '2014' | 30.0 | | 'two' | 2015.0 | '1' | | 3.1 | 2016 | 450 | ```cson postprocess: [ cast: column: 'Column 1' type: 'str' cast: column: 'Column 2' type: 'int' cast: column: 'Column 3' type: 'float' ] ``` **Output** | Column 1 | Column 2 | Column 3 | |:-------:|:------:|:--------:| | 'one' | 2014 | 30.0 | | 'two' | 2015 | 1.0 | | '3.1' | 2016 | 450.0 | """ new_column = new_column or column df[new_column] = df[column].astype(type) return df
[ "def", "cast", "(", "df", ",", "column", ":", "str", ",", "type", ":", "str", ",", "new_column", "=", "None", ")", ":", "new_column", "=", "new_column", "or", "column", "df", "[", "new_column", "]", "=", "df", "[", "column", "]", ".", "astype", "("...
Convert column's type into type --- ### Parameters *mandatory :* - `column` (*str*): name of the column to convert - `type` (*str*): output type. It can be : - `"int"` : integer type - `"float"` : general number type - `"str"` : text type *optional :* - `new_column` (*str*): name of the output column. By default the `column` arguments is modified. --- ### Example **Input** | Column 1 | Column 2 | Column 3 | |:-------:|:--------:|:--------:| | 'one' | '2014' | 30.0 | | 'two' | 2015.0 | '1' | | 3.1 | 2016 | 450 | ```cson postprocess: [ cast: column: 'Column 1' type: 'str' cast: column: 'Column 2' type: 'int' cast: column: 'Column 3' type: 'float' ] ``` **Output** | Column 1 | Column 2 | Column 3 | |:-------:|:------:|:--------:| | 'one' | 2014 | 30.0 | | 'two' | 2015 | 1.0 | | '3.1' | 2016 | 450.0 |
[ "Convert", "column", "s", "type", "into", "type" ]
python
test
edeposit/edeposit.amqp.harvester
src/edeposit/amqp/harvester/edeposit_autoparser.py
https://github.com/edeposit/edeposit.amqp.harvester/blob/38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e/src/edeposit/amqp/harvester/edeposit_autoparser.py#L42-L64
def _locate_element(dom, el_content, transformer=None): """ Find element containing `el_content` in `dom`. Use `transformer` function to content of all elements in `dom` in order to correctly transforming them to match them with `el_content`. Args: dom (obj): HTMLElement tree. el_content (str): Content of element will be picked from `dom`. transformer (fn, default None): Transforming function. Note: `transformer` parameter can be for example simple lambda:: lambda x: x.strip() Returns: list: Matching HTMLElements. """ return dom.find( None, fn=utils.content_matchs(el_content, transformer) )
[ "def", "_locate_element", "(", "dom", ",", "el_content", ",", "transformer", "=", "None", ")", ":", "return", "dom", ".", "find", "(", "None", ",", "fn", "=", "utils", ".", "content_matchs", "(", "el_content", ",", "transformer", ")", ")" ]
Find element containing `el_content` in `dom`. Use `transformer` function to content of all elements in `dom` in order to correctly transforming them to match them with `el_content`. Args: dom (obj): HTMLElement tree. el_content (str): Content of element will be picked from `dom`. transformer (fn, default None): Transforming function. Note: `transformer` parameter can be for example simple lambda:: lambda x: x.strip() Returns: list: Matching HTMLElements.
[ "Find", "element", "containing", "el_content", "in", "dom", ".", "Use", "transformer", "function", "to", "content", "of", "all", "elements", "in", "dom", "in", "order", "to", "correctly", "transforming", "them", "to", "match", "them", "with", "el_content", "."...
python
train
MKLab-ITI/reveal-user-annotation
reveal_user_annotation/twitter/twitter_util.py
https://github.com/MKLab-ITI/reveal-user-annotation/blob/ed019c031857b091e5601f53ba3f01a499a0e3ef/reveal_user_annotation/twitter/twitter_util.py#L27-L94
def safe_twitter_request_handler(twitter_api_func, call_rate_limit, call_counter, time_window_start, max_retries, wait_period, *args, **kw): """ This is a safe function handler for any twitter request. Inputs: - twitter_api_func: The twython function object to be safely called. - call_rate_limit: THe call rate limit for this specific Twitter API function. - call_counter: A counter that keeps track of the number of function calls in the current 15-minute window. - time_window_start: The timestamp of the current 15-minute window. - max_retries: Number of call retries allowed before abandoning the effort. - wait_period: For certain Twitter errors (i.e. server overload), we wait and call again. - *args, **kw: The parameters of the twython function to be called. Outputs: - twitter_api_function_result: The results of the Twitter function. - call_counter: A counter that keeps track of the number of function calls in the current 15-minute window. - time_window_start: The timestamp of the current 15-minute window. Raises: - twython.TwythonError - urllib.error.URLError - http.client.BadStatusLine """ error_count = 0 while True: try: # If we have reached the call rate limit for this function: if call_counter >= call_rate_limit: # Reset counter. call_counter = 0 # Sleep for the appropriate time. elapsed_time = time.perf_counter() - time_window_start sleep_time = 15*60 - elapsed_time if sleep_time < 0.1: sleep_time = 0.1 time.sleep(sleep_time) # Initialize new 15-minute time window. time_window_start = time.perf_counter() else: call_counter += 1 twitter_api_function_result = twitter_api_func(*args, **kw) return twitter_api_function_result, call_counter, time_window_start except twython.TwythonError as e: # If it is a Twitter error, handle it. error_count, call_counter, time_window_start, wait_period = handle_twitter_http_error(e, error_count, call_counter, time_window_start, wait_period) if error_count > max_retries: print("Max error count reached. 
Abandoning effort.") raise e except URLError as e: error_count += 1 if error_count > max_retries: print("Max error count reached. Abandoning effort.") raise e except BadStatusLine as e: error_count += 1 if error_count > max_retries: print("Max error count reached. Abandoning effort.") raise e
[ "def", "safe_twitter_request_handler", "(", "twitter_api_func", ",", "call_rate_limit", ",", "call_counter", ",", "time_window_start", ",", "max_retries", ",", "wait_period", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "error_count", "=", "0", "while", "Tru...
This is a safe function handler for any twitter request. Inputs: - twitter_api_func: The twython function object to be safely called. - call_rate_limit: THe call rate limit for this specific Twitter API function. - call_counter: A counter that keeps track of the number of function calls in the current 15-minute window. - time_window_start: The timestamp of the current 15-minute window. - max_retries: Number of call retries allowed before abandoning the effort. - wait_period: For certain Twitter errors (i.e. server overload), we wait and call again. - *args, **kw: The parameters of the twython function to be called. Outputs: - twitter_api_function_result: The results of the Twitter function. - call_counter: A counter that keeps track of the number of function calls in the current 15-minute window. - time_window_start: The timestamp of the current 15-minute window. Raises: - twython.TwythonError - urllib.error.URLError - http.client.BadStatusLine
[ "This", "is", "a", "safe", "function", "handler", "for", "any", "twitter", "request", "." ]
python
train
VisualOps/cli
visualops/utils/dockervisops.py
https://github.com/VisualOps/cli/blob/e9ee9a804df0de3cce54be4c623528fd658838dc/visualops/utils/dockervisops.py#L203-L235
def _pull_assemble_error_status(logs): ''' Given input in this form:: u'{"status":"Pulling repository foo/ubuntubox"}: "image (latest) from foo/ ... rogress":"complete","id":"2c80228370c9"}' construct something like that (load JSON data is possible):: [u'{"status":"Pulling repository foo/ubuntubox"', {"status":"Download","progress":"complete","id":"2c80228370c9"}] ''' comment = 'An error occurred pulling your image' try: for err_log in logs: if isinstance(err_log, dict): if 'errorDetail' in err_log: if 'code' in err_log['errorDetail']: msg = '\n{0}\n{1}: {2}'.format( err_log['error'], err_log['errorDetail']['code'], err_log['errorDetail']['message'] ) else: msg = '\n{0}\n{1}'.format( err_log['error'], err_log['errorDetail']['message'], ) comment += msg except Exception as e: comment += "%s"%e return comment
[ "def", "_pull_assemble_error_status", "(", "logs", ")", ":", "comment", "=", "'An error occurred pulling your image'", "try", ":", "for", "err_log", "in", "logs", ":", "if", "isinstance", "(", "err_log", ",", "dict", ")", ":", "if", "'errorDetail'", "in", "err_l...
Given input in this form:: u'{"status":"Pulling repository foo/ubuntubox"}: "image (latest) from foo/ ... rogress":"complete","id":"2c80228370c9"}' construct something like that (load JSON data is possible):: [u'{"status":"Pulling repository foo/ubuntubox"', {"status":"Download","progress":"complete","id":"2c80228370c9"}]
[ "Given", "input", "in", "this", "form", "::" ]
python
train
jmoiron/humanize
humanize/number.py
https://github.com/jmoiron/humanize/blob/32c469bc378de22e8eabd5f9565bd7cffe7c7ae0/humanize/number.py#L12-L32
def ordinal(value): """Converts an integer to its ordinal as a string. 1 is '1st', 2 is '2nd', 3 is '3rd', etc. Works for any integer or anything int() will turn into an integer. Anything other value will have nothing done to it.""" try: value = int(value) except (TypeError, ValueError): return value t = (P_('0', 'th'), P_('1', 'st'), P_('2', 'nd'), P_('3', 'rd'), P_('4', 'th'), P_('5', 'th'), P_('6', 'th'), P_('7', 'th'), P_('8', 'th'), P_('9', 'th')) if value % 100 in (11, 12, 13): # special case return "%d%s" % (value, t[0]) return '%d%s' % (value, t[value % 10])
[ "def", "ordinal", "(", "value", ")", ":", "try", ":", "value", "=", "int", "(", "value", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "return", "value", "t", "=", "(", "P_", "(", "'0'", ",", "'th'", ")", ",", "P_", "(", "'1'", ...
Converts an integer to its ordinal as a string. 1 is '1st', 2 is '2nd', 3 is '3rd', etc. Works for any integer or anything int() will turn into an integer. Anything other value will have nothing done to it.
[ "Converts", "an", "integer", "to", "its", "ordinal", "as", "a", "string", ".", "1", "is", "1st", "2", "is", "2nd", "3", "is", "3rd", "etc", ".", "Works", "for", "any", "integer", "or", "anything", "int", "()", "will", "turn", "into", "an", "integer",...
python
train
horejsek/python-fastjsonschema
fastjsonschema/ref_resolver.py
https://github.com/horejsek/python-fastjsonschema/blob/8c38d0f91fa5d928ff629080cdb75ab23f96590f/fastjsonschema/ref_resolver.py#L133-L140
def get_scope_name(self): """ Get current scope and return it as a valid function name. """ name = 'validate_' + unquote(self.resolution_scope).replace('~1', '_').replace('~0', '_') name = re.sub(r'[:/#\.\-\%]', '_', name) name = name.lower().rstrip('_') return name
[ "def", "get_scope_name", "(", "self", ")", ":", "name", "=", "'validate_'", "+", "unquote", "(", "self", ".", "resolution_scope", ")", ".", "replace", "(", "'~1'", ",", "'_'", ")", ".", "replace", "(", "'~0'", ",", "'_'", ")", "name", "=", "re", ".",...
Get current scope and return it as a valid function name.
[ "Get", "current", "scope", "and", "return", "it", "as", "a", "valid", "function", "name", "." ]
python
train
stanfordnlp/stanza
stanza/monitoring/summary.py
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/monitoring/summary.py#L325-L337
def write_events(stream, events): ''' Write a sequence of Event protos to file-like object `stream`. ''' for event in events: data = event.SerializeToString() len_field = struct.pack('<Q', len(data)) len_crc = struct.pack('<I', masked_crc(len_field)) data_crc = struct.pack('<I', masked_crc(data)) stream.write(len_field) stream.write(len_crc) stream.write(data) stream.write(data_crc)
[ "def", "write_events", "(", "stream", ",", "events", ")", ":", "for", "event", "in", "events", ":", "data", "=", "event", ".", "SerializeToString", "(", ")", "len_field", "=", "struct", ".", "pack", "(", "'<Q'", ",", "len", "(", "data", ")", ")", "le...
Write a sequence of Event protos to file-like object `stream`.
[ "Write", "a", "sequence", "of", "Event", "protos", "to", "file", "-", "like", "object", "stream", "." ]
python
train
UCL-INGI/INGInious
inginious/frontend/user_manager.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/user_manager.py#L294-L301
def disconnect_user(self): """ Disconnects the user currently logged-in :param ip_addr: the ip address of the client, that will be logged """ if self.session_logged_in(): self._logger.info("User %s disconnected - %s - %s - %s", self.session_username(), self.session_realname(), self.session_email(), web.ctx.ip) self._destroy_session()
[ "def", "disconnect_user", "(", "self", ")", ":", "if", "self", ".", "session_logged_in", "(", ")", ":", "self", ".", "_logger", ".", "info", "(", "\"User %s disconnected - %s - %s - %s\"", ",", "self", ".", "session_username", "(", ")", ",", "self", ".", "se...
Disconnects the user currently logged-in :param ip_addr: the ip address of the client, that will be logged
[ "Disconnects", "the", "user", "currently", "logged", "-", "in", ":", "param", "ip_addr", ":", "the", "ip", "address", "of", "the", "client", "that", "will", "be", "logged" ]
python
train
square/pylink
pylink/jlink.py
https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/pylink/jlink.py#L1133-L1146
def hardware_version(self): """Returns the hardware version of the connected J-Link as a major.minor string. Args: self (JLink): the ``JLink`` instance Returns: Hardware version string. """ version = self._dll.JLINKARM_GetHardwareVersion() major = version / 10000 % 100 minor = version / 100 % 100 return '%d.%02d' % (major, minor)
[ "def", "hardware_version", "(", "self", ")", ":", "version", "=", "self", ".", "_dll", ".", "JLINKARM_GetHardwareVersion", "(", ")", "major", "=", "version", "/", "10000", "%", "100", "minor", "=", "version", "/", "100", "%", "100", "return", "'%d.%02d'", ...
Returns the hardware version of the connected J-Link as a major.minor string. Args: self (JLink): the ``JLink`` instance Returns: Hardware version string.
[ "Returns", "the", "hardware", "version", "of", "the", "connected", "J", "-", "Link", "as", "a", "major", ".", "minor", "string", "." ]
python
train
edeposit/edeposit.amqp.storage
src/edeposit/amqp/storage/web_tools.py
https://github.com/edeposit/edeposit.amqp.storage/blob/fb6bd326249847de04b17b64e856c878665cea92/src/edeposit/amqp/storage/web_tools.py#L31-L58
def compose_path(pub, uuid_url=False): """ Compose absolute path for given `pub`. Args: pub (obj): :class:`.DBPublication` instance. uuid_url (bool, default False): Compose URL using UUID. Returns: str: Absolute url-path of the publication, without server's address \ and protocol. Raises: PrivatePublicationError: When the `pub` is private publication. """ if uuid_url: return join( "/", UUID_DOWNLOAD_KEY, str(pub.uuid) ) return join( "/", DOWNLOAD_KEY, basename(pub.file_pointer), basename(pub.filename) )
[ "def", "compose_path", "(", "pub", ",", "uuid_url", "=", "False", ")", ":", "if", "uuid_url", ":", "return", "join", "(", "\"/\"", ",", "UUID_DOWNLOAD_KEY", ",", "str", "(", "pub", ".", "uuid", ")", ")", "return", "join", "(", "\"/\"", ",", "DOWNLOAD_K...
Compose absolute path for given `pub`. Args: pub (obj): :class:`.DBPublication` instance. uuid_url (bool, default False): Compose URL using UUID. Returns: str: Absolute url-path of the publication, without server's address \ and protocol. Raises: PrivatePublicationError: When the `pub` is private publication.
[ "Compose", "absolute", "path", "for", "given", "pub", "." ]
python
train
singularityhub/sregistry-cli
sregistry/client/backend.py
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/client/backend.py#L212-L234
def list_backends(backend=None): '''return a list of backends installed for the user, which is based on the config file keys found present Parameters ========== backend: a specific backend to list. If defined, just list parameters. ''' settings = read_client_secrets() # Backend names are the keys backends = list(settings.keys()) backends = [b for b in backends if b!='SREGISTRY_CLIENT'] if backend in backends: bot.info(backend) print(json.dumps(settings[backend], indent=4, sort_keys=True)) else: if backend is not None: print('%s is not a known client.' %backend) bot.info("Backends Installed") print('\n'.join(backends))
[ "def", "list_backends", "(", "backend", "=", "None", ")", ":", "settings", "=", "read_client_secrets", "(", ")", "# Backend names are the keys", "backends", "=", "list", "(", "settings", ".", "keys", "(", ")", ")", "backends", "=", "[", "b", "for", "b", "i...
return a list of backends installed for the user, which is based on the config file keys found present Parameters ========== backend: a specific backend to list. If defined, just list parameters.
[ "return", "a", "list", "of", "backends", "installed", "for", "the", "user", "which", "is", "based", "on", "the", "config", "file", "keys", "found", "present", "Parameters", "==========", "backend", ":", "a", "specific", "backend", "to", "list", ".", "If", ...
python
test
datamachine/twx.botapi
twx/botapi/botapi.py
https://github.com/datamachine/twx.botapi/blob/c85184da738169e8f9d6d8e62970540f427c486e/twx/botapi/botapi.py#L4294-L4296
def forward_message(self, *args, **kwargs): """See :func:`forward_message`""" return forward_message(*args, **self._merge_overrides(**kwargs)).run()
[ "def", "forward_message", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "forward_message", "(", "*", "args", ",", "*", "*", "self", ".", "_merge_overrides", "(", "*", "*", "kwargs", ")", ")", ".", "run", "(", ")" ]
See :func:`forward_message`
[ "See", ":", "func", ":", "forward_message" ]
python
train
ninuxorg/nodeshot
nodeshot/interop/sync/synchronizers/base.py
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/interop/sync/synchronizers/base.py#L342-L431
def _convert_item(self, item): """ take a parsed item as input and returns a python dictionary the keys will be saved into the Node model either in their respective fields or in the hstore "data" field :param item: object representing parsed item """ item = self.parse_item(item) # name is required if not item['name']: raise Exception('Expected property %s not found in item %s.' % (self.keys['name'], item)) elif len(item['name']) > 75: item['name'] = item['name'][:75] if not item['status']: item['status'] = self.default_status # get status or get default status or None try: item['status'] = Status.objects.get(slug__iexact=item['status']) except Status.DoesNotExist: try: item['status'] = Status.objects.create(name=item['status'], slug=slugify(item['status']), description=item['status'], is_default=False) except Exception as e: logger.exception(e) item['status'] = None # slugify slug item['slug'] = slugify(item['name']) if not item['address']: item['address'] = '' if not item['is_published']: item['is_published'] = '' User = get_user_model() # get user or None try: item['user'] = User.objects.get(username=item['user']) except User.DoesNotExist: item['user'] = None if not item['elev']: item['elev'] = None if not item['description']: item['description'] = '' if not item['notes']: item['notes'] = '' if item['added']: # convert dates to python datetime try: item['added'] = parser.parse(item['added']) except Exception as e: print "Exception while parsing 'added' date: %s" % e if item['updated']: try: item['updated'] = parser.parse(item['updated']) except Exception as e: print "Exception while parsing 'updated' date: %s" % e result = { "name": item['name'], "slug": item['slug'], "status": item['status'], "address": item['address'], "is_published": item['is_published'], "user": item['user'], "geometry": item['geometry'], "elev": item['elev'], "description": item['description'], "notes": item['notes'], "added": item['added'], "updated": item['updated'], "data": {} } # ensure 
all additional data items are strings for key, value in item['data'].items(): result["data"][key] = value return result
[ "def", "_convert_item", "(", "self", ",", "item", ")", ":", "item", "=", "self", ".", "parse_item", "(", "item", ")", "# name is required", "if", "not", "item", "[", "'name'", "]", ":", "raise", "Exception", "(", "'Expected property %s not found in item %s.'", ...
take a parsed item as input and returns a python dictionary the keys will be saved into the Node model either in their respective fields or in the hstore "data" field :param item: object representing parsed item
[ "take", "a", "parsed", "item", "as", "input", "and", "returns", "a", "python", "dictionary", "the", "keys", "will", "be", "saved", "into", "the", "Node", "model", "either", "in", "their", "respective", "fields", "or", "in", "the", "hstore", "data", "field"...
python
train
henrysher/kotocore
kotocore/session.py
https://github.com/henrysher/kotocore/blob/c52d2f3878b924ceabca07f61c91abcb1b230ecc/kotocore/session.py#L133-L168
def get_collection(self, service_name, collection_name, base_class=None): """ Returns a ``Collection`` **class** for a given service. :param service_name: A string that specifies the name of the desired service. Ex. ``sqs``, ``sns``, ``dynamodb``, etc. :type service_name: string :param collection_name: A string that specifies the name of the desired class. Ex. ``QueueCollection``, ``NotificationCollection``, ``TableCollection``, etc. :type collection_name: string :param base_class: (Optional) The base class of the object. Prevents "magically" loading the wrong class (one with a different base). :type base_class: class :rtype: <kotocore.collections.Collection subclass> """ try: return self.cache.get_collection( service_name, collection_name, base_class=base_class ) except NotCached: pass # We didn't find it. Construct it. new_class = self.collection_factory.construct_for( service_name, collection_name, base_class=base_class ) self.cache.set_collection(service_name, collection_name, new_class) return new_class
[ "def", "get_collection", "(", "self", ",", "service_name", ",", "collection_name", ",", "base_class", "=", "None", ")", ":", "try", ":", "return", "self", ".", "cache", ".", "get_collection", "(", "service_name", ",", "collection_name", ",", "base_class", "=",...
Returns a ``Collection`` **class** for a given service. :param service_name: A string that specifies the name of the desired service. Ex. ``sqs``, ``sns``, ``dynamodb``, etc. :type service_name: string :param collection_name: A string that specifies the name of the desired class. Ex. ``QueueCollection``, ``NotificationCollection``, ``TableCollection``, etc. :type collection_name: string :param base_class: (Optional) The base class of the object. Prevents "magically" loading the wrong class (one with a different base). :type base_class: class :rtype: <kotocore.collections.Collection subclass>
[ "Returns", "a", "Collection", "**", "class", "**", "for", "a", "given", "service", "." ]
python
train
paylogic/pip-accel
pip_accel/caches/s3.py
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/caches/s3.py#L250-L302
def s3_connection(self): """ Connect to the Amazon S3 API. If the connection attempt fails because Boto can't find credentials the attempt is retried once with an anonymous connection. Called on demand by :attr:`s3_bucket`. :returns: A :class:`boto.s3.connection.S3Connection` object. :raises: :exc:`.CacheBackendError` when the connection to the Amazon S3 API fails. """ if not hasattr(self, 'cached_connection'): self.check_prerequisites() with PatchedBotoConfig(): import boto from boto.exception import BotoClientError, BotoServerError, NoAuthHandlerFound from boto.s3.connection import S3Connection, SubdomainCallingFormat, OrdinaryCallingFormat try: # Configure the number of retries and the socket timeout used # by Boto. Based on the snippet given in the following email: # https://groups.google.com/d/msg/boto-users/0osmP0cUl5Y/X4NdlMGWKiEJ if not boto.config.has_section(BOTO_CONFIG_SECTION): boto.config.add_section(BOTO_CONFIG_SECTION) boto.config.set(BOTO_CONFIG_SECTION, BOTO_CONFIG_NUM_RETRIES_OPTION, str(self.config.s3_cache_retries)) boto.config.set(BOTO_CONFIG_SECTION, BOTO_CONFIG_SOCKET_TIMEOUT_OPTION, str(self.config.s3_cache_timeout)) logger.debug("Connecting to Amazon S3 API ..") endpoint = urlparse(self.config.s3_cache_url) host, _, port = endpoint.netloc.partition(':') kw = dict( host=host, port=int(port) if port else None, is_secure=(endpoint.scheme == 'https'), calling_format=(SubdomainCallingFormat() if host == S3Connection.DefaultHost else OrdinaryCallingFormat()), ) try: self.cached_connection = S3Connection(**kw) except NoAuthHandlerFound: logger.debug("Amazon S3 API credentials missing, retrying with anonymous connection ..") self.cached_connection = S3Connection(anon=True, **kw) except (BotoClientError, BotoServerError): raise CacheBackendError(""" Failed to connect to the Amazon S3 API! Most likely your credentials are not correctly configured. The Amazon S3 cache backend will be disabled for now. """) return self.cached_connection
[ "def", "s3_connection", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'cached_connection'", ")", ":", "self", ".", "check_prerequisites", "(", ")", "with", "PatchedBotoConfig", "(", ")", ":", "import", "boto", "from", "boto", ".", "exce...
Connect to the Amazon S3 API. If the connection attempt fails because Boto can't find credentials the attempt is retried once with an anonymous connection. Called on demand by :attr:`s3_bucket`. :returns: A :class:`boto.s3.connection.S3Connection` object. :raises: :exc:`.CacheBackendError` when the connection to the Amazon S3 API fails.
[ "Connect", "to", "the", "Amazon", "S3", "API", "." ]
python
train
spyder-ide/spyder
spyder/plugins/ipythonconsole/plugin.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/ipythonconsole/plugin.py#L1504-L1518
def _remove_old_stderr_files(self): """ Remove stderr files left by previous Spyder instances. This is only required on Windows because we can't clean up stderr files while Spyder is running on it. """ if os.name == 'nt': tmpdir = get_temp_dir() for fname in os.listdir(tmpdir): if osp.splitext(fname)[1] == '.stderr': try: os.remove(osp.join(tmpdir, fname)) except Exception: pass
[ "def", "_remove_old_stderr_files", "(", "self", ")", ":", "if", "os", ".", "name", "==", "'nt'", ":", "tmpdir", "=", "get_temp_dir", "(", ")", "for", "fname", "in", "os", ".", "listdir", "(", "tmpdir", ")", ":", "if", "osp", ".", "splitext", "(", "fn...
Remove stderr files left by previous Spyder instances. This is only required on Windows because we can't clean up stderr files while Spyder is running on it.
[ "Remove", "stderr", "files", "left", "by", "previous", "Spyder", "instances", ".", "This", "is", "only", "required", "on", "Windows", "because", "we", "can", "t", "clean", "up", "stderr", "files", "while", "Spyder", "is", "running", "on", "it", "." ]
python
train
gitpython-developers/GitPython
git/objects/submodule/base.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/objects/submodule/base.py#L431-L645
def update(self, recursive=False, init=True, to_latest_revision=False, progress=None, dry_run=False, force=False, keep_going=False): """Update the repository of this submodule to point to the checkout we point at with the binsha of this instance. :param recursive: if True, we will operate recursively and update child- modules as well. :param init: if True, the module repository will be cloned into place if necessary :param to_latest_revision: if True, the submodule's sha will be ignored during checkout. Instead, the remote will be fetched, and the local tracking branch updated. This only works if we have a local tracking branch, which is the case if the remote repository had a master branch, or of the 'branch' option was specified for this submodule and the branch existed remotely :param progress: UpdateProgress instance or None if no progress should be shown :param dry_run: if True, the operation will only be simulated, but not performed. All performed operations are read-only :param force: If True, we may reset heads even if the repository in question is dirty. Additinoally we will be allowed to set a tracking branch which is ahead of its remote branch back into the past or the location of the remote branch. This will essentially 'forget' commits. If False, local tracking branches that are in the future of their respective remote branches will simply not be moved. :param keep_going: if True, we will ignore but log all errors, and keep going recursively. Unless dry_run is set as well, keep_going could cause subsequent/inherited errors you wouldn't see otherwise. 
In conjunction with dry_run, it can be useful to anticipate all errors when updating submodules :note: does nothing in bare repositories :note: method is definitely not atomic if recurisve is True :return: self""" if self.repo.bare: return self # END pass in bare mode if progress is None: progress = UpdateProgress() # END handle progress prefix = '' if dry_run: prefix = "DRY-RUN: " # END handle prefix # to keep things plausible in dry-run mode if dry_run: mrepo = None # END init mrepo try: # ASSURE REPO IS PRESENT AND UPTODATE ##################################### try: mrepo = self.module() rmts = mrepo.remotes len_rmts = len(rmts) for i, remote in enumerate(rmts): op = FETCH if i == 0: op |= BEGIN # END handle start progress.update(op, i, len_rmts, prefix + "Fetching remote %s of submodule %r" % (remote, self.name)) #=============================== if not dry_run: remote.fetch(progress=progress) # END handle dry-run #=============================== if i == len_rmts - 1: op |= END # END handle end progress.update(op, i, len_rmts, prefix + "Done fetching remote of submodule %r" % self.name) # END fetch new data except InvalidGitRepositoryError: if not init: return self # END early abort if init is not allowed # there is no git-repository yet - but delete empty paths checkout_module_abspath = self.abspath if not dry_run and osp.isdir(checkout_module_abspath): try: os.rmdir(checkout_module_abspath) except OSError: raise OSError("Module directory at %r does already exist and is non-empty" % checkout_module_abspath) # END handle OSError # END handle directory removal # don't check it out at first - nonetheless it will create a local # branch according to the remote-HEAD if possible progress.update(BEGIN | CLONE, 0, 1, prefix + "Cloning url '%s' to '%s' in submodule %r" % (self.url, checkout_module_abspath, self.name)) if not dry_run: mrepo = self._clone_repo(self.repo, self.url, self.path, self.name, n=True) # END handle dry-run progress.update(END | CLONE, 0, 1, prefix 
+ "Done cloning to %s" % checkout_module_abspath) if not dry_run: # see whether we have a valid branch to checkout try: # find a remote which has our branch - we try to be flexible remote_branch = find_first_remote_branch(mrepo.remotes, self.branch_name) local_branch = mkhead(mrepo, self.branch_path) # have a valid branch, but no checkout - make sure we can figure # that out by marking the commit with a null_sha local_branch.set_object(Object(mrepo, self.NULL_BIN_SHA)) # END initial checkout + branch creation # make sure HEAD is not detached mrepo.head.set_reference(local_branch, logmsg="submodule: attaching head to %s" % local_branch) mrepo.head.ref.set_tracking_branch(remote_branch) except (IndexError, InvalidGitRepositoryError): log.warn("Failed to checkout tracking branch %s", self.branch_path) # END handle tracking branch # NOTE: Have to write the repo config file as well, otherwise # the default implementation will be offended and not update the repository # Maybe this is a good way to assure it doesn't get into our way, but # we want to stay backwards compatible too ... . Its so redundant ! 
with self.repo.config_writer() as writer: writer.set_value(sm_section(self.name), 'url', self.url) # END handle dry_run # END handle initialization # DETERMINE SHAS TO CHECKOUT ############################ binsha = self.binsha hexsha = self.hexsha if mrepo is not None: # mrepo is only set if we are not in dry-run mode or if the module existed is_detached = mrepo.head.is_detached # END handle dry_run if mrepo is not None and to_latest_revision: msg_base = "Cannot update to latest revision in repository at %r as " % mrepo.working_dir if not is_detached: rref = mrepo.head.ref.tracking_branch() if rref is not None: rcommit = rref.commit binsha = rcommit.binsha hexsha = rcommit.hexsha else: log.error("%s a tracking branch was not set for local branch '%s'", msg_base, mrepo.head.ref) # END handle remote ref else: log.error("%s there was no local tracking branch", msg_base) # END handle detached head # END handle to_latest_revision option # update the working tree # handles dry_run if mrepo is not None and mrepo.head.commit.binsha != binsha: # We must assure that our destination sha (the one to point to) is in the future of our current head. # Otherwise, we will reset changes that might have been done on the submodule, but were not yet pushed # We also handle the case that history has been rewritten, leaving no merge-base. 
In that case # we behave conservatively, protecting possible changes the user had done may_reset = True if mrepo.head.commit.binsha != self.NULL_BIN_SHA: base_commit = mrepo.merge_base(mrepo.head.commit, hexsha) if len(base_commit) == 0 or base_commit[0].hexsha == hexsha: if force: msg = "Will force checkout or reset on local branch that is possibly in the future of" msg += "the commit it will be checked out to, effectively 'forgetting' new commits" log.debug(msg) else: msg = "Skipping %s on branch '%s' of submodule repo '%s' as it contains un-pushed commits" msg %= (is_detached and "checkout" or "reset", mrepo.head, mrepo) log.info(msg) may_reset = False # end handle force # end handle if we are in the future if may_reset and not force and mrepo.is_dirty(index=True, working_tree=True, untracked_files=True): raise RepositoryDirtyError(mrepo, "Cannot reset a dirty repository") # end handle force and dirty state # end handle empty repo # end verify future/past progress.update(BEGIN | UPDWKTREE, 0, 1, prefix + "Updating working tree at %s for submodule %r to revision %s" % (self.path, self.name, hexsha)) if not dry_run and may_reset: if is_detached: # NOTE: for now we force, the user is no supposed to change detached # submodules anyway. Maybe at some point this becomes an option, to # properly handle user modifications - see below for future options # regarding rebase and merge. 
mrepo.git.checkout(hexsha, force=force) else: mrepo.head.reset(hexsha, index=True, working_tree=True) # END handle checkout # if we may reset/checkout progress.update(END | UPDWKTREE, 0, 1, prefix + "Done updating working tree for submodule %r" % self.name) # END update to new commit only if needed except Exception as err: if not keep_going: raise log.error(str(err)) # end handle keep_going # HANDLE RECURSION ################## if recursive: # in dry_run mode, the module might not exist if mrepo is not None: for submodule in self.iter_items(self.module()): submodule.update(recursive, init, to_latest_revision, progress=progress, dry_run=dry_run, force=force, keep_going=keep_going) # END handle recursive update # END handle dry run # END for each submodule return self
[ "def", "update", "(", "self", ",", "recursive", "=", "False", ",", "init", "=", "True", ",", "to_latest_revision", "=", "False", ",", "progress", "=", "None", ",", "dry_run", "=", "False", ",", "force", "=", "False", ",", "keep_going", "=", "False", ")...
Update the repository of this submodule to point to the checkout we point at with the binsha of this instance. :param recursive: if True, we will operate recursively and update child- modules as well. :param init: if True, the module repository will be cloned into place if necessary :param to_latest_revision: if True, the submodule's sha will be ignored during checkout. Instead, the remote will be fetched, and the local tracking branch updated. This only works if we have a local tracking branch, which is the case if the remote repository had a master branch, or of the 'branch' option was specified for this submodule and the branch existed remotely :param progress: UpdateProgress instance or None if no progress should be shown :param dry_run: if True, the operation will only be simulated, but not performed. All performed operations are read-only :param force: If True, we may reset heads even if the repository in question is dirty. Additinoally we will be allowed to set a tracking branch which is ahead of its remote branch back into the past or the location of the remote branch. This will essentially 'forget' commits. If False, local tracking branches that are in the future of their respective remote branches will simply not be moved. :param keep_going: if True, we will ignore but log all errors, and keep going recursively. Unless dry_run is set as well, keep_going could cause subsequent/inherited errors you wouldn't see otherwise. In conjunction with dry_run, it can be useful to anticipate all errors when updating submodules :note: does nothing in bare repositories :note: method is definitely not atomic if recurisve is True :return: self
[ "Update", "the", "repository", "of", "this", "submodule", "to", "point", "to", "the", "checkout", "we", "point", "at", "with", "the", "binsha", "of", "this", "instance", "." ]
python
train
roclark/sportsreference
sportsreference/nfl/schedule.py
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nfl/schedule.py#L156-L203
def dataframe(self): """ Returns a pandas DataFrame containing all other class properties and values. The index for the DataFrame is the boxscore string. """ if self._points_scored is None and self._points_allowed is None: return None fields_to_include = { 'boxscore_index': self.boxscore_index, 'date': self.date, 'datetime': self.datetime, 'day': self.day, 'extra_points_attempted': self.extra_points_attempted, 'extra_points_made': self.extra_points_made, 'field_goals_attempted': self.field_goals_attempted, 'field_goals_made': self.field_goals_made, 'fourth_down_attempts': self.fourth_down_attempts, 'fourth_down_conversions': self.fourth_down_conversions, 'interceptions': self.interceptions, 'location': self.location, 'opponent_abbr': self.opponent_abbr, 'opponent_name': self.opponent_name, 'overtime': self.overtime, 'pass_attempts': self.pass_attempts, 'pass_completion_rate': self.pass_completion_rate, 'pass_completions': self.pass_completions, 'pass_touchdowns': self.pass_touchdowns, 'pass_yards': self.pass_yards, 'pass_yards_per_attempt': self.pass_yards_per_attempt, 'points_allowed': self.points_allowed, 'points_scored': self.points_scored, 'punt_yards': self.punt_yards, 'punts': self.punts, 'quarterback_rating': self.quarterback_rating, 'result': self.result, 'rush_attempts': self.rush_attempts, 'rush_touchdowns': self.rush_touchdowns, 'rush_yards': self.rush_yards, 'rush_yards_per_attempt': self.rush_yards_per_attempt, 'third_down_attempts': self.third_down_attempts, 'third_down_conversions': self.third_down_conversions, 'time_of_possession': self.time_of_possession, 'times_sacked': self.times_sacked, 'type': self.type, 'week': self.week, 'yards_lost_from_sacks': self.yards_lost_from_sacks } return pd.DataFrame([fields_to_include], index=[self._boxscore])
[ "def", "dataframe", "(", "self", ")", ":", "if", "self", ".", "_points_scored", "is", "None", "and", "self", ".", "_points_allowed", "is", "None", ":", "return", "None", "fields_to_include", "=", "{", "'boxscore_index'", ":", "self", ".", "boxscore_index", "...
Returns a pandas DataFrame containing all other class properties and values. The index for the DataFrame is the boxscore string.
[ "Returns", "a", "pandas", "DataFrame", "containing", "all", "other", "class", "properties", "and", "values", ".", "The", "index", "for", "the", "DataFrame", "is", "the", "boxscore", "string", "." ]
python
train
horazont/aioopenssl
aioopenssl/__init__.py
https://github.com/horazont/aioopenssl/blob/95cb39b5904d6a9702afcef6704181c850371081/aioopenssl/__init__.py#L726-L849
def create_starttls_connection( loop, protocol_factory, host=None, port=None, *, sock=None, ssl_context_factory=None, use_starttls=False, local_addr=None, **kwargs): """ Create a connection which can later be upgraded to use TLS. .. versionchanged:: 0.4 The `local_addr` argument was added. :param loop: The event loop to use. :type loop: :class:`asyncio.BaseEventLoop` :param protocol_factory: Factory for the protocol for the connection :param host: The host name or address to connect to :type host: :class:`str` or :data:`None` :param port: The port to connect to :type port: :class:`int` or :data:`None` :param sock: A socket to wrap (conflicts with `host` and `port`) :type sock: :class:`socket.socket` :param ssl_context_factory: Function which returns a :class:`OpenSSL.SSL.Context` to use for TLS operations :param use_starttls: Flag to control whether TLS is negotiated right away or deferredly. :type use_starttls: :class:`bool` :param local_addr: Address to bind to This is roughly a copy of the asyncio implementation of :meth:`asyncio.BaseEventLoop.create_connection`. It returns a pair ``(transport, protocol)``, where `transport` is a newly created :class:`STARTTLSTransport` instance. Further keyword arguments are forwarded to the constructor of :class:`STARTTLSTransport`. `loop` must be a :class:`asyncio.BaseEventLoop`, with support for :meth:`asyncio.BaseEventLoop.add_reader` and the corresponding writer and removal functions for sockets. This is typically a selector type event loop. `protocol_factory` must be a callable which (without any arguments) returns a :class:`asyncio.Protocol` which will be connected to the STARTTLS transport. `host` and `port` must be a hostname and a port number, or both :data:`None`. Both must be :data:`None`, if and only if `sock` is not :data:`None`. In that case, `sock` is used instead of a newly created socket. `sock` is put into non-blocking mode and must be a stream socket. 
If `use_starttls` is :data:`True`, no TLS handshake will be performed initially. Instead, the connection is established without any transport-layer security. It is expected that the :meth:`STARTTLSTransport.starttls` method is used when the application protocol requires TLS. If `use_starttls` is :data:`False`, the TLS handshake is initiated right away. `local_addr` may be an address to bind this side of the socket to. If omitted or :data:`None`, the local address is assigned by the operating system. This coroutine returns when the stream is established. If `use_starttls` is :data:`False`, this means that the full TLS handshake has to be finished for this coroutine to return. Otherwise, no TLS handshake takes place. It must be invoked using the :meth:`STARTTLSTransport.starttls` coroutine. """ if host is not None and port is not None: host_addrs = yield from loop.getaddrinfo( host, port, type=socket.SOCK_STREAM) exceptions = [] for family, type, proto, cname, address in host_addrs: sock = None try: sock = socket.socket(family=family, type=type, proto=proto) sock.setblocking(False) if local_addr is not None: sock.bind(local_addr) yield from loop.sock_connect(sock, address) except OSError as exc: if sock is not None: sock.close() exceptions.append(exc) else: break else: if len(exceptions) == 1: raise exceptions[0] model = str(exceptions[0]) if all(str(exc) == model for exc in exceptions): raise exceptions[0] try: from aioxmpp.errors import MultiOSError except ImportError: MultiOSError = OSError exc = MultiOSError( "could not connect to [{}]:{}".format(host, port), exceptions) raise exc elif sock is None: raise ValueError("sock must not be None if host and/or port are None") else: sock.setblocking(False) protocol = protocol_factory() waiter = asyncio.Future(loop=loop) transport = STARTTLSTransport(loop, sock, protocol, ssl_context_factory=ssl_context_factory, waiter=waiter, use_starttls=use_starttls, **kwargs) yield from waiter return transport, protocol
[ "def", "create_starttls_connection", "(", "loop", ",", "protocol_factory", ",", "host", "=", "None", ",", "port", "=", "None", ",", "*", ",", "sock", "=", "None", ",", "ssl_context_factory", "=", "None", ",", "use_starttls", "=", "False", ",", "local_addr", ...
Create a connection which can later be upgraded to use TLS. .. versionchanged:: 0.4 The `local_addr` argument was added. :param loop: The event loop to use. :type loop: :class:`asyncio.BaseEventLoop` :param protocol_factory: Factory for the protocol for the connection :param host: The host name or address to connect to :type host: :class:`str` or :data:`None` :param port: The port to connect to :type port: :class:`int` or :data:`None` :param sock: A socket to wrap (conflicts with `host` and `port`) :type sock: :class:`socket.socket` :param ssl_context_factory: Function which returns a :class:`OpenSSL.SSL.Context` to use for TLS operations :param use_starttls: Flag to control whether TLS is negotiated right away or deferredly. :type use_starttls: :class:`bool` :param local_addr: Address to bind to This is roughly a copy of the asyncio implementation of :meth:`asyncio.BaseEventLoop.create_connection`. It returns a pair ``(transport, protocol)``, where `transport` is a newly created :class:`STARTTLSTransport` instance. Further keyword arguments are forwarded to the constructor of :class:`STARTTLSTransport`. `loop` must be a :class:`asyncio.BaseEventLoop`, with support for :meth:`asyncio.BaseEventLoop.add_reader` and the corresponding writer and removal functions for sockets. This is typically a selector type event loop. `protocol_factory` must be a callable which (without any arguments) returns a :class:`asyncio.Protocol` which will be connected to the STARTTLS transport. `host` and `port` must be a hostname and a port number, or both :data:`None`. Both must be :data:`None`, if and only if `sock` is not :data:`None`. In that case, `sock` is used instead of a newly created socket. `sock` is put into non-blocking mode and must be a stream socket. If `use_starttls` is :data:`True`, no TLS handshake will be performed initially. Instead, the connection is established without any transport-layer security. 
It is expected that the :meth:`STARTTLSTransport.starttls` method is used when the application protocol requires TLS. If `use_starttls` is :data:`False`, the TLS handshake is initiated right away. `local_addr` may be an address to bind this side of the socket to. If omitted or :data:`None`, the local address is assigned by the operating system. This coroutine returns when the stream is established. If `use_starttls` is :data:`False`, this means that the full TLS handshake has to be finished for this coroutine to return. Otherwise, no TLS handshake takes place. It must be invoked using the :meth:`STARTTLSTransport.starttls` coroutine.
[ "Create", "a", "connection", "which", "can", "later", "be", "upgraded", "to", "use", "TLS", "." ]
python
train
DataONEorg/d1_python
gmn/src/d1_gmn/app/views/slice.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/views/slice.py#L79-L103
def _get_and_assert_slice_param(url_dict, param_name, default_int): """Return ``param_str`` converted to an int. If str cannot be converted to int or int is not zero or positive, raise InvalidRequest. """ param_str = url_dict['query'].get(param_name, default_int) try: n = int(param_str) except ValueError: raise d1_common.types.exceptions.InvalidRequest( 0, 'Slice parameter is not a valid integer. {}="{}"'.format( param_name, param_str ), ) if n < 0: raise d1_common.types.exceptions.InvalidRequest( 0, 'Slice parameter cannot be a negative number. {}="{}"'.format( param_name, param_str ), ) return n
[ "def", "_get_and_assert_slice_param", "(", "url_dict", ",", "param_name", ",", "default_int", ")", ":", "param_str", "=", "url_dict", "[", "'query'", "]", ".", "get", "(", "param_name", ",", "default_int", ")", "try", ":", "n", "=", "int", "(", "param_str", ...
Return ``param_str`` converted to an int. If str cannot be converted to int or int is not zero or positive, raise InvalidRequest.
[ "Return", "param_str", "converted", "to", "an", "int", "." ]
python
train
andymccurdy/redis-py
redis/client.py
https://github.com/andymccurdy/redis-py/blob/cdfe2befbe00db4a3c48c9ddd6d64dea15f6f0db/redis/client.py#L1086-L1105
def shutdown(self, save=False, nosave=False): """Shutdown the Redis server. If Redis has persistence configured, data will be flushed before shutdown. If the "save" option is set, a data flush will be attempted even if there is no persistence configured. If the "nosave" option is set, no data flush will be attempted. The "save" and "nosave" options cannot both be set. """ if save and nosave: raise DataError('SHUTDOWN save and nosave cannot both be set') args = ['SHUTDOWN'] if save: args.append('SAVE') if nosave: args.append('NOSAVE') try: self.execute_command(*args) except ConnectionError: # a ConnectionError here is expected return raise RedisError("SHUTDOWN seems to have failed.")
[ "def", "shutdown", "(", "self", ",", "save", "=", "False", ",", "nosave", "=", "False", ")", ":", "if", "save", "and", "nosave", ":", "raise", "DataError", "(", "'SHUTDOWN save and nosave cannot both be set'", ")", "args", "=", "[", "'SHUTDOWN'", "]", "if", ...
Shutdown the Redis server. If Redis has persistence configured, data will be flushed before shutdown. If the "save" option is set, a data flush will be attempted even if there is no persistence configured. If the "nosave" option is set, no data flush will be attempted. The "save" and "nosave" options cannot both be set.
[ "Shutdown", "the", "Redis", "server", ".", "If", "Redis", "has", "persistence", "configured", "data", "will", "be", "flushed", "before", "shutdown", ".", "If", "the", "save", "option", "is", "set", "a", "data", "flush", "will", "be", "attempted", "even", "...
python
train
bionikspoon/pureyaml
pureyaml/__init__.py
https://github.com/bionikspoon/pureyaml/blob/784830b907ca14525c4cecdb6ae35306f6f8a877/pureyaml/__init__.py#L47-L49
def dumps(obj, indent=None, default=None, sort_keys=False, **kw): """Dump string.""" return YAMLEncoder(indent=indent, default=default, sort_keys=sort_keys, **kw).encode(obj)
[ "def", "dumps", "(", "obj", ",", "indent", "=", "None", ",", "default", "=", "None", ",", "sort_keys", "=", "False", ",", "*", "*", "kw", ")", ":", "return", "YAMLEncoder", "(", "indent", "=", "indent", ",", "default", "=", "default", ",", "sort_keys...
Dump string.
[ "Dump", "string", "." ]
python
train
singularityhub/sregistry-cli
sregistry/main/s3/query.py
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/s3/query.py#L80-L103
def container_search(self, query, across_collections=False): '''search for a specific container. If across collections is False, the query is parsed as a full container name and a specific container is returned. If across_collections is True, the container is searched for across collections. If across collections is True, details are not shown''' results = self._search_all(quiet=True) matches = [] for result in results: # This is the container name if query in result[0]: matches.append(result) if len(matches) > 0: bot.info("Containers %s" %query) bot.table(matches) else: bot.info('No matches for %s found.' % name) return matches
[ "def", "container_search", "(", "self", ",", "query", ",", "across_collections", "=", "False", ")", ":", "results", "=", "self", ".", "_search_all", "(", "quiet", "=", "True", ")", "matches", "=", "[", "]", "for", "result", "in", "results", ":", "# This ...
search for a specific container. If across collections is False, the query is parsed as a full container name and a specific container is returned. If across_collections is True, the container is searched for across collections. If across collections is True, details are not shown
[ "search", "for", "a", "specific", "container", ".", "If", "across", "collections", "is", "False", "the", "query", "is", "parsed", "as", "a", "full", "container", "name", "and", "a", "specific", "container", "is", "returned", ".", "If", "across_collections", ...
python
test
Iotic-Labs/py-IoticAgent
src/IoticAgent/Core/AmqpLink.py
https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/Core/AmqpLink.py#L367-L371
def __recv_exc_clear(self, log_if_exc_set=None): """Equivalent to __send_exc_clear""" if not (log_if_exc_set is None or self.__recv_exc is None): logger.info(log_if_exc_set) self.__recv_exc = None
[ "def", "__recv_exc_clear", "(", "self", ",", "log_if_exc_set", "=", "None", ")", ":", "if", "not", "(", "log_if_exc_set", "is", "None", "or", "self", ".", "__recv_exc", "is", "None", ")", ":", "logger", ".", "info", "(", "log_if_exc_set", ")", "self", "....
Equivalent to __send_exc_clear
[ "Equivalent", "to", "__send_exc_clear" ]
python
train
cocaine/cocaine-tools
cocaine/tools/dispatch.py
https://github.com/cocaine/cocaine-tools/blob/d8834f8e04ca42817d5f4e368d471484d4b3419f/cocaine/tools/dispatch.py#L2018-L2026
def keyring_edit(**kwargs): """ Edits interactively the keyring. """ ctx = Context(**kwargs) ctx.timeout = None ctx.execute_action('keyring:edit', **{ 'storage': ctx.repo.create_secure_service('storage'), })
[ "def", "keyring_edit", "(", "*", "*", "kwargs", ")", ":", "ctx", "=", "Context", "(", "*", "*", "kwargs", ")", "ctx", ".", "timeout", "=", "None", "ctx", ".", "execute_action", "(", "'keyring:edit'", ",", "*", "*", "{", "'storage'", ":", "ctx", ".", ...
Edits interactively the keyring.
[ "Edits", "interactively", "the", "keyring", "." ]
python
train
Dallinger/Dallinger
dallinger/models.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/models.py#L519-L532
def vectors(self, failed=False): """ Get vectors in the network. failed = { False, True, "all" } To get the vectors to/from to a specific node, see Node.vectors(). """ if failed not in ["all", False, True]: raise ValueError("{} is not a valid vector failed".format(failed)) if failed == "all": return Vector.query.filter_by(network_id=self.id).all() else: return Vector.query.filter_by(network_id=self.id, failed=failed).all()
[ "def", "vectors", "(", "self", ",", "failed", "=", "False", ")", ":", "if", "failed", "not", "in", "[", "\"all\"", ",", "False", ",", "True", "]", ":", "raise", "ValueError", "(", "\"{} is not a valid vector failed\"", ".", "format", "(", "failed", ")", ...
Get vectors in the network. failed = { False, True, "all" } To get the vectors to/from to a specific node, see Node.vectors().
[ "Get", "vectors", "in", "the", "network", "." ]
python
train
Kozea/cairocffi
cairocffi/context.py
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/context.py#L741-L750
def set_matrix(self, matrix): """Modifies the current transformation matrix (CTM) by setting it equal to :obj:`matrix`. :param matrix: A transformation :class:`Matrix` from user space to device space. """ cairo.cairo_set_matrix(self._pointer, matrix._pointer) self._check_status()
[ "def", "set_matrix", "(", "self", ",", "matrix", ")", ":", "cairo", ".", "cairo_set_matrix", "(", "self", ".", "_pointer", ",", "matrix", ".", "_pointer", ")", "self", ".", "_check_status", "(", ")" ]
Modifies the current transformation matrix (CTM) by setting it equal to :obj:`matrix`. :param matrix: A transformation :class:`Matrix` from user space to device space.
[ "Modifies", "the", "current", "transformation", "matrix", "(", "CTM", ")", "by", "setting", "it", "equal", "to", ":", "obj", ":", "matrix", "." ]
python
train
hyperledger/sawtooth-core
cli/sawtooth_cli/transaction.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/cli/sawtooth_cli/transaction.py#L63-L125
def do_transaction(args): """Runs the transaction list or show command, printing to the console Args: args: The parsed arguments sent to the command at runtime """ rest_client = RestClient(args.url, args.user) if args.subcommand == 'list': transactions = rest_client.list_transactions() keys = ('transaction_id', 'family', 'version', 'size', 'payload') headers = tuple(k.upper() if k != 'version' else 'VERS' for k in keys) def parse_txn_row(transaction, decode=True): decoded = b64decode(transaction['payload']) return ( transaction['header_signature'], transaction['header']['family_name'], transaction['header']['family_version'], len(decoded), str(decoded) if decode else transaction['payload']) if args.format == 'default': fmt.print_terminal_table(headers, transactions, parse_txn_row) elif args.format == 'csv': fmt.print_csv(headers, transactions, parse_txn_row) elif args.format == 'json' or args.format == 'yaml': data = [{k: d for k, d in zip(keys, parse_txn_row(b, False))} for b in transactions] if args.format == 'yaml': fmt.print_yaml(data) elif args.format == 'json': fmt.print_json(data) else: raise AssertionError('Missing handler: {}'.format(args.format)) else: raise AssertionError('Missing handler: {}'.format(args.format)) if args.subcommand == 'show': output = rest_client.get_transaction(args.transaction_id) if args.key: if args.key == 'payload': output = b64decode(output['payload']) elif args.key in output: output = output[args.key] elif args.key in output['header']: output = output['header'][args.key] else: raise CliException( 'Key "{}" not found in transaction or header'.format( args.key)) if args.format == 'yaml': fmt.print_yaml(output) elif args.format == 'json': fmt.print_json(output) else: raise AssertionError('Missing handler: {}'.format(args.format))
[ "def", "do_transaction", "(", "args", ")", ":", "rest_client", "=", "RestClient", "(", "args", ".", "url", ",", "args", ".", "user", ")", "if", "args", ".", "subcommand", "==", "'list'", ":", "transactions", "=", "rest_client", ".", "list_transactions", "(...
Runs the transaction list or show command, printing to the console Args: args: The parsed arguments sent to the command at runtime
[ "Runs", "the", "transaction", "list", "or", "show", "command", "printing", "to", "the", "console" ]
python
train
bukun/TorCMS
torcms/model/post_model.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/model/post_model.py#L361-L381
def query_cat_recent(cat_id, label=None, num=8, kind='1', order=False): ''' Query recent posts of catalog. ''' if label: recent_recs = MPost.query_cat_recent_with_label( cat_id, label=label, num=num, kind=kind, order=order ) else: recent_recs = MPost.query_cat_recent_no_label( cat_id, num=num, kind=kind, order=order ) return recent_recs
[ "def", "query_cat_recent", "(", "cat_id", ",", "label", "=", "None", ",", "num", "=", "8", ",", "kind", "=", "'1'", ",", "order", "=", "False", ")", ":", "if", "label", ":", "recent_recs", "=", "MPost", ".", "query_cat_recent_with_label", "(", "cat_id", ...
Query recent posts of catalog.
[ "Query", "recent", "posts", "of", "catalog", "." ]
python
train
ARMmbed/yotta
yotta/lib/component.py
https://github.com/ARMmbed/yotta/blob/56bc1e56c602fa20307b23fe27518e9cd6c11af1/yotta/lib/component.py#L669-L689
def satisfyTarget(self, target_name_and_version, update_installed=False, additional_config=None, install_missing=True): ''' Ensure that the specified target name (and optionally version, github ref or URL) is installed in the targets directory of the current component returns (derived_target, errors) ''' # Target, , represent an installed target, internal from yotta.lib import target application_dir = None if self.isApplication(): application_dir = self.path return target.getDerivedTarget( target_name_and_version, self.targetsPath(), install_missing = install_missing, application_dir = application_dir, update_installed = update_installed, additional_config = additional_config, shrinkwrap = self.getShrinkwrap() )
[ "def", "satisfyTarget", "(", "self", ",", "target_name_and_version", ",", "update_installed", "=", "False", ",", "additional_config", "=", "None", ",", "install_missing", "=", "True", ")", ":", "# Target, , represent an installed target, internal", "from", "yotta", ".",...
Ensure that the specified target name (and optionally version, github ref or URL) is installed in the targets directory of the current component returns (derived_target, errors)
[ "Ensure", "that", "the", "specified", "target", "name", "(", "and", "optionally", "version", "github", "ref", "or", "URL", ")", "is", "installed", "in", "the", "targets", "directory", "of", "the", "current", "component" ]
python
valid
Iotic-Labs/py-IoticAgent
src/IoticAgent/Core/AmqpLink.py
https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/Core/AmqpLink.py#L374-L423
def __send_run(self): """Send request thread """ while not self.__end.is_set(): try: with Connection(userid=self.__prefix + self.__epid, password=self.__passwd, virtual_host=self.__vhost, heartbeat=self.__heartbeat, connect_timeout=self.__socket_timeout, operation_timeout=self.__socket_timeout, ssl=self.__get_ssl_context(self.__sslca), host=self.__host) as conn,\ conn.channel(auto_encode_decode=False) as channel: self.__send_channel = channel self.__send_exc_clear(log_if_exc_set='reconnected') self.__send_ready.set() try: self.__send_ready_callback(self.__send_exc_time) while not self.__end.is_set(): with self.__send_lock: try: # deal with any incoming messages (AMQP protocol only, not QAPI) conn.drain_events(0) except (BlockingIOError, SocketTimeout): pass conn.heartbeat_tick() # idle self.__end.wait(.25) finally: # locked so can make sure another call to send() is not made whilst shutting down with self.__send_lock: self.__send_ready.clear() except exceptions.AccessRefused: self.__send_log_set_exc_and_wait('Access Refused (Credentials already in use?)') except exceptions.ConnectionForced: self.__send_log_set_exc_and_wait('Disconnected by broker (ConnectionForced)') except SocketTimeout: self.__send_log_set_exc_and_wait('SocketTimeout exception. wrong credentials, vhost or prefix?') except SSLError: self.__send_log_set_exc_and_wait('ssl.SSLError Bad Certificate?') except (exceptions.AMQPError, SocketError): self.__send_log_set_exc_and_wait('amqp/transport failure, sleeping before retry') except: self.__send_log_set_exc_and_wait('unexpected failure, exiting', wait_seconds=0) break logger.debug('finished')
[ "def", "__send_run", "(", "self", ")", ":", "while", "not", "self", ".", "__end", ".", "is_set", "(", ")", ":", "try", ":", "with", "Connection", "(", "userid", "=", "self", ".", "__prefix", "+", "self", ".", "__epid", ",", "password", "=", "self", ...
Send request thread
[ "Send", "request", "thread" ]
python
train
PythonCharmers/python-future
src/future/backports/urllib/request.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/urllib/request.py#L2008-L2047
def open_data(self, url, data=None): """Use "data" URL.""" if not isinstance(url, str): raise URLError('data error: proxy support for data protocol currently not implemented') # ignore POSTed data # # syntax of data URLs: # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data # mediatype := [ type "/" subtype ] *( ";" parameter ) # data := *urlchar # parameter := attribute "=" value try: [type, data] = url.split(',', 1) except ValueError: raise IOError('data error', 'bad data URL') if not type: type = 'text/plain;charset=US-ASCII' semi = type.rfind(';') if semi >= 0 and '=' not in type[semi:]: encoding = type[semi+1:] type = type[:semi] else: encoding = '' msg = [] msg.append('Date: %s'%time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(time.time()))) msg.append('Content-type: %s' % type) if encoding == 'base64': # XXX is this encoding/decoding ok? data = base64.decodebytes(data.encode('ascii')).decode('latin-1') else: data = unquote(data) msg.append('Content-Length: %d' % len(data)) msg.append('') msg.append(data) msg = '\n'.join(msg) headers = email.message_from_string(msg) f = io.StringIO(msg) #f.fileno = None # needed for addinfourl return addinfourl(f, headers, url)
[ "def", "open_data", "(", "self", ",", "url", ",", "data", "=", "None", ")", ":", "if", "not", "isinstance", "(", "url", ",", "str", ")", ":", "raise", "URLError", "(", "'data error: proxy support for data protocol currently not implemented'", ")", "# ignore POSTed...
Use "data" URL.
[ "Use", "data", "URL", "." ]
python
train
mlperf/training
reinforcement/tensorflow/minigo/mask_flags.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/mask_flags.py#L80-L100
def prepare_subprocess_cmd(subprocess_cmd): """Prepares a subprocess command by running --helpfull and masking flags. Args: subprocess_cmd: List[str], what would be passed into subprocess.call() i.e. ['python', 'train.py', '--flagfile=flags'] Returns: ['python', 'train.py', '--train_flag=blah', '--more_flags'] """ help_cmd = subprocess_cmd + ['--helpfull'] help_output = subprocess.run(help_cmd, stdout=subprocess.PIPE).stdout help_output = help_output.decode('ascii') if 'python' in subprocess_cmd[0]: valid_flags = parse_helpfull_output(help_output) else: valid_flags = parse_helpfull_output(help_output, regex=FLAG_HELP_RE_CC) parsed_flags = flags.FlagValues().read_flags_from_files(subprocess_cmd[1:]) filtered_flags = filter_flags(parsed_flags, valid_flags) return [subprocess_cmd[0]] + filtered_flags
[ "def", "prepare_subprocess_cmd", "(", "subprocess_cmd", ")", ":", "help_cmd", "=", "subprocess_cmd", "+", "[", "'--helpfull'", "]", "help_output", "=", "subprocess", ".", "run", "(", "help_cmd", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", ".", "stdout"...
Prepares a subprocess command by running --helpfull and masking flags. Args: subprocess_cmd: List[str], what would be passed into subprocess.call() i.e. ['python', 'train.py', '--flagfile=flags'] Returns: ['python', 'train.py', '--train_flag=blah', '--more_flags']
[ "Prepares", "a", "subprocess", "command", "by", "running", "--", "helpfull", "and", "masking", "flags", "." ]
python
train
Radi85/Comment
comment/templatetags/comment_tags.py
https://github.com/Radi85/Comment/blob/c3c46afe51228cd7ee4e04f5e6164fff1be3a5bc/comment/templatetags/comment_tags.py#L68-L113
def get_comments(obj, request, oauth=False, paginate=False, cpp=10): """ Retrieves list of comments related to a certain object and renders The appropriate template to view it """ model_object = type(obj).objects.get(id=obj.id) comments = Comment.objects.filter_by_object(model_object) comments_count = comments.count() if paginate: paginator = Paginator(comments, cpp) page = request.GET.get('page') try: comments = paginator.page(page) except PageNotAnInteger: comments = paginator.page(1) except EmptyPage: comments = paginator.page(paginator.num_pages) try: profile_app_name = settings.PROFILE_APP_NAME profile_model_name = settings.PROFILE_MODEL_NAME except AttributeError: profile_app_name = None profile_model_name = None try: if settings.LOGIN_URL.startswith("/"): login_url = settings.LOGIN_URL else: login_url = "/" + settings.LOGIN_URL except AttributeError: login_url = "" return { "commentform": CommentForm(), "model_object": obj, "user": request.user, "comments": comments, # "comments_count": comments_count, "oauth": oauth, "profile_app_name": profile_app_name, "profile_model_name": profile_model_name, "paginate": paginate, "login_url": login_url, "cpp": cpp }
[ "def", "get_comments", "(", "obj", ",", "request", ",", "oauth", "=", "False", ",", "paginate", "=", "False", ",", "cpp", "=", "10", ")", ":", "model_object", "=", "type", "(", "obj", ")", ".", "objects", ".", "get", "(", "id", "=", "obj", ".", "...
Retrieves list of comments related to a certain object and renders The appropriate template to view it
[ "Retrieves", "list", "of", "comments", "related", "to", "a", "certain", "object", "and", "renders", "The", "appropriate", "template", "to", "view", "it" ]
python
train
BD2KGenomics/toil-scripts
src/toil_scripts/gatk_germline/germline.py
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/gatk_germline/germline.py#L483-L592
def prepare_bam(job, uuid, url, config, paired_url=None, rg_line=None): """ Prepares BAM file for Toil germline pipeline. Steps in pipeline 0: Download and align BAM or FASTQ sample 1: Sort BAM 2: Index BAM 3: Run GATK preprocessing pipeline (Optional) - Uploads preprocessed BAM to output directory :param JobFunctionWrappingJob job: passed automatically by Toil :param str uuid: Unique identifier for the sample :param str url: URL or local path to BAM file or FASTQs :param Namespace config: Configuration options for pipeline Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.genome_dict FilesStoreID for reference genome sequence dictionary file config.g1k_indel FileStoreID for 1000G INDEL resource file config.mills FileStoreID for Mills resource file config.dbsnp FileStoreID for dbSNP resource file config.suffix Suffix added to output filename config.output_dir URL or local path to output directory config.ssec Path to key file for SSE-C encryption config.cores Number of cores for each job config.xmx Java heap size in bytes :param str|None paired_url: URL or local path to paired FASTQ file, default is None :param str|None rg_line: RG line for BWA alignment (i.e. @RG\tID:foo\tSM:bar), default is None :return: BAM and BAI FileStoreIDs :rtype: tuple """ # 0: Align FASTQ or realign BAM if config.run_bwa: get_bam = job.wrapJobFn(setup_and_run_bwakit, uuid, url, rg_line, config, paired_url=paired_url).encapsulate() # 0: Download BAM elif '.bam' in url.lower(): job.fileStore.logToMaster("Downloading BAM: %s" % uuid) get_bam = job.wrapJobFn(download_url_job, url, name='toil.bam', s3_key_path=config.ssec, disk=config.file_size).encapsulate() else: raise ValueError('Could not generate BAM file for %s\n' 'Provide a FASTQ URL and set run-bwa or ' 'provide a BAM URL that includes .bam extension.' 
% uuid) # 1: Sort BAM file if necessary # Realigning BAM file shuffles read order if config.sorted and not config.run_bwa: sorted_bam = get_bam else: # The samtools sort disk requirement depends on the input bam, the tmp files, and the # sorted output bam. sorted_bam_disk = PromisedRequirement(lambda bam: 3 * bam.size, get_bam.rv()) sorted_bam = get_bam.addChildJobFn(run_samtools_sort, get_bam.rv(), cores=config.cores, disk=sorted_bam_disk) # 2: Index BAM # The samtools index disk requirement depends on the input bam and the output bam index index_bam_disk = PromisedRequirement(lambda bam: bam.size, sorted_bam.rv()) index_bam = job.wrapJobFn(run_samtools_index, sorted_bam.rv(), disk=index_bam_disk) job.addChild(get_bam) sorted_bam.addChild(index_bam) if config.preprocess: preprocess = job.wrapJobFn(run_gatk_preprocessing, sorted_bam.rv(), index_bam.rv(), config.genome_fasta, config.genome_dict, config.genome_fai, config.g1k_indel, config.mills, config.dbsnp, memory=config.xmx, cores=config.cores).encapsulate() sorted_bam.addChild(preprocess) index_bam.addChild(preprocess) # Update output BAM promises output_bam_promise = preprocess.rv(0) output_bai_promise = preprocess.rv(1) # Save processed BAM output_dir = os.path.join(config.output_dir, uuid) filename = '{}.preprocessed{}.bam'.format(uuid, config.suffix) output_bam = job.wrapJobFn(output_file_job, filename, preprocess.rv(0), output_dir, s3_key_path=config.ssec) preprocess.addChild(output_bam) else: output_bam_promise = sorted_bam.rv() output_bai_promise = index_bam.rv() return output_bam_promise, output_bai_promise
[ "def", "prepare_bam", "(", "job", ",", "uuid", ",", "url", ",", "config", ",", "paired_url", "=", "None", ",", "rg_line", "=", "None", ")", ":", "# 0: Align FASTQ or realign BAM", "if", "config", ".", "run_bwa", ":", "get_bam", "=", "job", ".", "wrapJobFn"...
Prepares BAM file for Toil germline pipeline. Steps in pipeline 0: Download and align BAM or FASTQ sample 1: Sort BAM 2: Index BAM 3: Run GATK preprocessing pipeline (Optional) - Uploads preprocessed BAM to output directory :param JobFunctionWrappingJob job: passed automatically by Toil :param str uuid: Unique identifier for the sample :param str url: URL or local path to BAM file or FASTQs :param Namespace config: Configuration options for pipeline Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.genome_dict FilesStoreID for reference genome sequence dictionary file config.g1k_indel FileStoreID for 1000G INDEL resource file config.mills FileStoreID for Mills resource file config.dbsnp FileStoreID for dbSNP resource file config.suffix Suffix added to output filename config.output_dir URL or local path to output directory config.ssec Path to key file for SSE-C encryption config.cores Number of cores for each job config.xmx Java heap size in bytes :param str|None paired_url: URL or local path to paired FASTQ file, default is None :param str|None rg_line: RG line for BWA alignment (i.e. @RG\tID:foo\tSM:bar), default is None :return: BAM and BAI FileStoreIDs :rtype: tuple
[ "Prepares", "BAM", "file", "for", "Toil", "germline", "pipeline", "." ]
python
train
pytroll/pyspectral
pyspectral/radiance_tb_conversion.py
https://github.com/pytroll/pyspectral/blob/fd296c0e0bdf5364fa180134a1292665d6bc50a3/pyspectral/radiance_tb_conversion.py#L130-L158
def _get_rsr(self): """ Get the relative spectral responses from file, find the bandname, and convert to the requested wave-spave (wavelength or wave number) """ sensor = RelativeSpectralResponse(self.platform_name, self.instrument) if self.wavespace == WAVE_NUMBER: LOG.debug("Converting to wavenumber...") self.rsr, info = convert2wavenumber(sensor.rsr) else: self.rsr = sensor.rsr info = {'unit': sensor.unit, 'si_scale': sensor.si_scale} self._wave_unit = info['unit'] self._wave_si_scale = info['si_scale'] if isinstance(self.band, str): self.bandname = BANDNAMES.get(self.instrument, BANDNAMES['generic']).get(self.band, self.band) elif isinstance(self.band, Number): self.bandwavelength = self.band self.bandname = get_bandname_from_wavelength(self.instrument, self.band, self.rsr) self.wavelength_or_wavenumber = (self.rsr[self.bandname][self.detector][self.wavespace] * self._wave_si_scale) self.response = self.rsr[self.bandname][self.detector]['response'] # Get the integral of the spectral response curve: self.rsr_integral = np.trapz(self.response, self.wavelength_or_wavenumber)
[ "def", "_get_rsr", "(", "self", ")", ":", "sensor", "=", "RelativeSpectralResponse", "(", "self", ".", "platform_name", ",", "self", ".", "instrument", ")", "if", "self", ".", "wavespace", "==", "WAVE_NUMBER", ":", "LOG", ".", "debug", "(", "\"Converting to ...
Get the relative spectral responses from file, find the bandname, and convert to the requested wave-spave (wavelength or wave number)
[ "Get", "the", "relative", "spectral", "responses", "from", "file", "find", "the", "bandname", "and", "convert", "to", "the", "requested", "wave", "-", "spave", "(", "wavelength", "or", "wave", "number", ")" ]
python
train
aio-libs/aioredis
aioredis/commands/geo.py
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/commands/geo.py#L26-L33
def geohash(self, key, member, *members, **kwargs): """Returns members of a geospatial index as standard geohash strings. :rtype: list[str or bytes or None] """ return self.execute( b'GEOHASH', key, member, *members, **kwargs )
[ "def", "geohash", "(", "self", ",", "key", ",", "member", ",", "*", "members", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "execute", "(", "b'GEOHASH'", ",", "key", ",", "member", ",", "*", "members", ",", "*", "*", "kwargs", ")" ]
Returns members of a geospatial index as standard geohash strings. :rtype: list[str or bytes or None]
[ "Returns", "members", "of", "a", "geospatial", "index", "as", "standard", "geohash", "strings", "." ]
python
train
GNS3/gns3-server
gns3server/compute/vpcs/vpcs_vm.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/vpcs/vpcs_vm.py#L106-L124
def _check_requirements(self): """ Check if VPCS is available with the correct version. """ path = self._vpcs_path() if not path: raise VPCSError("No path to a VPCS executable has been set") # This raise an error if ubridge is not available self.ubridge_path if not os.path.isfile(path): raise VPCSError("VPCS program '{}' is not accessible".format(path)) if not os.access(path, os.X_OK): raise VPCSError("VPCS program '{}' is not executable".format(path)) yield from self._check_vpcs_version()
[ "def", "_check_requirements", "(", "self", ")", ":", "path", "=", "self", ".", "_vpcs_path", "(", ")", "if", "not", "path", ":", "raise", "VPCSError", "(", "\"No path to a VPCS executable has been set\"", ")", "# This raise an error if ubridge is not available", "self",...
Check if VPCS is available with the correct version.
[ "Check", "if", "VPCS", "is", "available", "with", "the", "correct", "version", "." ]
python
train
BHSPitMonkey/vmflib
tools/buildbsp.py
https://github.com/BHSPitMonkey/vmflib/blob/322757fcba98e05041ee8f416c8ffe847ca1fe64/tools/buildbsp.py#L34-L45
def get_game_dir(self, username=False): """Returns joined game directory path relative to Steamapps""" if not self.common and not username: raise RuntimeError("Can't determine this game's directory without username") if self.common: subdir = "common" else: subdir = "username" subsubdir = self.dir if WIN32 or CYGWIN: subsubdir = subsubdir.lower() return os.path.join(subdir, subsubdir)
[ "def", "get_game_dir", "(", "self", ",", "username", "=", "False", ")", ":", "if", "not", "self", ".", "common", "and", "not", "username", ":", "raise", "RuntimeError", "(", "\"Can't determine this game's directory without username\"", ")", "if", "self", ".", "c...
Returns joined game directory path relative to Steamapps
[ "Returns", "joined", "game", "directory", "path", "relative", "to", "Steamapps" ]
python
test
profitbricks/profitbricks-sdk-python
profitbricks/client.py
https://github.com/profitbricks/profitbricks-sdk-python/blob/2c804b141688eccb07d6ae56601d5c60a62abebd/profitbricks/client.py#L1245-L1265
def get_server(self, datacenter_id, server_id, depth=1): """ Retrieves a server by its ID. :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param server_id: The unique ID of the server. :type server_id: ``str`` :param depth: The depth of the response data. :type depth: ``int`` """ response = self._perform_request( '/datacenters/%s/servers/%s?depth=%s' % ( datacenter_id, server_id, str(depth))) return response
[ "def", "get_server", "(", "self", ",", "datacenter_id", ",", "server_id", ",", "depth", "=", "1", ")", ":", "response", "=", "self", ".", "_perform_request", "(", "'/datacenters/%s/servers/%s?depth=%s'", "%", "(", "datacenter_id", ",", "server_id", ",", "str", ...
Retrieves a server by its ID. :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param server_id: The unique ID of the server. :type server_id: ``str`` :param depth: The depth of the response data. :type depth: ``int``
[ "Retrieves", "a", "server", "by", "its", "ID", "." ]
python
valid
MrYsLab/PyMata
PyMata/pymata.py
https://github.com/MrYsLab/PyMata/blob/7e0ec34670b5a0d3d6b74bcbe4f3808c845cc429/PyMata/pymata.py#L377-L406
def encoder_config(self, pin_a, pin_b, cb=None): """ This command enables the rotary encoder (2 pin + ground) and will enable encoder reporting. NOTE: This command is not currently part of standard arduino firmata, but is provided for legacy support of CodeShield on an Arduino UNO. Encoder data is retrieved by performing a digital_read from pin a (encoder pin 1) :param pin_a: Encoder pin 1. :param pin_b: Encoder pin 2. :param cb: callback function to report encoder changes :return: No return value """ data = [pin_a, pin_b] self._command_handler.digital_response_table[pin_a][self._command_handler.RESPONSE_TABLE_MODE] \ = self.ENCODER self._command_handler.digital_response_table[pin_a][self._command_handler.RESPONSE_TABLE_CALLBACK] = cb self.enable_digital_reporting(pin_a) self._command_handler.digital_response_table[pin_b][self._command_handler.RESPONSE_TABLE_MODE] \ = self.ENCODER self._command_handler.digital_response_table[pin_b][self._command_handler.RESPONSE_TABLE_CALLBACK] = cb self.enable_digital_reporting(pin_b) self._command_handler.send_sysex(self._command_handler.ENCODER_CONFIG, data)
[ "def", "encoder_config", "(", "self", ",", "pin_a", ",", "pin_b", ",", "cb", "=", "None", ")", ":", "data", "=", "[", "pin_a", ",", "pin_b", "]", "self", ".", "_command_handler", ".", "digital_response_table", "[", "pin_a", "]", "[", "self", ".", "_com...
This command enables the rotary encoder (2 pin + ground) and will enable encoder reporting. NOTE: This command is not currently part of standard arduino firmata, but is provided for legacy support of CodeShield on an Arduino UNO. Encoder data is retrieved by performing a digital_read from pin a (encoder pin 1) :param pin_a: Encoder pin 1. :param pin_b: Encoder pin 2. :param cb: callback function to report encoder changes :return: No return value
[ "This", "command", "enables", "the", "rotary", "encoder", "(", "2", "pin", "+", "ground", ")", "and", "will", "enable", "encoder", "reporting", "." ]
python
valid
benley/butcher
butcher/targets/gendeb.py
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/targets/gendeb.py#L270-L285
def validate_args(self): """Input validators for this rule type.""" base.BaseTarget.validate_args(self) params = self.params if params['extra_control_fields'] is not None: assert isinstance(params['extra_control_fields'], list), ( 'extra_control_fields must be a list of tuples, not %s' % type( params['extra_control_fields'])) for elem in params['extra_control_fields']: assert (isinstance(elem, tuple) and len(elem) == 1), ( 'extra_control_fields must be a list of 2-element tuples. ' 'Invalid contents: %s' % elem) pkgname_re = '^[a-z][a-z0-9+-.]+' assert re.match(pkgname_re, params['package_name']), ( 'Invalid package name: %s. Must match %s' % ( params['package_name'], pkgname_re))
[ "def", "validate_args", "(", "self", ")", ":", "base", ".", "BaseTarget", ".", "validate_args", "(", "self", ")", "params", "=", "self", ".", "params", "if", "params", "[", "'extra_control_fields'", "]", "is", "not", "None", ":", "assert", "isinstance", "(...
Input validators for this rule type.
[ "Input", "validators", "for", "this", "rule", "type", "." ]
python
train
buzzfeed/caliendo
caliendo/facade.py
https://github.com/buzzfeed/caliendo/blob/1628a10f7782ad67c0422b5cbc9bf4979ac40abc/caliendo/facade.py#L208-L221
def __store_callable(self, o, method_name, member): """ Stores a callable member to the private __store__ :param mixed o: Any callable (function or method) :param str method_name: The name of the attribute :param mixed member: A reference to the member """ self.__store__['callables'][method_name] = eval( "o." + method_name ) self.__store__['callables'][method_name[0].lower() + method_name[1:]] = eval( "o." + method_name ) ret_val = self.__wrap( method_name ) self.__store__[ method_name ] = ret_val self.__store__[ method_name[0].lower() + method_name[1:] ] = ret_val
[ "def", "__store_callable", "(", "self", ",", "o", ",", "method_name", ",", "member", ")", ":", "self", ".", "__store__", "[", "'callables'", "]", "[", "method_name", "]", "=", "eval", "(", "\"o.\"", "+", "method_name", ")", "self", ".", "__store__", "[",...
Stores a callable member to the private __store__ :param mixed o: Any callable (function or method) :param str method_name: The name of the attribute :param mixed member: A reference to the member
[ "Stores", "a", "callable", "member", "to", "the", "private", "__store__" ]
python
train
tcalmant/ipopo
pelix/ipopo/handlers/requires.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/handlers/requires.py#L428-L439
def stop(self): """ Stops the dependency manager (must be called before clear()) :return: The removed bindings (list) or None """ super(SimpleDependency, self).stop() if self.reference is not None: # Return a tuple of tuple return ((self._value, self.reference),) return None
[ "def", "stop", "(", "self", ")", ":", "super", "(", "SimpleDependency", ",", "self", ")", ".", "stop", "(", ")", "if", "self", ".", "reference", "is", "not", "None", ":", "# Return a tuple of tuple", "return", "(", "(", "self", ".", "_value", ",", "sel...
Stops the dependency manager (must be called before clear()) :return: The removed bindings (list) or None
[ "Stops", "the", "dependency", "manager", "(", "must", "be", "called", "before", "clear", "()", ")" ]
python
train
snare/scruffy
scruffy/file.py
https://github.com/snare/scruffy/blob/0fedc08cfdb6db927ff93c09f25f24ce5a04c541/scruffy/file.py#L348-L358
def cleanup(self): """ Clean up children and remove the directory. Directory will only be removed if the cleanup flag is set. """ for k in self._children: self._children[k].cleanup() if self._cleanup: self.remove(True)
[ "def", "cleanup", "(", "self", ")", ":", "for", "k", "in", "self", ".", "_children", ":", "self", ".", "_children", "[", "k", "]", ".", "cleanup", "(", ")", "if", "self", ".", "_cleanup", ":", "self", ".", "remove", "(", "True", ")" ]
Clean up children and remove the directory. Directory will only be removed if the cleanup flag is set.
[ "Clean", "up", "children", "and", "remove", "the", "directory", "." ]
python
test
mardix/Yass
yass/utils.py
https://github.com/mardix/Yass/blob/32f804c1a916f5b0a13d13fa750e52be3b6d666d/yass/utils.py#L58-L69
def load_conf(yml_file, conf={}): """ To load the config :param yml_file: the config file path :param conf: dict, to override global config :return: dict """ with open(yml_file) as f: data = yaml.load(f) if conf: data.update(conf) return dictdot(data)
[ "def", "load_conf", "(", "yml_file", ",", "conf", "=", "{", "}", ")", ":", "with", "open", "(", "yml_file", ")", "as", "f", ":", "data", "=", "yaml", ".", "load", "(", "f", ")", "if", "conf", ":", "data", ".", "update", "(", "conf", ")", "retur...
To load the config :param yml_file: the config file path :param conf: dict, to override global config :return: dict
[ "To", "load", "the", "config", ":", "param", "yml_file", ":", "the", "config", "file", "path", ":", "param", "conf", ":", "dict", "to", "override", "global", "config", ":", "return", ":", "dict" ]
python
train
saltstack/salt
salt/utils/vmware.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1212-L1264
def get_dvportgroups(parent_ref, portgroup_names=None, get_all_portgroups=False): ''' Returns distributed virtual porgroups (dvportgroups). The parent object can be either a datacenter or a dvs. parent_ref The parent object reference. Can be either a datacenter or a dvs. portgroup_names The names of the dvss to return. Default is None. get_all_portgroups Return all portgroups in the parent. Default is False. ''' if not (isinstance(parent_ref, (vim.Datacenter, vim.DistributedVirtualSwitch))): raise salt.exceptions.ArgumentValueError( 'Parent has to be either a datacenter, ' 'or a distributed virtual switch') parent_name = get_managed_object_name(parent_ref) log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', ' 'get_all_portgroups=%s', type(parent_ref).__name__, parent_name, ','.join(portgroup_names) if portgroup_names else None, get_all_portgroups) properties = ['name'] if isinstance(parent_ref, vim.Datacenter): traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='networkFolder', skip=True, type=vim.Datacenter, selectSet=[vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', skip=False, type=vim.Folder)]) else: # parent is distributed virtual switch traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) service_instance = get_service_instance_from_managed_object(parent_ref) items = [i['object'] for i in get_mors_with_properties(service_instance, vim.DistributedVirtualPortgroup, container_ref=parent_ref, property_list=properties, traversal_spec=traversal_spec) if get_all_portgroups or (portgroup_names and i['name'] in portgroup_names)] return items
[ "def", "get_dvportgroups", "(", "parent_ref", ",", "portgroup_names", "=", "None", ",", "get_all_portgroups", "=", "False", ")", ":", "if", "not", "(", "isinstance", "(", "parent_ref", ",", "(", "vim", ".", "Datacenter", ",", "vim", ".", "DistributedVirtualSwi...
Returns distributed virtual portgroups (dvportgroups). The parent object can be either a datacenter or a dvs. parent_ref The parent object reference. Can be either a datacenter or a dvs. portgroup_names The names of the portgroups to return. Default is None. get_all_portgroups Return all portgroups in the parent. Default is False.
[ "Returns", "distributed", "virtual", "portgroups", "(", "dvportgroups", ")", ".", "The", "parent", "object", "can", "be", "either", "a", "datacenter", "or", "a", "dvs", "." ]
python
train
ladybug-tools/ladybug
ladybug/designday.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/designday.py#L1029-L1047
def from_json(cls, data): """Create a Wind Condition from a dictionary. Args: data = { "wind_speed": float, "wind_direction": float, "rain": bool, "snow_on_ground": bool} """ # Check required and optional keys optional_keys = {'wind_direction': 0, 'rain': False, 'snow_on_ground': False} assert 'wind_speed' in data, 'Required key "wind_speed" is missing!' for key, val in optional_keys.items(): if key not in data: data[key] = val return cls(data['wind_speed'], data['wind_direction'], data['rain'], data['snow_on_ground'])
[ "def", "from_json", "(", "cls", ",", "data", ")", ":", "# Check required and optional keys", "optional_keys", "=", "{", "'wind_direction'", ":", "0", ",", "'rain'", ":", "False", ",", "'snow_on_ground'", ":", "False", "}", "assert", "'wind_speed'", "in", "data",...
Create a Wind Condition from a dictionary. Args: data = { "wind_speed": float, "wind_direction": float, "rain": bool, "snow_on_ground": bool}
[ "Create", "a", "Wind", "Condition", "from", "a", "dictionary", "." ]
python
train
diging/tethne
tethne/serialize/paper.py
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/serialize/paper.py#L346-L424
def get_details_from_inst_literal(self, institute_literal, institution_id, institution_instance_id, paper_key): """ This method parses the institute literal to get the following 1. Department naame 2. Country 3. University name 4. ZIP, STATE AND CITY (Only if the country is USA. For other countries the standard may vary. So parsing these values becomes very difficult. However, the complete address can be found in the column "AddressLine1" Parameters ---------- institute_literal -> The literal value of the institute institution_id -> the Primary key value which is to be added in the fixture institution_instance_id -> Primary key value which is to be added in the fixture paper_key -> The Paper key which is used for the Institution Instance Returns ------- """ institute_details = institute_literal.split(',') institute_name = institute_details[0] country = institute_details[len(institute_details)-1].lstrip().replace('.', '') institute_row = None zipcode = "" state = "" city = "" if 'USA' in country: temp = country if(len(temp.split())) == 3: country = temp.split()[2] zipcode = temp.split()[1] state = temp.split()[0] elif(len(temp.split())) == 2: country = temp.split()[1] state = temp.split()[0] city = institute_details[len(institute_details)-2].lstrip() addressline1 = "" for i in range(1, len(institute_details)-1, 1): if i != len(institute_details)-2: addressline1 = addressline1 + institute_details[i]+',' else: addressline1 = addressline1 + institute_details[i] if institute_literal not in self.instituteIdMap: self.instituteIdMap[institute_literal] = institution_id institute_row = { "model": "django-tethne.institution", "pk": institution_id, "fields": { "institute_name": institute_name, "addressLine1": addressline1, "country": country, "zip": zipcode, "state": state, "city": city } } department = "" if re.search('Dept([^,]*),', institute_literal) is not None: department = re.search('Dept([^,]*),', institute_literal).group().replace(',', '') institute_instance_row = { 
"model": "django-tethne.institution_instance", "pk": institution_instance_id, "fields": { "institution": self.instituteIdMap[institute_literal], "literal": institute_literal, "institute_name": institute_name, "addressLine1": addressline1, "country": country, "paper": self.paperIdMap[paper_key], "department": department, "zip": zipcode, "state": state, "city": city } } return institute_row, institute_instance_row
[ "def", "get_details_from_inst_literal", "(", "self", ",", "institute_literal", ",", "institution_id", ",", "institution_instance_id", ",", "paper_key", ")", ":", "institute_details", "=", "institute_literal", ".", "split", "(", "','", ")", "institute_name", "=", "inst...
This method parses the institute literal to get the following 1. Department name 2. Country 3. University name 4. ZIP, STATE AND CITY (Only if the country is USA. For other countries the standard may vary. So parsing these values becomes very difficult. However, the complete address can be found in the column "AddressLine1" Parameters ---------- institute_literal -> The literal value of the institute institution_id -> the Primary key value which is to be added in the fixture institution_instance_id -> Primary key value which is to be added in the fixture paper_key -> The Paper key which is used for the Institution Instance Returns -------
[ "This", "method", "parses", "the", "institute", "literal", "to", "get", "the", "following", "1", ".", "Department", "name", "2", ".", "Country", "3", ".", "University", "name", "4", ".", "ZIP", "STATE", "AND", "CITY", "(", "Only", "if", "the", "country"...
python
train
QualiSystems/vCenterShell
package/cloudshell/cp/vcenter/common/model_factory.py
https://github.com/QualiSystems/vCenterShell/blob/e2e24cd938a92a68f4a8e6a860810d3ef72aae6d/package/cloudshell/cp/vcenter/common/model_factory.py#L166-L179
def get_property_name_from_attribute_name(attribute): """ Returns property name from attribute name :param attribute: Attribute name, may contain upper and lower case and spaces :return: string """ if isinstance(attribute, str) or isinstance(attribute, unicode): attribute_name = attribute elif hasattr(attribute, 'Name'): attribute_name = attribute.Name else: raise Exception('Attribute type {0} is not supported'.format(str(type(attribute)))) return attribute_name.lower().replace(' ', '_')
[ "def", "get_property_name_from_attribute_name", "(", "attribute", ")", ":", "if", "isinstance", "(", "attribute", ",", "str", ")", "or", "isinstance", "(", "attribute", ",", "unicode", ")", ":", "attribute_name", "=", "attribute", "elif", "hasattr", "(", "attrib...
Returns property name from attribute name :param attribute: Attribute name, may contain upper and lower case and spaces :return: string
[ "Returns", "property", "name", "from", "attribute", "name", ":", "param", "attribute", ":", "Attribute", "name", "may", "contain", "upper", "and", "lower", "case", "and", "spaces", ":", "return", ":", "string" ]
python
train
olitheolix/qtmacs
qtmacs/extensions/qtmacsscintilla_widget.py
https://github.com/olitheolix/qtmacs/blob/36253b082b82590f183fe154b053eb3a1e741be2/qtmacs/extensions/qtmacsscintilla_widget.py#L458-L474
def commit(self): """ Put the document into the new state. """ if self.textAfter is None: # If this is the first 'commit' call then do not make # any changes but store the current document state # and its style. line, col = self.qteWidget.getNumLinesAndColumns() text, style = self.qteWidget.SCIGetStyledText((0, 0, line, col)) self.styleAfter = style self.textAfter = text.decode('utf-8') else: # Put the document into the 'after' state. self.baseClass.setText(self.textAfter) self.qteWidget.SCISetStylingEx(0, 0, self.styleAfter) self.placeCursor(*self.origPosition)
[ "def", "commit", "(", "self", ")", ":", "if", "self", ".", "textAfter", "is", "None", ":", "# If this is the first 'commit' call then do not make", "# any changes but store the current document state", "# and its style.", "line", ",", "col", "=", "self", ".", "qteWidget",...
Put the document into the new state.
[ "Put", "the", "document", "into", "the", "new", "state", "." ]
python
train
pricingassistant/mrq
mrq/agent.py
https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/agent.py#L159-L191
def queuestats(self): """ Compute ETAs for every known queue & subqueue """ start_time = time.time() log.debug("Starting queue stats...") # Fetch all known queues queues = [Queue(q) for q in Queue.all_known()] new_queues = {queue.id for queue in queues} old_queues = set(self.queue_etas.keys()) for deleted_queue in old_queues.difference(new_queues): self.queue_etas.pop(deleted_queue) t = time.time() stats = {} for queue in queues: cnt = queue.count_jobs_to_dequeue() eta = self.queue_etas[queue.id].next(cnt, t=t) # Number of jobs to dequeue, ETA, Time of stats stats[queue.id] = "%d %s %d" % (cnt, eta if eta is not None else "N", int(t)) with connections.redis.pipeline(transaction=True) as pipe: if random.randint(0, 100) == 0 or len(stats) == 0: pipe.delete(self.redis_queuestats_key) if len(stats) > 0: pipe.hmset(self.redis_queuestats_key, stats) pipe.execute() log.debug("... done queue stats in %0.4fs" % (time.time() - start_time))
[ "def", "queuestats", "(", "self", ")", ":", "start_time", "=", "time", ".", "time", "(", ")", "log", ".", "debug", "(", "\"Starting queue stats...\"", ")", "# Fetch all known queues", "queues", "=", "[", "Queue", "(", "q", ")", "for", "q", "in", "Queue", ...
Compute ETAs for every known queue & subqueue
[ "Compute", "ETAs", "for", "every", "known", "queue", "&", "subqueue" ]
python
train
saltstack/salt
salt/modules/vsphere.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/vsphere.py#L5792-L5812
def _validate_entity(entity): ''' Validates the entity dict representation entity Dictionary representation of an entity. See ``_get_entity`` docstrings for format. ''' #Validate entity: if entity['type'] == 'cluster': schema = ESXClusterEntitySchema.serialize() elif entity['type'] == 'vcenter': schema = VCenterEntitySchema.serialize() else: raise ArgumentValueError('Unsupported entity type \'{0}\'' ''.format(entity['type'])) try: jsonschema.validate(entity, schema) except jsonschema.exceptions.ValidationError as exc: raise InvalidEntityError(exc)
[ "def", "_validate_entity", "(", "entity", ")", ":", "#Validate entity:", "if", "entity", "[", "'type'", "]", "==", "'cluster'", ":", "schema", "=", "ESXClusterEntitySchema", ".", "serialize", "(", ")", "elif", "entity", "[", "'type'", "]", "==", "'vcenter'", ...
Validates the entity dict representation entity Dictionary representation of an entity. See ``_get_entity`` docstrings for format.
[ "Validates", "the", "entity", "dict", "representation" ]
python
train
saltstack/salt
salt/modules/saltutil.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/saltutil.py#L1207-L1234
def clear_job_cache(hours=24): ''' Forcibly removes job cache folders and files on a minion. .. versionadded:: 2018.3.0 WARNING: The safest way to clear a minion cache is by first stopping the minion and then deleting the cache files before restarting it. CLI Example: .. code-block:: bash salt '*' saltutil.clear_job_cache hours=12 ''' threshold = time.time() - hours * 60 * 60 for root, dirs, files in salt.utils.files.safe_walk(os.path.join(__opts__['cachedir'], 'minion_jobs'), followlinks=False): for name in dirs: try: directory = os.path.join(root, name) mtime = os.path.getmtime(directory) if mtime < threshold: shutil.rmtree(directory) except OSError as exc: log.error('Attempt to clear cache with saltutil.clear_job_cache FAILED with: %s', exc) return False return True
[ "def", "clear_job_cache", "(", "hours", "=", "24", ")", ":", "threshold", "=", "time", ".", "time", "(", ")", "-", "hours", "*", "60", "*", "60", "for", "root", ",", "dirs", ",", "files", "in", "salt", ".", "utils", ".", "files", ".", "safe_walk", ...
Forcibly removes job cache folders and files on a minion. .. versionadded:: 2018.3.0 WARNING: The safest way to clear a minion cache is by first stopping the minion and then deleting the cache files before restarting it. CLI Example: .. code-block:: bash salt '*' saltutil.clear_job_cache hours=12
[ "Forcibly", "removes", "job", "cache", "folders", "and", "files", "on", "a", "minion", "." ]
python
train
saltstack/salt
salt/modules/mac_power.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_power.py#L192-L217
def set_display_sleep(minutes): ''' Set the amount of idle time until the display sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_display_sleep 120 salt '*' power.set_display_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setdisplaysleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_display_sleep, )
[ "def", "set_display_sleep", "(", "minutes", ")", ":", "value", "=", "_validate_sleep", "(", "minutes", ")", "cmd", "=", "'systemsetup -setdisplaysleep {0}'", ".", "format", "(", "value", ")", "salt", ".", "utils", ".", "mac_utils", ".", "execute_return_success", ...
Set the amount of idle time until the display sleeps. Pass "Never" or "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_display_sleep 120 salt '*' power.set_display_sleep off
[ "Set", "the", "amount", "of", "idle", "time", "until", "the", "display", "sleeps", ".", "Pass", "Never", "or", "Off", "to", "never", "sleep", "." ]
python
train
mitsei/dlkit
dlkit/json_/assessment/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/objects.py#L1097-L1112
def set_rubric(self, assessment_id): """Sets the rubric expressed as another assessment. arg: assessment_id (osid.id.Id): the assessment ``Id`` raise: InvalidArgument - ``assessment_id`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``assessment_id`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceForm.set_avatar_template if self.get_rubric_metadata().is_read_only(): raise errors.NoAccess() if not self._is_valid_id(assessment_id): raise errors.InvalidArgument() self._my_map['rubricId'] = str(assessment_id)
[ "def", "set_rubric", "(", "self", ",", "assessment_id", ")", ":", "# Implemented from template for osid.resource.ResourceForm.set_avatar_template", "if", "self", ".", "get_rubric_metadata", "(", ")", ".", "is_read_only", "(", ")", ":", "raise", "errors", ".", "NoAccess"...
Sets the rubric expressed as another assessment. arg: assessment_id (osid.id.Id): the assessment ``Id`` raise: InvalidArgument - ``assessment_id`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``assessment_id`` is ``null`` *compliance: mandatory -- This method must be implemented.*
[ "Sets", "the", "rubric", "expressed", "as", "another", "assessment", "." ]
python
train
aio-libs/aioftp
aioftp/server.py
https://github.com/aio-libs/aioftp/blob/b45395b1aba41301b898040acade7010e6878a08/aioftp/server.py#L504-L523
async def response_writer(self, stream, response_queue): """ :py:func:`asyncio.coroutine` Worker for write_response with current connection. Get data to response from queue, this is for right order of responses. Exits if received :py:class:`None`. :param stream: command connection stream :type connection: :py:class:`aioftp.StreamIO` :param response_queue: :type response_queue: :py:class:`asyncio.Queue` """ while True: args = await response_queue.get() try: await self.write_response(stream, *args) finally: response_queue.task_done()
[ "async", "def", "response_writer", "(", "self", ",", "stream", ",", "response_queue", ")", ":", "while", "True", ":", "args", "=", "await", "response_queue", ".", "get", "(", ")", "try", ":", "await", "self", ".", "write_response", "(", "stream", ",", "*...
:py:func:`asyncio.coroutine` Worker for write_response with current connection. Get data to response from queue, this is for right order of responses. Exits if received :py:class:`None`. :param stream: command connection stream :type connection: :py:class:`aioftp.StreamIO` :param response_queue: :type response_queue: :py:class:`asyncio.Queue`
[ ":", "py", ":", "func", ":", "asyncio", ".", "coroutine" ]
python
valid
yeraydiazdiaz/lunr.py
lunr/builder.py
https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/builder.py#L181-L197
def build(self): """Builds the index, creating an instance of `lunr.Index`. This completes the indexing process and should only be called once all documents have been added to the index. """ self._calculate_average_field_lengths() self._create_field_vectors() self._create_token_set() return Index( inverted_index=self.inverted_index, field_vectors=self.field_vectors, token_set=self.token_set, fields=list(self._fields.keys()), pipeline=self.search_pipeline, )
[ "def", "build", "(", "self", ")", ":", "self", ".", "_calculate_average_field_lengths", "(", ")", "self", ".", "_create_field_vectors", "(", ")", "self", ".", "_create_token_set", "(", ")", "return", "Index", "(", "inverted_index", "=", "self", ".", "inverted_...
Builds the index, creating an instance of `lunr.Index`. This completes the indexing process and should only be called once all documents have been added to the index.
[ "Builds", "the", "index", "creating", "an", "instance", "of", "lunr", ".", "Index", "." ]
python
train
eng-tools/sfsimodels
sfsimodels/models/soils.py
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/soils.py#L1181-L1190
def get_v_eff_stress_at_depth(self, y_c): """ Determine the vertical effective stress at a single depth z_c. :param y_c: float, depth from surface """ sigma_v_c = self.get_v_total_stress_at_depth(y_c) pp = self.get_hydrostatic_pressure_at_depth(y_c) sigma_veff_c = sigma_v_c - pp return sigma_veff_c
[ "def", "get_v_eff_stress_at_depth", "(", "self", ",", "y_c", ")", ":", "sigma_v_c", "=", "self", ".", "get_v_total_stress_at_depth", "(", "y_c", ")", "pp", "=", "self", ".", "get_hydrostatic_pressure_at_depth", "(", "y_c", ")", "sigma_veff_c", "=", "sigma_v_c", ...
Determine the vertical effective stress at a single depth z_c. :param y_c: float, depth from surface
[ "Determine", "the", "vertical", "effective", "stress", "at", "a", "single", "depth", "z_c", "." ]
python
train
viniciuschiele/flask-io
flask_io/io.py
https://github.com/viniciuschiele/flask-io/blob/4e559419b3d8e6859f83fa16557b00542d5f3aa7/flask_io/io.py#L120-L131
def ok(self, data, schema=None, envelope=None): """ Gets a 200 response with the specified data. :param data: The content value. :param schema: The schema to serialize the data. :param envelope: The key used to envelope the data. :return: A Flask response object. """ data = marshal(data, schema, envelope) return self.__make_response(data)
[ "def", "ok", "(", "self", ",", "data", ",", "schema", "=", "None", ",", "envelope", "=", "None", ")", ":", "data", "=", "marshal", "(", "data", ",", "schema", ",", "envelope", ")", "return", "self", ".", "__make_response", "(", "data", ")" ]
Gets a 200 response with the specified data. :param data: The content value. :param schema: The schema to serialize the data. :param envelope: The key used to envelope the data. :return: A Flask response object.
[ "Gets", "a", "200", "response", "with", "the", "specified", "data", "." ]
python
train
Yelp/kafka-utils
kafka_utils/util/validation.py
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/validation.py#L36-L47
def assignment_to_plan(assignment): """Convert an assignment to the format used by Kafka to describe a reassignment plan. """ return { 'version': 1, 'partitions': [{'topic': t_p[0], 'partition': t_p[1], 'replicas': replica } for t_p, replica in six.iteritems(assignment)] }
[ "def", "assignment_to_plan", "(", "assignment", ")", ":", "return", "{", "'version'", ":", "1", ",", "'partitions'", ":", "[", "{", "'topic'", ":", "t_p", "[", "0", "]", ",", "'partition'", ":", "t_p", "[", "1", "]", ",", "'replicas'", ":", "replica", ...
Convert an assignment to the format used by Kafka to describe a reassignment plan.
[ "Convert", "an", "assignment", "to", "the", "format", "used", "by", "Kafka", "to", "describe", "a", "reassignment", "plan", "." ]
python
train
frictionlessdata/tabulator-py
tabulator/helpers.py
https://github.com/frictionlessdata/tabulator-py/blob/06c25845a7139d919326388cc6335f33f909db8c/tabulator/helpers.py#L105-L109
def detect_html(text): """Detect if text is HTML. """ pattern = re.compile('\\s*<(!doctype|html)', re.IGNORECASE) return bool(pattern.match(text))
[ "def", "detect_html", "(", "text", ")", ":", "pattern", "=", "re", ".", "compile", "(", "'\\\\s*<(!doctype|html)'", ",", "re", ".", "IGNORECASE", ")", "return", "bool", "(", "pattern", ".", "match", "(", "text", ")", ")" ]
Detect if text is HTML.
[ "Detect", "if", "text", "is", "HTML", "." ]
python
train
awickert/gFlex
gflex/base.py
https://github.com/awickert/gFlex/blob/3ac32249375b0f8d342a142585d86ea4d905a5a0/gflex/base.py#L100-L135
def greatCircleDistance(self, lat1, long1, lat2, long2, radius): """ Returns the great circle distance between two points. Useful when using the SAS_NG solution in lat/lon coordinates Modified from http://www.johndcook.com/blog/python_longitude_latitude/ It should be able to take numpy arrays. """ # Convert latitude and longitude to # spherical coordinates in radians. degrees_to_radians = np.pi/180.0 # theta = colatitude = 90 - latitude theta1rad = (90.0 - lat1)*degrees_to_radians theta2rad = (90.0 - lat2)*degrees_to_radians # lambda = longitude lambda1rad = long1*degrees_to_radians lambda2rad = long2*degrees_to_radians # Compute spherical distance from spherical coordinates. # For two locations in spherical coordinates # (1, theta, phi) and (1, theta, phi) # cosine( arc length ) = # sin(theta) * sin(theta') * cos(theta-theta') + cos(phi) * cos(phi') # distance = radius * arc length cos_arc_length = np.sin(theta1rad) * np.sin(theta2rad) * \ np.cos(lambda1rad - lambda2rad) + \ np.cos(theta1rad) * np.cos(theta2rad) arc = np.arccos( cos_arc_length ) great_circle_distance = radius * arc return great_circle_distance
[ "def", "greatCircleDistance", "(", "self", ",", "lat1", ",", "long1", ",", "lat2", ",", "long2", ",", "radius", ")", ":", "# Convert latitude and longitude to", "# spherical coordinates in radians.", "degrees_to_radians", "=", "np", ".", "pi", "/", "180.0", "# theta...
Returns the great circle distance between two points. Useful when using the SAS_NG solution in lat/lon coordinates Modified from http://www.johndcook.com/blog/python_longitude_latitude/ It should be able to take numpy arrays.
[ "Returns", "the", "great", "circle", "distance", "between", "two", "points", ".", "Useful", "when", "using", "the", "SAS_NG", "solution", "in", "lat", "/", "lon", "coordinates", "Modified", "from", "http", ":", "//", "www", ".", "johndcook", ".", "com", "/...
python
train
rocky/python-uncompyle6
uncompyle6/semantics/pysource.py
https://github.com/rocky/python-uncompyle6/blob/c5d7944e657f0ad05a0e2edd34e1acb27001abc0/uncompyle6/semantics/pysource.py#L2059-L2156
def build_class(self, code): """Dump class definition, doc string and class body.""" assert iscode(code) self.classes.append(self.currentclass) code = Code(code, self.scanner, self.currentclass) indent = self.indent # self.println(indent, '#flags:\t', int(code.co_flags)) ast = self.build_ast(code._tokens, code._customize) code._tokens = None # save memory assert ast == 'stmts' first_stmt = ast[0][0] if 3.0 <= self.version <= 3.3: try: if first_stmt[0] == 'store_locals': if self.hide_internal: del ast[0] first_stmt = ast[0][0] except: pass try: if first_stmt == NAME_MODULE: if self.hide_internal: del ast[0] first_stmt = ast[0][0] pass except: pass have_qualname = False if self.version < 3.0: # Should we ditch this in favor of the "else" case? qualname = '.'.join(self.classes) QUAL_NAME = SyntaxTree('stmt', [ SyntaxTree('assign', [ SyntaxTree('expr', [Token('LOAD_CONST', pattr=qualname)]), SyntaxTree('store', [ Token('STORE_NAME', pattr='__qualname__')]) ])]) have_qualname = (ast[0][0] == QUAL_NAME) else: # Python 3.4+ has constants like 'cmp_to_key.<locals>.K' # which are not simple classes like the < 3 case. 
try: if (first_stmt[0] == 'assign' and first_stmt[0][0][0] == 'LOAD_CONST' and first_stmt[0][1] == 'store' and first_stmt[0][1][0] == Token('STORE_NAME', pattr='__qualname__')): have_qualname = True except: pass if have_qualname: if self.hide_internal: del ast[0] pass # if docstring exists, dump it if (code.co_consts and code.co_consts[0] is not None and len(ast) > 0): do_doc = False if is_docstring(ast[0]): i = 0 do_doc = True elif (len(ast) > 1 and is_docstring(ast[1])): i = 1 do_doc = True if do_doc and self.hide_internal: try: docstring = ast[i][0][0][0][0].pattr except: docstring = code.co_consts[0] if print_docstring(self, indent, docstring): self.println() del ast[i] # the function defining a class normally returns locals(); we # don't want this to show up in the source, thus remove the node if len(ast) > 0 and ast[-1][0] == RETURN_LOCALS: if self.hide_internal: del ast[-1] # remove last node # else: # print ast[-1][-1] globals, nonlocals = find_globals_and_nonlocals(ast, set(), set(), code, self.version) # Add "global" declaration statements at the top # of the function for g in sorted(globals): self.println(indent, 'global ', g) for nl in sorted(nonlocals): self.println(indent, 'nonlocal ', nl) old_name = self.name self.gen_source(ast, code.co_name, code._customize) self.name = old_name code._tokens = None; code._customize = None # save memory self.classes.pop(-1)
[ "def", "build_class", "(", "self", ",", "code", ")", ":", "assert", "iscode", "(", "code", ")", "self", ".", "classes", ".", "append", "(", "self", ".", "currentclass", ")", "code", "=", "Code", "(", "code", ",", "self", ".", "scanner", ",", "self", ...
Dump class definition, doc string and class body.
[ "Dump", "class", "definition", "doc", "string", "and", "class", "body", "." ]
python
train
juju/charm-helpers
charmhelpers/contrib/openstack/utils.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/utils.py#L769-L782
def os_workload_status(configs, required_interfaces, charm_func=None): """ Decorator to set workload status based on complete contexts """ def wrap(f): @wraps(f) def wrapped_f(*args, **kwargs): # Run the original function first f(*args, **kwargs) # Set workload status now that contexts have been # acted on set_os_workload_status(configs, required_interfaces, charm_func) return wrapped_f return wrap
[ "def", "os_workload_status", "(", "configs", ",", "required_interfaces", ",", "charm_func", "=", "None", ")", ":", "def", "wrap", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "wrapped_f", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":"...
Decorator to set workload status based on complete contexts
[ "Decorator", "to", "set", "workload", "status", "based", "on", "complete", "contexts" ]
python
train
xflr6/gsheets
gsheets/export.py
https://github.com/xflr6/gsheets/blob/ca4f1273044704e529c1138e3f942836fc496e1b/gsheets/export.py#L21-L24
def write_csv(fileobj, rows, encoding=ENCODING, dialect=DIALECT): """Dump rows to ``fileobj`` with the given ``encoding`` and CSV ``dialect``.""" csvwriter = csv.writer(fileobj, dialect=dialect) csv_writerows(csvwriter, rows, encoding)
[ "def", "write_csv", "(", "fileobj", ",", "rows", ",", "encoding", "=", "ENCODING", ",", "dialect", "=", "DIALECT", ")", ":", "csvwriter", "=", "csv", ".", "writer", "(", "fileobj", ",", "dialect", "=", "dialect", ")", "csv_writerows", "(", "csvwriter", "...
Dump rows to ``fileobj`` with the given ``encoding`` and CSV ``dialect``.
[ "Dump", "rows", "to", "fileobj", "with", "the", "given", "encoding", "and", "CSV", "dialect", "." ]
python
train
Jaymon/endpoints
endpoints/call.py
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/call.py#L360-L368
def get_class(self, module, class_name): """try and get the class_name from the module and make sure it is a valid controller""" # let's get the class class_object = getattr(module, class_name, None) if not class_object or not issubclass(class_object, Controller): class_object = None return class_object
[ "def", "get_class", "(", "self", ",", "module", ",", "class_name", ")", ":", "# let's get the class", "class_object", "=", "getattr", "(", "module", ",", "class_name", ",", "None", ")", "if", "not", "class_object", "or", "not", "issubclass", "(", "class_object...
try and get the class_name from the module and make sure it is a valid controller
[ "try", "and", "get", "the", "class_name", "from", "the", "module", "and", "make", "sure", "it", "is", "a", "valid", "controller" ]
python
train
zooniverse/panoptes-python-client
panoptes_client/panoptes.py
https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/panoptes.py#L1011-L1046
def add(self, objs): """ Adds the given `objs` to this `LinkCollection`. - **objs** can be a list of :py:class:`.PanoptesObject` instances, a list of object IDs, a single :py:class:`.PanoptesObject` instance, or a single object ID. Examples:: organization.links.projects.add(1234) organization.links.projects.add(Project(1234)) workflow.links.subject_sets.add([1,2,3,4]) workflow.links.subject_sets.add([Project(12), Project(34)]) """ if self.readonly: raise NotImplementedError( '{} links can\'t be modified'.format(self._slug) ) if not self._parent.id: raise ObjectNotSavedException( "Links can not be modified before the object has been saved." ) _objs = [obj for obj in self._build_obj_list(objs) if obj not in self] if not _objs: return self._parent.http_post( '{}/links/{}'.format(self._parent.id, self._slug), json={self._slug: _objs}, retry=True, ) self._linked_object_ids.extend(_objs)
[ "def", "add", "(", "self", ",", "objs", ")", ":", "if", "self", ".", "readonly", ":", "raise", "NotImplementedError", "(", "'{} links can\\'t be modified'", ".", "format", "(", "self", ".", "_slug", ")", ")", "if", "not", "self", ".", "_parent", ".", "id...
Adds the given `objs` to this `LinkCollection`. - **objs** can be a list of :py:class:`.PanoptesObject` instances, a list of object IDs, a single :py:class:`.PanoptesObject` instance, or a single object ID. Examples:: organization.links.projects.add(1234) organization.links.projects.add(Project(1234)) workflow.links.subject_sets.add([1,2,3,4]) workflow.links.subject_sets.add([Project(12), Project(34)])
[ "Adds", "the", "given", "objs", "to", "this", "LinkCollection", "." ]
python
train
gpennington/PyMarvel
marvel/marvel.py
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L152-L170
def get_comics(self, *args, **kwargs): """ Fetches list of comics. get /v1/public/comics :returns: ComicDataWrapper >>> m = Marvel(public_key, private_key) >>> cdw = m.get_comics(orderBy="issueNumber,-modified", limit="10", offset="15") >>> print cdw.data.count 10 >>> print cdw.data.results[0].name Some Comic """ response = json.loads(self._call(Comic.resource_url(), self._params(kwargs)).text) return ComicDataWrapper(self, response)
[ "def", "get_comics", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "response", "=", "json", ".", "loads", "(", "self", ".", "_call", "(", "Comic", ".", "resource_url", "(", ")", ",", "self", ".", "_params", "(", "kwargs", ")", ...
Fetches list of comics. get /v1/public/comics :returns: ComicDataWrapper >>> m = Marvel(public_key, private_key) >>> cdw = m.get_comics(orderBy="issueNumber,-modified", limit="10", offset="15") >>> print cdw.data.count 10 >>> print cdw.data.results[0].name Some Comic
[ "Fetches", "list", "of", "comics", "." ]
python
train
christian-oudard/htmltreediff
htmltreediff/util.py
https://github.com/christian-oudard/htmltreediff/blob/0e28f56492ae7e69bb0f74f9a79a8909a5ad588d/htmltreediff/util.py#L261-L272
def get_location(dom, location): """ Get the node at the specified location in the dom. Location is a sequence of child indices, starting at the children of the root element. If there is no node at this location, raise a ValueError. """ node = dom.documentElement for i in location: node = get_child(node, i) if not node: raise ValueError('Node at location %s does not exist.' % location) #TODO: line not covered return node
[ "def", "get_location", "(", "dom", ",", "location", ")", ":", "node", "=", "dom", ".", "documentElement", "for", "i", "in", "location", ":", "node", "=", "get_child", "(", "node", ",", "i", ")", "if", "not", "node", ":", "raise", "ValueError", "(", "...
Get the node at the specified location in the dom. Location is a sequence of child indices, starting at the children of the root element. If there is no node at this location, raise a ValueError.
[ "Get", "the", "node", "at", "the", "specified", "location", "in", "the", "dom", ".", "Location", "is", "a", "sequence", "of", "child", "indices", "starting", "at", "the", "children", "of", "the", "root", "element", ".", "If", "there", "is", "no", "node",...
python
train
spotify/docker_interface
docker_interface/util.py
https://github.com/spotify/docker_interface/blob/4df80e1fe072d958020080d32c16551ff7703d51/docker_interface/util.py#L57-L74
def split_path(path, ref=None): """ Split a path into its components. Parameters ---------- path : str absolute or relative path with respect to `ref` ref : str or None reference path if `path` is relative Returns ------- list : str components of the path """ path = abspath(path, ref) return path.strip(os.path.sep).split(os.path.sep)
[ "def", "split_path", "(", "path", ",", "ref", "=", "None", ")", ":", "path", "=", "abspath", "(", "path", ",", "ref", ")", "return", "path", ".", "strip", "(", "os", ".", "path", ".", "sep", ")", ".", "split", "(", "os", ".", "path", ".", "sep"...
Split a path into its components. Parameters ---------- path : str absolute or relative path with respect to `ref` ref : str or None reference path if `path` is relative Returns ------- list : str components of the path
[ "Split", "a", "path", "into", "its", "components", "." ]
python
train
mbarakaja/braulio
braulio/version.py
https://github.com/mbarakaja/braulio/blob/70ab6f0dd631ef78c4da1b39d1c6fb6f9a995d2b/braulio/version.py#L108-L155
def bump(self, bump_part): """Return a new bumped version instance.""" major, minor, patch, stage, n = tuple(self) # stage bump if bump_part not in {"major", "minor", "patch"}: if bump_part not in self.stages: raise ValueError(f"Unknown {bump_part} stage") # We can not bump from final stage to final again. if self.stage == "final" and bump_part == "final": raise ValueError(f"{self} is already in final stage.") # bump in the same stage (numeric part) if bump_part == self.stage: n += 1 else: new_stage_number = tuple(self.stages).index(bump_part) # We can not bump to a previous stage if new_stage_number < self._stage_number: raise ValueError(f"{bump_part} stage is previous to {self}") stage = bump_part n = 0 else: # major, minor, or patch bump # Only version in final stage can do a major, minor or patch # bump if self.stage != "final": raise ValueError( f"{self} is a pre-release version." f" Can't do a {bump_part} version bump" ) if bump_part == "major": major += 1 minor, patch = 0, 0 elif bump_part == "minor": minor += 1 patch = 0 else: patch += 1 return Version(major=major, minor=minor, patch=patch, stage=stage, n=n)
[ "def", "bump", "(", "self", ",", "bump_part", ")", ":", "major", ",", "minor", ",", "patch", ",", "stage", ",", "n", "=", "tuple", "(", "self", ")", "# stage bump", "if", "bump_part", "not", "in", "{", "\"major\"", ",", "\"minor\"", ",", "\"patch\"", ...
Return a new bumped version instance.
[ "Return", "a", "new", "bumped", "version", "instance", "." ]
python
train
saltstack/salt
salt/states/boto_secgroup.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_secgroup.py#L408-L499
def _rules_present(name, rules, delete_ingress_rules=True, vpc_id=None, vpc_name=None, region=None, key=None, keyid=None, profile=None): ''' given a group name or group name and vpc_id (or vpc name): 1. get lists of desired rule changes (using _get_rule_changes) 2. authorize/create rules missing rules 3. if delete_ingress_rules is True, delete/revoke non-requested rules 4. return 'old' and 'new' group rules ''' ret = {'result': True, 'comment': '', 'changes': {}} sg = __salt__['boto_secgroup.get_config'](name=name, group_id=None, region=region, key=key, keyid=keyid, profile=profile, vpc_id=vpc_id, vpc_name=vpc_name) if not sg: ret['comment'] = '{0} security group configuration could not be retrieved.'.format(name) ret['result'] = False return ret rules = _split_rules(rules) if vpc_id or vpc_name: for rule in rules: _source_group_name = rule.get('source_group_name', None) if _source_group_name: _group_vpc_name = vpc_name _group_vpc_id = vpc_id _source_group_name_vpc = rule.get('source_group_name_vpc', None) if _source_group_name_vpc: _group_vpc_name = _source_group_name_vpc _group_vpc_id = None _group_id = __salt__['boto_secgroup.get_group_id']( name=_source_group_name, vpc_id=_group_vpc_id, vpc_name=_group_vpc_name, region=region, key=key, keyid=keyid, profile=profile ) if not _group_id: raise SaltInvocationError( 'source_group_name {0} does not map to a valid ' 'source group id.'.format(_source_group_name) ) rule['source_group_name'] = None if _source_group_name_vpc: rule.pop('source_group_name_vpc') rule['source_group_group_id'] = _group_id # rules = rules that exist in salt state # sg['rules'] = that exist in present group to_delete, to_create = _get_rule_changes(rules, sg['rules']) to_delete = to_delete if delete_ingress_rules else [] if to_create or to_delete: if __opts__['test']: msg = """Security group {0} set to have rules modified. 
To be created: {1} To be deleted: {2}""".format(name, pprint.pformat(to_create), pprint.pformat(to_delete)) ret['comment'] = msg ret['result'] = None return ret if to_delete: deleted = True for rule in to_delete: _deleted = __salt__['boto_secgroup.revoke']( name, vpc_id=vpc_id, vpc_name=vpc_name, region=region, key=key, keyid=keyid, profile=profile, **rule) if not _deleted: deleted = False if deleted: ret['comment'] = 'Removed rules on {0} security group.'.format(name) else: ret['comment'] = 'Failed to remove rules on {0} security group.'.format(name) ret['result'] = False if to_create: created = True for rule in to_create: _created = __salt__['boto_secgroup.authorize']( name, vpc_id=vpc_id, vpc_name=vpc_name, region=region, key=key, keyid=keyid, profile=profile, **rule) if not _created: created = False if created: ret['comment'] = ' '.join([ ret['comment'], 'Created rules on {0} security group.'.format(name) ]) else: ret['comment'] = ' '.join([ ret['comment'], 'Failed to create rules on {0} security group.'.format(name) ]) ret['result'] = False ret['changes']['old'] = {'rules': sg['rules']} sg = __salt__['boto_secgroup.get_config'](name=name, group_id=None, region=region, key=key, keyid=keyid, profile=profile, vpc_id=vpc_id, vpc_name=vpc_name) ret['changes']['new'] = {'rules': sg['rules']} return ret
[ "def", "_rules_present", "(", "name", ",", "rules", ",", "delete_ingress_rules", "=", "True", ",", "vpc_id", "=", "None", ",", "vpc_name", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=",...
given a group name or group name and vpc_id (or vpc name): 1. get lists of desired rule changes (using _get_rule_changes) 2. authorize/create rules missing rules 3. if delete_ingress_rules is True, delete/revoke non-requested rules 4. return 'old' and 'new' group rules
[ "given", "a", "group", "name", "or", "group", "name", "and", "vpc_id", "(", "or", "vpc", "name", ")", ":", "1", ".", "get", "lists", "of", "desired", "rule", "changes", "(", "using", "_get_rule_changes", ")", "2", ".", "authorize", "/", "create", "rule...
python
train
tdryer/hangups
hangups/conversation_event.py
https://github.com/tdryer/hangups/blob/85c0bf0a57698d077461283895707260f9dbf931/hangups/conversation_event.py#L34-L37
def user_id(self): """Who created the event (:class:`~hangups.user.UserID`).""" return user.UserID(chat_id=self._event.sender_id.chat_id, gaia_id=self._event.sender_id.gaia_id)
[ "def", "user_id", "(", "self", ")", ":", "return", "user", ".", "UserID", "(", "chat_id", "=", "self", ".", "_event", ".", "sender_id", ".", "chat_id", ",", "gaia_id", "=", "self", ".", "_event", ".", "sender_id", ".", "gaia_id", ")" ]
Who created the event (:class:`~hangups.user.UserID`).
[ "Who", "created", "the", "event", "(", ":", "class", ":", "~hangups", ".", "user", ".", "UserID", ")", "." ]
python
valid
pypa/pipenv
pipenv/vendor/chardet/universaldetector.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/chardet/universaldetector.py#L111-L218
def feed(self, byte_str): """ Takes a chunk of a document and feeds it through all of the relevant charset probers. After calling ``feed``, you can check the value of the ``done`` attribute to see if you need to continue feeding the ``UniversalDetector`` more data, or if it has made a prediction (in the ``result`` attribute). .. note:: You should always call ``close`` when you're done feeding in your document if ``done`` is not already ``True``. """ if self.done: return if not len(byte_str): return if not isinstance(byte_str, bytearray): byte_str = bytearray(byte_str) # First check for known BOMs, since these are guaranteed to be correct if not self._got_data: # If the data starts with BOM, we know it is UTF if byte_str.startswith(codecs.BOM_UTF8): # EF BB BF UTF-8 with BOM self.result = {'encoding': "UTF-8-SIG", 'confidence': 1.0, 'language': ''} elif byte_str.startswith((codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)): # FF FE 00 00 UTF-32, little-endian BOM # 00 00 FE FF UTF-32, big-endian BOM self.result = {'encoding': "UTF-32", 'confidence': 1.0, 'language': ''} elif byte_str.startswith(b'\xFE\xFF\x00\x00'): # FE FF 00 00 UCS-4, unusual octet order BOM (3412) self.result = {'encoding': "X-ISO-10646-UCS-4-3412", 'confidence': 1.0, 'language': ''} elif byte_str.startswith(b'\x00\x00\xFF\xFE'): # 00 00 FF FE UCS-4, unusual octet order BOM (2143) self.result = {'encoding': "X-ISO-10646-UCS-4-2143", 'confidence': 1.0, 'language': ''} elif byte_str.startswith((codecs.BOM_LE, codecs.BOM_BE)): # FF FE UTF-16, little endian BOM # FE FF UTF-16, big endian BOM self.result = {'encoding': "UTF-16", 'confidence': 1.0, 'language': ''} self._got_data = True if self.result['encoding'] is not None: self.done = True return # If none of those matched and we've only see ASCII so far, check # for high bytes and escape sequences if self._input_state == InputState.PURE_ASCII: if self.HIGH_BYTE_DETECTOR.search(byte_str): self._input_state = InputState.HIGH_BYTE elif self._input_state == 
InputState.PURE_ASCII and \ self.ESC_DETECTOR.search(self._last_char + byte_str): self._input_state = InputState.ESC_ASCII self._last_char = byte_str[-1:] # If we've seen escape sequences, use the EscCharSetProber, which # uses a simple state machine to check for known escape sequences in # HZ and ISO-2022 encodings, since those are the only encodings that # use such sequences. if self._input_state == InputState.ESC_ASCII: if not self._esc_charset_prober: self._esc_charset_prober = EscCharSetProber(self.lang_filter) if self._esc_charset_prober.feed(byte_str) == ProbingState.FOUND_IT: self.result = {'encoding': self._esc_charset_prober.charset_name, 'confidence': self._esc_charset_prober.get_confidence(), 'language': self._esc_charset_prober.language} self.done = True # If we've seen high bytes (i.e., those with values greater than 127), # we need to do more complicated checks using all our multi-byte and # single-byte probers that are left. The single-byte probers # use character bigram distributions to determine the encoding, whereas # the multi-byte probers use a combination of character unigram and # bigram distributions. elif self._input_state == InputState.HIGH_BYTE: if not self._charset_probers: self._charset_probers = [MBCSGroupProber(self.lang_filter)] # If we're checking non-CJK encodings, use single-byte prober if self.lang_filter & LanguageFilter.NON_CJK: self._charset_probers.append(SBCSGroupProber()) self._charset_probers.append(Latin1Prober()) for prober in self._charset_probers: if prober.feed(byte_str) == ProbingState.FOUND_IT: self.result = {'encoding': prober.charset_name, 'confidence': prober.get_confidence(), 'language': prober.language} self.done = True break if self.WIN_BYTE_DETECTOR.search(byte_str): self._has_win_bytes = True
[ "def", "feed", "(", "self", ",", "byte_str", ")", ":", "if", "self", ".", "done", ":", "return", "if", "not", "len", "(", "byte_str", ")", ":", "return", "if", "not", "isinstance", "(", "byte_str", ",", "bytearray", ")", ":", "byte_str", "=", "bytear...
Takes a chunk of a document and feeds it through all of the relevant charset probers. After calling ``feed``, you can check the value of the ``done`` attribute to see if you need to continue feeding the ``UniversalDetector`` more data, or if it has made a prediction (in the ``result`` attribute). .. note:: You should always call ``close`` when you're done feeding in your document if ``done`` is not already ``True``.
[ "Takes", "a", "chunk", "of", "a", "document", "and", "feeds", "it", "through", "all", "of", "the", "relevant", "charset", "probers", "." ]
python
train
NoneGG/aredis
aredis/pool.py
https://github.com/NoneGG/aredis/blob/204caad740ac13e5760d46444a2ba7632982a046/aredis/pool.py#L376-L399
def release(self, connection): """ Releases the connection back to the pool """ self._checkpid() if connection.pid != self.pid: return # Remove the current connection from _in_use_connection and add it back to the available pool # There is cases where the connection is to be removed but it will not exist and there # must be a safe way to remove i_c = self._in_use_connections.get(connection.node["name"], set()) if connection in i_c: i_c.remove(connection) else: pass # discard connection with unread response if connection.awaiting_response: connection.disconnect() # reduce node connection count in case of too many connection error raised if self.max_connections_per_node and self._created_connections_per_node.get(connection.node['name']): self._created_connections_per_node[connection.node['name']] -= 1 else: self._available_connections.setdefault(connection.node["name"], []).append(connection)
[ "def", "release", "(", "self", ",", "connection", ")", ":", "self", ".", "_checkpid", "(", ")", "if", "connection", ".", "pid", "!=", "self", ".", "pid", ":", "return", "# Remove the current connection from _in_use_connection and add it back to the available pool", "#...
Releases the connection back to the pool
[ "Releases", "the", "connection", "back", "to", "the", "pool" ]
python
train
adafruit/Adafruit_Python_DHT
Adafruit_DHT/common.py
https://github.com/adafruit/Adafruit_Python_DHT/blob/c9407aa0506321bbc63ec8ba3c59fc21291f4746/Adafruit_DHT/common.py#L83-L98
def read_retry(sensor, pin, retries=15, delay_seconds=2, platform=None): """Read DHT sensor of specified sensor type (DHT11, DHT22, or AM2302) on specified pin and return a tuple of humidity (as a floating point value in percent) and temperature (as a floating point value in Celsius). Unlike the read function, this read_retry function will attempt to read multiple times (up to the specified max retries) until a good reading can be found. If a good reading cannot be found after the amount of retries, a tuple of (None, None) is returned. The delay between retries is by default 2 seconds, but can be overridden. """ for i in range(retries): humidity, temperature = read(sensor, pin, platform) if humidity is not None and temperature is not None: return (humidity, temperature) time.sleep(delay_seconds) return (None, None)
[ "def", "read_retry", "(", "sensor", ",", "pin", ",", "retries", "=", "15", ",", "delay_seconds", "=", "2", ",", "platform", "=", "None", ")", ":", "for", "i", "in", "range", "(", "retries", ")", ":", "humidity", ",", "temperature", "=", "read", "(", ...
Read DHT sensor of specified sensor type (DHT11, DHT22, or AM2302) on specified pin and return a tuple of humidity (as a floating point value in percent) and temperature (as a floating point value in Celsius). Unlike the read function, this read_retry function will attempt to read multiple times (up to the specified max retries) until a good reading can be found. If a good reading cannot be found after the amount of retries, a tuple of (None, None) is returned. The delay between retries is by default 2 seconds, but can be overridden.
[ "Read", "DHT", "sensor", "of", "specified", "sensor", "type", "(", "DHT11", "DHT22", "or", "AM2302", ")", "on", "specified", "pin", "and", "return", "a", "tuple", "of", "humidity", "(", "as", "a", "floating", "point", "value", "in", "percent", ")", "and"...
python
train
mattja/distob
distob/distob.py
https://github.com/mattja/distob/blob/b0fc49e157189932c70231077ed35e1aa5717da9/distob/distob.py#L1019-L1078
def _scatter_ndarray(ar, axis=-1, destination=None, blocksize=None): """Turn a numpy ndarray into a DistArray or RemoteArray Args: ar (array_like) axis (int, optional): specifies along which axis to split the array to distribute it. The default is to split along the last axis. `None` means do not distribute. destination (int or list of int, optional): Optionally force the array to go to a specific engine. If an array is to be scattered along an axis, this should be a list of engine ids with the same length as that axis. blocksize (int): Optionally control the size of intervals into which the distributed axis is split (the default splits the distributed axis evenly over all computing engines). """ from .arrays import DistArray, RemoteArray shape = ar.shape ndim = len(shape) if axis is None: return _directed_scatter([ar], destination=[destination], blocksize=blocksize)[0] if axis < -ndim or axis > ndim - 1: raise DistobValueError('axis out of range') if axis < 0: axis = ndim + axis n = shape[axis] if n == 1: return _directed_scatter([ar], destination=[destination])[0] if isinstance(destination, collections.Sequence): ne = len(destination) # number of engines to scatter array to else: if distob.engine is None: setup_engines() ne = distob.engine.nengines # by default scatter across all engines if blocksize is None: blocksize = ((n - 1) // ne) + 1 if blocksize > n: blocksize = n if isinstance(ar, DistArray): if axis == ar._distaxis: return ar else: raise DistobError('Currently can only scatter one axis of array') # Currently, if requested to scatter an array that is already Remote and # large, first get whole array locally, then scatter. Not really optimal. 
if isinstance(ar, RemoteArray) and n > blocksize: ar = ar._ob s = slice(None) subarrays = [] low = 0 for i in range(0, n // blocksize): high = low + blocksize index = (s,)*axis + (slice(low, high),) + (s,)*(ndim - axis - 1) subarrays.append(ar[index]) low += blocksize if n % blocksize != 0: high = low + (n % blocksize) index = (s,)*axis + (slice(low, high),) + (s,)*(ndim - axis - 1) subarrays.append(ar[index]) subarrays = _directed_scatter(subarrays, destination=destination) return DistArray(subarrays, axis)
[ "def", "_scatter_ndarray", "(", "ar", ",", "axis", "=", "-", "1", ",", "destination", "=", "None", ",", "blocksize", "=", "None", ")", ":", "from", ".", "arrays", "import", "DistArray", ",", "RemoteArray", "shape", "=", "ar", ".", "shape", "ndim", "=",...
Turn a numpy ndarray into a DistArray or RemoteArray Args: ar (array_like) axis (int, optional): specifies along which axis to split the array to distribute it. The default is to split along the last axis. `None` means do not distribute. destination (int or list of int, optional): Optionally force the array to go to a specific engine. If an array is to be scattered along an axis, this should be a list of engine ids with the same length as that axis. blocksize (int): Optionally control the size of intervals into which the distributed axis is split (the default splits the distributed axis evenly over all computing engines).
[ "Turn", "a", "numpy", "ndarray", "into", "a", "DistArray", "or", "RemoteArray", "Args", ":", "ar", "(", "array_like", ")", "axis", "(", "int", "optional", ")", ":", "specifies", "along", "which", "axis", "to", "split", "the", "array", "to", "distribute", ...
python
valid
samirelanduk/quickplots
quickplots/charts.py
https://github.com/samirelanduk/quickplots/blob/59f5e6ff367b2c1c24ba7cf1805d03552034c6d8/quickplots/charts.py#L359-L390
def x_lower_limit(self, limit=None): """Returns or sets (if a value is provided) the value at which the x-axis should start. By default this is zero (unless there are negative values). :param limit: If given, the chart's x_lower_limit will be set to this. :raises ValueError: if you try to make the lower limit larger than the\ upper limit.""" if limit is None: if self._x_lower_limit is None: if self.smallest_x() < 0: if self.smallest_x() == self.largest_x(): return int(self.smallest_x() - 1) else: return self.smallest_x() else: return 0 else: return self._x_lower_limit else: if not is_numeric(limit): raise TypeError( "lower x limit must be numeric, not '%s'" % str(limit) ) if limit >= self.largest_x(): raise ValueError( "lower x limit must be less than upper limit (%s), not %s" % ( str(self.largest_x()), str(limit) ) ) self._x_lower_limit = limit
[ "def", "x_lower_limit", "(", "self", ",", "limit", "=", "None", ")", ":", "if", "limit", "is", "None", ":", "if", "self", ".", "_x_lower_limit", "is", "None", ":", "if", "self", ".", "smallest_x", "(", ")", "<", "0", ":", "if", "self", ".", "smalle...
Returns or sets (if a value is provided) the value at which the x-axis should start. By default this is zero (unless there are negative values). :param limit: If given, the chart's x_lower_limit will be set to this. :raises ValueError: if you try to make the lower limit larger than or equal to the\ upper limit.
[ "Returns", "or", "sets", "(", "if", "a", "value", "is", "provided", ")", "the", "value", "at", "which", "the", "x", "-", "axis", "should", "start", ".", "By", "default", "this", "is", "zero", "(", "unless", "there", "are", "negative", "values", ")", ...
python
train
boriel/zxbasic
arch/zx48k/backend/__pload.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__pload.py#L432-L497
def _pstorestr(ins): """ Stores 2nd parameter at stack pointer (SP) + X, being X 1st parameter. 1st operand must be a SIGNED integer. Note: This procedure proceeds as _pstore16, since STRINGS are 16bit pointers. """ output = [] temporal = False # 2nd operand first, because must go into the stack value = ins.quad[2] if value[0] == '*': value = value[1:] indirect = True else: indirect = False if value[0] == '_': output.append('ld de, (%s)' % value) if indirect: output.append('call __LOAD_DE_DE') REQUIRES.add('lddede.asm') elif value[0] == '#': output.append('ld de, %s' % value[1:]) else: output.append('pop de') temporal = value[0] != '$' if indirect: output.append('call __LOAD_DE_DE') REQUIRES.add('lddede.asm') # Now 1st operand value = ins.quad[1] if value[0] == '*': value = value[1:] indirect = True else: indirect = False I = int(value) if I >= 0: I += 4 # Return Address + "push IX" output.append('ld bc, %i' % I) if not temporal: if indirect: output.append('call __PISTORE_STR') REQUIRES.add('storestr.asm') else: output.append('call __PSTORE_STR') REQUIRES.add('pstorestr.asm') else: if indirect: output.append('call __PISTORE_STR2') REQUIRES.add('storestr2.asm') else: output.append('call __PSTORE_STR2') REQUIRES.add('pstorestr2.asm') return output
[ "def", "_pstorestr", "(", "ins", ")", ":", "output", "=", "[", "]", "temporal", "=", "False", "# 2nd operand first, because must go into the stack", "value", "=", "ins", ".", "quad", "[", "2", "]", "if", "value", "[", "0", "]", "==", "'*'", ":", "value", ...
Stores 2nd parameter at stack pointer (SP) + X, being X 1st parameter. 1st operand must be a SIGNED integer. Note: This procedure proceeds as _pstore16, since STRINGS are 16bit pointers.
[ "Stores", "2nd", "parameter", "at", "stack", "pointer", "(", "SP", ")", "+", "X", "being", "X", "1st", "parameter", "." ]
python
train
Kronuz/pyScss
scss/extension/compass/layouts.py
https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/extension/compass/layouts.py#L114-L116
def AREA(a, b): """area: Sort pack by area""" return cmp(b[0] * b[1], a[0] * a[1]) or cmp(b[1], a[1]) or cmp(b[0], a[0])
[ "def", "AREA", "(", "a", ",", "b", ")", ":", "return", "cmp", "(", "b", "[", "0", "]", "*", "b", "[", "1", "]", ",", "a", "[", "0", "]", "*", "a", "[", "1", "]", ")", "or", "cmp", "(", "b", "[", "1", "]", ",", "a", "[", "1", "]", ...
area: Sort pack by area
[ "area", ":", "Sort", "pack", "by", "area" ]
python
train
saltstack/salt
salt/cloud/clouds/vmware.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/vmware.py#L3434-L3475
def list_clusters_by_datacenter(kwargs=None, call=None): ''' List clusters for each datacenter; or clusters for a specified datacenter in this VMware environment To list clusters for each datacenter: CLI Example: .. code-block:: bash salt-cloud -f list_clusters_by_datacenter my-vmware-config To list clusters for a specified datacenter: CLI Example: .. code-block:: bash salt-cloud -f list_clusters_by_datacenter my-vmware-config datacenter="datacenterName" ''' if call != 'function': raise SaltCloudSystemExit( 'The list_clusters_by_datacenter function must be called with ' '-f or --function.' ) ret = {} datacenter_name = kwargs.get('datacenter') if kwargs and 'datacenter' in kwargs else None datacenter_properties = ["name"] datacenter_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.Datacenter, datacenter_properties) for datacenter in datacenter_list: ret[datacenter['name']] = [] for cluster in datacenter['object'].hostFolder.childEntity: if isinstance(cluster, vim.ClusterComputeResource): ret[datacenter['name']].append(cluster.name) if datacenter_name and datacenter_name == datacenter['name']: return {'Clusters by Datacenter': {datacenter_name: ret[datacenter_name]}} return {'Clusters by Datacenter': ret}
[ "def", "list_clusters_by_datacenter", "(", "kwargs", "=", "None", ",", "call", "=", "None", ")", ":", "if", "call", "!=", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The list_clusters_by_datacenter function must be called with '", "'-f or --function.'", ")",...
List clusters for each datacenter; or clusters for a specified datacenter in this VMware environment To list clusters for each datacenter: CLI Example: .. code-block:: bash salt-cloud -f list_clusters_by_datacenter my-vmware-config To list clusters for a specified datacenter: CLI Example: .. code-block:: bash salt-cloud -f list_clusters_by_datacenter my-vmware-config datacenter="datacenterName"
[ "List", "clusters", "for", "each", "datacenter", ";", "or", "clusters", "for", "a", "specified", "datacenter", "in", "this", "VMware", "environment" ]
python
train
briandilley/ebs-deploy
ebs_deploy/__init__.py
https://github.com/briandilley/ebs-deploy/blob/4178c9c1282a9025fb987dab3470bea28c202e10/ebs_deploy/__init__.py#L251-L277
def upload_archive(self, filename, key, auto_create_bucket=True): """ Uploads an application archive version to s3 """ try: bucket = self.s3.get_bucket(self.aws.bucket) if (( self.aws.region != 'us-east-1' and self.aws.region != 'eu-west-1') and bucket.get_location() != self.aws.region) or ( self.aws.region == 'us-east-1' and bucket.get_location() != '') or ( self.aws.region == 'eu-west-1' and bucket.get_location() != 'eu-west-1'): raise Exception("Existing bucket doesn't match region") except S3ResponseError: bucket = self.s3.create_bucket(self.aws.bucket, location=self.aws.region) def __report_upload_progress(sent, total): if not sent: sent = 0 if not total: total = 0 out("Uploaded " + str(sent) + " bytes of " + str(total) \ + " (" + str(int(float(max(1, sent)) / float(total) * 100)) + "%)") # upload the new version k = Key(bucket) k.key = self.aws.bucket_path + key k.set_metadata('time', str(time())) k.set_contents_from_filename(filename, cb=__report_upload_progress, num_cb=10)
[ "def", "upload_archive", "(", "self", ",", "filename", ",", "key", ",", "auto_create_bucket", "=", "True", ")", ":", "try", ":", "bucket", "=", "self", ".", "s3", ".", "get_bucket", "(", "self", ".", "aws", ".", "bucket", ")", "if", "(", "(", "self",...
Uploads an application archive version to s3
[ "Uploads", "an", "application", "archive", "version", "to", "s3" ]
python
valid
wright-group/WrightTools
WrightTools/kit/_list.py
https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/kit/_list.py#L75-L99
def get_index(lis, argument): """Find the index of an item, given either the item or index as an argument. Particularly useful as a wrapper for arguments like channel or axis. Parameters ---------- lis : list List to parse. argument : int or object Argument. Returns ------- int Index of chosen object. """ # get channel if isinstance(argument, int): if -len(lis) <= argument < len(lis): return argument else: raise IndexError("index {0} incompatible with length {1}".format(argument, len(lis))) else: return lis.index(argument)
[ "def", "get_index", "(", "lis", ",", "argument", ")", ":", "# get channel", "if", "isinstance", "(", "argument", ",", "int", ")", ":", "if", "-", "len", "(", "lis", ")", "<=", "argument", "<", "len", "(", "lis", ")", ":", "return", "argument", "else"...
Find the index of an item, given either the item or index as an argument. Particularly useful as a wrapper for arguments like channel or axis. Parameters ---------- lis : list List to parse. argument : int or object Argument. Returns ------- int Index of chosen object.
[ "Find", "the", "index", "of", "an", "item", "given", "either", "the", "item", "or", "index", "as", "an", "argument", "." ]
python
train
saltstack/salt
salt/modules/libcloud_loadbalancer.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/libcloud_loadbalancer.py#L104-L126
def list_balancers(profile, **libcloud_kwargs): ''' Return a list of load balancers. :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's list_balancers method :type libcloud_kwargs: ``dict`` CLI Example: .. code-block:: bash salt myminion libcloud_storage.list_balancers profile1 ''' conn = _get_driver(profile=profile) libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) balancers = conn.list_balancers(**libcloud_kwargs) ret = [] for balancer in balancers: ret.append(_simple_balancer(balancer)) return ret
[ "def", "list_balancers", "(", "profile", ",", "*", "*", "libcloud_kwargs", ")", ":", "conn", "=", "_get_driver", "(", "profile", "=", "profile", ")", "libcloud_kwargs", "=", "salt", ".", "utils", ".", "args", ".", "clean_kwargs", "(", "*", "*", "libcloud_k...
Return a list of load balancers. :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's list_balancers method :type libcloud_kwargs: ``dict`` CLI Example: .. code-block:: bash salt myminion libcloud_storage.list_balancers profile1
[ "Return", "a", "list", "of", "load", "balancers", "." ]
python
train
eruvanos/openbrokerapi
openbrokerapi/log_util.py
https://github.com/eruvanos/openbrokerapi/blob/29d514e5932f2eac27e03995dd41c8cecf40bb10/openbrokerapi/log_util.py#L5-L33
def basic_config(logger: logging.Logger = logging.root, level=logging.INFO): """ Configures a logger to log <=INFO to stdout and >INFO to stderr :param logger: Logger to configure, defaults to logging.root :param level: Defaults to INFO :return: configured logger (logger from parameters) """ logger.setLevel(level) class InfoFilter(logging.Filter): def filter(self, rec): return rec.levelno in (logging.DEBUG, logging.INFO) formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(name)s - %(message)s", "%d/%m/%Y %H:%M:%S") std_out_handler = logging.StreamHandler(sys.stdout) std_out_handler.setLevel(logging.DEBUG) std_out_handler.setFormatter(formatter) std_out_handler.addFilter(InfoFilter()) std_err_handler = logging.StreamHandler() std_err_handler.setLevel(logging.WARNING) std_err_handler.setFormatter(formatter) logger.addHandler(std_out_handler) logger.addHandler(std_err_handler) return logger
[ "def", "basic_config", "(", "logger", ":", "logging", ".", "Logger", "=", "logging", ".", "root", ",", "level", "=", "logging", ".", "INFO", ")", ":", "logger", ".", "setLevel", "(", "level", ")", "class", "InfoFilter", "(", "logging", ".", "Filter", "...
Configures a logger to log <=INFO to stdout and >INFO to stderr :param logger: Logger to configure, defaults to logging.root :param level: Defaults to INFO :return: configured logger (logger from parameters)
[ "Configures", "a", "logger", "to", "log", "<", "=", "INFO", "to", "stdout", "and", ">", "INFO", "to", "stderr" ]
python
train
santoshphilip/eppy
eppy/hvacbuilder.py
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/hvacbuilder.py#L982-L988
def getmakeidfobject(idf, key, name): """get idfobject or make it if it does not exist""" idfobject = idf.getobject(key, name) if not idfobject: return idf.newidfobject(key, Name=name) else: return idfobject
[ "def", "getmakeidfobject", "(", "idf", ",", "key", ",", "name", ")", ":", "idfobject", "=", "idf", ".", "getobject", "(", "key", ",", "name", ")", "if", "not", "idfobject", ":", "return", "idf", ".", "newidfobject", "(", "key", ",", "Name", "=", "nam...
get idfobject or make it if it does not exist
[ "get", "idfobject", "or", "make", "it", "if", "it", "does", "not", "exist" ]
python
train
Commonists/CommonsDownloader
commonsdownloader/commonsdownloader.py
https://github.com/Commonists/CommonsDownloader/blob/ac8147432b31ce3cdee5f7a75d0c48b788ee4666/commonsdownloader/commonsdownloader.py#L34-L44
def get_files_from_textfile(textfile_handler): """Yield the file names and widths by parsing a text file handler.""" for line in textfile_handler: line = line.rstrip() try: (image_name, width) = line.rsplit(',', 1) width = int(width) except ValueError: image_name = line width = None yield (image_name, width)
[ "def", "get_files_from_textfile", "(", "textfile_handler", ")", ":", "for", "line", "in", "textfile_handler", ":", "line", "=", "line", ".", "rstrip", "(", ")", "try", ":", "(", "image_name", ",", "width", ")", "=", "line", ".", "rsplit", "(", "','", ","...
Yield the file names and widths by parsing a text file handler.
[ "Yield", "the", "file", "names", "and", "widths", "by", "parsing", "a", "text", "file", "handler", "." ]
python
train
guaix-ucm/numina
numina/array/wavecalib/arccalibration.py
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/arccalibration.py#L428-L543
def arccalibration(wv_master,
                   xpos_arc,
                   naxis1_arc,
                   crpix1,
                   wv_ini_search,
                   wv_end_search,
                   wvmin_useful,
                   wvmax_useful,
                   error_xpos_arc,
                   times_sigma_r,
                   frac_triplets_for_sum,
                   times_sigma_theil_sen,
                   poly_degree_wfit,
                   times_sigma_polfilt,
                   times_sigma_cook,
                   times_sigma_inclusion,
                   geometry=None,
                   debugplot=0):
    """Performs arc line identification for arc calibration.

    This function is a wrapper of two functions, which are responsible
    of computing all the relevant information concerning the triplets
    generated from the master table and the actual identification
    procedure of the arc lines, respectively.

    The separation of those computations in two different functions
    helps to avoid the repetition of calls to the first function when
    calibrating several arcs using the same master table.

    Parameters
    ----------
    wv_master : 1d numpy array, float
        Array with wavelengths corresponding to the master table
        (Angstroms).
    xpos_arc : 1d numpy array, float
        Location of arc lines (pixels).
    naxis1_arc : int
        NAXIS1 for arc spectrum.
    crpix1 : float
        CRPIX1 value to be employed in the wavelength calibration.
    wv_ini_search : float
        Minimum expected wavelength in spectrum.
    wv_end_search : float
        Maximum expected wavelength in spectrum.
    wvmin_useful : float
        If not None, this value is used to clip detected lines below it.
    wvmax_useful : float
        If not None, this value is used to clip detected lines above it.
    error_xpos_arc : float
        Error in arc line position (pixels).
    times_sigma_r : float
        Times sigma to search for valid line position ratios.
    frac_triplets_for_sum : float
        Fraction of distances to different triplets to sum when
        computing the cost function.
    times_sigma_theil_sen : float
        Number of times the (robust) standard deviation around the
        linear fit (using the Theil-Sen method) to reject points.
    poly_degree_wfit : int
        Degree for polynomial fit to wavelength calibration.
    times_sigma_polfilt : float
        Number of times the (robust) standard deviation around the
        polynomial fit to reject points.
    times_sigma_cook : float
        Number of times the standard deviation of Cook's distances
        to detect outliers. If zero, this method of outlier detection
        is ignored.
    times_sigma_inclusion : float
        Number of times the (robust) standard deviation around the
        polynomial fit to include a new line in the set of identified
        lines.
    geometry : tuple (4 integers) or None
        x, y, dx, dy values employed to set the window geometry.
    debugplot : int
        Determines whether intermediate computations and/or plots
        are displayed. The valid codes are defined in
        numina.array.display.pause_debugplot.

    Returns
    -------
    list_of_wvfeatures : list (of WavecalFeature instances)
        A list of size equal to the number of identified lines, which
        elements are instances of the class WavecalFeature, containing
        all the relevant information concerning the line
        identification.

    """
    # Step 1: precompute triplet information from the master table.
    # This step is independent of the particular arc being calibrated,
    # which is why it lives in a separate function (see docstring).
    ntriplets_master, ratios_master_sorted, triplets_master_sorted_list = \
        gen_triplets_master(wv_master=wv_master, geometry=geometry,
                            debugplot=debugplot)
    # Step 2: run the actual line-identification procedure using the
    # precomputed triplets.
    list_of_wvfeatures = arccalibration_direct(
        wv_master=wv_master,
        ntriplets_master=ntriplets_master,
        ratios_master_sorted=ratios_master_sorted,
        triplets_master_sorted_list=triplets_master_sorted_list,
        xpos_arc=xpos_arc,
        naxis1_arc=naxis1_arc,
        crpix1=crpix1,
        wv_ini_search=wv_ini_search,
        wv_end_search=wv_end_search,
        wvmin_useful=wvmin_useful,
        wvmax_useful=wvmax_useful,
        error_xpos_arc=error_xpos_arc,
        times_sigma_r=times_sigma_r,
        frac_triplets_for_sum=frac_triplets_for_sum,
        times_sigma_theil_sen=times_sigma_theil_sen,
        poly_degree_wfit=poly_degree_wfit,
        times_sigma_polfilt=times_sigma_polfilt,
        times_sigma_cook=times_sigma_cook,
        times_sigma_inclusion=times_sigma_inclusion,
        geometry=geometry,
        debugplot=debugplot)

    return list_of_wvfeatures
[ "def", "arccalibration", "(", "wv_master", ",", "xpos_arc", ",", "naxis1_arc", ",", "crpix1", ",", "wv_ini_search", ",", "wv_end_search", ",", "wvmin_useful", ",", "wvmax_useful", ",", "error_xpos_arc", ",", "times_sigma_r", ",", "frac_triplets_for_sum", ",", "times...
Performs arc line identification for arc calibration. This function is a wrapper of two functions, which are responsible of computing all the relevant information concerning the triplets generated from the master table and the actual identification procedure of the arc lines, respectively. The separation of those computations in two different functions helps to avoid the repetition of calls to the first function when calibrating several arcs using the same master table. Parameters ---------- wv_master : 1d numpy array, float Array with wavelengths corresponding to the master table (Angstroms). xpos_arc : 1d numpy array, float Location of arc lines (pixels). naxis1_arc : int NAXIS1 for arc spectrum. crpix1 : float CRPIX1 value to be employed in the wavelength calibration. wv_ini_search : float Minimum expected wavelength in spectrum. wv_end_search : float Maximum expected wavelength in spectrum. wvmin_useful : float If not None, this value is used to clip detected lines below it. wvmax_useful : float If not None, this value is used to clip detected lines above it. error_xpos_arc : float Error in arc line position (pixels). times_sigma_r : float Times sigma to search for valid line position ratios. frac_triplets_for_sum : float Fraction of distances to different triplets to sum when computing the cost function. times_sigma_theil_sen : float Number of times the (robust) standard deviation around the linear fit (using the Theil-Sen method) to reject points. poly_degree_wfit : int Degree for polynomial fit to wavelength calibration. times_sigma_polfilt : float Number of times the (robust) standard deviation around the polynomial fit to reject points. times_sigma_cook : float Number of times the standard deviation of Cook's distances to detect outliers. If zero, this method of outlier detection is ignored. times_sigma_inclusion : float Number of times the (robust) standard deviation around the polynomial fit to include a new line in the set of identified lines. 
geometry : tuple (4 integers) or None x, y, dx, dy values employed to set the window geometry. debugplot : int Determines whether intermediate computations and/or plots are displayed. The valid codes are defined in numina.array.display.pause_debugplot. Returns ------- list_of_wvfeatures : list (of WavecalFeature instances) A list of size equal to the number of identified lines, which elements are instances of the class WavecalFeature, containing all the relevant information concerning the line identification.
[ "Performs", "arc", "line", "identification", "for", "arc", "calibration", "." ]
python
train
campaignmonitor/createsend-python
lib/createsend/administrator.py
https://github.com/campaignmonitor/createsend-python/blob/4bfe2fd5cb2fc9d8f12280b23569eea0a6c66426/lib/createsend/administrator.py#L42-L45
def delete(self): """Deletes the administrator from the account.""" params = {"email": self.email_address} response = self._delete("/admins.json", params=params)
[ "def", "delete", "(", "self", ")", ":", "params", "=", "{", "\"email\"", ":", "self", ".", "email_address", "}", "response", "=", "self", ".", "_delete", "(", "\"/admins.json\"", ",", "params", "=", "params", ")" ]
Deletes the administrator from the account.
[ "Deletes", "the", "administrator", "from", "the", "account", "." ]
python
train
FPGAwars/apio
apio/commands/boards.py
https://github.com/FPGAwars/apio/blob/5c6310f11a061a760764c6b5847bfb431dc3d0bc/apio/commands/boards.py#L18-L26
def cli(ctx, list, fpga): """Manage FPGA boards.""" if list: Resources().list_boards() elif fpga: Resources().list_fpgas() else: click.secho(ctx.get_help())
[ "def", "cli", "(", "ctx", ",", "list", ",", "fpga", ")", ":", "if", "list", ":", "Resources", "(", ")", ".", "list_boards", "(", ")", "elif", "fpga", ":", "Resources", "(", ")", ".", "list_fpgas", "(", ")", "else", ":", "click", ".", "secho", "("...
Manage FPGA boards.
[ "Manage", "FPGA", "boards", "." ]
python
train