repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
onnx/onnx
onnx/helper.py
https://github.com/onnx/onnx/blob/2f7dc10f03a072526d94b6820cedbf2a1ec5a2c4/onnx/helper.py#L290-L340
def make_tensor_value_info( name, # type: Text elem_type, # type: int shape, # type: Optional[Sequence[Union[Text, int]]] doc_string="", # type: Text shape_denotation=None, # type: Optional[List[Text]] ): # type: (...) -> ValueInfoProto """Makes a ValueInfoProto based on the data type and shape.""" value_info_proto = ValueInfoProto() value_info_proto.name = name if doc_string: value_info_proto.doc_string = doc_string tensor_type_proto = value_info_proto.type.tensor_type tensor_type_proto.elem_type = elem_type tensor_shape_proto = tensor_type_proto.shape if shape is not None: # You might think this is a no-op (extending a normal Python # list by [] certainly is), but protobuf lists work a little # differently; if a field is never set, it is omitted from the # resulting protobuf; a list that is explicitly set to be # empty will get an (empty) entry in the protobuf. This # difference is visible to our consumers, so make sure we emit # an empty shape! tensor_shape_proto.dim.extend([]) if shape_denotation: if len(shape_denotation) != len(shape): raise ValueError( 'Invalid shape_denotation. ' 'Must be of the same length as shape.') for i, d in enumerate(shape): dim = tensor_shape_proto.dim.add() if d is None: pass elif isinstance(d, integer_types): dim.dim_value = d elif isinstance(d, text_type): dim.dim_param = d else: raise ValueError( 'Invalid item in shape: {}. ' 'Needs to of integer_types or text_type.'.format(d)) if shape_denotation: dim.denotation = shape_denotation[i] return value_info_proto
[ "def", "make_tensor_value_info", "(", "name", ",", "# type: Text", "elem_type", ",", "# type: int", "shape", ",", "# type: Optional[Sequence[Union[Text, int]]]", "doc_string", "=", "\"\"", ",", "# type: Text", "shape_denotation", "=", "None", ",", "# type: Optional[List[Tex...
Makes a ValueInfoProto based on the data type and shape.
[ "Makes", "a", "ValueInfoProto", "based", "on", "the", "data", "type", "and", "shape", "." ]
python
train
mezz64/pyEight
pyeight/user.py
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L273-L285
def current_heart_rate(self): """Return current heart rate for in-progress session.""" try: rates = self.intervals[0]['timeseries']['heartRate'] num_rates = len(rates) if num_rates == 0: return None rate = rates[num_rates-1][1] except KeyError: rate = None return rate
[ "def", "current_heart_rate", "(", "self", ")", ":", "try", ":", "rates", "=", "self", ".", "intervals", "[", "0", "]", "[", "'timeseries'", "]", "[", "'heartRate'", "]", "num_rates", "=", "len", "(", "rates", ")", "if", "num_rates", "==", "0", ":", "...
Return current heart rate for in-progress session.
[ "Return", "current", "heart", "rate", "for", "in", "-", "progress", "session", "." ]
python
train
mitsei/dlkit
dlkit/handcar/learning/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/learning/objects.py#L1018-L1045
def get_next_objective_banks(self, n=None): """Gets the next set of ObjectiveBank elements in this list which must be less than or equal to the return from available(). arg: n (cardinal): the number of ObjectiveBank elements requested which must be less than or equal to available() return: (osid.learning.ObjectiveBank) - an array of ObjectiveBank elements. The length of the array is less than or equal to the number specified. raise: IllegalState - no more elements available in this list raise: OperationFailed - unable to complete request compliance: mandatory - This method must be implemented. """ if n > self.available(): # !!! This is not quite as specified (see method docs) !!! raise IllegalState('not enough elements available in this list') else: next_list = [] x = 0 while x < n: try: next_list.append(next(self)) except Exception: # Need to specify exceptions here! raise OperationFailed() x = x + 1 return next_list
[ "def", "get_next_objective_banks", "(", "self", ",", "n", "=", "None", ")", ":", "if", "n", ">", "self", ".", "available", "(", ")", ":", "# !!! This is not quite as specified (see method docs) !!!", "raise", "IllegalState", "(", "'not enough elements available in this ...
Gets the next set of ObjectiveBank elements in this list which must be less than or equal to the return from available(). arg: n (cardinal): the number of ObjectiveBank elements requested which must be less than or equal to available() return: (osid.learning.ObjectiveBank) - an array of ObjectiveBank elements. The length of the array is less than or equal to the number specified. raise: IllegalState - no more elements available in this list raise: OperationFailed - unable to complete request compliance: mandatory - This method must be implemented.
[ "Gets", "the", "next", "set", "of", "ObjectiveBank", "elements", "in", "this", "list", "which", "must", "be", "less", "than", "or", "equal", "to", "the", "return", "from", "available", "()", "." ]
python
train
senaite/senaite.core
bika/lims/content/worksheet.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/worksheet.py#L1193-L1203
def getRegularAnalyses(self): """ Return the analyses assigned to the current worksheet that are directly associated to an Analysis Request but are not QC analyses. This is all analyses that implement IRoutineAnalysis :return: List of regular analyses :rtype: List of ReferenceAnalysis/DuplicateAnalysis """ qc_types = ['ReferenceAnalysis', 'DuplicateAnalysis'] analyses = self.getAnalyses() return [a for a in analyses if a.portal_type not in qc_types]
[ "def", "getRegularAnalyses", "(", "self", ")", ":", "qc_types", "=", "[", "'ReferenceAnalysis'", ",", "'DuplicateAnalysis'", "]", "analyses", "=", "self", ".", "getAnalyses", "(", ")", "return", "[", "a", "for", "a", "in", "analyses", "if", "a", ".", "port...
Return the analyses assigned to the current worksheet that are directly associated to an Analysis Request but are not QC analyses. This is all analyses that implement IRoutineAnalysis :return: List of regular analyses :rtype: List of ReferenceAnalysis/DuplicateAnalysis
[ "Return", "the", "analyses", "assigned", "to", "the", "current", "worksheet", "that", "are", "directly", "associated", "to", "an", "Analysis", "Request", "but", "are", "not", "QC", "analyses", ".", "This", "is", "all", "analyses", "that", "implement", "IRoutin...
python
train
chrislit/abydos
abydos/phonetic/_beider_morse.py
https://github.com/chrislit/abydos/blob/165466b3ff6afd8024a4c8660421b0c4e7773db9/abydos/phonetic/_beider_morse.py#L788-L933
def encode( self, word, language_arg=0, name_mode='gen', match_mode='approx', concat=False, filter_langs=False, ): """Return the Beider-Morse Phonetic Matching encoding(s) of a term. Parameters ---------- word : str The word to transform language_arg : int The language of the term; supported values include: - ``any`` - ``arabic`` - ``cyrillic`` - ``czech`` - ``dutch`` - ``english`` - ``french`` - ``german`` - ``greek`` - ``greeklatin`` - ``hebrew`` - ``hungarian`` - ``italian`` - ``latvian`` - ``polish`` - ``portuguese`` - ``romanian`` - ``russian`` - ``spanish`` - ``turkish`` name_mode : str The name mode of the algorithm: - ``gen`` -- general (default) - ``ash`` -- Ashkenazi - ``sep`` -- Sephardic match_mode : str Matching mode: ``approx`` or ``exact`` concat : bool Concatenation mode filter_langs : bool Filter out incompatible languages Returns ------- tuple The Beider-Morse phonetic value(s) Raises ------ ValueError Unknown language Examples -------- >>> pe = BeiderMorse() >>> pe.encode('Christopher') 'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir xristYfir xristopi xritopir xritopi xristofi xritofir xritofi tzristopir tzristofir zristopir zristopi zritopir zritopi zristofir zristofi zritofir zritofi' >>> pe.encode('Niall') 'nial niol' >>> pe.encode('Smith') 'zmit' >>> pe.encode('Schmidt') 'zmit stzmit' >>> pe.encode('Christopher', language_arg='German') 'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir xristYfir' >>> pe.encode('Christopher', language_arg='English') 'tzristofir tzrQstofir tzristafir tzrQstafir xristofir xrQstofir xristafir xrQstafir' >>> pe.encode('Christopher', language_arg='German', name_mode='ash') 'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir xristYfir' >>> pe.encode('Christopher', language_arg='German', match_mode='exact') 'xriStopher xriStofer xristopher xristofer' """ word = normalize('NFC', text_type(word.strip().lower())) name_mode = name_mode.strip().lower()[:3] if 
name_mode not in {'ash', 'sep', 'gen'}: name_mode = 'gen' if match_mode != 'exact': match_mode = 'approx' # Translate the supplied language_arg value into an integer # representing a set of languages all_langs = ( sum(_LANG_DICT[_] for _ in BMDATA[name_mode]['languages']) - 1 ) lang_choices = 0 if isinstance(language_arg, (int, float, long)): lang_choices = int(language_arg) elif language_arg != '' and isinstance(language_arg, (text_type, str)): for lang in text_type(language_arg).lower().split(','): if lang in _LANG_DICT and (_LANG_DICT[lang] & all_langs): lang_choices += _LANG_DICT[lang] elif not filter_langs: raise ValueError( 'Unknown \'' + name_mode + '\' language: \'' + lang + '\'' ) # Language choices are either all incompatible with the name mode or # no choices were given, so try to autodetect if lang_choices == 0: language_arg = self._language(word, name_mode) else: language_arg = lang_choices language_arg2 = self._language_index_from_code(language_arg, name_mode) rules = BMDATA[name_mode]['rules'][language_arg2] final_rules1 = BMDATA[name_mode][match_mode]['common'] final_rules2 = BMDATA[name_mode][match_mode][language_arg2] result = self._phonetic( word, name_mode, rules, final_rules1, final_rules2, language_arg, concat, ) result = self._phonetic_numbers(result) return result
[ "def", "encode", "(", "self", ",", "word", ",", "language_arg", "=", "0", ",", "name_mode", "=", "'gen'", ",", "match_mode", "=", "'approx'", ",", "concat", "=", "False", ",", "filter_langs", "=", "False", ",", ")", ":", "word", "=", "normalize", "(", ...
Return the Beider-Morse Phonetic Matching encoding(s) of a term. Parameters ---------- word : str The word to transform language_arg : int The language of the term; supported values include: - ``any`` - ``arabic`` - ``cyrillic`` - ``czech`` - ``dutch`` - ``english`` - ``french`` - ``german`` - ``greek`` - ``greeklatin`` - ``hebrew`` - ``hungarian`` - ``italian`` - ``latvian`` - ``polish`` - ``portuguese`` - ``romanian`` - ``russian`` - ``spanish`` - ``turkish`` name_mode : str The name mode of the algorithm: - ``gen`` -- general (default) - ``ash`` -- Ashkenazi - ``sep`` -- Sephardic match_mode : str Matching mode: ``approx`` or ``exact`` concat : bool Concatenation mode filter_langs : bool Filter out incompatible languages Returns ------- tuple The Beider-Morse phonetic value(s) Raises ------ ValueError Unknown language Examples -------- >>> pe = BeiderMorse() >>> pe.encode('Christopher') 'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir xristYfir xristopi xritopir xritopi xristofi xritofir xritofi tzristopir tzristofir zristopir zristopi zritopir zritopi zristofir zristofi zritofir zritofi' >>> pe.encode('Niall') 'nial niol' >>> pe.encode('Smith') 'zmit' >>> pe.encode('Schmidt') 'zmit stzmit' >>> pe.encode('Christopher', language_arg='German') 'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir xristYfir' >>> pe.encode('Christopher', language_arg='English') 'tzristofir tzrQstofir tzristafir tzrQstafir xristofir xrQstofir xristafir xrQstafir' >>> pe.encode('Christopher', language_arg='German', name_mode='ash') 'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir xristYfir' >>> pe.encode('Christopher', language_arg='German', match_mode='exact') 'xriStopher xriStofer xristopher xristofer'
[ "Return", "the", "Beider", "-", "Morse", "Phonetic", "Matching", "encoding", "(", "s", ")", "of", "a", "term", "." ]
python
valid
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/pylabtools.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/pylabtools.py#L114-L151
def mpl_runner(safe_execfile): """Factory to return a matplotlib-enabled runner for %run. Parameters ---------- safe_execfile : function This must be a function with the same interface as the :meth:`safe_execfile` method of IPython. Returns ------- A function suitable for use as the ``runner`` argument of the %run magic function. """ def mpl_execfile(fname,*where,**kw): """matplotlib-aware wrapper around safe_execfile. Its interface is identical to that of the :func:`execfile` builtin. This is ultimately a call to execfile(), but wrapped in safeties to properly handle interactive rendering.""" import matplotlib import matplotlib.pylab as pylab #print '*** Matplotlib runner ***' # dbg # turn off rendering until end of script is_interactive = matplotlib.rcParams['interactive'] matplotlib.interactive(False) safe_execfile(fname,*where,**kw) matplotlib.interactive(is_interactive) # make rendering call now, if the user tried to do it if pylab.draw_if_interactive.called: pylab.draw() pylab.draw_if_interactive.called = False return mpl_execfile
[ "def", "mpl_runner", "(", "safe_execfile", ")", ":", "def", "mpl_execfile", "(", "fname", ",", "*", "where", ",", "*", "*", "kw", ")", ":", "\"\"\"matplotlib-aware wrapper around safe_execfile.\n\n Its interface is identical to that of the :func:`execfile` builtin.\n\n ...
Factory to return a matplotlib-enabled runner for %run. Parameters ---------- safe_execfile : function This must be a function with the same interface as the :meth:`safe_execfile` method of IPython. Returns ------- A function suitable for use as the ``runner`` argument of the %run magic function.
[ "Factory", "to", "return", "a", "matplotlib", "-", "enabled", "runner", "for", "%run", "." ]
python
test
andymccurdy/redis-py
redis/client.py
https://github.com/andymccurdy/redis-py/blob/cdfe2befbe00db4a3c48c9ddd6d64dea15f6f0db/redis/client.py#L1107-L1116
def slaveof(self, host=None, port=None): """ Set the server to be a replicated slave of the instance identified by the ``host`` and ``port``. If called without arguments, the instance is promoted to a master instead. """ if host is None and port is None: return self.execute_command('SLAVEOF', Token.get_token('NO'), Token.get_token('ONE')) return self.execute_command('SLAVEOF', host, port)
[ "def", "slaveof", "(", "self", ",", "host", "=", "None", ",", "port", "=", "None", ")", ":", "if", "host", "is", "None", "and", "port", "is", "None", ":", "return", "self", ".", "execute_command", "(", "'SLAVEOF'", ",", "Token", ".", "get_token", "("...
Set the server to be a replicated slave of the instance identified by the ``host`` and ``port``. If called without arguments, the instance is promoted to a master instead.
[ "Set", "the", "server", "to", "be", "a", "replicated", "slave", "of", "the", "instance", "identified", "by", "the", "host", "and", "port", ".", "If", "called", "without", "arguments", "the", "instance", "is", "promoted", "to", "a", "master", "instead", "."...
python
train
inveniosoftware/invenio-search
invenio_search/cli.py
https://github.com/inveniosoftware/invenio-search/blob/19c073d608d4c811f1c5aecb6622402d39715228/invenio_search/cli.py#L108-L116
def create(index_name, body, force, verbose): """Create a new index.""" result = current_search_client.indices.create( index=index_name, body=json.load(body), ignore=[400] if force else None, ) if verbose: click.echo(json.dumps(result))
[ "def", "create", "(", "index_name", ",", "body", ",", "force", ",", "verbose", ")", ":", "result", "=", "current_search_client", ".", "indices", ".", "create", "(", "index", "=", "index_name", ",", "body", "=", "json", ".", "load", "(", "body", ")", ",...
Create a new index.
[ "Create", "a", "new", "index", "." ]
python
train
d11wtq/dockerpty
dockerpty/__init__.py
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/__init__.py#L33-L44
def exec_command( client, container, command, interactive=True, stdout=None, stderr=None, stdin=None): """ Run provided command via exec API in provided container. This is just a wrapper for PseudoTerminal(client, container).exec_command() """ exec_id = exec_create(client, container, command, interactive=interactive) operation = ExecOperation(client, exec_id, interactive=interactive, stdout=stdout, stderr=stderr, stdin=stdin) PseudoTerminal(client, operation).start()
[ "def", "exec_command", "(", "client", ",", "container", ",", "command", ",", "interactive", "=", "True", ",", "stdout", "=", "None", ",", "stderr", "=", "None", ",", "stdin", "=", "None", ")", ":", "exec_id", "=", "exec_create", "(", "client", ",", "co...
Run provided command via exec API in provided container. This is just a wrapper for PseudoTerminal(client, container).exec_command()
[ "Run", "provided", "command", "via", "exec", "API", "in", "provided", "container", "." ]
python
train
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_notification_stream.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_notification_stream.py#L504-L514
def RIBVRFRouteLimitExceeded_originator_switch_info_switchIpV4Address(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") RIBVRFRouteLimitExceeded = ET.SubElement(config, "RIBVRFRouteLimitExceeded", xmlns="http://brocade.com/ns/brocade-notification-stream") originator_switch_info = ET.SubElement(RIBVRFRouteLimitExceeded, "originator-switch-info") switchIpV4Address = ET.SubElement(originator_switch_info, "switchIpV4Address") switchIpV4Address.text = kwargs.pop('switchIpV4Address') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "RIBVRFRouteLimitExceeded_originator_switch_info_switchIpV4Address", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "RIBVRFRouteLimitExceeded", "=", "ET", ".", "SubElement", "(", "config", ",", "...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
ibis-project/ibis
ibis/expr/api.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/expr/api.py#L311-L378
def interval( value=None, unit='s', years=None, quarters=None, months=None, weeks=None, days=None, hours=None, minutes=None, seconds=None, milliseconds=None, microseconds=None, nanoseconds=None, ): """ Returns an interval literal Parameters ---------- value : int or datetime.timedelta, default None years : int, default None quarters : int, default None months : int, default None days : int, default None weeks : int, default None hours : int, default None minutes : int, default None seconds : int, default None milliseconds : int, default None microseconds : int, default None nanoseconds : int, default None Returns -------- result : IntervalScalar """ if value is not None: if isinstance(value, datetime.timedelta): unit = 's' value = int(value.total_seconds()) elif not isinstance(value, int): raise ValueError('Interval value must be an integer') else: kwds = [ ('Y', years), ('Q', quarters), ('M', months), ('W', weeks), ('D', days), ('h', hours), ('m', minutes), ('s', seconds), ('ms', milliseconds), ('us', microseconds), ('ns', nanoseconds), ] defined_units = [(k, v) for k, v in kwds if v is not None] if len(defined_units) != 1: raise ValueError('Exactly one argument is required') unit, value = defined_units[0] value_type = literal(value).type() type = dt.Interval(unit, value_type) return literal(value, type=type).op().to_expr()
[ "def", "interval", "(", "value", "=", "None", ",", "unit", "=", "'s'", ",", "years", "=", "None", ",", "quarters", "=", "None", ",", "months", "=", "None", ",", "weeks", "=", "None", ",", "days", "=", "None", ",", "hours", "=", "None", ",", "minu...
Returns an interval literal Parameters ---------- value : int or datetime.timedelta, default None years : int, default None quarters : int, default None months : int, default None days : int, default None weeks : int, default None hours : int, default None minutes : int, default None seconds : int, default None milliseconds : int, default None microseconds : int, default None nanoseconds : int, default None Returns -------- result : IntervalScalar
[ "Returns", "an", "interval", "literal" ]
python
train
CEA-COSMIC/ModOpt
modopt/interface/errors.py
https://github.com/CEA-COSMIC/ModOpt/blob/019b189cb897cbb4d210c44a100daaa08468830c/modopt/interface/errors.py#L49-L76
def catch_error(exception, log=None): """Catch error This method catches errors and prints them to the terminal. It also saves the errors to a log if provided. Parameters ---------- exception : str Exception message string log : instance, optional Logging structure instance """ if import_fail: err_txt = 'ERROR' else: err_txt = colored('ERROR', 'red') # Print exception to stdout. stream_txt = err_txt + ': ' + str(exception) + '\n' sys.stderr.write(stream_txt) # Check if a logging structure is provided. if not isinstance(log, type(None)): log_txt = 'ERROR: ' + str(exception) + '\n' log.exception(log_txt)
[ "def", "catch_error", "(", "exception", ",", "log", "=", "None", ")", ":", "if", "import_fail", ":", "err_txt", "=", "'ERROR'", "else", ":", "err_txt", "=", "colored", "(", "'ERROR'", ",", "'red'", ")", "# Print exception to stdout.", "stream_txt", "=", "err...
Catch error This method catches errors and prints them to the terminal. It also saves the errors to a log if provided. Parameters ---------- exception : str Exception message string log : instance, optional Logging structure instance
[ "Catch", "error" ]
python
train
onicagroup/runway
runway/hooks/staticsite/build_staticsite.py
https://github.com/onicagroup/runway/blob/3f3549ec3bf6e39b9f27d9738a1847f3a4369e7f/runway/hooks/staticsite/build_staticsite.py#L21-L34
def does_s3_object_exist(bucket_name, key, session=None): """Determine if object exists on s3.""" if session: s3_resource = session.resource('s3') else: s3_resource = boto3.resource('s3') try: s3_resource.Object(bucket_name, key).load() except ClientError as exc: if exc.response['Error']['Code'] == '404': return False raise return True
[ "def", "does_s3_object_exist", "(", "bucket_name", ",", "key", ",", "session", "=", "None", ")", ":", "if", "session", ":", "s3_resource", "=", "session", ".", "resource", "(", "'s3'", ")", "else", ":", "s3_resource", "=", "boto3", ".", "resource", "(", ...
Determine if object exists on s3.
[ "Determine", "if", "object", "exists", "on", "s3", "." ]
python
train
ajenhl/tacl
tacl/data_store.py
https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/data_store.py#L412-L438
def diff_supplied(self, results_filenames, labels, tokenizer, output_fh): """Returns `output_fh` populated with CSV results giving the n-grams that are unique to the witnesses in each set of works in `results_sets`, using the labels in `labels`. Note that this is not the same as the symmetric difference of these sets, except in the case where there are only two labels. :param results_filenames: list of results filenames to be diffed :type results_filenames: `list` of `str` :param labels: labels to be applied to the results_sets :type labels: `list` :param tokenizer: tokenizer for the n-grams :type tokenizer: `Tokenizer` :param output_fh: object to output results to :type output_fh: file-like object :rtype: file-like object """ self._add_temporary_results_sets(results_filenames, labels) query = constants.SELECT_DIFF_SUPPLIED_SQL self._logger.info('Running supplied diff query') self._logger.debug('Query: {}'.format(query)) self._log_query_plan(query, []) cursor = self._conn.execute(query) return self._diff(cursor, tokenizer, output_fh)
[ "def", "diff_supplied", "(", "self", ",", "results_filenames", ",", "labels", ",", "tokenizer", ",", "output_fh", ")", ":", "self", ".", "_add_temporary_results_sets", "(", "results_filenames", ",", "labels", ")", "query", "=", "constants", ".", "SELECT_DIFF_SUPPL...
Returns `output_fh` populated with CSV results giving the n-grams that are unique to the witnesses in each set of works in `results_sets`, using the labels in `labels`. Note that this is not the same as the symmetric difference of these sets, except in the case where there are only two labels. :param results_filenames: list of results filenames to be diffed :type results_filenames: `list` of `str` :param labels: labels to be applied to the results_sets :type labels: `list` :param tokenizer: tokenizer for the n-grams :type tokenizer: `Tokenizer` :param output_fh: object to output results to :type output_fh: file-like object :rtype: file-like object
[ "Returns", "output_fh", "populated", "with", "CSV", "results", "giving", "the", "n", "-", "grams", "that", "are", "unique", "to", "the", "witnesses", "in", "each", "set", "of", "works", "in", "results_sets", "using", "the", "labels", "in", "labels", "." ]
python
train
sdispater/orator
orator/query/builder.py
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/query/builder.py#L325-L344
def right_join_where(self, table, one, operator, two): """ Add a "right join where" clause to the query :param table: The table to join with, can also be a JoinClause instance :type table: str or JoinClause :param one: The first column of the join condition :type one: str :param operator: The operator of the join condition :type operator: str :param two: The second column of the join condition :type two: str :return: The current QueryBuilder instance :rtype: QueryBuilder """ return self.join_where(table, one, operator, two, "right")
[ "def", "right_join_where", "(", "self", ",", "table", ",", "one", ",", "operator", ",", "two", ")", ":", "return", "self", ".", "join_where", "(", "table", ",", "one", ",", "operator", ",", "two", ",", "\"right\"", ")" ]
Add a "right join where" clause to the query :param table: The table to join with, can also be a JoinClause instance :type table: str or JoinClause :param one: The first column of the join condition :type one: str :param operator: The operator of the join condition :type operator: str :param two: The second column of the join condition :type two: str :return: The current QueryBuilder instance :rtype: QueryBuilder
[ "Add", "a", "right", "join", "where", "clause", "to", "the", "query" ]
python
train
benoitkugler/abstractDataLibrary
pyDLib/Core/formats.py
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/formats.py#L316-L318
def _type_bool(label,default=False): """Shortcut fot boolean like fields""" return label, abstractSearch.nothing, abstractRender.boolen, default
[ "def", "_type_bool", "(", "label", ",", "default", "=", "False", ")", ":", "return", "label", ",", "abstractSearch", ".", "nothing", ",", "abstractRender", ".", "boolen", ",", "default" ]
Shortcut fot boolean like fields
[ "Shortcut", "fot", "boolean", "like", "fields" ]
python
train
kytos/python-openflow
pyof/v0x01/controller2switch/stats_request.py
https://github.com/kytos/python-openflow/blob/4f2d0d08ab28e102ed88fe57a4ee17729f1e1bb7/pyof/v0x01/controller2switch/stats_request.py#L41-L56
def pack(self, value=None): """Pack according to :attr:`body_type`. Make `body` a binary pack before packing this object. Then, restore body. """ backup = self.body if not value: value = self.body if hasattr(value, 'pack'): self.body = value.pack() stats_request_packed = super().pack() self.body = backup return stats_request_packed
[ "def", "pack", "(", "self", ",", "value", "=", "None", ")", ":", "backup", "=", "self", ".", "body", "if", "not", "value", ":", "value", "=", "self", ".", "body", "if", "hasattr", "(", "value", ",", "'pack'", ")", ":", "self", ".", "body", "=", ...
Pack according to :attr:`body_type`. Make `body` a binary pack before packing this object. Then, restore body.
[ "Pack", "according", "to", ":", "attr", ":", "body_type", "." ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/thread.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/thread.py#L1785-L1797
def get_thread(self, dwThreadId): """ @type dwThreadId: int @param dwThreadId: Global ID of the thread to look for. @rtype: L{Thread} @return: Thread object with the given global ID. """ self.__initialize_snapshot() if dwThreadId not in self.__threadDict: msg = "Unknown thread ID: %d" % dwThreadId raise KeyError(msg) return self.__threadDict[dwThreadId]
[ "def", "get_thread", "(", "self", ",", "dwThreadId", ")", ":", "self", ".", "__initialize_snapshot", "(", ")", "if", "dwThreadId", "not", "in", "self", ".", "__threadDict", ":", "msg", "=", "\"Unknown thread ID: %d\"", "%", "dwThreadId", "raise", "KeyError", "...
@type dwThreadId: int @param dwThreadId: Global ID of the thread to look for. @rtype: L{Thread} @return: Thread object with the given global ID.
[ "@type", "dwThreadId", ":", "int", "@param", "dwThreadId", ":", "Global", "ID", "of", "the", "thread", "to", "look", "for", "." ]
python
train
tjcsl/ion
intranet/apps/lostfound/views.py
https://github.com/tjcsl/ion/blob/5d722b0725d572039bb0929fd5715a4070c82c72/intranet/apps/lostfound/views.py#L46-L63
def lostitem_add_view(request): """Add a lostitem.""" if request.method == "POST": form = LostItemForm(request.POST) logger.debug(form) if form.is_valid(): obj = form.save() obj.user = request.user # SAFE HTML obj.description = safe_html(obj.description) obj.save() messages.success(request, "Successfully added lost item.") return redirect("lostitem_view", obj.id) else: messages.error(request, "Error adding lost item.") else: form = LostItemForm() return render(request, "lostfound/lostitem_form.html", {"form": form, "action": "add"})
[ "def", "lostitem_add_view", "(", "request", ")", ":", "if", "request", ".", "method", "==", "\"POST\"", ":", "form", "=", "LostItemForm", "(", "request", ".", "POST", ")", "logger", ".", "debug", "(", "form", ")", "if", "form", ".", "is_valid", "(", ")...
Add a lostitem.
[ "Add", "a", "lostitem", "." ]
python
train
nutechsoftware/alarmdecoder
alarmdecoder/decoder.py
https://github.com/nutechsoftware/alarmdecoder/blob/b0c014089e24455228cb4402cf30ba98157578cd/alarmdecoder/decoder.py#L531-L544
def _handle_aui(self, data): """ Handle AUI messages. :param data: RF message to parse :type data: string :returns: :py:class`~alarmdecoder.messages.AUIMessage` """ msg = AUIMessage(data) self.on_aui_message(message=msg) return msg
[ "def", "_handle_aui", "(", "self", ",", "data", ")", ":", "msg", "=", "AUIMessage", "(", "data", ")", "self", ".", "on_aui_message", "(", "message", "=", "msg", ")", "return", "msg" ]
Handle AUI messages. :param data: RF message to parse :type data: string :returns: :py:class`~alarmdecoder.messages.AUIMessage`
[ "Handle", "AUI", "messages", "." ]
python
train
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAData/dsmethods.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAData/dsmethods.py#L192-L211
def QDS_StockMinWarpper(func, *args, **kwargs):
    """Minute-bar QDS decorator (分钟线QDS装饰器).

    Wraps *func* so its DataFrame result is returned as a
    ``QA_DataStruct_Stock_min``.  If the frame already carries a
    MultiIndex it is wrapped directly; otherwise a
    (``datetime``, ``code``) index is rebuilt from the columns first.

    Note: the decorator-level ``*args``/``**kwargs`` are accepted for
    backward compatibility but are unused.
    """
    from functools import wraps

    # IMPROVEMENT: preserve the wrapped function's name/docstring so
    # introspection and logging see the original identity.
    @wraps(func)
    def warpper(*args, **kwargs):
        data = func(*args, **kwargs)
        if isinstance(data.index, pd.MultiIndex):
            return QA_DataStruct_Stock_min(data)
        return QA_DataStruct_Stock_min(
            data.assign(datetime=pd.to_datetime(data.datetime)
                        ).set_index(['datetime', 'code'], drop=False),
            dtype='stock_min'
        )

    return warpper
[ "def", "QDS_StockMinWarpper", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "warpper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "data", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "i...
分钟线QDS装饰器
[ "分钟线QDS装饰器" ]
python
train
gem/oq-engine
openquake/hazardlib/gsim/youngs_1997.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/youngs_1997.py#L83-L119
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """
    See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.
    """
    assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
               for stddev_type in stddev_types)
    # Accumulators are filled in place by _compute_mean/_compute_std,
    # masked per site class.
    mean = np.zeros_like(sites.vs30)
    stddevs = [np.zeros_like(sites.vs30) for _ in stddev_types]
    # Partition sites into rock vs. soil by the vs30 threshold; each
    # subset uses its own coefficient table and constants.
    idx_rock = sites.vs30 >= self.ROCK_VS30
    idx_soil = sites.vs30 < self.ROCK_VS30
    if idx_rock.any():
        C = self.COEFFS_ROCK[imt]
        self._compute_mean(C, self.CONSTS['A1_rock'], self.CONSTS['A2_rock'],
                           self.CONSTS['A3_rock'], self.CONSTS['A4_rock'],
                           self.CONSTS['A5_rock'], self.CONSTS['A6_rock'],
                           rup.mag, rup.hypo_depth, dists.rrup, mean,
                           idx_rock)
        self._compute_std(C, rup.mag, stddevs, idx_rock)
        if imt == SA(period=4.0, damping=5.0):
            # NOTE(review): rock means at SA(4.0s) are rescaled by 1/0.399 —
            # presumably a published conversion factor for this GMPE;
            # confirm against the model's verification tables.
            mean = mean / 0.399
    if idx_soil.any():
        C = self.COEFFS_SOIL[imt]
        self._compute_mean(C, self.CONSTS['A1_soil'], self.CONSTS['A2_soil'],
                           self.CONSTS['A3_soil'], self.CONSTS['A4_soil'],
                           self.CONSTS['A5_soil'], self.CONSTS['A6_soil'],
                           rup.mag, rup.hypo_depth, dists.rrup, mean,
                           idx_soil)
        self._compute_std(C, rup.mag, stddevs, idx_soil)
    return mean, stddevs
[ "def", "get_mean_and_stddevs", "(", "self", ",", "sites", ",", "rup", ",", "dists", ",", "imt", ",", "stddev_types", ")", ":", "assert", "all", "(", "stddev_type", "in", "self", ".", "DEFINED_FOR_STANDARD_DEVIATION_TYPES", "for", "stddev_type", "in", "stddev_typ...
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
[ "See", ":", "meth", ":", "superclass", "method", "<", ".", "base", ".", "GroundShakingIntensityModel", ".", "get_mean_and_stddevs", ">", "for", "spec", "of", "input", "and", "result", "values", "." ]
python
train
AltSchool/dynamic-rest
dynamic_rest/filters.py
https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/filters.py#L279-L322
def _filters_to_query(self, includes, excludes, serializer, q=None):
    """
    Construct a Django ``Q`` object from the request's filter TreeMaps.

    Each TreeMap is turned into keyword arguments for ``Q()``, e.g.
    ``{'foo': 'bar', 'baz__in': [1, 2]}`` produces
    ``Q(foo='bar', baz__in=[1, 2])``.

    Arguments:
      includes: TreeMap representing inclusion filters.
      excludes: TreeMap representing exclusion filters.
      serializer: serializer instance of the top-level object.
      q: existing Q() object to extend (optional).

    Returns:
      Q() instance, or None when neither includes nor excludes were given.
    """
    def rewrite_filters(filters, serializer):
        rewritten = {}
        for _, node in six.iteritems(filters):
            filter_key, field = node.generate_query_key(serializer)
            # Raw request values for boolean fields arrive as strings;
            # coerce them to real booleans.
            if isinstance(field, (BooleanField, NullBooleanField)):
                node.value = is_truthy(node.value)
            rewritten[filter_key] = node.value
        return rewritten

    query = q or Q()

    if not includes and not excludes:
        return None

    if includes:
        query &= Q(**rewrite_filters(includes, serializer))

    if excludes:
        for key, value in six.iteritems(rewrite_filters(excludes, serializer)):
            query &= ~Q(**{key: value})

    return query
[ "def", "_filters_to_query", "(", "self", ",", "includes", ",", "excludes", ",", "serializer", ",", "q", "=", "None", ")", ":", "def", "rewrite_filters", "(", "filters", ",", "serializer", ")", ":", "out", "=", "{", "}", "for", "k", ",", "node", "in", ...
Construct Django Query object from request. Arguments are dictionaries, which will be passed to Q() as kwargs. e.g. includes = { 'foo' : 'bar', 'baz__in' : [1, 2] } produces: Q(foo='bar', baz__in=[1, 2]) Arguments: includes: TreeMap representing inclusion filters. excludes: TreeMap representing exclusion filters. serializer: serializer instance of top-level object q: Q() object (optional) Returns: Q() instance or None if no inclusion or exclusion filters were specified.
[ "Construct", "Django", "Query", "object", "from", "request", ".", "Arguments", "are", "dictionaries", "which", "will", "be", "passed", "to", "Q", "()", "as", "kwargs", "." ]
python
train
econ-ark/HARK
HARK/ConsumptionSaving/ConsMedModel.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/ConsumptionSaving/ConsMedModel.py#L1211-L1236
def makeBasicSolution(self,EndOfPrdvP,aLvl,interpolator):
    '''
    Construct the basic solution for this period from end-of-period
    assets and end-of-period marginal value.

    Parameters
    ----------
    EndOfPrdvP : np.array
        Array of end-of-period marginal values.
    aLvl : np.array
        Array of end-of-period asset values that yield the marginal
        values in EndOfPrdvP.
    interpolator : function
        A function that constructs and returns a consumption function.

    Returns
    -------
    solution_now : ConsumerSolution
        The solution to this period's consumption-saving problem, with a
        consumption function, marginal value function, and minimum m.
    '''
    xLvl, mLvl, pLvl = self.getPointsForInterpolation(EndOfPrdvP, aLvl)
    # Broadcast the medical-shock grid across the (mLvl, pLvl) dimensions.
    shock_column = np.reshape(self.MedShkVals, (self.MedShkVals.size, 1, 1))
    MedShk_temp = np.tile(shock_column, (1, mLvl.shape[1], mLvl.shape[2]))
    return self.usePointsForInterpolation(xLvl, mLvl, pLvl, MedShk_temp,
                                          interpolator)
[ "def", "makeBasicSolution", "(", "self", ",", "EndOfPrdvP", ",", "aLvl", ",", "interpolator", ")", ":", "xLvl", ",", "mLvl", ",", "pLvl", "=", "self", ".", "getPointsForInterpolation", "(", "EndOfPrdvP", ",", "aLvl", ")", "MedShk_temp", "=", "np", ".", "ti...
Given end of period assets and end of period marginal value, construct the basic solution for this period. Parameters ---------- EndOfPrdvP : np.array Array of end-of-period marginal values. aLvl : np.array Array of end-of-period asset values that yield the marginal values in EndOfPrdvP. interpolator : function A function that constructs and returns a consumption function. Returns ------- solution_now : ConsumerSolution The solution to this period's consumption-saving problem, with a consumption function, marginal value function, and minimum m.
[ "Given", "end", "of", "period", "assets", "and", "end", "of", "period", "marginal", "value", "construct", "the", "basic", "solution", "for", "this", "period", "." ]
python
train
calmjs/calmjs
src/calmjs/artifact.py
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/artifact.py#L594-L607
def execute_builder(self, entry_point, toolchain, spec):
    """
    Accepts the arguments provided by the builder and executes them.

    Runs *toolchain* on *spec*; if the declared export target was
    produced, returns the metadata entry for it, otherwise logs an
    error and returns an empty dict.
    """
    toolchain(spec)
    target = spec['export_target']
    if exists(target):
        return self.generate_metadata_entry(entry_point, toolchain, spec)
    logger.error(
        "the entry point '%s' from package '%s' failed to "
        "generate an artifact at '%s'",
        entry_point, entry_point.dist, target
    )
    return {}
[ "def", "execute_builder", "(", "self", ",", "entry_point", ",", "toolchain", ",", "spec", ")", ":", "toolchain", "(", "spec", ")", "if", "not", "exists", "(", "spec", "[", "'export_target'", "]", ")", ":", "logger", ".", "error", "(", "\"the entry point '%...
Accepts the arguments provided by the builder and executes them.
[ "Accepts", "the", "arguments", "provided", "by", "the", "builder", "and", "executes", "them", "." ]
python
train
delfick/harpoon
harpoon/ship/builder.py
https://github.com/delfick/harpoon/blob/a2d39311d6127b7da2e15f40468bf320d598e461/harpoon/ship/builder.py#L104-L135
def make_image(self, conf, images, chain=None, parent_chain=None, made=None, ignore_deps=False, ignore_parent=False, pushing=False):
    """Make us an image

    Recursively builds *conf*'s dependency images and FROM-parents,
    then builds *conf* itself.  ``chain``/``parent_chain`` track the
    recursion path to detect cycles; ``made`` memoizes images already
    built in this call tree.
    """
    # Fresh mutable defaults per call (avoids the shared-default pitfall).
    made = {} if made is None else made
    chain = [] if chain is None else chain
    parent_chain = [] if parent_chain is None else parent_chain

    # Already built during this recursion -> nothing to do.
    if conf.name in made:
        return

    # Cycle via dependency_images().
    if conf.name in chain and not ignore_deps:
        raise BadCommand("Recursive dependency images", chain=chain + [conf.name])

    # Cycle via FROM statements.
    if conf.name in parent_chain and not ignore_parent:
        raise BadCommand("Recursive FROM statements", chain=parent_chain + [conf.name])

    if conf.name not in images:
        raise NoSuchImage(looking_for=conf.name, available=images.keys())

    if not ignore_deps:
        for dependency, image in conf.dependency_images():
            self.make_image(images[dependency], images, chain=chain + [conf.name], made=made, pushing=pushing)

    if not ignore_parent:
        for dep in conf.commands.dependent_images:
            # NOTE(review): non-string entries appear to be image confs
            # rather than names — confirm against commands.dependent_images.
            if not isinstance(dep, six.string_types):
                self.make_image(dep, images, chain, parent_chain + [conf.name], made=made, pushing=pushing)

    # Should have all our dependencies now
    log.info("Making image for '%s' (%s)", conf.name, conf.image_name)

    cached = self.build_image(conf, pushing=pushing)
    made[conf.name] = True
    return cached
[ "def", "make_image", "(", "self", ",", "conf", ",", "images", ",", "chain", "=", "None", ",", "parent_chain", "=", "None", ",", "made", "=", "None", ",", "ignore_deps", "=", "False", ",", "ignore_parent", "=", "False", ",", "pushing", "=", "False", ")"...
Make us an image
[ "Make", "us", "an", "image" ]
python
train
ghukill/pyfc4
pyfc4/models.py
https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L162-L226
def get_resource(self, uri, resource_type=None, response_format=None):
    '''
    Retrieve resource:
            - Issues an initial GET request
            - If 200, continues, 404, returns False, otherwise raises Exception
            - Parse resource type
                    - If custom resource type parser provided, this fires
                    - Else, or if custom parser misses, fire HEAD request and
                      parse LDP resource type from Link header
            - Return instantiated pyfc4 resource

    Args:
            uri (rdflib.term.URIRef,str): input URI
            resource_type (): resource class e.g. BasicContainer,
                    NonRDFSource, or extensions thereof
            response_format (str): expects mimetype / Content-Type header
                    such as 'application/rdf+xml', 'text/turtle', etc.

    Returns:
            Resource
    '''

    # handle uri
    uri = self.parse_uri(uri)

    # remove fcr:metadata if included, as handled below
    # BUGFIX: str.rstrip('/fcr:metadata') strips any trailing run of
    # those *characters* (so URIs ending in e.g. 'data' were mangled);
    # slice the exact suffix off instead.
    uri_string = uri.toPython()
    if uri_string.endswith('/fcr:metadata'):
        uri = rdflib.term.URIRef(uri_string[:-len('/fcr:metadata')])

    # fire GET request
    get_response = self.api.http_request(
        'GET',
        "%s/fcr:metadata" % uri,
        response_format=response_format)

    # 404, item does not exist, return False
    if get_response.status_code == 404:
        logger.debug('resource uri %s not found, returning False' % uri)
        return False

    # assume exists, parse headers for resource type and return instance
    elif get_response.status_code == 200:

        # if resource_type not provided
        if not resource_type:

            # if custom resource type parser affixed to repo instance, fire
            if self.custom_resource_type_parser:
                logger.debug("custom resource type parser provided, attempting")
                resource_type = self.custom_resource_type_parser(self, uri, get_response)

            # parse LDP resource type from headers if custom resource parser
            # misses, or not provided
            if not resource_type:
                # Issue HEAD request to get LDP resource type from URI
                # proper, not /fcr:metadata
                head_response = self.api.http_request('HEAD', uri)
                resource_type = self.api.parse_resource_type(head_response)

        logger.debug('using resource type: %s' % resource_type)

        # return resource
        return resource_type(self, uri, response=get_response)

    else:
        raise Exception('HTTP %s, error retrieving resource uri %s' % (get_response.status_code, uri))
[ "def", "get_resource", "(", "self", ",", "uri", ",", "resource_type", "=", "None", ",", "response_format", "=", "None", ")", ":", "# handle uri", "uri", "=", "self", ".", "parse_uri", "(", "uri", ")", "# remove fcr:metadata if included, as handled below", "if", ...
Retrieve resource: - Issues an initial GET request - If 200, continues, 404, returns False, otherwise raises Exception - Parse resource type - If custom resource type parser provided, this fires - Else, or if custom parser misses, fire HEAD request and parse LDP resource type from Link header - Return instantiated pyfc4 resource Args: uri (rdflib.term.URIRef,str): input URI resource_type (): resource class e.g. BasicContainer, NonRDFSource, or extensions thereof response_format (str): expects mimetype / Content-Type header such as 'application/rdf+xml', 'text/turtle', etc. Returns: Resource
[ "Retrieve", "resource", ":", "-", "Issues", "an", "initial", "GET", "request", "-", "If", "200", "continues", "404", "returns", "False", "otherwise", "raises", "Exception", "-", "Parse", "resource", "type", "-", "If", "custom", "resource", "type", "parser", ...
python
train
knagra/farnsworth
workshift/views.py
https://github.com/knagra/farnsworth/blob/1b6589f0d9fea154f0a1e2231ed906764ed26d26/workshift/views.py#L1699-L1715
def type_view(request, semester, pk, profile=None):
    """
    View the details of a particular WorkshiftType.
    """
    any_management = utils.can_manage(request.user, semester, any_pool=True)
    wtype = get_object_or_404(WorkshiftType, pk=pk)
    regular_shifts = RegularWorkshift.objects.filter(
        workshift_type=wtype,
        pool__semester=semester,
    )
    context = {
        "page_name": wtype.title,
        "wtype": wtype,
        "regular_shifts": regular_shifts,
        "can_edit": any_management,
    }
    return render_to_response("view_type.html", context,
                              context_instance=RequestContext(request))
[ "def", "type_view", "(", "request", ",", "semester", ",", "pk", ",", "profile", "=", "None", ")", ":", "any_management", "=", "utils", ".", "can_manage", "(", "request", ".", "user", ",", "semester", ",", "any_pool", "=", "True", ")", "wtype", "=", "ge...
View the details of a particular WorkshiftType.
[ "View", "the", "details", "of", "a", "particular", "WorkshiftType", "." ]
python
train
manns/pyspread
pyspread/src/gui/_main_window.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_main_window.py#L1364-L1405
def OnFontDialog(self, event):
    """Event handler for launching font dialog

    Reads the current cell's font attributes, lets the user choose a
    font in a wx.FontDialog, and broadcasts the chosen face name, size,
    weight and style via post_command_event so the grid picks them up.
    """
    # Get current font data from current cell
    cursor = self.main_window.grid.actions.cursor
    attr = self.main_window.grid.code_array.cell_attributes[cursor]
    size, style, weight, font = \
        [attr[name] for name in ["pointsize", "fontstyle",
                                 "fontweight", "textfont"]]

    current_font = wx.Font(int(size), -1, style, weight, 0, font)

    # Get Font from dialog
    fontdata = wx.FontData()
    fontdata.EnableEffects(True)
    fontdata.SetInitialFont(current_font)

    # NOTE(review): the dialog is never Destroy()ed; wx usually expects
    # an explicit Destroy() for modal dialogs — confirm and fix upstream.
    dlg = wx.FontDialog(self.main_window, fontdata)

    if dlg.ShowModal() == wx.ID_OK:
        fontdata = dlg.GetFontData()
        font = fontdata.GetChosenFont()

        post_command_event(self.main_window, self.main_window.FontMsg,
                           font=font.FaceName)
        post_command_event(self.main_window, self.main_window.FontSizeMsg,
                           size=font.GetPointSize())
        post_command_event(self.main_window, self.main_window.FontBoldMsg,
                           weight=font.GetWeightString())
        post_command_event(self.main_window, self.main_window.FontItalicsMsg,
                           style=font.GetStyleString())

        if is_gtk():
            try:
                wx.Yield()
            # BUGFIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; wx.Yield is best-effort
            # here, so only ordinary exceptions are ignored.
            except Exception:
                pass

    self.main_window.grid.update_attribute_toolbar()
[ "def", "OnFontDialog", "(", "self", ",", "event", ")", ":", "# Get current font data from current cell", "cursor", "=", "self", ".", "main_window", ".", "grid", ".", "actions", ".", "cursor", "attr", "=", "self", ".", "main_window", ".", "grid", ".", "code_arr...
Event handler for launching font dialog
[ "Event", "handler", "for", "launching", "font", "dialog" ]
python
train
nschloe/matplotlib2tikz
matplotlib2tikz/line2d.py
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/line2d.py#L88-L111
def draw_linecollection(data, obj):
    """Returns Pgfplots code for a number of patch objects.
    """
    def _at(values, i):
        # Per-path properties fall back to the first entry when the
        # property list is shorter than the path list.
        return values[i] if i < len(values) else values[0]

    edge_colors = obj.get_edgecolors()
    styles = obj.get_linestyles()
    widths = obj.get_linewidths()

    content = []
    for i, path in enumerate(obj.get_paths()):
        data, options = mypath.get_draw_options(
            data, obj, _at(edge_colors, i), None, _at(styles, i), _at(widths, i)
        )

        # TODO what about masks?
        data, cont, _, _ = mypath.draw_path(
            data, path, draw_options=options, simplify=False
        )
        content.append(cont + "\n")

    return data, content
[ "def", "draw_linecollection", "(", "data", ",", "obj", ")", ":", "content", "=", "[", "]", "edgecolors", "=", "obj", ".", "get_edgecolors", "(", ")", "linestyles", "=", "obj", ".", "get_linestyles", "(", ")", "linewidths", "=", "obj", ".", "get_linewidths"...
Returns Pgfplots code for a number of patch objects.
[ "Returns", "Pgfplots", "code", "for", "a", "number", "of", "patch", "objects", "." ]
python
train
bitesofcode/projexui
projexui/menus/xrecentfilesmenu.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/menus/xrecentfilesmenu.py#L114-L132
def setFilenames( self, filenames ):
    """
    Sets the list of filenames that will be used for this menu to the \
    inputed list, keeping at most ``maximumLength()`` entries.

    :param      filenames | [<str>, ..]
    """
    limit = self.maximumLength()
    mapped = []
    for filename in filenames:
        # BUGFIX: test the cap with >= *before* appending so a limit of
        # 0 (or negative) yields an empty list — the original `==` test
        # after the append could never stop for such limits.
        if len(mapped) >= limit:
            break

        filename = nativestring(filename)
        # Skip empty entries.
        if not filename:
            continue

        mapped.append(filename)

    self._filenames = mapped
    self.refresh()
[ "def", "setFilenames", "(", "self", ",", "filenames", ")", ":", "mapped", "=", "[", "]", "for", "filename", "in", "filenames", ":", "filename", "=", "nativestring", "(", "filename", ")", "if", "(", "not", "filename", ")", ":", "continue", "mapped", ".", ...
Sets the list of filenames that will be used for this menu to the \ inputed list. :param filenames | [<str>, ..]
[ "Sets", "the", "list", "of", "filenames", "that", "will", "be", "used", "for", "this", "menu", "to", "the", "\\", "inputed", "list", ".", ":", "param", "filenames", "|", "[", "<str", ">", "..", "]" ]
python
train
cimm-kzn/CGRtools
CGRtools/containers/reaction.py
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/reaction.py#L246-L255
def calculate2d(self, force=True):
    """
    recalculate 2d coordinates. currently rings can be calculated badly.

    :param force: ignore existing coordinates of atoms
    """
    # Recompute coordinates for every molecule on each side of the
    # reaction, then realign the layout.
    for molecule_group in (self.__reagents, self.__reactants, self.__products):
        for molecule in molecule_group:
            molecule.calculate2d(force)
    self.fix_positions()
[ "def", "calculate2d", "(", "self", ",", "force", "=", "True", ")", ":", "for", "ml", "in", "(", "self", ".", "__reagents", ",", "self", ".", "__reactants", ",", "self", ".", "__products", ")", ":", "for", "m", "in", "ml", ":", "m", ".", "calculate2...
recalculate 2d coordinates. currently rings can be calculated badly. :param force: ignore existing coordinates of atoms
[ "recalculate", "2d", "coordinates", ".", "currently", "rings", "can", "be", "calculated", "badly", "." ]
python
train
PythonCharmers/python-future
src/future/backports/html/parser.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/html/parser.py#L135-L141
def reset(self):
    """Reset this instance.  Loses all unprocessed data."""
    # Clear buffered input and parser bookkeeping, then let the base
    # class reset its own state.
    self.rawdata = ''
    self.lasttag = '???'
    self.cdata_elem = None
    self.interesting = interesting_normal
    _markupbase.ParserBase.reset(self)
[ "def", "reset", "(", "self", ")", ":", "self", ".", "rawdata", "=", "''", "self", ".", "lasttag", "=", "'???'", "self", ".", "interesting", "=", "interesting_normal", "self", ".", "cdata_elem", "=", "None", "_markupbase", ".", "ParserBase", ".", "reset", ...
Reset this instance. Loses all unprocessed data.
[ "Reset", "this", "instance", ".", "Loses", "all", "unprocessed", "data", "." ]
python
train
phoebe-project/phoebe2
phoebe/frontend/bundle.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/frontend/bundle.py#L3357-L3389
def add_prior(self, twig=None, **kwargs):
    """
    [NOT IMPLEMENTED]

    :raises NotImplementedError: because it isn't
    """
    raise NotImplementedError

    # NOTE: everything below is unreachable scaffolding for the eventual
    # implementation and is kept for reference only.
    param = self.get_parameter(twig=twig, **kwargs)

    # TODO: make sure param is a float parameter?

    func = _get_add_func(_distributions, 'prior')

    # TODO: send smart defaults for priors based on limits of parameter
    params = func(**kwargs)

    metawargs = {k: v for k, v in params.meta.items()
                 if k not in ['uniqueid', 'uniquetwig', 'twig']}
    metawargs['context'] = 'prior'
    logger.info("adding prior on '{}' parameter".format(param.uniquetwig))
    self._attach_params(params, **metawargs)

    redo_kwargs = deepcopy(kwargs)
    redo_kwargs['func'] = func.func_name
    self._add_history(redo_func='add_prior',
                      redo_kwargs=redo_kwargs,
                      undo_func='remove_prior',
                      undo_kwargs={'twig': param.uniquetwig})

    # return params
    return self.get_prior(**metawargs)
[ "def", "add_prior", "(", "self", ",", "twig", "=", "None", ",", "*", "*", "kwargs", ")", ":", "raise", "NotImplementedError", "param", "=", "self", ".", "get_parameter", "(", "twig", "=", "twig", ",", "*", "*", "kwargs", ")", "# TODO: make sure param is a ...
[NOT IMPLEMENTED] :raises NotImplementedError: because it isn't
[ "[", "NOT", "IMPLEMENTED", "]" ]
python
train
contentful/contentful-management.py
contentful_management/client.py
https://github.com/contentful/contentful-management.py/blob/707dd30883b98a10c7ff0f7f5bdb8edbdc1d8df0/contentful_management/client.py#L776-L781
def _post(self, url, attributes=None, **kwargs):
    """
    Wrapper for the HTTP POST request.

    Delegates to the generic ``_request`` helper with the method fixed
    to 'post'.
    """
    method = 'post'
    return self._request(method, url, attributes, **kwargs)
[ "def", "_post", "(", "self", ",", "url", ",", "attributes", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_request", "(", "'post'", ",", "url", ",", "attributes", ",", "*", "*", "kwargs", ")" ]
Wrapper for the HTTP POST request.
[ "Wrapper", "for", "the", "HTTP", "POST", "request", "." ]
python
train
OCHA-DAP/hdx-python-api
src/hdx/hdx_configuration.py
https://github.com/OCHA-DAP/hdx-python-api/blob/212440f54f73805826a16db77dbcb6033b18a313/src/hdx/hdx_configuration.py#L367-L383
def setup_remoteckan(self, remoteckan=None, **kwargs):
    # type: (Optional[ckanapi.RemoteCKAN], Any) -> None
    """
    Set up remote CKAN from provided CKAN or by creating from configuration

    Args:
        remoteckan (Optional[ckanapi.RemoteCKAN]): CKAN instance. Defaults to setting one up from configuration.

    Returns:
        None
    """
    # Prefer a caller-supplied instance; otherwise build one from the
    # configured site URL and user agent.
    if remoteckan is not None:
        self._remoteckan = remoteckan
        return
    self._remoteckan = self.create_remoteckan(self.get_hdx_site_url(),
                                              full_agent=self.get_user_agent(),
                                              **kwargs)
[ "def", "setup_remoteckan", "(", "self", ",", "remoteckan", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# type: (Optional[ckanapi.RemoteCKAN], Any) -> None", "if", "remoteckan", "is", "None", ":", "self", ".", "_remoteckan", "=", "self", ".", "create_remotecka...
Set up remote CKAN from provided CKAN or by creating from configuration Args: remoteckan (Optional[ckanapi.RemoteCKAN]): CKAN instance. Defaults to setting one up from configuration. Returns: None
[ "Set", "up", "remote", "CKAN", "from", "provided", "CKAN", "or", "by", "creating", "from", "configuration" ]
python
train
demianbrecht/flask-canvas
flask_canvas.py
https://github.com/demianbrecht/flask-canvas/blob/07aa310c43f9386598cdfd5b163f94efa7808c85/flask_canvas.py#L133-L143
def _decode_signed_user(encoded_sig, encoded_data):
    """ Decodes the ``POST``ed signed data

    Verifies the HMAC-SHA256 signature of *encoded_data* against the
    app's ``CANVAS_CLIENT_SECRET`` before trusting the payload.

    :raises ValueError: if the signature does not match.
    """
    decoded_sig = _decode(encoded_sig)
    decoded_data = loads(_decode(encoded_data))

    expected = hmac.new(app.config['CANVAS_CLIENT_SECRET'], encoded_data,
                        sha256).digest()
    # SECURITY FIX: compare_digest avoids the timing side channel of a
    # plain `!=` comparison on the signature bytes.
    if not hmac.compare_digest(decoded_sig, expected):
        raise ValueError("sig doesn't match hash")

    return decoded_sig, decoded_data
[ "def", "_decode_signed_user", "(", "encoded_sig", ",", "encoded_data", ")", ":", "decoded_sig", "=", "_decode", "(", "encoded_sig", ")", "decoded_data", "=", "loads", "(", "_decode", "(", "encoded_data", ")", ")", "if", "decoded_sig", "!=", "hmac", ".", "new",...
Decodes the ``POST``ed signed data
[ "Decodes", "the", "POST", "ed", "signed", "data" ]
python
train
numenta/htmresearch
projects/union_path_integration/multi_column/multi_column_convergence.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/union_path_integration/multi_column/multi_column_convergence.py#L181-L225
def learn(self):
    """
    Learn all objects on every column. Each column will learn all the
    features of every object and store the object's L2 representation
    to be later used in the inference stage
    """
    self.setLearning(True)

    for obj in self.objects:
        self.sendReset()

        previousLocation = [None] * self.numColumns
        displacement = [0., 0.]
        features = obj["features"]
        numOfFeatures = len(features)

        # Randomize touch sequences
        touchSequence = np.random.permutation(numOfFeatures)

        for sensation in xrange(numOfFeatures):
            for col in xrange(self.numColumns):
                # Shift the touch sequence for each column
                colSequence = np.roll(touchSequence, col)
                feature = features[colSequence[sensation]]

                # Move the sensor to the center of the object
                locationOnObject = np.array([feature["top"] + feature["height"] / 2.,
                                             feature["left"] + feature["width"] / 2.])

                # Calculate displacement from previous location
                if previousLocation[col] is not None:
                    displacement = locationOnObject - previousLocation[col]
                previousLocation[col] = locationOnObject

                # learn each pattern multiple times
                activeColumns = self.featureSDR[col][feature["name"]]
                for _ in xrange(self.numLearningPoints):
                    # Sense feature at location
                    self.motorInput[col].addDataToQueue(displacement)
                    self.sensorInput[col].addDataToQueue(activeColumns, False, 0)
                    # Only move to the location on the first sensation.
                    displacement = [0, 0]

        # Drain the queued sensations for this object in one network run.
        self.network.run(numOfFeatures * self.numLearningPoints)

        # update L2 representations for the object
        self.learnedObjects[obj["name"]] = self.getL2Representations()
[ "def", "learn", "(", "self", ")", ":", "self", ".", "setLearning", "(", "True", ")", "for", "obj", "in", "self", ".", "objects", ":", "self", ".", "sendReset", "(", ")", "previousLocation", "=", "[", "None", "]", "*", "self", ".", "numColumns", "disp...
Learn all objects on every column. Each column will learn all the features of every object and store the the object's L2 representation to be later used in the inference stage
[ "Learn", "all", "objects", "on", "every", "column", ".", "Each", "column", "will", "learn", "all", "the", "features", "of", "every", "object", "and", "store", "the", "the", "object", "s", "L2", "representation", "to", "be", "later", "used", "in", "the", ...
python
train
PmagPy/PmagPy
programs/magic_gui.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/magic_gui.py#L89-L235
def InitUI(self):
    """
    Make main user interface

    Builds four regions: the project-directory chooser (bSizer0), a
    hidden message box (bSizer_msg), the six data-entry buttons
    (bSizer1), and the upload button (bSizer2); then arranges them and
    installs the menu bar.
    """
    # --- directory chooser ---
    bSizer0 = wx.StaticBoxSizer(
        wx.StaticBox(self.panel, wx.ID_ANY, "Choose MagIC project directory",
                     name='bSizer0'), wx.HORIZONTAL
    )
    self.dir_path = wx.TextCtrl(self.panel, id=-1, size=(600, 25),
                                style=wx.TE_READONLY)
    self.dir_path.SetValue(self.WD)
    self.change_dir_button = buttons.GenButton(
        self.panel, id=-1, label="change directory", size=(-1, -1),
        name='change_dir_btn'
    )
    self.change_dir_button.SetBackgroundColour("#F8F8FF")
    self.change_dir_button.InitColours()
    self.Bind(wx.EVT_BUTTON, self.on_change_dir_button, self.change_dir_button)
    bSizer0.Add(self.change_dir_button, wx.ALIGN_LEFT)
    bSizer0.AddSpacer(40)
    bSizer0.Add(self.dir_path, wx.ALIGN_CENTER_VERTICAL)

    # --- message box (hidden until needed; see ShowItems(False) below) ---
    self.bSizer_msg = wx.StaticBoxSizer(wx.StaticBox(
        self.panel, wx.ID_ANY, "Message", name='bsizer_msg'), wx.HORIZONTAL)
    self.message = wx.StaticText(self.panel, -1,
                                 label="Some text will be here",
                                 name='messages')
    self.bSizer_msg.Add(self.message)

    #---sizer 1 ----
    bSizer1 = wx.StaticBoxSizer(wx.StaticBox(
        self.panel, wx.ID_ANY, "Add information to the data model",
        name='bSizer1'), wx.HORIZONTAL)
    text = "1. add location data"
    self.btn1 = buttons.GenButton(self.panel, id=-1, label=text,
                                  size=(300, 50), name='locations_btn')
    self.btn1.SetBackgroundColour("#FDC68A")
    self.btn1.InitColours()
    self.Bind(wx.EVT_BUTTON, self.make_grid_frame, self.btn1)
    text = "2. add site data"
    self.btn2 = buttons.GenButton(self.panel, id=-1, label=text,
                                  size=(300, 50), name='sites_btn')
    self.btn2.SetBackgroundColour("#6ECFF6")
    self.btn2.InitColours()
    self.Bind(wx.EVT_BUTTON, self.make_grid_frame, self.btn2)
    text = "3. add sample data"
    self.btn3 = buttons.GenButton(self.panel, id=-1, label=text,
                                  size=(300, 50), name='samples_btn')
    self.btn3.SetBackgroundColour("#C4DF9B")
    self.btn3.InitColours()
    self.Bind(wx.EVT_BUTTON, self.make_grid_frame, self.btn3)
    text = "4. add specimen data"
    self.btn4 = buttons.GenButton(self.panel, id=-1, label=text,
                                  size=(300, 50), name='specimens_btn')
    self.btn4.SetBackgroundColour("#FDC68A")
    self.btn4.InitColours()
    self.Bind(wx.EVT_BUTTON, self.make_grid_frame, self.btn4)
    text = "5. add age data"
    self.btn5 = buttons.GenButton(self.panel, id=-1, label=text,
                                  size=(300, 50), name='ages_btn')
    self.btn5.SetBackgroundColour("#6ECFF6")
    self.btn5.InitColours()
    self.Bind(wx.EVT_BUTTON, self.make_grid_frame, self.btn5)
    text = "6. add measurements data"
    self.btn6 = buttons.GenButton(self.panel, id=-1, label=text,
                                  size=(300, 50), name='measurements_btn')
    self.btn6.SetBackgroundColour("#C4DF9B")
    self.btn6.InitColours()
    self.Bind(wx.EVT_BUTTON, self.make_grid_frame, self.btn6)
    bsizer1a = wx.BoxSizer(wx.VERTICAL)
    bsizer1a.AddSpacer(20)
    bsizer1a.Add(self.btn1, wx.ALIGN_TOP)
    bsizer1a.AddSpacer(20)
    bsizer1a.Add(self.btn2, wx.ALIGN_TOP)
    bsizer1a.AddSpacer(20)
    bsizer1a.Add(self.btn3, wx.ALIGN_TOP)
    bsizer1a.AddSpacer(20)
    bSizer1.Add(bsizer1a, wx.ALIGN_CENTER, wx.EXPAND)
    bSizer1.AddSpacer(20)

    #bSizer1.Add(OR, 0, wx.ALIGN_CENTER, 0)
    bSizer1.AddSpacer(20)

    bsizer1b = wx.BoxSizer(wx.VERTICAL)
    #__init__(self, parent, id, label, pos, size, style, validator, name
    bsizer1b.Add(self.btn4, flag=wx.ALIGN_CENTER|wx.BOTTOM, border=20)
    bsizer1b.Add(self.btn5, 0, flag=wx.ALIGN_CENTER|wx.BOTTOM, border=20)
    bsizer1b.Add(self.btn6, 0, wx.ALIGN_CENTER, 0)
    bSizer1.Add(bsizer1b, 0, wx.ALIGN_CENTER, 0)
    bSizer1.AddSpacer(20)

    #---sizer 2 ----
    self.bSizer2 = wx.StaticBoxSizer(wx.StaticBox(
        self.panel, wx.ID_ANY, "Create file for upload to MagIC database",
        name='bSizer2'), wx.HORIZONTAL)
    text = "prepare upload txt file"
    self.btn_upload = buttons.GenButton(self.panel, id=-1, label=text,
                                        size=(300, 50), name='upload_btn')
    self.btn_upload.SetBackgroundColour("#C4DF9B")
    self.btn_upload.InitColours()
    self.Bind(wx.EVT_BUTTON, self.on_upload_file, self.btn_upload)
    self.bSizer2.AddSpacer(20)
    self.bSizer2.Add(self.btn_upload, 0, wx.ALIGN_CENTER, 0)
    self.bSizer2.AddSpacer(20)

    #self.Bind(wx.EVT_BUTTON, self.on_btn_upload, self.btn_upload)

    #---arrange sizers ----
    self.hbox = wx.BoxSizer(wx.HORIZONTAL)
    vbox = wx.BoxSizer(wx.VERTICAL)
    vbox.AddSpacer(5)
    #vbox.Add(self.logo,0,wx.ALIGN_CENTER,0)
    vbox.AddSpacer(5)
    vbox.Add(bSizer0, 0, wx.ALIGN_CENTER, 0)
    vbox.AddSpacer(10)
    #vbox.Add(bSizer0_1, 0, wx.ALIGN_CENTER, 0)
    #vbox.AddSpacer(10)
    vbox.Add(self.bSizer_msg, 0, wx.ALIGN_CENTER, 0)
    self.bSizer_msg.ShowItems(False)
    vbox.Add(bSizer1, 0, wx.ALIGN_CENTER, 0)
    vbox.AddSpacer(10)
    vbox.AddSpacer(10)
    self.hbox.AddSpacer(10)
    vbox.Add(self.bSizer2, 0, wx.ALIGN_CENTER, 0)
    vbox.AddSpacer(10)
    self.hbox.Add(vbox, 0, wx.ALIGN_CENTER, 0)
    self.hbox.AddSpacer(5)

    self.panel.SetSizer(self.hbox)
    self.hbox.Fit(self)

    # do menu
    print("-I- Initializing menu")
    menubar = MagICMenu(self)
    self.SetMenuBar(menubar)
    self.menubar = menubar
[ "def", "InitUI", "(", "self", ")", ":", "bSizer0", "=", "wx", ".", "StaticBoxSizer", "(", "wx", ".", "StaticBox", "(", "self", ".", "panel", ",", "wx", ".", "ID_ANY", ",", "\"Choose MagIC project directory\"", ",", "name", "=", "'bSizer0'", ")", ",", "wx...
Make main user interface
[ "Make", "main", "user", "interface" ]
python
train
knipknap/SpiffWorkflow
SpiffWorkflow/task.py
https://github.com/knipknap/SpiffWorkflow/blob/f0af7f59a332e0619e4f3c00a7d4a3d230760e00/SpiffWorkflow/task.py#L207-L229
def _setstate(self, value, force=False): """ Setting force to True allows for changing a state after it COMPLETED. This would otherwise be invalid. """ if self._state == value: return if value < self._state and not force: raise WorkflowException(self.task_spec, 'state went from %s to %s!' % ( self.get_state_name(), self.state_names[value])) if __debug__: old = self.get_state_name() self._state = value if __debug__: self.log.append("Moving '%s' from %s to %s" % ( self.get_name(), old, self.get_state_name())) self.state_history.append(value) LOG.debug("Moving '%s' (spec=%s) from %s to %s" % ( self.get_name(), self.task_spec.name, old, self.get_state_name()))
[ "def", "_setstate", "(", "self", ",", "value", ",", "force", "=", "False", ")", ":", "if", "self", ".", "_state", "==", "value", ":", "return", "if", "value", "<", "self", ".", "_state", "and", "not", "force", ":", "raise", "WorkflowException", "(", ...
Setting force to True allows for changing a state after it COMPLETED. This would otherwise be invalid.
[ "Setting", "force", "to", "True", "allows", "for", "changing", "a", "state", "after", "it", "COMPLETED", ".", "This", "would", "otherwise", "be", "invalid", "." ]
python
valid
cocaine/cocaine-framework-python
cocaine/detail/headers.py
https://github.com/cocaine/cocaine-framework-python/blob/d8a30074b6338bac4389eb996e00d404338115e4/cocaine/detail/headers.py#L172-L189
def add(self, name, value): """ Adds a new entry to the table We reduce the table size if the entry will make the table size greater than maxsize. """ # We just clear the table if the entry is too big size = table_entry_size(name, value) if size > self._maxsize: self.dynamic_entries.clear() self._current_size = 0 # Add new entry if the table actually has a size elif self._maxsize > 0: self.dynamic_entries.appendleft((name, value)) self._current_size += size self._shrink()
[ "def", "add", "(", "self", ",", "name", ",", "value", ")", ":", "# We just clear the table if the entry is too big", "size", "=", "table_entry_size", "(", "name", ",", "value", ")", "if", "size", ">", "self", ".", "_maxsize", ":", "self", ".", "dynamic_entries...
Adds a new entry to the table We reduce the table size if the entry will make the table size greater than maxsize.
[ "Adds", "a", "new", "entry", "to", "the", "table" ]
python
train
miso-belica/jusText
justext/core.py
https://github.com/miso-belica/jusText/blob/ad05130df2ca883f291693353f9d86e20fe94a4e/justext/core.py#L71-L98
def decode_html(html, default_encoding=DEFAULT_ENCODING, encoding=None, errors=DEFAULT_ENC_ERRORS): """ Converts a `html` containing an HTML page into Unicode. Tries to guess character encoding from meta tag. """ if isinstance(html, unicode): return html if encoding: return html.decode(encoding, errors) match = CHARSET_META_TAG_PATTERN.search(html) if match: declared_encoding = match.group(1).decode("ASCII") # proceed unknown encoding as if it wasn't found at all with ignored(LookupError): return html.decode(declared_encoding, errors) # unknown encoding try: # try UTF-8 first return html.decode("utf8") except UnicodeDecodeError: # try lucky with default encoding try: return html.decode(default_encoding, errors) except UnicodeDecodeError as e: raise JustextError("Unable to decode the HTML to Unicode: " + unicode(e))
[ "def", "decode_html", "(", "html", ",", "default_encoding", "=", "DEFAULT_ENCODING", ",", "encoding", "=", "None", ",", "errors", "=", "DEFAULT_ENC_ERRORS", ")", ":", "if", "isinstance", "(", "html", ",", "unicode", ")", ":", "return", "html", "if", "encodin...
Converts a `html` containing an HTML page into Unicode. Tries to guess character encoding from meta tag.
[ "Converts", "a", "html", "containing", "an", "HTML", "page", "into", "Unicode", ".", "Tries", "to", "guess", "character", "encoding", "from", "meta", "tag", "." ]
python
train
aleju/imgaug
imgaug/augmentables/polys.py
https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/polys.py#L303-L332
def is_out_of_image(self, image, fully=True, partly=False): """ Estimate whether the polygon is partially or fully outside of the image area. Parameters ---------- image : (H,W,...) ndarray or tuple of int Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is assumed to represent the image shape and must contain at least two integers. fully : bool, optional Whether to return True if the polygon is fully outside of the image area. partly : bool, optional Whether to return True if the polygon is at least partially outside fo the image area. Returns ------- bool True if the polygon is partially/fully outside of the image area, depending on defined parameters. False otherwise. """ # TODO this is inconsistent with line strings, which return a default # value in these cases if len(self.exterior) == 0: raise Exception("Cannot determine whether the polygon is inside the image, because it contains no points.") ls = self.to_line_string() return ls.is_out_of_image(image, fully=fully, partly=partly)
[ "def", "is_out_of_image", "(", "self", ",", "image", ",", "fully", "=", "True", ",", "partly", "=", "False", ")", ":", "# TODO this is inconsistent with line strings, which return a default", "# value in these cases", "if", "len", "(", "self", ".", "exterior", ")...
Estimate whether the polygon is partially or fully outside of the image area. Parameters ---------- image : (H,W,...) ndarray or tuple of int Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is assumed to represent the image shape and must contain at least two integers. fully : bool, optional Whether to return True if the polygon is fully outside of the image area. partly : bool, optional Whether to return True if the polygon is at least partially outside fo the image area. Returns ------- bool True if the polygon is partially/fully outside of the image area, depending on defined parameters. False otherwise.
[ "Estimate", "whether", "the", "polygon", "is", "partially", "or", "fully", "outside", "of", "the", "image", "area", "." ]
python
valid
Fizzadar/pyinfra
pyinfra/local.py
https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/local.py#L19-L78
def include(filename, hosts=False, when=True): ''' Executes a local python file within the ``pyinfra.pseudo_state.deploy_dir`` directory. Args: hosts (string, list): group name or list of hosts to limit this include to when (bool): indicate whether to trigger operations in this include ''' if not pyinfra.is_cli: raise PyinfraError('local.include is only available in CLI mode.') if not when: return if hosts is not False: hosts = ensure_host_list(hosts, inventory=pseudo_state.inventory) if pseudo_host not in hosts: return if pseudo_state.deploy_dir: filename = path.join(pseudo_state.deploy_dir, filename) frameinfo = get_caller_frameinfo() logger.debug('Including local file: {0}'.format(filename)) try: # Fixes a circular import because `pyinfra.local` is really a CLI # only thing (so should be `pyinfra_cli.local`). It is kept here # to maintain backwards compatability and the nicer public import # (ideally users never need to import from `pyinfra_cli`). from pyinfra_cli.config import extract_file_config from pyinfra_cli.util import exec_file # Load any config defined in the file and setup like a @deploy config_data = extract_file_config(filename) kwargs = { key.lower(): value for key, value in six.iteritems(config_data) if key in [ 'SUDO', 'SUDO_USER', 'SU_USER', 'PRESERVE_SUDO_ENV', 'IGNORE_ERRORS', ] } with pseudo_state.deploy( filename, kwargs, None, frameinfo.lineno, in_deploy=False, ): exec_file(filename) # One potential solution to the above is to add local as an actual # module, ie `pyinfra.modules.local`. except IOError as e: raise PyinfraError( 'Could not include local file: {0}\n{1}'.format(filename, e), )
[ "def", "include", "(", "filename", ",", "hosts", "=", "False", ",", "when", "=", "True", ")", ":", "if", "not", "pyinfra", ".", "is_cli", ":", "raise", "PyinfraError", "(", "'local.include is only available in CLI mode.'", ")", "if", "not", "when", ":", "ret...
Executes a local python file within the ``pyinfra.pseudo_state.deploy_dir`` directory. Args: hosts (string, list): group name or list of hosts to limit this include to when (bool): indicate whether to trigger operations in this include
[ "Executes", "a", "local", "python", "file", "within", "the", "pyinfra", ".", "pseudo_state", ".", "deploy_dir", "directory", "." ]
python
train
projectatomic/osbs-client
osbs/build/build_request.py
https://github.com/projectatomic/osbs-client/blob/571fe035dab3a7c02e1dccd5d65ffd75be750458/osbs/build/build_request.py#L843-L871
def render_bump_release(self): """ If the bump_release plugin is present, configure it """ phase = 'prebuild_plugins' plugin = 'bump_release' if not self.dj.dock_json_has_plugin_conf(phase, plugin): return if self.spec.release.value: logger.info('removing %s from request as release already specified', plugin) self.dj.remove_plugin(phase, plugin) return hub = self.spec.kojihub.value if not hub: logger.info('removing %s from request as koji hub not specified', plugin) self.dj.remove_plugin(phase, plugin) return self.dj.dock_json_set_arg(phase, plugin, 'hub', hub) # For flatpak, we want a name-version-release of # <name>-<stream>-<module_build_version>.<n>, where the .<n> makes # sure that the build is unique in Koji if self.spec.flatpak.value: self.dj.dock_json_set_arg(phase, plugin, 'append', True)
[ "def", "render_bump_release", "(", "self", ")", ":", "phase", "=", "'prebuild_plugins'", "plugin", "=", "'bump_release'", "if", "not", "self", ".", "dj", ".", "dock_json_has_plugin_conf", "(", "phase", ",", "plugin", ")", ":", "return", "if", "self", ".", "s...
If the bump_release plugin is present, configure it
[ "If", "the", "bump_release", "plugin", "is", "present", "configure", "it" ]
python
train
twilio/twilio-python
twilio/rest/taskrouter/v1/workspace/worker/workers_real_time_statistics.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/taskrouter/v1/workspace/worker/workers_real_time_statistics.py#L88-L101
def get_instance(self, payload): """ Build an instance of WorkersRealTimeStatisticsInstance :param dict payload: Payload response from the API :returns: twilio.rest.taskrouter.v1.workspace.worker.workers_real_time_statistics.WorkersRealTimeStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_real_time_statistics.WorkersRealTimeStatisticsInstance """ return WorkersRealTimeStatisticsInstance( self._version, payload, workspace_sid=self._solution['workspace_sid'], )
[ "def", "get_instance", "(", "self", ",", "payload", ")", ":", "return", "WorkersRealTimeStatisticsInstance", "(", "self", ".", "_version", ",", "payload", ",", "workspace_sid", "=", "self", ".", "_solution", "[", "'workspace_sid'", "]", ",", ")" ]
Build an instance of WorkersRealTimeStatisticsInstance :param dict payload: Payload response from the API :returns: twilio.rest.taskrouter.v1.workspace.worker.workers_real_time_statistics.WorkersRealTimeStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_real_time_statistics.WorkersRealTimeStatisticsInstance
[ "Build", "an", "instance", "of", "WorkersRealTimeStatisticsInstance" ]
python
train
googlefonts/fontbakery
Lib/fontbakery/profiles/name.py
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/name.py#L398-L412
def com_adobe_fonts_check_name_postscript_name_consistency(ttFont): """Name table ID 6 (PostScript name) must be consistent across platforms.""" postscript_names = set() for entry in ttFont['name'].names: if entry.nameID == NameID.POSTSCRIPT_NAME: postscript_name = entry.toUnicode() postscript_names.add(postscript_name) if len(postscript_names) > 1: yield FAIL, ("Entries in the 'name' table for ID 6 (PostScript name) are " "not consistent. Names found: {}." .format(sorted(postscript_names))) else: yield PASS, ("Entries in the 'name' table for ID 6 " "(PostScript name) are consistent.")
[ "def", "com_adobe_fonts_check_name_postscript_name_consistency", "(", "ttFont", ")", ":", "postscript_names", "=", "set", "(", ")", "for", "entry", "in", "ttFont", "[", "'name'", "]", ".", "names", ":", "if", "entry", ".", "nameID", "==", "NameID", ".", "POSTS...
Name table ID 6 (PostScript name) must be consistent across platforms.
[ "Name", "table", "ID", "6", "(", "PostScript", "name", ")", "must", "be", "consistent", "across", "platforms", "." ]
python
train
jobovy/galpy
galpy/df/streamdf.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/df/streamdf.py#L2808-L2886
def gaussApprox(self,xy,**kwargs): """ NAME: gaussApprox PURPOSE: return the mean and variance of a Gaussian approximation to the stream DF at a given phase-space point in Galactocentric rectangular coordinates (distribution is over missing directions) INPUT: xy - phase-space point [X,Y,Z,vX,vY,vZ]; the distribution of the dimensions set to None is returned interp= (object-wide interp default) if True, use the interpolated stream track cindx= index of the closest point on the (interpolated) stream track if not given, determined from the dimensions given lb= (False) if True, xy contains [l,b,D,vlos,pmll,pmbb] in [deg,deg,kpc,km/s,mas/yr,mas/yr] and the Gaussian approximation in these coordinates is returned OUTPUT: (mean,variance) of the approximate Gaussian DF for the missing directions in xy HISTORY: 2013-12-12 - Written - Bovy (IAS) """ interp= kwargs.get('interp',self._useInterp) lb= kwargs.get('lb',False) #What are we looking for coordGiven= numpy.array([not x is None for x in xy],dtype='bool') nGiven= numpy.sum(coordGiven) #First find the nearest track point if not 'cindx' in kwargs and lb: cindx= self._find_closest_trackpointLB(*xy,interp=interp, usev=True) elif not 'cindx' in kwargs and not lb: cindx= self._find_closest_trackpoint(*xy,xy=True,interp=interp, usev=True) else: cindx= kwargs['cindx'] #Get the covariance matrix if interp and lb: tcov= self._interpolatedAllErrCovsLBUnscaled[cindx] tmean= self._interpolatedObsTrackLB[cindx] elif interp and not lb: tcov= self._interpolatedAllErrCovsXY[cindx] tmean= self._interpolatedObsTrackXY[cindx] elif not interp and lb: tcov= self._allErrCovsLBUnscaled[cindx] tmean= self._ObsTrackLB[cindx] elif not interp and not lb: tcov= self._allErrCovsXY[cindx] tmean= self._ObsTrackXY[cindx] if lb:#Apply scale factors tcov= copy.copy(tcov) tcov*= numpy.tile(self._ErrCovsLBScale,(6,1)) tcov*= numpy.tile(self._ErrCovsLBScale,(6,1)).T #Fancy indexing to recover V22, V11, and V12; V22, V11, V12 as in Appendix B of 0905.2979v1 
V11indx0= numpy.array([[ii for jj in range(6-nGiven)] for ii in range(6) if not coordGiven[ii]]) V11indx1= numpy.array([[ii for ii in range(6) if not coordGiven[ii]] for jj in range(6-nGiven)]) V11= tcov[V11indx0,V11indx1] V22indx0= numpy.array([[ii for jj in range(nGiven)] for ii in range(6) if coordGiven[ii]]) V22indx1= numpy.array([[ii for ii in range(6) if coordGiven[ii]] for jj in range(nGiven)]) V22= tcov[V22indx0,V22indx1] V12indx0= numpy.array([[ii for jj in range(nGiven)] for ii in range(6) if not coordGiven[ii]]) V12indx1= numpy.array([[ii for ii in range(6) if coordGiven[ii]] for jj in range(6-nGiven)]) V12= tcov[V12indx0,V12indx1] #Also get m1 and m2, again following Appendix B of 0905.2979v1 m1= tmean[True^coordGiven] m2= tmean[coordGiven] #conditional mean and variance V22inv= numpy.linalg.inv(V22) v2= numpy.array([xy[ii] for ii in range(6) if coordGiven[ii]]) condMean= m1+numpy.dot(V12,numpy.dot(V22inv,v2-m2)) condVar= V11-numpy.dot(V12,numpy.dot(V22inv,V12.T)) return (condMean,condVar)
[ "def", "gaussApprox", "(", "self", ",", "xy", ",", "*", "*", "kwargs", ")", ":", "interp", "=", "kwargs", ".", "get", "(", "'interp'", ",", "self", ".", "_useInterp", ")", "lb", "=", "kwargs", ".", "get", "(", "'lb'", ",", "False", ")", "#What are ...
NAME: gaussApprox PURPOSE: return the mean and variance of a Gaussian approximation to the stream DF at a given phase-space point in Galactocentric rectangular coordinates (distribution is over missing directions) INPUT: xy - phase-space point [X,Y,Z,vX,vY,vZ]; the distribution of the dimensions set to None is returned interp= (object-wide interp default) if True, use the interpolated stream track cindx= index of the closest point on the (interpolated) stream track if not given, determined from the dimensions given lb= (False) if True, xy contains [l,b,D,vlos,pmll,pmbb] in [deg,deg,kpc,km/s,mas/yr,mas/yr] and the Gaussian approximation in these coordinates is returned OUTPUT: (mean,variance) of the approximate Gaussian DF for the missing directions in xy HISTORY: 2013-12-12 - Written - Bovy (IAS)
[ "NAME", ":" ]
python
train
ondergetekende/python-panavatar
panavatar/parameters.py
https://github.com/ondergetekende/python-panavatar/blob/d89d952db27ddd196ac66aafa89cb80dbcd40ca6/panavatar/parameters.py#L174-L198
def weighted_choice(self, probabilities, key): """Makes a weighted choice between several options. Probabilities is a list of 2-tuples, (probability, option). The probabilties don't need to add up to anything, they are automatically scaled.""" try: choice = self.values[key].lower() except KeyError: # override not set. return super(RecordingParameters, self)\ .weighted_choice(probabilities, key) # Find the matching key (case insensitive) for probability, option in probabilities: if str(option).lower() == choice: return option # for function or class-type choices, also check __name__ for probability, option in probabilities: if option.__name__.lower() == choice: return option assert False, "Invalid value provided"
[ "def", "weighted_choice", "(", "self", ",", "probabilities", ",", "key", ")", ":", "try", ":", "choice", "=", "self", ".", "values", "[", "key", "]", ".", "lower", "(", ")", "except", "KeyError", ":", "# override not set.", "return", "super", "(", "Recor...
Makes a weighted choice between several options. Probabilities is a list of 2-tuples, (probability, option). The probabilties don't need to add up to anything, they are automatically scaled.
[ "Makes", "a", "weighted", "choice", "between", "several", "options", "." ]
python
train
fermiPy/fermipy
fermipy/stats_utils.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/stats_utils.py#L156-L164
def profile_bins(self): """ The binning to use to do the profile fitting """ log_mean = np.log10(self.mean()) log_half_width = max(5. * self.sigma(), 3.) # Default is to profile over +-5 sigma, # centered on mean, using 100 bins return np.logspace(log_mean - log_half_width, log_mean + log_half_width, 101)/self._j_ref
[ "def", "profile_bins", "(", "self", ")", ":", "log_mean", "=", "np", ".", "log10", "(", "self", ".", "mean", "(", ")", ")", "log_half_width", "=", "max", "(", "5.", "*", "self", ".", "sigma", "(", ")", ",", "3.", ")", "# Default is to profile over +-5 ...
The binning to use to do the profile fitting
[ "The", "binning", "to", "use", "to", "do", "the", "profile", "fitting" ]
python
train
bitesofcode/projexui
projexui/widgets/xorbrecordbox.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbrecordbox.py#L969-L987
def showPopup(self): """ Overloads the popup method from QComboBox to display an ORB tree widget when necessary. :sa setShowTreePopup """ if not self.showTreePopup(): return super(XOrbRecordBox, self).showPopup() tree = self.treePopupWidget() if tree and not tree.isVisible(): tree.move(self.mapToGlobal(QPoint(0, self.height()))) tree.resize(self.width(), 250) tree.resizeToContents() tree.filterItems('') tree.setFilteredColumns(range(tree.columnCount())) tree.show()
[ "def", "showPopup", "(", "self", ")", ":", "if", "not", "self", ".", "showTreePopup", "(", ")", ":", "return", "super", "(", "XOrbRecordBox", ",", "self", ")", ".", "showPopup", "(", ")", "tree", "=", "self", ".", "treePopupWidget", "(", ")", "if", "...
Overloads the popup method from QComboBox to display an ORB tree widget when necessary. :sa setShowTreePopup
[ "Overloads", "the", "popup", "method", "from", "QComboBox", "to", "display", "an", "ORB", "tree", "widget", "when", "necessary", ".", ":", "sa", "setShowTreePopup" ]
python
train
python-wink/python-wink
src/pywink/devices/air_conditioner.py
https://github.com/python-wink/python-wink/blob/cf8bdce8c6518f30b91b23aa7aa32e89c2ce48da/src/pywink/devices/air_conditioner.py#L87-L101
def set_temperature(self, max_set_point=None): """ :param max_set_point: a float for the max set point value in celsius :return: nothing """ desired_state = {} if max_set_point: desired_state['max_set_point'] = max_set_point response = self.api_interface.set_device_state(self, { "desired_state": desired_state }) self._update_state_from_response(response)
[ "def", "set_temperature", "(", "self", ",", "max_set_point", "=", "None", ")", ":", "desired_state", "=", "{", "}", "if", "max_set_point", ":", "desired_state", "[", "'max_set_point'", "]", "=", "max_set_point", "response", "=", "self", ".", "api_interface", "...
:param max_set_point: a float for the max set point value in celsius :return: nothing
[ ":", "param", "max_set_point", ":", "a", "float", "for", "the", "max", "set", "point", "value", "in", "celsius", ":", "return", ":", "nothing" ]
python
train
Neurosim-lab/netpyne
netpyne/analysis/network.py
https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/analysis/network.py#L519-L673
def plotConn (includePre = ['all'], includePost = ['all'], feature = 'strength', orderBy = 'gid', figSize = (10,10), groupBy = 'pop', groupByIntervalPre = None, groupByIntervalPost = None, graphType = 'matrix', synOrConn = 'syn', synMech = None, connsFile = None, tagsFile = None, clim = None, fontSize = 12, saveData = None, saveFig = None, showFig = True): ''' Plot network connectivity - includePre (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]): Cells to show (default: ['all']) - includePost (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]): Cells to show (default: ['all']) - feature ('weight'|'delay'|'numConns'|'probability'|'strength'|'convergence'|'divergence'): Feature to show in connectivity matrix; the only features applicable to groupBy='cell' are 'weight', 'delay' and 'numConns'; 'strength' = weight * probability (default: 'strength') - groupBy ('pop'|'cell'|'y'|: Show matrix for individual cells, populations, or by other numeric tag such as 'y' (default: 'pop') - groupByInterval (int or float): Interval of groupBy feature to group cells by in conn matrix, e.g. 100 to group by cortical depth in steps of 100 um (default: None) - orderBy ('gid'|'y'|'ynorm'|...): Unique numeric cell property to order x and y axes by, e.g. 
'gid', 'ynorm', 'y' (requires groupBy='cells') (default: 'gid') - graphType ('matrix','bar','pie'): Type of graph to represent data (default: 'matrix') - synOrConn ('syn'|'conn'): Use synapses or connections; note 1 connection can have multiple synapses (default: 'syn') - figSize ((width, height)): Size of figure (default: (10,10)) - synMech (['AMPA', 'GABAA',...]): Show results only for these syn mechs (default: None) - saveData (None|True|'fileName'): File name where to save the final data used to generate the figure; if set to True uses filename from simConfig (default: None) - saveFig (None|True|'fileName'): File name where to save the figure; if set to True uses filename from simConfig (default: None) - showFig (True|False): Whether to show the figure or not (default: True) - Returns figure handles ''' from .. import sim print('Plotting connectivity matrix...') if connsFile and tagsFile: connMatrix, pre, post = _plotConnCalculateFromFile(includePre, includePost, feature, orderBy, groupBy, groupByIntervalPre, groupByIntervalPost, synOrConn, synMech, connsFile, tagsFile) else: connMatrix, pre, post = _plotConnCalculateFromSim(includePre, includePost, feature, orderBy, groupBy, groupByIntervalPre, groupByIntervalPost, synOrConn, synMech) if connMatrix is None: print("Error calculating connMatrix in plotConn()") return None # set font size plt.rcParams.update({'font.size': fontSize}) # matrix plot if graphType == 'matrix': # Create plot fig = plt.figure(figsize=figSize) fig.subplots_adjust(right=0.98) # Less space on right fig.subplots_adjust(top=0.96) # Less space on top fig.subplots_adjust(bottom=0.02) # Less space on bottom h = plt.axes() plt.imshow(connMatrix, interpolation='nearest', cmap='viridis', vmin=np.nanmin(connMatrix), vmax=np.nanmax(connMatrix)) #_bicolormap(gap=0) # Plot grid lines if groupBy == 'cell': cellsPre, cellsPost = pre, post # Make pretty stepy = max(1, int(len(cellsPre)/10.0)) basey = 100 if stepy>100 else 10 stepy = max(1, int(basey * 
np.floor(float(stepy)/basey))) stepx = max(1, int(len(cellsPost)/10.0)) basex = 100 if stepx>100 else 10 stepx = max(1, int(basex * np.floor(float(stepx)/basex))) h.set_xticks(np.arange(0,len(cellsPost),stepx)) h.set_yticks(np.arange(0,len(cellsPre),stepy)) h.set_xticklabels(np.arange(0,len(cellsPost),stepx)) h.set_yticklabels(np.arange(0,len(cellsPost),stepy)) h.xaxis.set_ticks_position('top') plt.xlim(-0.5,len(cellsPost)-0.5) plt.ylim(len(cellsPre)-0.5,-0.5) elif groupBy == 'pop': popsPre, popsPost = pre, post for ipop, pop in enumerate(popsPre): plt.plot(np.array([0,len(popsPre)])-0.5,np.array([ipop,ipop])-0.5,'-',c=(0.7,0.7,0.7)) for ipop, pop in enumerate(popsPost): plt.plot(np.array([ipop,ipop])-0.5,np.array([0,len(popsPost)])-0.5,'-',c=(0.7,0.7,0.7)) # Make pretty h.set_xticks(list(range(len(popsPost)))) h.set_yticks(list(range(len(popsPre)))) h.set_xticklabels(popsPost) h.set_yticklabels(popsPre) h.xaxis.set_ticks_position('top') plt.xlim(-0.5,len(popsPost)-0.5) plt.ylim(len(popsPre)-0.5,-0.5) else: groupsPre, groupsPost = pre, post for igroup, group in enumerate(groupsPre): plt.plot(np.array([0,len(groupsPre)])-0.5,np.array([igroup,igroup])-0.5,'-',c=(0.7,0.7,0.7)) for igroup, group in enumerate(groupsPost): plt.plot(np.array([igroup,igroup])-0.5,np.array([0,len(groupsPost)])-0.5,'-',c=(0.7,0.7,0.7)) # Make pretty h.set_xticks([i-0.5 for i in range(len(groupsPost))]) h.set_yticks([i-0.5 for i in range(len(groupsPre))]) h.set_xticklabels([int(x) if x>1 else x for x in groupsPost]) h.set_yticklabels([int(x) if x>1 else x for x in groupsPre]) h.xaxis.set_ticks_position('top') plt.xlim(-0.5,len(groupsPost)-0.5) plt.ylim(len(groupsPre)-0.5,-0.5) if not clim: clim = [np.nanmin(connMatrix), np.nanmax(connMatrix)] plt.clim(clim[0], clim[1]) plt.colorbar(label=feature, shrink=0.8) #.set_label(label='Fitness',size=20,weight='bold') plt.xlabel('post') h.xaxis.set_label_coords(0.5, 1.06) plt.ylabel('pre') plt.title ('Connection '+feature+' matrix', y=1.08) # stacked 
bar graph elif graphType == 'bar': if groupBy == 'pop': popsPre, popsPost = pre, post from netpyne.support import stackedBarGraph SBG = stackedBarGraph.StackedBarGrapher() fig = plt.figure(figsize=figSize) ax = fig.add_subplot(111) SBG.stackedBarPlot(ax, connMatrix.transpose(), colorList, xLabels=popsPost, gap = 0.1, scale=False, xlabel='postsynaptic', ylabel = feature) plt.title ('Connection '+feature+' stacked bar graph') plt.legend(popsPre) plt.tight_layout() elif groupBy == 'cell': print('Error: plotConn graphType="bar" with groupBy="cell" not implemented') elif graphType == 'pie': print('Error: plotConn graphType="pie" not yet implemented') #save figure data if saveData: figData = {'connMatrix': connMatrix, 'feature': feature, 'groupBy': groupBy, 'includePre': includePre, 'includePost': includePost, 'saveData': saveData, 'saveFig': saveFig, 'showFig': showFig} _saveFigData(figData, saveData, 'conn') # save figure if saveFig: if isinstance(saveFig, basestring): filename = saveFig else: filename = sim.cfg.filename+'_'+'conn_'+feature+'.png' plt.savefig(filename) # show fig if showFig: _showFigure() return fig, {'connMatrix': connMatrix, 'feature': feature, 'groupBy': groupBy, 'includePre': includePre, 'includePost': includePost}
[ "def", "plotConn", "(", "includePre", "=", "[", "'all'", "]", ",", "includePost", "=", "[", "'all'", "]", ",", "feature", "=", "'strength'", ",", "orderBy", "=", "'gid'", ",", "figSize", "=", "(", "10", ",", "10", ")", ",", "groupBy", "=", "'pop'", ...
Plot network connectivity - includePre (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]): Cells to show (default: ['all']) - includePost (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]): Cells to show (default: ['all']) - feature ('weight'|'delay'|'numConns'|'probability'|'strength'|'convergence'|'divergence'): Feature to show in connectivity matrix; the only features applicable to groupBy='cell' are 'weight', 'delay' and 'numConns'; 'strength' = weight * probability (default: 'strength') - groupBy ('pop'|'cell'|'y'|: Show matrix for individual cells, populations, or by other numeric tag such as 'y' (default: 'pop') - groupByInterval (int or float): Interval of groupBy feature to group cells by in conn matrix, e.g. 100 to group by cortical depth in steps of 100 um (default: None) - orderBy ('gid'|'y'|'ynorm'|...): Unique numeric cell property to order x and y axes by, e.g. 'gid', 'ynorm', 'y' (requires groupBy='cells') (default: 'gid') - graphType ('matrix','bar','pie'): Type of graph to represent data (default: 'matrix') - synOrConn ('syn'|'conn'): Use synapses or connections; note 1 connection can have multiple synapses (default: 'syn') - figSize ((width, height)): Size of figure (default: (10,10)) - synMech (['AMPA', 'GABAA',...]): Show results only for these syn mechs (default: None) - saveData (None|True|'fileName'): File name where to save the final data used to generate the figure; if set to True uses filename from simConfig (default: None) - saveFig (None|True|'fileName'): File name where to save the figure; if set to True uses filename from simConfig (default: None) - showFig (True|False): Whether to show the figure or not (default: True) - Returns figure handles
[ "Plot", "network", "connectivity", "-", "includePre", "(", "[", "all", "|", "allCells", "allNetStims", "|", "120", "|", "E1", "|", "(", "L2", "56", ")", "|", "(", "L5", "[", "4", "5", "6", "]", ")", "]", ")", ":", "Cells", "to", "show", "(", "d...
python
train
StackStorm/pybind
pybind/slxos/v17s_1_02/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/__init__.py#L12720-L12743
def _set_sfm_state(self, v, load=False): """ Setter method for sfm_state, mapped from YANG variable /sfm_state (container) If this variable is read-only (config: false) in the source YANG file, then _set_sfm_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_sfm_state() directly. YANG Description: SFM Operational Information """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=sfm_state.sfm_state, is_container='container', presence=False, yang_name="sfm-state", rest_name="sfm-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysmgr-sfm', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysmgr-operational', defining_module='brocade-sysmgr-operational', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """sfm_state must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=sfm_state.sfm_state, is_container='container', presence=False, yang_name="sfm-state", rest_name="sfm-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysmgr-sfm', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysmgr-operational', defining_module='brocade-sysmgr-operational', yang_type='container', is_config=True)""", }) self.__sfm_state = t if hasattr(self, '_set'): self._set()
[ "def", "_set_sfm_state", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base...
Setter method for sfm_state, mapped from YANG variable /sfm_state (container) If this variable is read-only (config: false) in the source YANG file, then _set_sfm_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_sfm_state() directly. YANG Description: SFM Operational Information
[ "Setter", "method", "for", "sfm_state", "mapped", "from", "YANG", "variable", "/", "sfm_state", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "...
python
train
hydraplatform/hydra-base
hydra_base/lib/units.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/units.py#L397-L443
def add_unit(unit,**kwargs): """ Add the unit defined into the object "unit" to the DB If unit["project_id"] is None it means that the unit is global, otherwise is property of a project If the unit exists emits an exception A minimal example: .. code-block:: python new_unit = dict( name = 'Teaspoons per second', abbreviation = 'tsp s^-1', cf = 0, # Constant conversion factor lf = 1.47867648e-05, # Linear conversion factor dimension_id = 2, description = 'A flow of one teaspoon per second.', ) add_unit(new_unit) """ new_unit = Unit() new_unit.dimension_id = unit["dimension_id"] new_unit.name = unit['name'] # Needed to uniform abbr to abbreviation new_unit.abbreviation = unit['abbreviation'] # Needed to uniform into to description new_unit.description = unit['description'] new_unit.lf = unit['lf'] new_unit.cf = unit['cf'] if ('project_id' in unit) and (unit['project_id'] is not None): # Adding dimension to the "user" dimensions list new_unit.project_id = unit['project_id'] # Save on DB db.DBSession.add(new_unit) db.DBSession.flush() return JSONObject(new_unit)
[ "def", "add_unit", "(", "unit", ",", "*", "*", "kwargs", ")", ":", "new_unit", "=", "Unit", "(", ")", "new_unit", ".", "dimension_id", "=", "unit", "[", "\"dimension_id\"", "]", "new_unit", ".", "name", "=", "unit", "[", "'name'", "]", "# Needed to unifo...
Add the unit defined into the object "unit" to the DB If unit["project_id"] is None it means that the unit is global, otherwise is property of a project If the unit exists emits an exception A minimal example: .. code-block:: python new_unit = dict( name = 'Teaspoons per second', abbreviation = 'tsp s^-1', cf = 0, # Constant conversion factor lf = 1.47867648e-05, # Linear conversion factor dimension_id = 2, description = 'A flow of one teaspoon per second.', ) add_unit(new_unit)
[ "Add", "the", "unit", "defined", "into", "the", "object", "unit", "to", "the", "DB", "If", "unit", "[", "project_id", "]", "is", "None", "it", "means", "that", "the", "unit", "is", "global", "otherwise", "is", "property", "of", "a", "project", "If", "t...
python
train
bcbio/bcbio-nextgen
bcbio/pipeline/cleanbam.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/cleanbam.py#L42-L73
def remove_extracontigs(in_bam, data): """Remove extra contigs (non chr1-22,X,Y) from an input BAM. These extra contigs can often be arranged in different ways, causing incompatibility issues with GATK and other tools. This also fixes the read group header as in fixrg. This does not yet handle mapping over 1 -> chr1 issues since this requires a ton of search/replace which slows down conversion. """ work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "bamclean", dd.get_sample_name(data))) out_file = os.path.join(work_dir, "%s-noextras.bam" % utils.splitext_plus(os.path.basename(in_bam))[0]) if not utils.file_exists(out_file): out_file = os.path.join(work_dir, "%s-noextras.bam" % dd.get_sample_name(data)) if not utils.file_uptodate(out_file, in_bam): with file_transaction(data, out_file) as tx_out_file: target_chroms = _target_chroms_and_header(in_bam, data) str_chroms = " ".join(target_chroms) rg_info = novoalign.get_rg_info(data["rgnames"]) bcbio_py = sys.executable ref_file = dd.get_ref_file(data) local_bam = os.path.join(os.path.dirname(tx_out_file), os.path.basename(in_bam)) cores = dd.get_cores(data) utils.symlink_plus(in_bam, local_bam) bam.index(local_bam, data["config"]) cmd = ("samtools view -@ {cores} -h {local_bam} {str_chroms} | " """{bcbio_py} -c 'from bcbio.pipeline import cleanbam; """ """cleanbam.fix_header("{ref_file}")' | """ "samtools view -@ {cores} -u - | " "samtools addreplacerg -@ {cores} -r '{rg_info}' -m overwrite_all -O bam -o {tx_out_file} - ") do.run(cmd.format(**locals()), "bamprep, remove extra contigs: %s" % dd.get_sample_name(data)) return out_file
[ "def", "remove_extracontigs", "(", "in_bam", ",", "data", ")", ":", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "dd", ".", "get_work_dir", "(", "data", ")", ",", "\"bamclean\"", ",", "dd", ".", "get_sample_nam...
Remove extra contigs (non chr1-22,X,Y) from an input BAM. These extra contigs can often be arranged in different ways, causing incompatibility issues with GATK and other tools. This also fixes the read group header as in fixrg. This does not yet handle mapping over 1 -> chr1 issues since this requires a ton of search/replace which slows down conversion.
[ "Remove", "extra", "contigs", "(", "non", "chr1", "-", "22", "X", "Y", ")", "from", "an", "input", "BAM", "." ]
python
train
benjamin-hodgson/asynqp
src/asynqp/protocol.py
https://github.com/benjamin-hodgson/asynqp/blob/ea8630d1803d10d4fd64b1a0e50f3097710b34d1/src/asynqp/protocol.py#L66-L74
def heartbeat_timeout(self): """ Called by heartbeat_monitor on timeout """ assert not self._closed, "Did we not stop heartbeat_monitor on close?" log.error("Heartbeat time out") poison_exc = ConnectionLostError('Heartbeat timed out') poison_frame = frames.PoisonPillFrame(poison_exc) self.dispatcher.dispatch_all(poison_frame) # Spec says to just close socket without ConnectionClose handshake. self.close()
[ "def", "heartbeat_timeout", "(", "self", ")", ":", "assert", "not", "self", ".", "_closed", ",", "\"Did we not stop heartbeat_monitor on close?\"", "log", ".", "error", "(", "\"Heartbeat time out\"", ")", "poison_exc", "=", "ConnectionLostError", "(", "'Heartbeat timed ...
Called by heartbeat_monitor on timeout
[ "Called", "by", "heartbeat_monitor", "on", "timeout" ]
python
train
saltstack/salt
salt/modules/debian_ip.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/debian_ip.py#L202-L208
def _raise_error_network(option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_network(option, expected) log.error(msg) raise AttributeError(msg)
[ "def", "_raise_error_network", "(", "option", ",", "expected", ")", ":", "msg", "=", "_error_msg_network", "(", "option", ",", "expected", ")", "log", ".", "error", "(", "msg", ")", "raise", "AttributeError", "(", "msg", ")" ]
Log and raise an error with a logical formatted message.
[ "Log", "and", "raise", "an", "error", "with", "a", "logical", "formatted", "message", "." ]
python
train
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/geometry/meshdata.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/geometry/meshdata.py#L122-L148
def get_edges(self, indexed=None): """Edges of the mesh Parameters ---------- indexed : str | None If indexed is None, return (Nf, 3) array of vertex indices, two per edge in the mesh. If indexed is 'faces', then return (Nf, 3, 2) array of vertex indices with 3 edges per face, and two vertices per edge. Returns ------- edges : ndarray The edges. """ if indexed is None: if self._edges is None: self._compute_edges(indexed=None) return self._edges elif indexed == 'faces': if self._edges_indexed_by_faces is None: self._compute_edges(indexed='faces') return self._edges_indexed_by_faces else: raise Exception("Invalid indexing mode. Accepts: None, 'faces'")
[ "def", "get_edges", "(", "self", ",", "indexed", "=", "None", ")", ":", "if", "indexed", "is", "None", ":", "if", "self", ".", "_edges", "is", "None", ":", "self", ".", "_compute_edges", "(", "indexed", "=", "None", ")", "return", "self", ".", "_edge...
Edges of the mesh Parameters ---------- indexed : str | None If indexed is None, return (Nf, 3) array of vertex indices, two per edge in the mesh. If indexed is 'faces', then return (Nf, 3, 2) array of vertex indices with 3 edges per face, and two vertices per edge. Returns ------- edges : ndarray The edges.
[ "Edges", "of", "the", "mesh", "Parameters", "----------", "indexed", ":", "str", "|", "None", "If", "indexed", "is", "None", "return", "(", "Nf", "3", ")", "array", "of", "vertex", "indices", "two", "per", "edge", "in", "the", "mesh", ".", "If", "index...
python
train
assamite/creamas
creamas/examples/spiro/spiro_agent_mp.py
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/examples/spiro/spiro_agent_mp.py#L186-L193
def evaluate(self, artifact): '''Evaluate the artifact with respect to the agents short term memory. Returns value in [0, 1]. ''' if self.desired_novelty > 0: return self.hedonic_value(self.novelty(artifact.obj)) return self.novelty(artifact.obj) / self.img_size, None
[ "def", "evaluate", "(", "self", ",", "artifact", ")", ":", "if", "self", ".", "desired_novelty", ">", "0", ":", "return", "self", ".", "hedonic_value", "(", "self", ".", "novelty", "(", "artifact", ".", "obj", ")", ")", "return", "self", ".", "novelty"...
Evaluate the artifact with respect to the agents short term memory. Returns value in [0, 1].
[ "Evaluate", "the", "artifact", "with", "respect", "to", "the", "agents", "short", "term", "memory", "." ]
python
train
DS-100/nb-to-gradescope
gs100/converter.py
https://github.com/DS-100/nb-to-gradescope/blob/1a2b37753c4913689557328a796543a767eb3932/gs100/converter.py#L223-L248
def pad_pdf_pages(pdf_name, pages_per_q) -> None: """ Checks if PDF has the correct number of pages. If it has too many, warns the user. If it has too few, adds blank pages until the right length is reached. """ pdf = PyPDF2.PdfFileReader(pdf_name) output = PyPDF2.PdfFileWriter() num_pages = pdf.getNumPages() if num_pages > pages_per_q: logging.warning('{} has {} pages. Only the first ' '{} pages will get output.' .format(pdf_name, num_pages, pages_per_q)) # Copy over up to pages_per_q pages for page in range(min(num_pages, pages_per_q)): output.addPage(pdf.getPage(page)) # Pad if necessary if num_pages < pages_per_q: for page in range(pages_per_q - num_pages): output.addBlankPage() # Output the PDF with open(pdf_name, 'wb') as out_file: output.write(out_file)
[ "def", "pad_pdf_pages", "(", "pdf_name", ",", "pages_per_q", ")", "->", "None", ":", "pdf", "=", "PyPDF2", ".", "PdfFileReader", "(", "pdf_name", ")", "output", "=", "PyPDF2", ".", "PdfFileWriter", "(", ")", "num_pages", "=", "pdf", ".", "getNumPages", "("...
Checks if PDF has the correct number of pages. If it has too many, warns the user. If it has too few, adds blank pages until the right length is reached.
[ "Checks", "if", "PDF", "has", "the", "correct", "number", "of", "pages", ".", "If", "it", "has", "too", "many", "warns", "the", "user", ".", "If", "it", "has", "too", "few", "adds", "blank", "pages", "until", "the", "right", "length", "is", "reached", ...
python
train
bluedazzle/wechat_sender
wechat_sender/sender.py
https://github.com/bluedazzle/wechat_sender/blob/21d861735509153d6b34408157911c25a5d7018b/wechat_sender/sender.py#L107-L130
def periodic_send(self, content, interval, title=''): """ 发送周期消息 :param content: (必填|str) - 需要发送的消息内容 :param interval: (必填|int|datetime.timedelta) - 发送消息间隔时间,支持 datetime.timedelta 或 integer 表示的秒数 :param title: (选填|str) - 需要发送的消息标题 :return: * status:发送状态,True 发送成,False 发送失败 * message:发送失败详情 """ url = '{0}periodic_message'.format(self.remote) if isinstance(interval, datetime.timedelta): interval = int(interval.total_seconds()) if not isinstance(interval, int): raise ValueError data = self._wrap_post_data(title=title, content=content, interval=interval) res = requests.post(url, data, timeout=self.timeout) if res.status_code == requests.codes.ok: res_data = json.loads(self._convert_bytes(res.content)) if res_data.get('status') == STATUS_SUCCESS: return True, res_data.get('message') return False, res_data.get('message') res.raise_for_status() return False, 'Request or Response Error'
[ "def", "periodic_send", "(", "self", ",", "content", ",", "interval", ",", "title", "=", "''", ")", ":", "url", "=", "'{0}periodic_message'", ".", "format", "(", "self", ".", "remote", ")", "if", "isinstance", "(", "interval", ",", "datetime", ".", "time...
发送周期消息 :param content: (必填|str) - 需要发送的消息内容 :param interval: (必填|int|datetime.timedelta) - 发送消息间隔时间,支持 datetime.timedelta 或 integer 表示的秒数 :param title: (选填|str) - 需要发送的消息标题 :return: * status:发送状态,True 发送成,False 发送失败 * message:发送失败详情
[ "发送周期消息" ]
python
train
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L103-L135
def upload(self, remote_path, file_content, ondup=None, **kwargs): """上传单个文件(<2G). | 百度PCS服务目前支持最大2G的单个文件上传。 | 如需支持超大文件(>2G)的断点续传,请参考下面的“分片文件上传”方法。 :param remote_path: 网盘中文件的保存路径(包含文件名)。 必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param file_content: 上传文件的内容/文件对象 。 (e.g. ``open('foobar', 'rb')`` ) :param ondup: (可选) * 'overwrite':表示覆盖同名文件; * 'newcopy':表示生成文件副本并进行重命名,命名规则为“ 文件名_日期.后缀”。 :return: Response 对象 """ params = { 'path': remote_path, 'ondup': ondup } files = {'file': ('file', file_content, '')} url = 'https://c.pcs.baidu.com/rest/2.0/pcs/file' return self._request('file', 'upload', url=url, extra_params=params, files=files, **kwargs)
[ "def", "upload", "(", "self", ",", "remote_path", ",", "file_content", ",", "ondup", "=", "None", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'path'", ":", "remote_path", ",", "'ondup'", ":", "ondup", "}", "files", "=", "{", "'file'", ":"...
上传单个文件(<2G). | 百度PCS服务目前支持最大2G的单个文件上传。 | 如需支持超大文件(>2G)的断点续传,请参考下面的“分片文件上传”方法。 :param remote_path: 网盘中文件的保存路径(包含文件名)。 必须以 /apps/ 开头。 .. warning:: * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param file_content: 上传文件的内容/文件对象 。 (e.g. ``open('foobar', 'rb')`` ) :param ondup: (可选) * 'overwrite':表示覆盖同名文件; * 'newcopy':表示生成文件副本并进行重命名,命名规则为“ 文件名_日期.后缀”。 :return: Response 对象
[ "上传单个文件(<2G)", "." ]
python
train
DinoTools/python-overpy
overpy/__init__.py
https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1514-L1519
def _handle_end_way(self): """ Handle closing way element """ self._result.append(Way(result=self._result, **self._curr)) self._curr = {}
[ "def", "_handle_end_way", "(", "self", ")", ":", "self", ".", "_result", ".", "append", "(", "Way", "(", "result", "=", "self", ".", "_result", ",", "*", "*", "self", ".", "_curr", ")", ")", "self", ".", "_curr", "=", "{", "}" ]
Handle closing way element
[ "Handle", "closing", "way", "element" ]
python
train
mozilla/treeherder
treeherder/log_parser/parsers.py
https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/log_parser/parsers.py#L202-L211
def start_step(self, lineno, name="Unnamed step", timestamp=None): """Create a new step and update the state to reflect we're now in the middle of a step.""" self.state = self.STATES['step_in_progress'] self.stepnum += 1 self.steps.append({ "name": name, "started": timestamp, "started_linenumber": lineno, "errors": [], })
[ "def", "start_step", "(", "self", ",", "lineno", ",", "name", "=", "\"Unnamed step\"", ",", "timestamp", "=", "None", ")", ":", "self", ".", "state", "=", "self", ".", "STATES", "[", "'step_in_progress'", "]", "self", ".", "stepnum", "+=", "1", "self", ...
Create a new step and update the state to reflect we're now in the middle of a step.
[ "Create", "a", "new", "step", "and", "update", "the", "state", "to", "reflect", "we", "re", "now", "in", "the", "middle", "of", "a", "step", "." ]
python
train
tensorflow/tensorboard
tensorboard/plugins/beholder/beholder.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/beholder/beholder.py#L121-L127
def _enough_time_has_passed(self, FPS): '''For limiting how often frames are computed.''' if FPS == 0: return False else: earliest_time = self.last_update_time + (1.0 / FPS) return time.time() >= earliest_time
[ "def", "_enough_time_has_passed", "(", "self", ",", "FPS", ")", ":", "if", "FPS", "==", "0", ":", "return", "False", "else", ":", "earliest_time", "=", "self", ".", "last_update_time", "+", "(", "1.0", "/", "FPS", ")", "return", "time", ".", "time", "(...
For limiting how often frames are computed.
[ "For", "limiting", "how", "often", "frames", "are", "computed", "." ]
python
train
mozilla-services/pyramid_multiauth
pyramid_multiauth/__init__.py
https://github.com/mozilla-services/pyramid_multiauth/blob/9548aa55f726920a666791d7c89ac2b9779d2bc1/pyramid_multiauth/__init__.py#L188-L290
def includeme(config): """Include pyramid_multiauth into a pyramid configurator. This function provides a hook for pyramid to include the default settings for auth via pyramid_multiauth. Activate it like so: config.include("pyramid_multiauth") This will pull the list of registered authn policies from the deployment settings, and configure and install each policy in order. The policies to use can be specified in one of two ways: * as the name of a module to be included. * as the name of a callable along with a set of parameters. Here's an example suite of settings: multiauth.policies = ipauth1 ipauth2 pyramid_browserid multiauth.policy.ipauth1.use = pyramid_ipauth.IPAuthentictionPolicy multiauth.policy.ipauth1.ipaddrs = 123.123.0.0/16 multiauth.policy.ipauth1.userid = local1 multiauth.policy.ipauth2.use = pyramid_ipauth.IPAuthentictionPolicy multiauth.policy.ipauth2.ipaddrs = 124.124.0.0/16 multiauth.policy.ipauth2.userid = local2 This will configure a MultiAuthenticationPolicy with three policy objects. The first two will be IPAuthenticationPolicy objects created by passing in the specified keyword arguments. The third will be a BrowserID authentication policy just like you would get from executing: config.include("pyramid_browserid") As a side-effect, the configuration will also get the additional views that pyramid_browserid sets up by default. The *group finder function* and the *authorization policy* are also read from configuration if specified: multiauth.authorization_policy = mypyramidapp.acl.Custom multiauth.groupfinder = mypyramidapp.acl.groupfinder """ # Grab the pyramid-wide settings, to look for any auth config. settings = config.get_settings() # Hook up a default AuthorizationPolicy. # Get the authorization policy from config if present. # Default ACLAuthorizationPolicy is usually what you want. 
authz_class = settings.get("multiauth.authorization_policy", "pyramid.authorization.ACLAuthorizationPolicy") authz_policy = config.maybe_dotted(authz_class)() # If the app configures one explicitly then this will get overridden. # In autocommit mode this needs to be done before setting the authn policy. config.set_authorization_policy(authz_policy) # Get the groupfinder from config if present. groupfinder = settings.get("multiauth.groupfinder", None) groupfinder = config.maybe_dotted(groupfinder) # Look for callable policy definitions. # Suck them all out at once and store them in a dict for later use. policy_definitions = get_policy_definitions(settings) # Read and process the list of policies to load. # We build up a list of callables which can be executed at config commit # time to obtain the final list of policies. # Yeah, it's complicated. But we want to be able to inherit any default # views or other config added by the sub-policies when they're included. # Process policies in reverse order so that things at the front of the # list can override things at the back of the list. policy_factories = [] policy_names = settings.get("multiauth.policies", "").split() for policy_name in reversed(policy_names): if policy_name in policy_definitions: # It's a policy defined using a callable. # Just append it straight to the list. definition = policy_definitions[policy_name] factory = config.maybe_dotted(definition.pop("use")) policy_factories.append((factory, policy_name, definition)) else: # It's a module to be directly included. try: factory = policy_factory_from_module(config, policy_name) except ImportError: err = "pyramid_multiauth: policy %r has no settings "\ "and is not importable" % (policy_name,) raise ValueError(err) policy_factories.append((factory, policy_name, {})) # OK. We now have a list of callbacks which need to be called at # commit time, and will return the policies in reverse order. # Register a special action to pull them into our list of policies. 
policies = [] def grab_policies(): for factory, name, kwds in policy_factories: policy = factory(**kwds) if policy: policy._pyramid_multiauth_name = name if not policies or policy is not policies[0]: # Remember, they're being processed in reverse order. # So each new policy needs to go at the front. policies.insert(0, policy) config.action(None, grab_policies, order=PHASE2_CONFIG) authn_policy = MultiAuthenticationPolicy(policies, groupfinder) config.set_authentication_policy(authn_policy)
[ "def", "includeme", "(", "config", ")", ":", "# Grab the pyramid-wide settings, to look for any auth config.", "settings", "=", "config", ".", "get_settings", "(", ")", "# Hook up a default AuthorizationPolicy.", "# Get the authorization policy from config if present.", "# Default AC...
Include pyramid_multiauth into a pyramid configurator. This function provides a hook for pyramid to include the default settings for auth via pyramid_multiauth. Activate it like so: config.include("pyramid_multiauth") This will pull the list of registered authn policies from the deployment settings, and configure and install each policy in order. The policies to use can be specified in one of two ways: * as the name of a module to be included. * as the name of a callable along with a set of parameters. Here's an example suite of settings: multiauth.policies = ipauth1 ipauth2 pyramid_browserid multiauth.policy.ipauth1.use = pyramid_ipauth.IPAuthentictionPolicy multiauth.policy.ipauth1.ipaddrs = 123.123.0.0/16 multiauth.policy.ipauth1.userid = local1 multiauth.policy.ipauth2.use = pyramid_ipauth.IPAuthentictionPolicy multiauth.policy.ipauth2.ipaddrs = 124.124.0.0/16 multiauth.policy.ipauth2.userid = local2 This will configure a MultiAuthenticationPolicy with three policy objects. The first two will be IPAuthenticationPolicy objects created by passing in the specified keyword arguments. The third will be a BrowserID authentication policy just like you would get from executing: config.include("pyramid_browserid") As a side-effect, the configuration will also get the additional views that pyramid_browserid sets up by default. The *group finder function* and the *authorization policy* are also read from configuration if specified: multiauth.authorization_policy = mypyramidapp.acl.Custom multiauth.groupfinder = mypyramidapp.acl.groupfinder
[ "Include", "pyramid_multiauth", "into", "a", "pyramid", "configurator", "." ]
python
train
saltstack/salt
salt/states/infoblox_range.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/infoblox_range.py#L143-L191
def absent(name=None, start_addr=None, end_addr=None, data=None, **api_opts): ''' Ensure the range is removed Supplying the end of the range is optional. State example: .. code-block:: yaml infoblox_range.absent: - name: 'vlan10' infoblox_range.absent: - name: - start_addr: 127.0.1.20 ''' ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} if not data: data = {} if 'name' not in data: data.update({'name': name}) if 'start_addr' not in data: data.update({'start_addr': start_addr}) if 'end_addr' not in data: data.update({'end_addr': end_addr}) obj = __salt__['infoblox.get_ipv4_range'](data['start_addr'], data['end_addr'], **api_opts) if obj is None: obj = __salt__['infoblox.get_ipv4_range'](start_addr=data['start_addr'], end_addr=None, **api_opts) if obj is None: obj = __salt__['infoblox.get_ipv4_range'](start_addr=None, end_addr=data['end_addr'], **api_opts) if not obj: ret['result'] = True ret['comment'] = 'already deleted' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'would attempt to delete range' return ret if __salt__['infoblox.delete_object'](objref=obj['_ref']): ret['result'] = True ret['changes'] = {'old': 'Found {0} - {1}'.format(start_addr, end_addr), 'new': 'Removed'} return ret
[ "def", "absent", "(", "name", "=", "None", ",", "start_addr", "=", "None", ",", "end_addr", "=", "None", ",", "data", "=", "None", ",", "*", "*", "api_opts", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "False", ",", "'...
Ensure the range is removed Supplying the end of the range is optional. State example: .. code-block:: yaml infoblox_range.absent: - name: 'vlan10' infoblox_range.absent: - name: - start_addr: 127.0.1.20
[ "Ensure", "the", "range", "is", "removed" ]
python
train
pandas-dev/pandas
pandas/io/pytables.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L3130-L3133
def validate_metadata(self, existing): """ create / validate metadata """ self.metadata = [ c.name for c in self.values_axes if c.metadata is not None]
[ "def", "validate_metadata", "(", "self", ",", "existing", ")", ":", "self", ".", "metadata", "=", "[", "c", ".", "name", "for", "c", "in", "self", ".", "values_axes", "if", "c", ".", "metadata", "is", "not", "None", "]" ]
create / validate metadata
[ "create", "/", "validate", "metadata" ]
python
train
jmbhughes/suvi-trainer
suvitrainer/gui.py
https://github.com/jmbhughes/suvi-trainer/blob/3d89894a4a037286221974c7eb5634d229b4f5d4/suvitrainer/gui.py#L279-L290
def updateArray(self, array, indices, value): """ updates array so that pixels at indices take on value :param array: (m,n) array to adjust :param indices: flattened image indices to change value :param value: new value to assign :return: the changed (m,n) array """ lin = np.arange(array.size) new_array = array.flatten() new_array[lin[indices]] = value return new_array.reshape(array.shape)
[ "def", "updateArray", "(", "self", ",", "array", ",", "indices", ",", "value", ")", ":", "lin", "=", "np", ".", "arange", "(", "array", ".", "size", ")", "new_array", "=", "array", ".", "flatten", "(", ")", "new_array", "[", "lin", "[", "indices", ...
updates array so that pixels at indices take on value :param array: (m,n) array to adjust :param indices: flattened image indices to change value :param value: new value to assign :return: the changed (m,n) array
[ "updates", "array", "so", "that", "pixels", "at", "indices", "take", "on", "value", ":", "param", "array", ":", "(", "m", "n", ")", "array", "to", "adjust", ":", "param", "indices", ":", "flattened", "image", "indices", "to", "change", "value", ":", "p...
python
train
gwastro/pycbc-glue
pycbc_glue/ligolw/lsctables.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/ligolw/lsctables.py#L546-L562
def get_in_segmentlistdict(self, process_ids = None): """ Return a segmentlistdict mapping instrument to in segment list. If process_ids is a sequence of process IDs, then only rows with matching IDs are included otherwise all rows are included. Note: the result is not coalesced, each segmentlist contains the segments listed for that instrument as they appeared in the table. """ seglists = segments.segmentlistdict() for row in self: ifos = row.instruments or (None,) if process_ids is None or row.process_id in process_ids: seglists.extend(dict((ifo, segments.segmentlist([row.in_segment])) for ifo in ifos)) return seglists
[ "def", "get_in_segmentlistdict", "(", "self", ",", "process_ids", "=", "None", ")", ":", "seglists", "=", "segments", ".", "segmentlistdict", "(", ")", "for", "row", "in", "self", ":", "ifos", "=", "row", ".", "instruments", "or", "(", "None", ",", ")", ...
Return a segmentlistdict mapping instrument to in segment list. If process_ids is a sequence of process IDs, then only rows with matching IDs are included otherwise all rows are included. Note: the result is not coalesced, each segmentlist contains the segments listed for that instrument as they appeared in the table.
[ "Return", "a", "segmentlistdict", "mapping", "instrument", "to", "in", "segment", "list", ".", "If", "process_ids", "is", "a", "sequence", "of", "process", "IDs", "then", "only", "rows", "with", "matching", "IDs", "are", "included", "otherwise", "all", "rows",...
python
train
rigetti/pyquil
pyquil/noise.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/noise.py#L167-L175
def gates_by_name(self, name): """ Return all defined noisy gates of a particular gate name. :param str name: The gate name. :return: A list of noise models representing that gate. :rtype: Sequence[KrausModel] """ return [g for g in self.gates if g.gate == name]
[ "def", "gates_by_name", "(", "self", ",", "name", ")", ":", "return", "[", "g", "for", "g", "in", "self", ".", "gates", "if", "g", ".", "gate", "==", "name", "]" ]
Return all defined noisy gates of a particular gate name. :param str name: The gate name. :return: A list of noise models representing that gate. :rtype: Sequence[KrausModel]
[ "Return", "all", "defined", "noisy", "gates", "of", "a", "particular", "gate", "name", "." ]
python
train
odlgroup/odl
odl/discr/discr_mappings.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/discr_mappings.py#L559-L590
def _call(self, x, out=None): """Create an interpolator from grid values ``x``. Parameters ---------- x : `Tensor` The array of values to be interpolated out : `FunctionSpaceElement`, optional Element in which to store the interpolator Returns ------- out : `FunctionSpaceElement` Per-axis interpolator for the grid of this operator. If ``out`` was provided, the returned object is a reference to it. """ def per_axis_interp(arg, out=None): """Interpolating function with vectorization.""" if is_valid_input_meshgrid(arg, self.grid.ndim): input_type = 'meshgrid' else: input_type = 'array' interpolator = _PerAxisInterpolator( self.grid.coord_vectors, x, schemes=self.schemes, nn_variants=self.nn_variants, input_type=input_type) return interpolator(arg, out=out) return self.range.element(per_axis_interp, vectorized=True)
[ "def", "_call", "(", "self", ",", "x", ",", "out", "=", "None", ")", ":", "def", "per_axis_interp", "(", "arg", ",", "out", "=", "None", ")", ":", "\"\"\"Interpolating function with vectorization.\"\"\"", "if", "is_valid_input_meshgrid", "(", "arg", ",", "self...
Create an interpolator from grid values ``x``. Parameters ---------- x : `Tensor` The array of values to be interpolated out : `FunctionSpaceElement`, optional Element in which to store the interpolator Returns ------- out : `FunctionSpaceElement` Per-axis interpolator for the grid of this operator. If ``out`` was provided, the returned object is a reference to it.
[ "Create", "an", "interpolator", "from", "grid", "values", "x", "." ]
python
train
jonathf/chaospy
chaospy/quad/collection/leja.py
https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/quad/collection/leja.py#L79-L87
def create_objective(dist, abscissas): """Create objective function.""" abscissas_ = numpy.array(abscissas[1:-1]) def obj(absisa): """Local objective function.""" out = -numpy.sqrt(dist.pdf(absisa)) out *= numpy.prod(numpy.abs(abscissas_ - absisa)) return out return obj
[ "def", "create_objective", "(", "dist", ",", "abscissas", ")", ":", "abscissas_", "=", "numpy", ".", "array", "(", "abscissas", "[", "1", ":", "-", "1", "]", ")", "def", "obj", "(", "absisa", ")", ":", "\"\"\"Local objective function.\"\"\"", "out", "=", ...
Create objective function.
[ "Create", "objective", "function", "." ]
python
train
aws/aws-dynamodb-encryption-python
src/dynamodb_encryption_sdk/internal/crypto/jce_bridge/primitives.py
https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/crypto/jce_bridge/primitives.py#L296-L300
def _disable_encryption(self): # () -> None """Enable encryption methods for ciphers that support them.""" self.encrypt = self._disabled_encrypt self.decrypt = self._disabled_decrypt
[ "def", "_disable_encryption", "(", "self", ")", ":", "# () -> None", "self", ".", "encrypt", "=", "self", ".", "_disabled_encrypt", "self", ".", "decrypt", "=", "self", ".", "_disabled_decrypt" ]
Enable encryption methods for ciphers that support them.
[ "Enable", "encryption", "methods", "for", "ciphers", "that", "support", "them", "." ]
python
train
justquick/django-activity-stream
actstream/templatetags/activity_tags.py
https://github.com/justquick/django-activity-stream/blob/a1e06f2e6429cc5fc321e7801440dd7c5b9d5a35/actstream/templatetags/activity_tags.py#L226-L241
def actor_url(parser, token): """ Renders the URL for a particular actor instance :: <a href="{% actor_url request.user %}">View your actions</a> <a href="{% actor_url another_user %}">{{ another_user }}'s actions</a> """ bits = token.split_contents() if len(bits) != 2: raise TemplateSyntaxError("Accepted format " "{% actor_url [actor_instance] %}") else: return DisplayActivityActorUrl(*bits[1:])
[ "def", "actor_url", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "split_contents", "(", ")", "if", "len", "(", "bits", ")", "!=", "2", ":", "raise", "TemplateSyntaxError", "(", "\"Accepted format \"", "\"{% actor_url [actor_instance] %}\"", ...
Renders the URL for a particular actor instance :: <a href="{% actor_url request.user %}">View your actions</a> <a href="{% actor_url another_user %}">{{ another_user }}'s actions</a>
[ "Renders", "the", "URL", "for", "a", "particular", "actor", "instance" ]
python
train
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L15204-L15230
def vtmvg(v1, matrix, v2, nrow, ncol): """ Multiply the transpose of a n-dimensional column vector a nxm matrix, and a m-dimensional column vector. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vtmvg_c.html :param v1: n-dimensional double precision column vector. :type v1: Array of floats :param matrix: nxm double precision matrix. :type matrix: NxM-Element Array of floats :param v2: m-dimensional double porecision column vector. :type v2: Array of floats :param nrow: Number of rows in matrix (number of rows in v1.) :type nrow: int :param ncol: Number of columns in matrix (number of rows in v2.) :type ncol: int :return: the result of (v1**t * matrix * v2 ) :rtype: float """ v1 = stypes.toDoubleVector(v1) matrix = stypes.toDoubleMatrix(matrix) v2 = stypes.toDoubleVector(v2) nrow = ctypes.c_int(nrow) ncol = ctypes.c_int(ncol) return libspice.vtmvg_c(v1, matrix, v2, nrow, ncol)
[ "def", "vtmvg", "(", "v1", ",", "matrix", ",", "v2", ",", "nrow", ",", "ncol", ")", ":", "v1", "=", "stypes", ".", "toDoubleVector", "(", "v1", ")", "matrix", "=", "stypes", ".", "toDoubleMatrix", "(", "matrix", ")", "v2", "=", "stypes", ".", "toDo...
Multiply the transpose of a n-dimensional column vector a nxm matrix, and a m-dimensional column vector. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vtmvg_c.html :param v1: n-dimensional double precision column vector. :type v1: Array of floats :param matrix: nxm double precision matrix. :type matrix: NxM-Element Array of floats :param v2: m-dimensional double porecision column vector. :type v2: Array of floats :param nrow: Number of rows in matrix (number of rows in v1.) :type nrow: int :param ncol: Number of columns in matrix (number of rows in v2.) :type ncol: int :return: the result of (v1**t * matrix * v2 ) :rtype: float
[ "Multiply", "the", "transpose", "of", "a", "n", "-", "dimensional", "column", "vector", "a", "nxm", "matrix", "and", "a", "m", "-", "dimensional", "column", "vector", "." ]
python
train
dwavesystems/dimod
dimod/binary_quadratic_model.py
https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/binary_quadratic_model.py#L293-L339
def spin(self): """:class:`.BinaryQuadraticModel`: An instance of the Ising model subclass of the :class:`.BinaryQuadraticModel` superclass, corresponding to a binary quadratic model with spins as its variables. Enables access to biases for the spin-valued binary quadratic model regardless of the :class:`vartype` set when the model was created. If the model was created with the :attr:`.binary` vartype, the Ising model subclass is instantiated upon the first use of the :attr:`.spin` property and used in any subsequent reads. Examples: This example creates a QUBO model and uses the :attr:`.spin` property to instantiate the corresponding Ising model. >>> import dimod ... >>> bqm_qubo = dimod.BinaryQuadraticModel({0: -1, 1: -1}, {(0, 1): 2}, 0.0, dimod.BINARY) >>> bqm_spin = bqm_qubo.spin >>> bqm_spin # doctest: +SKIP BinaryQuadraticModel({0: 0.0, 1: 0.0}, {(0, 1): 0.5}, -0.5, Vartype.SPIN) >>> bqm_spin.spin is bqm_spin True Note: Methods like :meth:`.add_variable`, :meth:`.add_variables_from`, :meth:`.add_interaction`, etc. should only be used on the base model. """ # NB: The existence of the _spin property implies that it is up to date, methods that # invalidate it will erase the property try: spin = self._spin if spin is not None: return spin except AttributeError: pass if self.vartype is Vartype.SPIN: self._spin = spin = self else: self._counterpart = self._spin = spin = self.change_vartype(Vartype.SPIN, inplace=False) # we also want to go ahead and set spin.binary to refer back to self spin._binary = self return spin
[ "def", "spin", "(", "self", ")", ":", "# NB: The existence of the _spin property implies that it is up to date, methods that", "# invalidate it will erase the property", "try", ":", "spin", "=", "self", ".", "_spin", "if", "spin", "is", "not", "None", ":", "return", "spin...
:class:`.BinaryQuadraticModel`: An instance of the Ising model subclass of the :class:`.BinaryQuadraticModel` superclass, corresponding to a binary quadratic model with spins as its variables. Enables access to biases for the spin-valued binary quadratic model regardless of the :class:`vartype` set when the model was created. If the model was created with the :attr:`.binary` vartype, the Ising model subclass is instantiated upon the first use of the :attr:`.spin` property and used in any subsequent reads. Examples: This example creates a QUBO model and uses the :attr:`.spin` property to instantiate the corresponding Ising model. >>> import dimod ... >>> bqm_qubo = dimod.BinaryQuadraticModel({0: -1, 1: -1}, {(0, 1): 2}, 0.0, dimod.BINARY) >>> bqm_spin = bqm_qubo.spin >>> bqm_spin # doctest: +SKIP BinaryQuadraticModel({0: 0.0, 1: 0.0}, {(0, 1): 0.5}, -0.5, Vartype.SPIN) >>> bqm_spin.spin is bqm_spin True Note: Methods like :meth:`.add_variable`, :meth:`.add_variables_from`, :meth:`.add_interaction`, etc. should only be used on the base model.
[ ":", "class", ":", ".", "BinaryQuadraticModel", ":", "An", "instance", "of", "the", "Ising", "model", "subclass", "of", "the", ":", "class", ":", ".", "BinaryQuadraticModel", "superclass", "corresponding", "to", "a", "binary", "quadratic", "model", "with", "sp...
python
train
chrisjrn/registrasion
registrasion/controllers/invoice.py
https://github.com/chrisjrn/registrasion/blob/461d5846c6f9f3b7099322a94f5d9911564448e4/registrasion/controllers/invoice.py#L247-L251
def _refresh(self): ''' Refreshes the underlying invoice and cart objects. ''' self.invoice.refresh_from_db() if self.invoice.cart: self.invoice.cart.refresh_from_db()
[ "def", "_refresh", "(", "self", ")", ":", "self", ".", "invoice", ".", "refresh_from_db", "(", ")", "if", "self", ".", "invoice", ".", "cart", ":", "self", ".", "invoice", ".", "cart", ".", "refresh_from_db", "(", ")" ]
Refreshes the underlying invoice and cart objects.
[ "Refreshes", "the", "underlying", "invoice", "and", "cart", "objects", "." ]
python
test
CodeReclaimers/neat-python
examples/xor/visualize.py
https://github.com/CodeReclaimers/neat-python/blob/e3dbe77c0d776eae41d598e6439e6ac02ab90b18/examples/xor/visualize.py#L42-L88
def plot_spikes(spikes, view=False, filename=None, title=None): """ Plots the trains for a single spiking neuron. """ t_values = [t for t, I, v, u, f in spikes] v_values = [v for t, I, v, u, f in spikes] u_values = [u for t, I, v, u, f in spikes] I_values = [I for t, I, v, u, f in spikes] f_values = [f for t, I, v, u, f in spikes] fig = plt.figure() plt.subplot(4, 1, 1) plt.ylabel("Potential (mv)") plt.xlabel("Time (in ms)") plt.grid() plt.plot(t_values, v_values, "g-") if title is None: plt.title("Izhikevich's spiking neuron model") else: plt.title("Izhikevich's spiking neuron model ({0!s})".format(title)) plt.subplot(4, 1, 2) plt.ylabel("Fired") plt.xlabel("Time (in ms)") plt.grid() plt.plot(t_values, f_values, "r-") plt.subplot(4, 1, 3) plt.ylabel("Recovery (u)") plt.xlabel("Time (in ms)") plt.grid() plt.plot(t_values, u_values, "r-") plt.subplot(4, 1, 4) plt.ylabel("Current (I)") plt.xlabel("Time (in ms)") plt.grid() plt.plot(t_values, I_values, "r-o") if filename is not None: plt.savefig(filename) if view: plt.show() plt.close() fig = None return fig
[ "def", "plot_spikes", "(", "spikes", ",", "view", "=", "False", ",", "filename", "=", "None", ",", "title", "=", "None", ")", ":", "t_values", "=", "[", "t", "for", "t", ",", "I", ",", "v", ",", "u", ",", "f", "in", "spikes", "]", "v_values", "...
Plots the trains for a single spiking neuron.
[ "Plots", "the", "trains", "for", "a", "single", "spiking", "neuron", "." ]
python
train
shexSpec/grammar
parsers/python/pyshexc/parser_impl/shex_shape_definition_parser.py
https://github.com/shexSpec/grammar/blob/4497cd1f73fa6703bca6e2cb53ba9c120f22e48c/parsers/python/pyshexc/parser_impl/shex_shape_definition_parser.py#L45-L62
def visitShapeDefinition(self, ctx: ShExDocParser.ShapeDefinitionContext): """ shapeDefinition: qualifier* '{' oneOfShape? '}' annotation* semanticActions """ if ctx.qualifier(): for q in ctx.qualifier(): self.visit(q) if ctx.oneOfShape(): oneof_parser = ShexOneOfShapeParser(self.context) oneof_parser.visit(ctx.oneOfShape()) self.shape.expression = oneof_parser.expression if ctx.annotation() or ctx.semanticActions(): ansem_parser = ShexAnnotationAndSemactsParser(self.context) for annot in ctx.annotation(): ansem_parser.visit(annot) ansem_parser.visit(ctx.semanticActions()) if ansem_parser.semacts: self.shape.semActs = ansem_parser.semacts if ansem_parser.annotations: self.shape.annotations = ansem_parser.annotations
[ "def", "visitShapeDefinition", "(", "self", ",", "ctx", ":", "ShExDocParser", ".", "ShapeDefinitionContext", ")", ":", "if", "ctx", ".", "qualifier", "(", ")", ":", "for", "q", "in", "ctx", ".", "qualifier", "(", ")", ":", "self", ".", "visit", "(", "q...
shapeDefinition: qualifier* '{' oneOfShape? '}' annotation* semanticActions
[ "shapeDefinition", ":", "qualifier", "*", "{", "oneOfShape?", "}", "annotation", "*", "semanticActions" ]
python
train
skorch-dev/skorch
skorch/net.py
https://github.com/skorch-dev/skorch/blob/5b9b8b7b7712cb6e5aaa759d9608ea6269d5bcd3/skorch/net.py#L1645-L1660
def load_history(self, f): """Load the history of a ``NeuralNet`` from a json file. See ``save_history`` for examples. Parameters ---------- f : file-like object or str """ # TODO: Remove warning in a future release warnings.warn( "load_history is deprecated and will be removed in the next " "release, please use load_params with the f_history keyword", DeprecationWarning) self.history = History.from_file(f)
[ "def", "load_history", "(", "self", ",", "f", ")", ":", "# TODO: Remove warning in a future release", "warnings", ".", "warn", "(", "\"load_history is deprecated and will be removed in the next \"", "\"release, please use load_params with the f_history keyword\"", ",", "DeprecationWa...
Load the history of a ``NeuralNet`` from a json file. See ``save_history`` for examples. Parameters ---------- f : file-like object or str
[ "Load", "the", "history", "of", "a", "NeuralNet", "from", "a", "json", "file", ".", "See", "save_history", "for", "examples", "." ]
python
train
pioneers/python-grizzly
grizzly/__init__.py
https://github.com/pioneers/python-grizzly/blob/a6482c722d5712d6ebe12d48921815276c826c7f/grizzly/__init__.py#L231-L240
def has_reset(self): """Checks the grizzly to see if it reset itself because of voltage sag or other reasons. Useful to reinitialize acceleration or current limiting.""" currentTime = self._read_as_int(Addr.Uptime, 4) if currentTime <= self._ticks: self._ticks = currentTime return True self._ticks = currentTime return False
[ "def", "has_reset", "(", "self", ")", ":", "currentTime", "=", "self", ".", "_read_as_int", "(", "Addr", ".", "Uptime", ",", "4", ")", "if", "currentTime", "<=", "self", ".", "_ticks", ":", "self", ".", "_ticks", "=", "currentTime", "return", "True", "...
Checks the grizzly to see if it reset itself because of voltage sag or other reasons. Useful to reinitialize acceleration or current limiting.
[ "Checks", "the", "grizzly", "to", "see", "if", "it", "reset", "itself", "because", "of", "voltage", "sag", "or", "other", "reasons", ".", "Useful", "to", "reinitialize", "acceleration", "or", "current", "limiting", "." ]
python
train
CivicSpleen/ambry
ambry/jupyter.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/jupyter.py#L12-L20
def warehouse_query(line, cell): "my cell magic" from IPython import get_ipython parts = line.split() w_var_name = parts.pop(0) w = get_ipython().ev(w_var_name) w.query(cell).close()
[ "def", "warehouse_query", "(", "line", ",", "cell", ")", ":", "from", "IPython", "import", "get_ipython", "parts", "=", "line", ".", "split", "(", ")", "w_var_name", "=", "parts", ".", "pop", "(", "0", ")", "w", "=", "get_ipython", "(", ")", ".", "ev...
my cell magic
[ "my", "cell", "magic" ]
python
train
estnltk/estnltk
estnltk/text.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L1094-L1098
def verb_chain_starts(self): """The start positions of ``verb_chains`` elements.""" if not self.is_tagged(VERB_CHAINS): self.tag_verb_chains() return self.starts(VERB_CHAINS)
[ "def", "verb_chain_starts", "(", "self", ")", ":", "if", "not", "self", ".", "is_tagged", "(", "VERB_CHAINS", ")", ":", "self", ".", "tag_verb_chains", "(", ")", "return", "self", ".", "starts", "(", "VERB_CHAINS", ")" ]
The start positions of ``verb_chains`` elements.
[ "The", "start", "positions", "of", "verb_chains", "elements", "." ]
python
train
Accelize/pycosio
pycosio/_core/io_base_raw.py
https://github.com/Accelize/pycosio/blob/1cc1f8fdf5394d92918b7bae2bfa682169ccc48c/pycosio/_core/io_base_raw.py#L262-L284
def readall(self): """ Read and return all the bytes from the stream until EOF. Returns: bytes: Object content """ if not self._readable: raise UnsupportedOperation('read') with self._seek_lock: # Get data starting from seek with handle_os_exceptions(): if self._seek and self._seekable: data = self._read_range(self._seek) # Get all data else: data = self._readall() # Update seek self._seek += len(data) return data
[ "def", "readall", "(", "self", ")", ":", "if", "not", "self", ".", "_readable", ":", "raise", "UnsupportedOperation", "(", "'read'", ")", "with", "self", ".", "_seek_lock", ":", "# Get data starting from seek", "with", "handle_os_exceptions", "(", ")", ":", "i...
Read and return all the bytes from the stream until EOF. Returns: bytes: Object content
[ "Read", "and", "return", "all", "the", "bytes", "from", "the", "stream", "until", "EOF", "." ]
python
train
genialis/resolwe
resolwe/flow/managers/workload_connectors/slurm.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/workload_connectors/slurm.py#L29-L73
def submit(self, data, runtime_dir, argv): """Run process with SLURM. For details, see :meth:`~resolwe.flow.managers.workload_connectors.base.BaseConnector.submit`. """ limits = data.process.get_resource_limits() logger.debug(__( "Connector '{}' running for Data with id {} ({}).", self.__class__.__module__, data.id, repr(argv) )) # Compute target partition. partition = getattr(settings, 'FLOW_SLURM_PARTITION_DEFAULT', None) if data.process.slug in getattr(settings, 'FLOW_SLURM_PARTITION_OVERRIDES', {}): partition = settings.FLOW_SLURM_PARTITION_OVERRIDES[data.process.slug] try: # Make sure the resulting file is executable on creation. script_path = os.path.join(runtime_dir, 'slurm.sh') file_descriptor = os.open(script_path, os.O_WRONLY | os.O_CREAT, mode=0o555) with os.fdopen(file_descriptor, 'wt') as script: script.write('#!/bin/bash\n') script.write('#SBATCH --mem={}M\n'.format(limits['memory'] + EXECUTOR_MEMORY_OVERHEAD)) script.write('#SBATCH --cpus-per-task={}\n'.format(limits['cores'])) if partition: script.write('#SBATCH --partition={}\n'.format(partition)) # Render the argument vector into a command line. line = ' '.join(map(shlex.quote, argv)) script.write(line + '\n') command = ['/usr/bin/env', 'sbatch', script_path] subprocess.Popen( command, cwd=runtime_dir, stdin=subprocess.DEVNULL ).wait() except OSError as err: logger.error(__( "OSError occurred while preparing SLURM script for Data {}: {}", data.id, err ))
[ "def", "submit", "(", "self", ",", "data", ",", "runtime_dir", ",", "argv", ")", ":", "limits", "=", "data", ".", "process", ".", "get_resource_limits", "(", ")", "logger", ".", "debug", "(", "__", "(", "\"Connector '{}' running for Data with id {} ({}).\"", "...
Run process with SLURM. For details, see :meth:`~resolwe.flow.managers.workload_connectors.base.BaseConnector.submit`.
[ "Run", "process", "with", "SLURM", "." ]
python
train
manns/pyspread
pyspread/src/actions/_grid_actions.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/actions/_grid_actions.py#L1251-L1280
def zoom_fit(self): """Zooms the rid to fit the window. Only has an effect if the resulting zoom level is between minimum and maximum zoom level. """ zoom = self.grid.grid_renderer.zoom grid_width, grid_height = self.grid.GetSize() rows_height = self._get_rows_height() + \ (float(self.grid.GetColLabelSize()) / zoom) cols_width = self._get_cols_width() + \ (float(self.grid.GetRowLabelSize()) / zoom) # Check target zoom for rows zoom_height = float(grid_height) / rows_height # Check target zoom for columns zoom_width = float(grid_width) / cols_width # Use the minimum target zoom from rows and column target zooms target_zoom = min(zoom_height, zoom_width) # Zoom only if between min and max if config["minimum_zoom"] < target_zoom < config["maximum_zoom"]: self.zoom(target_zoom)
[ "def", "zoom_fit", "(", "self", ")", ":", "zoom", "=", "self", ".", "grid", ".", "grid_renderer", ".", "zoom", "grid_width", ",", "grid_height", "=", "self", ".", "grid", ".", "GetSize", "(", ")", "rows_height", "=", "self", ".", "_get_rows_height", "(",...
Zooms the rid to fit the window. Only has an effect if the resulting zoom level is between minimum and maximum zoom level.
[ "Zooms", "the", "rid", "to", "fit", "the", "window", "." ]
python
train
aleju/imgaug
imgaug/external/poly_point_isect.py
https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/external/poly_point_isect.py#L514-L529
def offer(self, p, e: Event): """ Offer a new event ``s`` at point ``p`` in this queue. """ existing = self.events_scan.setdefault( p, ([], [], [], []) if USE_VERTICAL else ([], [], [])) # Can use double linked-list for easy insertion at beginning/end ''' if e.type == Event.Type.END: existing.insert(0, e) else: existing.append(e) ''' existing[e.type].append(e)
[ "def", "offer", "(", "self", ",", "p", ",", "e", ":", "Event", ")", ":", "existing", "=", "self", ".", "events_scan", ".", "setdefault", "(", "p", ",", "(", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ")", "if", "USE_VERTICAL", "el...
Offer a new event ``s`` at point ``p`` in this queue.
[ "Offer", "a", "new", "event", "s", "at", "point", "p", "in", "this", "queue", "." ]
python
valid
inveniosoftware/invenio-pidrelations
invenio_pidrelations/serializers/schemas.py
https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/serializers/schemas.py#L88-L102
def dump_is_last(self, obj): """Dump the boolean stating if the child in the relation is last. Dumps `None` for parent serialization. """ if self._is_child(obj) and isinstance(obj, PIDNodeOrdered): if obj.children.count() > 0: return obj.children.ordered('asc').all()[-1] == \ self.context['pid'] elif obj.draft_child: return obj.draft_child == self.context['pid'] else: return True else: return None
[ "def", "dump_is_last", "(", "self", ",", "obj", ")", ":", "if", "self", ".", "_is_child", "(", "obj", ")", "and", "isinstance", "(", "obj", ",", "PIDNodeOrdered", ")", ":", "if", "obj", ".", "children", ".", "count", "(", ")", ">", "0", ":", "retur...
Dump the boolean stating if the child in the relation is last. Dumps `None` for parent serialization.
[ "Dump", "the", "boolean", "stating", "if", "the", "child", "in", "the", "relation", "is", "last", "." ]
python
train
tensorpack/tensorpack
examples/FasterRCNN/utils/np_box_ops.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/utils/np_box_ops.py#L63-L78
def iou(boxes1, boxes2): """Computes pairwise intersection-over-union between box collections. Args: boxes1: a numpy array with shape [N, 4] holding N boxes. boxes2: a numpy array with shape [M, 4] holding M boxes. Returns: a numpy array with shape [N, M] representing pairwise iou scores. """ intersect = intersection(boxes1, boxes2) area1 = area(boxes1) area2 = area(boxes2) union = np.expand_dims(area1, axis=1) + np.expand_dims( area2, axis=0) - intersect return intersect / union
[ "def", "iou", "(", "boxes1", ",", "boxes2", ")", ":", "intersect", "=", "intersection", "(", "boxes1", ",", "boxes2", ")", "area1", "=", "area", "(", "boxes1", ")", "area2", "=", "area", "(", "boxes2", ")", "union", "=", "np", ".", "expand_dims", "("...
Computes pairwise intersection-over-union between box collections. Args: boxes1: a numpy array with shape [N, 4] holding N boxes. boxes2: a numpy array with shape [M, 4] holding M boxes. Returns: a numpy array with shape [N, M] representing pairwise iou scores.
[ "Computes", "pairwise", "intersection", "-", "over", "-", "union", "between", "box", "collections", "." ]
python
train
shoebot/shoebot
shoebot/core/cairo_sink.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/shoebot/core/cairo_sink.py#L96-L104
def rendering_finished(self, size, frame, cairo_ctx): """ Called when CairoCanvas has rendered a bot """ surface = cairo_ctx.get_target() if self.format == 'png': surface.write_to_png(self._output_file(frame)) surface.finish() surface.flush()
[ "def", "rendering_finished", "(", "self", ",", "size", ",", "frame", ",", "cairo_ctx", ")", ":", "surface", "=", "cairo_ctx", ".", "get_target", "(", ")", "if", "self", ".", "format", "==", "'png'", ":", "surface", ".", "write_to_png", "(", "self", ".", ...
Called when CairoCanvas has rendered a bot
[ "Called", "when", "CairoCanvas", "has", "rendered", "a", "bot" ]
python
valid
toumorokoshi/sprinter
sprinter/next/environment/injections.py
https://github.com/toumorokoshi/sprinter/blob/846697a7a087e69c61d075232e754d6975a64152/sprinter/next/environment/injections.py#L42-L48
def inject(self, filename, content): """ add the injection content to the dictionary """ # ensure content always has one trailing newline content = _unicode(content).rstrip() + "\n" if filename not in self.inject_dict: self.inject_dict[filename] = "" self.inject_dict[filename] += content
[ "def", "inject", "(", "self", ",", "filename", ",", "content", ")", ":", "# ensure content always has one trailing newline", "content", "=", "_unicode", "(", "content", ")", ".", "rstrip", "(", ")", "+", "\"\\n\"", "if", "filename", "not", "in", "self", ".", ...
add the injection content to the dictionary
[ "add", "the", "injection", "content", "to", "the", "dictionary" ]
python
train
ray-project/ray
python/ray/monitor.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/monitor.py#L295-L325
def run(self): """Run the monitor. This function loops forever, checking for messages about dead database clients and cleaning up state accordingly. """ # Initialize the subscription channel. self.subscribe(ray.gcs_utils.XRAY_HEARTBEAT_BATCH_CHANNEL) self.subscribe(ray.gcs_utils.XRAY_DRIVER_CHANNEL) # TODO(rkn): If there were any dead clients at startup, we should clean # up the associated state in the state tables. # Handle messages from the subscription channels. while True: # Update the mapping from raylet client ID to IP address. # This is only used to update the load metrics for the autoscaler. self.update_raylet_map() # Process autoscaling actions if self.autoscaler: self.autoscaler.update() self._maybe_flush_gcs() # Process a round of messages. self.process_messages() # Wait for a heartbeat interval before processing the next round of # messages. time.sleep(ray._config.heartbeat_timeout_milliseconds() * 1e-3)
[ "def", "run", "(", "self", ")", ":", "# Initialize the subscription channel.", "self", ".", "subscribe", "(", "ray", ".", "gcs_utils", ".", "XRAY_HEARTBEAT_BATCH_CHANNEL", ")", "self", ".", "subscribe", "(", "ray", ".", "gcs_utils", ".", "XRAY_DRIVER_CHANNEL", ")"...
Run the monitor. This function loops forever, checking for messages about dead database clients and cleaning up state accordingly.
[ "Run", "the", "monitor", "." ]
python
train
ZELLMECHANIK-DRESDEN/dclab
dclab/kde_methods.py
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/kde_methods.py#L23-L39
def bin_width_doane(a): """Compute accuracy (bin width) based on Doane's formula References ---------- - `<https://en.wikipedia.org/wiki/Histogram#Number_of_bins_and_width>`_ - `<https://stats.stackexchange.com/questions/55134/ doanes-formula-for-histogram-binning>`_ """ bad = np.isnan(a) | np.isinf(a) data = a[~bad] n = data.size g1 = skew(data) sigma_g1 = np.sqrt(6 * (n - 2) / ((n + 1) * (n + 3))) k = 1 + np.log2(n) + np.log2(1 + np.abs(g1) / sigma_g1) acc = (data.max() - data.min()) / k return acc
[ "def", "bin_width_doane", "(", "a", ")", ":", "bad", "=", "np", ".", "isnan", "(", "a", ")", "|", "np", ".", "isinf", "(", "a", ")", "data", "=", "a", "[", "~", "bad", "]", "n", "=", "data", ".", "size", "g1", "=", "skew", "(", "data", ")",...
Compute accuracy (bin width) based on Doane's formula References ---------- - `<https://en.wikipedia.org/wiki/Histogram#Number_of_bins_and_width>`_ - `<https://stats.stackexchange.com/questions/55134/ doanes-formula-for-histogram-binning>`_
[ "Compute", "accuracy", "(", "bin", "width", ")", "based", "on", "Doane", "s", "formula" ]
python
train
interedition/collatex
collatex-pythonport/ClusterShell/RangeSet.py
https://github.com/interedition/collatex/blob/76dd1fcc36047bc66a87d31142e72e98b5347821/collatex-pythonport/ClusterShell/RangeSet.py#L271-L283
def _strslices(self): """Stringify slices list (x-y/step format)""" pad = self.padding or 0 for sli in self.slices(): if sli.start + 1 == sli.stop: yield "%0*d" % (pad, sli.start) else: assert sli.step >= 0, "Internal error: sli.step < 0" if sli.step == 1: yield "%0*d-%0*d" % (pad, sli.start, pad, sli.stop - 1) else: yield "%0*d-%0*d/%d" % (pad, sli.start, pad, sli.stop - 1, \ sli.step)
[ "def", "_strslices", "(", "self", ")", ":", "pad", "=", "self", ".", "padding", "or", "0", "for", "sli", "in", "self", ".", "slices", "(", ")", ":", "if", "sli", ".", "start", "+", "1", "==", "sli", ".", "stop", ":", "yield", "\"%0*d\"", "%", "...
Stringify slices list (x-y/step format)
[ "Stringify", "slices", "list", "(", "x", "-", "y", "/", "step", "format", ")" ]
python
train
mmp2/megaman
megaman/relaxation/trace_variable.py
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/relaxation/trace_variable.py#L36-L55
def report_and_save_keywords(self,relaxation_kwds,precomputed_kwds): """Save relaxation keywords to .txt and .pyc file""" report_name = os.path.join(self.backup_dir,'relaxation_keywords.txt') pretty_relax_kwds = pprint.pformat(relaxation_kwds,indent=4) with open(report_name,'w') as wf: wf.write(pretty_relax_kwds) wf.close() origin_name = os.path.join(self.backup_dir,'relaxation_keywords.pyc') with open(origin_name,'wb') as ro: pickle.dump(relaxation_kwds,ro,protocol=pickle.HIGHEST_PROTOCOL) ro.close() if relaxation_kwds['presave']: precomp_kwds_name = os.path.join(self.backup_dir, 'precomputed_keywords.pyc') with open(precomp_kwds_name, 'wb') as po: pickle.dump(precomputed_kwds, po, protocol=pickle.HIGHEST_PROTOCOL) po.close()
[ "def", "report_and_save_keywords", "(", "self", ",", "relaxation_kwds", ",", "precomputed_kwds", ")", ":", "report_name", "=", "os", ".", "path", ".", "join", "(", "self", ".", "backup_dir", ",", "'relaxation_keywords.txt'", ")", "pretty_relax_kwds", "=", "pprint"...
Save relaxation keywords to .txt and .pyc file
[ "Save", "relaxation", "keywords", "to", ".", "txt", "and", ".", "pyc", "file" ]
python
train
inveniosoftware/invenio-github
invenio_github/tasks.py
https://github.com/inveniosoftware/invenio-github/blob/ec42fd6a06079310dcbe2c46d9fd79d5197bbe26/invenio_github/tasks.py#L87-L143
def process_release(release_id, verify_sender=False):
    """Process a received Release.

    Loads the Release row (only if currently RECEIVED or FAILED), marks it
    PROCESSING, publishes it via the configured release API class, and
    records the final PUBLISHED/FAILED status.  The final status/errors are
    always committed in the ``finally`` block.
    """
    # Imports are local to keep this task importable without app context.
    from invenio_db import db
    from invenio_rest.errors import RESTException
    from .errors import InvalidSenderError
    from .models import Release, ReleaseStatus
    from .proxies import current_github
    # .one() raises if no RECEIVED/FAILED release matches -> task errors out.
    release_model = Release.query.filter(
        Release.release_id == release_id,
        Release.status.in_([ReleaseStatus.RECEIVED, ReleaseStatus.FAILED]),
    ).one()
    release_model.status = ReleaseStatus.PROCESSING
    # Commit immediately so concurrent workers see the PROCESSING state.
    db.session.commit()
    release = current_github.release_api_class(release_model)
    if verify_sender and not release.verify_sender():
        raise InvalidSenderError(
            u'Invalid sender for event {event} for user {user}'
            .format(event=release.event.id, user=release.event.user_id)
        )

    def _get_err_obj(msg):
        """Generate the error entry with a Sentry ID."""
        err = {'errors': msg}
        # Attach the Sentry event id when the Sentry extension recorded one.
        if hasattr(g, 'sentry_event_id'):
            err['error_id'] = str(g.sentry_event_id)
        return err

    try:
        release.publish()
        release.model.status = ReleaseStatus.PUBLISHED
    except RESTException as rest_ex:
        # REST errors carry a JSON body; store it verbatim as the error list.
        release.model.errors = json.loads(rest_ex.get_body())
        release.model.status = ReleaseStatus.FAILED
        current_app.logger.exception(
            u'Error while processing {release}'.format(release=release.model))
    # TODO: We may want to handle GitHub errors differently in the future
    # except GitHubError as github_ex:
    #     release.model.errors = {'error': str(e)}
    #     release.model.status = ReleaseStatus.FAILED
    #     current_app.logger.exception(
    #         'Error while processing {release}'
    #         .format(release=release.model))
    except CustomGitHubMetadataError as e:
        release.model.errors = _get_err_obj(str(e))
        release.model.status = ReleaseStatus.FAILED
        current_app.logger.exception(
            u'Error while processing {release}'.format(release=release.model))
    except Exception:
        # Catch-all so the final status is always persisted.
        # NOTE(review): 'occured' is a typo ('occurred') in this user-visible
        # error message -- left as-is here; fix would change runtime output.
        release.model.errors = _get_err_obj('Unknown error occured.')
        release.model.status = ReleaseStatus.FAILED
        current_app.logger.exception(
            u'Error while processing {release}'.format(release=release.model))
    finally:
        # Persist whichever status/errors were set above.
        db.session.commit()
[ "def", "process_release", "(", "release_id", ",", "verify_sender", "=", "False", ")", ":", "from", "invenio_db", "import", "db", "from", "invenio_rest", ".", "errors", "import", "RESTException", "from", ".", "errors", "import", "InvalidSenderError", "from", ".", ...
Process a received Release.
[ "Process", "a", "received", "Release", "." ]
python
train
pydata/xarray
xarray/core/utils.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/utils.py#L258-L265
def to_0d_array(value: Any) -> np.ndarray:
    """Given a value, wrap it in a 0-D numpy.ndarray.

    Scalars and already-0-D arrays go through ``np.array``; anything
    else is boxed as a 0-D object array.
    """
    is_zero_dim_array = isinstance(value, np.ndarray) and value.ndim == 0
    if not (np.isscalar(value) or is_zero_dim_array):
        return to_0d_object_array(value)
    return np.array(value)
[ "def", "to_0d_array", "(", "value", ":", "Any", ")", "->", "np", ".", "ndarray", ":", "if", "np", ".", "isscalar", "(", "value", ")", "or", "(", "isinstance", "(", "value", ",", "np", ".", "ndarray", ")", "and", "value", ".", "ndim", "==", "0", "...
Given a value, wrap it in a 0-D numpy.ndarray.
[ "Given", "a", "value", "wrap", "it", "in", "a", "0", "-", "D", "numpy", ".", "ndarray", "." ]
python
train
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L5796-L5813
def submitScreenshot(self, screenshotHandle, type_, pchSourcePreviewFilename, pchSourceVRFilename):
    """
    Submit the completed screenshot via the native OpenVR function table.

    With Steam running this uploads the screenshot to the screenshots
    section of the library for the running application; without Steam it
    only shows the user a notification that the shot was taken.  Both
    file paths must be absolute and include their extensions.
    screenshotHandle may be k_unScreenshotHandleInvalid for an
    app-initiated shot (e.g. an achievement capture) rather than a
    user-initiated one.
    """
    # Delegate straight to the underlying C function table entry.
    submit = self.function_table.submitScreenshot
    return submit(screenshotHandle, type_, pchSourcePreviewFilename, pchSourceVRFilename)
[ "def", "submitScreenshot", "(", "self", ",", "screenshotHandle", ",", "type_", ",", "pchSourcePreviewFilename", ",", "pchSourceVRFilename", ")", ":", "fn", "=", "self", ".", "function_table", ".", "submitScreenshot", "result", "=", "fn", "(", "screenshotHandle", "...
Submit the completed screenshot. If Steam is running this will call into the Steam client and upload the screenshot to the screenshots section of the library for the running application. If Steam is not running, this function will display a notification to the user that the screenshot was taken. The paths should be full paths with extensions. File paths should be absolute including extensions. screenshotHandle can be k_unScreenshotHandleInvalid if this was a new shot taken by the app to be saved and not initiated by a user (achievement earned or something)
[ "Submit", "the", "completed", "screenshot", ".", "If", "Steam", "is", "running", "this", "will", "call", "into", "the", "Steam", "client", "and", "upload", "the", "screenshot", "to", "the", "screenshots", "section", "of", "the", "library", "for", "the", "run...
python
train