Dataset schema (per record):
  repo               string, 7–54 characters
  path               string, 4–192 characters
  url                string, 87–284 characters
  code               string, 78–104k characters
  code_tokens        list of strings
  docstring          string, 1–46.9k characters
  docstring_tokens   list of strings
  language           string, 1 class (python)
  partition          string, 3 classes (train / valid / test)
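The records below are flattened one field per line. As a minimal sketch of how such an export could be iterated, assume the records have been saved locally as JSON Lines with the fields above; the records.jsonl filename and the printed summary are illustrative, not part of the original dump:

```python
import json

# Assumed local export of the records shown below, one JSON object per line,
# carrying the fields listed in the schema above. The filename is hypothetical.
DUMP_PATH = "records.jsonl"

with open(DUMP_PATH, encoding="utf-8") as handle:
    for line in handle:
        record = json.loads(line)
        # Each record pairs a Python function (code / code_tokens) with its
        # docstring (docstring / docstring_tokens) and a partition label.
        print("{repo}:{path} [{partition}]".format(**record))
        print("  docstring:", record["docstring"].split("\n", 1)[0][:80])
        print("  code tokens:", len(record["code_tokens"]))
```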
google/dotty
efilter/parsers/common/tokenizer.py
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/common/tokenizer.py#L268-L305
def _next_pattern(self): """Parses the next pattern by matching each in turn.""" current_state = self.state_stack[-1] position = self._position for pattern in self.patterns: if current_state not in pattern.states: continue m = pattern.regex.match(self.source, position) if not m: continue position = m.end() token = None if pattern.next_state: self.state_stack.append(pattern.next_state) if pattern.action: callback = getattr(self, pattern.action, None) if callback is None: raise RuntimeError( "No method defined for pattern action %s!" % pattern.action) if "token" in m.groups(): value = m.group("token") else: value = m.group(0) token = callback(string=value, match=m, pattern=pattern) self._position = position return token self._error("Don't know how to match next. Did you forget quotes?", start=self._position, end=self._position + 1)
[ "def", "_next_pattern", "(", "self", ")", ":", "current_state", "=", "self", ".", "state_stack", "[", "-", "1", "]", "position", "=", "self", ".", "_position", "for", "pattern", "in", "self", ".", "patterns", ":", "if", "current_state", "not", "in", "pat...
Parses the next pattern by matching each in turn.
[ "Parses", "the", "next", "pattern", "by", "matching", "each", "in", "turn", "." ]
python
train
mozilla/configman
configman/config_manager.py
https://github.com/mozilla/configman/blob/83159fed61cc4cbbe5a4a6a00d3acad8a0c39c96/configman/config_manager.py#L451-L472
def dump_conf(self, config_pathname=None): """write a config file to the pathname specified in the parameter. The file extention determines the type of file written and must match a registered type. parameters: config_pathname - the full path and filename of the target config file.""" if not config_pathname: config_pathname = self._get_option('admin.dump_conf').value opener = functools.partial(open, config_pathname, 'w') config_file_type = os.path.splitext(config_pathname)[1][1:] skip_keys = [ k for (k, v) in six.iteritems(self.option_definitions) if isinstance(v, Option) and v.exclude_from_dump_conf ] self.write_conf(config_file_type, opener, skip_keys=skip_keys)
[ "def", "dump_conf", "(", "self", ",", "config_pathname", "=", "None", ")", ":", "if", "not", "config_pathname", ":", "config_pathname", "=", "self", ".", "_get_option", "(", "'admin.dump_conf'", ")", ".", "value", "opener", "=", "functools", ".", "partial", ...
write a config file to the pathname specified in the parameter. The file extention determines the type of file written and must match a registered type. parameters: config_pathname - the full path and filename of the target config file.
[ "write", "a", "config", "file", "to", "the", "pathname", "specified", "in", "the", "parameter", ".", "The", "file", "extention", "determines", "the", "type", "of", "file", "written", "and", "must", "match", "a", "registered", "type", "." ]
python
train
ArchiveTeam/wpull
wpull/application/tasks/writer.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/application/tasks/writer.py#L21-L93
def _build_file_writer(cls, session: AppSession): '''Create the File Writer. Returns: FileWriter: An instance of :class:`.writer.BaseFileWriter`. ''' args = session.args if args.delete_after: return session.factory.new('FileWriter') # is a NullWriter elif args.output_document: session.factory.class_map['FileWriter'] = SingleDocumentWriter return session.factory.new('FileWriter', args.output_document, headers_included=args.save_headers) use_dir = (len(args.urls) != 1 or args.page_requisites or args.recursive) if args.use_directories == 'force': use_dir = True elif args.use_directories == 'no': use_dir = False os_type = 'windows' if 'windows' in args.restrict_file_names \ else 'unix' ascii_only = 'ascii' in args.restrict_file_names no_control = 'nocontrol' not in args.restrict_file_names if 'lower' in args.restrict_file_names: case = 'lower' elif 'upper' in args.restrict_file_names: case = 'upper' else: case = None path_namer = session.factory.new( 'PathNamer', args.directory_prefix, index=args.default_page, use_dir=use_dir, cut=args.cut_dirs, protocol=args.protocol_directories, hostname=args.host_directories, os_type=os_type, ascii_only=ascii_only, no_control=no_control, case=case, max_filename_length=args.max_filename_length, ) if args.recursive or args.page_requisites or args.continue_download: if args.clobber_method == 'disable': file_class = OverwriteFileWriter else: file_class = IgnoreFileWriter elif args.timestamping: file_class = TimestampingFileWriter else: file_class = AntiClobberFileWriter session.factory.class_map['FileWriter'] = file_class return session.factory.new( 'FileWriter', path_namer, file_continuing=args.continue_download, headers_included=args.save_headers, local_timestamping=args.use_server_timestamps, adjust_extension=args.adjust_extension, content_disposition=args.content_disposition, trust_server_names=args.trust_server_names, )
[ "def", "_build_file_writer", "(", "cls", ",", "session", ":", "AppSession", ")", ":", "args", "=", "session", ".", "args", "if", "args", ".", "delete_after", ":", "return", "session", ".", "factory", ".", "new", "(", "'FileWriter'", ")", "# is a NullWriter",...
Create the File Writer. Returns: FileWriter: An instance of :class:`.writer.BaseFileWriter`.
[ "Create", "the", "File", "Writer", "." ]
python
train
titusjan/argos
argos/utils/cls.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/utils/cls.py#L245-L256
def is_text(var, allow_none=False): """ Returns True if var is a unicode text Result py-2 py-3 ----------------- ----- ----- b'bytes literal' False False 'string literal' False True u'unicode literal' True True Also works with the corresponding numpy types. """ return isinstance(var, six.text_type) or (var is None and allow_none)
[ "def", "is_text", "(", "var", ",", "allow_none", "=", "False", ")", ":", "return", "isinstance", "(", "var", ",", "six", ".", "text_type", ")", "or", "(", "var", "is", "None", "and", "allow_none", ")" ]
Returns True if var is a unicode text Result py-2 py-3 ----------------- ----- ----- b'bytes literal' False False 'string literal' False True u'unicode literal' True True Also works with the corresponding numpy types.
[ "Returns", "True", "if", "var", "is", "a", "unicode", "text" ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/frontend/qt/console/frontend_widget.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/console/frontend_widget.py#L36-L59
def highlightBlock(self, string): """ Highlight a block of text. Reimplemented to highlight selectively. """ if not self.highlighting_on: return # The input to this function is a unicode string that may contain # paragraph break characters, non-breaking spaces, etc. Here we acquire # the string as plain text so we can compare it. current_block = self.currentBlock() string = self._frontend._get_block_plain_text(current_block) # Decide whether to check for the regular or continuation prompt. if current_block.contains(self._frontend._prompt_pos): prompt = self._frontend._prompt else: prompt = self._frontend._continuation_prompt # Only highlight if we can identify a prompt, but make sure not to # highlight the prompt. if string.startswith(prompt): self._current_offset = len(prompt) string = string[len(prompt):] super(FrontendHighlighter, self).highlightBlock(string)
[ "def", "highlightBlock", "(", "self", ",", "string", ")", ":", "if", "not", "self", ".", "highlighting_on", ":", "return", "# The input to this function is a unicode string that may contain", "# paragraph break characters, non-breaking spaces, etc. Here we acquire", "# the string a...
Highlight a block of text. Reimplemented to highlight selectively.
[ "Highlight", "a", "block", "of", "text", ".", "Reimplemented", "to", "highlight", "selectively", "." ]
python
test
PyCQA/pylint
pylint/pyreverse/writer.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/pyreverse/writer.py#L112-L117
def set_printer(self, file_name, basename): """initialize DotWriter and add options for layout. """ layout = dict(rankdir="BT") self.printer = DotBackend(basename, additional_param=layout) self.file_name = file_name
[ "def", "set_printer", "(", "self", ",", "file_name", ",", "basename", ")", ":", "layout", "=", "dict", "(", "rankdir", "=", "\"BT\"", ")", "self", ".", "printer", "=", "DotBackend", "(", "basename", ",", "additional_param", "=", "layout", ")", "self", "....
initialize DotWriter and add options for layout.
[ "initialize", "DotWriter", "and", "add", "options", "for", "layout", "." ]
python
test
hendrix/hendrix
hendrix/deploy/base.py
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/deploy/base.py#L99-L121
def getConf(cls, settings, options): "updates the options dict to use config options in the settings module" ports = ['http_port', 'https_port', 'cache_port'] for port_name in ports: port = getattr(settings, port_name.upper(), None) # only use the settings ports if the defaults were left unchanged default = getattr(defaults, port_name.upper()) if port and options.get(port_name) == default: options[port_name] = port _opts = [ ('key', 'hx_private_key'), ('cert', 'hx_certficate'), ('wsgi', 'wsgi_application') ] for opt_name, settings_name in _opts: opt = getattr(settings, settings_name.upper(), None) if opt: options[opt_name] = opt if not options['settings']: options['settings'] = environ['DJANGO_SETTINGS_MODULE'] return options
[ "def", "getConf", "(", "cls", ",", "settings", ",", "options", ")", ":", "ports", "=", "[", "'http_port'", ",", "'https_port'", ",", "'cache_port'", "]", "for", "port_name", "in", "ports", ":", "port", "=", "getattr", "(", "settings", ",", "port_name", "...
updates the options dict to use config options in the settings module
[ "updates", "the", "options", "dict", "to", "use", "config", "options", "in", "the", "settings", "module" ]
python
train
ska-sa/katcp-python
katcp/resource_client.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/resource_client.py#L1381-L1390
def set_ioloop(self, ioloop=None): """Set the tornado ioloop to use Defaults to tornado.ioloop.IOLoop.current() if set_ioloop() is not called or if ioloop=None. Must be called before start() """ ioloop = ioloop or tornado.ioloop.IOLoop.current() self.ioloop = ioloop for res in dict.values(self.children): res.set_ioloop(ioloop)
[ "def", "set_ioloop", "(", "self", ",", "ioloop", "=", "None", ")", ":", "ioloop", "=", "ioloop", "or", "tornado", ".", "ioloop", ".", "IOLoop", ".", "current", "(", ")", "self", ".", "ioloop", "=", "ioloop", "for", "res", "in", "dict", ".", "values",...
Set the tornado ioloop to use Defaults to tornado.ioloop.IOLoop.current() if set_ioloop() is not called or if ioloop=None. Must be called before start()
[ "Set", "the", "tornado", "ioloop", "to", "use" ]
python
train
zhanglab/psamm
psamm/commands/primarypairs.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/commands/primarypairs.py#L162-L171
def _combine_transfers(self, result): """Combine multiple pair transfers into one.""" transfers = {} for reaction_id, c1, c2, form in result: key = reaction_id, c1, c2 combined_form = transfers.setdefault(key, Formula()) transfers[key] = combined_form | form for (reaction_id, c1, c2), form in iteritems(transfers): yield reaction_id, c1, c2, form
[ "def", "_combine_transfers", "(", "self", ",", "result", ")", ":", "transfers", "=", "{", "}", "for", "reaction_id", ",", "c1", ",", "c2", ",", "form", "in", "result", ":", "key", "=", "reaction_id", ",", "c1", ",", "c2", "combined_form", "=", "transfe...
Combine multiple pair transfers into one.
[ "Combine", "multiple", "pair", "transfers", "into", "one", "." ]
python
train
awslabs/aws-sam-cli
samcli/lib/logs/formatter.py
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/lib/logs/formatter.py#L114-L121
def _pretty_print_event(event, colored): """ Basic formatter to convert an event object to string """ event.timestamp = colored.yellow(event.timestamp) event.log_stream_name = colored.cyan(event.log_stream_name) return ' '.join([event.log_stream_name, event.timestamp, event.message])
[ "def", "_pretty_print_event", "(", "event", ",", "colored", ")", ":", "event", ".", "timestamp", "=", "colored", ".", "yellow", "(", "event", ".", "timestamp", ")", "event", ".", "log_stream_name", "=", "colored", ".", "cyan", "(", "event", ".", "log_strea...
Basic formatter to convert an event object to string
[ "Basic", "formatter", "to", "convert", "an", "event", "object", "to", "string" ]
python
train
dmwm/DBS
Server/Python/src/dbs/web/DBSWriterModel.py
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/web/DBSWriterModel.py#L254-L283
def insertBulkBlock(self): """ API to insert a bulk block :param blockDump: Output of the block dump command :type blockDump: dict """ try: body = request.body.read() indata = cjson.decode(body) if (indata.get("file_parent_list", []) and indata.get("dataset_parent_list", [])): dbsExceptionHandler("dbsException-invalid-input2", "insertBulkBlock: dataset and file parentages cannot be in the input at the same time", self.logger.exception, "insertBulkBlock: datset and file parentages cannot be in the input at the same time.") indata = validateJSONInputNoCopy("blockBulk", indata) self.dbsBlockInsert.putBlock(indata) except cjson.DecodeError as dc: dbsExceptionHandler("dbsException-invalid-input2", "Wrong format/data from insert BulkBlock input", self.logger.exception, str(dc)) except dbsException as de: dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message) except HTTPError as he: raise he except Exception as ex: #illegal variable name/number if str(ex).find("ORA-01036") != -1: dbsExceptionHandler("dbsException-invalid-input2", "illegal variable name/number from input", self.logger.exception, str(ex)) else: sError = "DBSWriterModel/insertBulkBlock. %s\n. Exception trace: \n %s" \ % (ex, traceback.format_exc()) dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
[ "def", "insertBulkBlock", "(", "self", ")", ":", "try", ":", "body", "=", "request", ".", "body", ".", "read", "(", ")", "indata", "=", "cjson", ".", "decode", "(", "body", ")", "if", "(", "indata", ".", "get", "(", "\"file_parent_list\"", ",", "[", ...
API to insert a bulk block :param blockDump: Output of the block dump command :type blockDump: dict
[ "API", "to", "insert", "a", "bulk", "block" ]
python
train
econ-ark/HARK
HARK/ConsumptionSaving/ConsPrefShockModel.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/ConsumptionSaving/ConsPrefShockModel.py#L308-L351
def usePointsForInterpolation(self,cNrm,mNrm,interpolator): ''' Make a basic solution object with a consumption function and marginal value function (unconditional on the preference shock). Parameters ---------- cNrm : np.array Consumption points for interpolation. mNrm : np.array Corresponding market resource points for interpolation. interpolator : function A function that constructs and returns a consumption function. Returns ------- solution_now : ConsumerSolution The solution to this period's consumption-saving problem, with a consumption function, marginal value function, and minimum m. ''' # Make the preference-shock specific consumption functions PrefShkCount = self.PrefShkVals.size cFunc_list = [] for j in range(PrefShkCount): MPCmin_j = self.MPCminNow*self.PrefShkVals[j]**(1.0/self.CRRA) cFunc_this_shock = LowerEnvelope(LinearInterp(mNrm[j,:],cNrm[j,:], intercept_limit=self.hNrmNow*MPCmin_j, slope_limit=MPCmin_j),self.cFuncNowCnst) cFunc_list.append(cFunc_this_shock) # Combine the list of consumption functions into a single interpolation cFuncNow = LinearInterpOnInterp1D(cFunc_list,self.PrefShkVals) # Make the ex ante marginal value function (before the preference shock) m_grid = self.aXtraGrid + self.mNrmMinNow vP_vec = np.zeros_like(m_grid) for j in range(PrefShkCount): # numeric integration over the preference shock vP_vec += self.uP(cFunc_list[j](m_grid))*self.PrefShkPrbs[j]*self.PrefShkVals[j] vPnvrs_vec = self.uPinv(vP_vec) vPfuncNow = MargValueFunc(LinearInterp(m_grid,vPnvrs_vec),self.CRRA) # Store the results in a solution object and return it solution_now = ConsumerSolution(cFunc=cFuncNow, vPfunc=vPfuncNow, mNrmMin=self.mNrmMinNow) return solution_now
[ "def", "usePointsForInterpolation", "(", "self", ",", "cNrm", ",", "mNrm", ",", "interpolator", ")", ":", "# Make the preference-shock specific consumption functions", "PrefShkCount", "=", "self", ".", "PrefShkVals", ".", "size", "cFunc_list", "=", "[", "]", "for", ...
Make a basic solution object with a consumption function and marginal value function (unconditional on the preference shock). Parameters ---------- cNrm : np.array Consumption points for interpolation. mNrm : np.array Corresponding market resource points for interpolation. interpolator : function A function that constructs and returns a consumption function. Returns ------- solution_now : ConsumerSolution The solution to this period's consumption-saving problem, with a consumption function, marginal value function, and minimum m.
[ "Make", "a", "basic", "solution", "object", "with", "a", "consumption", "function", "and", "marginal", "value", "function", "(", "unconditional", "on", "the", "preference", "shock", ")", "." ]
python
train
wonambi-python/wonambi
wonambi/widgets/notes.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/notes.py#L1292-L1310
def new_rater(self): """Action: add a new rater. """ if self.annot is None: # remove if buttons are disabled self.parent.statusBar().showMessage('No score file loaded') return newuser = NewUserDialog(self.parent.value('scoring_window')) answer = newuser.exec_() if answer == QDialog.Rejected: return rater_name = newuser.rater_name.text() if rater_name != '': self.annot.add_rater(rater_name, newuser.epoch_length.value()) self.display_notes() self.parent.create_menubar()
[ "def", "new_rater", "(", "self", ")", ":", "if", "self", ".", "annot", "is", "None", ":", "# remove if buttons are disabled", "self", ".", "parent", ".", "statusBar", "(", ")", ".", "showMessage", "(", "'No score file loaded'", ")", "return", "newuser", "=", ...
Action: add a new rater.
[ "Action", ":", "add", "a", "new", "rater", "." ]
python
train
pybel/pybel
src/pybel/struct/utils.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/utils.py#L13-L28
def update_metadata(source, target) -> None: """Update the namespace and annotation metadata in the target graph. :param pybel.BELGraph source: :param pybel.BELGraph target: """ target.namespace_url.update(source.namespace_url) target.namespace_pattern.update(source.namespace_pattern) target.annotation_url.update(source.annotation_url) target.annotation_pattern.update(source.annotation_pattern) for keyword, values in source.annotation_list.items(): if keyword not in target.annotation_list: target.annotation_list[keyword] = values else: target.annotation_list[keyword].update(values)
[ "def", "update_metadata", "(", "source", ",", "target", ")", "->", "None", ":", "target", ".", "namespace_url", ".", "update", "(", "source", ".", "namespace_url", ")", "target", ".", "namespace_pattern", ".", "update", "(", "source", ".", "namespace_pattern",...
Update the namespace and annotation metadata in the target graph. :param pybel.BELGraph source: :param pybel.BELGraph target:
[ "Update", "the", "namespace", "and", "annotation", "metadata", "in", "the", "target", "graph", "." ]
python
train
e7dal/bubble3
bubble3/util/value_path.py
https://github.com/e7dal/bubble3/blob/59c735281a95b44f6263a25f4d6ce24fca520082/bubble3/util/value_path.py#L64-L74
def _make(self, key, content): """clean""" pass self.say('make a new key>>>' + key + '>>>with>>>:' + str(content)) if key.isdigit(): i = int(key) # list index [p] self.say('extending parent list to contain index:' + key) # make a list with size return self._list(i, content) else: return self._dict(key, content)
[ "def", "_make", "(", "self", ",", "key", ",", "content", ")", ":", "pass", "self", ".", "say", "(", "'make a new key>>>'", "+", "key", "+", "'>>>with>>>:'", "+", "str", "(", "content", ")", ")", "if", "key", ".", "isdigit", "(", ")", ":", "i", "=",...
clean
[ "clean" ]
python
train
saltstack/salt
salt/modules/mandrill.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mandrill.py#L64-L77
def _get_api_params(api_url=None, api_version=None, api_key=None): ''' Retrieve the API params from the config file. ''' mandrill_cfg = __salt__['config.merge']('mandrill') if not mandrill_cfg: mandrill_cfg = {} return { 'api_url': api_url or mandrill_cfg.get('api_url') or BASE_URL, # optional 'api_key': api_key or mandrill_cfg.get('key'), # mandatory 'api_version': api_version or mandrill_cfg.get('api_version') or DEFAULT_VERSION }
[ "def", "_get_api_params", "(", "api_url", "=", "None", ",", "api_version", "=", "None", ",", "api_key", "=", "None", ")", ":", "mandrill_cfg", "=", "__salt__", "[", "'config.merge'", "]", "(", "'mandrill'", ")", "if", "not", "mandrill_cfg", ":", "mandrill_cf...
Retrieve the API params from the config file.
[ "Retrieve", "the", "API", "params", "from", "the", "config", "file", "." ]
python
train
bavovanachte/sphinx-wavedrom
sphinxcontrib/wavedrom.py
https://github.com/bavovanachte/sphinx-wavedrom/blob/a8cfb4422ba2163453db669348f273af2e72f324/sphinxcontrib/wavedrom.py#L130-L171
def render_wavedrom(self, node, outpath, bname, format): """ Render a wavedrom image """ # Try to convert node, raise error with code on failure try: svgout = WaveDrom().renderWaveForm(0, json.loads(node['code'])) except JSONDecodeError as e: raise SphinxError("Cannot render the following json code: \n{} \n\nError: {}".format(node['code'], e)) if not os.path.exists(outpath): os.makedirs(outpath) # SVG can be directly written and is supported on all versions if format == 'image/svg+xml': fname = "{}.{}".format(bname, "svg") fpath = os.path.join(outpath, fname) svgout.saveas(fpath) return fname # It gets a bit ugly, if the output does not support svg. We use cairosvg, because it is the easiest # to use (no dependency on installed programs). But it only works for Python 3. try: import cairosvg except: raise SphinxError(__("Cannot import 'cairosvg'. In Python 2 wavedrom figures other than svg are " "not supported, in Python 3 ensure 'cairosvg' is installed.")) if format == 'application/pdf': fname = "{}.{}".format(bname, "pdf") fpath = os.path.join(outpath, fname) cairosvg.svg2pdf(svgout.tostring(), write_to=fpath) return fname if format == 'image/png': fname = "{}.{}".format(bname, "png") fpath = os.path.join(outpath, fname) cairosvg.svg2png(svgout.tostring(), write_to=fpath) return fname raise SphinxError("No valid wavedrom conversion supplied")
[ "def", "render_wavedrom", "(", "self", ",", "node", ",", "outpath", ",", "bname", ",", "format", ")", ":", "# Try to convert node, raise error with code on failure", "try", ":", "svgout", "=", "WaveDrom", "(", ")", ".", "renderWaveForm", "(", "0", ",", "json", ...
Render a wavedrom image
[ "Render", "a", "wavedrom", "image" ]
python
train
aws/sagemaker-python-sdk
src/sagemaker/session.py
https://github.com/aws/sagemaker-python-sdk/blob/a9e724c7d3f5572b68c3903548c792a59d99799a/src/sagemaker/session.py#L1266-L1294
def production_variant(model_name, instance_type, initial_instance_count=1, variant_name='AllTraffic', initial_weight=1, accelerator_type=None): """Create a production variant description suitable for use in a ``ProductionVariant`` list as part of a ``CreateEndpointConfig`` request. Args: model_name (str): The name of the SageMaker model this production variant references. instance_type (str): The EC2 instance type for this production variant. For example, 'ml.c4.8xlarge'. initial_instance_count (int): The initial instance count for this production variant (default: 1). variant_name (string): The ``VariantName`` of this production variant (default: 'AllTraffic'). initial_weight (int): The relative ``InitialVariantWeight`` of this production variant (default: 1). accelerator_type (str): Type of Elastic Inference accelerator for this production variant. For example, 'ml.eia1.medium'. For more information: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html Returns: dict[str, str]: An SageMaker ``ProductionVariant`` description """ production_variant_configuration = { 'ModelName': model_name, 'InstanceType': instance_type, 'InitialInstanceCount': initial_instance_count, 'VariantName': variant_name, 'InitialVariantWeight': initial_weight } if accelerator_type: production_variant_configuration['AcceleratorType'] = accelerator_type return production_variant_configuration
[ "def", "production_variant", "(", "model_name", ",", "instance_type", ",", "initial_instance_count", "=", "1", ",", "variant_name", "=", "'AllTraffic'", ",", "initial_weight", "=", "1", ",", "accelerator_type", "=", "None", ")", ":", "production_variant_configuration"...
Create a production variant description suitable for use in a ``ProductionVariant`` list as part of a ``CreateEndpointConfig`` request. Args: model_name (str): The name of the SageMaker model this production variant references. instance_type (str): The EC2 instance type for this production variant. For example, 'ml.c4.8xlarge'. initial_instance_count (int): The initial instance count for this production variant (default: 1). variant_name (string): The ``VariantName`` of this production variant (default: 'AllTraffic'). initial_weight (int): The relative ``InitialVariantWeight`` of this production variant (default: 1). accelerator_type (str): Type of Elastic Inference accelerator for this production variant. For example, 'ml.eia1.medium'. For more information: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html Returns: dict[str, str]: An SageMaker ``ProductionVariant`` description
[ "Create", "a", "production", "variant", "description", "suitable", "for", "use", "in", "a", "ProductionVariant", "list", "as", "part", "of", "a", "CreateEndpointConfig", "request", "." ]
python
train
glottobank/python-newick
src/newick.py
https://github.com/glottobank/python-newick/blob/e8d4d1e4610f271d0f0e5cb86c0e0360b43bd702/src/newick.py#L341-L345
def remove_internal_names(self): """ Set the name of all non-leaf nodes in the subtree to None. """ self.visit(lambda n: setattr(n, 'name', None), lambda n: not n.is_leaf)
[ "def", "remove_internal_names", "(", "self", ")", ":", "self", ".", "visit", "(", "lambda", "n", ":", "setattr", "(", "n", ",", "'name'", ",", "None", ")", ",", "lambda", "n", ":", "not", "n", ".", "is_leaf", ")" ]
Set the name of all non-leaf nodes in the subtree to None.
[ "Set", "the", "name", "of", "all", "non", "-", "leaf", "nodes", "in", "the", "subtree", "to", "None", "." ]
python
test
Karaage-Cluster/karaage
karaage/plugins/kgapplications/views/project.py
https://github.com/Karaage-Cluster/karaage/blob/2f4c8b4e2d728b3fcbb151160c49000f1c04f5c9/karaage/plugins/kgapplications/views/project.py#L131-L191
def new_application(request): """ A new application by a user to start a new project. """ # Note default kgapplications/index.html will display error if user logged # in. if not settings.ALLOW_REGISTRATIONS: return render( template_name='kgapplications/project_common_disabled.html', context={}, request=request) roles = {'is_applicant', 'is_authorised'} if not request.user.is_authenticated: attrs, _ = saml.parse_attributes(request) defaults = {'email': attrs['email']} form = forms.UnauthenticatedInviteUserApplicationForm( request.POST or None, initial=defaults) if request.method == 'POST': if form.is_valid(): email = form.cleaned_data['email'] applicant, existing_person = get_applicant_from_email(email) # If applicant is None then there were multiple persons found. # This should never happen as the # UnauthenticatedInviteUserApplicationForm form disallows # existing users applying unauthenticated. assert applicant is not None # Similarly existing_person should always be False here. assert not existing_person application = ProjectApplication() application.applicant = applicant application.save() state_machine = get_application_state_machine() state_machine.start(request, application, roles) # we do not show unauthenticated users the application at this # stage. url = reverse('index') return HttpResponseRedirect(url) return render( template_name='kgapplications/' 'project_common_invite_unauthenticated.html', context={'form': form, }, request=request) else: if request.method == 'POST': person = request.user application = ProjectApplication() application.applicant = person application.save() state_machine = get_application_state_machine() response = state_machine.start(request, application, roles) return response return render( template_name='kgapplications/' 'project_common_invite_authenticated.html', context={}, request=request)
[ "def", "new_application", "(", "request", ")", ":", "# Note default kgapplications/index.html will display error if user logged", "# in.", "if", "not", "settings", ".", "ALLOW_REGISTRATIONS", ":", "return", "render", "(", "template_name", "=", "'kgapplications/project_common_di...
A new application by a user to start a new project.
[ "A", "new", "application", "by", "a", "user", "to", "start", "a", "new", "project", "." ]
python
train
dbuscher/pois
pois/__init__.py
https://github.com/dbuscher/pois/blob/bb9d9a932e716b5d385221768027384691803aa3/pois/__init__.py#L73-L79
def FibreMode(gridSize,modeDiameter): """ Return a pupil-plane Gaussian mode with 1/e diameter given by *modeDiameter*, normalised so that integral power over the mode is unity """ rmode=modeDiameter/2 return np.exp(-(RadiusGrid(gridSize)/rmode)**2)/(np.sqrt(np.pi/2)*rmode)
[ "def", "FibreMode", "(", "gridSize", ",", "modeDiameter", ")", ":", "rmode", "=", "modeDiameter", "/", "2", "return", "np", ".", "exp", "(", "-", "(", "RadiusGrid", "(", "gridSize", ")", "/", "rmode", ")", "**", "2", ")", "/", "(", "np", ".", "sqrt...
Return a pupil-plane Gaussian mode with 1/e diameter given by *modeDiameter*, normalised so that integral power over the mode is unity
[ "Return", "a", "pupil", "-", "plane", "Gaussian", "mode", "with", "1", "/", "e", "diameter", "given", "by", "*", "modeDiameter", "*", "normalised", "so", "that", "integral", "power", "over", "the", "mode", "is", "unity" ]
python
train
pymc-devs/pymc
pymc/CommonDeterministics.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/CommonDeterministics.py#L472-L499
def pufunc(func): """ Called by pfunc to convert NumPy ufuncs to deterministic factories. """ def dtrm_generator(*args): if len(args) != func.nin: raise ValueError('invalid number of arguments') name = func.__name__ + '(' + '_'.join( [str(arg) for arg in list(args)]) + ')' doc_str = 'A deterministic returning %s(%s)' % ( func.__name__, ', '.join([str(arg) for arg in args])) parents = {} for i in xrange(func.nin): parents['in%i' % i] = args[i] def wrapper(**kwargs): return func(*[kwargs['in%i' % i] for i in xrange(func.nin)]) return pm.Deterministic( wrapper, doc_str, name, parents, trace=False, plot=False) dtrm_generator.__name__ = func.__name__ + '_deterministic_generator' dtrm_generator.__doc__ = """ Deterministic-generating wrapper for %s. Original docstring: %s %s """ % (func.__name__, '_' * 60, func.__doc__) return dtrm_generator
[ "def", "pufunc", "(", "func", ")", ":", "def", "dtrm_generator", "(", "*", "args", ")", ":", "if", "len", "(", "args", ")", "!=", "func", ".", "nin", ":", "raise", "ValueError", "(", "'invalid number of arguments'", ")", "name", "=", "func", ".", "__na...
Called by pfunc to convert NumPy ufuncs to deterministic factories.
[ "Called", "by", "pfunc", "to", "convert", "NumPy", "ufuncs", "to", "deterministic", "factories", "." ]
python
train
erdewit/ib_insync
ib_insync/ticker.py
https://github.com/erdewit/ib_insync/blob/d0646a482590f5cb7bfddbd1f0870f8c4bc1df80/ib_insync/ticker.py#L122-L135
def marketPrice(self) -> float: """ Return the first available one of * last price if within current bid/ask; * average of bid and ask (midpoint); * close price. """ price = self.last if ( self.hasBidAsk() and self.bid <= self.last <= self.ask) else \ self.midpoint() if isNan(price): price = self.close return price
[ "def", "marketPrice", "(", "self", ")", "->", "float", ":", "price", "=", "self", ".", "last", "if", "(", "self", ".", "hasBidAsk", "(", ")", "and", "self", ".", "bid", "<=", "self", ".", "last", "<=", "self", ".", "ask", ")", "else", "self", "."...
Return the first available one of * last price if within current bid/ask; * average of bid and ask (midpoint); * close price.
[ "Return", "the", "first", "available", "one", "of" ]
python
train
wesyoung/pyzyre
czmq/_czmq_ctypes.py
https://github.com/wesyoung/pyzyre/blob/22d4c757acefcfdb700d3802adaf30b402bb9eea/czmq/_czmq_ctypes.py#L1201-L1206
def set(self, data, size): """ Set chunk data from user-supplied data; truncate if too large. Data may be null. Returns actual size of chunk """ return lib.zchunk_set(self._as_parameter_, data, size)
[ "def", "set", "(", "self", ",", "data", ",", "size", ")", ":", "return", "lib", ".", "zchunk_set", "(", "self", ".", "_as_parameter_", ",", "data", ",", "size", ")" ]
Set chunk data from user-supplied data; truncate if too large. Data may be null. Returns actual size of chunk
[ "Set", "chunk", "data", "from", "user", "-", "supplied", "data", ";", "truncate", "if", "too", "large", ".", "Data", "may", "be", "null", ".", "Returns", "actual", "size", "of", "chunk" ]
python
train
saltstack/salt
salt/fileclient.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileclient.py#L169-L180
def get_file(self, path, dest='', makedirs=False, saltenv='base', gzip=None, cachedir=None): ''' Copies a file from the local files or master depending on implementation ''' raise NotImplementedError
[ "def", "get_file", "(", "self", ",", "path", ",", "dest", "=", "''", ",", "makedirs", "=", "False", ",", "saltenv", "=", "'base'", ",", "gzip", "=", "None", ",", "cachedir", "=", "None", ")", ":", "raise", "NotImplementedError" ]
Copies a file from the local files or master depending on implementation
[ "Copies", "a", "file", "from", "the", "local", "files", "or", "master", "depending", "on", "implementation" ]
python
train
loli/medpy
medpy/metric/histogram.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/metric/histogram.py#L572-L625
def jensen_shannon(h1, h2): # 85 us @array, 110 us @list \w 100 bins r""" Jensen-Shannon divergence. A symmetric and numerically more stable empirical extension of the Kullback-Leibler divergence. The Jensen Shannon divergence between two histograms :math:`H` and :math:`H'` of size :math:`m` is defined as: .. math:: d_{JSD}(H, H') = \frac{1}{2} d_{KL}(H, H^*) + \frac{1}{2} d_{KL}(H', H^*) with :math:`H^*=\frac{1}{2}(H + H')`. *Attributes:* - semimetric *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, 1]` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` *Attributes for not-normalized histograms:* - :math:`d(H, H')\in[0, \infty)` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` *Attributes for not-equal histograms:* - not applicable Parameters ---------- h1 : sequence The first histogram. h2 : sequence The second histogram, same bins as ``h1``. Returns ------- jensen_shannon : float Jensen-Shannon divergence. """ h1, h2 = __prepare_histogram(h1, h2) s = (h1 + h2) / 2. return __kullback_leibler(h1, s) / 2. + __kullback_leibler(h2, s) / 2.
[ "def", "jensen_shannon", "(", "h1", ",", "h2", ")", ":", "# 85 us @array, 110 us @list \\w 100 bins", "h1", ",", "h2", "=", "__prepare_histogram", "(", "h1", ",", "h2", ")", "s", "=", "(", "h1", "+", "h2", ")", "/", "2.", "return", "__kullback_leibler", "(...
r""" Jensen-Shannon divergence. A symmetric and numerically more stable empirical extension of the Kullback-Leibler divergence. The Jensen Shannon divergence between two histograms :math:`H` and :math:`H'` of size :math:`m` is defined as: .. math:: d_{JSD}(H, H') = \frac{1}{2} d_{KL}(H, H^*) + \frac{1}{2} d_{KL}(H', H^*) with :math:`H^*=\frac{1}{2}(H + H')`. *Attributes:* - semimetric *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, 1]` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` *Attributes for not-normalized histograms:* - :math:`d(H, H')\in[0, \infty)` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` *Attributes for not-equal histograms:* - not applicable Parameters ---------- h1 : sequence The first histogram. h2 : sequence The second histogram, same bins as ``h1``. Returns ------- jensen_shannon : float Jensen-Shannon divergence.
[ "r", "Jensen", "-", "Shannon", "divergence", ".", "A", "symmetric", "and", "numerically", "more", "stable", "empirical", "extension", "of", "the", "Kullback", "-", "Leibler", "divergence", ".", "The", "Jensen", "Shannon", "divergence", "between", "two", "histogr...
python
train
Shapeways/coyote_framework
coyote_framework/webdriver/webdriverwrapper/WebElementWrapper.py
https://github.com/Shapeways/coyote_framework/blob/cb29899b984a21d56bf65d0b1d907073948fe16c/coyote_framework/webdriver/webdriverwrapper/WebElementWrapper.py#L737-L747
def find_all(self, locator): """ Find wrapper, finds all elements @type locator: webdriverwrapper.support.locator.Locator @param locator: locator used in search @rtype: list @return: A list of WebElementWrappers """ return self.driver_wrapper.find(locator, True, self.element)
[ "def", "find_all", "(", "self", ",", "locator", ")", ":", "return", "self", ".", "driver_wrapper", ".", "find", "(", "locator", ",", "True", ",", "self", ".", "element", ")" ]
Find wrapper, finds all elements @type locator: webdriverwrapper.support.locator.Locator @param locator: locator used in search @rtype: list @return: A list of WebElementWrappers
[ "Find", "wrapper", "finds", "all", "elements" ]
python
train
dependencies-io/cli
dependencies_cli/project_template/{{cookiecutter.name}}/src/utils.py
https://github.com/dependencies-io/cli/blob/d8ae97343c48a61d6614d3e8af6a981b4cfb1bcb/dependencies_cli/project_template/{{cookiecutter.name}}/src/utils.py#L27-L37
def mock_lockfile_update(path): """ This is a mock update. In place of this, you might simply shell out to a command like `yarn upgrade`. """ updated_lockfile_contents = { 'package1': '1.2.0' } with open(path, 'w+') as f: f.write(json.dumps(updated_lockfile_contents, indent=4)) return updated_lockfile_contents
[ "def", "mock_lockfile_update", "(", "path", ")", ":", "updated_lockfile_contents", "=", "{", "'package1'", ":", "'1.2.0'", "}", "with", "open", "(", "path", ",", "'w+'", ")", "as", "f", ":", "f", ".", "write", "(", "json", ".", "dumps", "(", "updated_loc...
This is a mock update. In place of this, you might simply shell out to a command like `yarn upgrade`.
[ "This", "is", "a", "mock", "update", ".", "In", "place", "of", "this", "you", "might", "simply", "shell", "out", "to", "a", "command", "like", "yarn", "upgrade", "." ]
python
train
pywbem/pywbem
pywbem_mock/_wbemconnection_mock.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem_mock/_wbemconnection_mock.py#L3294-L3324
def _fake_closeenumeration(self, namespace, **params): """ Implements WBEM server responder for :meth:`~pywbem.WBEMConnection.CloseEnumeration` with data from the instance repository. If the EnumerationContext is valid it removes it from the context repository. Otherwise it returns an exception. """ self._validate_namespace(namespace) context_id = params['EnumerationContext'] try: context_data = self.enumeration_contexts[context_id] except KeyError: raise CIMError( CIM_ERR_INVALID_ENUMERATION_CONTEXT, _format("EnumerationContext {0!A} not found in mock server " "enumeration contexts.", context_id)) # This is probably relatively useless because pywbem handles # namespace internally but it could catch an error if user plays # with the context. if context_data['namespace'] != namespace: raise CIMError( CIM_ERR_INVALID_NAMESPACE, _format("Invalid namespace {0!A} for CloseEnumeration {1!A}", namespace, context_id)) del self.enumeration_contexts[context_id]
[ "def", "_fake_closeenumeration", "(", "self", ",", "namespace", ",", "*", "*", "params", ")", ":", "self", ".", "_validate_namespace", "(", "namespace", ")", "context_id", "=", "params", "[", "'EnumerationContext'", "]", "try", ":", "context_data", "=", "self"...
Implements WBEM server responder for :meth:`~pywbem.WBEMConnection.CloseEnumeration` with data from the instance repository. If the EnumerationContext is valid it removes it from the context repository. Otherwise it returns an exception.
[ "Implements", "WBEM", "server", "responder", "for", ":", "meth", ":", "~pywbem", ".", "WBEMConnection", ".", "CloseEnumeration", "with", "data", "from", "the", "instance", "repository", "." ]
python
train
KE-works/pykechain
pykechain/client.py
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/client.py#L720-L755
def service_executions(self, name=None, pk=None, scope=None, service=None, **kwargs): """ Retrieve Service Executions. If additional `keyword=value` arguments are provided, these are added to the request parameters. Please refer to the documentation of the KE-chain API for additional query parameters. :param name: (optional) name to limit the search for :type name: basestring or None :param pk: (optional) primary key or id (UUID) of the service to search for :type pk: basestring or None :param scope: (optional) id (UUID) of the scope to search in :type scope: basestring or None :param service: (optional) service UUID to filter on :type service: basestring or None :param kwargs: (optional) additional search keyword arguments :type kwargs: dict or None :return: a single :class:`models.ServiceExecution` object :raises NotFoundError: When no `ServiceExecution` object is found """ request_params = { 'name': name, 'id': pk, 'service': service, 'scope': scope } if kwargs: request_params.update(**kwargs) r = self._request('GET', self._build_url('service_executions'), params=request_params) if r.status_code != requests.codes.ok: # pragma: no cover raise NotFoundError("Could not retrieve service executions") data = r.json() return [ServiceExecution(service_exeuction, client=self) for service_exeuction in data['results']]
[ "def", "service_executions", "(", "self", ",", "name", "=", "None", ",", "pk", "=", "None", ",", "scope", "=", "None", ",", "service", "=", "None", ",", "*", "*", "kwargs", ")", ":", "request_params", "=", "{", "'name'", ":", "name", ",", "'id'", "...
Retrieve Service Executions. If additional `keyword=value` arguments are provided, these are added to the request parameters. Please refer to the documentation of the KE-chain API for additional query parameters. :param name: (optional) name to limit the search for :type name: basestring or None :param pk: (optional) primary key or id (UUID) of the service to search for :type pk: basestring or None :param scope: (optional) id (UUID) of the scope to search in :type scope: basestring or None :param service: (optional) service UUID to filter on :type service: basestring or None :param kwargs: (optional) additional search keyword arguments :type kwargs: dict or None :return: a single :class:`models.ServiceExecution` object :raises NotFoundError: When no `ServiceExecution` object is found
[ "Retrieve", "Service", "Executions", "." ]
python
train
Yubico/python-pyhsm
pyhsm/base.py
https://github.com/Yubico/python-pyhsm/blob/b6e2744d1ea15c352a0fc1d6ebc5950026b71311/pyhsm/base.py#L436-L464
def validate_aead_otp(self, public_id, otp, key_handle, aead): """ Ask YubiHSM to validate a YubiKey OTP using an AEAD and a key_handle to decrypt the AEAD. @param public_id: The six bytes public id of the YubiKey @param otp: The one time password (OTP) to validate @param key_handle: The key handle that can decrypt the AEAD @param aead: AEAD containing the cryptographic key and permission flags @type public_id: string @type otp: string @type key_handle: integer or string @type aead: L{YHSM_GeneratedAEAD} or string @returns: validation response @rtype: L{YHSM_ValidationResult} @see: L{pyhsm.validate_cmd.YHSM_Cmd_AEAD_Validate_OTP} """ if type(public_id) is not str: assert() if type(otp) is not str: assert() if type(key_handle) is not int: assert() if type(aead) is not str: assert() return pyhsm.validate_cmd.YHSM_Cmd_AEAD_Validate_OTP( \ self.stick, public_id, otp, key_handle, aead).execute()
[ "def", "validate_aead_otp", "(", "self", ",", "public_id", ",", "otp", ",", "key_handle", ",", "aead", ")", ":", "if", "type", "(", "public_id", ")", "is", "not", "str", ":", "assert", "(", ")", "if", "type", "(", "otp", ")", "is", "not", "str", ":...
Ask YubiHSM to validate a YubiKey OTP using an AEAD and a key_handle to decrypt the AEAD. @param public_id: The six bytes public id of the YubiKey @param otp: The one time password (OTP) to validate @param key_handle: The key handle that can decrypt the AEAD @param aead: AEAD containing the cryptographic key and permission flags @type public_id: string @type otp: string @type key_handle: integer or string @type aead: L{YHSM_GeneratedAEAD} or string @returns: validation response @rtype: L{YHSM_ValidationResult} @see: L{pyhsm.validate_cmd.YHSM_Cmd_AEAD_Validate_OTP}
[ "Ask", "YubiHSM", "to", "validate", "a", "YubiKey", "OTP", "using", "an", "AEAD", "and", "a", "key_handle", "to", "decrypt", "the", "AEAD", "." ]
python
train
neurodata/ndio
ndio/remote/ndingest.py
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/ndingest.py#L261-L277
def project_dict(self, project_name, token_name, public): """ Genarate the project dictionary. """ project_dict = {} project_dict['project_name'] = project_name if token_name is not None: if token_name == '': project_dict['token_name'] = project_name else: project_dict['token_name'] = token_name else: project_dict['token_name'] = project_name if public is not None: project_dict['public'] = public return project_dict
[ "def", "project_dict", "(", "self", ",", "project_name", ",", "token_name", ",", "public", ")", ":", "project_dict", "=", "{", "}", "project_dict", "[", "'project_name'", "]", "=", "project_name", "if", "token_name", "is", "not", "None", ":", "if", "token_na...
Genarate the project dictionary.
[ "Genarate", "the", "project", "dictionary", "." ]
python
test
DistrictDataLabs/yellowbrick
yellowbrick/text/postag.py
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/text/postag.py#L118-L152
def fit(self, X, y=None, **kwargs): """ Fits the corpus to the appropriate tag map. Text documents must be tokenized & tagged before passing to fit. Parameters ---------- X : list or generator Should be provided as a list of documents or a generator that yields a list of documents that contain a list of sentences that contain (token, tag) tuples. y : ndarray or Series of length n An optional array of target values that are ignored by the visualizer. kwargs : dict Pass generic arguments to the drawing method Returns ------- self : instance Returns the instance of the transformer/visualizer """ # TODO: add support for other tagsets? if self.tagset == "penn_treebank": self.pos_tag_counts_ = self._penn_tag_map() self._handle_treebank(X) elif self.tagset == "universal": self.pos_tag_counts_ = self._uni_tag_map() self._handle_universal(X) self.draw() return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# TODO: add support for other tagsets?", "if", "self", ".", "tagset", "==", "\"penn_treebank\"", ":", "self", ".", "pos_tag_counts_", "=", "self", ".", "_penn_t...
Fits the corpus to the appropriate tag map. Text documents must be tokenized & tagged before passing to fit. Parameters ---------- X : list or generator Should be provided as a list of documents or a generator that yields a list of documents that contain a list of sentences that contain (token, tag) tuples. y : ndarray or Series of length n An optional array of target values that are ignored by the visualizer. kwargs : dict Pass generic arguments to the drawing method Returns ------- self : instance Returns the instance of the transformer/visualizer
[ "Fits", "the", "corpus", "to", "the", "appropriate", "tag", "map", ".", "Text", "documents", "must", "be", "tokenized", "&", "tagged", "before", "passing", "to", "fit", "." ]
python
train
ejeschke/ginga
ginga/trcalc.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/trcalc.py#L443-L465
def get_scaled_cutout_basic_view(shp, p1, p2, scales): """ Like get_scaled_cutout_basic, but returns the view/slice to extract from an image, instead of the extraction itself """ x1, y1 = p1[:2] x2, y2 = p2[:2] scale_x, scale_y = scales[:2] # calculate dimensions of NON-scaled cutout old_wd = x2 - x1 + 1 old_ht = y2 - y1 + 1 # calculate dimensions of scaled cutout new_wd = int(round(scale_x * old_wd)) new_ht = int(round(scale_y * old_ht)) if len(scales) == 2: return get_scaled_cutout_wdht_view(shp, x1, y1, x2, y2, new_wd, new_ht) z1, z2, scale_z = p1[2], p2[2], scales[2] old_dp = z2 - z1 + 1 new_dp = int(round(scale_z * old_dp)) return get_scaled_cutout_wdhtdp_view(shp, p1, p2, (new_wd, new_ht, new_dp))
[ "def", "get_scaled_cutout_basic_view", "(", "shp", ",", "p1", ",", "p2", ",", "scales", ")", ":", "x1", ",", "y1", "=", "p1", "[", ":", "2", "]", "x2", ",", "y2", "=", "p2", "[", ":", "2", "]", "scale_x", ",", "scale_y", "=", "scales", "[", ":"...
Like get_scaled_cutout_basic, but returns the view/slice to extract from an image, instead of the extraction itself
[ "Like", "get_scaled_cutout_basic", "but", "returns", "the", "view", "/", "slice", "to", "extract", "from", "an", "image", "instead", "of", "the", "extraction", "itself" ]
python
train
mardix/flask-cloudy
flask_cloudy.py
https://github.com/mardix/flask-cloudy/blob/8085d8fbbafec6c358f0d307bfcb795de50d4acb/flask_cloudy.py#L83-L101
def get_driver_class(provider): """ Return the driver class :param provider: str - provider name :return: """ if "." in provider: parts = provider.split('.') kls = parts.pop() path = '.'.join(parts) module = import_module(path) if not hasattr(module, kls): raise ImportError('{0} provider not found at {1}'.format( kls, path)) driver = getattr(module, kls) else: driver = getattr(Provider, provider.upper()) return get_driver(driver)
[ "def", "get_driver_class", "(", "provider", ")", ":", "if", "\".\"", "in", "provider", ":", "parts", "=", "provider", ".", "split", "(", "'.'", ")", "kls", "=", "parts", ".", "pop", "(", ")", "path", "=", "'.'", ".", "join", "(", "parts", ")", "mod...
Return the driver class :param provider: str - provider name :return:
[ "Return", "the", "driver", "class", ":", "param", "provider", ":", "str", "-", "provider", "name", ":", "return", ":" ]
python
train
kpdyer/regex2dfa
third_party/re2/lib/codereview/codereview.py
https://github.com/kpdyer/regex2dfa/blob/109f877e60ef0dfcb430f11516d215930b7b9936/third_party/re2/lib/codereview/codereview.py#L1277-L1385
def change(ui, repo, *pats, **opts): """create, edit or delete a change list Create, edit or delete a change list. A change list is a group of files to be reviewed and submitted together, plus a textual description of the change. Change lists are referred to by simple alphanumeric names. Changes must be reviewed before they can be submitted. In the absence of options, the change command opens the change list for editing in the default editor. Deleting a change with the -d or -D flag does not affect the contents of the files listed in that change. To revert the files listed in a change, use hg revert @123456 before running hg change -d 123456. """ if codereview_disabled: raise hg_util.Abort(codereview_disabled) dirty = {} if len(pats) > 0 and GoodCLName(pats[0]): name = pats[0] if len(pats) != 1: raise hg_util.Abort("cannot specify CL name and file patterns") pats = pats[1:] cl, err = LoadCL(ui, repo, name, web=True) if err != '': raise hg_util.Abort(err) if not cl.local and (opts["stdin"] or not opts["stdout"]): raise hg_util.Abort("cannot change non-local CL " + name) else: name = "new" cl = CL("new") if repo[None].branch() != "default": raise hg_util.Abort("cannot create CL outside default branch; switch with 'hg update default'") dirty[cl] = True files = ChangedFiles(ui, repo, pats, taken=Taken(ui, repo)) if opts["delete"] or opts["deletelocal"]: if opts["delete"] and opts["deletelocal"]: raise hg_util.Abort("cannot use -d and -D together") flag = "-d" if opts["deletelocal"]: flag = "-D" if name == "new": raise hg_util.Abort("cannot use "+flag+" with file patterns") if opts["stdin"] or opts["stdout"]: raise hg_util.Abort("cannot use "+flag+" with -i or -o") if not cl.local: raise hg_util.Abort("cannot change non-local CL " + name) if opts["delete"]: if cl.copied_from: raise hg_util.Abort("original author must delete CL; hg change -D will remove locally") PostMessage(ui, cl.name, "*** Abandoned ***", send_mail=cl.mailed) EditDesc(cl.name, closed=True, private=cl.private) cl.Delete(ui, repo) return if opts["stdin"]: s = sys.stdin.read() clx, line, err = ParseCL(s, name) if err != '': raise hg_util.Abort("error parsing change list: line %d: %s" % (line, err)) if clx.desc is not None: cl.desc = clx.desc; dirty[cl] = True if clx.reviewer is not None: cl.reviewer = clx.reviewer dirty[cl] = True if clx.cc is not None: cl.cc = clx.cc dirty[cl] = True if clx.files is not None: cl.files = clx.files dirty[cl] = True if clx.private != cl.private: cl.private = clx.private dirty[cl] = True if not opts["stdin"] and not opts["stdout"]: if name == "new": cl.files = files err = EditCL(ui, repo, cl) if err != "": raise hg_util.Abort(err) dirty[cl] = True for d, _ in dirty.items(): name = d.name d.Flush(ui, repo) if name == "new": d.Upload(ui, repo, quiet=True) if opts["stdout"]: ui.write(cl.EditorText()) elif opts["pending"]: ui.write(cl.PendingText()) elif name == "new": if ui.quiet: ui.write(cl.name) else: ui.write("CL created: " + cl.url + "\n") return
[ "def", "change", "(", "ui", ",", "repo", ",", "*", "pats", ",", "*", "*", "opts", ")", ":", "if", "codereview_disabled", ":", "raise", "hg_util", ".", "Abort", "(", "codereview_disabled", ")", "dirty", "=", "{", "}", "if", "len", "(", "pats", ")", ...
create, edit or delete a change list Create, edit or delete a change list. A change list is a group of files to be reviewed and submitted together, plus a textual description of the change. Change lists are referred to by simple alphanumeric names. Changes must be reviewed before they can be submitted. In the absence of options, the change command opens the change list for editing in the default editor. Deleting a change with the -d or -D flag does not affect the contents of the files listed in that change. To revert the files listed in a change, use hg revert @123456 before running hg change -d 123456.
[ "create", "edit", "or", "delete", "a", "change", "list" ]
python
train
changhiskhan/poseidon
poseidon/droplet.py
https://github.com/changhiskhan/poseidon/blob/6d1cecbe02f1e510dd185fe23f88f7af35eb737f/poseidon/droplet.py#L453-L465
def delete(self, wait=True): """ Delete this droplet Parameters ---------- wait: bool, default True Whether to block until the pending action is completed """ resp = self.parent.delete(self.id) if wait: self.wait() return resp
[ "def", "delete", "(", "self", ",", "wait", "=", "True", ")", ":", "resp", "=", "self", ".", "parent", ".", "delete", "(", "self", ".", "id", ")", "if", "wait", ":", "self", ".", "wait", "(", ")", "return", "resp" ]
Delete this droplet Parameters ---------- wait: bool, default True Whether to block until the pending action is completed
[ "Delete", "this", "droplet" ]
python
valid
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavutil.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavutil.py#L222-L292
def post_message(self, msg): '''default post message call''' if '_posted' in msg.__dict__: return msg._posted = True msg._timestamp = time.time() type = msg.get_type() if type != 'HEARTBEAT' or (msg.type != mavlink.MAV_TYPE_GCS and msg.type != mavlink.MAV_TYPE_GIMBAL): self.messages[type] = msg if 'usec' in msg.__dict__: self.uptime = msg.usec * 1.0e-6 if 'time_boot_ms' in msg.__dict__: self.uptime = msg.time_boot_ms * 1.0e-3 if self._timestamp is not None: if self.notimestamps: msg._timestamp = self.uptime else: msg._timestamp = self._timestamp src_system = msg.get_srcSystem() src_component = msg.get_srcComponent() src_tuple = (src_system, src_component) radio_tuple = (ord('3'), ord('D')) if not (src_tuple == radio_tuple or msg.get_type() == 'BAD_DATA'): if not src_tuple in self.last_seq: last_seq = -1 else: last_seq = self.last_seq[src_tuple] seq = (last_seq+1) % 256 seq2 = msg.get_seq() if seq != seq2 and last_seq != -1: diff = (seq2 - seq) % 256 self.mav_loss += diff #print("lost %u seq=%u seq2=%u last_seq=%u src_system=%u %s" % (diff, seq, seq2, last_seq, src_system, msg.get_type())) self.last_seq[src_tuple] = seq2 self.mav_count += 1 self.timestamp = msg._timestamp if type == 'HEARTBEAT' and msg.get_srcComponent() != mavlink.MAV_COMP_ID_GIMBAL: self.target_system = msg.get_srcSystem() self.target_component = msg.get_srcComponent() if float(mavlink.WIRE_PROTOCOL_VERSION) >= 1 and msg.type != mavlink.MAV_TYPE_GCS: self.flightmode = mode_string_v10(msg) self.mav_type = msg.type self.base_mode = msg.base_mode elif type == 'PARAM_VALUE': s = str(msg.param_id) self.params[str(msg.param_id)] = msg.param_value if msg.param_index+1 == msg.param_count: self.param_fetch_in_progress = False self.param_fetch_complete = True elif type == 'SYS_STATUS' and mavlink.WIRE_PROTOCOL_VERSION == '0.9': self.flightmode = mode_string_v09(msg) elif type == 'GPS_RAW': if self.messages['HOME'].fix_type < 2: self.messages['HOME'] = msg elif type == 'GPS_RAW_INT': if self.messages['HOME'].fix_type < 3: self.messages['HOME'] = msg for hook in self.message_hooks: hook(self, msg) if (msg.get_signed() and self.mav.signing.link_id == 0 and msg.get_link_id() != 0 and self.target_system == msg.get_srcSystem() and self.target_component == msg.get_srcComponent()): # change to link_id from incoming packet self.mav.signing.link_id = msg.get_link_id()
[ "def", "post_message", "(", "self", ",", "msg", ")", ":", "if", "'_posted'", "in", "msg", ".", "__dict__", ":", "return", "msg", ".", "_posted", "=", "True", "msg", ".", "_timestamp", "=", "time", ".", "time", "(", ")", "type", "=", "msg", ".", "ge...
default post message call
[ "default", "post", "message", "call" ]
python
train
rbuffat/pyepw
pyepw/epw.py
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L919-L942
def title_of_design_condition(self, value=None): """Corresponds to IDD Field `title_of_design_condition` Args: value (str): value for IDD Field `title_of_design_condition` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value """ if value is not None: try: value = str(value) except ValueError: raise ValueError( 'value {} need to be of type str ' 'for field `title_of_design_condition`'.format(value)) if ',' in value: raise ValueError('value should not contain a comma ' 'for field `title_of_design_condition`') self._title_of_design_condition = value
[ "def", "title_of_design_condition", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "str", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to b...
Corresponds to IDD Field `title_of_design_condition` Args: value (str): value for IDD Field `title_of_design_condition` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "title_of_design_condition" ]
python
train
zhanglab/psamm
psamm/datasource/sbml.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/datasource/sbml.py#L987-L995
def _add_gene_list(self, parent_tag, gene_id_dict): """Create list of all gene products as sbml readable elements.""" list_all_genes = ET.SubElement(parent_tag, _tag( 'listOfGeneProducts', FBC_V2)) for id, label in sorted(iteritems(gene_id_dict)): gene_tag = ET.SubElement( list_all_genes, _tag('geneProduct', FBC_V2)) gene_tag.set(_tag('id', FBC_V2), id) gene_tag.set(_tag('label', FBC_V2), label)
[ "def", "_add_gene_list", "(", "self", ",", "parent_tag", ",", "gene_id_dict", ")", ":", "list_all_genes", "=", "ET", ".", "SubElement", "(", "parent_tag", ",", "_tag", "(", "'listOfGeneProducts'", ",", "FBC_V2", ")", ")", "for", "id", ",", "label", "in", "...
Create list of all gene products as sbml readable elements.
[ "Create", "list", "of", "all", "gene", "products", "as", "sbml", "readable", "elements", "." ]
python
train
Autodesk/cryptorito
cryptorito/__init__.py
https://github.com/Autodesk/cryptorito/blob/277fc7cc42c31c5bc37e26d8bf5a2ac746a6ea85/cryptorito/__init__.py#L214-L230
def fingerprint_from_keybase(fingerprint, kb_obj): """Extracts a key matching a specific fingerprint from a Keybase API response""" if 'public_keys' in kb_obj and \ 'pgp_public_keys' in kb_obj['public_keys']: for key in kb_obj['public_keys']['pgp_public_keys']: keyprint = fingerprint_from_var(key).lower() fingerprint = fingerprint.lower() if fingerprint == keyprint or \ keyprint.startswith(fingerprint) or \ keyprint.endswith(fingerprint): return { 'fingerprint': keyprint, 'bundle': key } return None
[ "def", "fingerprint_from_keybase", "(", "fingerprint", ",", "kb_obj", ")", ":", "if", "'public_keys'", "in", "kb_obj", "and", "'pgp_public_keys'", "in", "kb_obj", "[", "'public_keys'", "]", ":", "for", "key", "in", "kb_obj", "[", "'public_keys'", "]", "[", "'p...
Extracts a key matching a specific fingerprint from a Keybase API response
[ "Extracts", "a", "key", "matching", "a", "specific", "fingerprint", "from", "a", "Keybase", "API", "response" ]
python
train
saltstack/salt
salt/netapi/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/__init__.py#L111-L120
def local_subset(self, *args, **kwargs): ''' Run :ref:`execution modules <all-salt.modules>` against subsets of minions .. versionadded:: 2016.3.0 Wraps :py:meth:`salt.client.LocalClient.cmd_subset` ''' local = salt.client.get_local_client(mopts=self.opts) return local.cmd_subset(*args, **kwargs)
[ "def", "local_subset", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "local", "=", "salt", ".", "client", ".", "get_local_client", "(", "mopts", "=", "self", ".", "opts", ")", "return", "local", ".", "cmd_subset", "(", "*", "args"...
Run :ref:`execution modules <all-salt.modules>` against subsets of minions .. versionadded:: 2016.3.0 Wraps :py:meth:`salt.client.LocalClient.cmd_subset`
[ "Run", ":", "ref", ":", "execution", "modules", "<all", "-", "salt", ".", "modules", ">", "against", "subsets", "of", "minions" ]
python
train
rainwoodman/sharedmem
sharedmem/sharedmem.py
https://github.com/rainwoodman/sharedmem/blob/b23e59c1ed0e28f7b6c96c17a04d55c700e06e3a/sharedmem/sharedmem.py#L643-L782
def map(self, func, sequence, reduce=None, star=False, minlength=0): """ Map-reduce with multile processes. Apply func to each item on the sequence, in parallel. As the results are collected, reduce is called on the result. The reduced result is returned as a list. Parameters ---------- func : callable The function to call. It must accept the same number of arguments as the length of an item in the sequence. .. warning:: func is not supposed to use exceptions for flow control. In non-debug mode all exceptions will be wrapped into a :py:class:`SlaveException`. sequence : list or array_like The sequence of arguments to be applied to func. reduce : callable, optional Apply an reduction operation on the return values of func. If func returns a tuple, they are treated as positional arguments of reduce. star : boolean if True, the items in sequence are treated as positional arguments of reduce. minlength: integer Minimal length of `sequence` to start parallel processing. if len(sequence) < minlength, fall back to sequential processing. This can be used to avoid the overhead of starting the worker processes when there is little work. Returns ------- results : list The list of reduced results from the map operation, in the order of the arguments of sequence. Raises ------ SlaveException If any of the slave process encounters an exception. Inspect :py:attr:`SlaveException.reason` for the underlying exception. """ def realreduce(r): if reduce: if isinstance(r, tuple): return reduce(*r) else: return reduce(r) return r def realfunc(i): if star: return func(*i) else: return func(i) if len(sequence) <= 0 or self.np == 0 or get_debug(): # Do this in serial self.local = lambda : None self.local.rank = 0 rt = [realreduce(realfunc(i)) for i in sequence] self.local = None return rt # never use more than len(sequence) processes np = min([self.np, len(sequence)]) Q = self.backend.QueueFactory(64) R = self.backend.QueueFactory(64) self.ordered.reset() pg = ProcessGroup(main=self._main, np=np, backend=self.backend, args=(Q, R, sequence, realfunc)) pg.start() L = [] N = [] def feeder(pg, Q, N): # will fail silently if any error occurs. j = 0 try: for i, work in enumerate(sequence): if not hasattr(sequence, '__getitem__'): pg.put(Q, (i, work)) else: pg.put(Q, (i, )) j = j + 1 N.append(j) for i in range(np): pg.put(Q, None) except StopProcessGroup: return finally: pass feeder = threading.Thread(None, feeder, args=(pg, Q, N)) feeder.start() # we run fetcher on main thread to catch exceptions # raised by reduce count = 0 try: while True: try: capsule = pg.get(R) except queue.Empty: continue except StopProcessGroup: raise pg.get_exception() capsule = capsule[0], realreduce(capsule[1]) heapq.heappush(L, capsule) count = count + 1 if len(N) > 0 and count == N[0]: # if finished feeding see if all # results have been obtained break rt = [] # R.close() # R.join_thread() while len(L) > 0: rt.append(heapq.heappop(L)[1]) pg.join() feeder.join() assert N[0] == len(rt) return rt except BaseException as e: pg.killall() pg.join() feeder.join() raise
[ "def", "map", "(", "self", ",", "func", ",", "sequence", ",", "reduce", "=", "None", ",", "star", "=", "False", ",", "minlength", "=", "0", ")", ":", "def", "realreduce", "(", "r", ")", ":", "if", "reduce", ":", "if", "isinstance", "(", "r", ",",...
Map-reduce with multiple processes. Apply func to each item on the sequence, in parallel. As the results are collected, reduce is called on the result. The reduced result is returned as a list. Parameters ---------- func : callable The function to call. It must accept the same number of arguments as the length of an item in the sequence. .. warning:: func is not supposed to use exceptions for flow control. In non-debug mode all exceptions will be wrapped into a :py:class:`SlaveException`. sequence : list or array_like The sequence of arguments to be applied to func. reduce : callable, optional Apply a reduction operation on the return values of func. If func returns a tuple, they are treated as positional arguments of reduce. star : boolean If True, the items in sequence are treated as positional arguments of func. minlength: integer Minimal length of `sequence` to start parallel processing. If len(sequence) < minlength, fall back to sequential processing. This can be used to avoid the overhead of starting the worker processes when there is little work. Returns ------- results : list The list of reduced results from the map operation, in the order of the arguments of sequence. Raises ------ SlaveException If any of the slave processes encounters an exception. Inspect :py:attr:`SlaveException.reason` for the underlying exception.
[ "Map", "-", "reduce", "with", "multile", "processes", "." ]
python
valid
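A short usage sketch for the map method in the record above, assuming the documented pattern of driving sharedmem.MapReduce as a context manager:

import sharedmem

def work(i):
    # square each item; runs in the worker processes
    return i * i

with sharedmem.MapReduce() as pool:
    squares = pool.map(work, range(8))
print(squares)  # [0, 1, 4, 9, 16, 25, 36, 49]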
tcalmant/ipopo
pelix/ldapfilter.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ldapfilter.py#L396-L406
def _comparator_star(filter_value, tested_value): """ Tests a filter containing a joker """ if isinstance(tested_value, ITERABLES): for value in tested_value: if _star_comparison(filter_value, value): return True return False return _star_comparison(filter_value, tested_value)
[ "def", "_comparator_star", "(", "filter_value", ",", "tested_value", ")", ":", "if", "isinstance", "(", "tested_value", ",", "ITERABLES", ")", ":", "for", "value", "in", "tested_value", ":", "if", "_star_comparison", "(", "filter_value", ",", "value", ")", ":"...
Tests a filter containing a joker
[ "Tests", "a", "filter", "containing", "a", "joker" ]
python
train
michael-lazar/rtv
rtv/packages/praw/objects.py
https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/objects.py#L1875-L1879
def _convert(reddit_session, data): """Return a Redditor object from the data.""" retval = Redditor(reddit_session, data['name'], fetch=False) retval.id = data['id'].split('_')[1] # pylint: disable=C0103,W0201 return retval
[ "def", "_convert", "(", "reddit_session", ",", "data", ")", ":", "retval", "=", "Redditor", "(", "reddit_session", ",", "data", "[", "'name'", "]", ",", "fetch", "=", "False", ")", "retval", ".", "id", "=", "data", "[", "'id'", "]", ".", "split", "("...
Return a Redditor object from the data.
[ "Return", "a", "Redditor", "object", "from", "the", "data", "." ]
python
train
ssato/python-anyconfig
src/anyconfig/backend/shellvars.py
https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/backend/shellvars.py#L128-L137
def dump_to_stream(self, cnf, stream, **kwargs): """ Dump config 'cnf' to a file or file-like object 'stream'. :param cnf: Shell variables data to dump :param stream: Shell script file or file like object :param kwargs: backend-specific optional keyword parameters :: dict """ for key, val in anyconfig.compat.iteritems(cnf): stream.write("%s='%s'%s" % (key, val, os.linesep))
[ "def", "dump_to_stream", "(", "self", ",", "cnf", ",", "stream", ",", "*", "*", "kwargs", ")", ":", "for", "key", ",", "val", "in", "anyconfig", ".", "compat", ".", "iteritems", "(", "cnf", ")", ":", "stream", ".", "write", "(", "\"%s='%s'%s\"", "%",...
Dump config 'cnf' to a file or file-like object 'stream'. :param cnf: Shell variables data to dump :param stream: Shell script file or file like object :param kwargs: backend-specific optional keyword parameters :: dict
[ "Dump", "config", "cnf", "to", "a", "file", "or", "file", "-", "like", "object", "stream", "." ]
python
train
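A hedged usage sketch for dump_to_stream; it assumes the shellvars backend module exposes a Parser class following the usual anyconfig backend convention, which the record itself does not show:

import sys
from anyconfig.backend.shellvars import Parser  # assumed class name

# writes lines such as HOST='localhost' and PORT='8080' to stdout
Parser().dump_to_stream({'HOST': 'localhost', 'PORT': '8080'}, sys.stdout)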
Erotemic/utool
utool/util_path.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L1579-L1588
def assertpath(path_, msg='', **kwargs): """ Asserts that a path exists """ if NO_ASSERTS: return if path_ is None: raise AssertionError('path=%r is None! %s' % (path_, msg)) if path_ == '': raise AssertionError('path=%r is the empty string! %s' % (path_, msg)) if not checkpath(path_, **kwargs): raise AssertionError('path=%r does not exist! %s' % (path_, msg))
[ "def", "assertpath", "(", "path_", ",", "msg", "=", "''", ",", "*", "*", "kwargs", ")", ":", "if", "NO_ASSERTS", ":", "return", "if", "path_", "is", "None", ":", "raise", "AssertionError", "(", "'path is None! %s'", "%", "(", "path_", ",", "msg", ")", ...
Asserts that a path exists
[ "Asserts", "that", "a", "patha", "exists" ]
python
train
angr/angr
angr/sim_manager.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/sim_manager.py#L193-L211
def remove_technique(self, tech): """ Remove an exploration technique from a list of active techniques. :param tech: An ExplorationTechnique object. :type tech: ExplorationTechnique """ if not isinstance(tech, ExplorationTechnique): raise SimulationManagerError def _is_overriden(name): return getattr(tech, name).__code__ is not getattr(ExplorationTechnique, name).__code__ overriden = filter(_is_overriden, ('step', 'filter', 'selector', 'step_state', 'successors')) hooks = {name: getattr(tech, name) for name in overriden} HookSet.remove_hooks(self, **hooks) self._techniques.remove(tech) return tech
[ "def", "remove_technique", "(", "self", ",", "tech", ")", ":", "if", "not", "isinstance", "(", "tech", ",", "ExplorationTechnique", ")", ":", "raise", "SimulationManagerError", "def", "_is_overriden", "(", "name", ")", ":", "return", "getattr", "(", "tech", ...
Remove an exploration technique from a list of active techniques. :param tech: An ExplorationTechnique object. :type tech: ExplorationTechnique
[ "Remove", "an", "exploration", "technique", "from", "a", "list", "of", "active", "techniques", "." ]
python
train
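A usage sketch for remove_technique, assuming a local ELF binary such as /bin/true and the stock DFS exploration technique shipped with angr:

import angr
from angr.exploration_techniques import DFS

proj = angr.Project('/bin/true', auto_load_libs=False)
simgr = proj.factory.simulation_manager()

dfs = DFS()
simgr.use_technique(dfs)     # install the technique
simgr.remove_technique(dfs)  # uninstall it again, restoring default stepping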
mitsei/dlkit
dlkit/records/assessment/basic/multi_choice_records.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/basic/multi_choice_records.py#L884-L889
def clear_choices(self): """stub""" if self.get_choices_metadata().is_read_only(): raise NoAccess() self.my_osid_object_form._my_map['choices'] = \ self._choices_metadata['default_object_values'][0]
[ "def", "clear_choices", "(", "self", ")", ":", "if", "self", ".", "get_choices_metadata", "(", ")", ".", "is_read_only", "(", ")", ":", "raise", "NoAccess", "(", ")", "self", ".", "my_osid_object_form", ".", "_my_map", "[", "'choices'", "]", "=", "self", ...
stub
[ "stub" ]
python
train
hydraplatform/hydra-base
hydra_base/lib/network.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/network.py#L1729-L1750
def set_node_status(node_id, status, **kwargs): """ Set the status of a node to 'X' """ user_id = kwargs.get('user_id') try: node_i = db.DBSession.query(Node).filter(Node.id == node_id).one() except NoResultFound: raise ResourceNotFoundError("Node %s not found"%(node_id)) node_i.network.check_write_permission(user_id) node_i.status = status for link in node_i.links_to: link.status = status for link in node_i.links_from: link.status = status db.DBSession.flush() return node_i
[ "def", "set_node_status", "(", "node_id", ",", "status", ",", "*", "*", "kwargs", ")", ":", "user_id", "=", "kwargs", ".", "get", "(", "'user_id'", ")", "try", ":", "node_i", "=", "db", ".", "DBSession", ".", "query", "(", "Node", ")", ".", "filter",...
Set the status of a node to 'X'
[ "Set", "the", "status", "of", "a", "node", "to", "X" ]
python
train
pymupdf/PyMuPDF
fitz/fitz.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L4625-L4638
def _le_slash(self, annot, p1, p2, lr): """Make stream commands for slash line end symbol. "lr" denotes left (False) or right point. """ m, im, L, R, w, scol, fcol, opacity = self._le_annot_parms(annot, p1, p2) rw = 1.1547 * max(1, w) * 1.0 # makes rect diagonal a 30 deg inclination M = R if lr else L r = Rect(M.x - rw, M.y - 2 * w, M.x + rw, M.y + 2 * w) top = r.tl * im bot = r.br * im ap = "\nq\n%s%f %f m\n" % (opacity, top.x, top.y) ap += "%f %f l\n" % (bot.x, bot.y) ap += "%g w\n" % w ap += scol + "s\nQ\n" return ap
[ "def", "_le_slash", "(", "self", ",", "annot", ",", "p1", ",", "p2", ",", "lr", ")", ":", "m", ",", "im", ",", "L", ",", "R", ",", "w", ",", "scol", ",", "fcol", ",", "opacity", "=", "self", ".", "_le_annot_parms", "(", "annot", ",", "p1", ",...
Make stream commands for slash line end symbol. "lr" denotes left (False) or right point.
[ "Make", "stream", "commands", "for", "slash", "line", "end", "symbol", ".", "lr", "denotes", "left", "(", "False", ")", "or", "right", "point", "." ]
python
train
sony/nnabla
python/src/nnabla/experimental/graph_converters/batch_normalization_linear.py
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/experimental/graph_converters/batch_normalization_linear.py#L27-L53
def convert(self, vroot, entry_variables): """ All functions are replaced with the same `new` function. Args: vroot (:obj:`Variable`): NNabla Variable entry_variables (:obj:`Variable`): Entry variable from which the conversion starts. """ self.graph_info = GraphInfo(vroot) self.entry_variables = entry_variables cnt = 0 with nn.parameter_scope(self.name): # Function loop in the forward order for t, func in enumerate(self.graph_info.funcs): if func.name == "BatchNormalization": bn_func = func # TODO: should deal with both? if bn_func.info.args["batch_stat"] == False: o = self._bn_linear_conversion(bn_func, cnt) cnt += 1 continue # Identity conversion o = self._identity_conversion(func) self.end_variable = o return self.end_variable
[ "def", "convert", "(", "self", ",", "vroot", ",", "entry_variables", ")", ":", "self", ".", "graph_info", "=", "GraphInfo", "(", "vroot", ")", "self", ".", "entry_variables", "=", "entry_variables", "cnt", "=", "0", "with", "nn", ".", "parameter_scope", "(...
All functions are replaced with the same `new` function. Args: vroot (:obj:`Variable`): NNabla Variable entry_variables (:obj:`Variable`): Entry variable from which the conversion starts.
[ "All", "functions", "are", "replaced", "with", "the", "same", "new", "function", "." ]
python
train
cggh/scikit-allel
allel/model/ndarray.py
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L3472-L3515
def locate_intersection(self, other): """Locate the intersection with another array. Parameters ---------- other : array_like, int Array of values to intersect. Returns ------- loc : ndarray, bool Boolean array with location of intersection. loc_other : ndarray, bool Boolean array with location in `other` of intersection. Examples -------- >>> import allel >>> idx1 = allel.SortedIndex([3, 6, 11, 20, 35]) >>> idx2 = allel.SortedIndex([4, 6, 20, 39]) >>> loc1, loc2 = idx1.locate_intersection(idx2) >>> loc1 array([False, True, False, True, False]) >>> loc2 array([False, True, True, False]) >>> idx1[loc1] <SortedIndex shape=(2,) dtype=int64> [6, 20] >>> idx2[loc2] <SortedIndex shape=(2,) dtype=int64> [6, 20] """ # check inputs other = SortedIndex(other, copy=False) # find intersection assume_unique = self.is_unique and other.is_unique loc = np.in1d(self, other, assume_unique=assume_unique) loc_other = np.in1d(other, self, assume_unique=assume_unique) return loc, loc_other
[ "def", "locate_intersection", "(", "self", ",", "other", ")", ":", "# check inputs", "other", "=", "SortedIndex", "(", "other", ",", "copy", "=", "False", ")", "# find intersection", "assume_unique", "=", "self", ".", "is_unique", "and", "other", ".", "is_uniq...
Locate the intersection with another array. Parameters ---------- other : array_like, int Array of values to intersect. Returns ------- loc : ndarray, bool Boolean array with location of intersection. loc_other : ndarray, bool Boolean array with location in `other` of intersection. Examples -------- >>> import allel >>> idx1 = allel.SortedIndex([3, 6, 11, 20, 35]) >>> idx2 = allel.SortedIndex([4, 6, 20, 39]) >>> loc1, loc2 = idx1.locate_intersection(idx2) >>> loc1 array([False, True, False, True, False]) >>> loc2 array([False, True, True, False]) >>> idx1[loc1] <SortedIndex shape=(2,) dtype=int64> [6, 20] >>> idx2[loc2] <SortedIndex shape=(2,) dtype=int64> [6, 20]
[ "Locate", "the", "intersection", "with", "another", "array", "." ]
python
train
SBRG/ssbio
ssbio/utils.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L551-L564
def dict_head(d, N=5): """Return the head of a dictionary. It will be random! Default is to return the first 5 key/value pairs in a dictionary. Args: d: Dictionary to get head. N: Number of elements to display. Returns: dict: the first N items of the dictionary. """ return {k: d[k] for k in list(d.keys())[:N]}
[ "def", "dict_head", "(", "d", ",", "N", "=", "5", ")", ":", "return", "{", "k", ":", "d", "[", "k", "]", "for", "k", "in", "list", "(", "d", ".", "keys", "(", ")", ")", "[", ":", "N", "]", "}" ]
Return the head of a dictionary. It will be random! Default is to return the first 5 key/value pairs in a dictionary. Args: d: Dictionary to get head. N: Number of elements to display. Returns: dict: the first N items of the dictionary.
[ "Return", "the", "head", "of", "a", "dictionary", ".", "It", "will", "be", "random!" ]
python
train
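A quick usage sketch for dict_head; the import path follows the module path given in the record:

from ssbio.utils import dict_head

d = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6}
print(dict_head(d, N=3))  # three key/value pairs; which ones depends on the dict's key order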
materialsproject/pymatgen
pymatgen/symmetry/analyzer.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/symmetry/analyzer.py#L1022-L1056
def _find_mirror(self, axis): """ Looks for mirror symmetry of specified type about axis. Possible types are "h" or "vd". Horizontal (h) mirrors are perpendicular to the axis while vertical (v) or diagonal (d) mirrors are parallel. v mirrors has atoms lying on the mirror plane while d mirrors do not. """ mirror_type = "" # First test whether the axis itself is the normal to a mirror plane. if self.is_valid_op(SymmOp.reflection(axis)): self.symmops.append(SymmOp.reflection(axis)) mirror_type = "h" else: # Iterate through all pairs of atoms to find mirror for s1, s2 in itertools.combinations(self.centered_mol, 2): if s1.species == s2.species: normal = s1.coords - s2.coords if np.dot(normal, axis) < self.tol: op = SymmOp.reflection(normal) if self.is_valid_op(op): self.symmops.append(op) if len(self.rot_sym) > 1: mirror_type = "d" for v, r in self.rot_sym: if not np.linalg.norm(v - axis) < self.tol: if np.dot(v, normal) < self.tol: mirror_type = "v" break else: mirror_type = "v" break return mirror_type
[ "def", "_find_mirror", "(", "self", ",", "axis", ")", ":", "mirror_type", "=", "\"\"", "# First test whether the axis itself is the normal to a mirror plane.", "if", "self", ".", "is_valid_op", "(", "SymmOp", ".", "reflection", "(", "axis", ")", ")", ":", "self", ...
Looks for mirror symmetry of specified type about axis. Possible types are "h" or "vd". Horizontal (h) mirrors are perpendicular to the axis while vertical (v) or diagonal (d) mirrors are parallel. v mirrors have atoms lying on the mirror plane while d mirrors do not.
[ "Looks", "for", "mirror", "symmetry", "of", "specified", "type", "about", "axis", ".", "Possible", "types", "are", "h", "or", "vd", ".", "Horizontal", "(", "h", ")", "mirrors", "are", "perpendicular", "to", "the", "axis", "while", "vertical", "(", "v", "...
python
train
PyCQA/astroid
astroid/scoped_nodes.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/scoped_nodes.py#L2290-L2312
def instance_attr(self, name, context=None): """Get the list of nodes associated to the given attribute name. Assignments are looked for in both this class and in parents. :returns: The list of assignments to the given name. :rtype: list(NodeNG) :raises AttributeInferenceError: If no attribute with this name can be found in this class or parent classes. """ # Return a copy, so we don't modify self.instance_attrs, # which could lead to infinite loop. values = list(self.instance_attrs.get(name, [])) # get all values from parents for class_node in self.instance_attr_ancestors(name, context): values += class_node.instance_attrs[name] values = [n for n in values if not isinstance(n, node_classes.DelAttr)] if values: return values raise exceptions.AttributeInferenceError( target=self, attribute=name, context=context )
[ "def", "instance_attr", "(", "self", ",", "name", ",", "context", "=", "None", ")", ":", "# Return a copy, so we don't modify self.instance_attrs,", "# which could lead to infinite loop.", "values", "=", "list", "(", "self", ".", "instance_attrs", ".", "get", "(", "na...
Get the list of nodes associated to the given attribute name. Assignments are looked for in both this class and in parents. :returns: The list of assignments to the given name. :rtype: list(NodeNG) :raises AttributeInferenceError: If no attribute with this name can be found in this class or parent classes.
[ "Get", "the", "list", "of", "nodes", "associated", "to", "the", "given", "attribute", "name", "." ]
python
train
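A small usage sketch for instance_attr, using astroid's extract_node helper to build a class node from source text:

import astroid

cls = astroid.extract_node('''
class A(object):  #@
    def __init__(self):
        self.x = 1
''')
print(cls.instance_attr('x'))  # list containing the assignment node for self.x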
OpenAgInitiative/openag_python
openag/cli/cloud/db.py
https://github.com/OpenAgInitiative/openag_python/blob/f6202340292bbf7185e1a7d4290188c0dacbb8d0/openag/cli/cloud/db.py#L10-L29
def init(cloud_url): """ Choose a cloud server to use. Sets CLOUD_URL as the cloud server to use and sets up replication of global databases from that cloud server if a local database is already initialized (via `openag db init`). """ old_cloud_url = config["cloud_server"]["url"] if old_cloud_url and old_cloud_url != cloud_url: raise click.ClickException( 'Server "{}" already selected. Call `openag cloud deinit` to ' 'detach from that server before selecting a new one'.format( old_cloud_url ) ) parsed_url = urlparse(cloud_url) if not parsed_url.scheme or not parsed_url.netloc or not parsed_url.port: raise click.BadParameter("Invalid url") if config["local_server"]["url"]: utils.replicate_global_dbs(cloud_url=cloud_url) config["cloud_server"]["url"] = cloud_url
[ "def", "init", "(", "cloud_url", ")", ":", "old_cloud_url", "=", "config", "[", "\"cloud_server\"", "]", "[", "\"url\"", "]", "if", "old_cloud_url", "and", "old_cloud_url", "!=", "cloud_url", ":", "raise", "click", ".", "ClickException", "(", "'Server \"{}\" alr...
Choose a cloud server to use. Sets CLOUD_URL as the cloud server to use and sets up replication of global databases from that cloud server if a local database is already initialized (via `openag db init`).
[ "Choose", "a", "cloud", "server", "to", "use", ".", "Sets", "CLOUD_URL", "as", "the", "cloud", "server", "to", "use", "and", "sets", "up", "replication", "of", "global", "databases", "from", "that", "cloud", "server", "if", "a", "local", "database", "is", ...
python
train
ynop/audiomate
audiomate/corpus/corpus.py
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/corpus/corpus.py#L291-L313
def new_issuer(self, issuer_idx, info=None): """ Add a new issuer to the dataset with the given data. Parameters: issuer_idx (str): The id to associate the issuer with. If None or already exists, one is generated. info (dict, list): Additional info of the issuer. Returns: Issuer: The newly added issuer. """ new_issuer_idx = issuer_idx # Add index to idx if already existing if new_issuer_idx in self._issuers.keys(): new_issuer_idx = naming.index_name_if_in_list(new_issuer_idx, self._issuers.keys()) new_issuer = issuers.Issuer(new_issuer_idx, info=info) self._issuers[new_issuer_idx] = new_issuer return new_issuer
[ "def", "new_issuer", "(", "self", ",", "issuer_idx", ",", "info", "=", "None", ")", ":", "new_issuer_idx", "=", "issuer_idx", "# Add index to idx if already existing", "if", "new_issuer_idx", "in", "self", ".", "_issuers", ".", "keys", "(", ")", ":", "new_issuer...
Add a new issuer to the dataset with the given data. Parameters: issuer_idx (str): The id to associate the issuer with. If None or already exists, one is generated. info (dict, list): Additional info of the issuer. Returns: Issuer: The newly added issuer.
[ "Add", "a", "new", "issuer", "to", "the", "dataset", "with", "the", "given", "data", "." ]
python
train
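A usage sketch for new_issuer; it assumes an audiomate Corpus can be constructed empty, in line with the in-memory corpus the record's method operates on:

import audiomate

corpus = audiomate.Corpus()
spk = corpus.new_issuer('speaker-1', info={'gender': 'female'})
print(spk.idx)  # 'speaker-1' (an index suffix is added if the id already existed)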
angr/angr
angr/sim_procedure.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/sim_procedure.py#L406-L417
def exit(self, exit_code): """ Add an exit representing terminating the program. """ self.inhibit_autoret = True self.state.options.discard(o.AST_DEPS) self.state.options.discard(o.AUTO_REFS) if isinstance(exit_code, int): exit_code = self.state.solver.BVV(exit_code, self.state.arch.bits) self.state.history.add_event('terminate', exit_code=exit_code) self.successors.add_successor(self.state, self.state.regs.ip, self.state.solver.true, 'Ijk_Exit')
[ "def", "exit", "(", "self", ",", "exit_code", ")", ":", "self", ".", "inhibit_autoret", "=", "True", "self", ".", "state", ".", "options", ".", "discard", "(", "o", ".", "AST_DEPS", ")", "self", ".", "state", ".", "options", ".", "discard", "(", "o",...
Add an exit representing terminating the program.
[ "Add", "an", "exit", "representing", "terminating", "the", "program", "." ]
python
train
OpenHumans/open-humans-api
ohapi/command_line.py
https://github.com/OpenHumans/open-humans-api/blob/ca2a28cf5d55cfdae13dd222ba58c25565bdb86e/ohapi/command_line.py#L219-L227
def upload_metadata_cli(directory, create_csv='', review='', max_size='128m', verbose=False, debug=False): """ Command line function for drafting or reviewing metadata files. For more information visit :func:`upload_metadata<ohapi.command_line.upload_metadata>`. """ return upload_metadata(directory, create_csv, review, max_size, verbose, debug)
[ "def", "upload_metadata_cli", "(", "directory", ",", "create_csv", "=", "''", ",", "review", "=", "''", ",", "max_size", "=", "'128m'", ",", "verbose", "=", "False", ",", "debug", "=", "False", ")", ":", "return", "upload_metadata", "(", "directory", ",", ...
Command line function for drafting or reviewing metadata files. For more information visit :func:`upload_metadata<ohapi.command_line.upload_metadata>`.
[ "Command", "line", "function", "for", "drafting", "or", "reviewing", "metadata", "files", ".", "For", "more", "information", "visit", ":", "func", ":", "upload_metadata<ohapi", ".", "command_line", ".", "upload_metadata", ">", "." ]
python
train
quantmind/pulsar-odm
odm/mapper.py
https://github.com/quantmind/pulsar-odm/blob/5955c20beca0a89270c2b390335838deb7d5915e/odm/mapper.py#L303-L309
def database_all(self): """Return a dictionary mapping engines with databases """ all = {} for engine in self.engines(): all[engine] = self._database_all(engine) return all
[ "def", "database_all", "(", "self", ")", ":", "all", "=", "{", "}", "for", "engine", "in", "self", ".", "engines", "(", ")", ":", "all", "[", "engine", "]", "=", "self", ".", "_database_all", "(", "engine", ")", "return", "all" ]
Return a dictionary mapping engines with databases
[ "Return", "a", "dictionary", "mapping", "engines", "with", "databases" ]
python
train
pandas-dev/pandas
pandas/core/panel.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L694-L733
def dropna(self, axis=0, how='any', inplace=False): """ Drop 2D from panel, holding passed axis constant. Parameters ---------- axis : int, default 0 Axis to hold constant. E.g. axis=1 will drop major_axis entries having a certain amount of NA data how : {'all', 'any'}, default 'any' 'any': one or more values are NA in the DataFrame along the axis. For 'all' they all must be. inplace : bool, default False If True, do operation inplace and return None. Returns ------- dropped : Panel """ axis = self._get_axis_number(axis) values = self.values mask = notna(values) for ax in reversed(sorted(set(range(self._AXIS_LEN)) - {axis})): mask = mask.sum(ax) per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:]) if how == 'all': cond = mask > 0 else: cond = mask == per_slice new_ax = self._get_axis(axis)[cond] result = self.reindex_axis(new_ax, axis=axis) if inplace: self._update_inplace(result) else: return result
[ "def", "dropna", "(", "self", ",", "axis", "=", "0", ",", "how", "=", "'any'", ",", "inplace", "=", "False", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "values", "=", "self", ".", "values", "mask", "=", "notna", "(", ...
Drop 2D from panel, holding passed axis constant. Parameters ---------- axis : int, default 0 Axis to hold constant. E.g. axis=1 will drop major_axis entries having a certain amount of NA data how : {'all', 'any'}, default 'any' 'any': one or more values are NA in the DataFrame along the axis. For 'all' they all must be. inplace : bool, default False If True, do operation inplace and return None. Returns ------- dropped : Panel
[ "Drop", "2D", "from", "panel", "holding", "passed", "axis", "constant", "." ]
python
train
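A usage sketch for Panel.dropna; pandas.Panel was deprecated and later removed, so this assumes an older pandas release where Panel still exists:

import numpy as np
import pandas as pd

data = np.random.randn(2, 4, 3)
data[0, 0, :] = np.nan                 # poison one major_axis entry
p = pd.Panel(data)

cleaned = p.dropna(axis=1, how='any')  # drop major_axis entries containing any NaN
print(cleaned.shape)                   # (2, 3, 3)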
tensorflow/probability
tensorflow_probability/python/distributions/transformed_distribution.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/transformed_distribution.py#L41-L49
def _pick_scalar_condition(pred, cond_true, cond_false): """Convenience function which chooses the condition based on the predicate.""" # Note: This function is only valid if all of pred, cond_true, and cond_false # are scalars. This means its semantics are arguably more like tf.cond than # tf.where even though we use tf.where to implement it. pred_ = tf.get_static_value(tf.convert_to_tensor(value=pred)) if pred_ is None: return tf.where(pred, cond_true, cond_false) return cond_true if pred_ else cond_false
[ "def", "_pick_scalar_condition", "(", "pred", ",", "cond_true", ",", "cond_false", ")", ":", "# Note: This function is only valid if all of pred, cond_true, and cond_false", "# are scalars. This means its semantics are arguably more like tf.cond than", "# tf.where even though we use tf.where...
Convenience function which chooses the condition based on the predicate.
[ "Convenience", "function", "which", "chooses", "the", "condition", "based", "on", "the", "predicate", "." ]
python
test
tensorflow/probability
tensorflow_probability/python/distributions/linear_gaussian_ssm.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L454-L541
def backward_smoothing_pass(self, filtered_means, filtered_covs, predicted_means, predicted_covs): """Run the backward pass in Kalman smoother. The backward smoothing is using Rauch, Tung and Striebel smoother as as discussed in section 18.3.2 of Kevin P. Murphy, 2012, Machine Learning: A Probabilistic Perspective, The MIT Press. The inputs are returned by `forward_filter` function. Args: filtered_means: Means of the per-timestep filtered marginal distributions p(z_t | x_{:t}), as a Tensor of shape `sample_shape(x) + batch_shape + [num_timesteps, latent_size]`. filtered_covs: Covariances of the per-timestep filtered marginal distributions p(z_t | x_{:t}), as a Tensor of shape `batch_shape + [num_timesteps, latent_size, latent_size]`. predicted_means: Means of the per-timestep predictive distributions over latent states, p(z_{t+1} | x_{:t}), as a Tensor of shape `sample_shape(x) + batch_shape + [num_timesteps, latent_size]`. predicted_covs: Covariances of the per-timestep predictive distributions over latent states, p(z_{t+1} | x_{:t}), as a Tensor of shape `batch_shape + [num_timesteps, latent_size, latent_size]`. Returns: posterior_means: Means of the smoothed marginal distributions p(z_t | x_{1:T}), as a Tensor of shape `sample_shape(x) + batch_shape + [num_timesteps, latent_size]`, which is of the same shape as filtered_means. posterior_covs: Covariances of the smoothed marginal distributions p(z_t | x_{1:T}), as a Tensor of shape `batch_shape + [num_timesteps, latent_size, latent_size]`. which is of the same shape as filtered_covs. """ with tf.name_scope("backward_pass"): filtered_means = tf.convert_to_tensor( value=filtered_means, name="filtered_means") filtered_covs = tf.convert_to_tensor( value=filtered_covs, name="filtered_covs") predicted_means = tf.convert_to_tensor( value=predicted_means, name="predicted_means") predicted_covs = tf.convert_to_tensor( value=predicted_covs, name="predicted_covs") # To scan over time dimension, we need to move 'num_timesteps' from the # event shape to the initial dimension of the tensor. filtered_means = distribution_util.move_dimension(filtered_means, -2, 0) filtered_covs = distribution_util.move_dimension(filtered_covs, -3, 0) predicted_means = distribution_util.move_dimension(predicted_means, -2, 0) predicted_covs = distribution_util.move_dimension(predicted_covs, -3, 0) # The means are assumed to be vectors. Adding a dummy index to # ensure the `matmul` op working smoothly. filtered_means = filtered_means[..., tf.newaxis] predicted_means = predicted_means[..., tf.newaxis] initial_backward_mean = predicted_means[-1, ...] initial_backward_cov = predicted_covs[-1, ...] num_timesteps = tf.shape(input=filtered_means)[0] initial_state = BackwardPassState( backward_mean=initial_backward_mean, backward_cov=initial_backward_cov, timestep=self.initial_step + num_timesteps - 1) update_step_fn = build_backward_pass_step( self.get_transition_matrix_for_timestep) # For backward pass, it scans the `elems` from last to first. posterior_states = tf.scan(update_step_fn, elems=(filtered_means, filtered_covs, predicted_means, predicted_covs), initializer=initial_state, reverse=True) # Move the time dimension back into the event shape. posterior_means = distribution_util.move_dimension( posterior_states.backward_mean[..., 0], 0, -2) posterior_covs = distribution_util.move_dimension( posterior_states.backward_cov, 0, -3) return (posterior_means, posterior_covs)
[ "def", "backward_smoothing_pass", "(", "self", ",", "filtered_means", ",", "filtered_covs", ",", "predicted_means", ",", "predicted_covs", ")", ":", "with", "tf", ".", "name_scope", "(", "\"backward_pass\"", ")", ":", "filtered_means", "=", "tf", ".", "convert_to_...
Run the backward pass in Kalman smoother. The backward smoothing uses the Rauch, Tung and Striebel smoother as discussed in section 18.3.2 of Kevin P. Murphy, 2012, Machine Learning: A Probabilistic Perspective, The MIT Press. The inputs are returned by the `forward_filter` function. Args: filtered_means: Means of the per-timestep filtered marginal distributions p(z_t | x_{:t}), as a Tensor of shape `sample_shape(x) + batch_shape + [num_timesteps, latent_size]`. filtered_covs: Covariances of the per-timestep filtered marginal distributions p(z_t | x_{:t}), as a Tensor of shape `batch_shape + [num_timesteps, latent_size, latent_size]`. predicted_means: Means of the per-timestep predictive distributions over latent states, p(z_{t+1} | x_{:t}), as a Tensor of shape `sample_shape(x) + batch_shape + [num_timesteps, latent_size]`. predicted_covs: Covariances of the per-timestep predictive distributions over latent states, p(z_{t+1} | x_{:t}), as a Tensor of shape `batch_shape + [num_timesteps, latent_size, latent_size]`. Returns: posterior_means: Means of the smoothed marginal distributions p(z_t | x_{1:T}), as a Tensor of shape `sample_shape(x) + batch_shape + [num_timesteps, latent_size]`, which is of the same shape as filtered_means. posterior_covs: Covariances of the smoothed marginal distributions p(z_t | x_{1:T}), as a Tensor of shape `batch_shape + [num_timesteps, latent_size, latent_size]`, which is of the same shape as filtered_covs.
[ "Run", "the", "backward", "pass", "in", "Kalman", "smoother", "." ]
python
test
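A hedged sketch of how the backward pass in the record above is typically driven; `model` is assumed to be a tfp.distributions.LinearGaussianStateSpaceModel and `x` a batch of observations, with forward_filter supplying the filtered and predicted moments:

# forward_filter returns (log_likelihoods, filtered_means, filtered_covs,
#                         predicted_means, predicted_covs,
#                         observation_means, observation_covs)
(_, filtered_means, filtered_covs,
 predicted_means, predicted_covs, _, _) = model.forward_filter(x)

posterior_means, posterior_covs = model.backward_smoothing_pass(
    filtered_means, filtered_covs, predicted_means, predicted_covs)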
delph-in/pydelphin
delphin/mrs/penman.py
https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/mrs/penman.py#L29-L41
def load(fh, model): """ Deserialize PENMAN graphs from a file (handle or filename) Args: fh: filename or file object model: Xmrs subclass instantiated from decoded triples Returns: a list of objects (of class *model*) """ graphs = penman.load(fh, cls=XMRSCodec) xs = [model.from_triples(g.triples()) for g in graphs] return xs
[ "def", "load", "(", "fh", ",", "model", ")", ":", "graphs", "=", "penman", ".", "load", "(", "fh", ",", "cls", "=", "XMRSCodec", ")", "xs", "=", "[", "model", ".", "from_triples", "(", "g", ".", "triples", "(", ")", ")", "for", "g", "in", "grap...
Deserialize PENMAN graphs from a file (handle or filename) Args: fh: filename or file object model: Xmrs subclass instantiated from decoded triples Returns: a list of objects (of class *model*)
[ "Deserialize", "PENMAN", "graphs", "from", "a", "file", "(", "handle", "or", "filename", ")" ]
python
train
CyberReboot/vent
vent/menus/tools.py
https://github.com/CyberReboot/vent/blob/9956a09146b11a89a0eabab3bc7ce8906d124885/vent/menus/tools.py#L82-L202
def create(self, group_view=False): """ Update with current tools """ self.add_handlers({'^T': self.quit, '^Q': self.quit}) self.add(npyscreen.TitleText, name='Select which tools to ' + self.action['action'] + ':', editable=False) togglable = ['remove'] if self.action['action_name'] in togglable: self.cur_view = self.add(npyscreen.TitleText, name='Group view:', value='all groups', editable=False, rely=3) self.add_handlers({'^V': self.toggle_view}) i = 5 else: i = 4 if self.action['action_name'] == 'start': response = self.tools_inst.inventory(choices=['repos', 'tools', 'built', 'running']) else: response = self.tools_inst.inventory(choices=['repos', 'tools']) if response[0]: inventory = response[1] repos = inventory['repos'] # dict has repo as key and list of core/non-core tools as values has_core = {} has_non_core = {} # find all tools that are in this repo # and list them if they are core for repo in repos: core_list = [] ncore_list = [] # splice the repo names for processing if (repo.startswith('http')): repo_name = repo.rsplit('/', 2)[1:] else: repo_name = repo.split('/') for tool in inventory['tools']: tool_repo_name = tool.split(':') # cross reference repo names if (repo_name[0] == tool_repo_name[0] and repo_name[1] == tool_repo_name[1]): # check to ensure tool not set to locally active = no # in vent.cfg externally_active = False vent_cfg_file = self.api_action.vent_config vent_cfg = Template(vent_cfg_file) tool_pairs = vent_cfg.section('external-services')[1] for ext_tool in tool_pairs: if ext_tool[0].lower() == inventory['tools'][tool]: try: ext_tool_options = json.loads(ext_tool[1]) loc = 'locally_active' if (loc in ext_tool_options and ext_tool_options[loc] == 'no'): externally_active = True except Exception as e: self.logger.error("Couldn't check ext" ' because: ' + str(e)) externally_active = False manifest = Template(self.api_action.manifest) if not externally_active: instance_num = re.search(r'\d+$', manifest.option( tool, 'name')[1]) if not instance_num: ncore_list.append(tool) # multiple instances share same image elif self.action['action_name'] not in self.no_instance: ncore_list.append(tool) has_core[repo] = core_list has_non_core[repo] = ncore_list for repo in repos: self.tools_tc[repo] = {} if self.action['cores']: # make sure only repos with core tools are displayed if has_core.get(repo): self.repo_widgets[repo] = self.add(npyscreen.TitleText, name='Plugin: '+repo, editable=False, rely=i, relx=5) for tool in has_core[repo]: tool_name = tool.split(':', 2)[2].split('/')[-1] if tool_name == '': tool_name = '/' self.tools_tc[repo][tool] = self.add( npyscreen.CheckBox, name=tool_name, value=True, relx=10) i += 1 i += 3 else: # make sure only repos with non-core tools are displayed if has_non_core.get(repo): self.repo_widgets[repo] = self.add(npyscreen.TitleText, name='Plugin: '+repo, editable=False, rely=i, relx=5) for tool in has_non_core[repo]: tool_name = tool.split(':', 2)[2].split('/')[-1] if tool_name == '': tool_name = '/' self.tools_tc[repo][tool] = self.add( npyscreen.CheckBox, name=tool_name, value=True, relx=10) i += 1 i += 3 return
[ "def", "create", "(", "self", ",", "group_view", "=", "False", ")", ":", "self", ".", "add_handlers", "(", "{", "'^T'", ":", "self", ".", "quit", ",", "'^Q'", ":", "self", ".", "quit", "}", ")", "self", ".", "add", "(", "npyscreen", ".", "TitleText...
Update with current tools
[ "Update", "with", "current", "tools" ]
python
train
pypa/pipenv
pipenv/patched/notpip/_internal/cli/autocompletion.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/cli/autocompletion.py#L13-L101
def autocomplete(): """Entry Point for completion of main and subcommand options. """ # Don't complete if user hasn't sourced bash_completion file. if 'PIP_AUTO_COMPLETE' not in os.environ: return cwords = os.environ['COMP_WORDS'].split()[1:] cword = int(os.environ['COMP_CWORD']) try: current = cwords[cword - 1] except IndexError: current = '' subcommands = [cmd for cmd, summary in get_summaries()] options = [] # subcommand try: subcommand_name = [w for w in cwords if w in subcommands][0] except IndexError: subcommand_name = None parser = create_main_parser() # subcommand options if subcommand_name: # special case: 'help' subcommand has no options if subcommand_name == 'help': sys.exit(1) # special case: list locally installed dists for show and uninstall should_list_installed = ( subcommand_name in ['show', 'uninstall'] and not current.startswith('-') ) if should_list_installed: installed = [] lc = current.lower() for dist in get_installed_distributions(local_only=True): if dist.key.startswith(lc) and dist.key not in cwords[1:]: installed.append(dist.key) # if there are no dists installed, fall back to option completion if installed: for dist in installed: print(dist) sys.exit(1) subcommand = commands_dict[subcommand_name]() for opt in subcommand.parser.option_list_all: if opt.help != optparse.SUPPRESS_HELP: for opt_str in opt._long_opts + opt._short_opts: options.append((opt_str, opt.nargs)) # filter out previously specified options from available options prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]] options = [(x, v) for (x, v) in options if x not in prev_opts] # filter options by current input options = [(k, v) for k, v in options if k.startswith(current)] # get completion type given cwords and available subcommand options completion_type = get_path_completion_type( cwords, cword, subcommand.parser.option_list_all, ) # get completion files and directories if ``completion_type`` is # ``<file>``, ``<dir>`` or ``<path>`` if completion_type: options = auto_complete_paths(current, completion_type) options = ((opt, 0) for opt in options) for option in options: opt_label = option[0] # append '=' to options which require args if option[1] and option[0][:2] == "--": opt_label += '=' print(opt_label) else: # show main parser options only when necessary opts = [i.option_list for i in parser.option_groups] opts.append(parser.option_list) opts = (o for it in opts for o in it) if current.startswith('-'): for opt in opts: if opt.help != optparse.SUPPRESS_HELP: subcommands += opt._long_opts + opt._short_opts else: # get completion type given cwords and all available options completion_type = get_path_completion_type(cwords, cword, opts) if completion_type: subcommands = auto_complete_paths(current, completion_type) print(' '.join([x for x in subcommands if x.startswith(current)])) sys.exit(1)
[ "def", "autocomplete", "(", ")", ":", "# Don't complete if user hasn't sourced bash_completion file.", "if", "'PIP_AUTO_COMPLETE'", "not", "in", "os", ".", "environ", ":", "return", "cwords", "=", "os", ".", "environ", "[", "'COMP_WORDS'", "]", ".", "split", "(", ...
Entry Point for completion of main and subcommand options.
[ "Entry", "Point", "for", "completion", "of", "main", "and", "subcommand", "options", "." ]
python
train
openstax/cnx-epub
cnxepub/adapters.py
https://github.com/openstax/cnx-epub/blob/f648a309eff551b0a68a115a98ddf7858149a2ea/cnxepub/adapters.py#L58-L68
def adapt_package(package): """Adapts ``.epub.Package`` to a ``BinderItem`` and cascades the adaptation downward to ``DocumentItem`` and ``ResourceItem``. The results of this process provide the same interface as ``.models.Binder``, ``.models.Document`` and ``.models.Resource``. """ navigation_item = package.navigation html = etree.parse(navigation_item.data) tree = parse_navigation_html_to_tree(html, navigation_item.name) return _node_to_model(tree, package)
[ "def", "adapt_package", "(", "package", ")", ":", "navigation_item", "=", "package", ".", "navigation", "html", "=", "etree", ".", "parse", "(", "navigation_item", ".", "data", ")", "tree", "=", "parse_navigation_html_to_tree", "(", "html", ",", "navigation_item...
Adapts ``.epub.Package`` to a ``BinderItem`` and cascades the adaptation downward to ``DocumentItem`` and ``ResourceItem``. The results of this process provide the same interface as ``.models.Binder``, ``.models.Document`` and ``.models.Resource``.
[ "Adapts", ".", "epub", ".", "Package", "to", "a", "BinderItem", "and", "cascades", "the", "adaptation", "downward", "to", "DocumentItem", "and", "ResourceItem", ".", "The", "results", "of", "this", "process", "provide", "the", "same", "interface", "as", ".", ...
python
train
ellmetha/django-machina
machina/apps/forum_moderation/views.py
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_moderation/views.py#L267-L269
def post(self, request, *args, **kwargs): """ Handles POST requests. """ return self.update_type(request, *args, **kwargs)
[ "def", "post", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "update_type", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Handles POST requests.
[ "Handles", "POST", "requests", "." ]
python
train
saltstack/salt
salt/key.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/key.py#L518-L540
def list_keys(self): ''' Return a dict of managed keys and what the key status are ''' key_dirs = self._check_minions_directories() ret = {} for dir_ in key_dirs: if dir_ is None: continue ret[os.path.basename(dir_)] = [] try: for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(dir_)): if not fn_.startswith('.'): if os.path.isfile(os.path.join(dir_, fn_)): ret[os.path.basename(dir_)].append( salt.utils.stringutils.to_unicode(fn_) ) except (OSError, IOError): # key dir kind is not created yet, just skip continue return ret
[ "def", "list_keys", "(", "self", ")", ":", "key_dirs", "=", "self", ".", "_check_minions_directories", "(", ")", "ret", "=", "{", "}", "for", "dir_", "in", "key_dirs", ":", "if", "dir_", "is", "None", ":", "continue", "ret", "[", "os", ".", "path", "...
Return a dict of managed keys and what their key statuses are
[ "Return", "a", "dict", "of", "managed", "keys", "and", "what", "the", "key", "status", "are" ]
python
train
DinoTools/python-overpy
overpy/__init__.py
https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1008-L1061
def from_xml(cls, child, result=None): """ Create new way element from XML data :param child: XML node to be parsed :type child: xml.etree.ElementTree.Element :param result: The result this node belongs to :type result: overpy.Result :return: New Way oject :rtype: overpy.Way :raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match :raises ValueError: If the ref attribute of the xml node is not provided :raises ValueError: If a tag doesn't have a name """ if child.tag.lower() != cls._type_value: raise exception.ElementDataWrongType( type_expected=cls._type_value, type_provided=child.tag.lower() ) tags = {} node_ids = [] center_lat = None center_lon = None for sub_child in child: if sub_child.tag.lower() == "tag": name = sub_child.attrib.get("k") if name is None: raise ValueError("Tag without name/key.") value = sub_child.attrib.get("v") tags[name] = value if sub_child.tag.lower() == "nd": ref_id = sub_child.attrib.get("ref") if ref_id is None: raise ValueError("Unable to find required ref value.") ref_id = int(ref_id) node_ids.append(ref_id) if sub_child.tag.lower() == "center": (center_lat, center_lon) = cls.get_center_from_xml_dom(sub_child=sub_child) way_id = child.attrib.get("id") if way_id is not None: way_id = int(way_id) attributes = {} ignore = ["id"] for n, v in child.attrib.items(): if n in ignore: continue attributes[n] = v return cls(way_id=way_id, center_lat=center_lat, center_lon=center_lon, attributes=attributes, node_ids=node_ids, tags=tags, result=result)
[ "def", "from_xml", "(", "cls", ",", "child", ",", "result", "=", "None", ")", ":", "if", "child", ".", "tag", ".", "lower", "(", ")", "!=", "cls", ".", "_type_value", ":", "raise", "exception", ".", "ElementDataWrongType", "(", "type_expected", "=", "c...
Create a new way element from XML data :param child: XML node to be parsed :type child: xml.etree.ElementTree.Element :param result: The result this node belongs to :type result: overpy.Result :return: New Way object :rtype: overpy.Way :raises overpy.exception.ElementDataWrongType: If the name of the xml child node doesn't match :raises ValueError: If the ref attribute of the xml node is not provided :raises ValueError: If a tag doesn't have a name
[ "Create", "new", "way", "element", "from", "XML", "data" ]
python
train
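A small usage sketch for Way.from_xml, feeding it a hand-written <way> element; result is left as None, which the signature in the record allows:

import xml.etree.ElementTree as ET
import overpy

xml_way = '<way id="1"><nd ref="10"/><nd ref="11"/><tag k="highway" v="residential"/></way>'
way = overpy.Way.from_xml(ET.fromstring(xml_way))
print(way.id, way.tags)  # 1 {'highway': 'residential'}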
square/connect-python-sdk
squareconnect/models/model_break.py
https://github.com/square/connect-python-sdk/blob/adc1d09e817986cdc607391580f71d6b48ed4066/squareconnect/models/model_break.py#L153-L167
def break_type_id(self, break_type_id): """ Sets the break_type_id of this ModelBreak. The `BreakType` this `Break` was templated on. :param break_type_id: The break_type_id of this ModelBreak. :type: str """ if break_type_id is None: raise ValueError("Invalid value for `break_type_id`, must not be `None`") if len(break_type_id) < 1: raise ValueError("Invalid value for `break_type_id`, length must be greater than or equal to `1`") self._break_type_id = break_type_id
[ "def", "break_type_id", "(", "self", ",", "break_type_id", ")", ":", "if", "break_type_id", "is", "None", ":", "raise", "ValueError", "(", "\"Invalid value for `break_type_id`, must not be `None`\"", ")", "if", "len", "(", "break_type_id", ")", "<", "1", ":", "rai...
Sets the break_type_id of this ModelBreak. The `BreakType` this `Break` was templated on. :param break_type_id: The break_type_id of this ModelBreak. :type: str
[ "Sets", "the", "break_type_id", "of", "this", "ModelBreak", ".", "The", "BreakType", "this", "Break", "was", "templated", "on", "." ]
python
train
arista-eosplus/pyeapi
pyeapi/client.py
https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/client.py#L546-L581
def config(self, commands, **kwargs): """Configures the node with the specified commands This method is used to send configuration commands to the node. It will take either a string or a list and prepend the necessary commands to put the session into config mode. Args: commands (str, list): The commands to send to the node in config mode. If the commands argument is a string it will be cast to a list. The list of commands will also be prepended with the necessary commands to put the session in config mode. **kwargs: Additional keyword arguments for expanded eAPI functionality. Only supported eAPI params are used in building the request Returns: The config method will return a list of dictionaries with the output from each command. The function will strip the response from any commands it prepends. """ commands = make_iterable(commands) commands = list(commands) # push the configure command onto the command stack commands.insert(0, 'configure terminal') response = self.run_commands(commands, **kwargs) if self.autorefresh: self.refresh() # pop the configure command output off the stack response.pop(0) return response
[ "def", "config", "(", "self", ",", "commands", ",", "*", "*", "kwargs", ")", ":", "commands", "=", "make_iterable", "(", "commands", ")", "commands", "=", "list", "(", "commands", ")", "# push the configure command onto the command stack", "commands", ".", "inse...
Configures the node with the specified commands This method is used to send configuration commands to the node. It will take either a string or a list and prepend the necessary commands to put the session into config mode. Args: commands (str, list): The commands to send to the node in config mode. If the commands argument is a string it will be cast to a list. The list of commands will also be prepended with the necessary commands to put the session in config mode. **kwargs: Additional keyword arguments for expanded eAPI functionality. Only supported eAPI params are used in building the request Returns: The config method will return a list of dictionaries with the output from each command. The function will strip the response from any commands it prepends.
[ "Configures", "the", "node", "with", "the", "specified", "commands" ]
python
train
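A hedged usage sketch for the `config` method documented in the record above. The profile name `veos01` and the interface commands are illustrative assumptions, not taken from the record; `pyeapi.connect_to` loads a named connection from an eapi.conf file and returns a Node.

```python
# Illustrative only: assumes an eapi.conf profile named "veos01" exists.
import pyeapi

node = pyeapi.connect_to('veos01')           # Node built from the profile
# config() accepts a string or a list; it prepends "configure terminal"
# and strips that entry from the returned output.
output = node.config(['interface Ethernet1',
                      'description uplink to core'])
print(output)                                # one dict per command sent
```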
awslabs/serverless-application-model
samtranslator/plugins/globals/globals.py
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/plugins/globals/globals.py#L286-L314
def _do_merge(self, global_value, local_value): """ Actually perform the merge operation for the given inputs. This method is used as part of the recursion. Therefore input values can be of any type. So is the output. :param global_value: Global value to be merged :param local_value: Local value to be merged :return: Merged result """ token_global = self._token_of(global_value) token_local = self._token_of(local_value) # The following statements codify the rules explained in the doctring above if token_global != token_local: return self._prefer_local(global_value, local_value) elif self.TOKEN.PRIMITIVE == token_global == token_local: return self._prefer_local(global_value, local_value) elif self.TOKEN.DICT == token_global == token_local: return self._merge_dict(global_value, local_value) elif self.TOKEN.LIST == token_global == token_local: return self._merge_lists(global_value, local_value) else: raise TypeError( "Unsupported type of objects. GlobalType={}, LocalType={}".format(token_global, token_local))
[ "def", "_do_merge", "(", "self", ",", "global_value", ",", "local_value", ")", ":", "token_global", "=", "self", ".", "_token_of", "(", "global_value", ")", "token_local", "=", "self", ".", "_token_of", "(", "local_value", ")", "# The following statements codify t...
Actually perform the merge operation for the given inputs. This method is used as part of the recursion. Therefore input values can be of any type. So is the output. :param global_value: Global value to be merged :param local_value: Local value to be merged :return: Merged result
[ "Actually", "perform", "the", "merge", "operation", "for", "the", "given", "inputs", ".", "This", "method", "is", "used", "as", "part", "of", "the", "recursion", ".", "Therefore", "input", "values", "can", "be", "of", "any", "type", ".", "So", "is", "the...
python
train
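The merge behaviour described above (prefer the local value on a type mismatch or for primitives, merge dicts recursively, combine lists) can be illustrated with a small standalone sketch. This is not the SAM plugin's implementation, and the list-combination rule shown is an assumption for illustration only.

```python
# Standalone illustration of the merge rules described above (not SAM code).
def merge(global_value, local_value):
    if isinstance(global_value, dict) and isinstance(local_value, dict):
        merged = dict(global_value)
        for key, value in local_value.items():
            merged[key] = merge(global_value[key], value) if key in global_value else value
        return merged
    if isinstance(global_value, list) and isinstance(local_value, list):
        return global_value + local_value   # assumed: lists are combined
    return local_value                      # primitives / mismatched types: local wins

print(merge({"Timeout": 30, "Tags": {"env": "dev"}}, {"Tags": {"team": "x"}}))
# {'Timeout': 30, 'Tags': {'env': 'dev', 'team': 'x'}}
```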
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/database.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/database.py#L382-L391
def _fix_incoming(self, son, collection): """Apply manipulators to an incoming SON object before it gets stored. :Parameters: - `son`: the son object going into the database - `collection`: the collection the son object is being saved in """ son = self._apply_incoming_manipulators(son, collection) son = self._apply_incoming_copying_manipulators(son, collection) return son
[ "def", "_fix_incoming", "(", "self", ",", "son", ",", "collection", ")", ":", "son", "=", "self", ".", "_apply_incoming_manipulators", "(", "son", ",", "collection", ")", "son", "=", "self", ".", "_apply_incoming_copying_manipulators", "(", "son", ",", "collec...
Apply manipulators to an incoming SON object before it gets stored. :Parameters: - `son`: the son object going into the database - `collection`: the collection the son object is being saved in
[ "Apply", "manipulators", "to", "an", "incoming", "SON", "object", "before", "it", "gets", "stored", "." ]
python
train
gabstopper/smc-python
smc/elements/profiles.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/elements/profiles.py#L259-L268
def create(cls, name, sandbox_data_center, portal_username=None, comment=None): """ Create a Sandbox Service element """ json = { 'name': name, 'sandbox_data_center': element_resolver(sandbox_data_center), 'portal_username': portal_username if portal_username else '', 'comment': comment} return ElementCreator(cls, json)
[ "def", "create", "(", "cls", ",", "name", ",", "sandbox_data_center", ",", "portal_username", "=", "None", ",", "comment", "=", "None", ")", ":", "json", "=", "{", "'name'", ":", "name", ",", "'sandbox_data_center'", ":", "element_resolver", "(", "sandbox_da...
Create a Sandbox Service element
[ "Create", "a", "Sandbox", "Service", "element" ]
python
train
PythonCharmers/python-future
src/future/backports/http/server.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/server.py#L988-L991
def is_python(self, path): """Test whether argument path is a Python script.""" head, tail = os.path.splitext(path) return tail.lower() in (".py", ".pyw")
[ "def", "is_python", "(", "self", ",", "path", ")", ":", "head", ",", "tail", "=", "os", ".", "path", ".", "splitext", "(", "path", ")", "return", "tail", ".", "lower", "(", ")", "in", "(", "\".py\"", ",", "\".pyw\"", ")" ]
Test whether argument path is a Python script.
[ "Test", "whether", "argument", "path", "is", "a", "Python", "script", "." ]
python
train
tanghaibao/jcvi
jcvi/projects/str.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/str.py#L1611-L1628
def get_lo_hi_from_CI(s, exclude=None): """ Parse the confidence interval from CI. >>> get_lo_hi_from_CI("20-20|40-60") (40, 60) """ a, b = s.split("|") ai, aj = a.split("-") bi, bj = b.split("-") los = [int(ai), int(bi)] his = [int(aj), int(bj)] if exclude and exclude in los: los.remove(exclude) if exclude and exclude in his: his.remove(exclude) return max(los), max(his)
[ "def", "get_lo_hi_from_CI", "(", "s", ",", "exclude", "=", "None", ")", ":", "a", ",", "b", "=", "s", ".", "split", "(", "\"|\"", ")", "ai", ",", "aj", "=", "a", ".", "split", "(", "\"-\"", ")", "bi", ",", "bj", "=", "b", ".", "split", "(", ...
Parse the confidence interval from CI. >>> get_lo_hi_from_CI("20-20|40-60") (40, 60)
[ "Parse", "the", "confidence", "interval", "from", "CI", "." ]
python
train
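A worked example of the parsing performed above, re-implemented inline for illustration; note that the two ranges are separated by `|`, matching the `s.split("|")` in the code.

```python
# Standalone walk-through of the parsing above (illustration only).
s = "20-20|40-60"
a, b = s.split("|")
ai, aj = a.split("-")
bi, bj = b.split("-")
los, his = [int(ai), int(bi)], [int(aj), int(bj)]
print(max(los), max(his))   # 40 60
```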
wummel/linkchecker
linkcheck/checker/urlbase.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/checker/urlbase.py#L485-L497
def close_connection (self): """ Close an opened url connection. """ if self.url_connection is None: # no connection is open return try: self.url_connection.close() except Exception: # ignore close errors pass self.url_connection = None
[ "def", "close_connection", "(", "self", ")", ":", "if", "self", ".", "url_connection", "is", "None", ":", "# no connection is open", "return", "try", ":", "self", ".", "url_connection", ".", "close", "(", ")", "except", "Exception", ":", "# ignore close errors",...
Close an opened url connection.
[ "Close", "an", "opened", "url", "connection", "." ]
python
train
PyCQA/pylint
pylint/checkers/imports.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/imports.py#L473-L497
def visit_import(self, node): """triggered when an import statement is seen""" self._check_reimport(node) self._check_import_as_rename(node) modnode = node.root() names = [name for name, _ in node.names] if len(names) >= 2: self.add_message("multiple-imports", args=", ".join(names), node=node) for name in names: self._check_deprecated_module(node, name) self._check_preferred_module(node, name) imported_module = self._get_imported_module(node, name) if isinstance(node.parent, astroid.Module): # Allow imports nested self._check_position(node) if isinstance(node.scope(), astroid.Module): self._record_import(node, imported_module) if imported_module is None: continue self._check_relative_import(modnode, node, imported_module, name) self._add_imported_module(node, imported_module.name)
[ "def", "visit_import", "(", "self", ",", "node", ")", ":", "self", ".", "_check_reimport", "(", "node", ")", "self", ".", "_check_import_as_rename", "(", "node", ")", "modnode", "=", "node", ".", "root", "(", ")", "names", "=", "[", "name", "for", "nam...
triggered when an import statement is seen
[ "triggered", "when", "an", "import", "statement", "is", "seen" ]
python
test
apache/airflow
airflow/contrib/hooks/bigquery_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L634-L853
def run_query(self, sql, destination_dataset_table=None, write_disposition='WRITE_EMPTY', allow_large_results=False, flatten_results=None, udf_config=None, use_legacy_sql=None, maximum_billing_tier=None, maximum_bytes_billed=None, create_disposition='CREATE_IF_NEEDED', query_params=None, labels=None, schema_update_options=(), priority='INTERACTIVE', time_partitioning=None, api_resource_configs=None, cluster_fields=None, location=None): """ Executes a BigQuery SQL query. Optionally persists results in a BigQuery table. See here: https://cloud.google.com/bigquery/docs/reference/v2/jobs For more details about these parameters. :param sql: The BigQuery SQL to execute. :type sql: str :param destination_dataset_table: The dotted ``<dataset>.<table>`` BigQuery table to save the query results. :type destination_dataset_table: str :param write_disposition: What to do if the table already exists in BigQuery. :type write_disposition: str :param allow_large_results: Whether to allow large results. :type allow_large_results: bool :param flatten_results: If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. ``allowLargeResults`` must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened. :type flatten_results: bool :param udf_config: The User Defined Function configuration for the query. See https://cloud.google.com/bigquery/user-defined-functions for details. :type udf_config: list :param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false). If `None`, defaults to `self.use_legacy_sql`. :type use_legacy_sql: bool :param api_resource_configs: a dictionary that contain params 'configuration' applied for Google BigQuery Jobs API: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs for example, {'query': {'useQueryCache': False}}. You could use it if you need to provide some params that are not supported by the BigQueryHook like args. :type api_resource_configs: dict :param maximum_billing_tier: Positive integer that serves as a multiplier of the basic price. :type maximum_billing_tier: int :param maximum_bytes_billed: Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default. :type maximum_bytes_billed: float :param create_disposition: Specifies whether the job is allowed to create new tables. :type create_disposition: str :param query_params: a list of dictionary containing query parameter types and values, passed to BigQuery :type query_params: list :param labels: a dictionary containing labels for the job/query, passed to BigQuery :type labels: dict :param schema_update_options: Allows the schema of the destination table to be updated as a side effect of the query job. :type schema_update_options: tuple :param priority: Specifies a priority for the query. Possible values include INTERACTIVE and BATCH. The default value is INTERACTIVE. :type priority: str :param time_partitioning: configure optional time partitioning fields i.e. partition by field, type and expiration as per API specifications. :type time_partitioning: dict :param cluster_fields: Request that the result of this query be stored sorted by one or more columns. This is only available in combination with time_partitioning. The order of columns given determines the sort order. :type cluster_fields: list[str] :param location: The geographic location of the job. 
Required except for US and EU. See details at https://cloud.google.com/bigquery/docs/locations#specifying_your_location :type location: str """ if time_partitioning is None: time_partitioning = {} if location: self.location = location if not api_resource_configs: api_resource_configs = self.api_resource_configs else: _validate_value('api_resource_configs', api_resource_configs, dict) configuration = deepcopy(api_resource_configs) if 'query' not in configuration: configuration['query'] = {} else: _validate_value("api_resource_configs['query']", configuration['query'], dict) if sql is None and not configuration['query'].get('query', None): raise TypeError('`BigQueryBaseCursor.run_query` ' 'missing 1 required positional argument: `sql`') # BigQuery also allows you to define how you want a table's schema to change # as a side effect of a query job # for more details: # https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.schemaUpdateOptions allowed_schema_update_options = [ 'ALLOW_FIELD_ADDITION', "ALLOW_FIELD_RELAXATION" ] if not set(allowed_schema_update_options ).issuperset(set(schema_update_options)): raise ValueError("{0} contains invalid schema update options. " "Please only use one or more of the following " "options: {1}" .format(schema_update_options, allowed_schema_update_options)) if schema_update_options: if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]: raise ValueError("schema_update_options is only " "allowed if write_disposition is " "'WRITE_APPEND' or 'WRITE_TRUNCATE'.") if destination_dataset_table: destination_project, destination_dataset, destination_table = \ _split_tablename(table_input=destination_dataset_table, default_project_id=self.project_id) destination_dataset_table = { 'projectId': destination_project, 'datasetId': destination_dataset, 'tableId': destination_table, } if cluster_fields: cluster_fields = {'fields': cluster_fields} query_param_list = [ (sql, 'query', None, six.string_types), (priority, 'priority', 'INTERACTIVE', six.string_types), (use_legacy_sql, 'useLegacySql', self.use_legacy_sql, bool), (query_params, 'queryParameters', None, list), (udf_config, 'userDefinedFunctionResources', None, list), (maximum_billing_tier, 'maximumBillingTier', None, int), (maximum_bytes_billed, 'maximumBytesBilled', None, float), (time_partitioning, 'timePartitioning', {}, dict), (schema_update_options, 'schemaUpdateOptions', None, tuple), (destination_dataset_table, 'destinationTable', None, dict), (cluster_fields, 'clustering', None, dict), ] for param_tuple in query_param_list: param, param_name, param_default, param_type = param_tuple if param_name not in configuration['query'] and param in [None, {}, ()]: if param_name == 'timePartitioning': param_default = _cleanse_time_partitioning( destination_dataset_table, time_partitioning) param = param_default if param not in [None, {}, ()]: _api_resource_configs_duplication_check( param_name, param, configuration['query']) configuration['query'][param_name] = param # check valid type of provided param, # it last step because we can get param from 2 sources, # and first of all need to find it _validate_value(param_name, configuration['query'][param_name], param_type) if param_name == 'schemaUpdateOptions' and param: self.log.info("Adding experimental 'schemaUpdateOptions': " "%s", schema_update_options) if param_name == 'destinationTable': for key in ['projectId', 'datasetId', 'tableId']: if key not in configuration['query']['destinationTable']: raise ValueError( "Not correct 
'destinationTable' in " "api_resource_configs. 'destinationTable' " "must be a dict with {'projectId':'', " "'datasetId':'', 'tableId':''}") configuration['query'].update({ 'allowLargeResults': allow_large_results, 'flattenResults': flatten_results, 'writeDisposition': write_disposition, 'createDisposition': create_disposition, }) if 'useLegacySql' in configuration['query'] and configuration['query']['useLegacySql'] and\ 'queryParameters' in configuration['query']: raise ValueError("Query parameters are not allowed " "when using legacy SQL") if labels: _api_resource_configs_duplication_check( 'labels', labels, configuration) configuration['labels'] = labels return self.run_with_configuration(configuration)
[ "def", "run_query", "(", "self", ",", "sql", ",", "destination_dataset_table", "=", "None", ",", "write_disposition", "=", "'WRITE_EMPTY'", ",", "allow_large_results", "=", "False", ",", "flatten_results", "=", "None", ",", "udf_config", "=", "None", ",", "use_l...
Executes a BigQuery SQL query. Optionally persists results in a BigQuery table. See here: https://cloud.google.com/bigquery/docs/reference/v2/jobs For more details about these parameters. :param sql: The BigQuery SQL to execute. :type sql: str :param destination_dataset_table: The dotted ``<dataset>.<table>`` BigQuery table to save the query results. :type destination_dataset_table: str :param write_disposition: What to do if the table already exists in BigQuery. :type write_disposition: str :param allow_large_results: Whether to allow large results. :type allow_large_results: bool :param flatten_results: If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. ``allowLargeResults`` must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened. :type flatten_results: bool :param udf_config: The User Defined Function configuration for the query. See https://cloud.google.com/bigquery/user-defined-functions for details. :type udf_config: list :param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false). If `None`, defaults to `self.use_legacy_sql`. :type use_legacy_sql: bool :param api_resource_configs: a dictionary that contain params 'configuration' applied for Google BigQuery Jobs API: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs for example, {'query': {'useQueryCache': False}}. You could use it if you need to provide some params that are not supported by the BigQueryHook like args. :type api_resource_configs: dict :param maximum_billing_tier: Positive integer that serves as a multiplier of the basic price. :type maximum_billing_tier: int :param maximum_bytes_billed: Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default. :type maximum_bytes_billed: float :param create_disposition: Specifies whether the job is allowed to create new tables. :type create_disposition: str :param query_params: a list of dictionary containing query parameter types and values, passed to BigQuery :type query_params: list :param labels: a dictionary containing labels for the job/query, passed to BigQuery :type labels: dict :param schema_update_options: Allows the schema of the destination table to be updated as a side effect of the query job. :type schema_update_options: tuple :param priority: Specifies a priority for the query. Possible values include INTERACTIVE and BATCH. The default value is INTERACTIVE. :type priority: str :param time_partitioning: configure optional time partitioning fields i.e. partition by field, type and expiration as per API specifications. :type time_partitioning: dict :param cluster_fields: Request that the result of this query be stored sorted by one or more columns. This is only available in combination with time_partitioning. The order of columns given determines the sort order. :type cluster_fields: list[str] :param location: The geographic location of the job. Required except for US and EU. See details at https://cloud.google.com/bigquery/docs/locations#specifying_your_location :type location: str
[ "Executes", "a", "BigQuery", "SQL", "query", ".", "Optionally", "persists", "results", "in", "a", "BigQuery", "table", ".", "See", "here", ":" ]
python
test
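A hedged sketch of how `run_query` is typically reached from the hook; the connection id, SQL, and destination table below are illustrative assumptions, not values from the record.

```python
# Sketch only: assumes an Airflow connection named "bigquery_default"
# and a writable destination dataset; all names are illustrative.
from airflow.contrib.hooks.bigquery_hook import BigQueryHook

hook = BigQueryHook(bigquery_conn_id='bigquery_default', use_legacy_sql=False)
cursor = hook.get_conn().cursor()        # cursor exposing run_query()
cursor.run_query(
    sql='SELECT 1 AS answer',
    destination_dataset_table='my_dataset.answers',
    write_disposition='WRITE_TRUNCATE',
)
```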
hozn/stravalib
stravalib/model.py
https://github.com/hozn/stravalib/blob/5500ebc39e0bf4706bb1ca4c27b25e56becaaa5f/stravalib/model.py#L362-L375
def is_authenticated_athlete(self): """ :return: Boolean as to whether the athlete is the authenticated athlete. """ if self._is_authenticated is None: if self.resource_state == DETAILED: # If the athlete is in detailed state it must be the authenticated athlete self._is_authenticated = True else: # We need to check this athlete's id matches the authenticated athlete's id self.assert_bind_client() authenticated_athlete = self.bind_client.get_athlete() self._is_authenticated = authenticated_athlete.id == self.id return self._is_authenticated
[ "def", "is_authenticated_athlete", "(", "self", ")", ":", "if", "self", ".", "_is_authenticated", "is", "None", ":", "if", "self", ".", "resource_state", "==", "DETAILED", ":", "# If the athlete is in detailed state it must be the authenticated athlete", "self", ".", "_...
:return: Boolean as to whether the athlete is the authenticated athlete.
[ ":", "return", ":", "Boolean", "as", "to", "whether", "the", "athlete", "is", "the", "authenticated", "athlete", "." ]
python
train
ssalentin/plip
plip/modules/preparation.py
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/preparation.py#L777-L800
def refine_hbonds_ldon(self, all_hbonds, salt_lneg, salt_pneg): """Refine selection of hydrogen bonds. Do not allow groups which already form salt bridges to form H-Bonds.""" i_set = {} for hbond in all_hbonds: i_set[hbond] = False for salt in salt_pneg: protidx, ligidx = [at.idx for at in salt.negative.atoms], [at.idx for at in salt.positive.atoms] if hbond.d.idx in ligidx and hbond.a.idx in protidx: i_set[hbond] = True for salt in salt_lneg: protidx, ligidx = [at.idx for at in salt.positive.atoms], [at.idx for at in salt.negative.atoms] if hbond.d.idx in ligidx and hbond.a.idx in protidx: i_set[hbond] = True # Allow only one hydrogen bond per donor, select interaction with larger donor angle second_set = {} hbls = [k for k in i_set.keys() if not i_set[k]] for hbl in hbls: if hbl.d.idx not in second_set: second_set[hbl.d.idx] = (hbl.angle, hbl) else: if second_set[hbl.d.idx][0] < hbl.angle: second_set[hbl.d.idx] = (hbl.angle, hbl) return [hb[1] for hb in second_set.values()]
[ "def", "refine_hbonds_ldon", "(", "self", ",", "all_hbonds", ",", "salt_lneg", ",", "salt_pneg", ")", ":", "i_set", "=", "{", "}", "for", "hbond", "in", "all_hbonds", ":", "i_set", "[", "hbond", "]", "=", "False", "for", "salt", "in", "salt_pneg", ":", ...
Refine selection of hydrogen bonds. Do not allow groups which already form salt bridges to form H-Bonds.
[ "Refine", "selection", "of", "hydrogen", "bonds", ".", "Do", "not", "allow", "groups", "which", "already", "form", "salt", "bridges", "to", "form", "H", "-", "Bonds", "." ]
python
train
yvesalexandre/bandicoot
bandicoot/weekmatrix.py
https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/weekmatrix.py#L290-L307
def _find_weektime(datetime, time_type='min'): """ Finds the minutes/seconds away from midnight between Sunday and Monday. Parameters ---------- datetime : datetime The date and time that needs to be converted. time_type : 'min' or 'sec' States whether the time difference should be specified in seconds or minutes. """ if time_type == 'sec': return datetime.weekday() * 24 * 60 * 60 + datetime.hour * 60 * 60 + datetime.minute * 60 + datetime.second elif time_type == 'min': return datetime.weekday() * 24 * 60 + datetime.hour * 60 + datetime.minute else: raise ValueError("Invalid time type specified.")
[ "def", "_find_weektime", "(", "datetime", ",", "time_type", "=", "'min'", ")", ":", "if", "time_type", "==", "'sec'", ":", "return", "datetime", ".", "weekday", "(", ")", "*", "24", "*", "60", "*", "60", "+", "datetime", ".", "hour", "*", "60", "*", ...
Finds the minutes/seconds away from midnight between Sunday and Monday. Parameters ---------- datetime : datetime The date and time that needs to be converted. time_type : 'min' or 'sec' States whether the time difference should be specified in seconds or minutes.
[ "Finds", "the", "minutes", "/", "seconds", "away", "from", "midnight", "between", "Sunday", "and", "Monday", "." ]
python
train
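A quick check of the `'min'` branch of the arithmetic above, using an arbitrary example timestamp; Python's `weekday()` counts Monday as 0, so the offset is measured from the midnight that starts Monday.

```python
# Arithmetic check for the 'min' branch (illustrative timestamp).
from datetime import datetime

dt = datetime(2024, 1, 2, 8, 30)                 # a Tuesday, 08:30
minutes = dt.weekday() * 24 * 60 + dt.hour * 60 + dt.minute
print(minutes)                                   # 1*1440 + 480 + 30 = 1950
```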
tensorflow/probability
tensorflow_probability/python/distributions/binomial.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/binomial.py#L43-L62
def _bdtr(k, n, p): """The binomial cumulative distribution function. Args: k: floating point `Tensor`. n: floating point `Tensor`. p: floating point `Tensor`. Returns: `sum_{j=0}^k (n choose j) p^j (1 - p)^(n - j)`. """ # Trick for getting safe backprop/gradients into n, k when # betainc(a = 0, ..) = nan # Write: # where(unsafe, safe_output, betainc(where(unsafe, safe_input, input))) ones = tf.ones_like(n - k) k_eq_n = tf.equal(k, n) safe_dn = tf.where(k_eq_n, ones, n - k) dk = tf.math.betainc(a=safe_dn, b=k + 1, x=1 - p) return tf.where(k_eq_n, ones, dk)
[ "def", "_bdtr", "(", "k", ",", "n", ",", "p", ")", ":", "# Trick for getting safe backprop/gradients into n, k when", "# betainc(a = 0, ..) = nan", "# Write:", "# where(unsafe, safe_output, betainc(where(unsafe, safe_input, input)))", "ones", "=", "tf", ".", "ones_like", "(...
The binomial cumulative distribution function. Args: k: floating point `Tensor`. n: floating point `Tensor`. p: floating point `Tensor`. Returns: `sum_{j=0}^k (n choose j) p^j (1 - p)^(n - j)`.
[ "The", "binomial", "cumulative", "distribution", "function", "." ]
python
test
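The incomplete-beta identity used above can be cross-checked numerically with SciPy (SciPy is not used by the record; this is an independent sketch). The binomial CDF `sum_{j=0}^k C(n,j) p^j (1-p)^(n-j)` equals the regularized incomplete beta `I_{1-p}(n-k, k+1)`, which is what `betainc(a=n-k, b=k+1, x=1-p)` computes.

```python
# Independent numerical check of the betainc identity (SciPy, not TF).
from scipy.stats import binom
from scipy.special import betainc

n, k, p = 10, 3, 0.4
print(binom.cdf(k, n, p))                 # ~0.3823
print(betainc(n - k, k + 1, 1 - p))       # same value: I_{1-p}(n-k, k+1)
```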
learningequality/ricecooker
ricecooker/classes/questions.py
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/classes/questions.py#L229-L241
def validate(self): """ validate: Makes sure perseus question is valid Args: None Returns: boolean indicating if perseus question is valid """ try: assert self.question == "", "Assumption Failed: Perseus question should not have a question" assert self.question_type == exercises.PERSEUS_QUESTION, "Assumption Failed: Question should be perseus type" assert self.answers == [], "Assumption Failed: Answer list should be empty for perseus question" assert self.hints == [], "Assumption Failed: Hints list should be empty for perseus question" return super(PerseusQuestion, self).validate() except AssertionError as ae: raise InvalidQuestionException("Invalid question: {0}".format(self.__dict__))
[ "def", "validate", "(", "self", ")", ":", "try", ":", "assert", "self", ".", "question", "==", "\"\"", ",", "\"Assumption Failed: Perseus question should not have a question\"", "assert", "self", ".", "question_type", "==", "exercises", ".", "PERSEUS_QUESTION", ",", ...
validate: Makes sure perseus question is valid Args: None Returns: boolean indicating if perseus question is valid
[ "validate", ":", "Makes", "sure", "perseus", "question", "is", "valid", "Args", ":", "None", "Returns", ":", "boolean", "indicating", "if", "perseus", "question", "is", "valid" ]
python
train
wummel/dosage
dosagelib/rss.py
https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/dosagelib/rss.py#L26-L30
def addElement(self, parent, tag, value): """Add an RSS item.""" elem = self.rss.createElement(tag) node = self.rss.createTextNode(value) return parent.appendChild(elem).appendChild(node)
[ "def", "addElement", "(", "self", ",", "parent", ",", "tag", ",", "value", ")", ":", "elem", "=", "self", ".", "rss", ".", "createElement", "(", "tag", ")", "node", "=", "self", ".", "rss", ".", "createTextNode", "(", "value", ")", "return", "parent"...
Add an RSS item.
[ "Add", "an", "RSS", "item", "." ]
python
train
nschloe/pygmsh
pygmsh/opencascade/geometry.py
https://github.com/nschloe/pygmsh/blob/1a1a07481aebe6c161b60dd31e0fbe1ddf330d61/pygmsh/opencascade/geometry.py#L163-L175
def boolean_intersection(self, entities, delete_first=True, delete_other=True): """Boolean intersection, see https://gmsh.info/doc/texinfo/gmsh.html#Boolean-operations input_entity and tool_entity are called object and tool in gmsh documentation. """ assert len(entities) > 1 return self._boolean_operation( "BooleanIntersection", [entities[0]], entities[1:], delete_first=delete_first, delete_other=delete_other, )
[ "def", "boolean_intersection", "(", "self", ",", "entities", ",", "delete_first", "=", "True", ",", "delete_other", "=", "True", ")", ":", "assert", "len", "(", "entities", ")", ">", "1", "return", "self", ".", "_boolean_operation", "(", "\"BooleanIntersection...
Boolean intersection, see https://gmsh.info/doc/texinfo/gmsh.html#Boolean-operations input_entity and tool_entity are called object and tool in gmsh documentation.
[ "Boolean", "intersection", "see", "https", ":", "//", "gmsh", ".", "info", "/", "doc", "/", "texinfo", "/", "gmsh", ".", "html#Boolean", "-", "operations", "input_entity", "and", "tool_entity", "are", "called", "object", "and", "tool", "in", "gmsh", "documen...
python
train
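A hedged usage sketch for the OpenCASCADE boolean above; the constructor arguments and shape helpers shown are assumed from the pygmsh 6.x API and may differ between versions.

```python
# Sketch only: API names assumed from the pygmsh 6.x opencascade module.
import pygmsh

geom = pygmsh.opencascade.Geometry(
    characteristic_length_min=0.05, characteristic_length_max=0.1)
box = geom.add_box([0.0, 0.0, 0.0], [1.0, 1.0, 1.0])
ball = geom.add_ball([1.0, 1.0, 1.0], 0.75)
geom.boolean_intersection([box, ball])      # object = box, tool = ball
mesh = pygmsh.generate_mesh(geom)
```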
LogicalDash/LiSE
ELiDE/ELiDE/board/arrow.py
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/board/arrow.py#L407-L422
def pos_along(self, pct): """Return coordinates for where a Pawn should be if it has travelled along ``pct`` of my length (between 0 and 1). Might get complex when I switch over to using beziers for arrows, but for now this is quite simple, using distance along a line segment. """ if pct < 0 or pct > 1: raise ValueError("Invalid portion") (ox, oy) = self.origin.center (dx, dy) = self.destination.center xdist = (dx - ox) * pct ydist = (dy - oy) * pct return (ox + xdist, oy + ydist)
[ "def", "pos_along", "(", "self", ",", "pct", ")", ":", "if", "pct", "<", "0", "or", "pct", ">", "1", ":", "raise", "ValueError", "(", "\"Invalid portion\"", ")", "(", "ox", ",", "oy", ")", "=", "self", ".", "origin", ".", "center", "(", "dx", ","...
Return coordinates for where a Pawn should be if it has travelled along ``pct`` of my length (between 0 and 1). Might get complex when I switch over to using beziers for arrows, but for now this is quite simple, using distance along a line segment.
[ "Return", "coordinates", "for", "where", "a", "Pawn", "should", "be", "if", "it", "has", "travelled", "along", "pct", "of", "my", "length", "(", "between", "0", "and", "1", ")", "." ]
python
train
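The linear interpolation described above is easy to check standalone; this is not ELiDE code, just the same arithmetic applied to plain coordinate pairs.

```python
# Standalone check of the interpolation described above.
def pos_along(origin, destination, pct):
    if pct < 0 or pct > 1:
        raise ValueError("Invalid portion")
    (ox, oy), (dx, dy) = origin, destination
    return (ox + (dx - ox) * pct, oy + (dy - oy) * pct)

print(pos_along((0, 0), (10, 4), 0.5))   # (5.0, 2.0)
```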
waqasbhatti/astrobase
astrobase/hatsurveys/hatlc.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/hatsurveys/hatlc.py#L1929-L2031
def main(): ''' This is called when we're executed from the commandline. The current usage from the command-line is described below:: usage: hatlc [-h] [--describe] hatlcfile read a HAT LC of any format and output to stdout positional arguments: hatlcfile path to the light curve you want to read and pipe to stdout optional arguments: -h, --help show this help message and exit --describe don't dump the columns, show only object info and LC metadata ''' # handle SIGPIPE sent by less, head, et al. import signal signal.signal(signal.SIGPIPE, signal.SIG_DFL) import argparse aparser = argparse.ArgumentParser( description='read a HAT LC of any format and output to stdout' ) aparser.add_argument( 'hatlcfile', action='store', type=str, help=("path to the light curve you want to read and pipe to stdout") ) aparser.add_argument( '--describe', action='store_true', default=False, help=("don't dump the columns, show only object info and LC metadata") ) args = aparser.parse_args() filetoread = args.hatlcfile if not os.path.exists(filetoread): LOGERROR("file provided: %s doesn't seem to exist" % filetoread) sys.exit(1) # figure out the type of LC this is filename = os.path.basename(filetoread) # switch based on filetype if filename.endswith('-hatlc.csv.gz') or filename.endswith('-csvlc.gz'): if args.describe: describe(read_csvlc(filename)) sys.exit(0) else: with gzip.open(filename,'rb') as infd: for line in infd: print(line.decode(),end='') elif filename.endswith('-hatlc.sqlite.gz'): lcdict, msg = read_and_filter_sqlitecurve(filetoread) # dump the description describe(lcdict, offsetwith='#') # stop here if describe is True if args.describe: sys.exit(0) # otherwise, continue to parse the cols, etc. # get the aperture names apertures = sorted(lcdict['lcapertures'].keys()) # update column defs per aperture for aper in apertures: COLUMNDEFS.update({'%s_%s' % (x, aper): COLUMNDEFS[x] for x in LC_MAG_COLUMNS}) COLUMNDEFS.update({'%s_%s' % (x, aper): COLUMNDEFS[x] for x in LC_ERR_COLUMNS}) COLUMNDEFS.update({'%s_%s' % (x, aper): COLUMNDEFS[x] for x in LC_FLAG_COLUMNS}) formstr = ','.join([COLUMNDEFS[x][1] for x in lcdict['columns']]) ndet = lcdict['objectinfo']['ndet'] for ind in range(ndet): line = [lcdict[x][ind] for x in lcdict['columns']] formline = formstr % tuple(line) print(formline) else: LOGERROR('unrecognized HATLC file: %s' % filetoread) sys.exit(1)
[ "def", "main", "(", ")", ":", "# handle SIGPIPE sent by less, head, et al.", "import", "signal", "signal", ".", "signal", "(", "signal", ".", "SIGPIPE", ",", "signal", ".", "SIG_DFL", ")", "import", "argparse", "aparser", "=", "argparse", ".", "ArgumentParser", ...
This is called when we're executed from the commandline. The current usage from the command-line is described below:: usage: hatlc [-h] [--describe] hatlcfile read a HAT LC of any format and output to stdout positional arguments: hatlcfile path to the light curve you want to read and pipe to stdout optional arguments: -h, --help show this help message and exit --describe don't dump the columns, show only object info and LC metadata
[ "This", "is", "called", "when", "we", "re", "executed", "from", "the", "commandline", "." ]
python
valid
astropy/photutils
photutils/aperture/ellipse.py
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/ellipse.py#L26-L98
def to_mask(self, method='exact', subpixels=5): """ Return a list of `~photutils.ApertureMask` objects, one for each aperture position. Parameters ---------- method : {'exact', 'center', 'subpixel'}, optional The method used to determine the overlap of the aperture on the pixel grid. Not all options are available for all aperture types. Note that the more precise methods are generally slower. The following methods are available: * ``'exact'`` (default): The the exact fractional overlap of the aperture and each pixel is calculated. The returned mask will contain values between 0 and 1. * ``'center'``: A pixel is considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. The returned mask will contain values only of 0 (out) and 1 (in). * ``'subpixel'``: A pixel is divided into subpixels (see the ``subpixels`` keyword), each of which are considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. If ``subpixels=1``, this method is equivalent to ``'center'``. The returned mask will contain values between 0 and 1. subpixels : int, optional For the ``'subpixel'`` method, resample pixels by this factor in each dimension. That is, each pixel is divided into ``subpixels ** 2`` subpixels. Returns ------- mask : list of `~photutils.ApertureMask` A list of aperture mask objects. """ use_exact, subpixels = self._translate_mask_mode(method, subpixels) if hasattr(self, 'a'): a = self.a b = self.b elif hasattr(self, 'a_in'): # annulus a = self.a_out b = self.b_out b_in = self.a_in * self.b_out / self.a_out else: raise ValueError('Cannot determine the aperture shape.') masks = [] for bbox, edges in zip(self.bounding_boxes, self._centered_edges): ny, nx = bbox.shape mask = elliptical_overlap_grid(edges[0], edges[1], edges[2], edges[3], nx, ny, a, b, self.theta, use_exact, subpixels) # subtract the inner ellipse for an annulus if hasattr(self, 'a_in'): mask -= elliptical_overlap_grid(edges[0], edges[1], edges[2], edges[3], nx, ny, self.a_in, b_in, self.theta, use_exact, subpixels) masks.append(ApertureMask(mask, bbox)) return masks
[ "def", "to_mask", "(", "self", ",", "method", "=", "'exact'", ",", "subpixels", "=", "5", ")", ":", "use_exact", ",", "subpixels", "=", "self", ".", "_translate_mask_mode", "(", "method", ",", "subpixels", ")", "if", "hasattr", "(", "self", ",", "'a'", ...
Return a list of `~photutils.ApertureMask` objects, one for each aperture position. Parameters ---------- method : {'exact', 'center', 'subpixel'}, optional The method used to determine the overlap of the aperture on the pixel grid. Not all options are available for all aperture types. Note that the more precise methods are generally slower. The following methods are available: * ``'exact'`` (default): The the exact fractional overlap of the aperture and each pixel is calculated. The returned mask will contain values between 0 and 1. * ``'center'``: A pixel is considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. The returned mask will contain values only of 0 (out) and 1 (in). * ``'subpixel'``: A pixel is divided into subpixels (see the ``subpixels`` keyword), each of which are considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. If ``subpixels=1``, this method is equivalent to ``'center'``. The returned mask will contain values between 0 and 1. subpixels : int, optional For the ``'subpixel'`` method, resample pixels by this factor in each dimension. That is, each pixel is divided into ``subpixels ** 2`` subpixels. Returns ------- mask : list of `~photutils.ApertureMask` A list of aperture mask objects.
[ "Return", "a", "list", "of", "~photutils", ".", "ApertureMask", "objects", "one", "for", "each", "aperture", "position", "." ]
python
train
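A hedged usage sketch for the method above via `EllipticalAperture`; the positions and shape parameters are made up, and the list return value follows the docstring of this (older) photutils API.

```python
# Sketch only: assumes the photutils version documented above,
# where to_mask() returns one ApertureMask per aperture position.
import numpy as np
from photutils import EllipticalAperture

data = np.ones((50, 50))
aper = EllipticalAperture([(20.0, 25.0), (35.0, 30.0)], a=6.0, b=3.0, theta=0.3)
masks = aper.to_mask(method='exact')
for mask in masks:
    print(mask.cutout(data).shape, float(mask.data.sum()))
```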
fancybits/pychannels
pychannels/__init__.py
https://github.com/fancybits/pychannels/blob/080f269b6d17d4622a0787000befe31bebc1a15d/pychannels/__init__.py#L30-L53
def _request(self, method, path, params=None): """Make the actual request and returns the parsed response.""" url = self._base_url + path try: if method == 'GET': response = requests.get(url, timeout=TIMEOUT) elif method == "POST": response = requests.post(url, params, timeout=TIMEOUT) elif method == "PUT": response = requests.put(url, params, timeout=TIMEOUT) elif method == "DELETE": response = requests.delete(url, timeout=TIMEOUT) if response: return response.json() else: return {'status': 'error'} except requests.exceptions.HTTPError: return {'status': 'error'} except requests.exceptions.Timeout: return {'status': 'offline'} except requests.exceptions.RequestException: return {'status': 'offline'}
[ "def", "_request", "(", "self", ",", "method", ",", "path", ",", "params", "=", "None", ")", ":", "url", "=", "self", ".", "_base_url", "+", "path", "try", ":", "if", "method", "==", "'GET'", ":", "response", "=", "requests", ".", "get", "(", "url"...
Make the actual request and returns the parsed response.
[ "Make", "the", "actual", "request", "and", "returns", "the", "parsed", "response", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xchart/xchart.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xchart/xchart.py#L540-L551
def setHorizontalAxis(self, axis): """ Sets the horizontal axis for this chart. :param axis | <XChartAxis> """ self._horizontalAxis = axis if axis: axis.setOrientation(Qt.Horizontal) self.uiXAxisVIEW.setFixedHeight(axis.minimumLabelHeight() + 5) self.uiXAxisVIEW.setVisible(axis is not None)
[ "def", "setHorizontalAxis", "(", "self", ",", "axis", ")", ":", "self", ".", "_horizontalAxis", "=", "axis", "if", "axis", ":", "axis", ".", "setOrientation", "(", "Qt", ".", "Horizontal", ")", "self", ".", "uiXAxisVIEW", ".", "setFixedHeight", "(", "axis"...
Sets the horizontal axis for this chart. :param axis | <XChartAxis>
[ "Sets", "the", "horizontal", "axis", "for", "this", "chart", ".", ":", "param", "axis", "|", "<XChartAxis", ">" ]
python
train
Nekroze/librarian
librarian/deck.py
https://github.com/Nekroze/librarian/blob/5d3da2980d91a637f80ad7164fbf204a2dd2bd58/librarian/deck.py#L86-L103
def contains_info(self, key, value): """ Returns how many cards in the deck have the specified value under the specified key in their info data. This method requires a library to be stored in the deck instance and will return `0` if there is no library. """ if self.library is None: return 0 load = self.library.load_card matches = 0 for code in self.cards: card = load(code) if card.get_info(key) == value: matches += 1 return matches
[ "def", "contains_info", "(", "self", ",", "key", ",", "value", ")", ":", "if", "self", ".", "library", "is", "None", ":", "return", "0", "load", "=", "self", ".", "library", ".", "load_card", "matches", "=", "0", "for", "code", "in", "self", ".", "...
Returns how many cards in the deck have the specified value under the specified key in their info data. This method requires a library to be stored in the deck instance and will return `0` if there is no library.
[ "Returns", "how", "many", "cards", "in", "the", "deck", "have", "the", "specified", "value", "under", "the", "specified", "key", "in", "their", "info", "data", "." ]
python
train
twisted/axiom
axiom/store.py
https://github.com/twisted/axiom/blob/7de70bc8fe1bb81f9c2339fba8daec9eb2e92b68/axiom/store.py#L2066-L2081
def getTypeID(self, tableClass): """ Retrieve the typeID associated with a particular table in the in-database schema for this Store. A typeID is an opaque integer representing the Item subclass, and the associated table in this Store's SQLite database. @param tableClass: a subclass of Item @return: an integer """ key = (tableClass.typeName, tableClass.schemaVersion) if key in self.typenameAndVersionToID: return self.typenameAndVersionToID[key] return self.transact(self._maybeCreateTable, tableClass, key)
[ "def", "getTypeID", "(", "self", ",", "tableClass", ")", ":", "key", "=", "(", "tableClass", ".", "typeName", ",", "tableClass", ".", "schemaVersion", ")", "if", "key", "in", "self", ".", "typenameAndVersionToID", ":", "return", "self", ".", "typenameAndVers...
Retrieve the typeID associated with a particular table in the in-database schema for this Store. A typeID is an opaque integer representing the Item subclass, and the associated table in this Store's SQLite database. @param tableClass: a subclass of Item @return: an integer
[ "Retrieve", "the", "typeID", "associated", "with", "a", "particular", "table", "in", "the", "in", "-", "database", "schema", "for", "this", "Store", ".", "A", "typeID", "is", "an", "opaque", "integer", "representing", "the", "Item", "subclass", "and", "the",...
python
train
simonw/datasette
datasette/app.py
https://github.com/simonw/datasette/blob/11b352b4d52fd02a422776edebb14f12e4994d3b/datasette/app.py#L414-L455
def inspect(self): " Inspect the database and return a dictionary of table metadata " if self._inspect: return self._inspect self._inspect = {} for filename in self.files: if filename is MEMORY: self._inspect[":memory:"] = { "hash": "000", "file": ":memory:", "size": 0, "views": {}, "tables": {}, } else: path = Path(filename) name = path.stem if name in self._inspect: raise Exception("Multiple files with same stem %s" % name) try: with sqlite3.connect( "file:{}?mode=ro".format(path), uri=True ) as conn: self.prepare_connection(conn) self._inspect[name] = { "hash": inspect_hash(path), "file": str(path), "size": path.stat().st_size, "views": inspect_views(conn), "tables": inspect_tables(conn, (self.metadata("databases") or {}).get(name, {})) } except sqlite3.OperationalError as e: if (e.args[0] == 'no such module: VirtualSpatialIndex'): raise click.UsageError( "It looks like you're trying to load a SpatiaLite" " database without first loading the SpatiaLite module." "\n\nRead more: https://datasette.readthedocs.io/en/latest/spatialite.html" ) else: raise return self._inspect
[ "def", "inspect", "(", "self", ")", ":", "if", "self", ".", "_inspect", ":", "return", "self", ".", "_inspect", "self", ".", "_inspect", "=", "{", "}", "for", "filename", "in", "self", ".", "files", ":", "if", "filename", "is", "MEMORY", ":", "self",...
Inspect the database and return a dictionary of table metadata
[ "Inspect", "the", "database", "and", "return", "a", "dictionary", "of", "table", "metadata" ]
python
train
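The read-only URI connection used inside `inspect` can be sketched on its own; the database filename below is hypothetical.

```python
# Minimal sketch of the read-only SQLite URI connection used above.
import sqlite3

path = "fixtures.db"                         # hypothetical file
conn = sqlite3.connect("file:{}?mode=ro".format(path), uri=True)
print(conn.execute(
    "select name from sqlite_master where type='table'").fetchall())
conn.close()
```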
pyviz/holoviews
holoviews/core/ndmapping.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/ndmapping.py#L152-L181
def _add_item(self, dim_vals, data, sort=True, update=True): """ Adds item to the data, applying dimension types and ensuring key conforms to Dimension type and values. """ sort = sort and self.sort if not isinstance(dim_vals, tuple): dim_vals = (dim_vals,) self._item_check(dim_vals, data) # Apply dimension types dim_types = zip([kd.type for kd in self.kdims], dim_vals) dim_vals = tuple(v if None in [t, v] else t(v) for t, v in dim_types) valid_vals = zip(self.kdims, dim_vals) for dim, val in valid_vals: if dim.values and val is not None and val not in dim.values: raise KeyError('%s dimension value %s not in' ' specified dimension values.' % (dim, repr(val))) # Updates nested data structures rather than simply overriding them. if (update and (dim_vals in self.data) and isinstance(self.data[dim_vals], (MultiDimensionalMapping, OrderedDict))): self.data[dim_vals].update(data) else: self.data[dim_vals] = data if sort: self._resort()
[ "def", "_add_item", "(", "self", ",", "dim_vals", ",", "data", ",", "sort", "=", "True", ",", "update", "=", "True", ")", ":", "sort", "=", "sort", "and", "self", ".", "sort", "if", "not", "isinstance", "(", "dim_vals", ",", "tuple", ")", ":", "dim...
Adds item to the data, applying dimension types and ensuring key conforms to Dimension type and values.
[ "Adds", "item", "to", "the", "data", "applying", "dimension", "types", "and", "ensuring", "key", "conforms", "to", "Dimension", "type", "and", "values", "." ]
python
train
datastax/python-driver
cassandra/cluster.py
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/cluster.py#L1721-L1728
def remove_host(self, host): """ Called when the control connection observes that a node has left the ring. Intended for internal use only. """ if host and self.metadata.remove_host(host): log.info("Cassandra host %s removed", host) self.on_remove(host)
[ "def", "remove_host", "(", "self", ",", "host", ")", ":", "if", "host", "and", "self", ".", "metadata", ".", "remove_host", "(", "host", ")", ":", "log", ".", "info", "(", "\"Cassandra host %s removed\"", ",", "host", ")", "self", ".", "on_remove", "(", ...
Called when the control connection observes that a node has left the ring. Intended for internal use only.
[ "Called", "when", "the", "control", "connection", "observes", "that", "a", "node", "has", "left", "the", "ring", ".", "Intended", "for", "internal", "use", "only", "." ]
python
train
user-cont/conu
conu/backend/podman/container.py
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/backend/podman/container.py#L346-L354
def get_metadata(self): """ Convert dictionary returned after podman inspect command into instance of ContainerMetadata class :return: ContainerMetadata, container metadata instance """ if self._metadata is None: self._metadata = ContainerMetadata() inspect_to_container_metadata(self._metadata, self.inspect(refresh=True), self.image) return self._metadata
[ "def", "get_metadata", "(", "self", ")", ":", "if", "self", ".", "_metadata", "is", "None", ":", "self", ".", "_metadata", "=", "ContainerMetadata", "(", ")", "inspect_to_container_metadata", "(", "self", ".", "_metadata", ",", "self", ".", "inspect", "(", ...
Convert dictionary returned after podman inspect command into instance of ContainerMetadata class :return: ContainerMetadata, container metadata instance
[ "Convert", "dictionary", "returned", "after", "podman", "inspect", "command", "into", "instance", "of", "ContainerMetadata", "class", ":", "return", ":", "ContainerMetadata", "container", "metadata", "instance" ]
python
train
google/apitools
apitools/base/py/gzip.py
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/gzip.py#L612-L617
def decompress(data): """Decompress a gzip compressed string in one shot. Return the decompressed string. """ with GzipFile(fileobj=io.BytesIO(data)) as f: return f.read()
[ "def", "decompress", "(", "data", ")", ":", "with", "GzipFile", "(", "fileobj", "=", "io", ".", "BytesIO", "(", "data", ")", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")" ]
Decompress a gzip compressed string in one shot. Return the decompressed string.
[ "Decompress", "a", "gzip", "compressed", "string", "in", "one", "shot", ".", "Return", "the", "decompressed", "string", "." ]
python
train
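A round-trip check using the standard-library `gzip` module, which exposes the same one-shot helpers as the vendored copy documented above.

```python
# Round-trip with the standard-library gzip module (same helper names).
import gzip

payload = b"hello, apitools"
blob = gzip.compress(payload)
assert gzip.decompress(blob) == payload
print(len(payload), "->", len(blob), "bytes")
```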