repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
titusjan/argos
argos/qt/registrytable.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/qt/registrytable.py#L184-L188
def itemFromIndex(self, index): """ Gets the item given the model index """ sourceIndex = self.mapToSource(index) return self.sourceModel().itemFromIndex(sourceIndex)
[ "def", "itemFromIndex", "(", "self", ",", "index", ")", ":", "sourceIndex", "=", "self", ".", "mapToSource", "(", "index", ")", "return", "self", ".", "sourceModel", "(", ")", ".", "itemFromIndex", "(", "sourceIndex", ")" ]
Gets the item given the model index
[ "Gets", "the", "item", "given", "the", "model", "index" ]
python
train
Mollom/mollom_python
mollom/mollom.py
https://github.com/Mollom/mollom_python/blob/dfacb63fd79f82c0eabde76b511116df5b51d6f1/mollom/mollom.py#L211-L246
def check_captcha(self, captcha_id, solution, author_name=None, author_url=None, author_mail=None, author_ip=None, author_id=None, author_open_id=None, honeypot=None ): """Checks a CAPTCHA that was solved by the end-user. Keyword arguments: captcha_id -- Unique identifier of the CAPTCHA solved. solution -- Solution provided by the end-user for the CAPTCHA. author_name -- The name of the content author. author_url -- The homepage/website URL of the content author. author_mail -- The e-mail address of the content author. author_ip -- The IP address of the content author. author_id -- The local user ID on the client site of the content author. author_open_id -- List of Open IDs of the content author. honeypot -- The value of a client-side honeypot form element, if non-empty. Returns: solved -- Boolean whether or not the CAPTCHA was solved correctly. If the CAPTCHA is associated with an unsure contents, it is recommended to recheck the content. """ check_catpcha_endpoint = Template("${rest_root}/captcha/${captcha_id}") url = check_catpcha_endpoint.substitute(rest_root=self._rest_root, captcha_id=captcha_id) data = {"solution": solution} response = self.__post_request(url, data) # Mollom returns "1" for success and "0" for failure return response["captcha"]["solved"] == "1"
[ "def", "check_captcha", "(", "self", ",", "captcha_id", ",", "solution", ",", "author_name", "=", "None", ",", "author_url", "=", "None", ",", "author_mail", "=", "None", ",", "author_ip", "=", "None", ",", "author_id", "=", "None", ",", "author_open_id", ...
Checks a CAPTCHA that was solved by the end-user. Keyword arguments: captcha_id -- Unique identifier of the CAPTCHA solved. solution -- Solution provided by the end-user for the CAPTCHA. author_name -- The name of the content author. author_url -- The homepage/website URL of the content author. author_mail -- The e-mail address of the content author. author_ip -- The IP address of the content author. author_id -- The local user ID on the client site of the content author. author_open_id -- List of Open IDs of the content author. honeypot -- The value of a client-side honeypot form element, if non-empty. Returns: solved -- Boolean whether or not the CAPTCHA was solved correctly. If the CAPTCHA is associated with an unsure contents, it is recommended to recheck the content.
[ "Checks", "a", "CAPTCHA", "that", "was", "solved", "by", "the", "end", "-", "user", "." ]
python
train
kakwa/ldapcherry
ldapcherry/backend/backendLdap.py
https://github.com/kakwa/ldapcherry/blob/b5e7cb6a44065abc30d164e72981b3713a172dda/ldapcherry/backend/backendLdap.py#L194-L238
def _connect(self): """Initialize an ldap client""" ldap_client = ldap.initialize(self.uri) ldap.set_option(ldap.OPT_REFERRALS, 0) ldap.set_option(ldap.OPT_TIMEOUT, self.timeout) if self.starttls == 'on': ldap.set_option(ldap.OPT_X_TLS_DEMAND, True) else: ldap.set_option(ldap.OPT_X_TLS_DEMAND, False) # set the CA file if declared and if necessary if self.ca and self.checkcert == 'on': # check if the CA file actually exists if os.path.isfile(self.ca): ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, self.ca) else: raise CaFileDontExist(self.ca) if self.checkcert == 'off': # this is dark magic # remove any of these two lines and it doesn't work ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER) ldap_client.set_option( ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER ) else: # this is even darker magic ldap_client.set_option( ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_DEMAND ) # it doesn't make sense to set it to never # (== don't check certifate) # but it only works with this option... # ... and it checks the certificat # (I've lost my sanity over this) ldap.set_option( ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER ) if self.starttls == 'on': try: ldap_client.start_tls_s() except Exception as e: self._exception_handler(e) return ldap_client
[ "def", "_connect", "(", "self", ")", ":", "ldap_client", "=", "ldap", ".", "initialize", "(", "self", ".", "uri", ")", "ldap", ".", "set_option", "(", "ldap", ".", "OPT_REFERRALS", ",", "0", ")", "ldap", ".", "set_option", "(", "ldap", ".", "OPT_TIMEOU...
Initialize an ldap client
[ "Initialize", "an", "ldap", "client" ]
python
train
pypa/pipenv
pipenv/vendor/jinja2/ext.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/ext.py#L119-L129
def call_method(self, name, args=None, kwargs=None, dyn_args=None, dyn_kwargs=None, lineno=None): """Call a method of the extension. This is a shortcut for :meth:`attr` + :class:`jinja2.nodes.Call`. """ if args is None: args = [] if kwargs is None: kwargs = [] return nodes.Call(self.attr(name, lineno=lineno), args, kwargs, dyn_args, dyn_kwargs, lineno=lineno)
[ "def", "call_method", "(", "self", ",", "name", ",", "args", "=", "None", ",", "kwargs", "=", "None", ",", "dyn_args", "=", "None", ",", "dyn_kwargs", "=", "None", ",", "lineno", "=", "None", ")", ":", "if", "args", "is", "None", ":", "args", "=", ...
Call a method of the extension. This is a shortcut for :meth:`attr` + :class:`jinja2.nodes.Call`.
[ "Call", "a", "method", "of", "the", "extension", ".", "This", "is", "a", "shortcut", "for", ":", "meth", ":", "attr", "+", ":", "class", ":", "jinja2", ".", "nodes", ".", "Call", "." ]
python
train
fossasia/AYABInterface
AYABInterface/interaction.py
https://github.com/fossasia/AYABInterface/blob/e2065eed8daf17b2936f6ca5e488c9bfb850914e/AYABInterface/interaction.py#L47-L59
def communicate_through(self, file): """Setup communication through a file. :rtype: AYABInterface.communication.Communication """ if self._communication is not None: raise ValueError("Already communicating.") self._communication = communication = Communication( file, self._get_needle_positions, self._machine, [self._on_message_received], right_end_needle=self.right_end_needle, left_end_needle=self.left_end_needle) return communication
[ "def", "communicate_through", "(", "self", ",", "file", ")", ":", "if", "self", ".", "_communication", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Already communicating.\"", ")", "self", ".", "_communication", "=", "communication", "=", "Communicati...
Setup communication through a file. :rtype: AYABInterface.communication.Communication
[ "Setup", "communication", "through", "a", "file", "." ]
python
train
hsolbrig/pyjsg
pyjsg/jsglib/jsg_object.py
https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/jsglib/jsg_object.py#L133-L140
def _default(self, obj: object): """ Return a serializable version of obj. Overrides JsonObj _default method :param obj: Object to be serialized :return: Serialized version of obj """ return None if obj is JSGNull else obj.val if type(obj) is AnyType else \ JSGObject._strip_nones(obj.__dict__) if isinstance(obj, JsonObj) \ else cast(JSGString, obj).val if issubclass(type(obj), JSGString) else str(obj)
[ "def", "_default", "(", "self", ",", "obj", ":", "object", ")", ":", "return", "None", "if", "obj", "is", "JSGNull", "else", "obj", ".", "val", "if", "type", "(", "obj", ")", "is", "AnyType", "else", "JSGObject", ".", "_strip_nones", "(", "obj", ".",...
Return a serializable version of obj. Overrides JsonObj _default method :param obj: Object to be serialized :return: Serialized version of obj
[ "Return", "a", "serializable", "version", "of", "obj", ".", "Overrides", "JsonObj", "_default", "method", ":", "param", "obj", ":", "Object", "to", "be", "serialized", ":", "return", ":", "Serialized", "version", "of", "obj" ]
python
train
rocky/python3-trepan
trepan/processor/command/base_submgr.py
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/processor/command/base_submgr.py#L107-L162
def help(self, args): """Give help for a command which has subcommands. This can be called in several ways: help cmd help cmd subcmd help cmd commands Our shtick is to give help for the overall command only if subcommand or 'commands' is not given. If a subcommand is given and found, then specific help for that is given. If 'commands' is given we will list the all the subcommands. """ if len(args) <= 2: # "help cmd". Give the general help for the command part. doc = self.__doc__ or self.run.__doc__ if doc: self.rst_msg(doc.rstrip('\n')) else: self.proc.intf[-1].errmsg('Sorry - author mess up. ' + 'No help registered for command' + self.name) pass return subcmd_name = args[2] if '*' == subcmd_name: self.section("List of subcommands for command '%s':" % self.name) self.msg(self.columnize_commands(self.cmds.list())) return # "help cmd subcmd". Give help specific for that subcommand. cmd = self.cmds.lookup(subcmd_name) if cmd: doc = cmd.__doc__ or cmd.run.__doc__ if doc: self.proc.rst_msg(doc.rstrip('\n')) else: self.proc.intf[-1] \ .errmsg('Sorry - author mess up. No help registered for ' 'subcommand %s of command %s' % (subcmd_name, self.name)) pass else: cmds = [c for c in self.cmds.list() if re.match('^' + subcmd_name, c) ] if cmds == []: self.errmsg("No %s subcommands found matching /^%s/. " "Try \"help\"." % (self.name, subcmd_name)) else: self.section("Subcommand(s) of \"%s\" matching /^%s/:" % (self.name, subcmd_name,)) self.msg_nocr(self.columnize_commands(cmds)) pass pass return
[ "def", "help", "(", "self", ",", "args", ")", ":", "if", "len", "(", "args", ")", "<=", "2", ":", "# \"help cmd\". Give the general help for the command part.", "doc", "=", "self", ".", "__doc__", "or", "self", ".", "run", ".", "__doc__", "if", "doc", ":",...
Give help for a command which has subcommands. This can be called in several ways: help cmd help cmd subcmd help cmd commands Our shtick is to give help for the overall command only if subcommand or 'commands' is not given. If a subcommand is given and found, then specific help for that is given. If 'commands' is given we will list the all the subcommands.
[ "Give", "help", "for", "a", "command", "which", "has", "subcommands", ".", "This", "can", "be", "called", "in", "several", "ways", ":", "help", "cmd", "help", "cmd", "subcmd", "help", "cmd", "commands" ]
python
test
inspirehep/harvesting-kit
harvestingkit/oup_package.py
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/oup_package.py#L79-L101
def connect(self): """Logs into the specified ftp server and returns connector.""" for tried_connection_count in range(CFG_FTP_CONNECTION_ATTEMPTS): try: self.ftp = FtpHandler(self.config.OXFORD.URL, self.config.OXFORD.LOGIN, self.config.OXFORD.PASSWORD) self.logger.debug(("Successful connection to the " "Oxford University Press server")) return except socket_timeout_exception as err: self.logger.error(('Failed to connect %d of %d times. ' 'Will sleep for %d seconds and try again.') % (tried_connection_count+1, CFG_FTP_CONNECTION_ATTEMPTS, CFG_FTP_TIMEOUT_SLEEP_DURATION)) time.sleep(CFG_FTP_TIMEOUT_SLEEP_DURATION) except Exception as err: self.logger.error(('Failed to connect to the Oxford ' 'University Press server. %s') % (err,)) break raise LoginException(err)
[ "def", "connect", "(", "self", ")", ":", "for", "tried_connection_count", "in", "range", "(", "CFG_FTP_CONNECTION_ATTEMPTS", ")", ":", "try", ":", "self", ".", "ftp", "=", "FtpHandler", "(", "self", ".", "config", ".", "OXFORD", ".", "URL", ",", "self", ...
Logs into the specified ftp server and returns connector.
[ "Logs", "into", "the", "specified", "ftp", "server", "and", "returns", "connector", "." ]
python
valid
twilio/twilio-python
twilio/rest/autopilot/v1/assistant/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/autopilot/v1/assistant/__init__.py#L388-L397
def defaults(self): """ Access the defaults :returns: twilio.rest.autopilot.v1.assistant.defaults.DefaultsList :rtype: twilio.rest.autopilot.v1.assistant.defaults.DefaultsList """ if self._defaults is None: self._defaults = DefaultsList(self._version, assistant_sid=self._solution['sid'], ) return self._defaults
[ "def", "defaults", "(", "self", ")", ":", "if", "self", ".", "_defaults", "is", "None", ":", "self", ".", "_defaults", "=", "DefaultsList", "(", "self", ".", "_version", ",", "assistant_sid", "=", "self", ".", "_solution", "[", "'sid'", "]", ",", ")", ...
Access the defaults :returns: twilio.rest.autopilot.v1.assistant.defaults.DefaultsList :rtype: twilio.rest.autopilot.v1.assistant.defaults.DefaultsList
[ "Access", "the", "defaults" ]
python
train
blockstack-packages/blockstack-gpg
blockstack_gpg/gpg.py
https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L703-L759
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ): """ Put an application GPG key. Stash the private key locally to an app-specific keyring. Return {'status': True, 'key_url': ..., 'key_data': ...} on success Return {'error': ...} on error If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash) This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID on a successful execution. It is up to you to wait until the transaction is confirmed before using the key. Otherwise, the key is stored to mutable storage. """ assert is_valid_appname(appname) assert is_valid_keyname(keyname) try: keydir = make_gpg_home( appname, config_dir=config_dir ) key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir ) assert key_id is not None, "Failed to stash key" log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir)) except Exception, e: log.exception(e) log.error("Failed to store GPG key '%s'" % keyname) return {'error': "Failed to store GPG key locally"} # get public key... 
assert is_valid_appname(appname) try: pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir ) except: return {'error': 'Failed to load key'} fq_key_name = "gpg.%s.%s" % (appname, keyname) key_url = None if not immutable: res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys ) if 'error' in res: return res key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] ) else: res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys ) if 'error' in res: return res key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] ) res['key_url'] = key_url res['key_data'] = pubkey_data res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir ) log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url)) return res
[ "def", "gpg_app_put_key", "(", "blockchain_id", ",", "appname", ",", "keyname", ",", "key_data", ",", "txid", "=", "None", ",", "immutable", "=", "False", ",", "proxy", "=", "None", ",", "wallet_keys", "=", "None", ",", "config_dir", "=", "None", ")", ":...
Put an application GPG key. Stash the private key locally to an app-specific keyring. Return {'status': True, 'key_url': ..., 'key_data': ...} on success Return {'error': ...} on error If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash) This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID on a successful execution. It is up to you to wait until the transaction is confirmed before using the key. Otherwise, the key is stored to mutable storage.
[ "Put", "an", "application", "GPG", "key", ".", "Stash", "the", "private", "key", "locally", "to", "an", "app", "-", "specific", "keyring", "." ]
python
train
jupyter-widgets/ipywidgets
ipywidgets/widgets/interaction.py
https://github.com/jupyter-widgets/ipywidgets/blob/36fe37594cd5a268def228709ca27e37b99ac606/ipywidgets/widgets/interaction.py#L341-L354
def widget_from_single_value(o): """Make widgets from single values, which can be used as parameter defaults.""" if isinstance(o, string_types): return Text(value=unicode_type(o)) elif isinstance(o, bool): return Checkbox(value=o) elif isinstance(o, Integral): min, max, value = _get_min_max_value(None, None, o) return IntSlider(value=o, min=min, max=max) elif isinstance(o, Real): min, max, value = _get_min_max_value(None, None, o) return FloatSlider(value=o, min=min, max=max) else: return None
[ "def", "widget_from_single_value", "(", "o", ")", ":", "if", "isinstance", "(", "o", ",", "string_types", ")", ":", "return", "Text", "(", "value", "=", "unicode_type", "(", "o", ")", ")", "elif", "isinstance", "(", "o", ",", "bool", ")", ":", "return"...
Make widgets from single values, which can be used as parameter defaults.
[ "Make", "widgets", "from", "single", "values", "which", "can", "be", "used", "as", "parameter", "defaults", "." ]
python
train
WZBSocialScienceCenter/tmtoolkit
tmtoolkit/topicmod/model_stats.py
https://github.com/WZBSocialScienceCenter/tmtoolkit/blob/ca8b9d072e37ccc82b533f47d48bd9755722305b/tmtoolkit/topicmod/model_stats.py#L84-L91
def get_most_salient_words(vocab, topic_word_distrib, doc_topic_distrib, doc_lengths, n=None): """ Order the words from `vocab` by "saliency score" (Chuang et al. 2012) from most to least salient. Optionally only return the `n` most salient words. J. Chuang, C. Manning, J. Heer 2012: "Termite: Visualization Techniques for Assessing Textual Topic Models" """ return _words_by_salience_score(vocab, topic_word_distrib, doc_topic_distrib, doc_lengths, n)
[ "def", "get_most_salient_words", "(", "vocab", ",", "topic_word_distrib", ",", "doc_topic_distrib", ",", "doc_lengths", ",", "n", "=", "None", ")", ":", "return", "_words_by_salience_score", "(", "vocab", ",", "topic_word_distrib", ",", "doc_topic_distrib", ",", "do...
Order the words from `vocab` by "saliency score" (Chuang et al. 2012) from most to least salient. Optionally only return the `n` most salient words. J. Chuang, C. Manning, J. Heer 2012: "Termite: Visualization Techniques for Assessing Textual Topic Models"
[ "Order", "the", "words", "from", "vocab", "by", "saliency", "score", "(", "Chuang", "et", "al", ".", "2012", ")", "from", "most", "to", "least", "salient", ".", "Optionally", "only", "return", "the", "n", "most", "salient", "words", "." ]
python
train
RiotGames/cloud-inquisitor
plugins/public/cinq-auditor-cloudtrail/cinq_auditor_cloudtrail/__init__.py
https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/plugins/public/cinq-auditor-cloudtrail/cinq_auditor_cloudtrail/__init__.py#L272-L302
def create_sns_topic(self, region): """Creates an SNS topic if needed. Returns the ARN if the created SNS topic Args: region (str): Region name Returns: `str` """ sns = self.session.client('sns', region_name=region) self.log.info('Creating SNS topic for {}/{}'.format(self.account, region)) # Create the topic res = sns.create_topic(Name=self.topic_name) arn = res['TopicArn'] # Allow CloudTrail to publish messages with a policy update tmpl = get_template('cloudtrail_sns_policy.json') policy = tmpl.render(region=region, account_id=self.account.account_number, topic_name=self.topic_name) sns.set_topic_attributes(TopicArn=arn, AttributeName='Policy', AttributeValue=policy) auditlog( event='cloudtrail.create_sns_topic', actor=self.ns, data={ 'account': self.account.account_name, 'region': region } ) return arn
[ "def", "create_sns_topic", "(", "self", ",", "region", ")", ":", "sns", "=", "self", ".", "session", ".", "client", "(", "'sns'", ",", "region_name", "=", "region", ")", "self", ".", "log", ".", "info", "(", "'Creating SNS topic for {}/{}'", ".", "format",...
Creates an SNS topic if needed. Returns the ARN if the created SNS topic Args: region (str): Region name Returns: `str`
[ "Creates", "an", "SNS", "topic", "if", "needed", ".", "Returns", "the", "ARN", "if", "the", "created", "SNS", "topic" ]
python
train
SwissDataScienceCenter/renku-python
renku/cli/runner.py
https://github.com/SwissDataScienceCenter/renku-python/blob/691644d695b055a01e0ca22b2620e55bbd928c0d/renku/cli/runner.py#L77-L101
def rerun(client, run, job): """Re-run existing workflow or tool using CWL runner.""" from renku.models.provenance import ProcessRun activity = client.process_commmit() if not isinstance(activity, ProcessRun): click.secho('No tool was found.', fg='red', file=sys.stderr) return try: args = ['cwl-runner', activity.path] if job: job_file = tempfile.NamedTemporaryFile( suffix='.yml', dir=os.getcwd(), delete=False ) args.append(job_file.name) with job_file as fp: yaml.dump(yaml.safe_load(job), stream=fp, encoding='utf-8') if run: return call(args, cwd=os.getcwd()) finally: if job: os.unlink(job_file.name)
[ "def", "rerun", "(", "client", ",", "run", ",", "job", ")", ":", "from", "renku", ".", "models", ".", "provenance", "import", "ProcessRun", "activity", "=", "client", ".", "process_commmit", "(", ")", "if", "not", "isinstance", "(", "activity", ",", "Pro...
Re-run existing workflow or tool using CWL runner.
[ "Re", "-", "run", "existing", "workflow", "or", "tool", "using", "CWL", "runner", "." ]
python
train
biocore/burrito-fillings
bfillings/cd_hit.py
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/cd_hit.py#L203-L208
def _get_result_paths(self, data): """Return dict of {key: ResultPath}""" result = {} result['FASTA'] = ResultPath(Path=self._get_seqs_outfile()) result['CLSTR'] = ResultPath(Path=self._get_clstr_outfile()) return result
[ "def", "_get_result_paths", "(", "self", ",", "data", ")", ":", "result", "=", "{", "}", "result", "[", "'FASTA'", "]", "=", "ResultPath", "(", "Path", "=", "self", ".", "_get_seqs_outfile", "(", ")", ")", "result", "[", "'CLSTR'", "]", "=", "ResultPat...
Return dict of {key: ResultPath}
[ "Return", "dict", "of", "{", "key", ":", "ResultPath", "}" ]
python
train
belbio/bel
bel/db/arangodb.py
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/arangodb.py#L62-L87
def get_client(host=None, port=None, username=None, password=None, enable_logging=True): """Get arango client and edgestore db handle""" host = utils.first_true( [host, config["bel_api"]["servers"]["arangodb_host"], "localhost"] ) port = utils.first_true([port, config["bel_api"]["servers"]["arangodb_port"], 8529]) username = utils.first_true( [username, config["bel_api"]["servers"]["arangodb_username"], ""] ) password = utils.first_true( [ password, config.get( "secrets", config["secrets"]["bel_api"]["servers"].get("arangodb_password"), ), "", ] ) client = arango.client.ArangoClient( protocol=config["bel_api"]["servers"]["arangodb_protocol"], host=host, port=port ) return client
[ "def", "get_client", "(", "host", "=", "None", ",", "port", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ",", "enable_logging", "=", "True", ")", ":", "host", "=", "utils", ".", "first_true", "(", "[", "host", ",", "config...
Get arango client and edgestore db handle
[ "Get", "arango", "client", "and", "edgestore", "db", "handle" ]
python
train
watson-developer-cloud/python-sdk
ibm_watson/discovery_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/discovery_v1.py#L6509-L6530
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'exclude_tags_completely' ) and self.exclude_tags_completely is not None: _dict['exclude_tags_completely'] = self.exclude_tags_completely if hasattr(self, 'exclude_tags_keep_content' ) and self.exclude_tags_keep_content is not None: _dict['exclude_tags_keep_content'] = self.exclude_tags_keep_content if hasattr(self, 'keep_content') and self.keep_content is not None: _dict['keep_content'] = self.keep_content._to_dict() if hasattr(self, 'exclude_content') and self.exclude_content is not None: _dict['exclude_content'] = self.exclude_content._to_dict() if hasattr( self, 'keep_tag_attributes') and self.keep_tag_attributes is not None: _dict['keep_tag_attributes'] = self.keep_tag_attributes if hasattr(self, 'exclude_tag_attributes' ) and self.exclude_tag_attributes is not None: _dict['exclude_tag_attributes'] = self.exclude_tag_attributes return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'exclude_tags_completely'", ")", "and", "self", ".", "exclude_tags_completely", "is", "not", "None", ":", "_dict", "[", "'exclude_tags_completely'", "]", "=...
Return a json dictionary representing this model.
[ "Return", "a", "json", "dictionary", "representing", "this", "model", "." ]
python
train
AustralianSynchrotron/lightflow
lightflow/models/dag.py
https://github.com/AustralianSynchrotron/lightflow/blob/dc53dbc1d961e20fb144273baca258060705c03e/lightflow/models/dag.py#L77-L100
def define(self, schema, *, validate=True): """ Store the task graph definition (schema). The schema has to adhere to the following rules: A key in the schema dict represents a parent task and the value one or more children: {parent: [child]} or {parent: [child1, child2]} The data output of one task can be routed to a labelled input slot of successor tasks using a dictionary instead of a list for the children: {parent: {child1: 'positive', child2: 'negative'}} An empty slot name or None skips the creation of a labelled slot: {parent: {child1: '', child2: None}} Args: schema (dict): A dictionary with the schema definition. validate (bool): Set to True to validate the graph by checking whether it is a directed acyclic graph. """ self._schema = schema if validate: self.validate(self.make_graph(self._schema))
[ "def", "define", "(", "self", ",", "schema", ",", "*", ",", "validate", "=", "True", ")", ":", "self", ".", "_schema", "=", "schema", "if", "validate", ":", "self", ".", "validate", "(", "self", ".", "make_graph", "(", "self", ".", "_schema", ")", ...
Store the task graph definition (schema). The schema has to adhere to the following rules: A key in the schema dict represents a parent task and the value one or more children: {parent: [child]} or {parent: [child1, child2]} The data output of one task can be routed to a labelled input slot of successor tasks using a dictionary instead of a list for the children: {parent: {child1: 'positive', child2: 'negative'}} An empty slot name or None skips the creation of a labelled slot: {parent: {child1: '', child2: None}} Args: schema (dict): A dictionary with the schema definition. validate (bool): Set to True to validate the graph by checking whether it is a directed acyclic graph.
[ "Store", "the", "task", "graph", "definition", "(", "schema", ")", "." ]
python
train
python/performance
performance/compare.py
https://github.com/python/performance/blob/2a9524c0a5714e85106671bc61d750e800fe17db/performance/compare.py#L89-L103
def tscore(sample1, sample2): """Calculate a t-test score for the difference between two samples. Args: sample1: one sample. sample2: the other sample. Returns: The t-test score, as a float. """ if len(sample1) != len(sample2): raise ValueError("different number of values") error = pooled_sample_variance(sample1, sample2) / len(sample1) diff = statistics.mean(sample1) - statistics.mean(sample2) return diff / math.sqrt(error * 2)
[ "def", "tscore", "(", "sample1", ",", "sample2", ")", ":", "if", "len", "(", "sample1", ")", "!=", "len", "(", "sample2", ")", ":", "raise", "ValueError", "(", "\"different number of values\"", ")", "error", "=", "pooled_sample_variance", "(", "sample1", ","...
Calculate a t-test score for the difference between two samples. Args: sample1: one sample. sample2: the other sample. Returns: The t-test score, as a float.
[ "Calculate", "a", "t", "-", "test", "score", "for", "the", "difference", "between", "two", "samples", "." ]
python
test
oscarlazoarjona/fast
fast/angular_momentum.py
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/angular_momentum.py#L365-L416
def spherical_tensor(Ji, Jj, K, Q): ur"""Return a matrix representation of the spherical tensor with quantum numbers $J_i, J_j, K, Q$. >>> from sympy import pprint >>> pprint(spherical_tensor(1, 1, 1, 0)) ⎡-√2 ⎤ ⎢──── 0 0 ⎥ ⎢ 2 ⎥ ⎢ ⎥ ⎢ 0 0 0 ⎥ ⎢ ⎥ ⎢ √2⎥ ⎢ 0 0 ──⎥ ⎣ 2 ⎦ >>> pprint(spherical_tensor(1, 2, 1, -1)) ⎡ √10 ⎤ ⎢0 0 ─── 0 0 ⎥ ⎢ 10 ⎥ ⎢ ⎥ ⎢ √30 ⎥ ⎢0 0 0 ─── 0 ⎥ ⎢ 10 ⎥ ⎢ ⎥ ⎢ √15⎥ ⎢0 0 0 0 ───⎥ ⎣ 5 ⎦ """ keti = {(Ji, Mi): Matrix([KroneckerDelta(i, j) for j in range(2*Ji+1)]) for i, Mi in enumerate(perm_m(Ji))} braj = {(Jj, Mj): Matrix([KroneckerDelta(i, j) for j in range(2*Jj+1)]).adjoint() for i, Mj in enumerate(perm_m(Jj))} if K not in perm_j(Ji, Jj): raise ValueError("K value is not allowed.") if Q not in perm_m(K): raise ValueError("Q value is not allowed.") Ni = 2*Ji+1 Nj = 2*Jj+1 T = zeros(Ni, Nj) for i, Mi in enumerate(perm_m(Ji)): for j, Mj in enumerate(perm_m(Jj)): T += (-1)**(Jj-Mj)*clebsch_gordan(Ji, Jj, K, Mi, -Mj, Q) * \ keti[(Ji, Mi)]*braj[(Jj, Mj)] return T
[ "def", "spherical_tensor", "(", "Ji", ",", "Jj", ",", "K", ",", "Q", ")", ":", "keti", "=", "{", "(", "Ji", ",", "Mi", ")", ":", "Matrix", "(", "[", "KroneckerDelta", "(", "i", ",", "j", ")", "for", "j", "in", "range", "(", "2", "*", "Ji", ...
ur"""Return a matrix representation of the spherical tensor with quantum numbers $J_i, J_j, K, Q$. >>> from sympy import pprint >>> pprint(spherical_tensor(1, 1, 1, 0)) ⎡-√2 ⎤ ⎢──── 0 0 ⎥ ⎢ 2 ⎥ ⎢ ⎥ ⎢ 0 0 0 ⎥ ⎢ ⎥ ⎢ √2⎥ ⎢ 0 0 ──⎥ ⎣ 2 ⎦ >>> pprint(spherical_tensor(1, 2, 1, -1)) ⎡ √10 ⎤ ⎢0 0 ─── 0 0 ⎥ ⎢ 10 ⎥ ⎢ ⎥ ⎢ √30 ⎥ ⎢0 0 0 ─── 0 ⎥ ⎢ 10 ⎥ ⎢ ⎥ ⎢ √15⎥ ⎢0 0 0 0 ───⎥ ⎣ 5 ⎦
[ "ur", "Return", "a", "matrix", "representation", "of", "the", "spherical", "tensor", "with", "quantum", "numbers", "$J_i", "J_j", "K", "Q$", "." ]
python
train
CellProfiler/centrosome
centrosome/cpmorphology.py
https://github.com/CellProfiler/centrosome/blob/7bd9350a2d4ae1b215b81eabcecfe560bbb1f32a/centrosome/cpmorphology.py#L2479-L2592
def euler_number(labels, indexes=None): """Calculate the Euler number of each label labels - a label matrix indexes - the indexes of the labels to measure or None to treat the labels matrix as a binary matrix """ if indexes is None: labels = labels != 0 indexes = np.array([1],dtype=np.int32) elif getattr(indexes,'__getitem__',False): indexes = np.array(indexes,dtype=np.int32) else: indexes = np.array([indexes],dtype=np.int32) fix = fixup_scipy_ndimage_result # # The algorithm here is from the following reference: # S.B. Gray, "Local Properties of Binary Images in Two Dimensions", # IEEE Transactions on Computers, Vol c-20 # 5 p 551, May 1971 # # The general idea is that crossings into objects can be measured locally # through counting numbers of patterns resulting in crossings. There # are three sets that are applicable in Euler Numbers: # Q1: 1 0 0 1 0 0 0 0 (or more simply, 1 bit per quad) # 0 0 0 0 1 0 0 1 # # Q3: 0 1 1 0 1 1 1 1 (or 3 bits per quad) # 1 1 1 1 1 0 0 1 # # QD: 1 0 0 1 # 0 1 1 0 # # and the Euler number = W of an object is # # 4W = n(Q1) - n(Q3) - 2n(QD) (equation 34) # W = (n(Q1) - n(Q3) - 2n(QD))/4 # # We shift the label matrix to make matrices, padded by zeros on the # sides for each of the four positions of the quad: # I00 I01 # I10 I11 # # We can then assign each bitquad to a label based on the value # of the label at one of the "on" bits. For example, the first pattern # of Q1 has the label I00 because that bit is on. It's truth value is # I00 != I01 and I00 != I02 and I00 != I03. 
# I_shape = (labels.shape[0]+3,labels.shape[1]+3) I00 = np.zeros(I_shape,int) I01 = np.zeros(I_shape,int) I10 = np.zeros(I_shape,int) I11 = np.zeros(I_shape,int) slice_00 = [slice(1,labels.shape[0]+1),slice(1,labels.shape[1]+1)] slice_01 = [slice(1,labels.shape[0]+1),slice(0,labels.shape[1])] slice_10 = [slice(labels.shape[0]),slice(1,labels.shape[1]+1)] slice_11 = [slice(0,labels.shape[0]),slice(0,labels.shape[1])] I00[slice_00] = labels I01[slice_01] = labels I10[slice_10] = labels I11[slice_11] = labels # # There are 6 binary comparisons among the four bits # EQ00_01 = I00 == I01; EQ01_00 = EQ00_01 EQ00_10 = I00 == I10; EQ10_00 = EQ00_10 EQ00_11 = I00 == I11; EQ11_00 = EQ00_11 EQ01_10 = I01 == I10; EQ10_01 = EQ01_10 EQ01_11 = I01 == I11; EQ11_01 = EQ01_11 EQ10_11 = I10 == I11; EQ11_10 = EQ10_11 NE00_01 = np.logical_not(EQ00_01); NE01_00 = NE00_01 NE00_10 = np.logical_not(EQ00_10); NE10_00 = NE00_10 NE00_11 = np.logical_not(EQ00_11); NE11_00 = NE00_11 NE01_10 = np.logical_not(EQ01_10); NE10_01 = NE01_10 NE01_11 = np.logical_not(EQ01_11); NE11_01 = NE01_11 NE10_11 = np.logical_not(EQ10_11); NE11_10 = NE10_11 # # Q1: 1 0 # 0 0 Q1_condition = (NE00_01 & NE00_10 & NE00_11).astype(int) # 0 1 # 0 0 Q1_condition[slice_00] += (NE01_00 & NE01_10 & NE01_11)[slice_01] # 0 0 # 1 0 Q1_condition[slice_00] += (NE10_00 & NE10_01 & NE10_11)[slice_10] # 0 0 # 0 1 Q1_condition[slice_00] += (NE11_00 & NE11_01 & NE11_10)[slice_11] Q1 = fix(scind.sum(Q1_condition, I00, indexes)) # # Q3: 1 1 # 1 0 Q3_condition = (EQ00_10 & EQ00_01 & NE00_11).astype(int) # 0 1 # 1 1 Q3_condition[slice_00] += (NE11_00 & EQ11_10 & EQ11_01)[slice_11] # 1 0 # 1 1 Q3_condition += (NE00_01 & EQ00_10 & EQ00_11) # 1 1 # 0 1 Q3_condition += (NE00_10 & EQ00_01 & EQ00_11) Q3 = fix(scind.sum(Q3_condition, I00, indexes)) # QD: 1 0 # 0 1 QD_condition = (NE00_01 & NE00_10 & EQ00_11).astype(int) # 0 1 # 1 0 QD_condition[slice_00] += (NE01_00 & NE01_11 & EQ01_10)[slice_01] QD = fix(scind.sum(QD_condition, I00, indexes)) 
W = (Q1 - Q3 - 2*QD).astype(float)/4.0 if indexes is None: return W[0] return W
[ "def", "euler_number", "(", "labels", ",", "indexes", "=", "None", ")", ":", "if", "indexes", "is", "None", ":", "labels", "=", "labels", "!=", "0", "indexes", "=", "np", ".", "array", "(", "[", "1", "]", ",", "dtype", "=", "np", ".", "int32", ")...
Calculate the Euler number of each label labels - a label matrix indexes - the indexes of the labels to measure or None to treat the labels matrix as a binary matrix
[ "Calculate", "the", "Euler", "number", "of", "each", "label", "labels", "-", "a", "label", "matrix", "indexes", "-", "the", "indexes", "of", "the", "labels", "to", "measure", "or", "None", "to", "treat", "the", "labels", "matrix", "as", "a", "binary", "m...
python
train
shoebot/shoebot
lib/web/BeautifulSoup.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/web/BeautifulSoup.py#L278-L282
def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs): """Returns the closest sibling to this Tag that matches the given criteria and appears before this Tag in the document.""" return self._findOne(self.findPreviousSiblings, name, attrs, text, **kwargs)
[ "def", "findPreviousSibling", "(", "self", ",", "name", "=", "None", ",", "attrs", "=", "{", "}", ",", "text", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_findOne", "(", "self", ".", "findPreviousSiblings", ",", "name", "...
Returns the closest sibling to this Tag that matches the given criteria and appears before this Tag in the document.
[ "Returns", "the", "closest", "sibling", "to", "this", "Tag", "that", "matches", "the", "given", "criteria", "and", "appears", "before", "this", "Tag", "in", "the", "document", "." ]
python
valid
DataONEorg/d1_python
client_cli/src/d1_cli/impl/command_parser.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/impl/command_parser.py#L419-L428
def do_archive(self, line): """archive <identifier> [identifier ...] Mark one or more existing Science Objects as archived.""" pids = self._split_args(line, 1, -1) self._command_processor.science_object_archive(pids) self._print_info_if_verbose( "Added archive operation for identifier(s) {} to write queue".format( ", ".join(pids) ) )
[ "def", "do_archive", "(", "self", ",", "line", ")", ":", "pids", "=", "self", ".", "_split_args", "(", "line", ",", "1", ",", "-", "1", ")", "self", ".", "_command_processor", ".", "science_object_archive", "(", "pids", ")", "self", ".", "_print_info_if_...
archive <identifier> [identifier ...] Mark one or more existing Science Objects as archived.
[ "archive", "<identifier", ">", "[", "identifier", "...", "]", "Mark", "one", "or", "more", "existing", "Science", "Objects", "as", "archived", "." ]
python
train
GNS3/gns3-server
gns3server/compute/base_node.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/base_node.py#L643-L689
def _add_ubridge_ethernet_connection(self, bridge_name, ethernet_interface, block_host_traffic=False): """ Creates a connection with an Ethernet interface in uBridge. :param bridge_name: bridge name in uBridge :param ethernet_interface: Ethernet interface name :param block_host_traffic: block network traffic originating from the host OS (Windows only) """ if sys.platform.startswith("linux") and block_host_traffic is False: # on Linux we use RAW sockets by default excepting if host traffic must be blocked yield from self._ubridge_send('bridge add_nio_linux_raw {name} "{interface}"'.format(name=bridge_name, interface=ethernet_interface)) elif sys.platform.startswith("win"): # on Windows we use Winpcap/Npcap windows_interfaces = interfaces() npf_id = None source_mac = None for interface in windows_interfaces: # Winpcap/Npcap uses a NPF ID to identify an interface on Windows if "netcard" in interface and ethernet_interface in interface["netcard"]: npf_id = interface["id"] source_mac = interface["mac_address"] elif ethernet_interface in interface["name"]: npf_id = interface["id"] source_mac = interface["mac_address"] if npf_id: yield from self._ubridge_send('bridge add_nio_ethernet {name} "{interface}"'.format(name=bridge_name, interface=npf_id)) else: raise NodeError("Could not find NPF id for interface {}".format(ethernet_interface)) if block_host_traffic: if source_mac: yield from self._ubridge_send('bridge set_pcap_filter {name} "not ether src {mac}"'.format(name=bridge_name, mac=source_mac)) log.info('PCAP filter applied on "{interface}" for source MAC {mac}'.format(interface=ethernet_interface, mac=source_mac)) else: log.warning("Could not block host network traffic on {} (no MAC address found)".format(ethernet_interface)) else: # on other platforms we just rely on the pcap library yield from self._ubridge_send('bridge add_nio_ethernet {name} "{interface}"'.format(name=bridge_name, interface=ethernet_interface)) source_mac = None for interface in interfaces(): if 
interface["name"] == ethernet_interface: source_mac = interface["mac_address"] if source_mac: yield from self._ubridge_send('bridge set_pcap_filter {name} "not ether src {mac}"'.format(name=bridge_name, mac=source_mac)) log.info('PCAP filter applied on "{interface}" for source MAC {mac}'.format(interface=ethernet_interface, mac=source_mac))
[ "def", "_add_ubridge_ethernet_connection", "(", "self", ",", "bridge_name", ",", "ethernet_interface", ",", "block_host_traffic", "=", "False", ")", ":", "if", "sys", ".", "platform", ".", "startswith", "(", "\"linux\"", ")", "and", "block_host_traffic", "is", "Fa...
Creates a connection with an Ethernet interface in uBridge. :param bridge_name: bridge name in uBridge :param ethernet_interface: Ethernet interface name :param block_host_traffic: block network traffic originating from the host OS (Windows only)
[ "Creates", "a", "connection", "with", "an", "Ethernet", "interface", "in", "uBridge", "." ]
python
train
jssimporter/python-jss
jss/contrib/mount_shares_better.py
https://github.com/jssimporter/python-jss/blob/b95185d74e0c0531b0b563f280d4129e21d5fe5d/jss/contrib/mount_shares_better.py#L67-L94
def mount_share_at_path(share_path, mount_path): """Mounts a share at the specified path Args: share_path: String URL with all auth info to connect to file share. mount_path: Path to mount share on. Returns: The mount point or raises an error """ sh_url = CFURLCreateWithString(None, share_path, None) mo_url = CFURLCreateWithString(None, mount_path, None) # Set UI to reduced interaction open_options = {NetFS.kNAUIOptionKey: NetFS.kNAUIOptionNoUI} # Allow mounting sub-directories of root shares # Also specify the share should be mounted directly at (not under) # mount_path mount_options = {NetFS.kNetFSAllowSubMountsKey: True, NetFS.kNetFSMountAtMountDirKey: True} # Mount! result, output = NetFS.NetFSMountURLSync(sh_url, mo_url, None, None, open_options, mount_options, None) # Check if it worked if result != 0: raise Exception('Error mounting url "%s" at path "%s": %s' % (share_path, mount_path, output)) # Return the mountpath return str(output[0])
[ "def", "mount_share_at_path", "(", "share_path", ",", "mount_path", ")", ":", "sh_url", "=", "CFURLCreateWithString", "(", "None", ",", "share_path", ",", "None", ")", "mo_url", "=", "CFURLCreateWithString", "(", "None", ",", "mount_path", ",", "None", ")", "#...
Mounts a share at the specified path Args: share_path: String URL with all auth info to connect to file share. mount_path: Path to mount share on. Returns: The mount point or raises an error
[ "Mounts", "a", "share", "at", "the", "specified", "path" ]
python
train
linuxsoftware/ls.joyous
ls/joyous/models/events.py
https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/models/events.py#L972-L979
def prev_date(self): """ Date when this event last occurred in the local time zone (Does not include postponements, but does exclude cancellations) """ prevDt = self.__localBefore(timezone.localtime(), dt.time.min) if prevDt is not None: return prevDt.date()
[ "def", "prev_date", "(", "self", ")", ":", "prevDt", "=", "self", ".", "__localBefore", "(", "timezone", ".", "localtime", "(", ")", ",", "dt", ".", "time", ".", "min", ")", "if", "prevDt", "is", "not", "None", ":", "return", "prevDt", ".", "date", ...
Date when this event last occurred in the local time zone (Does not include postponements, but does exclude cancellations)
[ "Date", "when", "this", "event", "last", "occurred", "in", "the", "local", "time", "zone", "(", "Does", "not", "include", "postponements", "but", "does", "exclude", "cancellations", ")" ]
python
train
e-dard/postcodes
postcodes.py
https://github.com/e-dard/postcodes/blob/d63c47b4ecd765bc2e4e6ba34bc0b8a796f44005/postcodes.py#L22-L34
def get(postcode): """ Request data associated with `postcode`. :param postcode: the postcode to search for. The postcode may contain spaces (they will be removed). :returns: a dict of the nearest postcode's data or None if no postcode data is found. """ postcode = quote(postcode.replace(' ', '')) url = '%s/postcode/%s.json' % (END_POINT, postcode) return _get_json_resp(url)
[ "def", "get", "(", "postcode", ")", ":", "postcode", "=", "quote", "(", "postcode", ".", "replace", "(", "' '", ",", "''", ")", ")", "url", "=", "'%s/postcode/%s.json'", "%", "(", "END_POINT", ",", "postcode", ")", "return", "_get_json_resp", "(", "url",...
Request data associated with `postcode`. :param postcode: the postcode to search for. The postcode may contain spaces (they will be removed). :returns: a dict of the nearest postcode's data or None if no postcode data is found.
[ "Request", "data", "associated", "with", "postcode", "." ]
python
train
note35/sinon
sinon/lib/stub.py
https://github.com/note35/sinon/blob/f1d551b679b393d64d926a8a279320904c38d0f5/sinon/lib/stub.py#L187-L204
def withArgs(self, *args, **kwargs): #pylint: disable=invalid-name """ Adds a condition for when the stub is called. When the condition is met, a special return value can be returned. Adds the specified argument(s) into the condition list. For example, when the stub function is called with argument 1, it will return "#": stub.withArgs(1).returns("#") Without returns/throws at the end of the chain of functions, nothing will happen. For example, in this case, although 1 is in the condition list, nothing will happen: stub.withArgs(1) Return: a SinonStub object (able to be chained) """ cond_args = args if len(args) > 0 else None cond_kwargs = kwargs if len(kwargs) > 0 else None return _SinonStubCondition(copy=self._copy, cond_args=cond_args, cond_kwargs=cond_kwargs, oncall=self._oncall)
[ "def", "withArgs", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "#pylint: disable=invalid-name", "cond_args", "=", "args", "if", "len", "(", "args", ")", ">", "0", "else", "None", "cond_kwargs", "=", "kwargs", "if", "len", "(", "kw...
Adds a condition for when the stub is called. When the condition is met, a special return value can be returned. Adds the specified argument(s) into the condition list. For example, when the stub function is called with argument 1, it will return "#": stub.withArgs(1).returns("#") Without returns/throws at the end of the chain of functions, nothing will happen. For example, in this case, although 1 is in the condition list, nothing will happen: stub.withArgs(1) Return: a SinonStub object (able to be chained)
[ "Adds", "a", "condition", "for", "when", "the", "stub", "is", "called", ".", "When", "the", "condition", "is", "met", "a", "special", "return", "value", "can", "be", "returned", ".", "Adds", "the", "specified", "argument", "(", "s", ")", "into", "the", ...
python
train
Unidata/MetPy
metpy/interpolate/grid.py
https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/metpy/interpolate/grid.py#L258-L335
def interpolate_to_grid(x, y, z, interp_type='linear', hres=50000, minimum_neighbors=3, gamma=0.25, kappa_star=5.052, search_radius=None, rbf_func='linear', rbf_smooth=0, boundary_coords=None): r"""Interpolate given (x,y), observation (z) pairs to a grid based on given parameters. Parameters ---------- x: array_like x coordinate y: array_like y coordinate z: array_like observation value interp_type: str What type of interpolation to use. Available options include: 1) "linear", "nearest", "cubic", or "rbf" from `scipy.interpolate`. 2) "natural_neighbor", "barnes", or "cressman" from `metpy.interpolate`. Default "linear". hres: float The horizontal resolution of the generated grid, given in the same units as the x and y parameters. Default 50000. minimum_neighbors: int Minimum number of neighbors needed to perform barnes or cressman interpolation for a point. Default is 3. gamma: float Adjustable smoothing parameter for the barnes interpolation. Default 0.25. kappa_star: float Response parameter for barnes interpolation, specified nondimensionally in terms of the Nyquist. Default 5.052 search_radius: float A search radius to use for the barnes and cressman interpolation schemes. If search_radius is not specified, it will default to the average spacing of observations. rbf_func: str Specifies which function to use for Rbf interpolation. Options include: 'multiquadric', 'inverse', 'gaussian', 'linear', 'cubic', 'quintic', and 'thin_plate'. Defualt 'linear'. See `scipy.interpolate.Rbf` for more information. rbf_smooth: float Smoothing value applied to rbf interpolation. Higher values result in more smoothing. boundary_coords: dictionary Optional dictionary containing coordinates of the study area boundary. 
Dictionary should be in format: {'west': west, 'south': south, 'east': east, 'north': north} Returns ------- grid_x: (N, 2) ndarray Meshgrid for the resulting interpolation in the x dimension grid_y: (N, 2) ndarray Meshgrid for the resulting interpolation in the y dimension ndarray img: (M, N) ndarray 2-dimensional array representing the interpolated values for each grid. Notes ----- This function acts as a wrapper for `interpolate_points` to allow it to generate a regular grid. See Also -------- interpolate_to_points """ # Generate the grid if boundary_coords is None: boundary_coords = get_boundary_coords(x, y) grid_x, grid_y = generate_grid(hres, boundary_coords) # Handle grid-to-points conversion, and use function from `interpolation` points_obs = np.array(list(zip(x, y))) points_grid = generate_grid_coords(grid_x, grid_y) img = interpolate_to_points(points_obs, z, points_grid, interp_type=interp_type, minimum_neighbors=minimum_neighbors, gamma=gamma, kappa_star=kappa_star, search_radius=search_radius, rbf_func=rbf_func, rbf_smooth=rbf_smooth) return grid_x, grid_y, img.reshape(grid_x.shape)
[ "def", "interpolate_to_grid", "(", "x", ",", "y", ",", "z", ",", "interp_type", "=", "'linear'", ",", "hres", "=", "50000", ",", "minimum_neighbors", "=", "3", ",", "gamma", "=", "0.25", ",", "kappa_star", "=", "5.052", ",", "search_radius", "=", "None",...
r"""Interpolate given (x,y), observation (z) pairs to a grid based on given parameters. Parameters ---------- x: array_like x coordinate y: array_like y coordinate z: array_like observation value interp_type: str What type of interpolation to use. Available options include: 1) "linear", "nearest", "cubic", or "rbf" from `scipy.interpolate`. 2) "natural_neighbor", "barnes", or "cressman" from `metpy.interpolate`. Default "linear". hres: float The horizontal resolution of the generated grid, given in the same units as the x and y parameters. Default 50000. minimum_neighbors: int Minimum number of neighbors needed to perform barnes or cressman interpolation for a point. Default is 3. gamma: float Adjustable smoothing parameter for the barnes interpolation. Default 0.25. kappa_star: float Response parameter for barnes interpolation, specified nondimensionally in terms of the Nyquist. Default 5.052 search_radius: float A search radius to use for the barnes and cressman interpolation schemes. If search_radius is not specified, it will default to the average spacing of observations. rbf_func: str Specifies which function to use for Rbf interpolation. Options include: 'multiquadric', 'inverse', 'gaussian', 'linear', 'cubic', 'quintic', and 'thin_plate'. Defualt 'linear'. See `scipy.interpolate.Rbf` for more information. rbf_smooth: float Smoothing value applied to rbf interpolation. Higher values result in more smoothing. boundary_coords: dictionary Optional dictionary containing coordinates of the study area boundary. Dictionary should be in format: {'west': west, 'south': south, 'east': east, 'north': north} Returns ------- grid_x: (N, 2) ndarray Meshgrid for the resulting interpolation in the x dimension grid_y: (N, 2) ndarray Meshgrid for the resulting interpolation in the y dimension ndarray img: (M, N) ndarray 2-dimensional array representing the interpolated values for each grid. 
Notes ----- This function acts as a wrapper for `interpolate_points` to allow it to generate a regular grid. See Also -------- interpolate_to_points
[ "r", "Interpolate", "given", "(", "x", "y", ")", "observation", "(", "z", ")", "pairs", "to", "a", "grid", "based", "on", "given", "parameters", "." ]
python
train
subdownloader/subdownloader
subdownloader/video2.py
https://github.com/subdownloader/subdownloader/blob/bbccedd11b18d925ad4c062b5eb65981e24d0433/subdownloader/video2.py#L131-L138
def get_osdb_hash(self): """ Get the hash of this local videofile :return: hash as string """ if self._osdb_hash is None: self._osdb_hash = self._calculate_osdb_hash() return self._osdb_hash
[ "def", "get_osdb_hash", "(", "self", ")", ":", "if", "self", ".", "_osdb_hash", "is", "None", ":", "self", ".", "_osdb_hash", "=", "self", ".", "_calculate_osdb_hash", "(", ")", "return", "self", ".", "_osdb_hash" ]
Get the hash of this local videofile :return: hash as string
[ "Get", "the", "hash", "of", "this", "local", "videofile", ":", "return", ":", "hash", "as", "string" ]
python
train
lltk/lltk
lltk/nl/scrapers/uitmuntend.py
https://github.com/lltk/lltk/blob/d171de55c1b97695fddedf4b02401ae27bf1d634/lltk/nl/scrapers/uitmuntend.py#L60-L79
def articles(self): ''' Tries to scrape the correct articles for singular and plural from uitmuntend.nl. ''' result = [None, None] element = self._first('NN') if element: element = element.split('\r\n')[0] if ' | ' in element: # This means there is a plural singular, plural = element.split(' | ') singular, plural = singular.strip(), plural.strip() else: # This means there is no plural singular, plural = element.strip(), '' result[1] = '' if singular: result[0] = singular.split(' ')[0].split('/') if plural: result[1] = plural.split(' ')[0].split('/') return result
[ "def", "articles", "(", "self", ")", ":", "result", "=", "[", "None", ",", "None", "]", "element", "=", "self", ".", "_first", "(", "'NN'", ")", "if", "element", ":", "element", "=", "element", ".", "split", "(", "'\\r\\n'", ")", "[", "0", "]", "...
Tries to scrape the correct articles for singular and plural from uitmuntend.nl.
[ "Tries", "to", "scrape", "the", "correct", "articles", "for", "singular", "and", "plural", "from", "uitmuntend", ".", "nl", "." ]
python
train
dagster-io/dagster
python_modules/dagster/dagster/core/types/field_utils.py
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/types/field_utils.py#L284-L308
def PermissiveDict(fields=None): '''A permissive dict will permit the user to partially specify the permitted fields. Any fields that are specified and passed in will be type checked. Other fields will be allowed, but will be ignored by the type checker. ''' if fields: check_user_facing_fields_dict(fields, 'PermissiveDict') class _PermissiveDict(_ConfigComposite): def __init__(self): key = 'PermissiveDict.' + str(DictCounter.get_next_count()) super(_PermissiveDict, self).__init__( name=None, key=key, fields=fields or dict(), description='A configuration dictionary with typed fields', type_attributes=ConfigTypeAttributes(is_builtin=True), ) @property def is_permissive_composite(self): return True return _PermissiveDict
[ "def", "PermissiveDict", "(", "fields", "=", "None", ")", ":", "if", "fields", ":", "check_user_facing_fields_dict", "(", "fields", ",", "'PermissiveDict'", ")", "class", "_PermissiveDict", "(", "_ConfigComposite", ")", ":", "def", "__init__", "(", "self", ")", ...
A permissive dict will permit the user to partially specify the permitted fields. Any fields that are specified and passed in will be type checked. Other fields will be allowed, but will be ignored by the type checker.
[ "A", "permissive", "dict", "will", "permit", "the", "user", "to", "partially", "specify", "the", "permitted", "fields", ".", "Any", "fields", "that", "are", "specified", "and", "passed", "in", "will", "be", "type", "checked", ".", "Other", "fields", "will", ...
python
test
4degrees/riffle
source/riffle/model.py
https://github.com/4degrees/riffle/blob/e5a0d908df8c93ff1ee7abdda8875fd1667df53d/source/riffle/model.py#L191-L216
def _fetchChildren(self): '''Fetch and return new child items.''' children = [] # List paths under this directory. paths = [] for name in os.listdir(self.path): paths.append(os.path.normpath(os.path.join(self.path, name))) # Handle collections. collections, remainder = clique.assemble( paths, [clique.PATTERNS['frames']] ) for path in remainder: try: child = ItemFactory(path) except ValueError: pass else: children.append(child) for collection in collections: children.append(Collection(collection)) return children
[ "def", "_fetchChildren", "(", "self", ")", ":", "children", "=", "[", "]", "# List paths under this directory.", "paths", "=", "[", "]", "for", "name", "in", "os", ".", "listdir", "(", "self", ".", "path", ")", ":", "paths", ".", "append", "(", "os", "...
Fetch and return new child items.
[ "Fetch", "and", "return", "new", "child", "items", "." ]
python
test
night-crawler/django-docker-helpers
django_docker_helpers/utils.py
https://github.com/night-crawler/django-docker-helpers/blob/b64f8009101a8eb61d3841124ba19e3ab881aa2f/django_docker_helpers/utils.py#L97-L140
def dot_path(obj: t.Union[t.Dict, object], path: str, default: t.Any = None, separator: str = '.'): """ Provides an access to elements of a mixed dict/object type by a delimiter-separated path. :: class O1: my_dict = {'a': {'b': 1}} class O2: def __init__(self): self.nested = O1() class O3: final = O2() o = O3() assert utils.dot_path(o, 'final.nested.my_dict.a.b') == 1 .. testoutput:: True :param obj: object or dict :param path: path to value :param default: default value if chain resolve failed :param separator: ``.`` by default :return: value or default """ path_items = path.split(separator) val = obj sentinel = object() for item in path_items: if isinstance(val, dict): val = val.get(item, sentinel) if val is sentinel: return default else: val = getattr(val, item, sentinel) if val is sentinel: return default return val
[ "def", "dot_path", "(", "obj", ":", "t", ".", "Union", "[", "t", ".", "Dict", ",", "object", "]", ",", "path", ":", "str", ",", "default", ":", "t", ".", "Any", "=", "None", ",", "separator", ":", "str", "=", "'.'", ")", ":", "path_items", "=",...
Provides an access to elements of a mixed dict/object type by a delimiter-separated path. :: class O1: my_dict = {'a': {'b': 1}} class O2: def __init__(self): self.nested = O1() class O3: final = O2() o = O3() assert utils.dot_path(o, 'final.nested.my_dict.a.b') == 1 .. testoutput:: True :param obj: object or dict :param path: path to value :param default: default value if chain resolve failed :param separator: ``.`` by default :return: value or default
[ "Provides", "an", "access", "to", "elements", "of", "a", "mixed", "dict", "/", "object", "type", "by", "a", "delimiter", "-", "separated", "path", ".", "::" ]
python
train
CameronLonsdale/lantern
lantern/modules/shift.py
https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/modules/shift.py#L48-L80
def crack(ciphertext, *fitness_functions, min_key=0, max_key=26, shift_function=shift_case_english): """Break ``ciphertext`` by enumerating keys between ``min_key`` and ``max_key``. Example: >>> decryptions = crack("KHOOR", fitness.english.quadgrams) >>> print(''.join(decryptions[0].plaintext)) HELLO Args: ciphertext (iterable): The symbols to decrypt *fitness_functions (variable length argument list): Functions to score decryption with Keyword Args: min_key (int): Key to start with max_key (int): Key to stop at (exclusive) shift_function (function(shift, symbol)): Shift function to use Returns: Sorted list of decryptions Raises: ValueError: If min_key exceeds max_key ValueError: If no fitness_functions are given """ if min_key >= max_key: raise ValueError("min_key cannot exceed max_key") decryptions = [] for key in range(min_key, max_key): plaintext = decrypt(key, ciphertext, shift_function=shift_function) decryptions.append(Decryption(plaintext, key, score(plaintext, *fitness_functions))) return sorted(decryptions, reverse=True)
[ "def", "crack", "(", "ciphertext", ",", "*", "fitness_functions", ",", "min_key", "=", "0", ",", "max_key", "=", "26", ",", "shift_function", "=", "shift_case_english", ")", ":", "if", "min_key", ">=", "max_key", ":", "raise", "ValueError", "(", "\"min_key c...
Break ``ciphertext`` by enumerating keys between ``min_key`` and ``max_key``. Example: >>> decryptions = crack("KHOOR", fitness.english.quadgrams) >>> print(''.join(decryptions[0].plaintext)) HELLO Args: ciphertext (iterable): The symbols to decrypt *fitness_functions (variable length argument list): Functions to score decryption with Keyword Args: min_key (int): Key to start with max_key (int): Key to stop at (exclusive) shift_function (function(shift, symbol)): Shift function to use Returns: Sorted list of decryptions Raises: ValueError: If min_key exceeds max_key ValueError: If no fitness_functions are given
[ "Break", "ciphertext", "by", "enumerating", "keys", "between", "min_key", "and", "max_key", "." ]
python
train
msoulier/tftpy
tftpy/TftpStates.py
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L39-L53
def handleOACK(self, pkt): """This method handles an OACK from the server, syncing any accepted options.""" if len(pkt.options.keys()) > 0: if pkt.match_options(self.context.options): log.info("Successful negotiation of options") # Set options to OACK options self.context.options = pkt.options for key in self.context.options: log.info(" %s = %s" % (key, self.context.options[key])) else: log.error("Failed to negotiate options") raise TftpException("Failed to negotiate options") else: raise TftpException("No options found in OACK")
[ "def", "handleOACK", "(", "self", ",", "pkt", ")", ":", "if", "len", "(", "pkt", ".", "options", ".", "keys", "(", ")", ")", ">", "0", ":", "if", "pkt", ".", "match_options", "(", "self", ".", "context", ".", "options", ")", ":", "log", ".", "i...
This method handles an OACK from the server, syncing any accepted options.
[ "This", "method", "handles", "an", "OACK", "from", "the", "server", "syncing", "any", "accepted", "options", "." ]
python
train
wbond/asn1crypto
asn1crypto/core.py
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/core.py#L2857-L2905
def set(self, value): """ Sets the value of the object :param value: A unicode string. May be a dotted integer string, or if _map is provided, one of the mapped values. :raises: ValueError - when an invalid value is passed """ if not isinstance(value, str_cls): raise TypeError(unwrap( ''' %s value must be a unicode string, not %s ''', type_name(self), type_name(value) )) self._native = value if self._map is not None: if value in self._reverse_map: value = self._reverse_map[value] self.contents = b'' first = None for index, part in enumerate(value.split('.')): part = int(part) # The first two parts are merged into a single byte if index == 0: first = part continue elif index == 1: part = (first * 40) + part encoded_part = chr_cls(0x7F & part) part = part >> 7 while part > 0: encoded_part = chr_cls(0x80 | (0x7F & part)) + encoded_part part = part >> 7 self.contents += encoded_part self._header = None if self._trailer != b'': self._trailer = b''
[ "def", "set", "(", "self", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "str_cls", ")", ":", "raise", "TypeError", "(", "unwrap", "(", "'''\n %s value must be a unicode string, not %s\n '''", ",", "type_name", "...
Sets the value of the object :param value: A unicode string. May be a dotted integer string, or if _map is provided, one of the mapped values. :raises: ValueError - when an invalid value is passed
[ "Sets", "the", "value", "of", "the", "object" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_ras_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_ras_ext.py#L96-L109
def show_raslog_output_show_all_raslog_raslog_entries_date_and_time_info(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_raslog = ET.Element("show_raslog") config = show_raslog output = ET.SubElement(show_raslog, "output") show_all_raslog = ET.SubElement(output, "show-all-raslog") raslog_entries = ET.SubElement(show_all_raslog, "raslog-entries") date_and_time_info = ET.SubElement(raslog_entries, "date-and-time-info") date_and_time_info.text = kwargs.pop('date_and_time_info') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "show_raslog_output_show_all_raslog_raslog_entries_date_and_time_info", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "show_raslog", "=", "ET", ".", "Element", "(", "\"show_raslog\"", ")", "conf...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
ANTsX/ANTsPy
ants/core/ants_transform_io.py
https://github.com/ANTsX/ANTsPy/blob/638020af2cdfc5ff4bdb9809ffe67aa505727a3b/ants/core/ants_transform_io.py#L16-L35
def new_ants_transform(precision='float', dimension=3, transform_type='AffineTransform', parameters=None): """ Create a new ANTsTransform ANTsR function: None Example ------- >>> import ants >>> tx = ants.new_ants_transform() """ libfn = utils.get_lib_fn('newAntsTransform%s%i' % (utils.short_ptype(precision), dimension)) itk_tx = libfn(precision, dimension, transform_type) ants_tx = tio.ANTsTransform(precision=precision, dimension=dimension, transform_type=transform_type, pointer=itk_tx) if parameters is not None: ants_tx.set_parameters(parameters) return ants_tx
[ "def", "new_ants_transform", "(", "precision", "=", "'float'", ",", "dimension", "=", "3", ",", "transform_type", "=", "'AffineTransform'", ",", "parameters", "=", "None", ")", ":", "libfn", "=", "utils", ".", "get_lib_fn", "(", "'newAntsTransform%s%i'", "%", ...
Create a new ANTsTransform ANTsR function: None Example ------- >>> import ants >>> tx = ants.new_ants_transform()
[ "Create", "a", "new", "ANTsTransform" ]
python
train
sorgerlab/indra
indra/sources/bel/rdf_processor.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/bel/rdf_processor.py#L526-L652
def get_transcription(self): """Extract Increase/DecreaseAmount INDRA Statements from BEL. Three distinct SPARQL patterns are used to extract amount regulations from BEL. - q_tscript1 searches for a subject which is a Transcription ActivityType of a ProteinAbundance and an object which is an RNAAbundance that is either increased or decreased. Examples: transcriptionalActivity(proteinAbundance(HGNC:FOXP2)) directlyIncreases rnaAbundance(HGNC:SYK) transcriptionalActivity(proteinAbundance(HGNC:FOXP2)) directlyDecreases rnaAbundance(HGNC:CALCRL) - q_tscript2 searches for a subject which is a ProteinAbundance and an object which is an RNAAbundance. Note that this pattern typically exists in an indirect form (i.e. increases/decreases). Example: proteinAbundance(HGNC:MTF1) directlyIncreases rnaAbundance(HGNC:LCN1) - q_tscript3 searches for a subject which is a ModifiedProteinAbundance, with an object which is an RNAAbundance. In the BEL large corpus, this pattern is found for subjects which are protein families or mouse/rat proteins, and the predicate in an indirect increase. Example: proteinAbundance(PFR:"Akt Family",proteinModification(P)) increases rnaAbundance(RGD:Cald1) """ q_tscript1 = prefixes + """ SELECT ?tfName ?targetName ?stmt ?tf ?target ?rel WHERE { ?stmt a belvoc:Statement . ?stmt belvoc:hasRelationship ?rel . ?stmt belvoc:hasSubject ?subject . ?stmt belvoc:hasObject ?target . ?subject a belvoc:AbundanceActivity . ?subject belvoc:hasActivityType belvoc:Transcription . ?subject belvoc:hasChild ?tf . ?tf a belvoc:ProteinAbundance . ?tf belvoc:hasConcept ?tfName . ?target a belvoc:RNAAbundance . ?target belvoc:hasConcept ?targetName . } """ q_tscript2 = prefixes + """ SELECT ?tfName ?targetName ?stmt ?tf ?target ?rel WHERE { ?stmt a belvoc:Statement . ?stmt belvoc:hasRelationship ?rel . ?stmt belvoc:hasSubject ?tf . ?stmt belvoc:hasObject ?target . ?tf a belvoc:ProteinAbundance . ?tf belvoc:hasConcept ?tfName . ?target a belvoc:RNAAbundance . 
?target belvoc:hasConcept ?targetName . } """ q_tscript3 = prefixes + """ SELECT ?tfName ?targetName ?stmt ?tf ?target ?rel ?mod ?pos WHERE { ?stmt a belvoc:Statement . ?stmt belvoc:hasRelationship ?rel . ?stmt belvoc:hasSubject ?subject . ?stmt belvoc:hasObject ?target . ?subject a belvoc:ModifiedProteinAbundance . ?subject belvoc:hasModificationType ?mod . ?subject belvoc:hasChild ?tf . ?tf belvoc:hasConcept ?tfName . ?target a belvoc:RNAAbundance . ?target belvoc:hasConcept ?targetName . OPTIONAL { ?subject belvoc:hasModificationPosition ?pos . } } """ for q_tscript in (q_tscript1, q_tscript2, q_tscript3): res_tscript = self.g.query(q_tscript) for stmt in res_tscript: # Get modifications on the subject, if any if q_tscript == q_tscript1: tf = self._get_agent(stmt[0], stmt[3]) tf.activity = ActivityCondition('transcription', True) elif q_tscript == q_tscript3: mod = term_from_uri(stmt[6]) mod_pos = term_from_uri(stmt[7]) mc = self._get_mod_condition(mod, mod_pos) if mc is None: continue tf = self._get_agent(stmt[0], stmt[3]) tf.mods = mods=[mc] else: tf = self._get_agent(stmt[0], stmt[3]) # Parse out the elements of the query evidence = self._get_evidence(stmt[2]) target = self._get_agent(stmt[1], stmt[4]) stmt_str = strip_statement(stmt[2]) # Get the relationship (increases/decreases, etc.) rel = term_from_uri(stmt[5]) if rel == 'DirectlyIncreases' or rel == 'DirectlyDecreases': is_direct = True else: is_direct = False # Build the INDRA statement stmt = None if rel == 'DirectlyIncreases' or rel == 'Increases': stmt = IncreaseAmount(tf, target, evidence) elif rel == 'DirectlyDecreases' or rel == 'Decreases': stmt = DecreaseAmount(tf, target, evidence) # If we've matched a pattern, mark this as a converted statement if stmt is not None: if is_direct: self.statements.append(stmt) self.converted_direct_stmts.append(stmt_str) else: self.indirect_stmts.append(stmt) self.converted_indirect_stmts.append(stmt_str)
[ "def", "get_transcription", "(", "self", ")", ":", "q_tscript1", "=", "prefixes", "+", "\"\"\"\n SELECT ?tfName ?targetName ?stmt ?tf ?target ?rel\n WHERE {\n ?stmt a belvoc:Statement .\n ?stmt belvoc:hasRelationship ?rel .\n ?st...
Extract Increase/DecreaseAmount INDRA Statements from BEL. Three distinct SPARQL patterns are used to extract amount regulations from BEL. - q_tscript1 searches for a subject which is a Transcription ActivityType of a ProteinAbundance and an object which is an RNAAbundance that is either increased or decreased. Examples: transcriptionalActivity(proteinAbundance(HGNC:FOXP2)) directlyIncreases rnaAbundance(HGNC:SYK) transcriptionalActivity(proteinAbundance(HGNC:FOXP2)) directlyDecreases rnaAbundance(HGNC:CALCRL) - q_tscript2 searches for a subject which is a ProteinAbundance and an object which is an RNAAbundance. Note that this pattern typically exists in an indirect form (i.e. increases/decreases). Example: proteinAbundance(HGNC:MTF1) directlyIncreases rnaAbundance(HGNC:LCN1) - q_tscript3 searches for a subject which is a ModifiedProteinAbundance, with an object which is an RNAAbundance. In the BEL large corpus, this pattern is found for subjects which are protein families or mouse/rat proteins, and the predicate in an indirect increase. Example: proteinAbundance(PFR:"Akt Family",proteinModification(P)) increases rnaAbundance(RGD:Cald1)
[ "Extract", "Increase", "/", "DecreaseAmount", "INDRA", "Statements", "from", "BEL", "." ]
python
train
deep-compute/logagg
logagg/collector.py
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/collector.py#L184-L221
def assign_default_log_values(self, fpath, line, formatter): ''' >>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30) >>> from pprint import pprint >>> formatter = 'logagg.formatters.mongodb' >>> fpath = '/var/log/mongodb/mongodb.log' >>> line = 'some log line here' >>> default_log = lc.assign_default_log_values(fpath, line, formatter) >>> pprint(default_log) #doctest: +ELLIPSIS {'data': {}, 'error': False, 'error_tb': '', 'event': 'event', 'file': '/var/log/mongodb/mongodb.log', 'formatter': 'logagg.formatters.mongodb', 'host': '...', 'id': None, 'level': 'debug', 'raw': 'some log line here', 'timestamp': '...', 'type': 'log'} ''' return dict( id=None, file=fpath, host=self.HOST, formatter=formatter, event='event', data={}, raw=line, timestamp=datetime.datetime.utcnow().isoformat(), type='log', level='debug', error= False, error_tb='', )
[ "def", "assign_default_log_values", "(", "self", ",", "fpath", ",", "line", ",", "formatter", ")", ":", "return", "dict", "(", "id", "=", "None", ",", "file", "=", "fpath", ",", "host", "=", "self", ".", "HOST", ",", "formatter", "=", "formatter", ",",...
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30) >>> from pprint import pprint >>> formatter = 'logagg.formatters.mongodb' >>> fpath = '/var/log/mongodb/mongodb.log' >>> line = 'some log line here' >>> default_log = lc.assign_default_log_values(fpath, line, formatter) >>> pprint(default_log) #doctest: +ELLIPSIS {'data': {}, 'error': False, 'error_tb': '', 'event': 'event', 'file': '/var/log/mongodb/mongodb.log', 'formatter': 'logagg.formatters.mongodb', 'host': '...', 'id': None, 'level': 'debug', 'raw': 'some log line here', 'timestamp': '...', 'type': 'log'}
[ ">>>", "lc", "=", "LogCollector", "(", "file", "=", "/", "path", "/", "to", "/", "log_file", ".", "log", ":", "formatter", "=", "logagg", ".", "formatters", ".", "basescript", "30", ")", ">>>", "from", "pprint", "import", "pprint" ]
python
train
Akay7/nosql2django
nosql2django/parser_mapper.py
https://github.com/Akay7/nosql2django/blob/f33af832d4d0d652bd730471d1ce6a717700d1e7/nosql2django/parser_mapper.py#L27-L53
def save_to_db(model_text_id, parsed_values): """save to db and return saved object""" Model = apps.get_model(model_text_id) # normalise values and separate to m2m, simple simple_fields = {} many2many_fields = {} for field, value in parsed_values.items(): if (Model._meta.get_field( field).get_internal_type() == 'ManyToManyField'): many2many_fields[field] = value elif (Model._meta.get_field( field).get_internal_type() == 'DateTimeField'): simple_fields[field] = time_parser.parse(value) else: simple_fields[field] = value # ToDo: add unique identify parameter to field # ToDo: allow unique identify m2m field model, created = Model.objects.get_or_create(**simple_fields) for field, value in many2many_fields.items(): setattr(model, field, value) model.save() return model
[ "def", "save_to_db", "(", "model_text_id", ",", "parsed_values", ")", ":", "Model", "=", "apps", ".", "get_model", "(", "model_text_id", ")", "# normalise values and separate to m2m, simple", "simple_fields", "=", "{", "}", "many2many_fields", "=", "{", "}", "for", ...
save to db and return saved object
[ "save", "to", "db", "and", "return", "saved", "object" ]
python
train
RaRe-Technologies/gensim-simserver
simserver/simserver.py
https://github.com/RaRe-Technologies/gensim-simserver/blob/e7e59e836ef6d9da019a8c6b218ef0bdd998b2da/simserver/simserver.py#L556-L583
def train(self, corpus=None, method='auto', clear_buffer=True, params=None): """ Create an indexing model. Will overwrite the model if it already exists. All indexes become invalid, because documents in them use a now-obsolete representation. The model is trained on documents previously entered via `buffer`, or directly on `corpus`, if specified. """ if corpus is not None: # use the supplied corpus only (erase existing buffer, if any) self.flush(clear_buffer=True) self.buffer(corpus) if not self.fresh_docs: msg = "train called but no training corpus specified for %s" % self logger.error(msg) raise ValueError(msg) if method == 'auto': numdocs = len(self.fresh_docs) if numdocs < 1000: logging.warning("too few training documents; using simple log-entropy model instead of latent semantic indexing") method = 'logentropy' else: method = 'lsi' if params is None: params = {} self.model = SimModel(self.fresh_docs, method=method, params=params) self.flush(save_model=True, clear_buffer=clear_buffer)
[ "def", "train", "(", "self", ",", "corpus", "=", "None", ",", "method", "=", "'auto'", ",", "clear_buffer", "=", "True", ",", "params", "=", "None", ")", ":", "if", "corpus", "is", "not", "None", ":", "# use the supplied corpus only (erase existing buffer, if ...
Create an indexing model. Will overwrite the model if it already exists. All indexes become invalid, because documents in them use a now-obsolete representation. The model is trained on documents previously entered via `buffer`, or directly on `corpus`, if specified.
[ "Create", "an", "indexing", "model", ".", "Will", "overwrite", "the", "model", "if", "it", "already", "exists", ".", "All", "indexes", "become", "invalid", "because", "documents", "in", "them", "use", "a", "now", "-", "obsolete", "representation", "." ]
python
train
facetoe/zenpy
zenpy/lib/api.py
https://github.com/facetoe/zenpy/blob/34c54c7e408b9ed01604ddf8b3422204c8bf31ea/zenpy/lib/api.py#L834-L838
def skips(self, user): """ Skips for user. Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/core/ticket_skips>`__. """ return self._get(self._build_url(self.endpoint.skips(id=user)))
[ "def", "skips", "(", "self", ",", "user", ")", ":", "return", "self", ".", "_get", "(", "self", ".", "_build_url", "(", "self", ".", "endpoint", ".", "skips", "(", "id", "=", "user", ")", ")", ")" ]
Skips for user. Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/core/ticket_skips>`__.
[ "Skips", "for", "user", ".", "Zendesk", "API", "Reference", "<https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "core", "/", "ticket_skips", ">", "__", "." ]
python
train
raiden-network/raiden
raiden/network/proxies/payment_channel.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/network/proxies/payment_channel.py#L178-L195
def close( self, nonce: Nonce, balance_hash: BalanceHash, additional_hash: AdditionalHash, signature: Signature, block_identifier: BlockSpecification, ): """ Closes the channel using the provided balance proof. """ self.token_network.close( channel_identifier=self.channel_identifier, partner=self.participant2, balance_hash=balance_hash, nonce=nonce, additional_hash=additional_hash, signature=signature, given_block_identifier=block_identifier, )
[ "def", "close", "(", "self", ",", "nonce", ":", "Nonce", ",", "balance_hash", ":", "BalanceHash", ",", "additional_hash", ":", "AdditionalHash", ",", "signature", ":", "Signature", ",", "block_identifier", ":", "BlockSpecification", ",", ")", ":", "self", ".",...
Closes the channel using the provided balance proof.
[ "Closes", "the", "channel", "using", "the", "provided", "balance", "proof", "." ]
python
train
marshallward/f90nml
f90nml/parser.py
https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/parser.py#L639-L647
def _parse_indices(self): """Parse a sequence of Fortran vector indices as a list of tuples.""" v_name = self.prior_token v_indices = [] while self.token in (',', '('): v_indices.append(self._parse_index(v_name)) return v_indices
[ "def", "_parse_indices", "(", "self", ")", ":", "v_name", "=", "self", ".", "prior_token", "v_indices", "=", "[", "]", "while", "self", ".", "token", "in", "(", "','", ",", "'('", ")", ":", "v_indices", ".", "append", "(", "self", ".", "_parse_index", ...
Parse a sequence of Fortran vector indices as a list of tuples.
[ "Parse", "a", "sequence", "of", "Fortran", "vector", "indices", "as", "a", "list", "of", "tuples", "." ]
python
train
craigahobbs/chisel
src/chisel/app.py
https://github.com/craigahobbs/chisel/blob/d306a9eae2ff757647c6ca1c933bc944efa5c326/src/chisel/app.py#L224-L241
def response_json(self, status, response, content_type='application/json', encoding='utf-8', headers=None, jsonp=None): """ Send a JSON response """ encoder = JSONEncoder( check_circular=self.app.validate_output, allow_nan=False, sort_keys=True, indent=2 if self.app.pretty_output else None, separators=(',', ': ') if self.app.pretty_output else (',', ':') ) content = encoder.encode(response) if jsonp: content_list = [jsonp.encode(encoding), b'(', content.encode(encoding), b');'] else: content_list = [content.encode(encoding)] return self.response(status, content_type, content_list, headers=headers)
[ "def", "response_json", "(", "self", ",", "status", ",", "response", ",", "content_type", "=", "'application/json'", ",", "encoding", "=", "'utf-8'", ",", "headers", "=", "None", ",", "jsonp", "=", "None", ")", ":", "encoder", "=", "JSONEncoder", "(", "che...
Send a JSON response
[ "Send", "a", "JSON", "response" ]
python
train
crate/crash
src/crate/crash/tabulate.py
https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/tabulate.py#L468-L478
def _visible_width(s): """Visible width of a printed string. ANSI color codes are removed. >>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world") (5, 5) """ if isinstance(s, _text_type) or isinstance(s, _binary_type): return _max_line_width(_strip_invisible(s)) else: return _max_line_width(_text_type(s))
[ "def", "_visible_width", "(", "s", ")", ":", "if", "isinstance", "(", "s", ",", "_text_type", ")", "or", "isinstance", "(", "s", ",", "_binary_type", ")", ":", "return", "_max_line_width", "(", "_strip_invisible", "(", "s", ")", ")", "else", ":", "return...
Visible width of a printed string. ANSI color codes are removed. >>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world") (5, 5)
[ "Visible", "width", "of", "a", "printed", "string", ".", "ANSI", "color", "codes", "are", "removed", "." ]
python
train
moonso/loqusdb
loqusdb/utils/vcf.py
https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/vcf.py#L89-L180
def check_vcf(vcf_path, expected_type='snv'): """Check if there are any problems with the vcf file Args: vcf_path(str) expected_type(str): 'sv' or 'snv' Returns: vcf_info(dict): dict like { 'nr_variants':<INT>, 'variant_type': <STR> in ['snv', 'sv'], 'individuals': <LIST> individual positions in file } """ LOG.info("Check if vcf is on correct format...") vcf = VCF(vcf_path) individuals = vcf.samples variant_type = None previous_pos = None previous_chrom = None posititon_variants = set() nr_variants = 0 for nr_variants,variant in enumerate(vcf,1): # Check the type of variant current_type = 'sv' if variant.var_type == 'sv' else 'snv' if not variant_type: variant_type = current_type # Vcf can not include both snvs and svs if variant_type != current_type: raise VcfError("Vcf includes a mix of snvs and svs") current_chrom = variant.CHROM current_pos = variant.POS # We start with a simple id that can be used by SV:s variant_id = "{0}_{1}".format(current_chrom, current_pos) # For SNVs we can create a proper variant id with chrom_pos_ref_alt if variant_type == 'snv': variant_id = get_variant_id(variant) # Initiate variables if not previous_chrom: previous_chrom = current_chrom previous_pos = current_pos posititon_variants = set([variant_id]) continue # Update variables if new chromosome if current_chrom != previous_chrom: previous_chrom = current_chrom previous_pos = current_pos posititon_variants = set([variant_id]) continue if variant_type == 'snv': # Check if variant is unique if current_pos == previous_pos: if variant_id in posititon_variants: raise VcfError("Variant {0} occurs several times"\ " in vcf".format(variant_id)) else: posititon_variants.add(variant_id) # Check if vcf is sorted else: if not current_pos >= previous_pos: raise VcfError("Vcf if not sorted in a correct way") previous_pos = current_pos # Reset posititon_variants since we are on a new position posititon_variants = set([variant_id]) if variant_type != expected_type: raise VcfError("VCF file does not 
only include {0}s, please check vcf {1}".format( expected_type.upper(), vcf_path)) LOG.info("Vcf file %s looks fine", vcf_path) LOG.info("Nr of variants in vcf: {0}".format(nr_variants)) LOG.info("Type of variants in vcf: {0}".format(variant_type)) vcf_info = { 'nr_variants': nr_variants, 'variant_type': variant_type, 'individuals': individuals, } return vcf_info
[ "def", "check_vcf", "(", "vcf_path", ",", "expected_type", "=", "'snv'", ")", ":", "LOG", ".", "info", "(", "\"Check if vcf is on correct format...\"", ")", "vcf", "=", "VCF", "(", "vcf_path", ")", "individuals", "=", "vcf", ".", "samples", "variant_type", "="...
Check if there are any problems with the vcf file Args: vcf_path(str) expected_type(str): 'sv' or 'snv' Returns: vcf_info(dict): dict like { 'nr_variants':<INT>, 'variant_type': <STR> in ['snv', 'sv'], 'individuals': <LIST> individual positions in file }
[ "Check", "if", "there", "are", "any", "problems", "with", "the", "vcf", "file" ]
python
train
log2timeline/plaso
plaso/engine/zeromq_queue.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/engine/zeromq_queue.py#L181-L219
def _CreateZMQSocket(self): """Creates a ZeroMQ socket.""" logger.debug('Creating socket for {0:s}'.format(self.name)) if not self._zmq_context: self._zmq_context = zmq.Context() # The terminate and close threading events need to be created when the # socket is opened. Threading events are unpickleable objects and cannot # passed in multiprocessing on Windows. if not self._terminate_event: self._terminate_event = threading.Event() if not self._closed_event: self._closed_event = threading.Event() if self._zmq_socket: logger.debug('Closing old socket for {0:s}'.format(self.name)) self._zmq_socket.close() self._zmq_socket = None self._zmq_socket = self._zmq_context.socket(self._SOCKET_TYPE) self._SetSocketTimeouts() self._SetSocketHighWaterMark() if self.port: address = '{0:s}:{1:d}'.format(self._SOCKET_ADDRESS, self.port) if self.SOCKET_CONNECTION_TYPE == self.SOCKET_CONNECTION_CONNECT: self._zmq_socket.connect(address) logger.debug('{0:s} connected to {1:s}'.format(self.name, address)) else: self._zmq_socket.bind(address) logger.debug( '{0:s} bound to specified port {1:s}'.format(self.name, address)) else: self.port = self._zmq_socket.bind_to_random_port(self._SOCKET_ADDRESS) logger.debug( '{0:s} bound to random port {1:d}'.format(self.name, self.port))
[ "def", "_CreateZMQSocket", "(", "self", ")", ":", "logger", ".", "debug", "(", "'Creating socket for {0:s}'", ".", "format", "(", "self", ".", "name", ")", ")", "if", "not", "self", ".", "_zmq_context", ":", "self", ".", "_zmq_context", "=", "zmq", ".", ...
Creates a ZeroMQ socket.
[ "Creates", "a", "ZeroMQ", "socket", "." ]
python
train
secdev/scapy
scapy/layers/tls/cert.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/tls/cert.py#L959-L984
def verifyChainFromCAPath(self, capath, untrusted_file=None): """ Does the same job as .verifyChainFromCAFile() but using the list of anchors in capath directory. The directory should (only) contain certificates files in PEM format. As for .verifyChainFromCAFile(), a list of untrusted certificates can be passed as a file (concatenation of the certificates in PEM format). """ try: anchors = [] for cafile in os.listdir(capath): anchors.append(Cert(open(os.path.join(capath, cafile), "rb").read())) # noqa: E501 except Exception: raise Exception("capath provided is not a valid cert path") untrusted = None if untrusted_file: try: f = open(untrusted_file, "rb") untrusted_certs = f.read() f.close() except Exception: raise Exception("Could not read from untrusted_file") untrusted = [Cert(c) for c in split_pem(untrusted_certs)] return self.verifyChain(anchors, untrusted)
[ "def", "verifyChainFromCAPath", "(", "self", ",", "capath", ",", "untrusted_file", "=", "None", ")", ":", "try", ":", "anchors", "=", "[", "]", "for", "cafile", "in", "os", ".", "listdir", "(", "capath", ")", ":", "anchors", ".", "append", "(", "Cert",...
Does the same job as .verifyChainFromCAFile() but using the list of anchors in capath directory. The directory should (only) contain certificates files in PEM format. As for .verifyChainFromCAFile(), a list of untrusted certificates can be passed as a file (concatenation of the certificates in PEM format).
[ "Does", "the", "same", "job", "as", ".", "verifyChainFromCAFile", "()", "but", "using", "the", "list", "of", "anchors", "in", "capath", "directory", ".", "The", "directory", "should", "(", "only", ")", "contain", "certificates", "files", "in", "PEM", "format...
python
train
alexhayes/django-toolkit
django_toolkit/date_util.py
https://github.com/alexhayes/django-toolkit/blob/b64106392fad596defc915b8235fe6e1d0013b5b/django_toolkit/date_util.py#L50-L63
def days(start, stop): """ Return days between start & stop (inclusive) Note that start must be less than stop or else 0 is returned. @param start: Start date @param stop: Stop date @return int """ dates=rrule.rruleset() # Get dates between start/stop (which are inclusive) dates.rrule(rrule.rrule(rrule.DAILY, dtstart=start, until=stop)) return dates.count()
[ "def", "days", "(", "start", ",", "stop", ")", ":", "dates", "=", "rrule", ".", "rruleset", "(", ")", "# Get dates between start/stop (which are inclusive)", "dates", ".", "rrule", "(", "rrule", ".", "rrule", "(", "rrule", ".", "DAILY", ",", "dtstart", "=", ...
Return days between start & stop (inclusive) Note that start must be less than stop or else 0 is returned. @param start: Start date @param stop: Stop date @return int
[ "Return", "days", "between", "start", "&", "stop", "(", "inclusive", ")", "Note", "that", "start", "must", "be", "less", "than", "stop", "or", "else", "0", "is", "returned", "." ]
python
train
datajoint/datajoint-python
datajoint/external.py
https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/external.py#L121-L129
def references(self): """ :return: generator of referencing table names and their referencing columns """ return self.connection.query(""" SELECT concat('`', table_schema, '`.`', table_name, '`') as referencing_table, column_name FROM information_schema.key_column_usage WHERE referenced_table_name="{tab}" and referenced_table_schema="{db}" """.format(tab=self.table_name, db=self.database), as_dict=True)
[ "def", "references", "(", "self", ")", ":", "return", "self", ".", "connection", ".", "query", "(", "\"\"\"\n SELECT concat('`', table_schema, '`.`', table_name, '`') as referencing_table, column_name\n FROM information_schema.key_column_usage\n WHERE referenced_table_...
:return: generator of referencing table names and their referencing columns
[ ":", "return", ":", "generator", "of", "referencing", "table", "names", "and", "their", "referencing", "columns" ]
python
train
Parsl/parsl
parsl/dataflow/dflow.py
https://github.com/Parsl/parsl/blob/d7afb3bc37f50dcf224ae78637944172edb35dac/parsl/dataflow/dflow.py#L1033-L1050
def load(cls, config: Optional[Config] = None): """Load a DataFlowKernel. Args: - config (Config) : Configuration to load. This config will be passed to a new DataFlowKernel instantiation which will be set as the active DataFlowKernel. Returns: - DataFlowKernel : The loaded DataFlowKernel object. """ if cls._dfk is not None: raise RuntimeError('Config has already been loaded') if config is None: cls._dfk = DataFlowKernel(Config()) else: cls._dfk = DataFlowKernel(config) return cls._dfk
[ "def", "load", "(", "cls", ",", "config", ":", "Optional", "[", "Config", "]", "=", "None", ")", ":", "if", "cls", ".", "_dfk", "is", "not", "None", ":", "raise", "RuntimeError", "(", "'Config has already been loaded'", ")", "if", "config", "is", "None",...
Load a DataFlowKernel. Args: - config (Config) : Configuration to load. This config will be passed to a new DataFlowKernel instantiation which will be set as the active DataFlowKernel. Returns: - DataFlowKernel : The loaded DataFlowKernel object.
[ "Load", "a", "DataFlowKernel", "." ]
python
valid
Groundworkstech/pybfd
pybfd/bfd.py
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L516-L521
def file_flags(self, _file_flags): """Set the new file flags attribute of the BFD file being processed.""" if not self._ptr: raise BfdException("BFD not initialized") return _bfd.set_file_flags(self._ptr, _file_flags)
[ "def", "file_flags", "(", "self", ",", "_file_flags", ")", ":", "if", "not", "self", ".", "_ptr", ":", "raise", "BfdException", "(", "\"BFD not initialized\"", ")", "return", "_bfd", ".", "set_file_flags", "(", "self", ".", "_ptr", ",", "_file_flags", ")" ]
Set the new file flags attribute of the BFD file being processed.
[ "Set", "the", "new", "file", "flags", "attribute", "of", "the", "BFD", "file", "being", "processed", "." ]
python
train
IdentityPython/pysaml2
src/saml2/attribute_converter.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/attribute_converter.py#L104-L148
def list_to_local(acs, attrlist, allow_unknown_attributes=False): """ Replaces the attribute names in a attribute value assertion with the equivalent name from a local name format. :param acs: List of Attribute Converters :param attrlist: List of Attributes :param allow_unknown_attributes: If unknown attributes are allowed :return: A key,values dictionary """ if not acs: acs = [AttributeConverter()] acsd = {"": acs} else: acsd = dict([(a.name_format, a) for a in acs]) ava = {} for attr in attrlist: try: _func = acsd[attr.name_format].ava_from except KeyError: if attr.name_format == NAME_FORMAT_UNSPECIFIED or \ allow_unknown_attributes: _func = acs[0].lcd_ava_from else: logger.info("Unsupported attribute name format: %s", attr.name_format) continue try: key, val = _func(attr) except KeyError: if allow_unknown_attributes: key, val = acs[0].lcd_ava_from(attr) else: logger.info("Unknown attribute name: %s", attr) continue except AttributeError: continue try: ava[key].extend(val) except KeyError: ava[key] = val return ava
[ "def", "list_to_local", "(", "acs", ",", "attrlist", ",", "allow_unknown_attributes", "=", "False", ")", ":", "if", "not", "acs", ":", "acs", "=", "[", "AttributeConverter", "(", ")", "]", "acsd", "=", "{", "\"\"", ":", "acs", "}", "else", ":", "acsd",...
Replaces the attribute names in a attribute value assertion with the equivalent name from a local name format. :param acs: List of Attribute Converters :param attrlist: List of Attributes :param allow_unknown_attributes: If unknown attributes are allowed :return: A key,values dictionary
[ "Replaces", "the", "attribute", "names", "in", "a", "attribute", "value", "assertion", "with", "the", "equivalent", "name", "from", "a", "local", "name", "format", "." ]
python
train
saltstack/salt
salt/cloud/clouds/xen.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L370-L411
def vdi_list(call=None, kwargs=None): ''' Return available Xen VDI images If this function is called with the ``-f`` or ``--function`` then it can return a list with minimal deatil using the ``terse=True`` keyword argument. .. code-block:: bash salt-cloud -f vdi_list myxen terse=True ''' if call == 'action': raise SaltCloudException( 'This function must be called with -f or --function.') log.debug('kwargs is %s', kwargs) if kwargs is not None: if 'terse' in kwargs: if kwargs['terse'] == 'True': terse = True else: terse = False else: terse = False else: kwargs = {} terse = False session = _get_session() vdis = session.xenapi.VDI.get_all() ret = {} for vdi in vdis: data = session.xenapi.VDI.get_record(vdi) log.debug(type(terse)) if terse is True: ret[data.get('name_label')] = { 'uuid': data.get('uuid'), 'OpqueRef': vdi} else: data.update({'OpaqueRef': vdi}) ret[data.get('name_label')] = data return ret
[ "def", "vdi_list", "(", "call", "=", "None", ",", "kwargs", "=", "None", ")", ":", "if", "call", "==", "'action'", ":", "raise", "SaltCloudException", "(", "'This function must be called with -f or --function.'", ")", "log", ".", "debug", "(", "'kwargs is %s'", ...
Return available Xen VDI images If this function is called with the ``-f`` or ``--function`` then it can return a list with minimal deatil using the ``terse=True`` keyword argument. .. code-block:: bash salt-cloud -f vdi_list myxen terse=True
[ "Return", "available", "Xen", "VDI", "images" ]
python
train
tnkteja/myhelp
virtualEnvironment/lib/python2.7/site-packages/coverage/config.py
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/config.py#L16-L21
def read(self, filename): """Read a filename as UTF-8 configuration data.""" kwargs = {} if sys.version_info >= (3, 2): kwargs['encoding'] = "utf-8" return configparser.RawConfigParser.read(self, filename, **kwargs)
[ "def", "read", "(", "self", ",", "filename", ")", ":", "kwargs", "=", "{", "}", "if", "sys", ".", "version_info", ">=", "(", "3", ",", "2", ")", ":", "kwargs", "[", "'encoding'", "]", "=", "\"utf-8\"", "return", "configparser", ".", "RawConfigParser", ...
Read a filename as UTF-8 configuration data.
[ "Read", "a", "filename", "as", "UTF", "-", "8", "configuration", "data", "." ]
python
test
mar10/wsgidav
wsgidav/util.py
https://github.com/mar10/wsgidav/blob/cec0d84222fc24bea01be1cea91729001963f172/wsgidav/util.py#L91-L118
def _parse_gmt_time(timestring): """Return a standard time tuple (see time and calendar), for a date/time string.""" # Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123 try: return time.strptime(timestring, "%a, %d %b %Y %H:%M:%S GMT") except Exception: pass # Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036 try: return time.strptime(timestring, "%A %d-%b-%y %H:%M:%S GMT") except Exception: pass # Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format try: return time.strptime(timestring, "%a %b %d %H:%M:%S %Y") except Exception: pass # Sun Nov 6 08:49:37 1994 +0100 ; ANSI C's asctime() format with # timezon try: return parsedate(timestring) except Exception: pass return None
[ "def", "_parse_gmt_time", "(", "timestring", ")", ":", "# Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123", "try", ":", "return", "time", ".", "strptime", "(", "timestring", ",", "\"%a, %d %b %Y %H:%M:%S GMT\"", ")", "except", "Exception", ":", "pass", "# Su...
Return a standard time tuple (see time and calendar), for a date/time string.
[ "Return", "a", "standard", "time", "tuple", "(", "see", "time", "and", "calendar", ")", "for", "a", "date", "/", "time", "string", "." ]
python
valid
MisterY/pydatum
pydatum/datum.py
https://github.com/MisterY/pydatum/blob/4b39f43040e31a95bcf219603b6429078a9ba3c2/pydatum/datum.py#L172-L175
def subtract_months(self, months: int) -> datetime: """ Subtracts a number of months from the current value """ self.value = self.value - relativedelta(months=months) return self.value
[ "def", "subtract_months", "(", "self", ",", "months", ":", "int", ")", "->", "datetime", ":", "self", ".", "value", "=", "self", ".", "value", "-", "relativedelta", "(", "months", "=", "months", ")", "return", "self", ".", "value" ]
Subtracts a number of months from the current value
[ "Subtracts", "a", "number", "of", "months", "from", "the", "current", "value" ]
python
train
CityOfZion/neo-python
neo/Implementations/Notifications/LevelDB/NotificationDB.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Implementations/Notifications/LevelDB/NotificationDB.py#L233-L259
def get_by_addr(self, address): """ Lookup a set of notifications by address Args: address (UInt160 or str): hash of address for notifications Returns: list: a list of notifications """ addr = address if isinstance(address, str) and len(address) == 34: addr = Helper.AddrStrToScriptHash(address) if not isinstance(addr, UInt160): raise Exception("Incorrect address format") addrlist_snapshot = self.db.prefixed_db(NotificationPrefix.PREFIX_ADDR).snapshot() results = [] for val in addrlist_snapshot.iterator(prefix=bytes(addr.Data), include_key=False): if len(val) > 4: try: event = SmartContractEvent.FromByteArray(val) results.append(event) except Exception as e: logger.error("could not parse event: %s %s" % (e, val)) return results
[ "def", "get_by_addr", "(", "self", ",", "address", ")", ":", "addr", "=", "address", "if", "isinstance", "(", "address", ",", "str", ")", "and", "len", "(", "address", ")", "==", "34", ":", "addr", "=", "Helper", ".", "AddrStrToScriptHash", "(", "addre...
Lookup a set of notifications by address Args: address (UInt160 or str): hash of address for notifications Returns: list: a list of notifications
[ "Lookup", "a", "set", "of", "notifications", "by", "address", "Args", ":", "address", "(", "UInt160", "or", "str", ")", ":", "hash", "of", "address", "for", "notifications" ]
python
train
rytilahti/python-songpal
songpal/discovery.py
https://github.com/rytilahti/python-songpal/blob/0443de6b3d960b9067a851d82261ca00e46b4618/songpal/discovery.py#L21-L68
async def discover(timeout, debug=0, callback=None): """Discover supported devices.""" ST = "urn:schemas-sony-com:service:ScalarWebAPI:1" _LOGGER.info("Discovering for %s seconds" % timeout) from async_upnp_client import UpnpFactory from async_upnp_client.aiohttp import AiohttpRequester async def parse_device(device): requester = AiohttpRequester() factory = UpnpFactory(requester) url = device["location"] device = await factory.async_create_device(url) if debug > 0: print(etree.ElementTree.tostring(device.xml).decode()) NS = { 'av': 'urn:schemas-sony-com:av', } info = device.xml.find(".//av:X_ScalarWebAPI_DeviceInfo", NS) if not info: _LOGGER.error("Unable to find X_ScalaerWebAPI_DeviceInfo") return endpoint = info.find(".//av:X_ScalarWebAPI_BaseURL", NS).text version = info.find(".//av:X_ScalarWebAPI_Version", NS).text services = [x.text for x in info.findall(".//av:X_ScalarWebAPI_ServiceType", NS)] dev = DiscoveredDevice(name=device.name, model_number=device.model_number, udn=device.udn, endpoint=endpoint, version=version, services=services, upnp_services=list(device.services.keys()), upnp_location=url) _LOGGER.debug("Discovered: %s" % dev) if callback is not None: await callback(dev) await async_search(timeout=timeout, service_type=ST, async_callback=parse_device)
[ "async", "def", "discover", "(", "timeout", ",", "debug", "=", "0", ",", "callback", "=", "None", ")", ":", "ST", "=", "\"urn:schemas-sony-com:service:ScalarWebAPI:1\"", "_LOGGER", ".", "info", "(", "\"Discovering for %s seconds\"", "%", "timeout", ")", "from", ...
Discover supported devices.
[ "Discover", "supported", "devices", "." ]
python
train
noxdafox/clipspy
clips/classes.py
https://github.com/noxdafox/clipspy/blob/b22d71a6da821c1715d8fa00d7d75cabc09ed364/clips/classes.py#L187-L202
def save_instances(self, path, binary=False, mode=SaveMode.LOCAL_SAVE): """Save the instances in the system to the specified file. If binary is True, the instances will be saved in binary format. The Python equivalent of the CLIPS save-instances command. """ if binary: ret = lib.EnvBinarySaveInstances(self._env, path.encode(), mode) else: ret = lib.EnvSaveInstances(self._env, path.encode(), mode) if ret == 0: raise CLIPSError(self._env) return ret
[ "def", "save_instances", "(", "self", ",", "path", ",", "binary", "=", "False", ",", "mode", "=", "SaveMode", ".", "LOCAL_SAVE", ")", ":", "if", "binary", ":", "ret", "=", "lib", ".", "EnvBinarySaveInstances", "(", "self", ".", "_env", ",", "path", "."...
Save the instances in the system to the specified file. If binary is True, the instances will be saved in binary format. The Python equivalent of the CLIPS save-instances command.
[ "Save", "the", "instances", "in", "the", "system", "to", "the", "specified", "file", "." ]
python
train
PythonCharmers/python-future
src/future/backports/http/client.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/client.py#L771-L782
def set_tunnel(self, host, port=None, headers=None): """ Sets up the host and the port for the HTTP CONNECT Tunnelling. The headers argument should be a mapping of extra HTTP headers to send with the CONNECT request. """ self._tunnel_host = host self._tunnel_port = port if headers: self._tunnel_headers = headers else: self._tunnel_headers.clear()
[ "def", "set_tunnel", "(", "self", ",", "host", ",", "port", "=", "None", ",", "headers", "=", "None", ")", ":", "self", ".", "_tunnel_host", "=", "host", "self", ".", "_tunnel_port", "=", "port", "if", "headers", ":", "self", ".", "_tunnel_headers", "=...
Sets up the host and the port for the HTTP CONNECT Tunnelling. The headers argument should be a mapping of extra HTTP headers to send with the CONNECT request.
[ "Sets", "up", "the", "host", "and", "the", "port", "for", "the", "HTTP", "CONNECT", "Tunnelling", "." ]
python
train
google/grr
grr/core/grr_response_core/lib/parsers/wmi_parser.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/parsers/wmi_parser.py#L99-L138
def ParseMultiple(self, result_dicts): """Parse WMI Event Consumers.""" for result_dict in result_dicts: wmi_dict = result_dict.ToDict() try: creator_sid_bytes = bytes(wmi_dict["CreatorSID"]) wmi_dict["CreatorSID"] = BinarySIDtoStringSID(creator_sid_bytes) except ValueError: # We recover from corrupt SIDs by outputting it raw as a string wmi_dict["CreatorSID"] = compatibility.Repr(wmi_dict["CreatorSID"]) except KeyError: pass for output_type in self.output_types: anomalies = [] output = rdfvalue.RDFValue.classes[output_type]() for k, v in iteritems(wmi_dict): try: output.Set(k, v) except AttributeError as e: # Skip any attribute we don't know about anomalies.append("Unknown field %s, with value %s" % (k, v)) except ValueError as e: anomalies.append("Invalid value %s for field %s: %s" % (v, k, e)) # Yield anomalies first to help with debugging if anomalies: yield rdf_anomaly.Anomaly( type="PARSER_ANOMALY", generated_by=self.__class__.__name__, finding=anomalies) # Raise if the parser generated no output but there were fields. if wmi_dict and not output: raise ValueError("Non-empty dict %s returned empty output." % wmi_dict) yield output
[ "def", "ParseMultiple", "(", "self", ",", "result_dicts", ")", ":", "for", "result_dict", "in", "result_dicts", ":", "wmi_dict", "=", "result_dict", ".", "ToDict", "(", ")", "try", ":", "creator_sid_bytes", "=", "bytes", "(", "wmi_dict", "[", "\"CreatorSID\"",...
Parse WMI Event Consumers.
[ "Parse", "WMI", "Event", "Consumers", "." ]
python
train
mitsei/dlkit
dlkit/json_/assessment/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/sessions.py#L9138-L9155
def has_child_banks(self, bank_id): """Tests if a bank has any children. arg: bank_id (osid.id.Id): a ``bank_id`` return: (boolean) - ``true`` if the ``bank_id`` has children, ``false`` otherwise raise: NotFound - ``bank_id`` is not found raise: NullArgument - ``bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinHierarchySession.has_child_bins if self._catalog_session is not None: return self._catalog_session.has_child_catalogs(catalog_id=bank_id) return self._hierarchy_session.has_children(id_=bank_id)
[ "def", "has_child_banks", "(", "self", ",", "bank_id", ")", ":", "# Implemented from template for", "# osid.resource.BinHierarchySession.has_child_bins", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_session", ".", "ha...
Tests if a bank has any children. arg: bank_id (osid.id.Id): a ``bank_id`` return: (boolean) - ``true`` if the ``bank_id`` has children, ``false`` otherwise raise: NotFound - ``bank_id`` is not found raise: NullArgument - ``bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Tests", "if", "a", "bank", "has", "any", "children", "." ]
python
train
guaix-ucm/numina
numina/array/wavecalib/crosscorrelation.py
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/crosscorrelation.py#L150-L370
def periodic_corr1d(sp_reference, sp_offset, fminmax=None, naround_zero=None, norm_spectra=False, plottitle=None, pdf=None, debugplot=0): """Periodic correlation between two spectra, implemented using FFT. Parameters ---------- sp_reference : numpy array Reference spectrum. sp_offset : numpy array Spectrum which offset is going to be measured relative to the reference spectrum. fminmax : tuple of floats or None Minimum and maximum frequencies to be used. If None, no frequency filtering is employed. naround_zero : int Half width of the window (around zero offset) to look for the correlation peak. If None, the whole correlation spectrum is employed. Otherwise, the peak will be sought in the interval [-naround_zero, +naround_zero]. norm_spectra : bool If True, the filtered spectra are normalized before computing the correlation function. This can be important when comparing the peak value of this function using different spectra. plottitle : str Optional plot title. pdf : PdfFile object or None If not None, output is sent to PDF file. debugplot : int Determines whether intermediate computations and/or plots are displayed. The valid codes are defined in numina.array.display.pause_debugplot. Returns ------- offset : float Offset between the two input spectra. fpeak : float Maximum of the cross-correlation function. 
""" # protections if sp_reference.ndim != 1 or sp_offset.ndim != 1: raise ValueError("Invalid array dimensions") if sp_reference.shape != sp_offset.shape: raise ValueError("x and y shapes are different") if plottitle is None: plottitle = ' ' naxis1 = len(sp_reference) xcorr = np.arange(naxis1) naxis1_half = int(naxis1 / 2) for i in range(naxis1_half): xcorr[i + naxis1_half] -= naxis1 isort = xcorr.argsort() xcorr = xcorr[isort] if fminmax is not None: fmin, fmax = fminmax sp_reference_filtmask = filtmask(sp_reference, fmin=fmin, fmax=fmax, debugplot=debugplot) sp_offset_filtmask = filtmask(sp_offset, fmin=fmin, fmax=fmax, debugplot=debugplot) if abs(debugplot) in (21, 22): from numina.array.display.matplotlib_qt import plt xdum = np.arange(naxis1) + 1 # reference spectrum ax = ximplotxy(xdum, sp_reference, show=False, title='reference spectrum', label='original spectrum') ax.plot(xdum, sp_reference_filtmask, label='filtered and masked spectrum') ax.legend() plt.show() # offset spectrum ax = ximplotxy(xdum, sp_offset, show=False, title='offset spectrum', label='original spectrum') ax.plot(xdum, sp_offset_filtmask, label='filtered and masked spectrum') ax.legend() plt.show() else: sp_reference_filtmask = sp_reference sp_offset_filtmask = sp_offset if (abs(debugplot) in (21, 22)) or (pdf is not None): xdum = np.arange(naxis1) + 1 ax = ximplotxy(xdum, sp_reference_filtmask, show=False, title=plottitle, label='reference spectrum') ax.plot(xdum, sp_offset_filtmask, label='offset spectrum') ax.legend() if pdf is not None: pdf.savefig() else: pause_debugplot(debugplot=debugplot, pltshow=True) # normalize spectra if required if norm_spectra: sp_reference_norm = np.copy(sp_reference_filtmask) sp_offset_norm = np.copy(sp_offset_filtmask) sp_dum = np.concatenate((sp_reference_norm, sp_offset_norm)) spmin = min(sp_dum) spmax = max(sp_dum) idum = np.where(sp_reference_norm > 0) sp_reference_norm[idum] /= spmax idum = np.where(sp_reference_norm < 0) sp_reference_norm[idum] /= 
-spmin idum = np.where(sp_offset_norm > 0) sp_offset_norm[idum] /= spmax idum = np.where(sp_offset_norm < 0) sp_offset_norm[idum] /= -spmin if (abs(debugplot) in (21, 22)) or (pdf is not None): xdum = np.arange(naxis1) + 1 ax = ximplotxy(xdum, sp_reference_norm, show=False, title=plottitle + ' [normalized]', label='reference spectrum') ax.plot(xdum, sp_offset_norm, label='offset spectrum') ax.legend() if pdf is not None: pdf.savefig() else: pause_debugplot(debugplot=debugplot, pltshow=True) else: sp_reference_norm = sp_reference_filtmask sp_offset_norm = sp_offset_filtmask corr = np.fft.ifft(np.fft.fft(sp_offset_norm) * np.fft.fft(sp_reference_norm).conj()).real corr = corr[isort] # determine correlation peak if naround_zero is None: iminpeak = 0 imaxpeak = naxis1 else: iminpeak = max(int(naxis1 / 2 - naround_zero), 0) imaxpeak = min(int(naxis1 / 2 + naround_zero), naxis1) ixpeak = corr[iminpeak:imaxpeak].argmax() + iminpeak # fit correlation peak with 2nd order polynomial nfit = 7 nmed = nfit // 2 imin = ixpeak - nmed imax = ixpeak + nmed lpeak_ok = True if imin < 0 or imax > len(corr): x_refined_peak = 0 y_refined_peak = 0 lpeak_ok = False poly_peak = Polynomial([0.0]) else: x_fit = np.arange(-nmed, nmed + 1, dtype=np.float) y_fit = corr[imin:(imax+1)] poly_peak = Polynomial.fit(x_fit, y_fit, 2) poly_peak = Polynomial.cast(poly_peak) coef = poly_peak.coef if coef[2] != 0: x_refined_peak = -coef[1] / (2.0 * coef[2]) else: x_refined_peak = 0.0 y_refined_peak = poly_peak(x_refined_peak) x_refined_peak += ixpeak offset = x_refined_peak - naxis1_half fpeak = y_refined_peak if (abs(debugplot) % 10 != 0) or (pdf is not None): ax = ximplotxy(xcorr, corr, xlabel='offset (pixels)', ylabel='cross-correlation function', title=plottitle, xlim=(-naxis1/2, naxis1/2), show=False) ax.axvline(offset, color='grey', linestyle='dashed') coffset = "(offset:{0:6.2f} pixels)".format(offset) ax.text(0.01, 0.99, coffset, horizontalalignment='left', verticalalignment='top', 
transform=ax.transAxes) if naround_zero is not None: cwindow = "(peak region: [{},{}] pixels)".format(-naround_zero, naround_zero) ax.text(0.01, 0.93, cwindow, horizontalalignment='left', verticalalignment='top', transform=ax.transAxes) # inset plot inset_ax = inset_axes( ax, width="40%", height="40%", loc=1 ) inset_ax.plot(xcorr, corr) if naround_zero is not None: inset_ax.set_xlim([-naround_zero, naround_zero]) else: inset_ax.set_xlim([-50, 50]) if lpeak_ok: xplot = np.arange(-nmed, nmed, 0.5) yplot = poly_peak(xplot) xplot += ixpeak - naxis1_half inset_ax.plot(xplot, yplot, '-') inset_ax.plot([x_refined_peak - naxis1_half], [y_refined_peak], 'o') inset_ax.axvline(offset, color='grey', linestyle='dashed') if pdf is not None: pdf.savefig() else: pause_debugplot(debugplot=debugplot, tight_layout=False, pltshow=True) return offset, fpeak
[ "def", "periodic_corr1d", "(", "sp_reference", ",", "sp_offset", ",", "fminmax", "=", "None", ",", "naround_zero", "=", "None", ",", "norm_spectra", "=", "False", ",", "plottitle", "=", "None", ",", "pdf", "=", "None", ",", "debugplot", "=", "0", ")", ":...
Periodic correlation between two spectra, implemented using FFT. Parameters ---------- sp_reference : numpy array Reference spectrum. sp_offset : numpy array Spectrum which offset is going to be measured relative to the reference spectrum. fminmax : tuple of floats or None Minimum and maximum frequencies to be used. If None, no frequency filtering is employed. naround_zero : int Half width of the window (around zero offset) to look for the correlation peak. If None, the whole correlation spectrum is employed. Otherwise, the peak will be sought in the interval [-naround_zero, +naround_zero]. norm_spectra : bool If True, the filtered spectra are normalized before computing the correlation function. This can be important when comparing the peak value of this function using different spectra. plottitle : str Optional plot title. pdf : PdfFile object or None If not None, output is sent to PDF file. debugplot : int Determines whether intermediate computations and/or plots are displayed. The valid codes are defined in numina.array.display.pause_debugplot. Returns ------- offset : float Offset between the two input spectra. fpeak : float Maximum of the cross-correlation function.
[ "Periodic", "correlation", "between", "two", "spectra", "implemented", "using", "FFT", "." ]
python
train
shoebot/shoebot
lib/graph/__init__.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/graph/__init__.py#L381-L411
def update(self, iterations=10): """ Iterates the graph layout and updates node positions. """ # The graph fades in when initially constructed. self.alpha += 0.05 self.alpha = min(self.alpha, 1.0) # Iterates over the graph's layout. # Each step the graph's bounds are recalculated # and a number of iterations are processed, # more and more as the layout progresses. if self.layout.i == 0: self.layout.prepare() self.layout.i += 1 elif self.layout.i == 1: self.layout.iterate() elif self.layout.i < self.layout.n: n = min(iterations, self.layout.i / 10 + 1) for i in range(n): self.layout.iterate() # Calculate the absolute center of the graph. min_, max = self.layout.bounds self.x = _ctx.WIDTH - max.x*self.d - min_.x*self.d self.y = _ctx.HEIGHT - max.y*self.d - min_.y*self.d self.x /= 2 self.y /= 2 return not self.layout.done
[ "def", "update", "(", "self", ",", "iterations", "=", "10", ")", ":", "# The graph fades in when initially constructed.", "self", ".", "alpha", "+=", "0.05", "self", ".", "alpha", "=", "min", "(", "self", ".", "alpha", ",", "1.0", ")", "# Iterates over the gra...
Iterates the graph layout and updates node positions.
[ "Iterates", "the", "graph", "layout", "and", "updates", "node", "positions", "." ]
python
valid
SiLab-Bonn/pyBAR
pybar/fei4/register_utils.py
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/fei4/register_utils.py#L913-L1252
def scan_loop(self, command, repeat_command=100, use_delay=True, additional_delay=0, mask_steps=3, enable_mask_steps=None, enable_double_columns=None, same_mask_for_all_dc=False, fast_dc_loop=True, bol_function=None, eol_function=None, digital_injection=False, enable_shift_masks=None, disable_shift_masks=None, restore_shift_masks=True, mask=None, double_column_correction=False): '''Implementation of the scan loops (mask shifting, loop over double columns, repeatedly sending any arbitrary command). Parameters ---------- command : BitVector (FEI4) command that will be sent out serially. repeat_command : int The number of repetitions command will be sent out each mask step. use_delay : bool Add additional delay to the command (append zeros). This helps to avoid FE data errors because of sending to many commands to the FE chip. additional_delay: int Additional delay to increase the command-to-command delay (in number of clock cycles / 25ns). mask_steps : int Number of mask steps (from 1 to 672). enable_mask_steps : list, tuple List of mask steps which will be applied. Default is all mask steps. From 0 to (mask-1). A value equal None or empty list will select all mask steps. enable_double_columns : list, tuple List of double columns which will be enabled during scan. Default is all double columns. From 0 to 39 (double columns counted from zero). A value equal None or empty list will select all double columns. same_mask_for_all_dc : bool Use same mask for all double columns. This will only affect all shift masks (see enable_shift_masks). Enabling this is in general a good idea since all double columns will have the same configuration and the scan speed can increased by an order of magnitude. fast_dc_loop : bool If True, optimize double column (DC) loop to save time. Note that bol_function and eol_function cannot do register operations, if True. bol_function : function Begin of loop function that will be called each time before sending command. 
Argument is a function pointer (without braces) or functor. eol_function : function End of loop function that will be called each time after sending command. Argument is a function pointer (without braces) or functor. digital_injection : bool Enables digital injection. C_High and C_Low will be disabled. enable_shift_masks : list, tuple List of enable pixel masks which will be shifted during scan. Mask set to 1 for selected pixels else 0. None will select "Enable", "C_High", "C_Low". disable_shift_masks : list, tuple List of disable pixel masks which will be shifted during scan. Mask set to 0 for selected pixels else 1. None will disable no mask. restore_shift_masks : bool Writing the initial (restored) FE pixel configuration into FE after finishing the scan loop. mask : array-like Additional mask. Must be convertible to an array of booleans with the same shape as mask array. True indicates a masked pixel. Masked pixels will be disabled during shifting of the enable shift masks, and enabled during shifting disable shift mask. double_column_correction : str, bool, list, tuple Enables double column PlsrDAC correction. If value is a filename (string) or list/tuple, the default PlsrDAC correction will be overwritten. 
First line of the file must be a Python list ([0, 0, ...]) ''' if not isinstance(command, bitarray): raise TypeError if enable_shift_masks is None: enable_shift_masks = ["Enable", "C_High", "C_Low"] if disable_shift_masks is None: disable_shift_masks = [] # get PlsrDAC correction if isinstance(double_column_correction, basestring): # from file with open(double_column_correction) as fp: plsr_dac_correction = list(literal_eval(fp.readline().strip())) elif isinstance(double_column_correction, (list, tuple)): # from list/tuple plsr_dac_correction = list(double_column_correction) else: # default if "C_High".lower() in map(lambda x: x.lower(), enable_shift_masks) and "C_Low".lower() in map(lambda x: x.lower(), enable_shift_masks): plsr_dac_correction = self.register.calibration_parameters['Pulser_Corr_C_Inj_High'] elif "C_High".lower() in map(lambda x: x.lower(), enable_shift_masks): plsr_dac_correction = self.register.calibration_parameters['Pulser_Corr_C_Inj_Med'] elif "C_Low".lower() in map(lambda x: x.lower(), enable_shift_masks): plsr_dac_correction = self.register.calibration_parameters['Pulser_Corr_C_Inj_Low'] # initial PlsrDAC value for PlsrDAC correction initial_plsr_dac = self.register.get_global_register_value("PlsrDAC") # create restore point restore_point_name = str(self.run_number) + '_' + self.run_id + '_scan_loop' with self.register.restored(name=restore_point_name): # pre-calculate often used commands conf_mode_command = self.register.get_commands("ConfMode")[0] run_mode_command = self.register.get_commands("RunMode")[0] if use_delay: delay = self.register.get_commands("zeros", length=additional_delay + calculate_wait_cycles(mask_steps))[0] scan_loop_command = command + delay else: scan_loop_command = command def enable_columns(dc): if digital_injection: return [dc * 2 + 1, dc * 2 + 2] else: # analog injection if dc == 0: return [1] elif dc == 39: return [78, 79, 80] else: return [dc * 2, dc * 2 + 1] def write_double_columns(dc): if digital_injection: 
return [dc] else: # analog injection if dc == 0: return [0] elif dc == 39: return [38, 39] else: return [dc - 1, dc] def get_dc_address_command(dc): commands = [] commands.append(conf_mode_command) self.register.set_global_register_value("Colpr_Addr", dc) commands.append(self.register.get_commands("WrRegister", name=["Colpr_Addr"])[0]) if double_column_correction: self.register.set_global_register_value("PlsrDAC", initial_plsr_dac + int(round(plsr_dac_correction[dc]))) commands.append(self.register.get_commands("WrRegister", name=["PlsrDAC"])[0]) commands.append(run_mode_command) return self.register_utils.concatenate_commands(commands, byte_padding=True) if not enable_mask_steps: enable_mask_steps = range(mask_steps) if not enable_double_columns: enable_double_columns = range(40) # preparing for scan commands = [] commands.append(conf_mode_command) if digital_injection is True: # check if C_High and/or C_Low is in enable_shift_mask and/or disable_shift_mask if "C_High".lower() in map(lambda x: x.lower(), enable_shift_masks) or "C_High".lower() in map(lambda x: x.lower(), disable_shift_masks): raise ValueError('C_High must not be shift mask when using digital injection') if "C_Low".lower() in map(lambda x: x.lower(), enable_shift_masks) or "C_Low".lower() in map(lambda x: x.lower(), disable_shift_masks): raise ValueError('C_Low must not be shift mask when using digital injection') # turn off all injection capacitors by default self.register.set_pixel_register_value("C_High", 0) self.register.set_pixel_register_value("C_Low", 0) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=True, name=["C_Low", "C_High"], joint_write=True)) self.register.set_global_register_value("DIGHITIN_SEL", 1) # self.register.set_global_register_value("CalEn", 1) # for GlobalPulse instead Cal-Command else: self.register.set_global_register_value("DIGHITIN_SEL", 0) # setting EnableDigInj to 0 not necessary since DIGHITIN_SEL is turned off # 
self.register.set_pixel_register_value("EnableDigInj", 0) # plotting registers # plt.clf() # plt.imshow(curr_en_mask.T, interpolation='nearest', aspect="auto") # plt.pcolor(curr_en_mask.T) # plt.colorbar() # plt.savefig('mask_step' + str(mask_step) + '.pdf') commands.extend(self.register.get_commands("WrRegister", name=["DIGHITIN_SEL"])) self.register_utils.send_commands(commands) for mask_step in enable_mask_steps: if self.abort_run.is_set(): break commands = [] commands.append(conf_mode_command) if same_mask_for_all_dc: # generate and write first mask step if disable_shift_masks: curr_dis_mask = make_pixel_mask(steps=mask_steps, shift=mask_step, default=1, value=0, mask=mask) map(lambda mask_name: self.register.set_pixel_register_value(mask_name, curr_dis_mask), disable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False if mask is not None else True, name=disable_shift_masks, joint_write=True)) if enable_shift_masks: curr_en_mask = make_pixel_mask(steps=mask_steps, shift=mask_step, mask=mask) map(lambda mask_name: self.register.set_pixel_register_value(mask_name, curr_en_mask), enable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False if mask is not None else True, name=enable_shift_masks, joint_write=True)) if digital_injection is True: # write EnableDigInj last # write DIGHITIN_SEL since after mask writing it is disabled self.register.set_global_register_value("DIGHITIN_SEL", 1) commands.extend(self.register.get_commands("WrRegister", name=["DIGHITIN_SEL"])) else: # set masks to default values if disable_shift_masks: map(lambda mask_name: self.register.set_pixel_register_value(mask_name, 1), disable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=True, name=disable_shift_masks, joint_write=True)) if enable_shift_masks: map(lambda mask_name: self.register.set_pixel_register_value(mask_name, 0), enable_shift_masks) 
commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=True, name=enable_shift_masks, joint_write=True)) if digital_injection is True: # write EnableDigInj last # write DIGHITIN_SEL since after mask writing it is disabled self.register.set_global_register_value("DIGHITIN_SEL", 1) commands.extend(self.register.get_commands("WrRegister", name=["DIGHITIN_SEL"])) self.register_utils.send_commands(commands) logging.info('%d injection(s): mask step %d %s', repeat_command, mask_step, ('[%d - %d]' % (enable_mask_steps[0], enable_mask_steps[-1])) if len(enable_mask_steps) > 1 else ('[%d]' % enable_mask_steps[0])) if same_mask_for_all_dc: if fast_dc_loop: # fast DC loop with optimized pixel register writing # set repeat, should be 1 by default when arriving here self.dut['TX']['CMD_REPEAT'] = repeat_command # get DC command for the first DC in the list, DC command is byte padded # fill CMD memory with DC command and scan loop command, inside the loop only overwrite DC command dc_address_command = get_dc_address_command(enable_double_columns[0]) self.dut['TX']['START_SEQUENCE_LENGTH'] = len(dc_address_command) self.register_utils.set_command(command=self.register_utils.concatenate_commands((dc_address_command, scan_loop_command), byte_padding=False)) for index, dc in enumerate(enable_double_columns): if self.abort_run.is_set(): break if index != 0: # full command is already set before loop # get DC command before wait to save some time dc_address_command = get_dc_address_command(dc) self.register_utils.wait_for_command() if eol_function: eol_function() # do this after command has finished # only set command after FPGA is ready # overwrite only the DC command in CMD memory self.register_utils.set_command(dc_address_command, set_length=False) # do not set length here, because it was already set up before the loop if bol_function: bol_function() self.dut['TX']['START'] # wait here before we go on because we just jumped out of the loop 
self.register_utils.wait_for_command() if eol_function: eol_function() self.dut['TX']['START_SEQUENCE_LENGTH'] = 0 else: # the slow DC loop allows writing commands inside bol and eol functions for index, dc in enumerate(enable_double_columns): if self.abort_run.is_set(): break dc_address_command = get_dc_address_command(dc) self.register_utils.send_command(dc_address_command) if bol_function: bol_function() self.register_utils.send_command(scan_loop_command, repeat=repeat_command) if eol_function: eol_function() else: if fast_dc_loop: # fast DC loop with optimized pixel register writing dc = enable_double_columns[0] ec = enable_columns(dc) dcs = write_double_columns(dc) commands = [] commands.append(conf_mode_command) if disable_shift_masks: curr_dis_mask = make_pixel_mask(steps=mask_steps, shift=mask_step, default=1, value=0, enable_columns=ec, mask=mask) map(lambda mask_name: self.register.set_pixel_register_value(mask_name, curr_dis_mask), disable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, dcs=dcs, name=disable_shift_masks, joint_write=True)) if enable_shift_masks: curr_en_mask = make_pixel_mask(steps=mask_steps, shift=mask_step, enable_columns=ec, mask=mask) map(lambda mask_name: self.register.set_pixel_register_value(mask_name, curr_en_mask), enable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, dcs=dcs, name=enable_shift_masks, joint_write=True)) if digital_injection is True: self.register.set_global_register_value("DIGHITIN_SEL", 1) commands.extend(self.register.get_commands("WrRegister", name=["DIGHITIN_SEL"])) self.register_utils.send_commands(commands) dc_address_command = get_dc_address_command(dc) self.dut['TX']['START_SEQUENCE_LENGTH'] = len(dc_address_command) self.dut['TX']['CMD_REPEAT'] = repeat_command self.register_utils.set_command(command=self.register_utils.concatenate_commands((dc_address_command, scan_loop_command), byte_padding=False)) 
for index, dc in enumerate(enable_double_columns): if self.abort_run.is_set(): break if index != 0: # full command is already set before loop ec = enable_columns(dc) dcs = write_double_columns(dc) dcs.extend(write_double_columns(enable_double_columns[index - 1])) commands = [] commands.append(conf_mode_command) if disable_shift_masks: curr_dis_mask = make_pixel_mask(steps=mask_steps, shift=mask_step, default=1, value=0, enable_columns=ec, mask=mask) map(lambda mask_name: self.register.set_pixel_register_value(mask_name, curr_dis_mask), disable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, dcs=dcs, name=disable_shift_masks, joint_write=True)) if enable_shift_masks: curr_en_mask = make_pixel_mask(steps=mask_steps, shift=mask_step, enable_columns=ec, mask=mask) map(lambda mask_name: self.register.set_pixel_register_value(mask_name, curr_en_mask), enable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, dcs=dcs, name=enable_shift_masks, joint_write=True)) if digital_injection is True: self.register.set_global_register_value("DIGHITIN_SEL", 1) commands.extend(self.register.get_commands("WrRegister", name=["DIGHITIN_SEL"])) dc_address_command = get_dc_address_command(dc) self.register_utils.wait_for_command() if eol_function: eol_function() # do this after command has finished self.register_utils.send_commands(commands) self.dut['TX']['START_SEQUENCE_LENGTH'] = len(dc_address_command) self.dut['TX']['CMD_REPEAT'] = repeat_command self.register_utils.set_command(command=self.register_utils.concatenate_commands((dc_address_command, scan_loop_command), byte_padding=False)) if bol_function: bol_function() self.dut['TX']['START'] self.register_utils.wait_for_command() if eol_function: eol_function() self.dut['TX']['START_SEQUENCE_LENGTH'] = 0 else: for index, dc in enumerate(enable_double_columns): if self.abort_run.is_set(): break ec = enable_columns(dc) dcs = 
write_double_columns(dc) if index != 0: dcs.extend(write_double_columns(enable_double_columns[index - 1])) commands = [] commands.append(conf_mode_command) if disable_shift_masks: curr_dis_mask = make_pixel_mask(steps=mask_steps, shift=mask_step, default=1, value=0, enable_columns=ec, mask=mask) map(lambda mask_name: self.register.set_pixel_register_value(mask_name, curr_dis_mask), disable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, dcs=dcs, name=disable_shift_masks, joint_write=True)) if enable_shift_masks: curr_en_mask = make_pixel_mask(steps=mask_steps, shift=mask_step, enable_columns=ec, mask=mask) map(lambda mask_name: self.register.set_pixel_register_value(mask_name, curr_en_mask), enable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, dcs=dcs, name=enable_shift_masks, joint_write=True)) if digital_injection is True: self.register.set_global_register_value("DIGHITIN_SEL", 1) commands.extend(self.register.get_commands("WrRegister", name=["DIGHITIN_SEL"])) self.register_utils.send_commands(commands) dc_address_command = get_dc_address_command(dc) self.register_utils.send_command(dc_address_command) if bol_function: bol_function() self.register_utils.send_command(scan_loop_command, repeat=repeat_command) if eol_function: eol_function() commands = [] commands.extend(self.register.get_commands("ConfMode")) # write registers that were changed in scan_loop() commands.extend(self.register.get_commands("WrRegister", name=["DIGHITIN_SEL", "Colpr_Addr", "PlsrDAC"])) if restore_shift_masks: commands = [] commands.extend(self.register.get_commands("ConfMode")) commands.extend(self.register.get_commands("WrRegister", name=["DIGHITIN_SEL", "Colpr_Addr", "PlsrDAC"])) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name=disable_shift_masks)) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, 
name=enable_shift_masks)) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name="EnableDigInj")) # commands.extend(self.register.get_commands("RunMode")) self.register_utils.send_commands(commands)
[ "def", "scan_loop", "(", "self", ",", "command", ",", "repeat_command", "=", "100", ",", "use_delay", "=", "True", ",", "additional_delay", "=", "0", ",", "mask_steps", "=", "3", ",", "enable_mask_steps", "=", "None", ",", "enable_double_columns", "=", "None...
Implementation of the scan loops (mask shifting, loop over double columns, repeatedly sending any arbitrary command). Parameters ---------- command : BitVector (FEI4) command that will be sent out serially. repeat_command : int The number of repetitions command will be sent out each mask step. use_delay : bool Add additional delay to the command (append zeros). This helps to avoid FE data errors because of sending to many commands to the FE chip. additional_delay: int Additional delay to increase the command-to-command delay (in number of clock cycles / 25ns). mask_steps : int Number of mask steps (from 1 to 672). enable_mask_steps : list, tuple List of mask steps which will be applied. Default is all mask steps. From 0 to (mask-1). A value equal None or empty list will select all mask steps. enable_double_columns : list, tuple List of double columns which will be enabled during scan. Default is all double columns. From 0 to 39 (double columns counted from zero). A value equal None or empty list will select all double columns. same_mask_for_all_dc : bool Use same mask for all double columns. This will only affect all shift masks (see enable_shift_masks). Enabling this is in general a good idea since all double columns will have the same configuration and the scan speed can increased by an order of magnitude. fast_dc_loop : bool If True, optimize double column (DC) loop to save time. Note that bol_function and eol_function cannot do register operations, if True. bol_function : function Begin of loop function that will be called each time before sending command. Argument is a function pointer (without braces) or functor. eol_function : function End of loop function that will be called each time after sending command. Argument is a function pointer (without braces) or functor. digital_injection : bool Enables digital injection. C_High and C_Low will be disabled. enable_shift_masks : list, tuple List of enable pixel masks which will be shifted during scan. 
Mask set to 1 for selected pixels else 0. None will select "Enable", "C_High", "C_Low". disable_shift_masks : list, tuple List of disable pixel masks which will be shifted during scan. Mask set to 0 for selected pixels else 1. None will disable no mask. restore_shift_masks : bool Writing the initial (restored) FE pixel configuration into FE after finishing the scan loop. mask : array-like Additional mask. Must be convertible to an array of booleans with the same shape as mask array. True indicates a masked pixel. Masked pixels will be disabled during shifting of the enable shift masks, and enabled during shifting disable shift mask. double_column_correction : str, bool, list, tuple Enables double column PlsrDAC correction. If value is a filename (string) or list/tuple, the default PlsrDAC correction will be overwritten. First line of the file must be a Python list ([0, 0, ...])
[ "Implementation", "of", "the", "scan", "loops", "(", "mask", "shifting", "loop", "over", "double", "columns", "repeatedly", "sending", "any", "arbitrary", "command", ")", ".", "Parameters", "----------", "command", ":", "BitVector", "(", "FEI4", ")", "command", ...
python
train
LionelAuroux/pyrser
pyrser/parsing/base.py
https://github.com/LionelAuroux/pyrser/blob/f153a97ef2b6bf915a1ed468c0252a9a59b754d5/pyrser/parsing/base.py#L543-L556
def read_cstring(self) -> bool: """ read a double quoted string Read following BNF rule else return False:: '"' -> ['\\' #char | ~'\\'] '"' """ self._stream.save_context() idx = self._stream.index if self.read_char("\"") and self.read_until("\"", "\\"): txt = self._stream[idx:self._stream.index] return self._stream.validate_context() return self._stream.restore_context()
[ "def", "read_cstring", "(", "self", ")", "->", "bool", ":", "self", ".", "_stream", ".", "save_context", "(", ")", "idx", "=", "self", ".", "_stream", ".", "index", "if", "self", ".", "read_char", "(", "\"\\\"\"", ")", "and", "self", ".", "read_until",...
read a double quoted string Read following BNF rule else return False:: '"' -> ['\\' #char | ~'\\'] '"'
[ "read", "a", "double", "quoted", "string", "Read", "following", "BNF", "rule", "else", "return", "False", "::" ]
python
test
kyuupichan/aiorpcX
aiorpcx/jsonrpc.py
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/jsonrpc.py#L247-L280
def message_to_item(cls, message): '''Translate an unframed received message and return an (item, request_id) pair. The item can be a Request, Notification, Response or a list. A JSON RPC error response is returned as an RPCError inside a Response object. If a Batch is returned, request_id is an iterable of request ids, one per batch member. If the message violates the protocol in some way a ProtocolError is returned, except if the message was determined to be a response, in which case the ProtocolError is placed inside a Response object. This is so that client code can mark a request as having been responded to even if the response was bad. raises: ProtocolError ''' payload = cls._message_to_payload(message) if isinstance(payload, dict): if 'method' in payload: return cls._process_request(payload) else: return cls._process_response(payload) elif isinstance(payload, list) and cls.allow_batches: if not payload: raise cls._error(JSONRPC.INVALID_REQUEST, 'batch is empty', True, None) return payload, None raise cls._error(cls.INVALID_REQUEST, 'request object must be a dictionary', True, None)
[ "def", "message_to_item", "(", "cls", ",", "message", ")", ":", "payload", "=", "cls", ".", "_message_to_payload", "(", "message", ")", "if", "isinstance", "(", "payload", ",", "dict", ")", ":", "if", "'method'", "in", "payload", ":", "return", "cls", "....
Translate an unframed received message and return an (item, request_id) pair. The item can be a Request, Notification, Response or a list. A JSON RPC error response is returned as an RPCError inside a Response object. If a Batch is returned, request_id is an iterable of request ids, one per batch member. If the message violates the protocol in some way a ProtocolError is returned, except if the message was determined to be a response, in which case the ProtocolError is placed inside a Response object. This is so that client code can mark a request as having been responded to even if the response was bad. raises: ProtocolError
[ "Translate", "an", "unframed", "received", "message", "and", "return", "an", "(", "item", "request_id", ")", "pair", "." ]
python
train
gem/oq-engine
openquake/baselib/node.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/baselib/node.py#L160-L172
def floatformat(fmt_string): """ Context manager to change the default format string for the function :func:`openquake.commonlib.writers.scientificformat`. :param fmt_string: the format to use; for instance '%13.9E' """ fmt_defaults = scientificformat.__defaults__ scientificformat.__defaults__ = (fmt_string,) + fmt_defaults[1:] try: yield finally: scientificformat.__defaults__ = fmt_defaults
[ "def", "floatformat", "(", "fmt_string", ")", ":", "fmt_defaults", "=", "scientificformat", ".", "__defaults__", "scientificformat", ".", "__defaults__", "=", "(", "fmt_string", ",", ")", "+", "fmt_defaults", "[", "1", ":", "]", "try", ":", "yield", "finally",...
Context manager to change the default format string for the function :func:`openquake.commonlib.writers.scientificformat`. :param fmt_string: the format to use; for instance '%13.9E'
[ "Context", "manager", "to", "change", "the", "default", "format", "string", "for", "the", "function", ":", "func", ":", "openquake", ".", "commonlib", ".", "writers", ".", "scientificformat", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/bin/t2t_distill.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/bin/t2t_distill.py#L91-L114
def create_teacher_experiment(run_config, hparams, argv): """Creates experiment function.""" tf.logging.info("training teacher") tf.logging.set_verbosity(tf.logging.INFO) trainer_lib.set_random_seed(FLAGS.random_seed) usr_dir.import_usr_dir(FLAGS.t2t_usr_dir) t2t_trainer.maybe_log_registry_and_exit() if FLAGS.cloud_mlengine: return cloud_mlengine.launch() if FLAGS.generate_data: t2t_trainer.generate_data() if cloud_mlengine.job_dir(): FLAGS.output_dir = cloud_mlengine.job_dir() if argv: t2t_trainer.set_hparams_from_args(argv[1:]) hparams.distill_phase = "train" exp_fn = t2t_trainer.create_experiment_fn() exp = exp_fn(run_config, hparams) return exp
[ "def", "create_teacher_experiment", "(", "run_config", ",", "hparams", ",", "argv", ")", ":", "tf", ".", "logging", ".", "info", "(", "\"training teacher\"", ")", "tf", ".", "logging", ".", "set_verbosity", "(", "tf", ".", "logging", ".", "INFO", ")", "tra...
Creates experiment function.
[ "Creates", "experiment", "function", "." ]
python
train
numenta/nupic
src/nupic/algorithms/backtracking_tm.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/backtracking_tm.py#L1479-L1523
def _computeOutput(self): """ Computes output for both learning and inference. In both cases, the output is the boolean OR of ``activeState`` and ``predictedState`` at ``t``. Stores ``currentOutput`` for ``checkPrediction``. :returns: TODO: document """ # TODO: This operation can be sped up by: # 1.) Pre-allocating space for the currentOutput # 2.) Making predictedState and activeState of type 'float32' up front # 3.) Using logical_or(self.predictedState['t'], self.activeState['t'], # self.currentOutput) if self.outputType == 'activeState1CellPerCol': # Fire only the most confident cell in columns that have 2 or more # active cells mostActiveCellPerCol = self.cellConfidence['t'].argmax(axis=1) self.currentOutput = numpy.zeros(self.infActiveState['t'].shape, dtype='float32') # Turn on the most confident cell in each column. Note here that # Columns refers to TM columns, even though each TM column is a row # in the numpy array. numCols = self.currentOutput.shape[0] self.currentOutput[(xrange(numCols), mostActiveCellPerCol)] = 1 # Don't turn on anything in columns which are not active at all activeCols = self.infActiveState['t'].max(axis=1) inactiveCols = numpy.where(activeCols==0)[0] self.currentOutput[inactiveCols, :] = 0 elif self.outputType == 'activeState': self.currentOutput = self.infActiveState['t'] elif self.outputType == 'normal': self.currentOutput = numpy.logical_or(self.infPredictedState['t'], self.infActiveState['t']) else: raise RuntimeError("Unimplemented outputType") return self.currentOutput.reshape(-1).astype('float32')
[ "def", "_computeOutput", "(", "self", ")", ":", "# TODO: This operation can be sped up by:", "# 1.) Pre-allocating space for the currentOutput", "# 2.) Making predictedState and activeState of type 'float32' up front", "# 3.) Using logical_or(self.predictedState['t'], self.activeState['t'],"...
Computes output for both learning and inference. In both cases, the output is the boolean OR of ``activeState`` and ``predictedState`` at ``t``. Stores ``currentOutput`` for ``checkPrediction``. :returns: TODO: document
[ "Computes", "output", "for", "both", "learning", "and", "inference", ".", "In", "both", "cases", "the", "output", "is", "the", "boolean", "OR", "of", "activeState", "and", "predictedState", "at", "t", ".", "Stores", "currentOutput", "for", "checkPrediction", "...
python
valid
limix/glimix-core
glimix_core/lmm/_lmm_scan.py
https://github.com/limix/glimix-core/blob/cddd0994591d100499cc41c1f480ddd575e7a980/glimix_core/lmm/_lmm_scan.py#L267-L315
def scan(self, M): """ LML, fixed-effect sizes, and scale of the candidate set. Parameters ---------- M : array_like Fixed-effects set. Returns ------- lml : float Log of the marginal likelihood. effsizes0 : ndarray Covariates fixed-effect sizes. effsizes0_se : ndarray Covariates fixed-effect size standard errors. effsizes1 : ndarray Candidate set fixed-effect sizes. effsizes1_se : ndarray Candidate fixed-effect size standard errors. scale : ndarray Optimal scale. """ from numpy_sugar.linalg import ddot from numpy_sugar import is_all_finite M = asarray(M, float) if M.shape[1] == 0: return { "lml": self.null_lml(), "effsizes0": self.null_beta, "effsizes0_se": self.null_beta_se, "effsizes1": empty((0)), "effsizes1_se": empty((0)), "scale": self.null_scale, } if not is_all_finite(M): raise ValueError("M parameter has non-finite elements.") MTQ = [dot(M.T, Q) for Q in self._QS[0] if Q.size > 0] yTBM = [dot(i, j.T) for (i, j) in zip(self._yTQDi, MTQ)] XTBM = [dot(i, j.T) for (i, j) in zip(self._XTQDi, MTQ)] D = self._D MTBM = [ddot(i, 1 / j) @ i.T for i, j in zip(MTQ, D) if j.min() > 0] return self._multicovariate_set(yTBM, XTBM, MTBM)
[ "def", "scan", "(", "self", ",", "M", ")", ":", "from", "numpy_sugar", ".", "linalg", "import", "ddot", "from", "numpy_sugar", "import", "is_all_finite", "M", "=", "asarray", "(", "M", ",", "float", ")", "if", "M", ".", "shape", "[", "1", "]", "==", ...
LML, fixed-effect sizes, and scale of the candidate set. Parameters ---------- M : array_like Fixed-effects set. Returns ------- lml : float Log of the marginal likelihood. effsizes0 : ndarray Covariates fixed-effect sizes. effsizes0_se : ndarray Covariates fixed-effect size standard errors. effsizes1 : ndarray Candidate set fixed-effect sizes. effsizes1_se : ndarray Candidate fixed-effect size standard errors. scale : ndarray Optimal scale.
[ "LML", "fixed", "-", "effect", "sizes", "and", "scale", "of", "the", "candidate", "set", "." ]
python
valid
GNS3/gns3-server
gns3server/compute/notification_manager.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/notification_manager.py#L60-L68
def instance():
    """
    Singleton accessor: return the one shared NotificationManager,
    creating it on first use.

    :returns: instance of NotificationManager
    """
    existing = getattr(NotificationManager, '_instance', None)
    if existing is None:
        existing = NotificationManager()
        NotificationManager._instance = existing
    return existing
[ "def", "instance", "(", ")", ":", "if", "not", "hasattr", "(", "NotificationManager", ",", "'_instance'", ")", "or", "NotificationManager", ".", "_instance", "is", "None", ":", "NotificationManager", ".", "_instance", "=", "NotificationManager", "(", ")", "retur...
Singleton to return only on instance of NotificationManager. :returns: instance of NotificationManager
[ "Singleton", "to", "return", "only", "on", "instance", "of", "NotificationManager", ".", ":", "returns", ":", "instance", "of", "NotificationManager" ]
python
train
RiotGames/cloud-inquisitor
plugins/public/cinq-auditor-cloudtrail/cinq_auditor_cloudtrail/__init__.py
https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/plugins/public/cinq-auditor-cloudtrail/cinq_auditor_cloudtrail/__init__.py#L338-L361
def subscribe_sns_topic_to_sqs(self, region):
    """Subscribe the configured SQS queue to the account's SNS topic and
    return the ARN of the topic subscribed.

    Args:
        region (`str`): Name of the AWS region

    Returns:
        `str`
    """
    topic_arn = 'arn:aws:sns:{}:{}:{}'.format(
        region,
        self.account.account_number,
        self.topic_name
    )
    topic = self.session.resource('sns', region_name=region).Topic(topic_arn)
    topic.subscribe(Protocol='sqs', Endpoint=self.sqs_queue)

    # Record the subscription in the audit log for traceability.
    auditlog(
        event='cloudtrail.subscribe_sns_topic_to_sqs',
        actor=self.ns,
        data={
            'account': self.account.account_name,
            'region': region
        }
    )

    return topic.attributes['TopicArn']
[ "def", "subscribe_sns_topic_to_sqs", "(", "self", ",", "region", ")", ":", "sns", "=", "self", ".", "session", ".", "resource", "(", "'sns'", ",", "region_name", "=", "region", ")", "topic", "=", "sns", ".", "Topic", "(", "'arn:aws:sns:{}:{}:{}'", ".", "fo...
Subscribe SQS to the SNS topic. Returns the ARN of the SNS Topic subscribed Args: region (`str`): Name of the AWS region Returns: `str`
[ "Subscribe", "SQS", "to", "the", "SNS", "topic", ".", "Returns", "the", "ARN", "of", "the", "SNS", "Topic", "subscribed" ]
python
train
bachya/pyairvisual
pyairvisual/api.py
https://github.com/bachya/pyairvisual/blob/1d4809998d87f85d53bb281e1eb54d43acee06fa/pyairvisual/api.py#L28-L38
async def city(self, city: str, state: str, country: str) -> dict:
    """Return data for the specified city."""
    query = {'city': city, 'state': state, 'country': country}
    response = await self._request('get', 'city', params=query)
    return response['data']
[ "async", "def", "city", "(", "self", ",", "city", ":", "str", ",", "state", ":", "str", ",", "country", ":", "str", ")", "->", "dict", ":", "data", "=", "await", "self", ".", "_request", "(", "'get'", ",", "'city'", ",", "params", "=", "{", "'cit...
Return data for the specified city.
[ "Return", "data", "for", "the", "specified", "city", "." ]
python
train
googleapis/google-cloud-python
logging/google/cloud/logging/_http.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/logging/google/cloud/logging/_http.py#L163-L176
def logger_delete(self, project, logger_name):
    """API call: delete all entries in a logger via a DELETE request

    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.logs/delete

    :type project: str
    :param project: ID of project containing the log entries to delete

    :type logger_name: str
    :param logger_name: name of logger containing the log entries to delete
    """
    self.api_request(
        method="DELETE",
        path="/projects/%s/logs/%s" % (project, logger_name),
    )
[ "def", "logger_delete", "(", "self", ",", "project", ",", "logger_name", ")", ":", "path", "=", "\"/projects/%s/logs/%s\"", "%", "(", "project", ",", "logger_name", ")", "self", ".", "api_request", "(", "method", "=", "\"DELETE\"", ",", "path", "=", "path", ...
API call: delete all entries in a logger via a DELETE request See https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.logs/delete :type project: str :param project: ID of project containing the log entries to delete :type logger_name: str :param logger_name: name of logger containing the log entries to delete
[ "API", "call", ":", "delete", "all", "entries", "in", "a", "logger", "via", "a", "DELETE", "request" ]
python
train
Chilipp/sphinx-nbexamples
sphinx_nbexamples/__init__.py
https://github.com/Chilipp/sphinx-nbexamples/blob/08e0319ff3c70f8a931dfa8890caf48add4d0470/sphinx_nbexamples/__init__.py#L509-L548
def get_description(self):
    """Get summary and description of this notebook.

    Returns a ``(header, description)`` tuple taken from the first
    markdown cell; if that cell yields no description, the second
    markdown cell (if any) is consulted for the description.
    """
    def split_header(s, get_header=True):
        """Split markdown source into (header, first paragraph)."""
        parts = s.strip().splitlines()
        if parts[0].startswith('#'):
            # ATX-style heading: "# Title"
            if get_header:
                # raw string avoids the invalid-escape warning for \s
                header = re.sub(r'#+\s*', '', parts.pop(0))
                if not parts:
                    return header, ''
            else:
                header = ''
        else:
            # Setext-style heading: title over/underlined with '=' or '-'
            if get_header:
                if parts[0].startswith(('=', '-')):
                    parts = parts[1:]
                header = parts.pop(0)
                if parts and parts[0].startswith(('=', '-')):
                    parts.pop(0)
                if not parts:
                    return header, ''
            else:
                header = ''
        # First paragraph after the heading, collapsed onto one line.
        rest = '\n'.join(parts).lstrip().split('\n\n')
        desc = rest[0].replace('\n', ' ')
        return header, desc

    first_cell = self.nb['cells'][0]
    if not first_cell['cell_type'] == 'markdown':
        return '', ''
    header, desc = split_header(first_cell['source'])
    if not desc and len(self.nb['cells']) > 1:
        second_cell = self.nb['cells'][1]
        if second_cell['cell_type'] == 'markdown':
            _, desc = split_header(second_cell['source'], False)
    return header, desc
[ "def", "get_description", "(", "self", ")", ":", "def", "split_header", "(", "s", ",", "get_header", "=", "True", ")", ":", "s", "=", "s", ".", "lstrip", "(", ")", ".", "rstrip", "(", ")", "parts", "=", "s", ".", "splitlines", "(", ")", "if", "pa...
Get summary and description of this notebook
[ "Get", "summary", "and", "description", "of", "this", "notebook" ]
python
test
christian-oudard/htmltreediff
htmltreediff/diff_core.py
https://github.com/christian-oudard/htmltreediff/blob/0e28f56492ae7e69bb0f74f9a79a8909a5ad588d/htmltreediff/diff_core.py#L268-L285
def merge_blocks(a_blocks, b_blocks):
    """Given two lists of blocks, combine them, in the proper order.

    Each block is an ``(a, b, size)`` tuple. Both lists must end with the
    same zero-size sentinel block, which guarantees they describe
    sequences of the same length. The combined result is verified to
    contain no overlapping blocks.
    """
    # Check sentinels for sequence length.
    assert a_blocks[-1][2] == b_blocks[-1][2] == 0  # sentinel size is 0
    assert a_blocks[-1] == b_blocks[-1]
    # set() drops blocks present in both inputs; sorted() restores order.
    # (No intermediate list() needed: sorted() accepts any iterable.)
    combined_blocks = sorted(set(a_blocks + b_blocks))
    # Check for overlaps: each block must start at or after the end of
    # the previous one, on both the a- and b-side.
    i = j = 0
    for a, b, size in combined_blocks:
        assert i <= a
        assert j <= b
        i = a + size
        j = b + size
    return combined_blocks
[ "def", "merge_blocks", "(", "a_blocks", ",", "b_blocks", ")", ":", "# Check sentinels for sequence length.", "assert", "a_blocks", "[", "-", "1", "]", "[", "2", "]", "==", "b_blocks", "[", "-", "1", "]", "[", "2", "]", "==", "0", "# sentinel size is 0", "a...
Given two lists of blocks, combine them, in the proper order. Ensure that there are no overlaps, and that they are for sequences of the same length.
[ "Given", "two", "lists", "of", "blocks", "combine", "them", "in", "the", "proper", "order", "." ]
python
train
cyrus-/cypy
cypy/np/__init__.py
https://github.com/cyrus-/cypy/blob/04bb59e91fa314e8cf987743189c77a9b6bc371d/cypy/np/__init__.py#L60-L78
def packed(self): """ each row is placed side-by-side with the length of the row interlaced the head of the packed matrix contains offsets to this length e.g. [[11, 22, 33], [44, 55], []] => [3, 7, 10, 3, 11, 22, 33, 2, 44, 55, 0] """ # not the most efficient implementation atm but whatever n_rows = len(self) size = len(self)*2 + self.n_edges packed = numpy.empty(size, self.dtype) offset = n_rows for r, row in enumerate(self): packed[r] = offset n_edges = len(row) packed[offset] = n_edges packed[(offset+1):(offset+1+n_edges)] = numpy.fromiter(row, self.dtype) offset += 1 + n_edges return packed
[ "def", "packed", "(", "self", ")", ":", "# not the most efficient implementation atm but whatever", "n_rows", "=", "len", "(", "self", ")", "size", "=", "len", "(", "self", ")", "*", "2", "+", "self", ".", "n_edges", "packed", "=", "numpy", ".", "empty", "...
each row is placed side-by-side with the length of the row interlaced the head of the packed matrix contains offsets to this length e.g. [[11, 22, 33], [44, 55], []] => [3, 7, 10, 3, 11, 22, 33, 2, 44, 55, 0]
[ "each", "row", "is", "placed", "side", "-", "by", "-", "side", "with", "the", "length", "of", "the", "row", "interlaced", "the", "head", "of", "the", "packed", "matrix", "contains", "offsets", "to", "this", "length", "e", ".", "g", ".", "[[", "11", "...
python
train
log2timeline/dfvfs
dfvfs/lib/gzipfile.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/lib/gzipfile.py#L288-L333
def ReadAtOffset(self, offset, size=None):
    """Reads a byte string from the gzip member at the specified offset.

    The function will read a byte string of the specified size or
    all of the remaining data if no size was specified.

    Args:
      offset (int): offset within the uncompressed data in this member to
        read from.
      size (Optional[int]): maximum number of bytes to read, where None
          represents all remaining data, to a maximum of the uncompressed
          cache size.

    Returns:
      bytes: data read.

    Raises:
      IOError: if the read failed.
      ValueError: if a negative read size or offset is specified.
    """
    if size is not None and size < 0:
        raise ValueError('Invalid size value {0!s}'.format(size))

    if offset < 0:
        raise ValueError('Invalid offset value {0!s}'.format(offset))

    # Nothing to read: zero-size request or offset past end of member.
    if size == 0 or offset >= self.uncompressed_data_size:
        return b''

    # Cache not yet populated: decompress starting at the requested offset.
    if self._cache_start_offset is None:
        self._LoadDataIntoCache(self._file_object, offset)

    # Requested offset falls outside the cached window: rebuild the cache
    # around the new offset.
    if offset > self._cache_end_offset or offset < self._cache_start_offset:
        self.FlushCache()
        self._LoadDataIntoCache(self._file_object, offset)

    cache_offset = offset - self._cache_start_offset

    # No size given: return everything cached from the offset onward.
    if not size:
        return self._cache[cache_offset:]

    data_end_offset = cache_offset + size

    # Request extends past the cached window: return what is cached,
    # which may be fewer than `size` bytes.
    if data_end_offset > self._cache_end_offset:
        return self._cache[cache_offset:]

    return self._cache[cache_offset:data_end_offset]
[ "def", "ReadAtOffset", "(", "self", ",", "offset", ",", "size", "=", "None", ")", ":", "if", "size", "is", "not", "None", "and", "size", "<", "0", ":", "raise", "ValueError", "(", "'Invalid size value {0!s}'", ".", "format", "(", "size", ")", ")", "if"...
Reads a byte string from the gzip member at the specified offset. The function will read a byte string of the specified size or all of the remaining data if no size was specified. Args: offset (int): offset within the uncompressed data in this member to read from. size (Optional[int]): maximum number of bytes to read, where None represents all remaining data, to a maximum of the uncompressed cache size. Returns: bytes: data read. Raises: IOError: if the read failed. ValueError: if a negative read size or offset is specified.
[ "Reads", "a", "byte", "string", "from", "the", "gzip", "member", "at", "the", "specified", "offset", "." ]
python
train
saltstack/salt
salt/runners/vault.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/vault.py#L262-L269
def _get_token_create_url(config): ''' Create Vault url for token creation ''' role_name = config.get('role_name', None) auth_path = '/v1/auth/token/create' base_url = config['url'] return '/'.join(x.strip('/') for x in (base_url, auth_path, role_name) if x)
[ "def", "_get_token_create_url", "(", "config", ")", ":", "role_name", "=", "config", ".", "get", "(", "'role_name'", ",", "None", ")", "auth_path", "=", "'/v1/auth/token/create'", "base_url", "=", "config", "[", "'url'", "]", "return", "'/'", ".", "join", "(...
Create Vault url for token creation
[ "Create", "Vault", "url", "for", "token", "creation" ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L11661-L11672
def log_request_data_encode(self, target_system, target_component, id, ofs, count):
    '''
    Request a chunk of a log

    target_system             : System ID (uint8_t)
    target_component          : Component ID (uint8_t)
    id                        : Log id (from LOG_ENTRY reply) (uint16_t)
    ofs                       : Offset into the log (uint32_t)
    count                     : Number of bytes (uint32_t)

    '''
    # Thin factory wrapper: builds the LOG_REQUEST_DATA message object
    # without sending it (the *_send variant transmits it).
    return MAVLink_log_request_data_message(target_system, target_component, id, ofs, count)
[ "def", "log_request_data_encode", "(", "self", ",", "target_system", ",", "target_component", ",", "id", ",", "ofs", ",", "count", ")", ":", "return", "MAVLink_log_request_data_message", "(", "target_system", ",", "target_component", ",", "id", ",", "ofs", ",", ...
Request a chunk of a log target_system : System ID (uint8_t) target_component : Component ID (uint8_t) id : Log id (from LOG_ENTRY reply) (uint16_t) ofs : Offset into the log (uint32_t) count : Number of bytes (uint32_t)
[ "Request", "a", "chunk", "of", "a", "log" ]
python
train
RRZE-HPC/kerncraft
kerncraft/kernel.py
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L1075-L1103
def _build_const_declartions(self, with_init=True):
    """
    Generate constants declarations

    :return: list of declarations
    """
    # Loop indices use the integer type requested by the user.
    index_type = self.get_index_type()

    declarations = []
    # argv[1] is reserved for the repeat count, so constants start at argv[2].
    # TODO change subscript of argv depending on constant count
    for argv_index, constant in enumerate(self.constants, start=2):
        # e.g.: const long long N = atoi(argv[2])
        type_decl = c_ast.TypeDecl(
            constant.name, ['const'], c_ast.IdentifierType(index_type))
        init = None
        if with_init:
            init = c_ast.FuncCall(
                c_ast.ID('atoi'),
                c_ast.ExprList([
                    c_ast.ArrayRef(c_ast.ID('argv'),
                                   c_ast.Constant('int', str(argv_index)))]))
        declarations.append(c_ast.Decl(
            constant.name, ['const'], [], [], type_decl, init, None))

    return declarations
[ "def", "_build_const_declartions", "(", "self", ",", "with_init", "=", "True", ")", ":", "decls", "=", "[", "]", "# Use type as provided by user in loop indices", "index_type", "=", "self", ".", "get_index_type", "(", ")", "i", "=", "2", "# subscript for cli input, ...
Generate constants declarations :return: list of declarations
[ "Generate", "constants", "declarations" ]
python
test
chrisspen/burlap
burlap/supervisor.py
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/supervisor.py#L237-L297
def deploy_services(self, site=None):
    """
    Collects the configurations for all registered services and writes
    the appropriate supervisord.conf file.
    """
    verbose = self.verbose

    r = self.local_renderer

    # Nothing to do when config management is disabled for this deployment.
    if not r.env.manage_configs:
        return
    #
    # target_sites = self.genv.available_sites_by_host.get(hostname, None)

    self.render_paths()

    supervisor_services = []

    # Optionally start from a clean slate before writing new confs.
    if r.env.purge_all_confs:
        r.sudo('rm -Rf /etc/supervisor/conf.d/*')

    #TODO:check available_sites_by_host and remove dead?
    self.write_configs(site=site)
    for _site, site_data in self.iter_sites(site=site, renderer=self.render_paths):
        if verbose:
            print('deploy_services.site:', _site)

        # Only load site configurations that are allowed for this host.
        # if target_sites is not None:
        #     assert isinstance(target_sites, (tuple, list))
        #     if site not in target_sites:
        #         continue

        # Each registered callback may contribute either a rendered
        # service snippet (str) or a (filename, content) pair.
        for cb in self.genv._supervisor_create_service_callbacks:
            if self.verbose:
                print('cb:', cb)
            ret = cb(site=_site)
            if self.verbose:
                print('ret:', ret)
            if isinstance(ret, six.string_types):
                supervisor_services.append(ret)
            elif isinstance(ret, tuple):
                assert len(ret) == 2
                conf_name, conf_content = ret
                if self.dryrun:
                    print('supervisor conf filename:', conf_name)
                    print(conf_content)
                self.write_to_file(conf_content)

    self.env.services_rendered = '\n'.join(supervisor_services)

    fn = self.render_to_file(self.env.config_template)
    r.put(local_path=fn, remote_path=self.env.config_path, use_sudo=True)

    # We use supervisorctl to configure supervisor, but this will throw a uselessly vague
    # error message is supervisor isn't running.
    if not self.is_running():
        self.start()

    # Reload config and then add and remove as necessary (restarts programs)
    r.sudo('supervisorctl update')
[ "def", "deploy_services", "(", "self", ",", "site", "=", "None", ")", ":", "verbose", "=", "self", ".", "verbose", "r", "=", "self", ".", "local_renderer", "if", "not", "r", ".", "env", ".", "manage_configs", ":", "return", "#", "# target_sites = s...
Collects the configurations for all registered services and writes the appropriate supervisord.conf file.
[ "Collects", "the", "configurations", "for", "all", "registered", "services", "and", "writes", "the", "appropriate", "supervisord", ".", "conf", "file", "." ]
python
valid
vektorlab/slacksocket
slacksocket/webclient.py
https://github.com/vektorlab/slacksocket/blob/8eb8b0f14fe80740217ea0aaf6feb7f736bdf57f/slacksocket/webclient.py#L155-L163
def match(self, attr, val):
    """Look up the first object in the directory whose *attr* equals *val*.

    Returns the matching object, or None when no object matches.
    """
    # "with" guarantees the lock is released even if an exception
    # (e.g. AttributeError from getattr) escapes the loop — same
    # guarantee as the previous acquire/try/finally, but idiomatic.
    with self._lock:
        for x in self:
            if getattr(x, attr) == val:
                return x
[ "def", "match", "(", "self", ",", "attr", ",", "val", ")", ":", "self", ".", "_lock", ".", "acquire", "(", ")", "try", ":", "for", "x", "in", "self", ":", "if", "getattr", "(", "x", ",", "attr", ")", "==", "val", ":", "return", "x", "finally", ...
lookup object in directory with attribute matching value
[ "lookup", "object", "in", "directory", "with", "attribute", "matching", "value" ]
python
train
pybel/pybel
src/pybel/struct/filters/node_predicates.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/filters/node_predicates.py#L152-L174
def _node_has_modifier(graph: BELGraph, node: BaseEntity, modifier: str) -> bool:
    """Return true if over any of a nodes edges, it has a given modifier.

    Modifier can be one of:

    - :data:`pybel.constants.ACTIVITY`,
    - :data:`pybel.constants.DEGRADATION`
    - :data:`pybel.constants.TRANSLOCATION`.

    :param modifier: One of :data:`pybel.constants.ACTIVITY`,
     :data:`pybel.constants.DEGRADATION`, or
     :data:`pybel.constants.TRANSLOCATION`
    """
    # Check outgoing edges on the subject side...
    has_subject_modifier = any(
        part_has_modifier(data, SUBJECT, modifier)
        for _, _, data in graph.out_edges(node, data=True)
    )
    # ...and incoming edges on the object side.
    has_object_modifier = any(
        part_has_modifier(data, OBJECT, modifier)
        for _, _, data in graph.in_edges(node, data=True)
    )
    return has_subject_modifier or has_object_modifier
[ "def", "_node_has_modifier", "(", "graph", ":", "BELGraph", ",", "node", ":", "BaseEntity", ",", "modifier", ":", "str", ")", "->", "bool", ":", "modifier_in_subject", "=", "any", "(", "part_has_modifier", "(", "d", ",", "SUBJECT", ",", "modifier", ")", "f...
Return true if over any of a nodes edges, it has a given modifier. Modifier can be one of: - :data:`pybel.constants.ACTIVITY`, - :data:`pybel.constants.DEGRADATION` - :data:`pybel.constants.TRANSLOCATION`. :param modifier: One of :data:`pybel.constants.ACTIVITY`, :data:`pybel.constants.DEGRADATION`, or :data:`pybel.constants.TRANSLOCATION`
[ "Return", "true", "if", "over", "any", "of", "a", "nodes", "edges", "it", "has", "a", "given", "modifier", "." ]
python
train
jonbretman/jinja-to-js
jinja_to_js/__init__.py
https://github.com/jonbretman/jinja-to-js/blob/0a784b10a83d37a3171c5797547e9fc460c51289/jinja_to_js/__init__.py#L405-L422
def _process_name(self, node, **kwargs):
    """
    Processes a `Name` node. Some examples of `Name` nodes:
        {{ foo }} -> 'foo' is a Name
        {% if foo }} -> 'foo' is a Name
    """

    with self._interpolation():
        with self._python_bool_wrapper(**kwargs):

            # Names not yet stored locally are looked up on the context
            # object instead, so prefix with "<context>.".
            if node.name not in self.stored_names and node.ctx != 'store':
                self.output.write(self.context_name)
                self.output.write('.')

            # Remember names being assigned so later reads skip the
            # context prefix.
            if node.ctx == 'store':
                self.stored_names.add(node.name)

            self.output.write(node.name)
[ "def", "_process_name", "(", "self", ",", "node", ",", "*", "*", "kwargs", ")", ":", "with", "self", ".", "_interpolation", "(", ")", ":", "with", "self", ".", "_python_bool_wrapper", "(", "*", "*", "kwargs", ")", ":", "if", "node", ".", "name", "not...
Processes a `Name` node. Some examples of `Name` nodes: {{ foo }} -> 'foo' is a Name {% if foo }} -> 'foo' is a Name
[ "Processes", "a", "Name", "node", ".", "Some", "examples", "of", "Name", "nodes", ":", "{{", "foo", "}}", "-", ">", "foo", "is", "a", "Name", "{", "%", "if", "foo", "}}", "-", ">", "foo", "is", "a", "Name" ]
python
train
7sDream/zhihu-py3
zhihu/question.py
https://github.com/7sDream/zhihu-py3/blob/bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc/zhihu/question.py#L176-L273
def answers(self):
    """Get all answers to this question.

    :return: generator over the question's answers
    :rtype: Answer.Iterable
    """
    from .author import Author
    from .answer import Answer

    self._make_soup()

    # TODO: unify the two branches. The _parse_answer_html logic could
    # replace all of this.
    if self._url.endswith('sort=created'):
        # "Sorted by creation time" pages are plain paginated HTML.
        pager = self.soup.find('div', class_='zm-invite-pager')
        if pager is None:
            max_page = 1
        else:
            max_page = int(pager.find_all('span')[-2].a.text)

        for page in range(1, max_page + 1):
            if page == 1:
                soup = self.soup
            else:
                url = self._url + '&page=%d' % page
                soup = BeautifulSoup(self._session.get(url).content)
            # Normalize answers flagged as "suggest edit" so they parse
            # like regular answer content.
            error_answers = soup.find_all('div', id='answer-status')
            for each in error_answers:
                each['class'] = 'zm-editable-content'
            answers_wrap = soup.find('div', id='zh-question-answer-wrap')
            # Actual parsing: the four lists below are positionally aligned.
            authors = answers_wrap.find_all(
                'div', class_='zm-item-answer-author-info')
            urls = answers_wrap.find_all('a', class_='answer-date-link')
            up_num = answers_wrap.find_all('div', class_='zm-item-vote-info')
            contents = answers_wrap.find_all(
                'div', class_='zm-editable-content')
            assert len(authors) == len(urls) == len(up_num) == len(
                contents)
            for author, url, up_num, content in \
                    zip(authors, urls, up_num, contents):
                a_url, name, motto, photo = parser_author_from_tag(author)
                author_obj = Author(a_url, name, motto, photo_url=photo,
                                    session=self._session)
                url = Zhihu_URL + url['href']
                up_num = int(up_num['data-votecount'])
                content = answer_content_process(content)
                yield Answer(url, self, author_obj, up_num, content,
                             session=self._session)
    else:
        # Default ordering: first page comes from the soup, the rest are
        # fetched via the "load more answers" JSON endpoint.
        pagesize = 10
        new_header = dict(Default_Header)
        new_header['Referer'] = self.url
        params = {"url_token": self.id,
                  'pagesize': pagesize,
                  'offset': 0}
        data = {'_xsrf': self.xsrf,
                'method': 'next',
                'params': ''}
        for i in range(0, (self.answer_num - 1) // pagesize + 1):
            if i == 0:
                # Normalize answers flagged as "suggest edit".
                error_answers = self.soup.find_all('div', id='answer-status')
                for each in error_answers:
                    each['class'] = 'zm-editable-content'
                answers_wrap = self.soup.find('div', id='zh-question-answer-wrap')
                # Actual parsing: the four lists below are positionally aligned.
                authors = answers_wrap.find_all(
                    'div', class_='zm-item-answer-author-info')
                urls = answers_wrap.find_all('a', class_='answer-date-link')
                up_num = answers_wrap.find_all('div',
                                               class_='zm-item-vote-info')
                contents = answers_wrap.find_all(
                    'div', class_='zm-editable-content')
                assert len(authors) == len(urls) == len(up_num) == len(
                    contents)
                for author, url, up_num, content in \
                        zip(authors, urls, up_num, contents):
                    a_url, name, motto, photo = parser_author_from_tag(
                        author)
                    author_obj = Author(a_url, name, motto, photo_url=photo,
                                        session=self._session)
                    url = Zhihu_URL + url['href']
                    up_num = int(up_num['data-votecount'])
                    content = answer_content_process(content)
                    yield Answer(url, self, author_obj, up_num, content,
                                 session=self._session)
            else:
                params['offset'] = i * pagesize
                data['params'] = json.dumps(params)
                r = self._session.post(Question_Get_More_Answer_URL,
                                       data=data,
                                       headers=new_header)
                answer_list = r.json()['msg']
                for answer_html in answer_list:
                    yield self._parse_answer_html(answer_html)
[ "def", "answers", "(", "self", ")", ":", "from", ".", "author", "import", "Author", "from", ".", "answer", "import", "Answer", "self", ".", "_make_soup", "(", ")", "# TODO: 统一逻辑. 完全可以都用 _parse_answer_html 的逻辑替换", "if", "self", ".", "_url", ".", "endswith", "("...
获取问题的所有答案. :return: 问题的所有答案,返回生成器 :rtype: Answer.Iterable
[ "获取问题的所有答案", "." ]
python
train
SystemRDL/systemrdl-compiler
systemrdl/rdltypes.py
https://github.com/SystemRDL/systemrdl-compiler/blob/6ae64f2bb6ecbbe9db356e20e8ac94e85bdeed3a/systemrdl/rdltypes.py#L188-L211
def get_scope_path(cls, scope_separator="::"):
    """
    Generate a string that represents this enum's declaration namespace
    scope.

    Parameters
    ----------
    scope_separator: str
        Override the separator between namespace scopes
    """
    parent = cls.get_parent_scope()
    # Root-level (or unparented) declarations carry no scope prefix.
    if parent is None or isinstance(parent, comp.Root):
        return ""

    parent_path = parent.get_scope_path(scope_separator)
    if not parent_path:
        return parent.type_name
    return parent_path + scope_separator + parent.type_name
[ "def", "get_scope_path", "(", "cls", ",", "scope_separator", "=", "\"::\"", ")", ":", "if", "cls", ".", "get_parent_scope", "(", ")", "is", "None", ":", "return", "\"\"", "elif", "isinstance", "(", "cls", ".", "get_parent_scope", "(", ")", ",", "comp", "...
Generate a string that represents this enum's declaration namespace scope. Parameters ---------- scope_separator: str Override the separator between namespace scopes
[ "Generate", "a", "string", "that", "represents", "this", "enum", "s", "declaration", "namespace", "scope", "." ]
python
train
juju/charm-helpers
charmhelpers/contrib/peerstorage/__init__.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/peerstorage/__init__.py#L143-L157
def relation_get(attribute=None, unit=None, rid=None):
    """Attempt to use leader-get if supported in the current version of Juju,
    otherwise falls back on relation-get.

    Note that we only attempt to use leader-get if the provided rid is a peer
    relation id or no relation id is provided (in which case we assume we are
    within the peer relation context).
    """
    # leader_get itself raises NotImplementedError on Juju versions that
    # lack leader support, so the whole attempt lives inside the try.
    try:
        if rid not in relation_ids('cluster'):
            raise NotImplementedError
        return leader_get(attribute, rid)
    except NotImplementedError:
        return _relation_get(attribute=attribute, rid=rid, unit=unit)
[ "def", "relation_get", "(", "attribute", "=", "None", ",", "unit", "=", "None", ",", "rid", "=", "None", ")", ":", "try", ":", "if", "rid", "in", "relation_ids", "(", "'cluster'", ")", ":", "return", "leader_get", "(", "attribute", ",", "rid", ")", "...
Attempt to use leader-get if supported in the current version of Juju, otherwise falls back on relation-get. Note that we only attempt to use leader-get if the provided rid is a peer relation id or no relation id is provided (in which case we assume we are within the peer relation context).
[ "Attempt", "to", "use", "leader", "-", "get", "if", "supported", "in", "the", "current", "version", "of", "Juju", "otherwise", "falls", "back", "on", "relation", "-", "get", "." ]
python
train
nnseva/django-access
access/admin.py
https://github.com/nnseva/django-access/blob/2e8b72830b1092652ca63125a8309189d70ad584/access/admin.py#L279-L286
def delete_view(self, request, object_id, extra_context=None):
    "The 'delete' admin view for this model."
    queryset = self.model._default_manager.filter(pk=object_id)
    response = self.delete_selected(request, queryset)
    # delete_selected returns a confirmation page (or similar) when it
    # has something to show; otherwise bounce back to the changelist.
    if response:
        return response
    opts = self.model._meta
    changelist_url = reverse(
        'admin:%s_%s_changelist' % (opts.app_label, opts.model_name))
    return HttpResponseRedirect(changelist_url)
[ "def", "delete_view", "(", "self", ",", "request", ",", "object_id", ",", "extra_context", "=", "None", ")", ":", "queryset", "=", "self", ".", "model", ".", "_default_manager", ".", "filter", "(", "pk", "=", "object_id", ")", "response", "=", "self", "....
The 'delete' admin view for this model.
[ "The", "delete", "admin", "view", "for", "this", "model", "." ]
python
train
uw-it-aca/uw-restclients
restclients/bookstore.py
https://github.com/uw-it-aca/uw-restclients/blob/e12dcd32bf5296b6ebdf71798031594afb7852cb/restclients/bookstore.py#L56-L74
def get_books_for_schedule(self, schedule):
    """
    Returns a dictionary of data. SLNs are the keys, an array of
    Book objects are the values.
    """
    books = {}
    for sln in self._get_slns(schedule):
        try:
            books[sln] = self.get_books_by_quarter_sln(
                schedule.term.quarter, sln
            )
        except DataFailureException:
            # Bookstore has no data for this SLN; leave it out.
            pass
    return books
[ "def", "get_books_for_schedule", "(", "self", ",", "schedule", ")", ":", "slns", "=", "self", ".", "_get_slns", "(", "schedule", ")", "books", "=", "{", "}", "for", "sln", "in", "slns", ":", "try", ":", "section_books", "=", "self", ".", "get_books_by_qu...
Returns a dictionary of data. SLNs are the keys, an array of Book objects are the values.
[ "Returns", "a", "dictionary", "of", "data", ".", "SLNs", "are", "the", "keys", "an", "array", "of", "Book", "objects", "are", "the", "values", "." ]
python
train
jaywink/federation
federation/protocols/diaspora/protocol.py
https://github.com/jaywink/federation/blob/59d31bb37e662891dbea72c1dee05dc53146c78b/federation/protocols/diaspora/protocol.py#L71-L83
def store_magic_envelope_doc(self, payload):
    """Get the Magic Envelope, trying JSON first."""
    decoded = decode_if_bytes(payload)
    try:
        json_payload = json.loads(decoded)
    except ValueError:
        # Not JSON: treat as a (possibly URL-quoted) XML payload.
        xml = unquote(decoded).lstrip().encode("utf-8")
        logger.debug("diaspora.protocol.store_magic_envelope_doc: xml payload: %s", xml)
        self.doc = etree.fromstring(xml)
    else:
        logger.debug("diaspora.protocol.store_magic_envelope_doc: json payload: %s", json_payload)
        self.doc = self.get_json_payload_magic_envelope(json_payload)
[ "def", "store_magic_envelope_doc", "(", "self", ",", "payload", ")", ":", "try", ":", "json_payload", "=", "json", ".", "loads", "(", "decode_if_bytes", "(", "payload", ")", ")", "except", "ValueError", ":", "# XML payload", "xml", "=", "unquote", "(", "deco...
Get the Magic Envelope, trying JSON first.
[ "Get", "the", "Magic", "Envelope", "trying", "JSON", "first", "." ]
python
train
django-auth-ldap/django-auth-ldap
django_auth_ldap/config.py
https://github.com/django-auth-ldap/django-auth-ldap/blob/9ce3c2825527f8faa1793958b041816e63d839af/django_auth_ldap/config.py#L105-L122
def search_with_additional_terms(self, term_dict, escape=True):
    """
    Returns a new search object with additional search terms and-ed to the
    filter string. term_dict maps attribute names to assertion values. If
    you don't want the values escaped, pass escape=False.
    """
    terms = [self.filterstr]
    for attr, assertion in term_dict.items():
        if escape:
            # Protect against LDAP filter injection in the values.
            assertion = self.ldap.filter.escape_filter_chars(assertion)
        terms.append("({}={})".format(attr, assertion))
    combined_filter = "(&{})".format("".join(terms))

    return self.__class__(
        self.base_dn, self.scope, combined_filter, attrlist=self.attrlist
    )
[ "def", "search_with_additional_terms", "(", "self", ",", "term_dict", ",", "escape", "=", "True", ")", ":", "term_strings", "=", "[", "self", ".", "filterstr", "]", "for", "name", ",", "value", "in", "term_dict", ".", "items", "(", ")", ":", "if", "escap...
Returns a new search object with additional search terms and-ed to the filter string. term_dict maps attribute names to assertion values. If you don't want the values escaped, pass escape=False.
[ "Returns", "a", "new", "search", "object", "with", "additional", "search", "terms", "and", "-", "ed", "to", "the", "filter", "string", ".", "term_dict", "maps", "attribute", "names", "to", "assertion", "values", ".", "If", "you", "don", "t", "want", "the",...
python
train
bcbio/bcbio-nextgen
bcbio/qc/qualimap.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/qualimap.py#L131-L139
def _parse_qualimap_globals_inregion(table): """Retrieve metrics from the global targeted region table. """ out = {} for row in table.find_all("tr"): col, val = [x.text for x in row.find_all("td")] if col == "Mapped reads": out.update(_parse_num_pct("%s (in regions)" % col, val)) return out
[ "def", "_parse_qualimap_globals_inregion", "(", "table", ")", ":", "out", "=", "{", "}", "for", "row", "in", "table", ".", "find_all", "(", "\"tr\"", ")", ":", "col", ",", "val", "=", "[", "x", ".", "text", "for", "x", "in", "row", ".", "find_all", ...
Retrieve metrics from the global targeted region table.
[ "Retrieve", "metrics", "from", "the", "global", "targeted", "region", "table", "." ]
python
train
gdestuynder/simple_bugzilla
bugzilla.py
https://github.com/gdestuynder/simple_bugzilla/blob/c69766a81fa7960a8f2b22287968fa4787f1bcfe/bugzilla.py#L77-L84
def put_bug(self, bugid, bug_update): '''http://bugzilla.readthedocs.org/en/latest/api/core/v1/bug.html#update-bug''' assert type(bug_update) is DotDict if (not 'ids' in bug_update): bug_update.ids = [bugid] return self._put('bug/{bugid}'.format(bugid=bugid), json.dumps(bug_update))
[ "def", "put_bug", "(", "self", ",", "bugid", ",", "bug_update", ")", ":", "assert", "type", "(", "bug_update", ")", "is", "DotDict", "if", "(", "not", "'ids'", "in", "bug_update", ")", ":", "bug_update", ".", "ids", "=", "[", "bugid", "]", "return", ...
http://bugzilla.readthedocs.org/en/latest/api/core/v1/bug.html#update-bug
[ "http", ":", "//", "bugzilla", ".", "readthedocs", ".", "org", "/", "en", "/", "latest", "/", "api", "/", "core", "/", "v1", "/", "bug", ".", "html#update", "-", "bug" ]
python
train
openpaperwork/paperwork-backend
paperwork_backend/shell.py
https://github.com/openpaperwork/paperwork-backend/blob/114b831e94e039e68b339751fd18250877abad76/paperwork_backend/shell.py#L327-L401
def cmd_guess_labels(*args): """ Arguments: <document id> [-- [--apply]] Guess the labels that should be set on the document. Example: paperwork-shell guess_labels -- 20161207_1144_00_8 --apply Possible JSON replies: -- { "status": "error", "exception": "yyy", "reason": "xxxx", "args": "(xxxx, )" } -- { "status": "ok", "docid": "xxxx", "current_labels": ["label_a", "label_b"], "guessed_labels": ["label_b", "label_c"], "applied": "yes", } """ args = list(args) apply_labels = False if "--apply" in args: apply_labels = True args.remove("--apply") docid = args[0] dsearch = get_docsearch() doc = dsearch.get(docid) if doc is None: raise Exception( "Document {} not found. Cannot guess labels".format( docid ) ) verbose("Current labels: {}".format( ", ".join([label.name for label in doc.labels]) )) guessed = dsearch.guess_labels(doc) verbose("Guessed labels: {}".format( ", ".join([label.name for label in guessed]) )) r = { 'docid': doc.docid, 'current_labels': [label.name for label in doc.labels], 'guessed_labels': [label.name for label in guessed], 'applied': "yes" if apply_labels else "no", } changed = False if apply_labels: for label in guessed: if label not in doc.labels: dsearch.add_label(doc, label, update_index=False) changed = True for label in doc.labels: if label not in guessed: dsearch.remove_label(doc, label, update_index=False) changed = True if changed: index_updater = dsearch.get_index_updater(optimize=False) index_updater.upd_doc(doc) index_updater.commit() verbose("Document {} updated".format(docid)) elif apply_labels: verbose("Document {} unchanged".format(docid)) reply(r)
[ "def", "cmd_guess_labels", "(", "*", "args", ")", ":", "args", "=", "list", "(", "args", ")", "apply_labels", "=", "False", "if", "\"--apply\"", "in", "args", ":", "apply_labels", "=", "True", "args", ".", "remove", "(", "\"--apply\"", ")", "docid", "=",...
Arguments: <document id> [-- [--apply]] Guess the labels that should be set on the document. Example: paperwork-shell guess_labels -- 20161207_1144_00_8 --apply Possible JSON replies: -- { "status": "error", "exception": "yyy", "reason": "xxxx", "args": "(xxxx, )" } -- { "status": "ok", "docid": "xxxx", "current_labels": ["label_a", "label_b"], "guessed_labels": ["label_b", "label_c"], "applied": "yes", }
[ "Arguments", ":", "<document", "id", ">", "[", "--", "[", "--", "apply", "]]" ]
python
train