repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
YeoLab/anchor
anchor/infotheory.py
https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/infotheory.py#L218-L266
def cross_phenotype_jsd(data, groupby, bins, n_iter=100): """Jensen-Shannon divergence of features across phenotypes Parameters ---------- data : pandas.DataFrame A (n_samples, n_features) Dataframe groupby : mappable A samples to phenotypes mapping n_iter : int Number of bootstrap resampling iterations to perform for the within-group comparisons n_bins : int Number of bins to binify the singles data on Returns ------- jsd_df : pandas.DataFrame A (n_features, n_phenotypes^2) dataframe of the JSD between each feature between and within phenotypes """ grouped = data.groupby(groupby) jsds = [] seen = set([]) for phenotype1, df1 in grouped: for phenotype2, df2 in grouped: pair = tuple(sorted([phenotype1, phenotype2])) if pair in seen: continue seen.add(pair) if phenotype1 == phenotype2: seriess = [] bs = cross_validation.Bootstrap(df1.shape[0], n_iter=n_iter, train_size=0.5) for i, (ind1, ind2) in enumerate(bs): df1_subset = df1.iloc[ind1, :] df2_subset = df2.iloc[ind2, :] seriess.append( binify_and_jsd(df1_subset, df2_subset, None, bins)) series = pd.concat(seriess, axis=1, names=None).mean(axis=1) series.name = pair jsds.append(series) else: series = binify_and_jsd(df1, df2, pair, bins) jsds.append(series) return pd.concat(jsds, axis=1)
[ "def", "cross_phenotype_jsd", "(", "data", ",", "groupby", ",", "bins", ",", "n_iter", "=", "100", ")", ":", "grouped", "=", "data", ".", "groupby", "(", "groupby", ")", "jsds", "=", "[", "]", "seen", "=", "set", "(", "[", "]", ")", "for", "phenoty...
Jensen-Shannon divergence of features across phenotypes Parameters ---------- data : pandas.DataFrame A (n_samples, n_features) Dataframe groupby : mappable A samples to phenotypes mapping n_iter : int Number of bootstrap resampling iterations to perform for the within-group comparisons n_bins : int Number of bins to binify the singles data on Returns ------- jsd_df : pandas.DataFrame A (n_features, n_phenotypes^2) dataframe of the JSD between each feature between and within phenotypes
[ "Jensen", "-", "Shannon", "divergence", "of", "features", "across", "phenotypes" ]
python
train
ronniedada/tabula
tabula/section.py
https://github.com/ronniedada/tabula/blob/ba18bb2f7db75972256b950711415031dc5421c7/tabula/section.py#L216-L235
def apply_meta(self, arr, funcs): """ Apply metadata to help formatting the output: - conv_funcs: convert data before column alignments - deco_funcs: decorate data after column alignments """ if arr is None: logging.error("unable to convert data: emtpy section") return None tmp = np.copy(arr) for col in arr.dtype.names: for row in self._get_row_hdrs(): meta = self._get_meta(row, col) for mk, mv in sorted(meta.iteritems()): if mk in funcs.iterkeys(): tmp[col][self.irt[row]] = \ funcs[mk](tmp[col][self.irt[row]], mv) return tmp
[ "def", "apply_meta", "(", "self", ",", "arr", ",", "funcs", ")", ":", "if", "arr", "is", "None", ":", "logging", ".", "error", "(", "\"unable to convert data: emtpy section\"", ")", "return", "None", "tmp", "=", "np", ".", "copy", "(", "arr", ")", "for",...
Apply metadata to help formatting the output: - conv_funcs: convert data before column alignments - deco_funcs: decorate data after column alignments
[ "Apply", "metadata", "to", "help", "formatting", "the", "output", ":" ]
python
train
ekzhu/datasketch
datasketch/b_bit_minhash.py
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/b_bit_minhash.py#L129-L136
def _calc_a(self, r, b): ''' Compute the function A(r, b) ''' if r == 0.0: # Find the limit of A(r, b) as r -> 0. return 1.0 / (1 << b) return r * (1 - r) ** (2 ** b - 1) / (1 - (1 - r) ** (2 * b))
[ "def", "_calc_a", "(", "self", ",", "r", ",", "b", ")", ":", "if", "r", "==", "0.0", ":", "# Find the limit of A(r, b) as r -> 0.", "return", "1.0", "/", "(", "1", "<<", "b", ")", "return", "r", "*", "(", "1", "-", "r", ")", "**", "(", "2", "**",...
Compute the function A(r, b)
[ "Compute", "the", "function", "A", "(", "r", "b", ")" ]
python
test
sryza/spark-timeseries
python/sparkts/datetimeindex.py
https://github.com/sryza/spark-timeseries/blob/280aa887dc08ab114411245268f230fdabb76eec/python/sparkts/datetimeindex.py#L129-L153
def uniform(start, end=None, periods=None, freq=None, sc=None): """ Instantiates a uniform DateTimeIndex. Either end or periods must be specified. Parameters ---------- start : string, long (nanos from epoch), or Pandas Timestamp end : string, long (nanos from epoch), or Pandas Timestamp periods : int freq : a frequency object sc : SparkContext """ dtmodule = sc._jvm.com.cloudera.sparkts.__getattr__('DateTimeIndex$').__getattr__('MODULE$') if freq is None: raise ValueError("Missing frequency") elif end is None and periods == None: raise ValueError("Need an end date or number of periods") elif end is not None: return DateTimeIndex(dtmodule.uniformFromInterval( \ datetime_to_nanos(start), datetime_to_nanos(end), freq._jfreq)) else: return DateTimeIndex(dtmodule.uniform( \ datetime_to_nanos(start), periods, freq._jfreq))
[ "def", "uniform", "(", "start", ",", "end", "=", "None", ",", "periods", "=", "None", ",", "freq", "=", "None", ",", "sc", "=", "None", ")", ":", "dtmodule", "=", "sc", ".", "_jvm", ".", "com", ".", "cloudera", ".", "sparkts", ".", "__getattr__", ...
Instantiates a uniform DateTimeIndex. Either end or periods must be specified. Parameters ---------- start : string, long (nanos from epoch), or Pandas Timestamp end : string, long (nanos from epoch), or Pandas Timestamp periods : int freq : a frequency object sc : SparkContext
[ "Instantiates", "a", "uniform", "DateTimeIndex", "." ]
python
train
flatangle/flatlib
flatlib/aspects.py
https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/aspects.py#L275-L285
def movement(self): """ Returns the movement of this aspect. The movement is the one of the active object, except if the active is separating but within less than 1 degree. """ mov = self.active.movement if self.orb < 1 and mov == const.SEPARATIVE: mov = const.EXACT return mov
[ "def", "movement", "(", "self", ")", ":", "mov", "=", "self", ".", "active", ".", "movement", "if", "self", ".", "orb", "<", "1", "and", "mov", "==", "const", ".", "SEPARATIVE", ":", "mov", "=", "const", ".", "EXACT", "return", "mov" ]
Returns the movement of this aspect. The movement is the one of the active object, except if the active is separating but within less than 1 degree.
[ "Returns", "the", "movement", "of", "this", "aspect", ".", "The", "movement", "is", "the", "one", "of", "the", "active", "object", "except", "if", "the", "active", "is", "separating", "but", "within", "less", "than", "1", "degree", "." ]
python
train
widdowquinn/pyani
pyani/anib.py
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L250-L267
def generate_blastdb_commands(filenames, outdir, blastdb_exe=None, mode="ANIb"): """Return a list of makeblastdb command-lines for ANIb/ANIblastall - filenames - a list of paths to input FASTA files - outdir - path to output directory - blastdb_exe - path to the makeblastdb executable """ if mode == "ANIb": construct_db_cmdline = construct_makeblastdb_cmd else: construct_db_cmdline = construct_formatdb_cmd if blastdb_exe is None: cmdlines = [construct_db_cmdline(fname, outdir) for fname in filenames] else: cmdlines = [ construct_db_cmdline(fname, outdir, blastdb_exe) for fname in filenames ] return cmdlines
[ "def", "generate_blastdb_commands", "(", "filenames", ",", "outdir", ",", "blastdb_exe", "=", "None", ",", "mode", "=", "\"ANIb\"", ")", ":", "if", "mode", "==", "\"ANIb\"", ":", "construct_db_cmdline", "=", "construct_makeblastdb_cmd", "else", ":", "construct_db_...
Return a list of makeblastdb command-lines for ANIb/ANIblastall - filenames - a list of paths to input FASTA files - outdir - path to output directory - blastdb_exe - path to the makeblastdb executable
[ "Return", "a", "list", "of", "makeblastdb", "command", "-", "lines", "for", "ANIb", "/", "ANIblastall" ]
python
train
gitpython-developers/GitPython
git/remote.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/remote.py#L224-L245
def refresh(cls): """This gets called by the refresh function (see the top level __init__). """ # clear the old values in _flag_map try: del cls._flag_map["t"] except KeyError: pass try: del cls._flag_map["-"] except KeyError: pass # set the value given the git version if Git().version_info[:2] >= (2, 10): cls._flag_map["t"] = cls.TAG_UPDATE else: cls._flag_map["-"] = cls.TAG_UPDATE return True
[ "def", "refresh", "(", "cls", ")", ":", "# clear the old values in _flag_map", "try", ":", "del", "cls", ".", "_flag_map", "[", "\"t\"", "]", "except", "KeyError", ":", "pass", "try", ":", "del", "cls", ".", "_flag_map", "[", "\"-\"", "]", "except", "KeyEr...
This gets called by the refresh function (see the top level __init__).
[ "This", "gets", "called", "by", "the", "refresh", "function", "(", "see", "the", "top", "level", "__init__", ")", "." ]
python
train
djordon/queueing-tool
queueing_tool/network/queue_network.py
https://github.com/djordon/queueing-tool/blob/ccd418cf647ac03a54f78ba5e3725903f541b808/queueing_tool/network/queue_network.py#L987-L1012
def next_event_description(self): """Returns whether the next event is an arrival or a departure and the queue the event is accuring at. Returns ------- des : str Indicates whether the next event is an arrival, a departure, or nothing; returns ``'Arrival'``, ``'Departure'``, or ``'Nothing'``. edge : int or ``None`` The edge index of the edge that this event will occur at. If there are no events then ``None`` is returned. """ if self._fancy_heap.size == 0: event_type = 'Nothing' edge_index = None else: s = [q._key() for q in self.edge2queue] s.sort() e = s[0][1] q = self.edge2queue[e] event_type = 'Arrival' if q.next_event_description() == 1 else 'Departure' edge_index = q.edge[2] return event_type, edge_index
[ "def", "next_event_description", "(", "self", ")", ":", "if", "self", ".", "_fancy_heap", ".", "size", "==", "0", ":", "event_type", "=", "'Nothing'", "edge_index", "=", "None", "else", ":", "s", "=", "[", "q", ".", "_key", "(", ")", "for", "q", "in"...
Returns whether the next event is an arrival or a departure and the queue the event is accuring at. Returns ------- des : str Indicates whether the next event is an arrival, a departure, or nothing; returns ``'Arrival'``, ``'Departure'``, or ``'Nothing'``. edge : int or ``None`` The edge index of the edge that this event will occur at. If there are no events then ``None`` is returned.
[ "Returns", "whether", "the", "next", "event", "is", "an", "arrival", "or", "a", "departure", "and", "the", "queue", "the", "event", "is", "accuring", "at", "." ]
python
valid
rameshg87/pyremotevbox
pyremotevbox/ZSI/wstools/XMLSchema.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/wstools/XMLSchema.py#L648-L664
def getXMLNS(self, prefix=None): """deference prefix or by default xmlns, returns namespace. """ if prefix == XMLSchemaComponent.xml: return XMLNS.XML parent = self ns = self.attributes[XMLSchemaComponent.xmlns].get(prefix or\ XMLSchemaComponent.xmlns_key) while not ns: parent = parent._parent() ns = parent.attributes[XMLSchemaComponent.xmlns].get(prefix or\ XMLSchemaComponent.xmlns_key) if not ns and isinstance(parent, WSDLToolsAdapter): if prefix is None: return '' raise SchemaError, 'unknown prefix %s' %prefix return ns
[ "def", "getXMLNS", "(", "self", ",", "prefix", "=", "None", ")", ":", "if", "prefix", "==", "XMLSchemaComponent", ".", "xml", ":", "return", "XMLNS", ".", "XML", "parent", "=", "self", "ns", "=", "self", ".", "attributes", "[", "XMLSchemaComponent", ".",...
deference prefix or by default xmlns, returns namespace.
[ "deference", "prefix", "or", "by", "default", "xmlns", "returns", "namespace", "." ]
python
train
saltstack/salt
salt/modules/libcloud_compute.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/libcloud_compute.py#L255-L282
def list_volume_snapshots(volume_id, profile, **libcloud_kwargs): ''' Return a list of storage volumes snapshots for this cloud :param volume_id: The volume identifier :type volume_id: ``str`` :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's list_volume_snapshots method :type libcloud_kwargs: ``dict`` CLI Example: .. code-block:: bash salt myminion libcloud_compute.list_volume_snapshots vol1 profile1 ''' conn = _get_driver(profile=profile) libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) volume = _get_by_id(conn.list_volumes(), volume_id) snapshots = conn.list_volume_snapshots(volume, **libcloud_kwargs) ret = [] for snapshot in snapshots: ret.append(_simple_volume_snapshot(snapshot)) return ret
[ "def", "list_volume_snapshots", "(", "volume_id", ",", "profile", ",", "*", "*", "libcloud_kwargs", ")", ":", "conn", "=", "_get_driver", "(", "profile", "=", "profile", ")", "libcloud_kwargs", "=", "salt", ".", "utils", ".", "args", ".", "clean_kwargs", "("...
Return a list of storage volumes snapshots for this cloud :param volume_id: The volume identifier :type volume_id: ``str`` :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's list_volume_snapshots method :type libcloud_kwargs: ``dict`` CLI Example: .. code-block:: bash salt myminion libcloud_compute.list_volume_snapshots vol1 profile1
[ "Return", "a", "list", "of", "storage", "volumes", "snapshots", "for", "this", "cloud" ]
python
train
kytos/kytos-utils
kytos/cli/commands/napps/parser.py
https://github.com/kytos/kytos-utils/blob/b4750c618d15cff75970ea6124bda4d2b9a33578/kytos/cli/commands/napps/parser.py#L57-L61
def call(subcommand, args): """Call a subcommand passing the args.""" args['<napp>'] = parse_napps(args['<napp>']) func = getattr(NAppsAPI, subcommand) func(args)
[ "def", "call", "(", "subcommand", ",", "args", ")", ":", "args", "[", "'<napp>'", "]", "=", "parse_napps", "(", "args", "[", "'<napp>'", "]", ")", "func", "=", "getattr", "(", "NAppsAPI", ",", "subcommand", ")", "func", "(", "args", ")" ]
Call a subcommand passing the args.
[ "Call", "a", "subcommand", "passing", "the", "args", "." ]
python
train
CivicSpleen/ambry
ambry/orm/database.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L580-L589
def delete_partitions(self, ds): """Fast delete of all of a datasets codes, columns, partitions and tables""" from ambry.orm import Partition ssq = self.session.query ssq(Process).filter(Process.d_vid == ds.vid).delete() ssq(Code).filter(Code.d_vid == ds.vid).delete() ssq(ColumnStat).filter(ColumnStat.d_vid == ds.vid).delete() ssq(Partition).filter(Partition.d_vid == ds.vid).delete()
[ "def", "delete_partitions", "(", "self", ",", "ds", ")", ":", "from", "ambry", ".", "orm", "import", "Partition", "ssq", "=", "self", ".", "session", ".", "query", "ssq", "(", "Process", ")", ".", "filter", "(", "Process", ".", "d_vid", "==", "ds", "...
Fast delete of all of a datasets codes, columns, partitions and tables
[ "Fast", "delete", "of", "all", "of", "a", "datasets", "codes", "columns", "partitions", "and", "tables" ]
python
train
noahbenson/neuropythy
neuropythy/graphics/core.py
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/graphics/core.py#L259-L276
def color_overlap(color1, *args): ''' color_overlap(color1, color2...) yields the rgba value associated with overlaying color2 on top of color1 followed by any additional colors (overlaid left to right). This respects alpha values when calculating the results. Note that colors may be lists of colors, in which case a matrix of RGBA values is yielded. ''' args = list(args) args.insert(0, color1) rgba = np.asarray([0.5,0.5,0.5,0]) for c in args: c = to_rgba(c) a = c[...,3] a0 = rgba[...,3] if np.isclose(a0, 0).all(): rgba = np.ones(rgba.shape) * c elif np.isclose(a, 0).all(): continue else: rgba = times(a, c) + times(1-a, rgba) return rgba
[ "def", "color_overlap", "(", "color1", ",", "*", "args", ")", ":", "args", "=", "list", "(", "args", ")", "args", ".", "insert", "(", "0", ",", "color1", ")", "rgba", "=", "np", ".", "asarray", "(", "[", "0.5", ",", "0.5", ",", "0.5", ",", "0",...
color_overlap(color1, color2...) yields the rgba value associated with overlaying color2 on top of color1 followed by any additional colors (overlaid left to right). This respects alpha values when calculating the results. Note that colors may be lists of colors, in which case a matrix of RGBA values is yielded.
[ "color_overlap", "(", "color1", "color2", "...", ")", "yields", "the", "rgba", "value", "associated", "with", "overlaying", "color2", "on", "top", "of", "color1", "followed", "by", "any", "additional", "colors", "(", "overlaid", "left", "to", "right", ")", "...
python
train
FutunnOpen/futuquant
futuquant/common/open_context_base.py
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/common/open_context_base.py#L154-L190
def _get_sync_query_processor(self, pack_func, unpack_func, is_create_socket=True): """ synchronize the query processor :param pack_func: back :param unpack_func: unpack :return: sync_query_processor """ def sync_query_processor(**kargs): """sync query processor""" while True: with self._lock: if self._status == ContextStatus.Ready: net_mgr = self._net_mgr conn_id = self._conn_id break sleep(0.01) try: ret_code, msg, req_str = pack_func(**kargs) if ret_code != RET_OK: return ret_code, msg, None ret_code, msg, rsp_str = net_mgr.sync_query(conn_id, req_str) if ret_code != RET_OK: return ret_code, msg, None ret_code, msg, content = unpack_func(rsp_str) if ret_code != RET_OK: return ret_code, msg, None except Exception as e: logger.error(traceback.format_exc()) return RET_ERROR, str(e), None return RET_OK, msg, content return sync_query_processor
[ "def", "_get_sync_query_processor", "(", "self", ",", "pack_func", ",", "unpack_func", ",", "is_create_socket", "=", "True", ")", ":", "def", "sync_query_processor", "(", "*", "*", "kargs", ")", ":", "\"\"\"sync query processor\"\"\"", "while", "True", ":", "with"...
synchronize the query processor :param pack_func: back :param unpack_func: unpack :return: sync_query_processor
[ "synchronize", "the", "query", "processor", ":", "param", "pack_func", ":", "back", ":", "param", "unpack_func", ":", "unpack", ":", "return", ":", "sync_query_processor" ]
python
train
kpn-digital/py-timeexecution
time_execution/backends/elasticsearch.py
https://github.com/kpn-digital/py-timeexecution/blob/79b991e83f783196c41b830d0acef21ac5462596/time_execution/backends/elasticsearch.py#L110-L125
def bulk_write(self, metrics): """ Write multiple metrics to elasticsearch in one request Args: metrics (list): data with mappings to send to elasticsearch """ actions = [] index = self.get_index() for metric in metrics: actions.append({'index': {'_index': index, '_type': self.doc_type}}) actions.append(metric) try: self.client.bulk(actions) except TransportError as exc: logger.warning('bulk_write metrics %r failure %r', metrics, exc)
[ "def", "bulk_write", "(", "self", ",", "metrics", ")", ":", "actions", "=", "[", "]", "index", "=", "self", ".", "get_index", "(", ")", "for", "metric", "in", "metrics", ":", "actions", ".", "append", "(", "{", "'index'", ":", "{", "'_index'", ":", ...
Write multiple metrics to elasticsearch in one request Args: metrics (list): data with mappings to send to elasticsearch
[ "Write", "multiple", "metrics", "to", "elasticsearch", "in", "one", "request" ]
python
train
pmuller/versions
versions/operators.py
https://github.com/pmuller/versions/blob/951bc3fd99b6a675190f11ee0752af1d7ff5b440/versions/operators.py#L51-L64
def parse(cls, string): """Parses `string` and returns an :class:`Operator` object. :raises: :exc:`InvalidOperatorExpression` If `string` is not \ a valid operator. Valid operators are ``==``, ``!=``, ``<``, ``>``, ``<=``, and ``>=``. """ if string in STR_TO_OP_FUNC: return cls(STR_TO_OP_FUNC[string], string) else: raise InvalidOperatorExpression(string)
[ "def", "parse", "(", "cls", ",", "string", ")", ":", "if", "string", "in", "STR_TO_OP_FUNC", ":", "return", "cls", "(", "STR_TO_OP_FUNC", "[", "string", "]", ",", "string", ")", "else", ":", "raise", "InvalidOperatorExpression", "(", "string", ")" ]
Parses `string` and returns an :class:`Operator` object. :raises: :exc:`InvalidOperatorExpression` If `string` is not \ a valid operator. Valid operators are ``==``, ``!=``, ``<``, ``>``, ``<=``, and ``>=``.
[ "Parses", "string", "and", "returns", "an", ":", "class", ":", "Operator", "object", "." ]
python
train
lightning-viz/lightning-python
lightning/types/base.py
https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/types/base.py#L181-L195
def update(self, *args, **kwargs): """ Base method for updating data. Applies a plot-type specific cleaning operation, then updates the data in the visualization. """ data = self._clean_data(*args, **kwargs) if 'images' in data: images = data['images'] for img in images: self._update_image(img) else: self._update_data(data=data)
[ "def", "update", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "data", "=", "self", ".", "_clean_data", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "'images'", "in", "data", ":", "images", "=", "data", "[", "'images'...
Base method for updating data. Applies a plot-type specific cleaning operation, then updates the data in the visualization.
[ "Base", "method", "for", "updating", "data", "." ]
python
train
benedictpaten/sonLib
bioio.py
https://github.com/benedictpaten/sonLib/blob/1decb75bb439b70721ec776f685ce98e25217d26/bioio.py#L151-L170
def setLoggingFromOptions(options): """Sets the logging from a dictionary of name/value options. """ #We can now set up the logging info. if options.logLevel is not None: setLogLevel(options.logLevel) #Use log level, unless flags are set.. if options.logOff: setLogLevel("OFF") elif options.logInfo: setLogLevel("INFO") elif options.logDebug: setLogLevel("DEBUG") logger.info("Logging set at level: %s" % logLevelString) if options.logFile is not None: addLoggingFileHandler(options.logFile, options.logRotating) logger.info("Logging to file: %s" % options.logFile)
[ "def", "setLoggingFromOptions", "(", "options", ")", ":", "#We can now set up the logging info.", "if", "options", ".", "logLevel", "is", "not", "None", ":", "setLogLevel", "(", "options", ".", "logLevel", ")", "#Use log level, unless flags are set..", "if", "options", ...
Sets the logging from a dictionary of name/value options.
[ "Sets", "the", "logging", "from", "a", "dictionary", "of", "name", "/", "value", "options", "." ]
python
train
adafruit/Adafruit_Python_BluefruitLE
Adafruit_BluefruitLE/bluez_dbus/provider.py
https://github.com/adafruit/Adafruit_Python_BluefruitLE/blob/34fc6f596371b961628369d78ce836950514062f/Adafruit_BluefruitLE/bluez_dbus/provider.py#L109-L130
def _user_thread_main(self, target): """Main entry point for the thread that will run user's code.""" try: # Wait for GLib main loop to start running before starting user code. while True: if self._gobject_mainloop is not None and self._gobject_mainloop.is_running(): # Main loop is running, we should be ready to make bluez DBus calls. break # Main loop isn't running yet, give time back to other threads. time.sleep(0) # Run user's code. self._return_code = target() # Assume good result (0 return code) if none is returned. if self._return_code is None: self._return_code = 0 # Signal the main loop to exit. self._gobject_mainloop.quit() except Exception as ex: # Something went wrong. Raise the exception on the main thread to # exit. self._exception = sys.exc_info() self._gobject_mainloop.quit()
[ "def", "_user_thread_main", "(", "self", ",", "target", ")", ":", "try", ":", "# Wait for GLib main loop to start running before starting user code.", "while", "True", ":", "if", "self", ".", "_gobject_mainloop", "is", "not", "None", "and", "self", ".", "_gobject_main...
Main entry point for the thread that will run user's code.
[ "Main", "entry", "point", "for", "the", "thread", "that", "will", "run", "user", "s", "code", "." ]
python
valid
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L422-L449
def Parse(text, message, allow_unknown_extension=False, allow_field_number=False, descriptor_pool=None): """Parses a text representation of a protocol message into a message. Args: text: Message text representation. message: A protocol buffer message to merge into. allow_unknown_extension: if True, skip over missing extensions and keep parsing allow_field_number: if True, both field number and field name are allowed. descriptor_pool: A DescriptorPool used to resolve Any types. Returns: The same message passed as argument. Raises: ParseError: On text parsing problems. """ if not isinstance(text, str): text = text.decode('utf-8') return ParseLines(text.split('\n'), message, allow_unknown_extension, allow_field_number, descriptor_pool=descriptor_pool)
[ "def", "Parse", "(", "text", ",", "message", ",", "allow_unknown_extension", "=", "False", ",", "allow_field_number", "=", "False", ",", "descriptor_pool", "=", "None", ")", ":", "if", "not", "isinstance", "(", "text", ",", "str", ")", ":", "text", "=", ...
Parses a text representation of a protocol message into a message. Args: text: Message text representation. message: A protocol buffer message to merge into. allow_unknown_extension: if True, skip over missing extensions and keep parsing allow_field_number: if True, both field number and field name are allowed. descriptor_pool: A DescriptorPool used to resolve Any types. Returns: The same message passed as argument. Raises: ParseError: On text parsing problems.
[ "Parses", "a", "text", "representation", "of", "a", "protocol", "message", "into", "a", "message", "." ]
python
train
Nic30/hwt
hwt/hdl/ifContainter.py
https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/hdl/ifContainter.py#L261-L303
def _try_reduce(self) -> Tuple[bool, List[HdlStatement]]: """ Doc on parent class :meth:`HdlStatement._try_reduce` """ # flag if IO of statement has changed io_change = False self.ifTrue, rank_decrease, _io_change = self._try_reduce_list( self.ifTrue) self.rank -= rank_decrease io_change |= _io_change new_elifs = [] for cond, statements in self.elIfs: _statements, rank_decrease, _io_change = self._try_reduce_list( statements) self.rank -= rank_decrease io_change |= _io_change new_elifs.append((cond, _statements)) if self.ifFalse is not None: self.ifFalse, rank_decrease, _io_update_required = self._try_reduce_list( self.ifFalse) self.rank -= rank_decrease io_change |= _io_change reduce_self = not self.condHasEffect( self.ifTrue, self.ifFalse, self.elIfs) if reduce_self: res = self.ifTrue else: res = [self, ] self._on_reduce(reduce_self, io_change, res) # try merge nested ifs as elifs if self.ifFalse is not None and len(self.ifFalse) == 1: child = self.ifFalse[0] if isinstance(child, IfContainer): self._merge_nested_if_from_else(child) return res, io_change
[ "def", "_try_reduce", "(", "self", ")", "->", "Tuple", "[", "bool", ",", "List", "[", "HdlStatement", "]", "]", ":", "# flag if IO of statement has changed", "io_change", "=", "False", "self", ".", "ifTrue", ",", "rank_decrease", ",", "_io_change", "=", "self"...
Doc on parent class :meth:`HdlStatement._try_reduce`
[ "Doc", "on", "parent", "class", ":", "meth", ":", "HdlStatement", ".", "_try_reduce" ]
python
test
slickqa/python-client
slickqa/connection.py
https://github.com/slickqa/python-client/blob/1d36b4977cd4140d7d24917cab2b3f82b60739c2/slickqa/connection.py#L200-L225
def create(self): """Create the specified object (perform a POST to the api). You specify the object as a parameter, using the parent object as a function. Example: proj = Project() ... add project data here proj = slick.projects(proj).create() """ obj = self.data self.data = None url = self.getUrl() # hopefully when we discover what problems exist in slick to require this, we can take the loop out for retry in range(3): try: json_data = obj.to_json() self.logger.debug("Making request to slick at url %s, with data: %s", url, json_data) r = requests.post(url, data=json_data, headers=json_content) self.logger.debug("Request returned status code %d", r.status_code) if r.status_code is 200: return self.model.from_dict(r.json()) else: self.logger.debug("Body of what slick returned: %s", r.text) except BaseException as error: self.logger.warn("Received exception while connecting to slick at %s", url, exc_info=sys.exc_info()) raise SlickCommunicationError( "Tried 3 times to request data from slick at url %s without a successful status code.", url)
[ "def", "create", "(", "self", ")", ":", "obj", "=", "self", ".", "data", "self", ".", "data", "=", "None", "url", "=", "self", ".", "getUrl", "(", ")", "# hopefully when we discover what problems exist in slick to require this, we can take the loop out", "for", "ret...
Create the specified object (perform a POST to the api). You specify the object as a parameter, using the parent object as a function. Example: proj = Project() ... add project data here proj = slick.projects(proj).create()
[ "Create", "the", "specified", "object", "(", "perform", "a", "POST", "to", "the", "api", ")", ".", "You", "specify", "the", "object", "as", "a", "parameter", "using", "the", "parent", "object", "as", "a", "function", ".", "Example", ":", "proj", "=", "...
python
train
merll/docker-fabric
dockerfabric/tasks.py
https://github.com/merll/docker-fabric/blob/785d84e40e17265b667d8b11a6e30d8e6b2bf8d4/dockerfabric/tasks.py#L148-L156
def list_networks(full_ids=False): """ Lists networks on the Docker remote host, similar to ``docker network ls``. :param full_ids: Shows the full network ids. When ``False`` (default) only shows the first 12 characters. :type full_ids: bool """ networks = docker_fabric().networks() _format_output_table(networks, NETWORK_COLUMNS, full_ids)
[ "def", "list_networks", "(", "full_ids", "=", "False", ")", ":", "networks", "=", "docker_fabric", "(", ")", ".", "networks", "(", ")", "_format_output_table", "(", "networks", ",", "NETWORK_COLUMNS", ",", "full_ids", ")" ]
Lists networks on the Docker remote host, similar to ``docker network ls``. :param full_ids: Shows the full network ids. When ``False`` (default) only shows the first 12 characters. :type full_ids: bool
[ "Lists", "networks", "on", "the", "Docker", "remote", "host", "similar", "to", "docker", "network", "ls", "." ]
python
train
merll/docker-map
dockermap/client/docker_util.py
https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/client/docker_util.py#L313-L322
def get_image_tags(self): """ Fetches image labels (repository / tags) from Docker. :return: A dictionary, with image name and tags as the key and the image id as value. :rtype: dict """ current_images = self.images() tags = {tag: i['Id'] for i in current_images for tag in i['RepoTags']} return tags
[ "def", "get_image_tags", "(", "self", ")", ":", "current_images", "=", "self", ".", "images", "(", ")", "tags", "=", "{", "tag", ":", "i", "[", "'Id'", "]", "for", "i", "in", "current_images", "for", "tag", "in", "i", "[", "'RepoTags'", "]", "}", "...
Fetches image labels (repository / tags) from Docker. :return: A dictionary, with image name and tags as the key and the image id as value. :rtype: dict
[ "Fetches", "image", "labels", "(", "repository", "/", "tags", ")", "from", "Docker", "." ]
python
train
chrisspen/burlap
burlap/system.py
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/system.py#L85-L103
def distrib_release(): """ Get the release number of the distribution. Example:: from burlap.system import distrib_id, distrib_release if distrib_id() == 'CentOS' and distrib_release() == '6.1': print(u"CentOS 6.2 has been released. Please upgrade.") """ with settings(hide('running', 'stdout')): kernel = (run('uname -s') or '').strip().lower() if kernel == LINUX: return run('lsb_release -r --short') elif kernel == SUNOS: return run('uname -v')
[ "def", "distrib_release", "(", ")", ":", "with", "settings", "(", "hide", "(", "'running'", ",", "'stdout'", ")", ")", ":", "kernel", "=", "(", "run", "(", "'uname -s'", ")", "or", "''", ")", ".", "strip", "(", ")", ".", "lower", "(", ")", "if", ...
Get the release number of the distribution. Example:: from burlap.system import distrib_id, distrib_release if distrib_id() == 'CentOS' and distrib_release() == '6.1': print(u"CentOS 6.2 has been released. Please upgrade.")
[ "Get", "the", "release", "number", "of", "the", "distribution", "." ]
python
valid
benley/butcher
butcher/targets/__init__.py
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/targets/__init__.py#L28-L44
def new(ruletype, **kwargs): """Instantiate a new build rule based on kwargs. Appropriate args list varies with rule type. Minimum args required: [... fill this in ...] """ try: ruleclass = TYPE_MAP[ruletype] except KeyError: raise error.InvalidRule('Unrecognized rule type: %s' % ruletype) try: return ruleclass(**kwargs) except TypeError: log.error('BADNESS. ruletype: %s, data: %s', ruletype, kwargs) raise
[ "def", "new", "(", "ruletype", ",", "*", "*", "kwargs", ")", ":", "try", ":", "ruleclass", "=", "TYPE_MAP", "[", "ruletype", "]", "except", "KeyError", ":", "raise", "error", ".", "InvalidRule", "(", "'Unrecognized rule type: %s'", "%", "ruletype", ")", "t...
Instantiate a new build rule based on kwargs. Appropriate args list varies with rule type. Minimum args required: [... fill this in ...]
[ "Instantiate", "a", "new", "build", "rule", "based", "on", "kwargs", "." ]
python
train
LabKey/labkey-api-python
labkey/unsupported/wiki.py
https://github.com/LabKey/labkey-api-python/blob/3c8d393384d7cbb2785f8a7f5fe34007b17a76b8/labkey/unsupported/wiki.py#L31-L111
def update_wiki(server_context, wiki_name, wiki_body, container_path=None): """ Used to update an existing wiki page :param server_context: A LabKey server context. See labkey.utils.create_server_context. :param wiki_name: The name of the wiki. :param wiki_body: The body of the wiki. :param container_path: Optional container path that can be used to override the server_context container path :return: returns a dictionary containing the response from the server. The 'success' key in the dictionary will be true when the wiki was successfully updated. It will be false in the case of a failure. In the case of a failure, the 'error' key contains the error message returned by the server. """ # Build the URL for reading the wiki page read_wiki_url = server_context.build_url('wiki', 'editWiki.api', container_path=container_path) payload = { 'name': wiki_name } headers = { 'Content-type': 'application/json' } try: read_response = server_context.make_request(read_wiki_url, payload, headers=headers, method='GET', non_json_response=True) except SSLError as e: print("There was a problem while attempting to submit the read for the wiki page " + str(wiki_name) + " via the URL " + str(e.geturl()) + ". The HTTP response code was " + str(e.getcode())) print("The HTTP client error was: " + format(e)) return 1 # TODO: this is incorrect, should return 'success'/'error' properly like the docs say data = read_response.text # Search HTML response for required information on wiki. 
This is stored in the javascript # variable named # - _wikiProps: for 14.3 and earlier # - LABKEY._wiki.setProps for 15.1 and later data_list = data.split('\n') # If LabKey Server is v14.3 or earlier find line containing '_wikiProp' v = next((i for i in range(len(data_list)) if '_wikiProp' in data_list[i]), None) # If v = None, then server is running 15.1 or later and find the line # containing 'LABKEY._wiki.setProps' if v is None: v = next((i for i in range(len(data_list)) if 'LABKEY._wiki.setProps' in data_list[i]), None) # Verify that we found the variable in the HTML response. If not # do not proceed if v is None: print("There was a problem while attempting to read the data for the wiki page '" + str(wiki_name) + "'.") print("The script is unable to find the wiki properties in the HTML response") return 1 # TODO: this is incorrect, should return 'success'/'error' properly like the docs say wiki_vars = {} for j in range(100): # Read each line, until find a javascript closing bracket. if '};' in data_list[v+j+1]: break if '});' in data_list[v+j+1]: break wvar = data_list[v+j+1].rstrip().lstrip().replace('\'', '').replace(',', '') wiki_vars[wvar.split(':')[0]] = wvar.split(':')[1] # Build the URL for updating the wiki page update_wiki_url = server_context.build_url('wiki', 'saveWiki.api', container_path=container_path) headers = { 'Content-type': 'application/json' } # Update wiki_vars to use the new wiki content. wiki_vars['name'] = wiki_name wiki_vars['body'] = wiki_body try: data = server_context.make_request(update_wiki_url, payload=json_dumps(wiki_vars, sort_keys=True), headers=headers, non_json_response=True) except SSLError as e: print("There was a problem while attempting to submit the read for the wiki page '" + str(wiki_name) + "' via the URL " + str(e.geturl()) + ". 
The HTTP response code was " + str(e.getcode())) print("The HTTP client error was: " + format(e)) return 1 # TODO: this is incorrect, should return 'success'/'error' properly like the docs say return data
[ "def", "update_wiki", "(", "server_context", ",", "wiki_name", ",", "wiki_body", ",", "container_path", "=", "None", ")", ":", "# Build the URL for reading the wiki page", "read_wiki_url", "=", "server_context", ".", "build_url", "(", "'wiki'", ",", "'editWiki.api'", ...
Used to update an existing wiki page :param server_context: A LabKey server context. See labkey.utils.create_server_context. :param wiki_name: The name of the wiki. :param wiki_body: The body of the wiki. :param container_path: Optional container path that can be used to override the server_context container path :return: returns a dictionary containing the response from the server. The 'success' key in the dictionary will be true when the wiki was successfully updated. It will be false in the case of a failure. In the case of a failure, the 'error' key contains the error message returned by the server.
[ "Used", "to", "update", "an", "existing", "wiki", "page", ":", "param", "server_context", ":", "A", "LabKey", "server", "context", ".", "See", "labkey", ".", "utils", ".", "create_server_context", ".", ":", "param", "wiki_name", ":", "The", "name", "of", "...
python
train
readbeyond/aeneas
aeneas/validator.py
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/validator.py#L493-L504
def _check_utf8_encoding(self, bstring): """ Check whether the given sequence of bytes is properly encoded in UTF-8. :param bytes bstring: the byte string to be checked """ if not gf.is_bytes(bstring): self._failed(u"The given string is not a sequence of bytes") return if not gf.is_utf8_encoded(bstring): self._failed(u"The given string is not encoded in UTF-8.")
[ "def", "_check_utf8_encoding", "(", "self", ",", "bstring", ")", ":", "if", "not", "gf", ".", "is_bytes", "(", "bstring", ")", ":", "self", ".", "_failed", "(", "u\"The given string is not a sequence of bytes\"", ")", "return", "if", "not", "gf", ".", "is_utf8...
Check whether the given sequence of bytes is properly encoded in UTF-8. :param bytes bstring: the byte string to be checked
[ "Check", "whether", "the", "given", "sequence", "of", "bytes", "is", "properly", "encoded", "in", "UTF", "-", "8", "." ]
python
train
d0c-s4vage/pfp
pfp/interp.py
https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/interp.py#L1903-L1917
def _handle_expr_list(self, node, scope, ctxt, stream): """Handle ExprList nodes :node: TODO :scope: TODO :ctxt: TODO :stream: TODO :returns: TODO """ self._dlog("handling expression list") exprs = [ self._handle_node(expr, scope, ctxt, stream) for expr in node.exprs ] return exprs
[ "def", "_handle_expr_list", "(", "self", ",", "node", ",", "scope", ",", "ctxt", ",", "stream", ")", ":", "self", ".", "_dlog", "(", "\"handling expression list\"", ")", "exprs", "=", "[", "self", ".", "_handle_node", "(", "expr", ",", "scope", ",", "ctx...
Handle ExprList nodes :node: TODO :scope: TODO :ctxt: TODO :stream: TODO :returns: TODO
[ "Handle", "ExprList", "nodes" ]
python
train
Genida/django-meerkat
src/meerkat/utils/ip_info.py
https://github.com/Genida/django-meerkat/blob/486502a75bb0800266db785fd32717d8c0eb8deb/src/meerkat/utils/ip_info.py#L100-L122
def _get(self, ip): """ Get information about an IP. Args: ip (str): an IP (xxx.xxx.xxx.xxx). Returns: dict: see http://ipinfo.io/developers/getting-started """ # Geoloc updated up to once a week: # http://ipinfo.io/developers/data#geolocation-data retries = 10 for retry in range(retries): try: response = requests.get('http://ipinfo.io/%s/json' % ip, verify=False, timeout=1) # nosec if response.status_code == 429: raise RateExceededError return response.json() except (requests.ReadTimeout, requests.ConnectTimeout): pass return {}
[ "def", "_get", "(", "self", ",", "ip", ")", ":", "# Geoloc updated up to once a week:", "# http://ipinfo.io/developers/data#geolocation-data", "retries", "=", "10", "for", "retry", "in", "range", "(", "retries", ")", ":", "try", ":", "response", "=", "requests", "...
Get information about an IP. Args: ip (str): an IP (xxx.xxx.xxx.xxx). Returns: dict: see http://ipinfo.io/developers/getting-started
[ "Get", "information", "about", "an", "IP", "." ]
python
train
daxlab/Flask-Cache-Buster
flask_cache_buster/__init__.py
https://github.com/daxlab/Flask-Cache-Buster/blob/4c10bed9ab46020904df565a9c0014a7f2e4f6b3/flask_cache_buster/__init__.py#L20-L27
def __is_file_to_be_busted(self, filepath): """ :param filepath: :return: True or False """ if not self.extensions: return True return Path(filepath).suffix in self.extensions if filepath else False
[ "def", "__is_file_to_be_busted", "(", "self", ",", "filepath", ")", ":", "if", "not", "self", ".", "extensions", ":", "return", "True", "return", "Path", "(", "filepath", ")", ".", "suffix", "in", "self", ".", "extensions", "if", "filepath", "else", "False...
:param filepath: :return: True or False
[ ":", "param", "filepath", ":", ":", "return", ":", "True", "or", "False" ]
python
train
globocom/GloboNetworkAPI-client-python
networkapiclient/ClientFactory.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ClientFactory.py#L344-L350
def create_environment_vip(self): """Get an instance of environment_vip services facade.""" return EnvironmentVIP( self.networkapi_url, self.user, self.password, self.user_ldap)
[ "def", "create_environment_vip", "(", "self", ")", ":", "return", "EnvironmentVIP", "(", "self", ".", "networkapi_url", ",", "self", ".", "user", ",", "self", ".", "password", ",", "self", ".", "user_ldap", ")" ]
Get an instance of environment_vip services facade.
[ "Get", "an", "instance", "of", "environment_vip", "services", "facade", "." ]
python
train
CivicSpleen/ambry
ambry/run.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/run.py#L132-L157
def load_accounts(extra_path=None, load_user=True): """Load the yaml account files :param load_user: :return: An `AttrDict` """ from os.path import getmtime try: accts_file = find_config_file(ACCOUNTS_FILE, extra_path=extra_path, load_user=load_user) except ConfigurationError: accts_file = None if accts_file is not None and os.path.exists(accts_file): config = AttrDict() config.update_yaml(accts_file) if not 'accounts' in config: config.remotes = AttrDict() config.accounts.loaded = [accts_file, getmtime(accts_file)] return config else: return None
[ "def", "load_accounts", "(", "extra_path", "=", "None", ",", "load_user", "=", "True", ")", ":", "from", "os", ".", "path", "import", "getmtime", "try", ":", "accts_file", "=", "find_config_file", "(", "ACCOUNTS_FILE", ",", "extra_path", "=", "extra_path", "...
Load the yaml account files :param load_user: :return: An `AttrDict`
[ "Load", "the", "yaml", "account", "files" ]
python
train
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/fda3bb39181437b6b8a0aa0185f21ae5f14385dd/autopep8.py#L766-L776
def fix_e262(self, result): """Fix spacing after comment hash.""" target = self.source[result['line'] - 1] offset = result['column'] code = target[:offset].rstrip(' \t#') comment = target[offset:].lstrip(' \t#') fixed = code + (' # ' + comment if comment.strip() else '\n') self.source[result['line'] - 1] = fixed
[ "def", "fix_e262", "(", "self", ",", "result", ")", ":", "target", "=", "self", ".", "source", "[", "result", "[", "'line'", "]", "-", "1", "]", "offset", "=", "result", "[", "'column'", "]", "code", "=", "target", "[", ":", "offset", "]", ".", "...
Fix spacing after comment hash.
[ "Fix", "spacing", "after", "comment", "hash", "." ]
python
train
joke2k/faker
faker/cli.py
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/cli.py#L267-L276
def execute_from_command_line(argv=None): """A simple method that runs a Command.""" if sys.stdout.encoding is None: print('please set python env PYTHONIOENCODING=UTF-8, example: ' 'export PYTHONIOENCODING=UTF-8, when writing to stdout', file=sys.stderr) exit(1) command = Command(argv) command.execute()
[ "def", "execute_from_command_line", "(", "argv", "=", "None", ")", ":", "if", "sys", ".", "stdout", ".", "encoding", "is", "None", ":", "print", "(", "'please set python env PYTHONIOENCODING=UTF-8, example: '", "'export PYTHONIOENCODING=UTF-8, when writing to stdout'", ",",...
A simple method that runs a Command.
[ "A", "simple", "method", "that", "runs", "a", "Command", "." ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/git/git_client_base.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/git/git_client_base.py#L1692-L1717
def delete_pull_request_labels(self, repository_id, pull_request_id, label_id_or_name, project=None, project_id=None): """DeletePullRequestLabels. [Preview API] Removes a label from the set of those assigned to the pull request. :param str repository_id: The repository ID of the pull request’s target branch. :param int pull_request_id: ID of the pull request. :param str label_id_or_name: The name or ID of the label requested. :param str project: Project ID or project name :param str project_id: Project ID or project name. """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if repository_id is not None: route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str') if pull_request_id is not None: route_values['pullRequestId'] = self._serialize.url('pull_request_id', pull_request_id, 'int') if label_id_or_name is not None: route_values['labelIdOrName'] = self._serialize.url('label_id_or_name', label_id_or_name, 'str') query_parameters = {} if project_id is not None: query_parameters['projectId'] = self._serialize.query('project_id', project_id, 'str') self._send(http_method='DELETE', location_id='f22387e3-984e-4c52-9c6d-fbb8f14c812d', version='5.1-preview.1', route_values=route_values, query_parameters=query_parameters)
[ "def", "delete_pull_request_labels", "(", "self", ",", "repository_id", ",", "pull_request_id", ",", "label_id_or_name", ",", "project", "=", "None", ",", "project_id", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None",...
DeletePullRequestLabels. [Preview API] Removes a label from the set of those assigned to the pull request. :param str repository_id: The repository ID of the pull request’s target branch. :param int pull_request_id: ID of the pull request. :param str label_id_or_name: The name or ID of the label requested. :param str project: Project ID or project name :param str project_id: Project ID or project name.
[ "DeletePullRequestLabels", ".", "[", "Preview", "API", "]", "Removes", "a", "label", "from", "the", "set", "of", "those", "assigned", "to", "the", "pull", "request", ".", ":", "param", "str", "repository_id", ":", "The", "repository", "ID", "of", "the", "p...
python
train
Microsoft/ApplicationInsights-Python
applicationinsights/channel/contracts/DataPoint.py
https://github.com/Microsoft/ApplicationInsights-Python/blob/8452ab7126f9bb6964637d4aa1258c2af17563d6/applicationinsights/channel/contracts/DataPoint.py#L124-L133
def count(self, value): """The count property. Args: value (int). the property value. """ if value == self._defaults['count'] and 'count' in self._values: del self._values['count'] else: self._values['count'] = value
[ "def", "count", "(", "self", ",", "value", ")", ":", "if", "value", "==", "self", ".", "_defaults", "[", "'count'", "]", "and", "'count'", "in", "self", ".", "_values", ":", "del", "self", ".", "_values", "[", "'count'", "]", "else", ":", "self", "...
The count property. Args: value (int). the property value.
[ "The", "count", "property", ".", "Args", ":", "value", "(", "int", ")", ".", "the", "property", "value", "." ]
python
train
closeio/redis-hashring
redis_hashring/__init__.py
https://github.com/closeio/redis-hashring/blob/d767018571fbfb5705b6115d81619b2e84b6e50e/redis_hashring/__init__.py#L235-L242
def cleanup(self): """ Removes expired nodes/replicas from the ring. """ now = time.time() expired = now - NODE_TIMEOUT if self.conn.zremrangebyscore(self.key, 0, expired): self._notify()
[ "def", "cleanup", "(", "self", ")", ":", "now", "=", "time", ".", "time", "(", ")", "expired", "=", "now", "-", "NODE_TIMEOUT", "if", "self", ".", "conn", ".", "zremrangebyscore", "(", "self", ".", "key", ",", "0", ",", "expired", ")", ":", "self",...
Removes expired nodes/replicas from the ring.
[ "Removes", "expired", "nodes", "/", "replicas", "from", "the", "ring", "." ]
python
train
binux/pyspider
pyspider/scheduler/scheduler.py
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/scheduler/scheduler.py#L937-L988
def on_task_failed(self, task): '''Called when a task is failed, called by `on_task_status`''' if 'schedule' not in task: old_task = self.taskdb.get_task(task['project'], task['taskid'], fields=['schedule']) if old_task is None: logging.error('unknown status pack: %s' % task) return task['schedule'] = old_task.get('schedule', {}) retries = task['schedule'].get('retries', self.default_schedule['retries']) retried = task['schedule'].get('retried', 0) project_info = self.projects[task['project']] retry_delay = project_info.retry_delay or self.DEFAULT_RETRY_DELAY next_exetime = retry_delay.get(retried, retry_delay.get('', self.DEFAULT_RETRY_DELAY[''])) if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']: next_exetime = min(next_exetime, task['schedule'].get('age')) else: if retried >= retries: next_exetime = -1 elif 'age' in task['schedule'] and next_exetime > task['schedule'].get('age'): next_exetime = task['schedule'].get('age') if next_exetime < 0: task['status'] = self.taskdb.FAILED task['lastcrawltime'] = time.time() self.update_task(task) project = task['project'] self._cnt['5m'].event((project, 'failed'), +1) self._cnt['1h'].event((project, 'failed'), +1) self._cnt['1d'].event((project, 'failed'), +1) self._cnt['all'].event((project, 'failed'), +1).event((project, 'pending'), -1) logger.info('task failed %(project)s:%(taskid)s %(url)s' % task) return task else: task['schedule']['retried'] = retried + 1 task['schedule']['exetime'] = time.time() + next_exetime task['lastcrawltime'] = time.time() self.update_task(task) self.put_task(task) project = task['project'] self._cnt['5m'].event((project, 'retry'), +1) self._cnt['1h'].event((project, 'retry'), +1) self._cnt['1d'].event((project, 'retry'), +1) # self._cnt['all'].event((project, 'retry'), +1) logger.info('task retry %d/%d %%(project)s:%%(taskid)s %%(url)s' % ( retried, retries), task) return task
[ "def", "on_task_failed", "(", "self", ",", "task", ")", ":", "if", "'schedule'", "not", "in", "task", ":", "old_task", "=", "self", ".", "taskdb", ".", "get_task", "(", "task", "[", "'project'", "]", ",", "task", "[", "'taskid'", "]", ",", "fields", ...
Called when a task is failed, called by `on_task_status`
[ "Called", "when", "a", "task", "is", "failed", "called", "by", "on_task_status" ]
python
train
quantopian/zipline
zipline/__main__.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/__main__.py#L287-L317
def zipline_magic(line, cell=None): """The zipline IPython cell magic. """ load_extensions( default=True, extensions=[], strict=True, environ=os.environ, ) try: return run.main( # put our overrides at the start of the parameter list so that # users may pass values with higher precedence [ '--algotext', cell, '--output', os.devnull, # don't write the results by default ] + ([ # these options are set when running in line magic mode # set a non None algo text to use the ipython user_ns '--algotext', '', '--local-namespace', ] if cell is None else []) + line.split(), '%s%%zipline' % ((cell or '') and '%'), # don't use system exit and propogate errors to the caller standalone_mode=False, ) except SystemExit as e: # https://github.com/mitsuhiko/click/pull/533 # even in standalone_mode=False `--help` really wants to kill us ;_; if e.code: raise ValueError('main returned non-zero status code: %d' % e.code)
[ "def", "zipline_magic", "(", "line", ",", "cell", "=", "None", ")", ":", "load_extensions", "(", "default", "=", "True", ",", "extensions", "=", "[", "]", ",", "strict", "=", "True", ",", "environ", "=", "os", ".", "environ", ",", ")", "try", ":", ...
The zipline IPython cell magic.
[ "The", "zipline", "IPython", "cell", "magic", "." ]
python
train
quantopian/zipline
zipline/utils/preprocess.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/preprocess.py#L115-L139
def call(f): """ Wrap a function in a processor that calls `f` on the argument before passing it along. Useful for creating simple arguments to the `@preprocess` decorator. Parameters ---------- f : function Function accepting a single argument and returning a replacement. Examples -------- >>> @preprocess(x=call(lambda x: x + 1)) ... def foo(x): ... return x ... >>> foo(1) 2 """ @wraps(f) def processor(func, argname, arg): return f(arg) return processor
[ "def", "call", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "processor", "(", "func", ",", "argname", ",", "arg", ")", ":", "return", "f", "(", "arg", ")", "return", "processor" ]
Wrap a function in a processor that calls `f` on the argument before passing it along. Useful for creating simple arguments to the `@preprocess` decorator. Parameters ---------- f : function Function accepting a single argument and returning a replacement. Examples -------- >>> @preprocess(x=call(lambda x: x + 1)) ... def foo(x): ... return x ... >>> foo(1) 2
[ "Wrap", "a", "function", "in", "a", "processor", "that", "calls", "f", "on", "the", "argument", "before", "passing", "it", "along", "." ]
python
train
NASA-AMMOS/AIT-Core
ait/core/tlm.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/tlm.py#L465-L470
def _assertField(self, fieldname): """Raise AttributeError when Packet has no field with the given name.""" if not self._hasattr(fieldname): values = self._defn.name, fieldname raise AttributeError("Packet '%s' has no field '%s'" % values)
[ "def", "_assertField", "(", "self", ",", "fieldname", ")", ":", "if", "not", "self", ".", "_hasattr", "(", "fieldname", ")", ":", "values", "=", "self", ".", "_defn", ".", "name", ",", "fieldname", "raise", "AttributeError", "(", "\"Packet '%s' has no field ...
Raise AttributeError when Packet has no field with the given name.
[ "Raise", "AttributeError", "when", "Packet", "has", "no", "field", "with", "the", "given", "name", "." ]
python
train
gwastro/pycbc
pycbc/io/hdf.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/io/hdf.py#L897-L914
def recursively_save_dict_contents_to_group(h5file, path, dic): """ Parameters ---------- h5file: h5py file to be written to path: path within h5py file to saved dictionary dic: python dictionary to be converted to hdf5 format """ for key, item in dic.items(): if isinstance(item, (np.ndarray, np.int64, np.float64, str, bytes, tuple, list)): h5file[path + str(key)] = item elif isinstance(item, dict): recursively_save_dict_contents_to_group(h5file, path + key + '/', item) else: raise ValueError('Cannot save %s type' % type(item))
[ "def", "recursively_save_dict_contents_to_group", "(", "h5file", ",", "path", ",", "dic", ")", ":", "for", "key", ",", "item", "in", "dic", ".", "items", "(", ")", ":", "if", "isinstance", "(", "item", ",", "(", "np", ".", "ndarray", ",", "np", ".", ...
Parameters ---------- h5file: h5py file to be written to path: path within h5py file to saved dictionary dic: python dictionary to be converted to hdf5 format
[ "Parameters", "----------", "h5file", ":", "h5py", "file", "to", "be", "written", "to", "path", ":", "path", "within", "h5py", "file", "to", "saved", "dictionary", "dic", ":", "python", "dictionary", "to", "be", "converted", "to", "hdf5", "format" ]
python
train
iamteem/redisco
redisco/models/base.py
https://github.com/iamteem/redisco/blob/a7ba19ff3c38061d6d8bc0c10fa754baadcfeb91/redisco/models/base.py#L324-L327
def exists(cls, id): """Checks if the model with id exists.""" return bool(redisco.get_client().exists(cls._key[str(id)]) or redisco.get_client().sismember(cls._key['all'], str(id)))
[ "def", "exists", "(", "cls", ",", "id", ")", ":", "return", "bool", "(", "redisco", ".", "get_client", "(", ")", ".", "exists", "(", "cls", ".", "_key", "[", "str", "(", "id", ")", "]", ")", "or", "redisco", ".", "get_client", "(", ")", ".", "s...
Checks if the model with id exists.
[ "Checks", "if", "the", "model", "with", "id", "exists", "." ]
python
train
acutesoftware/AIKIF
aikif/cls_file_mapping.py
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_file_mapping.py#L117-L121
def get_full_filename(self, dataType, subjectArea): """ returns the file based on dataType and subjectArea """ return dataPath + os.sep + 'core' + os.sep + dataType + '_' + subjectArea + '.CSV'
[ "def", "get_full_filename", "(", "self", ",", "dataType", ",", "subjectArea", ")", ":", "return", "dataPath", "+", "os", ".", "sep", "+", "'core'", "+", "os", ".", "sep", "+", "dataType", "+", "'_'", "+", "subjectArea", "+", "'.CSV'" ]
returns the file based on dataType and subjectArea
[ "returns", "the", "file", "based", "on", "dataType", "and", "subjectArea" ]
python
train
bcbio/bcbio-nextgen
bcbio/bed/__init__.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bed/__init__.py#L20-L44
def concat(bed_files, catted=None): """ recursively concat a set of BED files, returning a sorted bedtools object of the result """ bed_files = [x for x in bed_files if x] if len(bed_files) == 0: if catted: # move to a .bed extension for downstream tools if not already sorted_bed = catted.sort() if not sorted_bed.fn.endswith(".bed"): return sorted_bed.moveto(sorted_bed.fn + ".bed") else: return sorted_bed else: return catted if not catted: bed_files = list(bed_files) catted = bt.BedTool(bed_files.pop()) else: catted = catted.cat(bed_files.pop(), postmerge=False, force_truncate=False) return concat(bed_files, catted)
[ "def", "concat", "(", "bed_files", ",", "catted", "=", "None", ")", ":", "bed_files", "=", "[", "x", "for", "x", "in", "bed_files", "if", "x", "]", "if", "len", "(", "bed_files", ")", "==", "0", ":", "if", "catted", ":", "# move to a .bed extension for...
recursively concat a set of BED files, returning a sorted bedtools object of the result
[ "recursively", "concat", "a", "set", "of", "BED", "files", "returning", "a", "sorted", "bedtools", "object", "of", "the", "result" ]
python
train
DLR-RM/RAFCON
source/rafcon/gui/mygaphas/constraint.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/mygaphas/constraint.py#L315-L328
def get_adjusted_border_positions(self): """ Calculates the positions to limit the port movement to :return: Adjusted positions nw_x, nw_y, se_x, se_y """ nw_x, nw_y = self._rect[0] se_x, se_y = self._rect[1] nw_x += self._distance_to_border nw_y += self._distance_to_border se_x -= self._distance_to_border se_y -= self._distance_to_border return nw_x, nw_y, se_x, se_y
[ "def", "get_adjusted_border_positions", "(", "self", ")", ":", "nw_x", ",", "nw_y", "=", "self", ".", "_rect", "[", "0", "]", "se_x", ",", "se_y", "=", "self", ".", "_rect", "[", "1", "]", "nw_x", "+=", "self", ".", "_distance_to_border", "nw_y", "+=",...
Calculates the positions to limit the port movement to :return: Adjusted positions nw_x, nw_y, se_x, se_y
[ "Calculates", "the", "positions", "to", "limit", "the", "port", "movement", "to", ":", "return", ":", "Adjusted", "positions", "nw_x", "nw_y", "se_x", "se_y" ]
python
train
jsfenfen/990-xml-reader
irs_reader/xmlrunner.py
https://github.com/jsfenfen/990-xml-reader/blob/00020529b789081329a31a2e30b5ee729ce7596a/irs_reader/xmlrunner.py#L161-L182
def run_sked(self, object_id, sked, verbose=False): """ sked is the proper name of the schedule: IRS990, IRS990EZ, IRS990PF, IRS990ScheduleA, etc. """ self.whole_filing_data = [] self.filing_keyerr_data = [] this_filing = Filing(object_id) this_filing.process(verbose=verbose) this_version = this_filing.get_version() if this_version in ALLOWED_VERSIONSTRINGS or ( self.csv_format and this_version in CSV_ALLOWED_VERSIONSTRINGS ): this_version = this_filing.get_version() ein = this_filing.get_ein() sked_dict = this_filing.get_schedule(sked) self._run_schedule(sked, object_id, sked_dict, ein) this_filing.set_result(self.whole_filing_data) this_filing.set_keyerrors(self.filing_keyerr_data) return this_filing else: print("Filing version %s isn't supported for this operation" % this_version ) return this_filing
[ "def", "run_sked", "(", "self", ",", "object_id", ",", "sked", ",", "verbose", "=", "False", ")", ":", "self", ".", "whole_filing_data", "=", "[", "]", "self", ".", "filing_keyerr_data", "=", "[", "]", "this_filing", "=", "Filing", "(", "object_id", ")",...
sked is the proper name of the schedule: IRS990, IRS990EZ, IRS990PF, IRS990ScheduleA, etc.
[ "sked", "is", "the", "proper", "name", "of", "the", "schedule", ":", "IRS990", "IRS990EZ", "IRS990PF", "IRS990ScheduleA", "etc", "." ]
python
train
binux/pyspider
pyspider/fetcher/tornado_fetcher.py
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/fetcher/tornado_fetcher.py#L792-L825
def xmlrpc_run(self, port=24444, bind='127.0.0.1', logRequests=False): '''Run xmlrpc server''' import umsgpack from pyspider.libs.wsgi_xmlrpc import WSGIXMLRPCApplication try: from xmlrpc.client import Binary except ImportError: from xmlrpclib import Binary application = WSGIXMLRPCApplication() application.register_function(self.quit, '_quit') application.register_function(self.size) def sync_fetch(task): result = self.sync_fetch(task) result = Binary(umsgpack.packb(result)) return result application.register_function(sync_fetch, 'fetch') def dump_counter(_time, _type): return self._cnt[_time].to_dict(_type) application.register_function(dump_counter, 'counter') import tornado.wsgi import tornado.ioloop import tornado.httpserver container = tornado.wsgi.WSGIContainer(application) self.xmlrpc_ioloop = tornado.ioloop.IOLoop() self.xmlrpc_server = tornado.httpserver.HTTPServer(container, io_loop=self.xmlrpc_ioloop) self.xmlrpc_server.listen(port=port, address=bind) logger.info('fetcher.xmlrpc listening on %s:%s', bind, port) self.xmlrpc_ioloop.start()
[ "def", "xmlrpc_run", "(", "self", ",", "port", "=", "24444", ",", "bind", "=", "'127.0.0.1'", ",", "logRequests", "=", "False", ")", ":", "import", "umsgpack", "from", "pyspider", ".", "libs", ".", "wsgi_xmlrpc", "import", "WSGIXMLRPCApplication", "try", ":"...
Run xmlrpc server
[ "Run", "xmlrpc", "server" ]
python
train
abilian/abilian-core
abilian/web/admin/panels/dashboard.py
https://github.com/abilian/abilian-core/blob/0a71275bf108c3d51e13ca9e093c0249235351e3/abilian/web/admin/panels/dashboard.py#L148-L207
def uniquelogins(sessions): """Unique logins per days/weeks/months. :return: daily, weekly, monthly 3 lists of dictionaries of the following format [{'x':epoch, 'y': value},] """ # sessions = LoginSession.query.order_by(LoginSession.started_at.asc()).all() if not sessions: return [], [], [] dates = {} for session in sessions: user = session.user # time value is discarded to aggregate on days only date = session.started_at.strftime("%Y/%m/%d") if date not in dates: dates[date] = set() # we want unique users on a given day dates[date].add(user) else: dates[date].add(user) daily = [] weekly = [] monthly = [] for date in sorted(dates.keys()): # print u"{} : {}".format(date, len(dates[date])) date_epoch = unix_time_millis(datetime.strptime(date, "%Y/%m/%d")) daily.append({"x": date_epoch, "y": len(dates[date])}) # first_day = data[0]['x'] # last_day = data[-1]['x'] daily_serie = pd.Series(dates) # convert the index to Datetime type daily_serie.index = pd.DatetimeIndex(daily_serie.index) # calculate the values instead of users lists daily_serie = daily_serie.apply(lambda x: len(x)) # GroupBy Week/month, Thanks Panda weekly_serie = daily_serie.groupby(pd.Grouper(freq="W")).aggregate(numpysum) monthly_serie = daily_serie.groupby(pd.Grouper(freq="M")).aggregate(numpysum) for date, value in weekly_serie.items(): try: value = int(value) except ValueError: continue date_epoch = unix_time_millis(date) weekly.append({"x": date_epoch, "y": value}) for date, value in monthly_serie.items(): try: value = int(value) except ValueError: continue date_epoch = unix_time_millis(date) monthly.append({"x": date_epoch, "y": value}) return daily, weekly, monthly
[ "def", "uniquelogins", "(", "sessions", ")", ":", "# sessions = LoginSession.query.order_by(LoginSession.started_at.asc()).all()", "if", "not", "sessions", ":", "return", "[", "]", ",", "[", "]", ",", "[", "]", "dates", "=", "{", "}", "for", "session", "in", "se...
Unique logins per days/weeks/months. :return: daily, weekly, monthly 3 lists of dictionaries of the following format [{'x':epoch, 'y': value},]
[ "Unique", "logins", "per", "days", "/", "weeks", "/", "months", "." ]
python
train
saltstack/salt
salt/modules/puppet.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/puppet.py#L259-L296
def status(): ''' .. versionadded:: 2014.7.0 Display puppet agent status CLI Example: .. code-block:: bash salt '*' puppet.status ''' puppet = _Puppet() if os.path.isfile(puppet.disabled_lockfile): return 'Administratively disabled' if os.path.isfile(puppet.run_lockfile): try: with salt.utils.files.fopen(puppet.run_lockfile, 'r') as fp_: pid = int(salt.utils.stringutils.to_unicode(fp_.read())) os.kill(pid, 0) # raise an OSError if process doesn't exist except (OSError, ValueError): return 'Stale lockfile' else: return 'Applying a catalog' if os.path.isfile(puppet.agent_pidfile): try: with salt.utils.files.fopen(puppet.agent_pidfile, 'r') as fp_: pid = int(salt.utils.stringutils.to_unicode(fp_.read())) os.kill(pid, 0) # raise an OSError if process doesn't exist except (OSError, ValueError): return 'Stale pidfile' else: return 'Idle daemon' return 'Stopped'
[ "def", "status", "(", ")", ":", "puppet", "=", "_Puppet", "(", ")", "if", "os", ".", "path", ".", "isfile", "(", "puppet", ".", "disabled_lockfile", ")", ":", "return", "'Administratively disabled'", "if", "os", ".", "path", ".", "isfile", "(", "puppet",...
.. versionadded:: 2014.7.0 Display puppet agent status CLI Example: .. code-block:: bash salt '*' puppet.status
[ "..", "versionadded", "::", "2014", ".", "7", ".", "0" ]
python
train
MaxStrange/AudioSegment
audiosegment.py
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/audiosegment.py#L691-L759
def fft(self, start_s=None, duration_s=None, start_sample=None, num_samples=None, zero_pad=False): """ Transforms the indicated slice of the AudioSegment into the frequency domain and returns the bins and the values. If neither `start_s` or `start_sample` is specified, the first sample of the slice will be the first sample of the AudioSegment. If neither `duration_s` or `num_samples` is specified, the slice will be from the specified start to the end of the segment. .. code-block:: python # Example for plotting the FFT using this function import matplotlib.pyplot as plt import numpy as np seg = audiosegment.from_file("furelise.wav") # Just take the first 3 seconds hist_bins, hist_vals = seg[1:3000].fft() hist_vals_real_normed = np.abs(hist_vals) / len(hist_vals) plt.plot(hist_bins / 1000, hist_vals_real_normed) plt.xlabel("kHz") plt.ylabel("dB") plt.show() .. image:: images/fft.png :param start_s: The start time in seconds. If this is specified, you cannot specify `start_sample`. :param duration_s: The duration of the slice in seconds. If this is specified, you cannot specify `num_samples`. :param start_sample: The zero-based index of the first sample to include in the slice. If this is specified, you cannot specify `start_s`. :param num_samples: The number of samples to include in the slice. If this is specified, you cannot specify `duration_s`. :param zero_pad: If True and the combination of start and duration result in running off the end of the AudioSegment, the end is zero padded to prevent this. :returns: np.ndarray of frequencies in Hz, np.ndarray of amount of each frequency :raises: ValueError If `start_s` and `start_sample` are both specified and/or if both `duration_s` and `num_samples` are specified. 
""" if start_s is not None and start_sample is not None: raise ValueError("Only one of start_s and start_sample can be specified.") if duration_s is not None and num_samples is not None: raise ValueError("Only one of duration_s and num_samples can be specified.") if start_s is None and start_sample is None: start_sample = 0 if duration_s is None and num_samples is None: num_samples = len(self.get_array_of_samples()) - int(start_sample) if duration_s is not None: num_samples = int(round(duration_s * self.frame_rate)) if start_s is not None: start_sample = int(round(start_s * self.frame_rate)) end_sample = start_sample + num_samples # end_sample is excluded if end_sample > len(self.get_array_of_samples()) and not zero_pad: raise ValueError("The combination of start and duration will run off the end of the AudioSegment object.") elif end_sample > len(self.get_array_of_samples()) and zero_pad: arr = np.array(self.get_array_of_samples()) zeros = np.zeros(end_sample - len(arr)) arr = np.append(arr, zeros) else: arr = np.array(self.get_array_of_samples()) audioslice = np.array(arr[start_sample:end_sample]) fft_result = np.fft.fft(audioslice)[range(int(round(num_samples/2)) + 1)] step_size = self.frame_rate / num_samples bins = np.arange(0, int(round(num_samples/2)) + 1, 1.0) * step_size return bins, fft_result
[ "def", "fft", "(", "self", ",", "start_s", "=", "None", ",", "duration_s", "=", "None", ",", "start_sample", "=", "None", ",", "num_samples", "=", "None", ",", "zero_pad", "=", "False", ")", ":", "if", "start_s", "is", "not", "None", "and", "start_samp...
Transforms the indicated slice of the AudioSegment into the frequency domain and returns the bins and the values. If neither `start_s` or `start_sample` is specified, the first sample of the slice will be the first sample of the AudioSegment. If neither `duration_s` or `num_samples` is specified, the slice will be from the specified start to the end of the segment. .. code-block:: python # Example for plotting the FFT using this function import matplotlib.pyplot as plt import numpy as np seg = audiosegment.from_file("furelise.wav") # Just take the first 3 seconds hist_bins, hist_vals = seg[1:3000].fft() hist_vals_real_normed = np.abs(hist_vals) / len(hist_vals) plt.plot(hist_bins / 1000, hist_vals_real_normed) plt.xlabel("kHz") plt.ylabel("dB") plt.show() .. image:: images/fft.png :param start_s: The start time in seconds. If this is specified, you cannot specify `start_sample`. :param duration_s: The duration of the slice in seconds. If this is specified, you cannot specify `num_samples`. :param start_sample: The zero-based index of the first sample to include in the slice. If this is specified, you cannot specify `start_s`. :param num_samples: The number of samples to include in the slice. If this is specified, you cannot specify `duration_s`. :param zero_pad: If True and the combination of start and duration result in running off the end of the AudioSegment, the end is zero padded to prevent this. :returns: np.ndarray of frequencies in Hz, np.ndarray of amount of each frequency :raises: ValueError If `start_s` and `start_sample` are both specified and/or if both `duration_s` and `num_samples` are specified.
[ "Transforms", "the", "indicated", "slice", "of", "the", "AudioSegment", "into", "the", "frequency", "domain", "and", "returns", "the", "bins", "and", "the", "values", "." ]
python
test
rcarmo/pngcanvas
pngcanvas.py
https://github.com/rcarmo/pngcanvas/blob/e2eaa0d5ba353005b3b658f6ee453c1956340670/pngcanvas.py#L310-L351
def defilter(cur, prev, filter_type, bpp=4): """Decode a chunk""" if filter_type == 0: # No filter return cur elif filter_type == 1: # Sub xp = 0 for xc in range(bpp, len(cur)): cur[xc] = (cur[xc] + cur[xp]) % 256 xp += 1 elif filter_type == 2: # Up for xc in range(len(cur)): cur[xc] = (cur[xc] + prev[xc]) % 256 elif filter_type == 3: # Average xp = 0 for i in range(bpp): cur[i] = (cur[i] + prev[i] // 2) % 256 for xc in range(bpp, len(cur)): cur[xc] = (cur[xc] + ((cur[xp] + prev[xc]) // 2)) % 256 xp += 1 elif filter_type == 4: # Paeth xp = 0 for i in range(bpp): cur[i] = (cur[i] + prev[i]) % 256 for xc in range(bpp, len(cur)): a = cur[xp] b = prev[xc] c = prev[xp] p = a + b - c pa = abs(p - a) pb = abs(p - b) pc = abs(p - c) if pa <= pb and pa <= pc: value = a elif pb <= pc: value = b else: value = c cur[xc] = (cur[xc] + value) % 256 xp += 1 else: raise ValueError('Unrecognized scanline filter type: {}'.format(filter_type)) return cur
[ "def", "defilter", "(", "cur", ",", "prev", ",", "filter_type", ",", "bpp", "=", "4", ")", ":", "if", "filter_type", "==", "0", ":", "# No filter", "return", "cur", "elif", "filter_type", "==", "1", ":", "# Sub", "xp", "=", "0", "for", "xc", "in", ...
Decode a chunk
[ "Decode", "a", "chunk" ]
python
train
wbond/vat_moss-python
vat_moss/phone_number.py
https://github.com/wbond/vat_moss-python/blob/5089dcf036eb2e9abc58e78186fd46b522a50620/vat_moss/phone_number.py#L91-L113
def _lookup_country_code(phone_number): """ Accepts an international form of a phone number (+ followed by digits), and returns a two-character country code. :param phone_number: The string phone number, in international format with leading + :return: A two-character string or None if no match """ leading_digit = phone_number[0] if leading_digit not in CALLING_CODE_MAPPING: return None for mapping in CALLING_CODE_MAPPING[leading_digit]: if not re.match(mapping['regex'], phone_number): continue return mapping['country_code'] return None
[ "def", "_lookup_country_code", "(", "phone_number", ")", ":", "leading_digit", "=", "phone_number", "[", "0", "]", "if", "leading_digit", "not", "in", "CALLING_CODE_MAPPING", ":", "return", "None", "for", "mapping", "in", "CALLING_CODE_MAPPING", "[", "leading_digit"...
Accepts an international form of a phone number (+ followed by digits), and returns a two-character country code. :param phone_number: The string phone number, in international format with leading + :return: A two-character string or None if no match
[ "Accepts", "an", "international", "form", "of", "a", "phone", "number", "(", "+", "followed", "by", "digits", ")", "and", "returns", "a", "two", "-", "character", "country", "code", "." ]
python
train
gmr/rejected
rejected/log.py
https://github.com/gmr/rejected/blob/610a3e1401122ecb98d891b6795cca0255e5b044/rejected/log.py#L104-L117
def process(self, msg, kwargs): """Process the logging message and keyword arguments passed in to a logging call to insert contextual information. :param str msg: The message to process :param dict kwargs: The kwargs to append :rtype: (str, dict) """ kwargs['extra'] = { 'correlation_id': self.parent.correlation_id, 'parent': self.parent.name } return msg, kwargs
[ "def", "process", "(", "self", ",", "msg", ",", "kwargs", ")", ":", "kwargs", "[", "'extra'", "]", "=", "{", "'correlation_id'", ":", "self", ".", "parent", ".", "correlation_id", ",", "'parent'", ":", "self", ".", "parent", ".", "name", "}", "return",...
Process the logging message and keyword arguments passed in to a logging call to insert contextual information. :param str msg: The message to process :param dict kwargs: The kwargs to append :rtype: (str, dict)
[ "Process", "the", "logging", "message", "and", "keyword", "arguments", "passed", "in", "to", "a", "logging", "call", "to", "insert", "contextual", "information", "." ]
python
train
hubo1016/vlcp
vlcp/utils/flowupdater.py
https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/flowupdater.py#L167-L248
async def main(self): """ Main coroutine """ try: lastkeys = set() dataupdate = FlowUpdaterNotification.createMatcher(self, FlowUpdaterNotification.DATAUPDATED) startwalk = FlowUpdaterNotification.createMatcher(self, FlowUpdaterNotification.STARTWALK) self.subroutine(self._flowupdater(), False, '_flowupdateroutine') # Cache updated objects presave_update = set() while True: self._restartwalk = False presave_update.update(self._updatedset) self._updatedset.clear() _initialkeys = set(self._initialkeys) try: walk_result = await call_api(self, 'objectdb', 'walk', {'keys': self._initialkeys, 'walkerdict': self._walkerdict, 'requestid': (self._requstid, self._requestindex)}) except Exception: self._logger.warning("Flow updater %r walk step failed, conn = %r", self, self._connection, exc_info=True) # Cleanup await call_api(self, 'objectdb', 'unwatchall', {'requestid': (self._requstid, self._requestindex)}) await self.wait_with_timeout(2) self._requestindex += 1 if self._restartwalk: continue if self._updatedset: if any(v.getkey() in _initialkeys for v in self._updatedset): # During walk, there are other initial keys that are updated # To make sure we get the latest result, restart the walk continue lastkeys = set(self._savedkeys) _savedkeys, _savedresult = walk_result removekeys = tuple(lastkeys.difference(_savedkeys)) self.reset_initialkeys(_savedkeys, _savedresult) _initialkeys = set(self._initialkeys) if self._dataupdateroutine: self.terminate(self._dataupdateroutine) # Start detecting updates self.subroutine(self._dataobject_update_detect(_initialkeys, _savedresult), False, "_dataupdateroutine") # Set the updates back (potentially merged with newly updated objects) self._updatedset.update(v for v in presave_update) presave_update.clear() await self.walkcomplete(_savedkeys, _savedresult) if removekeys: await call_api(self, 'objectdb', 'munwatch', {'keys': removekeys, 'requestid': (self._requstid, self._requestindex)}) # Transfer updated objects to updatedset2 before a 
flow update notification # This helps to make `walkcomplete` executes before `updateflow` # # But notice that since there is only a single data object copy in all the program, # it is impossible to hide the change completely during `updateflow` self._updatedset2.update(self._updatedset) self._updatedset.clear() self._savedkeys = _savedkeys self._savedresult = _savedresult await self.wait_for_send(FlowUpdaterNotification(self, FlowUpdaterNotification.FLOWUPDATE)) while not self._restartwalk: if self._updatedset: if any(v.getkey() in _initialkeys for v in self._updatedset): break else: self._updatedset2.update(self._updatedset) self._updatedset.clear() self.scheduler.emergesend(FlowUpdaterNotification(self, FlowUpdaterNotification.FLOWUPDATE)) await M_(dataupdate, startwalk) except Exception: self._logger.exception("Flow updater %r stops update by an exception, conn = %r", self, self._connection) raise finally: self.subroutine(send_api(self, 'objectdb', 'unwatchall', {'requestid': (self._requstid, self._requestindex)}), False) if self._flowupdateroutine: self.terminate(self._flowupdateroutine) self._flowupdateroutine = None if self._dataupdateroutine: self.terminate(self._dataupdateroutine) self._dataupdateroutine = None
[ "async", "def", "main", "(", "self", ")", ":", "try", ":", "lastkeys", "=", "set", "(", ")", "dataupdate", "=", "FlowUpdaterNotification", ".", "createMatcher", "(", "self", ",", "FlowUpdaterNotification", ".", "DATAUPDATED", ")", "startwalk", "=", "FlowUpdate...
Main coroutine
[ "Main", "coroutine" ]
python
train
Atomistica/atomistica
src/python/atomistica/atomic_strain.py
https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/atomic_strain.py#L34-L47
def get_XIJ(nat, i_now, dr_now, dr_old): """ Calculates the X_{ij} matrix """ # Do an element-wise outer product dr_dr = dr_now.reshape(-1,3,1)*dr_old.reshape(-1,1,3) xij = np.zeros([nat,3,3]) for i in range(3): for j in range(3): # For each atom, sum over all neighbors xij[:,i,j] = np.bincount(i_now, weights=dr_dr[:,i,j]) return xij
[ "def", "get_XIJ", "(", "nat", ",", "i_now", ",", "dr_now", ",", "dr_old", ")", ":", "# Do an element-wise outer product", "dr_dr", "=", "dr_now", ".", "reshape", "(", "-", "1", ",", "3", ",", "1", ")", "*", "dr_old", ".", "reshape", "(", "-", "1", ",...
Calculates the X_{ij} matrix
[ "Calculates", "the", "X_", "{", "ij", "}", "matrix" ]
python
train
GaryLee/cmdlet
cmdlet/cmds.py
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L698-L702
def safe_substitute(prev, *args, **kw): '''alias of string.Template.safe_substitute''' template_obj = string.Template(*args, **kw) for data in prev: yield template_obj.safe_substitute(data)
[ "def", "safe_substitute", "(", "prev", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "template_obj", "=", "string", ".", "Template", "(", "*", "args", ",", "*", "*", "kw", ")", "for", "data", "in", "prev", ":", "yield", "template_obj", ".", "saf...
alias of string.Template.safe_substitute
[ "alias", "of", "string", ".", "Template", ".", "safe_substitute" ]
python
valid
etcher-be/emiz
emiz/avwx/core.py
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/core.py#L584-L596
def split_taf(txt: str) -> [str]: # type: ignore """ Splits a TAF report into each distinct time period """ lines = [] split = txt.split() last_index = 0 for i, item in enumerate(split): if starts_new_line(item) and i != 0 and not split[i - 1].startswith('PROB'): lines.append(' '.join(split[last_index:i])) last_index = i lines.append(' '.join(split[last_index:])) return lines
[ "def", "split_taf", "(", "txt", ":", "str", ")", "->", "[", "str", "]", ":", "# type: ignore", "lines", "=", "[", "]", "split", "=", "txt", ".", "split", "(", ")", "last_index", "=", "0", "for", "i", ",", "item", "in", "enumerate", "(", "split", ...
Splits a TAF report into each distinct time period
[ "Splits", "a", "TAF", "report", "into", "each", "distinct", "time", "period" ]
python
train
mbedmicro/pyOCD
pyocd/__main__.py
https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/__main__.py#L463-L485
def do_erase(self): """! @brief Handle 'erase' subcommand.""" self._increase_logging(["pyocd.tools.loader", "pyocd"]) session = ConnectHelper.session_with_chosen_probe( project_dir=self._args.project_dir, config_file=self._args.config, user_script=self._args.script, no_config=self._args.no_config, pack=self._args.pack, unique_id=self._args.unique_id, target_override=self._args.target_override, frequency=self._args.frequency, blocking=False, **convert_session_options(self._args.options)) if session is None: sys.exit(1) with session: mode = self._args.erase_mode or loader.FlashEraser.Mode.SECTOR eraser = loader.FlashEraser(session, mode) addresses = flatten_args(self._args.addresses) eraser.erase(addresses)
[ "def", "do_erase", "(", "self", ")", ":", "self", ".", "_increase_logging", "(", "[", "\"pyocd.tools.loader\"", ",", "\"pyocd\"", "]", ")", "session", "=", "ConnectHelper", ".", "session_with_chosen_probe", "(", "project_dir", "=", "self", ".", "_args", ".", "...
! @brief Handle 'erase' subcommand.
[ "!" ]
python
train
simoninireland/epyc
epyc/experiment.py
https://github.com/simoninireland/epyc/blob/b3b61007741a0ab3de64df89070a6f30de8ec268/epyc/experiment.py#L120-L135
def report( self, params, meta, res ): """Return a properly-structured dict of results. The default returns a dict with results keyed by :attr:`Experiment.RESULTS`, the data point in the parameter space keyed by :attr:`Experiment.PARAMETERS`, and timing and other metadata keyed by :attr:`Experiment.METADATA`. Overriding this method can be used to record extra values, but be sure to call the base method as well. :param params: the parameters we ran under :param meta: the metadata for this run :param res: the direct experimental results from do() :returns: a :term:`results dict`""" rc = dict() rc[self.PARAMETERS] = params.copy() rc[self.METADATA] = meta.copy() rc[self.RESULTS] = res return rc
[ "def", "report", "(", "self", ",", "params", ",", "meta", ",", "res", ")", ":", "rc", "=", "dict", "(", ")", "rc", "[", "self", ".", "PARAMETERS", "]", "=", "params", ".", "copy", "(", ")", "rc", "[", "self", ".", "METADATA", "]", "=", "meta", ...
Return a properly-structured dict of results. The default returns a dict with results keyed by :attr:`Experiment.RESULTS`, the data point in the parameter space keyed by :attr:`Experiment.PARAMETERS`, and timing and other metadata keyed by :attr:`Experiment.METADATA`. Overriding this method can be used to record extra values, but be sure to call the base method as well. :param params: the parameters we ran under :param meta: the metadata for this run :param res: the direct experimental results from do() :returns: a :term:`results dict`
[ "Return", "a", "properly", "-", "structured", "dict", "of", "results", ".", "The", "default", "returns", "a", "dict", "with", "results", "keyed", "by", ":", "attr", ":", "Experiment", ".", "RESULTS", "the", "data", "point", "in", "the", "parameter", "space...
python
train
Kronuz/pyScss
yapps2.py
https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/yapps2.py#L111-L117
def equal_set(self, a, b): "See if a and b have the same elements" if len(a) != len(b): return 0 if a == b: return 1 return self.subset(a, b) and self.subset(b, a)
[ "def", "equal_set", "(", "self", ",", "a", ",", "b", ")", ":", "if", "len", "(", "a", ")", "!=", "len", "(", "b", ")", ":", "return", "0", "if", "a", "==", "b", ":", "return", "1", "return", "self", ".", "subset", "(", "a", ",", "b", ")", ...
See if a and b have the same elements
[ "See", "if", "a", "and", "b", "have", "the", "same", "elements" ]
python
train
tensorflow/probability
experimental/mcmc/elliptical_slice_sampler.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/mcmc/elliptical_slice_sampler.py#L228-L372
def one_step(self, current_state, previous_kernel_results): """Runs one iteration of the Elliptical Slice Sampler. Args: current_state: `Tensor` or Python `list` of `Tensor`s representing the current state(s) of the Markov chain(s). The first `r` dimensions index independent chains, `r = tf.rank(log_likelihood_fn(*normal_sampler_fn()))`. previous_kernel_results: `collections.namedtuple` containing `Tensor`s representing values from previous calls to this function (or from the `bootstrap_results` function.) Returns: next_state: Tensor or Python list of `Tensor`s representing the state(s) of the Markov chain(s) after taking exactly one step. Has same type and shape as `current_state`. kernel_results: `collections.namedtuple` of internal calculations used to advance the chain. Raises: TypeError: if `not log_likelihood.dtype.is_floating`. """ with tf.compat.v1.name_scope( name=mcmc_util.make_name(self.name, 'elliptical_slice', 'one_step'), values=[self._seed_stream, current_state, previous_kernel_results.log_likelihood]): with tf.compat.v1.name_scope('initialize'): [ init_state_parts, init_log_likelihood ] = _prepare_args( self.log_likelihood_fn, current_state, previous_kernel_results.log_likelihood) normal_samples = self.normal_sampler_fn(self._seed_stream()) # pylint: disable=not-callable normal_samples = list(normal_samples) if mcmc_util.is_list_like( normal_samples) else [normal_samples] u = tf.random.uniform( shape=tf.shape(init_log_likelihood), seed=self._seed_stream(), dtype=init_log_likelihood.dtype.base_dtype, ) threshold = init_log_likelihood + tf.math.log(u) starting_angle = tf.random.uniform( shape=tf.shape(init_log_likelihood), minval=0., maxval=2 * np.pi, name='angle', seed=self._seed_stream(), dtype=init_log_likelihood.dtype.base_dtype, ) starting_angle_min = starting_angle - 2 * np.pi starting_angle_max = starting_angle starting_state_parts = _rotate_on_ellipse( init_state_parts, normal_samples, starting_angle) starting_log_likelihood = 
self.log_likelihood_fn(*starting_state_parts) # pylint: disable=not-callable def chain_not_done( angle, angle_min, angle_max, current_state_parts, current_log_likelihood): del angle, angle_min, angle_max, current_state_parts return tf.reduce_any(current_log_likelihood < threshold) def sample_next_angle( angle, angle_min, angle_max, current_state_parts, current_log_likelihood): """Slice sample a new angle, and rotate init_state by that amount.""" chain_not_done = current_log_likelihood < threshold # Box in on angle. Only update angles for which we haven't generated a # point that beats the threshold. angle_min = tf.where( tf.math.logical_and(angle < 0, chain_not_done), angle, angle_min) angle_max = tf.where( tf.math.logical_and(angle >= 0, chain_not_done), angle, angle_max) new_angle = tf.random.uniform( shape=tf.shape(current_log_likelihood), minval=angle_min, maxval=angle_max, seed=self._seed_stream(), dtype=angle.dtype.base_dtype ) angle = tf.where(chain_not_done, new_angle, angle) next_state_parts = _rotate_on_ellipse( init_state_parts, normal_samples, angle) new_state_parts = [] broadcasted_chain_not_done = _right_pad_with_ones( chain_not_done, tf.rank(next_state_parts[0])) for n_state, c_state in zip(next_state_parts, current_state_parts): new_state_part = tf.where( tf.broadcast_to( broadcasted_chain_not_done, tf.shape(n_state)), n_state, c_state) new_state_parts.append(new_state_part) return ( angle, angle_min, angle_max, new_state_parts, self.log_likelihood_fn(*new_state_parts) # pylint: disable=not-callable ) [ next_angle, _, _, next_state_parts, next_log_likelihood, ] = tf.while_loop( cond=chain_not_done, body=sample_next_angle, loop_vars=[ starting_angle, starting_angle_min, starting_angle_max, starting_state_parts, starting_log_likelihood ]) return [ next_state_parts if mcmc_util.is_list_like( current_state) else next_state_parts[0], EllipticalSliceSamplerKernelResults( log_likelihood=next_log_likelihood, angle=next_angle, normal_samples=normal_samples, 
), ]
[ "def", "one_step", "(", "self", ",", "current_state", ",", "previous_kernel_results", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", "=", "mcmc_util", ".", "make_name", "(", "self", ".", "name", ",", "'elliptical_slice'", ...
Runs one iteration of the Elliptical Slice Sampler. Args: current_state: `Tensor` or Python `list` of `Tensor`s representing the current state(s) of the Markov chain(s). The first `r` dimensions index independent chains, `r = tf.rank(log_likelihood_fn(*normal_sampler_fn()))`. previous_kernel_results: `collections.namedtuple` containing `Tensor`s representing values from previous calls to this function (or from the `bootstrap_results` function.) Returns: next_state: Tensor or Python list of `Tensor`s representing the state(s) of the Markov chain(s) after taking exactly one step. Has same type and shape as `current_state`. kernel_results: `collections.namedtuple` of internal calculations used to advance the chain. Raises: TypeError: if `not log_likelihood.dtype.is_floating`.
[ "Runs", "one", "iteration", "of", "the", "Elliptical", "Slice", "Sampler", "." ]
python
test
volafiled/python-volapi
volapi/auxo.py
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/auxo.py#L33-L51
def call_async(func): """Decorates a function to be called async on the loop thread""" @wraps(func) def wrapper(self, *args, **kw): """Wraps instance method to be called on loop thread""" def call(): """Calls function on loop thread""" try: func(self, *args, **kw) except Exception: logger.exception( "failed to call async [%r] with [%r] [%r]", func, args, kw ) self.loop.call_soon_threadsafe(call) return wrapper
[ "def", "call_async", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "\"\"\"Wraps instance method to be called on loop thread\"\"\"", "def", "call", "(", ")", ":", "\"\"...
Decorates a function to be called async on the loop thread
[ "Decorates", "a", "function", "to", "be", "called", "async", "on", "the", "loop", "thread" ]
python
train
sony/nnabla
python/src/nnabla/functions.py
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/functions.py#L230-L275
def slice(ctx, x, start=None, stop=None, step=None, n_outputs=-1, outputs=None): r""" Slice arrays along specified axis. This function complies with python slice wherre `slice(None, None, -1)` and `slice(-1, None, -1)` are the special case, which flips the input array and results in the output array from the end to the beginning of the input array along the corresponding dimension. Args: x(~nnabla.Variable): N-D array start(repeated int64): Start indices for each axis [default=``(0,) * len(x.shape)``] stop(repeated int64): Stop indices for each axis [default=``tuple(x.shape)``] step(repeated int64): Step indices for each axis [default=``(1,) * len(x.shape)``] Returns: ~nnabla.Variable: Sliced N-D array """ import copy start = copy.copy(start) stop = copy.copy(stop) step = copy.copy(step) from .function_bases import slice as slice_base if start is None: start = (0,) * len(x.shape) if stop is None: stop = tuple(x.shape) if step is None: step = (1,) * len(x.shape) shape = x.shape for i, sss in enumerate(zip(start, stop, step)): s0, s1, s2 = sss # SPECIAL CASE: slice(-1, None, <0) or slice(None, None, <0) SLICE_NONE = 0x7fffffff if s0 == None: start[i] = SLICE_NONE if s1 == None: stop[i] = SLICE_NONE if s2 == None: step[i] = SLICE_NONE return slice_base(x, start, stop, step, n_outputs, outputs)
[ "def", "slice", "(", "ctx", ",", "x", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "step", "=", "None", ",", "n_outputs", "=", "-", "1", ",", "outputs", "=", "None", ")", ":", "import", "copy", "start", "=", "copy", ".", "copy", "(...
r""" Slice arrays along specified axis. This function complies with python slice wherre `slice(None, None, -1)` and `slice(-1, None, -1)` are the special case, which flips the input array and results in the output array from the end to the beginning of the input array along the corresponding dimension. Args: x(~nnabla.Variable): N-D array start(repeated int64): Start indices for each axis [default=``(0,) * len(x.shape)``] stop(repeated int64): Stop indices for each axis [default=``tuple(x.shape)``] step(repeated int64): Step indices for each axis [default=``(1,) * len(x.shape)``] Returns: ~nnabla.Variable: Sliced N-D array
[ "r", "Slice", "arrays", "along", "specified", "axis", ".", "This", "function", "complies", "with", "python", "slice", "wherre", "slice", "(", "None", "None", "-", "1", ")", "and", "slice", "(", "-", "1", "None", "-", "1", ")", "are", "the", "special", ...
python
train
earwig/mwparserfromhell
mwparserfromhell/parser/tokenizer.py
https://github.com/earwig/mwparserfromhell/blob/98dc30902d35c714a70aca8e6616f49d71cb24cc/mwparserfromhell/parser/tokenizer.py#L450-L503
def _really_parse_external_link(self, brackets): """Really parse an external link.""" if brackets: self._parse_bracketed_uri_scheme() invalid = ("\n", " ", "]") else: self._parse_free_uri_scheme() invalid = ("\n", " ", "[", "]") punct = tuple(",;\\.:!?)") if self._read() is self.END or self._read()[0] in invalid: self._fail_route() tail = "" while True: this, next = self._read(), self._read(1) if this == "&": if tail: self._emit_text(tail) tail = "" self._parse_entity() elif (this == "<" and next == "!" and self._read(2) == self._read(3) == "-"): if tail: self._emit_text(tail) tail = "" self._parse_comment() elif not brackets and self._is_free_link_end(this, next): return self._pop(), tail, -1 elif this is self.END or this == "\n": self._fail_route() elif this == next == "{" and self._can_recurse(): if tail: self._emit_text(tail) tail = "" self._parse_template_or_argument() elif this == "]": return self._pop(), tail, 0 elif " " in this: before, after = this.split(" ", 1) if brackets: self._emit_text(before) self._emit(tokens.ExternalLinkSeparator()) if after: self._emit_text(after) self._context ^= contexts.EXT_LINK_URI self._context |= contexts.EXT_LINK_TITLE self._head += 1 return self._parse(push=False), None, 0 punct, tail = self._handle_free_link_text(punct, tail, before) return self._pop(), tail + " " + after, 0 elif not brackets: punct, tail = self._handle_free_link_text(punct, tail, this) else: self._emit_text(this) self._head += 1
[ "def", "_really_parse_external_link", "(", "self", ",", "brackets", ")", ":", "if", "brackets", ":", "self", ".", "_parse_bracketed_uri_scheme", "(", ")", "invalid", "=", "(", "\"\\n\"", ",", "\" \"", ",", "\"]\"", ")", "else", ":", "self", ".", "_parse_free...
Really parse an external link.
[ "Really", "parse", "an", "external", "link", "." ]
python
train
tensorflow/datasets
tensorflow_datasets/core/registered.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/registered.py#L343-L354
def _cast_to_pod(val): """Try cast to int, float, bool, str, in that order.""" bools = {"True": True, "False": False} if val in bools: return bools[val] try: return int(val) except ValueError: try: return float(val) except ValueError: return tf.compat.as_text(val)
[ "def", "_cast_to_pod", "(", "val", ")", ":", "bools", "=", "{", "\"True\"", ":", "True", ",", "\"False\"", ":", "False", "}", "if", "val", "in", "bools", ":", "return", "bools", "[", "val", "]", "try", ":", "return", "int", "(", "val", ")", "except...
Try cast to int, float, bool, str, in that order.
[ "Try", "cast", "to", "int", "float", "bool", "str", "in", "that", "order", "." ]
python
train
agile-geoscience/striplog
striplog/striplog.py
https://github.com/agile-geoscience/striplog/blob/8033b673a151f96c29802b43763e863519a3124c/striplog/striplog.py#L410-L444
def from_petrel(cls, filename, stop=None, points=False, null=None, function=None, include=None, exclude=None, remap=None, ignore=None): """ Makes a striplog from a Petrel text file. Returns: striplog. """ result = utils.read_petrel(filename, function=function, remap=remap, ) data = cls._clean_longitudinal_data(result, null=null ) list_of_Intervals = cls._build_list_of_Intervals(data, stop=stop, points=points, include=include, exclude=exclude, ignore=ignore ) if list_of_Intervals: return cls(list_of_Intervals) return None
[ "def", "from_petrel", "(", "cls", ",", "filename", ",", "stop", "=", "None", ",", "points", "=", "False", ",", "null", "=", "None", ",", "function", "=", "None", ",", "include", "=", "None", ",", "exclude", "=", "None", ",", "remap", "=", "None", "...
Makes a striplog from a Petrel text file. Returns: striplog.
[ "Makes", "a", "striplog", "from", "a", "Petrel", "text", "file", "." ]
python
test
angr/angr
angr/project.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/project.py#L710-L717
def is_java_jni_project(self): """ Indicates if the project's main binary is a Java Archive, which interacts during its execution with native libraries (via JNI). """ if self._is_java_jni_project is None: self._is_java_jni_project = isinstance(self.arch, ArchSoot) and self.simos.is_javavm_with_jni_support return self._is_java_jni_project
[ "def", "is_java_jni_project", "(", "self", ")", ":", "if", "self", ".", "_is_java_jni_project", "is", "None", ":", "self", ".", "_is_java_jni_project", "=", "isinstance", "(", "self", ".", "arch", ",", "ArchSoot", ")", "and", "self", ".", "simos", ".", "is...
Indicates if the project's main binary is a Java Archive, which interacts during its execution with native libraries (via JNI).
[ "Indicates", "if", "the", "project", "s", "main", "binary", "is", "a", "Java", "Archive", "which", "interacts", "during", "its", "execution", "with", "native", "libraries", "(", "via", "JNI", ")", "." ]
python
train
Calysto/calysto
calysto/ai/conx.py
https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L4700-L4709
def copyHiddenToContext(self): """ Uses key to identify the hidden layer associated with each layer in the self.contextLayers dictionary. """ for item in list(self.contextLayers.items()): if self.verbosity > 2: print('Hidden layer: ', self.getLayer(item[0]).activation) if self.verbosity > 2: print('Context layer before copy: ', item[1].activation) item[1].copyActivations(self.getLayer(item[0]).activation) if self.verbosity > 2: print('Context layer after copy: ', item[1].activation)
[ "def", "copyHiddenToContext", "(", "self", ")", ":", "for", "item", "in", "list", "(", "self", ".", "contextLayers", ".", "items", "(", ")", ")", ":", "if", "self", ".", "verbosity", ">", "2", ":", "print", "(", "'Hidden layer: '", ",", "self", ".", ...
Uses key to identify the hidden layer associated with each layer in the self.contextLayers dictionary.
[ "Uses", "key", "to", "identify", "the", "hidden", "layer", "associated", "with", "each", "layer", "in", "the", "self", ".", "contextLayers", "dictionary", "." ]
python
train
wummel/linkchecker
linkcheck/logger/text.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/logger/text.py#L144-L148
def write_url (self, url_data): """Write url_data.base_url.""" self.write(self.part('url') + self.spaces('url')) txt = strformat.strline(url_data.base_url) self.writeln(txt, color=self.colorurl)
[ "def", "write_url", "(", "self", ",", "url_data", ")", ":", "self", ".", "write", "(", "self", ".", "part", "(", "'url'", ")", "+", "self", ".", "spaces", "(", "'url'", ")", ")", "txt", "=", "strformat", ".", "strline", "(", "url_data", ".", "base_...
Write url_data.base_url.
[ "Write", "url_data", ".", "base_url", "." ]
python
train
bolt-project/bolt
bolt/local/array.py
https://github.com/bolt-project/bolt/blob/9cd7104aa085498da3097b72696184b9d3651c51/bolt/local/array.py#L66-L92
def filter(self, func, axis=(0,)): """ Filter array along an axis. Applies a function which should evaluate to boolean, along a single axis or multiple axes. Array will be aligned so that the desired set of axes are in the keys, which may require a transpose/reshape. Parameters ---------- func : function Function to apply, should return boolean axis : tuple or int, optional, default=(0,) Axis or multiple axes to filter along. Returns ------- BoltArrayLocal """ axes = sorted(tupleize(axis)) reshaped = self._align(axes) filtered = asarray(list(filter(func, reshaped))) return self._constructor(filtered)
[ "def", "filter", "(", "self", ",", "func", ",", "axis", "=", "(", "0", ",", ")", ")", ":", "axes", "=", "sorted", "(", "tupleize", "(", "axis", ")", ")", "reshaped", "=", "self", ".", "_align", "(", "axes", ")", "filtered", "=", "asarray", "(", ...
Filter array along an axis. Applies a function which should evaluate to boolean, along a single axis or multiple axes. Array will be aligned so that the desired set of axes are in the keys, which may require a transpose/reshape. Parameters ---------- func : function Function to apply, should return boolean axis : tuple or int, optional, default=(0,) Axis or multiple axes to filter along. Returns ------- BoltArrayLocal
[ "Filter", "array", "along", "an", "axis", "." ]
python
test
dropbox/stone
stone/frontend/parser.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/frontend/parser.py#L567-L576
def p_field_void(self, p): """field : ID NL | ID NL INDENT annotation_ref_list docsection DEDENT""" p[0] = AstVoidField(self.path, p.lineno(1), p.lexpos(1), p[1]) if len(p) > 3: if p[4] is not None: p[0].set_annotations(p[4]) if p[5] is not None: p[0].set_doc(p[5])
[ "def", "p_field_void", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "AstVoidField", "(", "self", ".", "path", ",", "p", ".", "lineno", "(", "1", ")", ",", "p", ".", "lexpos", "(", "1", ")", ",", "p", "[", "1", "]", ")", "if", ...
field : ID NL | ID NL INDENT annotation_ref_list docsection DEDENT
[ "field", ":", "ID", "NL", "|", "ID", "NL", "INDENT", "annotation_ref_list", "docsection", "DEDENT" ]
python
train
UCSBarchlab/PyRTL
pyrtl/rtllib/aes.py
https://github.com/UCSBarchlab/PyRTL/blob/0988e5c9c10ededd5e1f58d5306603f9edf4b3e2/pyrtl/rtllib/aes.py#L127-L149
def decryption(self, ciphertext, key): """ Builds a single cycle AES Decryption circuit :param WireVector ciphertext: data to decrypt :param WireVector key: AES key to use to encrypt (AES is symmetric) :return: a WireVector containing the plaintext """ if len(ciphertext) != self._key_len: raise pyrtl.PyrtlError("Ciphertext length is invalid") if len(key) != self._key_len: raise pyrtl.PyrtlError("key length is invalid") key_list = self._key_gen(key) t = self._add_round_key(ciphertext, key_list[10]) for round in range(1, 11): t = self._inv_shift_rows(t) t = self._sub_bytes(t, True) t = self._add_round_key(t, key_list[10 - round]) if round != 10: t = self._mix_columns(t, True) return t
[ "def", "decryption", "(", "self", ",", "ciphertext", ",", "key", ")", ":", "if", "len", "(", "ciphertext", ")", "!=", "self", ".", "_key_len", ":", "raise", "pyrtl", ".", "PyrtlError", "(", "\"Ciphertext length is invalid\"", ")", "if", "len", "(", "key", ...
Builds a single cycle AES Decryption circuit :param WireVector ciphertext: data to decrypt :param WireVector key: AES key to use to encrypt (AES is symmetric) :return: a WireVector containing the plaintext
[ "Builds", "a", "single", "cycle", "AES", "Decryption", "circuit" ]
python
train
sixty-north/cosmic-ray
plugins/execution-engines/celery4/cosmic_ray_celery4_engine/worker.py
https://github.com/sixty-north/cosmic-ray/blob/c654e074afbb7b7fcbc23359083c1287c0d3e991/plugins/execution-engines/celery4/cosmic_ray_celery4_engine/worker.py#L25-L48
def worker_task(work_item, config): """The celery task which performs a single mutation and runs a test suite. This runs `cosmic-ray worker` in a subprocess and returns the results, passing `config` to it via stdin. Args: work_item: A dict describing a WorkItem. config: The configuration to use for the test execution. Returns: An updated WorkItem """ global _workspace _ensure_workspace(config) result = worker( work_item.module_path, config.python_version, work_item.operator_name, work_item.occurrence, config.test_command, config.timeout) return work_item.job_id, result
[ "def", "worker_task", "(", "work_item", ",", "config", ")", ":", "global", "_workspace", "_ensure_workspace", "(", "config", ")", "result", "=", "worker", "(", "work_item", ".", "module_path", ",", "config", ".", "python_version", ",", "work_item", ".", "opera...
The celery task which performs a single mutation and runs a test suite. This runs `cosmic-ray worker` in a subprocess and returns the results, passing `config` to it via stdin. Args: work_item: A dict describing a WorkItem. config: The configuration to use for the test execution. Returns: An updated WorkItem
[ "The", "celery", "task", "which", "performs", "a", "single", "mutation", "and", "runs", "a", "test", "suite", "." ]
python
train
mailgun/talon
talon/html_quotations.py
https://github.com/mailgun/talon/blob/cdd84563dd329c4f887591807870d10015e0c7a7/talon/html_quotations.py#L20-L42
def add_checkpoint(html_note, counter): """Recursively adds checkpoints to html tree. """ if html_note.text: html_note.text = (html_note.text + CHECKPOINT_PREFIX + str(counter) + CHECKPOINT_SUFFIX) else: html_note.text = (CHECKPOINT_PREFIX + str(counter) + CHECKPOINT_SUFFIX) counter += 1 for child in html_note.iterchildren(): counter = add_checkpoint(child, counter) if html_note.tail: html_note.tail = (html_note.tail + CHECKPOINT_PREFIX + str(counter) + CHECKPOINT_SUFFIX) else: html_note.tail = (CHECKPOINT_PREFIX + str(counter) + CHECKPOINT_SUFFIX) counter += 1 return counter
[ "def", "add_checkpoint", "(", "html_note", ",", "counter", ")", ":", "if", "html_note", ".", "text", ":", "html_note", ".", "text", "=", "(", "html_note", ".", "text", "+", "CHECKPOINT_PREFIX", "+", "str", "(", "counter", ")", "+", "CHECKPOINT_SUFFIX", ")"...
Recursively adds checkpoints to html tree.
[ "Recursively", "adds", "checkpoints", "to", "html", "tree", "." ]
python
train
CodyKochmann/generators
generators/uniq.py
https://github.com/CodyKochmann/generators/blob/e4ca4dd25d5023a94b0349c69d6224070cc2526f/generators/uniq.py#L7-L16
def uniq(pipe): ''' this works like bash's uniq command where the generator only iterates if the next value is not the previous ''' pipe = iter(pipe) previous = next(pipe) yield previous for i in pipe: if i is not previous: previous = i yield i
[ "def", "uniq", "(", "pipe", ")", ":", "pipe", "=", "iter", "(", "pipe", ")", "previous", "=", "next", "(", "pipe", ")", "yield", "previous", "for", "i", "in", "pipe", ":", "if", "i", "is", "not", "previous", ":", "previous", "=", "i", "yield", "i...
this works like bash's uniq command where the generator only iterates if the next value is not the previous
[ "this", "works", "like", "bash", "s", "uniq", "command", "where", "the", "generator", "only", "iterates", "if", "the", "next", "value", "is", "not", "the", "previous" ]
python
train
WTRMQDev/lnoise
lnoise/noisetypes.py
https://github.com/WTRMQDev/lnoise/blob/7f8d9faf135025a6aac50131d14a34d1009e8cdd/lnoise/noisetypes.py#L69-L85
def hkdf(self, chaining_key, input_key_material, dhlen=64): """Hash-based key derivation function Takes a ``chaining_key'' byte sequence of len HASHLEN, and an ``input_key_material'' byte sequence with length either zero bytes, 32 bytes or dhlen bytes. Returns two byte sequences of length HASHLEN""" if len(chaining_key) != self.HASHLEN: raise HashError("Incorrect chaining key length") if len(input_key_material) not in (0, 32, dhlen): raise HashError("Incorrect input key material length") temp_key = self.hmac_hash(chaining_key, input_key_material) output1 = self.hmac_hash(temp_key, b'\x01') output2 = self.hmac_hash(temp_key, output1 + b'\x02') return output1, output2
[ "def", "hkdf", "(", "self", ",", "chaining_key", ",", "input_key_material", ",", "dhlen", "=", "64", ")", ":", "if", "len", "(", "chaining_key", ")", "!=", "self", ".", "HASHLEN", ":", "raise", "HashError", "(", "\"Incorrect chaining key length\"", ")", "if"...
Hash-based key derivation function Takes a ``chaining_key'' byte sequence of len HASHLEN, and an ``input_key_material'' byte sequence with length either zero bytes, 32 bytes or dhlen bytes. Returns two byte sequences of length HASHLEN
[ "Hash", "-", "based", "key", "derivation", "function" ]
python
train
sanger-pathogens/circlator
circlator/merge.py
https://github.com/sanger-pathogens/circlator/blob/a4befb8c9dbbcd4b3ad1899a95aa3e689d58b638/circlator/merge.py#L210-L217
def _has_qry_hit_longer_than(self, nucmer_hits, min_length, hits_to_exclude=None): '''Returns True iff list of nucmer_hits has a hit longer than min_length, not counting the hits in hits_to_exclude''' if hits_to_exclude is None: to_exclude = set() else: to_exclude = hits_to_exclude long_hits = [hit.hit_length_qry for hit in nucmer_hits if hit not in to_exclude and hit.hit_length_qry > min_length] return len(long_hits) > 0
[ "def", "_has_qry_hit_longer_than", "(", "self", ",", "nucmer_hits", ",", "min_length", ",", "hits_to_exclude", "=", "None", ")", ":", "if", "hits_to_exclude", "is", "None", ":", "to_exclude", "=", "set", "(", ")", "else", ":", "to_exclude", "=", "hits_to_exclu...
Returns True iff list of nucmer_hits has a hit longer than min_length, not counting the hits in hits_to_exclude
[ "Returns", "True", "iff", "list", "of", "nucmer_hits", "has", "a", "hit", "longer", "than", "min_length", "not", "counting", "the", "hits", "in", "hits_to_exclude" ]
python
train
6809/MC6809
MC6809/components/mc6809_tools.py
https://github.com/6809/MC6809/blob/6ba2f5106df46689017b5d0b6d84d43b7ee6a240/MC6809/components/mc6809_tools.py#L120-L149
def calc_new_count(min_value, value, max_value, trigger, target): """ change 'value' between 'min_value' and 'max_value' so that 'trigger' will be match 'target' >>> calc_new_count(min_value=0, value=100, max_value=200, trigger=30, target=30) 100 >>> calc_new_count(min_value=0, value=100, max_value=200, trigger=50, target=5) 55 >>> calc_new_count(min_value=60, value=100, max_value=200, trigger=50, target=5) 60 >>> calc_new_count(min_value=0, value=100, max_value=200, trigger=20, target=40) 150 >>> calc_new_count(min_value=0, value=100, max_value=125, trigger=20, target=40) 125 """ try: new_value = float(value) / float(trigger) * target except ZeroDivisionError: return value * 2 if new_value > max_value: return max_value new_value = int((value + new_value) / 2) if new_value < min_value: return min_value return new_value
[ "def", "calc_new_count", "(", "min_value", ",", "value", ",", "max_value", ",", "trigger", ",", "target", ")", ":", "try", ":", "new_value", "=", "float", "(", "value", ")", "/", "float", "(", "trigger", ")", "*", "target", "except", "ZeroDivisionError", ...
change 'value' between 'min_value' and 'max_value' so that 'trigger' will be match 'target' >>> calc_new_count(min_value=0, value=100, max_value=200, trigger=30, target=30) 100 >>> calc_new_count(min_value=0, value=100, max_value=200, trigger=50, target=5) 55 >>> calc_new_count(min_value=60, value=100, max_value=200, trigger=50, target=5) 60 >>> calc_new_count(min_value=0, value=100, max_value=200, trigger=20, target=40) 150 >>> calc_new_count(min_value=0, value=100, max_value=125, trigger=20, target=40) 125
[ "change", "value", "between", "min_value", "and", "max_value", "so", "that", "trigger", "will", "be", "match", "target", ">>>", "calc_new_count", "(", "min_value", "=", "0", "value", "=", "100", "max_value", "=", "200", "trigger", "=", "30", "target", "=", ...
python
train
SuperCowPowers/workbench
workbench/workers/pe_classifier.py
https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench/workers/pe_classifier.py#L12-L25
def execute(self, input_data): ''' This worker classifies PEFiles as Evil or AOK (TOY not a real classifier at this point)''' # In general you'd do something different with these two outputs # for this toy example will just smash them in a big string pefile_output = input_data['pe_features'] indicators = input_data['pe_indicators'] all_input = str(pefile_output) + str(indicators) flag = 'Reported Checksum does not match actual checksum' if flag in all_input: self.output['classification'] = 'Toy/Fake Classifier says Evil!' return self.output
[ "def", "execute", "(", "self", ",", "input_data", ")", ":", "# In general you'd do something different with these two outputs", "# for this toy example will just smash them in a big string", "pefile_output", "=", "input_data", "[", "'pe_features'", "]", "indicators", "=", "input_...
This worker classifies PEFiles as Evil or AOK (TOY not a real classifier at this point)
[ "This", "worker", "classifies", "PEFiles", "as", "Evil", "or", "AOK", "(", "TOY", "not", "a", "real", "classifier", "at", "this", "point", ")" ]
python
train
quantopian/zipline
zipline/pipeline/loaders/earnings_estimates.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L810-L838
def get_zeroth_quarter_idx(self, stacked_last_per_qtr): """ Filters for releases that are on or after each simulation date and determines the previous quarter by picking out the most recent release relative to each date in the index. Parameters ---------- stacked_last_per_qtr : pd.DataFrame A DataFrame with index of calendar dates, sid, and normalized quarters with each row being the latest estimate for the row's index values, sorted by event date. Returns ------- previous_releases_per_date_index : pd.MultiIndex An index of calendar dates, sid, and normalized quarters, for only the rows that have a previous event. """ previous_releases_per_date = stacked_last_per_qtr.loc[ stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] <= stacked_last_per_qtr.index.get_level_values(SIMULATION_DATES) ].groupby( level=[SIMULATION_DATES, SID_FIELD_NAME], as_index=False, # Here we take advantage of the fact that `stacked_last_per_qtr` is # sorted by event date. ).nth(-1) return previous_releases_per_date.index
[ "def", "get_zeroth_quarter_idx", "(", "self", ",", "stacked_last_per_qtr", ")", ":", "previous_releases_per_date", "=", "stacked_last_per_qtr", ".", "loc", "[", "stacked_last_per_qtr", "[", "EVENT_DATE_FIELD_NAME", "]", "<=", "stacked_last_per_qtr", ".", "index", ".", "...
Filters for releases that are on or after each simulation date and determines the previous quarter by picking out the most recent release relative to each date in the index. Parameters ---------- stacked_last_per_qtr : pd.DataFrame A DataFrame with index of calendar dates, sid, and normalized quarters with each row being the latest estimate for the row's index values, sorted by event date. Returns ------- previous_releases_per_date_index : pd.MultiIndex An index of calendar dates, sid, and normalized quarters, for only the rows that have a previous event.
[ "Filters", "for", "releases", "that", "are", "on", "or", "after", "each", "simulation", "date", "and", "determines", "the", "previous", "quarter", "by", "picking", "out", "the", "most", "recent", "release", "relative", "to", "each", "date", "in", "the", "ind...
python
train
quantumlib/Cirq
cirq/circuits/circuit.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/circuits/circuit.py#L1068-L1100
def insert_at_frontier(self, operations: ops.OP_TREE, start: int, frontier: Dict[ops.Qid, int] = None ) -> Dict[ops.Qid, int]: """Inserts operations inline at frontier. Args: operations: the operations to insert start: the moment at which to start inserting the operations frontier: frontier[q] is the earliest moment in which an operation acting on qubit q can be placed. """ if frontier is None: frontier = defaultdict(lambda: 0) operations = tuple(ops.flatten_op_tree(operations)) if not operations: return frontier qubits = set(q for op in operations for q in op.qubits) if any(frontier[q] > start for q in qubits): raise ValueError('The frontier for qubits on which the operations' 'to insert act cannot be after start.') next_moments = self.next_moments_operating_on(qubits, start) insertion_indices, _ = self._pick_inserted_ops_moment_indices( operations, start, frontier) self._push_frontier(frontier, next_moments) self._insert_operations(operations, insertion_indices) return frontier
[ "def", "insert_at_frontier", "(", "self", ",", "operations", ":", "ops", ".", "OP_TREE", ",", "start", ":", "int", ",", "frontier", ":", "Dict", "[", "ops", ".", "Qid", ",", "int", "]", "=", "None", ")", "->", "Dict", "[", "ops", ".", "Qid", ",", ...
Inserts operations inline at frontier. Args: operations: the operations to insert start: the moment at which to start inserting the operations frontier: frontier[q] is the earliest moment in which an operation acting on qubit q can be placed.
[ "Inserts", "operations", "inline", "at", "frontier", "." ]
python
train
kytos/python-openflow
pyof/v0x01/controller2switch/stats_reply.py
https://github.com/kytos/python-openflow/blob/4f2d0d08ab28e102ed88fe57a4ee17729f1e1bb7/pyof/v0x01/controller2switch/stats_reply.py#L35-L54
def pack(self, value=None): """Pack a StatsReply using the object's attributes. This method will pack the attribute body and body_type before pack the StatsReply object, then will return this struct as a binary data. Returns: bytes: Binary data with StatsReply packed. """ buff = self.body if not value: value = self.body if value and hasattr(value, 'pack'): self.body = BinaryData(value.pack()) stats_reply_packed = super().pack() self.body = buff return stats_reply_packed
[ "def", "pack", "(", "self", ",", "value", "=", "None", ")", ":", "buff", "=", "self", ".", "body", "if", "not", "value", ":", "value", "=", "self", ".", "body", "if", "value", "and", "hasattr", "(", "value", ",", "'pack'", ")", ":", "self", ".", ...
Pack a StatsReply using the object's attributes. This method will pack the attribute body and body_type before pack the StatsReply object, then will return this struct as a binary data. Returns: bytes: Binary data with StatsReply packed.
[ "Pack", "a", "StatsReply", "using", "the", "object", "s", "attributes", "." ]
python
train
scikit-hep/probfit
probfit/plotting.py
https://github.com/scikit-hep/probfit/blob/de3593798ea3877dd2785062bed6877dd9058a02/probfit/plotting.py#L135-L193
def draw_residual(x, y, yerr, xerr, show_errbars=True, ax=None, zero_line=True, grid=True, **kwargs): """Draw a residual plot on the axis. By default, if show_errbars if True, residuals are drawn as blue points with errorbars with no endcaps. If show_errbars is False, residuals are drawn as a bar graph with black bars. **Arguments** - **x** array of numbers, x-coordinates - **y** array of numbers, y-coordinates - **yerr** array of numbers, the uncertainty on the y-values - **xerr** array of numbers, the uncertainty on the x-values - **show_errbars** If True, draw the data as a bar plot, else as an errorbar plot - **ax** Optional matplotlib axis instance on which to draw the plot - **zero_line** If True, draw a red line at :math:`y = 0` along the full extent in :math:`x` - **grid** If True, draw gridlines - **kwargs** passed to ``ax.errorbar`` (if ``show_errbars`` is True) or ``ax.bar`` (if ``show_errbars`` if False) **Returns** The matplotlib axis instance the plot was drawn on. """ from matplotlib import pyplot as plt ax = plt.gca() if ax is None else ax if show_errbars: plotopts = dict(fmt='b.', capsize=0) plotopts.update(kwargs) pp = ax.errorbar(x, y, yerr, xerr, zorder=0, **plotopts) else: plotopts = dict(color='k') plotopts.update(kwargs) pp = ax.bar(x - xerr, y, width=2*xerr, **plotopts) if zero_line: ax.plot([x[0] - xerr[0], x[-1] + xerr[-1]], [0, 0], 'r-', zorder=2) # Take the `grid` kwarg to mean 'add a grid if True'; if grid is False and # we called ax.grid(False) then any existing grid on ax would be turned off if grid: ax.grid(grid) return ax
[ "def", "draw_residual", "(", "x", ",", "y", ",", "yerr", ",", "xerr", ",", "show_errbars", "=", "True", ",", "ax", "=", "None", ",", "zero_line", "=", "True", ",", "grid", "=", "True", ",", "*", "*", "kwargs", ")", ":", "from", "matplotlib", "impor...
Draw a residual plot on the axis. By default, if show_errbars if True, residuals are drawn as blue points with errorbars with no endcaps. If show_errbars is False, residuals are drawn as a bar graph with black bars. **Arguments** - **x** array of numbers, x-coordinates - **y** array of numbers, y-coordinates - **yerr** array of numbers, the uncertainty on the y-values - **xerr** array of numbers, the uncertainty on the x-values - **show_errbars** If True, draw the data as a bar plot, else as an errorbar plot - **ax** Optional matplotlib axis instance on which to draw the plot - **zero_line** If True, draw a red line at :math:`y = 0` along the full extent in :math:`x` - **grid** If True, draw gridlines - **kwargs** passed to ``ax.errorbar`` (if ``show_errbars`` is True) or ``ax.bar`` (if ``show_errbars`` if False) **Returns** The matplotlib axis instance the plot was drawn on.
[ "Draw", "a", "residual", "plot", "on", "the", "axis", "." ]
python
train
PythonCharmers/python-future
src/future/backports/email/header.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/email/header.py#L157-L176
def make_header(decoded_seq, maxlinelen=None, header_name=None,
                continuation_ws=' '):
    """Build a Header instance from decode_header()-style (string, charset) pairs.

    Each pair in *decoded_seq* is appended to a fresh Header; a charset
    given as a plain string name is first wrapped in a Charset instance.
    A charset of None (meaning us-ascii) is passed through untouched.

    Optional maxlinelen, header_name, and continuation_ws are forwarded
    to the Header constructor.
    """
    header = Header(maxlinelen=maxlinelen, header_name=header_name,
                    continuation_ws=continuation_ws)
    for text, charset in decoded_seq:
        # Only wrap real string names; None is handled by Header.append()
        if not (charset is None or isinstance(charset, Charset)):
            charset = Charset(charset)
        header.append(text, charset)
    return header
[ "def", "make_header", "(", "decoded_seq", ",", "maxlinelen", "=", "None", ",", "header_name", "=", "None", ",", "continuation_ws", "=", "' '", ")", ":", "h", "=", "Header", "(", "maxlinelen", "=", "maxlinelen", ",", "header_name", "=", "header_name", ",", ...
Create a Header from a sequence of pairs as returned by decode_header() decode_header() takes a header value string and returns a sequence of pairs of the format (decoded_string, charset) where charset is the string name of the character set. This function takes one of those sequence of pairs and returns a Header instance. Optional maxlinelen, header_name, and continuation_ws are as in the Header constructor.
[ "Create", "a", "Header", "from", "a", "sequence", "of", "pairs", "as", "returned", "by", "decode_header", "()" ]
python
train
RI-imaging/qpsphere
qpsphere/imagefit/interp.py
https://github.com/RI-imaging/qpsphere/blob/3cfa0e9fb8e81be8c820abbeccd47242e7972ac1/qpsphere/imagefit/interp.py#L124-L167
def get_border_phase(self, idn=0, idr=0):
    """Return one of nine border fields

    Evaluates (and caches) the phase image of a sphere whose index and
    radius are offset from the current center values by one step.

    Parameters
    ----------
    idn: int
        Index for refractive index.
        One of -1 (left), 0 (center), 1 (right)
    idr: int
        Index for radius.
        One of -1 (left), 0 (center), 1 (right)

    Returns
    -------
    pha:
        phase image produced by ``self.sphere_method`` for the offset
        parameters (cached per (idn, idr) slot)
    """
    assert idn in [-1, 0, 1]
    assert idr in [-1, 0, 1]
    # offset index/radius by one step in the requested direction
    n = self.sphere_index + self.dn * idn
    r = self.radius + self.dr * idr
    # convert to array indices (shift {-1,0,1} -> {0,1,2})
    idn += 1
    idr += 1
    # find out whether we need to compute a new border field:
    # the cache slot is valid only if it was computed for exactly
    # this (n, r) pair
    if self._n_border[idn, idr] == n and self._r_border[idn, idr] == r:
        if self.verbose > 3:
            print("Using cached border phase (n{}, r{})".format(idn, idr))
        # return previously computed field
        pha = self._border_pha[(idn, idr)]
    else:
        if self.verbose > 3:
            print("Computing border phase (n{}, r{})".format(idn, idr))
        # build the model call with the offset parameters; center stays
        # at the current position offsets
        kwargs = self.model_kwargs.copy()
        kwargs["radius"] = r
        kwargs["sphere_index"] = n
        kwargs["center"] = [self.posx_offset, self.posy_offset]
        tb = time.time()
        pha = self.sphere_method(**kwargs).pha
        if self.verbose > 2:
            print("Border phase computation time:", self.sphere_method.__module__, time.time() - tb)
        # store result and remember which (n, r) this slot now holds
        self._border_pha[(idn, idr)] = pha
        self._n_border[idn, idr] = n
        self._r_border[idn, idr] = r
    return pha
[ "def", "get_border_phase", "(", "self", ",", "idn", "=", "0", ",", "idr", "=", "0", ")", ":", "assert", "idn", "in", "[", "-", "1", ",", "0", ",", "1", "]", "assert", "idr", "in", "[", "-", "1", ",", "0", ",", "1", "]", "n", "=", "self", ...
Return one of nine border fields Parameters ---------- idn: int Index for refractive index. One of -1 (left), 0 (center), 1 (right) idr: int Index for radius. One of -1 (left), 0 (center), 1 (right)
[ "Return", "one", "of", "nine", "border", "fields" ]
python
train
pkkid/python-plexapi
plexapi/media.py
https://github.com/pkkid/python-plexapi/blob/9efbde96441c2bfbf410eacfb46e811e108e8bbc/plexapi/media.py#L191-L196
def parse(server, data, initpath):  # pragma: no cover seems to be dead code.
    """Factory: build the MediaPartStream subclass matching the XML streamType.

    streamType 1 -> VideoStream, 2 -> AudioStream, 3 -> SubtitleStream;
    any other value falls back to the MediaPartStream base class.
    """
    stream_type = cast(int, data.attrib.get('streamType'))
    stream_classes = {1: VideoStream, 2: AudioStream, 3: SubtitleStream}
    stream_cls = stream_classes.get(stream_type, MediaPartStream)
    return stream_cls(server, data, initpath)
[ "def", "parse", "(", "server", ",", "data", ",", "initpath", ")", ":", "# pragma: no cover seems to be dead code.", "STREAMCLS", "=", "{", "1", ":", "VideoStream", ",", "2", ":", "AudioStream", ",", "3", ":", "SubtitleStream", "}", "stype", "=", "cast", "(",...
Factory method returns a new MediaPartStream from xml data.
[ "Factory", "method", "returns", "a", "new", "MediaPartStream", "from", "xml", "data", "." ]
python
train
waqasbhatti/astrobase
astrobase/hatsurveys/hatlc.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/hatsurveys/hatlc.py#L482-L514
def _gzip_sqlitecurve(sqlitecurve, force=False): '''This just compresses the sqlitecurve in gzip format. FIXME: this doesn't work with gzip < 1.6 or non-GNU gzip (probably). ''' # -k to keep the input file just in case something explodes if force: cmd = 'gzip -k -f %s' % sqlitecurve else: cmd = 'gzip -k %s' % sqlitecurve try: outfile = '%s.gz' % sqlitecurve if os.path.exists(outfile) and not force: # get rid of the .sqlite file only os.remove(sqlitecurve) return outfile else: subprocess.check_output(cmd, shell=True) # check if the output file was successfully created if os.path.exists(outfile): return outfile else: return None except subprocess.CalledProcessError: return None
[ "def", "_gzip_sqlitecurve", "(", "sqlitecurve", ",", "force", "=", "False", ")", ":", "# -k to keep the input file just in case something explodes", "if", "force", ":", "cmd", "=", "'gzip -k -f %s'", "%", "sqlitecurve", "else", ":", "cmd", "=", "'gzip -k %s'", "%", ...
This just compresses the sqlitecurve in gzip format. FIXME: this doesn't work with gzip < 1.6 or non-GNU gzip (probably).
[ "This", "just", "compresses", "the", "sqlitecurve", "in", "gzip", "format", "." ]
python
valid
biolink/ontobio
ontobio/golr/golr_query.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/golr/golr_query.py#L1629-L1639
def map_id(self, id, prefix, closure_list):
    """Map an identifier through an equivalence closure list.

    Returns the first entry of *closure_list* whose prefix matches
    *prefix*; if no entry matches, the input id is returned unchanged.
    """
    # TODO: add option to fail if no mapping, or if >1 mapping
    wanted = prefix + ':'
    for candidate in closure_list:
        if candidate.startswith(wanted):
            return candidate
    # default to input
    return id
[ "def", "map_id", "(", "self", ",", "id", ",", "prefix", ",", "closure_list", ")", ":", "prefixc", "=", "prefix", "+", "':'", "ids", "=", "[", "eid", "for", "eid", "in", "closure_list", "if", "eid", ".", "startswith", "(", "prefixc", ")", "]", "# TODO...
Map identifiers based on an equivalence closure list.
[ "Map", "identifiers", "based", "on", "an", "equivalence", "closure", "list", "." ]
python
train
jmcarp/nplusone
tasks.py
https://github.com/jmcarp/nplusone/blob/2f1e25d8f52c691519c528c4ed583a315bc1ccf9/tasks.py#L62-L72
def watch_docs(ctx):
    """Rebuild the docs automatically whenever a file changes.

    Requires the sphinx_autobuild package; exits with status 1 and an
    installation hint when it is missing.
    """
    try:
        import sphinx_autobuild  # noqa
    except ImportError:
        for message in ('ERROR: watch task requires the sphinx_autobuild package.',
                        'Install it with:',
                        '    pip install sphinx-autobuild'):
            print(message)
        sys.exit(1)
    # do a full build first, then hand off to the autobuilder
    docs(ctx)
    command = 'sphinx-autobuild {} {}'.format(docs_dir, build_dir)
    ctx.run(command, pty=True)
[ "def", "watch_docs", "(", "ctx", ")", ":", "try", ":", "import", "sphinx_autobuild", "# noqa", "except", "ImportError", ":", "print", "(", "'ERROR: watch task requires the sphinx_autobuild package.'", ")", "print", "(", "'Install it with:'", ")", "print", "(", "' p...
Run build the docs when a file changes.
[ "Run", "build", "the", "docs", "when", "a", "file", "changes", "." ]
python
train
totalgood/nlpia
src/nlpia/talk.py
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/talk.py#L67-L80
def transcribe(decoder, audio_file, libdir=None):
    """ Decode streaming audio data from raw binary file on disk.

    :param decoder: NOTE(review): this argument is immediately shadowed by
        ``get_decoder()`` below, so the caller's decoder is never used --
        confirm whether that is intentional
    :param audio_file: path to the raw audio file to decode
    :param libdir: unused
    :return: result of ``evaluate_results`` on the decoder
    """
    decoder = get_decoder()
    decoder.start_utt()
    # feed the raw audio to the decoder in 1 KiB chunks; the with-block
    # guarantees the file handle is closed (previously it leaked)
    with open(audio_file, 'rb') as stream:
        while True:
            buf = stream.read(1024)
            if not buf:
                break
            decoder.process_raw(buf, False, False)
    decoder.end_utt()
    return evaluate_results(decoder)
[ "def", "transcribe", "(", "decoder", ",", "audio_file", ",", "libdir", "=", "None", ")", ":", "decoder", "=", "get_decoder", "(", ")", "decoder", ".", "start_utt", "(", ")", "stream", "=", "open", "(", "audio_file", ",", "'rb'", ")", "while", "True", "...
Decode streaming audio data from raw binary file on disk.
[ "Decode", "streaming", "audio", "data", "from", "raw", "binary", "file", "on", "disk", "." ]
python
train
OpenAgInitiative/openag_python
openag/cli/cloud/db.py
https://github.com/OpenAgInitiative/openag_python/blob/f6202340292bbf7185e1a7d4290188c0dacbb8d0/openag/cli/cloud/db.py#L52-L61
def deinit(ctx):
    """ Detach from the current cloud server

    Cancels database replication to the local server (if one is
    configured), logs out any signed-in user, and clears the stored
    cloud server URL.
    """
    utils.check_for_cloud_server()
    local_url = config["local_server"]["url"]
    if local_url:
        utils.cancel_global_db_replication()
    username = config["cloud_server"]["username"]
    if username:
        ctx.invoke(logout_user)
    config["cloud_server"]["url"] = None
[ "def", "deinit", "(", "ctx", ")", ":", "utils", ".", "check_for_cloud_server", "(", ")", "if", "config", "[", "\"local_server\"", "]", "[", "\"url\"", "]", ":", "utils", ".", "cancel_global_db_replication", "(", ")", "if", "config", "[", "\"cloud_server\"", ...
Detach from the current cloud server
[ "Detach", "from", "the", "current", "cloud", "server" ]
python
train
zsiciarz/pygcvs
pygcvs/helpers.py
https://github.com/zsiciarz/pygcvs/blob/ed5522ab9cf9237592a6af7a0bc8cad079afeb67/pygcvs/helpers.py#L23-L36
def dict_to_body(star_dict):
    """
    Build a PyEphem ``FixedBody`` from a dictionary of variable star data.

    Reads the ``name``, ``ra`` and ``dec`` keys of *star_dict*; the epoch
    is fixed at J2000. Requires `PyEphem <http://rhodesmill.org/pyephem/>`_
    to be installed.
    """
    if ephem is None:  # pragma: no cover
        raise NotImplementedError("Please install PyEphem in order to use dict_to_body.")
    star = ephem.FixedBody()
    star.name = star_dict['name']
    star._ra = ephem.hours(str(star_dict['ra']))
    star._dec = ephem.degrees(str(star_dict['dec']))
    star._epoch = ephem.J2000
    return star
[ "def", "dict_to_body", "(", "star_dict", ")", ":", "if", "ephem", "is", "None", ":", "# pragma: no cover", "raise", "NotImplementedError", "(", "\"Please install PyEphem in order to use dict_to_body.\"", ")", "body", "=", "ephem", ".", "FixedBody", "(", ")", "body", ...
Converts a dictionary of variable star data to a `Body` instance. Requires `PyEphem <http://rhodesmill.org/pyephem/>`_ to be installed.
[ "Converts", "a", "dictionary", "of", "variable", "star", "data", "to", "a", "Body", "instance", "." ]
python
train
sods/ods
pods/datasets.py
https://github.com/sods/ods/blob/3995c659f25a0a640f6009ed7fcc2559ce659b1d/pods/datasets.py#L1271-L1304
def olympic_sprints(data_set='rogers_girolami_data'):
    """All olympics sprint winning times for multiple output prediction.

    Stacks the six individual sprint datasets (100m/200m/400m, men and
    women) into one design matrix whose second column is an integer event
    code, suitable for multi-output modelling.

    Parameters
    ----------
    data_set : str
        Name passed through to ``data_details_return``.

    Returns
    -------
    dict
        Contains 'X' (year, event-code), 'Y' (winning time), covariate /
        response metadata and an 'output_info' mapping of event codes.
    """
    X = np.zeros((0, 2))
    Y = np.zeros((0, 1))
    # event name -> integer code, filled in loop order below
    cats = {}
    for i, dataset in enumerate([olympic_100m_men, olympic_100m_women, olympic_200m_men, olympic_200m_women, olympic_400m_men, olympic_400m_women]):
        data = dataset()
        year = data['X']
        time = data['Y']
        # append (year, event-code i) rows and the matching times
        X = np.vstack((X, np.hstack((year, np.ones_like(year)*i))))
        Y = np.vstack((Y, time))
        cats[dataset.__name__] = i
    # NOTE(review): this mutates the *last* dataset's dict (loop-local
    # `data`), not a fresh one; the mutated dict is then discarded in
    # favour of the literal below -- looks vestigial, confirm intent
    data['X'] = X
    data['Y'] = Y
    data['info'] = "Olympics sprint event winning for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning."
    return data_details_return({
        'X': X,
        'Y': Y,
        'covariates' : [decimalyear('year', '%Y'), discrete(cats, 'event')],
        'response' : ['time'],
        'info': "Olympics sprint event winning for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning.",
        'output_info': {
          0:'100m Men',
          1:'100m Women',
          2:'200m Men',
          3:'200m Women',
          4:'400m Men',
          5:'400m Women'}
        }, data_set)
[ "def", "olympic_sprints", "(", "data_set", "=", "'rogers_girolami_data'", ")", ":", "X", "=", "np", ".", "zeros", "(", "(", "0", ",", "2", ")", ")", "Y", "=", "np", ".", "zeros", "(", "(", "0", ",", "1", ")", ")", "cats", "=", "{", "}", "for", ...
All olympics sprint winning times for multiple output prediction.
[ "All", "olympics", "sprint", "winning", "times", "for", "multiple", "output", "prediction", "." ]
python
train
opencivicdata/pupa
pupa/importers/organizations.py
https://github.com/opencivicdata/pupa/blob/18e0ddc4344804987ee0f2227bf600375538dbd5/pupa/importers/organizations.py#L61-L122
def _prepare_imports(self, dicts): """ an override for prepare imports that sorts the imports by parent_id dependencies """ # all pseudo parent ids we've seen pseudo_ids = set() # pseudo matches pseudo_matches = {} # get prepared imports from parent prepared = dict(super(OrganizationImporter, self)._prepare_imports(dicts)) # collect parent pseudo_ids for _, data in prepared.items(): parent_id = data.get('parent_id', None) or '' if parent_id.startswith('~'): pseudo_ids.add(parent_id) # turn pseudo_ids into a tuple of dictionaries pseudo_ids = [(ppid, get_pseudo_id(ppid)) for ppid in pseudo_ids] # loop over all data again, finding the pseudo ids true json id for json_id, data in prepared.items(): # check if this matches one of our ppids for ppid, spec in pseudo_ids: match = True for k, v in spec.items(): if data[k] != v: match = False break if match: if ppid in pseudo_matches: raise UnresolvedIdError('multiple matches for pseudo id: ' + ppid) pseudo_matches[ppid] = json_id # toposort the nodes so parents are imported first network = Network() in_network = set() import_order = [] for json_id, data in prepared.items(): parent_id = data.get('parent_id', None) # resolve pseudo_ids to their json id before building the network if parent_id in pseudo_matches: parent_id = pseudo_matches[parent_id] network.add_node(json_id) if parent_id: # Right. There's an import dep. We need to add the edge from # the parent to the current node, so that we import the parent # before the current node. network.add_edge(parent_id, json_id) # resolve the sorted import order for jid in network.sort(): import_order.append((jid, prepared[jid])) in_network.add(jid) # ensure all data made it into network (paranoid check, should never fail) if in_network != set(prepared.keys()): # pragma: no cover raise PupaInternalError("import is missing nodes in network set") return import_order
[ "def", "_prepare_imports", "(", "self", ",", "dicts", ")", ":", "# all pseudo parent ids we've seen", "pseudo_ids", "=", "set", "(", ")", "# pseudo matches", "pseudo_matches", "=", "{", "}", "# get prepared imports from parent", "prepared", "=", "dict", "(", "super", ...
an override for prepare imports that sorts the imports by parent_id dependencies
[ "an", "override", "for", "prepare", "imports", "that", "sorts", "the", "imports", "by", "parent_id", "dependencies" ]
python
train
Alignak-monitoring/alignak
alignak/external_command.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L2165-L2178
def disable_host_freshness_check(self, host):
    """Disable freshness checking for a host and broadcast the change.

    Format of the line that triggers function call::

    DISABLE_HOST_FRESHNESS_CHECK;<host_name>

    No-op when freshness checking is already disabled on the host.

    :param host: host to edit
    :type host: alignak.objects.host.Host
    :return: None
    """
    # nothing to do if freshness checking is already off
    if not host.check_freshness:
        return
    host.modified_attributes |= DICT_MODATTR["MODATTR_FRESHNESS_CHECKS_ENABLED"].value
    host.check_freshness = False
    # notify the rest of the system about the updated host status
    self.send_an_element(host.get_update_status_brok())
[ "def", "disable_host_freshness_check", "(", "self", ",", "host", ")", ":", "if", "host", ".", "check_freshness", ":", "host", ".", "modified_attributes", "|=", "DICT_MODATTR", "[", "\"MODATTR_FRESHNESS_CHECKS_ENABLED\"", "]", ".", "value", "host", ".", "check_freshn...
Disable freshness check for a host Format of the line that triggers function call:: DISABLE_HOST_FRESHNESS_CHECK;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None
[ "Disable", "freshness", "check", "for", "a", "host", "Format", "of", "the", "line", "that", "triggers", "function", "call", "::" ]
python
train
bitesofcode/projexui
projexui/widgets/xviewwidget/xviewprofile.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xviewwidget/xviewprofile.py#L326-L358
def toXml(self, xparent=None):
    """
    Converts the data for this profile into an XML blob.

    Serializes the profile name, version, icon (as encoded pixmap data),
    custom data, description and (if present) a copy of the stored layout
    element under a 'profile' node.

    :param      xparent | <xml.etree.ElementTree.Element> || None
                parent element to attach the 'profile' node to; when None
                a free-standing element is created

    :return     <xml.etree.ElementTree.Element>
    """
    # create the profile node, attached to xparent when given
    if xparent is not None:
        xprofile = ElementTree.SubElement(xparent, 'profile')
    else:
        xprofile = ElementTree.Element('profile')
    xprofile.set('version', '2')
    xprofile.set('name', self.name())
    xprofile.set('profile_version', '{0:0.1f}'.format(self.version()))
    icon = self.icon()
    if not icon.isNull():
        # storePixmap presumably serializes the 48x48 pixmap to text
        # (base64?) -- TODO confirm against projexui.storePixmap
        data = projexui.storePixmap(self.icon().pixmap(48, 48))
        xico = ElementTree.SubElement(xprofile, 'icon')
        xico.text = data
    # custom data serializes itself under a 'data' child node
    xdata = ElementTree.SubElement(xprofile, 'data')
    self._customData.toXml(xdata)
    xdesc = ElementTree.SubElement(xprofile, 'desc')
    xdesc.text = self.description()
    # deep-copy the stored layout so the cached element is not mutated
    if self._xmlElement is not None:
        xlayout = copy.deepcopy(self._xmlElement)
        xlayout.tag = 'layout'
        xprofile.append(xlayout)
    return xprofile
[ "def", "toXml", "(", "self", ",", "xparent", "=", "None", ")", ":", "if", "xparent", "is", "not", "None", ":", "xprofile", "=", "ElementTree", ".", "SubElement", "(", "xparent", ",", "'profile'", ")", "else", ":", "xprofile", "=", "ElementTree", ".", "...
Converts the data for this profile into an XML blob. :return <xml.etree.ElementTree.Element>
[ "Converts", "the", "data", "for", "this", "profile", "into", "an", "XML", "blob", ".", ":", "return", "<xml", ".", "etree", ".", "ElementTree", ".", "Element", ">" ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_map/mp_tile.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_map/mp_tile.py#L140-L147
def url(self, service):
    '''Build the download URL for this tile from the named service template.

    Raises TileException when the service is not one of TILE_SERVICES.
    '''
    if service not in TILE_SERVICES:
        raise TileException('unknown tile service %s' % service)
    template = string.Template(TILE_SERVICES[service])
    x, y = self.tile
    # substitute $-placeholders from the tile's coordinates and zoom
    return template.substitute(TileServiceInfo(x, y, self.zoom))
[ "def", "url", "(", "self", ",", "service", ")", ":", "if", "service", "not", "in", "TILE_SERVICES", ":", "raise", "TileException", "(", "'unknown tile service %s'", "%", "service", ")", "url", "=", "string", ".", "Template", "(", "TILE_SERVICES", "[", "servi...
return URL for a tile
[ "return", "URL", "for", "a", "tile" ]
python
train
markovmodel/PyEMMA
pyemma/coordinates/data/featurization/featurizer.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/data/featurization/featurizer.py#L850-L860
def dimension(self):
    """Total output dimension of the currently selected features.

    Returns
    -------
    dim : int
        sum of the dimensions of all active features
    """
    total = 0
    for feature in self.active_features:
        total += feature.dimension
    return total
[ "def", "dimension", "(", "self", ")", ":", "dim", "=", "sum", "(", "f", ".", "dimension", "for", "f", "in", "self", ".", "active_features", ")", "return", "dim" ]
current dimension due to selected features Returns ------- dim : int total dimension due to all selection features
[ "current", "dimension", "due", "to", "selected", "features" ]
python
train