repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
globality-corp/microcosm-flask
microcosm_flask/basic_auth.py
https://github.com/globality-corp/microcosm-flask/blob/c2eaf57f03e7d041eea343751a4a90fcc80df418/microcosm_flask/basic_auth.py#L22-L34
def encode_basic_auth(username, password):
    """
    Encode basic auth credentials.

    Builds the value of an HTTP ``Authorization`` header:
    ``"Basic " + base64("username:password")``.
    """
    credentials = "{}:{}".format(username, password)
    token = b64encode(credentials.encode("utf-8"))
    return "Basic {}".format(token.decode("utf-8"))
[ "def", "encode_basic_auth", "(", "username", ",", "password", ")", ":", "return", "\"Basic {}\"", ".", "format", "(", "b64encode", "(", "\"{}:{}\"", ".", "format", "(", "username", ",", "password", ",", ")", ".", "encode", "(", "\"utf-8\"", ")", ")", ".", ...
Encode basic auth credentials.
[ "Encode", "basic", "auth", "credentials", "." ]
python
train
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/mmax2.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/mmax2.py#L361-L403
def spanstring2tokens(docgraph, span_string):
    """
    Converts a span string (e.g. 'word_88..word_91') into a list of token
    IDs (e.g. ['word_88', 'word_89', 'word_90', 'word_91']), keeping only
    the IDs that actually occur in the given document graph.

    Q: Why are some token IDs missing in a document graph?
    A: They either have been removed manually (e.g. because someone
    thought the annotation/tokenization was 'wrong') or have been renamed
    during the merging of several document graphs.

    Parameters
    ----------
    docgraph : MMAXDocumentGraph
        a document graph which represent an MMAX2 annotated document
    span_string : str
        a string representing a (non)-contiguous series of tokens by
        their token IDs

    Returns
    -------
    existing_tokens : list of str
        a list of all those tokens that are represented by the span
        string and which actually exist in the given graph
    """
    node_ids = set(docgraph.nodes())
    result = []
    for token_id in convert_spanstring(span_string):
        if token_id in node_ids:
            result.append(token_id)
        elif hasattr(docgraph, 'renamed_nodes'):
            # catch token IDs that were renamed while merging document
            # graphs / annotation layers
            renamed = docgraph.renamed_nodes.get(token_id)
            if renamed in node_ids:
                result.append(renamed)
        # else: no merging/renaming happened, so the token is missing
        # because its <word> element was removed from the associated
        # *_words.xml file (PCC corpus quirk, cf. issue #134) -- skip it
    return result
[ "def", "spanstring2tokens", "(", "docgraph", ",", "span_string", ")", ":", "tokens", "=", "convert_spanstring", "(", "span_string", ")", "existing_nodes", "=", "set", "(", "docgraph", ".", "nodes", "(", ")", ")", "existing_tokens", "=", "[", "]", "for", "tok...
Converts a span string (e.g. 'word_88..word_91') into a list of token IDs (e.g. ['word_88', 'word_89', 'word_90', 'word_91']. Token IDs that do not occur in the given document graph will be filtered out. Q: Why are some token IDs missing in a document graph? A: They either have been removed manually (e.g. because someone thought the annotation/tokenization was 'wrong') or have been renamed during the merging of several document graphs. Parameters ---------- docgraph : MMAXDocumentGraph a document graph which represent an MMAX2 annotated document span_string : str a string representing a (non)-contiguous series of tokens by their token IDs Returns ------- existing_tokens : list of str a list of all those tokens that are represented by the span string and which actually exist in the given graph
[ "Converts", "a", "span", "string", "(", "e", ".", "g", ".", "word_88", "..", "word_91", ")", "into", "a", "list", "of", "token", "IDs", "(", "e", ".", "g", ".", "[", "word_88", "word_89", "word_90", "word_91", "]", ".", "Token", "IDs", "that", "do"...
python
train
Yelp/kafka-utils
kafka_utils/kafka_cluster_manager/cmds/command.py
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cmds/command.py#L261-L302
def _extract_actions_unique_topics(self, movement_counts, max_movements, cluster_topology, max_movement_size): """Extract actions limiting to given max value such that the resultant has the minimum possible number of duplicate topics. Algorithm: 1. Group actions by by topic-name: {topic: action-list} 2. Iterate through the dictionary in circular fashion and keep extracting actions with until max_partition_movements are reached. :param movement_counts: list of tuple ((topic, partition), movement count) :param max_movements: max number of movements to extract :param cluster_topology: cluster topology containing the new proposed assignment for the cluster :param max_movement_size: maximum size of data to move at a time in extracted actions :return: list of tuple (topic, partitions) to include in the reduced plan """ # Group actions by topic topic_actions = defaultdict(list) for t_p, replica_change_cnt in movement_counts: topic_actions[t_p[0]].append((t_p, replica_change_cnt)) # Create reduced assignment minimizing duplication of topics extracted_actions = [] curr_movements = 0 curr_size = 0 action_available = True while curr_movements < max_movements and curr_size <= max_movement_size and action_available: action_available = False for topic, actions in six.iteritems(topic_actions): for action in actions: action_size = cluster_topology.partitions[action[0]].size if curr_movements + action[1] > max_movements or curr_size + action_size > max_movement_size: # Remove action since it won't be possible to use it actions.remove(action) else: # Append (topic, partition) to the list of movements action_available = True extracted_actions.append(action[0]) curr_movements += action[1] curr_size += action_size actions.remove(action) break return extracted_actions
[ "def", "_extract_actions_unique_topics", "(", "self", ",", "movement_counts", ",", "max_movements", ",", "cluster_topology", ",", "max_movement_size", ")", ":", "# Group actions by topic", "topic_actions", "=", "defaultdict", "(", "list", ")", "for", "t_p", ",", "repl...
Extract actions limiting to given max value such that the resultant has the minimum possible number of duplicate topics. Algorithm: 1. Group actions by topic-name: {topic: action-list} 2. Iterate through the dictionary in circular fashion and keep extracting actions until max_partition_movements are reached. :param movement_counts: list of tuple ((topic, partition), movement count) :param max_movements: max number of movements to extract :param cluster_topology: cluster topology containing the new proposed assignment for the cluster :param max_movement_size: maximum size of data to move at a time in extracted actions :return: list of tuple (topic, partitions) to include in the reduced plan
[ "Extract", "actions", "limiting", "to", "given", "max", "value", "such", "that", "the", "resultant", "has", "the", "minimum", "possible", "number", "of", "duplicate", "topics", "." ]
python
train
CityOfZion/neo-python-core
neocore/KeyPair.py
https://github.com/CityOfZion/neo-python-core/blob/786c02cc2f41712d70b1f064ae3d67f86167107f/neocore/KeyPair.py#L171-L187
def Export(self):
    """
    Export this KeyPair's private key in WIF format.

    Returns:
        str: The key in wif format
    """
    # WIF layout: 0x80 prefix | 32-byte private key | 0x01 compression
    # flag | first 4 bytes of the double-SHA256 checksum, base58-encoded.
    payload = bytearray(38)
    payload[0] = 0x80
    payload[1:33] = self.PrivateKey[0:32]
    payload[33] = 0x01
    digest = Crypto.Default().Hash256(payload[0:34])
    payload[34:38] = digest[0:4]
    return base58.b58encode(bytes(payload)).decode("utf-8")
[ "def", "Export", "(", "self", ")", ":", "data", "=", "bytearray", "(", "38", ")", "data", "[", "0", "]", "=", "0x80", "data", "[", "1", ":", "33", "]", "=", "self", ".", "PrivateKey", "[", "0", ":", "32", "]", "data", "[", "33", "]", "=", "...
Export this KeyPair's private key in WIF format. Returns: str: The key in wif format
[ "Export", "this", "KeyPair", "s", "private", "key", "in", "WIF", "format", "." ]
python
train
apache/incubator-mxnet
python/mxnet/ndarray/ndarray.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L2615-L2659
def _ufunc_helper(lhs, rhs, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None):
    """ Helper function for element-wise operation.
    The function will perform numpy-like broadcasting if needed and call different functions.

    Parameters
    --------
    lhs : NDArray or numeric value
        Left-hand side operand.

    rhs : NDArray or numeric value
        Right-hand operand,

    fn_array : function
        Function to be called if both lhs and rhs are of ``NDArray`` type.

    fn_scalar : function
        Function to be called if both lhs and rhs are numeric values.

    lfn_scalar : function
        Function to be called if lhs is ``NDArray`` while rhs is numeric value

    rfn_scalar : function
        Function to be called if lhs is numeric value while rhs is ``NDArray``;
        if none is provided, then the function is commutative, so rfn_scalar is equal to lfn_scalar

    Returns
    --------
    NDArray
        result array
    """
    lhs_is_scalar = isinstance(lhs, numeric_types)
    if lhs_is_scalar and isinstance(rhs, numeric_types):
        # both plain numbers
        return fn_scalar(lhs, rhs)
    if lhs_is_scalar:
        # scalar op array: use the reflected scalar function when given;
        # otherwise the operation is commutative and lfn_scalar suffices
        fn = lfn_scalar if rfn_scalar is None else rfn_scalar
        return fn(rhs, float(lhs))
    if isinstance(rhs, numeric_types):
        return lfn_scalar(lhs, float(rhs))
    if isinstance(rhs, NDArray):
        return fn_array(lhs, rhs)
    raise TypeError('type %s not supported' % str(type(rhs)))
[ "def", "_ufunc_helper", "(", "lhs", ",", "rhs", ",", "fn_array", ",", "fn_scalar", ",", "lfn_scalar", ",", "rfn_scalar", "=", "None", ")", ":", "if", "isinstance", "(", "lhs", ",", "numeric_types", ")", ":", "if", "isinstance", "(", "rhs", ",", "numeric_...
Helper function for element-wise operation. The function will perform numpy-like broadcasting if needed and call different functions. Parameters -------- lhs : NDArray or numeric value Left-hand side operand. rhs : NDArray or numeric value Right-hand operand, fn_array : function Function to be called if both lhs and rhs are of ``NDArray`` type. fn_scalar : function Function to be called if both lhs and rhs are numeric values. lfn_scalar : function Function to be called if lhs is ``NDArray`` while rhs is numeric value rfn_scalar : function Function to be called if lhs is numeric value while rhs is ``NDArray``; if none is provided, then the function is commutative, so rfn_scalar is equal to lfn_scalar Returns -------- NDArray result array
[ "Helper", "function", "for", "element", "-", "wise", "operation", ".", "The", "function", "will", "perform", "numpy", "-", "like", "broadcasting", "if", "needed", "and", "call", "different", "functions", "." ]
python
train
stitchfix/pyxley
pyxley/charts/mg/graphic.py
https://github.com/stitchfix/pyxley/blob/2dab00022d977d986169cd8a629b3a2f91be893f/pyxley/charts/mg/graphic.py#L164-L176
def legend(self, values):
    """Set the legend labels.

    Args:
        values (list): list of labels.

    Raises:
        TypeError: legend must be a list of labels.
    """
    # Docstring fix: the code raises TypeError, not ValueError as the
    # original docstring claimed.
    if not isinstance(values, list):
        raise TypeError("legend must be a list of labels")
    self.options["legend"] = values
[ "def", "legend", "(", "self", ",", "values", ")", ":", "if", "not", "isinstance", "(", "values", ",", "list", ")", ":", "raise", "TypeError", "(", "\"legend must be a list of labels\"", ")", "self", ".", "options", "[", "\"legend\"", "]", "=", "values" ]
Set the legend labels. Args: values (list): list of labels. Raises: TypeError: legend must be a list of labels.
[ "Set", "the", "legend", "labels", "." ]
python
train
ktdreyer/txkoji
txkoji/connection.py
https://github.com/ktdreyer/txkoji/blob/a7de380f29f745bf11730b27217208f6d4da7733/txkoji/connection.py#L117-L158
def from_web(self, url):
    """
    Reverse-engineer a kojiweb URL into an equivalent API response.

    Only a few kojiweb URL endpoints work here.

    See also connect_from_web().

    :param url: ``str``, for example
                "http://cbs.centos.org/koji/buildinfo?buildID=21155"
    :returns: deferred that when fired returns a Munch (dict-like)
              object with data about this resource, or None if we
              could not parse the url.
    """
    # Treat any input with whitespace as invalid:
    if re.search(r'\s', url):
        return defer.succeed(None)
    o = urlparse(url)
    endpoint = os.path.basename(o.path)
    # BUG FIX: the original only assigned ``query`` when the URL had a
    # query string, so query-less URLs crashed with an uncaught
    # NameError below instead of returning None.
    query = parse_qs(o.query) if o.query else {}
    # Known Kojiweb endpoints:
    endpoints = {
        'buildinfo': ('buildID', self.getBuild),
        'channelinfo': ('channelID', self.getChannel),
        'hostinfo': ('hostID', self.getHost),
        'packageinfo': ('packageID', self.getPackage),
        'taskinfo': ('taskID', self.getTaskInfo),
        'taginfo': ('tagID', self.getTag),
        'targetinfo': ('targetID', self.getTarget),
        'userinfo': ('userID', self.getUser),
    }
    try:
        (param, method) = endpoints[endpoint]
    except KeyError:
        return defer.succeed(None)
    try:
        id_str = query[param][0]
        id_ = int(id_str)
    except (KeyError, ValueError):
        return defer.succeed(None)
    return method(id_)
[ "def", "from_web", "(", "self", ",", "url", ")", ":", "# Treat any input with whitespace as invalid:", "if", "re", ".", "search", "(", "r'\\s'", ",", "url", ")", ":", "return", "defer", ".", "succeed", "(", "None", ")", "o", "=", "urlparse", "(", "url", ...
Reverse-engineer a kojiweb URL into an equivalent API response. Only a few kojiweb URL endpoints work here. See also connect_from_web(). :param url: ``str``, for example "http://cbs.centos.org/koji/buildinfo?buildID=21155" :returns: deferred that when fired returns a Munch (dict-like) object with data about this resource, or None if we could not parse the url.
[ "Reverse", "-", "engineer", "a", "kojiweb", "URL", "into", "an", "equivalent", "API", "response", "." ]
python
train
mfcloud/python-zvm-sdk
zvmsdk/utils.py
https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/zvmsdk/utils.py#L410-L423
def log_and_reraise_smt_request_failed(action=None):
    """Catch SDKSMTRequestFailed, log an error message, then re-raise.

    :param action: short description of the attempted action; when given,
        the logged/re-raised message is prefixed with "Failed to <action>. "
        (the original docstring documented a nonexistent ``msg`` parameter)
    """
    try:
        yield
    except exception.SDKSMTRequestFailed as err:
        msg = ''
        if action is not None:
            msg = "Failed to %s. " % action
        msg += "SMT error: %s" % err.format_message()
        LOG.error(msg)
        # Re-raise with the combined message, keeping the original results.
        raise exception.SDKSMTRequestFailed(err.results, msg)
[ "def", "log_and_reraise_smt_request_failed", "(", "action", "=", "None", ")", ":", "try", ":", "yield", "except", "exception", ".", "SDKSMTRequestFailed", "as", "err", ":", "msg", "=", "''", "if", "action", "is", "not", "None", ":", "msg", "=", "\"Failed to ...
Catch SDK base exception and print error log before reraise exception. msg: the error message to be logged.
[ "Catch", "SDK", "base", "exception", "and", "print", "error", "log", "before", "reraise", "exception", "." ]
python
train
mitsei/dlkit
dlkit/json_/assessment/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/objects.py#L1150-L1154
def set_children(self, child_ids):
    """Set the children IDs"""
    # Only legal when the assessment supports simple sequencing.
    if not self._supports_simple_sequencing():
        raise errors.IllegalState()
    self._my_map['childIds'] = list(map(str, child_ids))
[ "def", "set_children", "(", "self", ",", "child_ids", ")", ":", "if", "not", "self", ".", "_supports_simple_sequencing", "(", ")", ":", "raise", "errors", ".", "IllegalState", "(", ")", "self", ".", "_my_map", "[", "'childIds'", "]", "=", "[", "str", "("...
Set the children IDs
[ "Set", "the", "children", "IDs" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_ip_policy.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_ip_policy.py#L382-L401
def hide_routemap_holder_route_map_content_match_protocol_protocol_static_container_static(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
    route_map = ET.SubElement(holder, "route-map")
    # Key elements identifying the target route-map instance; popped in
    # the same order the generator emitted them.
    for tag, kwarg in (("name", 'name'), ("action-rm", 'action_rm'), ("instance", 'instance')):
        key_elem = ET.SubElement(route_map, tag)
        key_elem.text = kwargs.pop(kwarg)
    # Nested container path down to the <static> leaf.
    content = ET.SubElement(route_map, "content")
    match = ET.SubElement(content, "match")
    protocol = ET.SubElement(match, "protocol")
    container = ET.SubElement(protocol, "protocol-static-container")
    ET.SubElement(container, "static")
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "hide_routemap_holder_route_map_content_match_protocol_protocol_static_container_static", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "hide_routemap_holder", "=", "ET", ".", "SubElement", "(", "co...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
fastai/fastai
fastai/vision/image.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/image.py#L426-L432
def show_image(img:Image, ax:plt.Axes=None, figsize:tuple=(3,3), hide_axis:bool=True, cmap:str='binary',
               alpha:float=None, **kwargs)->plt.Axes:
    "Display `Image` in notebook."
    # Create a fresh figure/axes when the caller did not supply one.
    if ax is None:
        _, ax = plt.subplots(figsize=figsize)
    # Convert the fastai Image tensor to a numpy array for matplotlib.
    ax.imshow(image2np(img.data), cmap=cmap, alpha=alpha, **kwargs)
    if hide_axis:
        ax.axis('off')
    return ax
[ "def", "show_image", "(", "img", ":", "Image", ",", "ax", ":", "plt", ".", "Axes", "=", "None", ",", "figsize", ":", "tuple", "=", "(", "3", ",", "3", ")", ",", "hide_axis", ":", "bool", "=", "True", ",", "cmap", ":", "str", "=", "'binary'", ",...
Display `Image` in notebook.
[ "Display", "Image", "in", "notebook", "." ]
python
train
sibirrer/lenstronomy
lenstronomy/LensModel/Profiles/sersic.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/Profiles/sersic.py#L39-L53
def derivatives(self, x, y, n_sersic, R_sersic, k_eff, center_x=0, center_y=0):
    """
    returns df/dx and df/dy of the function
    """
    dx = x - center_x
    dy = y - center_y
    radius = np.sqrt(dx ** 2 + dy ** 2)
    # Clip the radius at the smoothing scale self._s (scalar and array
    # inputs need different handling).
    if isinstance(radius, (int, float)):
        radius = max(self._s, radius)
    else:
        radius[radius < self._s] = self._s
    alpha = -self.alpha_abs(x, y, n_sersic, R_sersic, k_eff, center_x, center_y)
    return alpha * dx / radius, alpha * dy / radius
[ "def", "derivatives", "(", "self", ",", "x", ",", "y", ",", "n_sersic", ",", "R_sersic", ",", "k_eff", ",", "center_x", "=", "0", ",", "center_y", "=", "0", ")", ":", "x_", "=", "x", "-", "center_x", "y_", "=", "y", "-", "center_y", "r", "=", "...
returns df/dx and df/dy of the function
[ "returns", "df", "/", "dx", "and", "df", "/", "dy", "of", "the", "function" ]
python
train
pytroll/satpy
satpy/readers/iasi_l2.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/iasi_l2.py#L125-L140
def read_dataset(fid, key):
    """Read a dataset from the file and wrap it as an xarray DataArray.

    :param fid: open file handle (HDF5-style; assumed h5py -- the data is
        accessed via ``fid["/PWLR/..."]`` and ``dset.attrs``)
    :param key: dataset key whose ``name`` selects the variable
    :return: float32 DataArray with fill values (> 1e30) replaced by NaN
    """
    dsid = DSET_NAMES[key.name]
    dset = fid["/PWLR/" + dsid]
    if dset.ndim == 3:
        dims = ['y', 'x', 'level']
    else:
        dims = ['y', 'x']
    # FIX: ``Dataset.value`` was deprecated and removed in h5py 3.0;
    # ``dset[()]`` is the supported way to read the full array.
    data = xr.DataArray(da.from_array(dset[()], chunks=CHUNK_SIZE),
                        name=key.name, dims=dims).astype(np.float32)
    data = xr.where(data > 1e30, np.nan, data)
    dset_attrs = dict(dset.attrs)
    data.attrs.update(dset_attrs)
    return data
[ "def", "read_dataset", "(", "fid", ",", "key", ")", ":", "dsid", "=", "DSET_NAMES", "[", "key", ".", "name", "]", "dset", "=", "fid", "[", "\"/PWLR/\"", "+", "dsid", "]", "if", "dset", ".", "ndim", "==", "3", ":", "dims", "=", "[", "'y'", ",", ...
Read dataset
[ "Read", "dataset" ]
python
train
GetmeUK/MongoFrames
mongoframes/queries.py
https://github.com/GetmeUK/MongoFrames/blob/7d2bd792235dfa77a9deecab5366f5f73480823d/mongoframes/queries.py#L249-L258
def SortBy(*qs):
    """Convert a list of Q objects into list of sort instructions"""
    instructions = []
    for query in qs:
        path = query._path
        if path.endswith('.desc'):
            # Strip the '.desc' suffix and sort descending
            instructions.append((path[:-len('.desc')], DESCENDING))
        else:
            instructions.append((path, ASCENDING))
    return instructions
[ "def", "SortBy", "(", "*", "qs", ")", ":", "sort", "=", "[", "]", "for", "q", "in", "qs", ":", "if", "q", ".", "_path", ".", "endswith", "(", "'.desc'", ")", ":", "sort", ".", "append", "(", "(", "q", ".", "_path", "[", ":", "-", "5", "]", ...
Convert a list of Q objects into list of sort instructions
[ "Convert", "a", "list", "of", "Q", "objects", "into", "list", "of", "sort", "instructions" ]
python
train
RJT1990/pyflux
pyflux/families/exponential.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/exponential.py#L251-L275
def neg_loglikelihood(y, mean, scale, shape, skewness):
    """ Negative loglikelihood function

    Parameters
    ----------
    y : np.ndarray
        univariate time series

    mean : np.ndarray
        array of location parameters for the Exponential distribution

    scale : float
        scale parameter for the Exponential distribution (unused here)

    shape : float
        tail thickness parameter for the Exponential distribution (unused here)

    skewness : float
        skewness parameter for the Exponential distribution (unused here)

    Returns
    ----------
    - Negative loglikelihood of the Exponential family
    """
    # The Exponential rate is ``mean``; scipy parameterizes by scale = 1/rate.
    log_probs = ss.expon.logpdf(x=y, scale=1/mean)
    return -np.sum(log_probs)
[ "def", "neg_loglikelihood", "(", "y", ",", "mean", ",", "scale", ",", "shape", ",", "skewness", ")", ":", "return", "-", "np", ".", "sum", "(", "ss", ".", "expon", ".", "logpdf", "(", "x", "=", "y", ",", "scale", "=", "1", "/", "mean", ")", ")"...
Negative loglikelihood function Parameters ---------- y : np.ndarray univariate time series mean : np.ndarray array of location parameters for the Exponential distribution scale : float scale parameter for the Exponential distribution shape : float tail thickness parameter for the Exponential distribution skewness : float skewness parameter for the Exponential distribution Returns ---------- - Negative loglikelihood of the Exponential family
[ "Negative", "loglikelihood", "function" ]
python
train
predicador37/pyjstat
pyjstat/pyjstat.py
https://github.com/predicador37/pyjstat/blob/45d671835a99eb573e1058cd43ce93ac4f85f9fa/pyjstat/pyjstat.py#L254-L284
def get_dim_index(js_dict, dim):
    """Get index from a given dimension.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        dim (string): dimension name obtained from JSON file.

    Returns:
        dim_index (pandas.DataFrame): DataFrame with index-based dimension
            data.
    """
    try:
        dim_index = js_dict['dimension'][dim]['category']['index']
    except KeyError:
        # Dimension without an explicit index: build a single-row index
        # from its label instead.
        dim_label = get_dim_label(js_dict, dim)
        dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
                                 index=[0], columns=['id', 'index'])
    else:
        if type(dim_index) is list:
            dim_index = pd.DataFrame(list(zip(dim_index,
                                              range(0, len(dim_index)))),
                                     index=dim_index,
                                     columns=['id', 'index'])
        else:
            dim_index = pd.DataFrame(list(zip(dim_index.keys(),
                                              dim_index.values())),
                                     index=dim_index.keys(),
                                     columns=['id', 'index'])
        # FIX: ``DataFrame.sort_index(by=...)`` was removed from pandas;
        # ``sort_values`` is the supported equivalent.
        dim_index = dim_index.sort_values(by='index')
    return dim_index
[ "def", "get_dim_index", "(", "js_dict", ",", "dim", ")", ":", "try", ":", "dim_index", "=", "js_dict", "[", "'dimension'", "]", "[", "dim", "]", "[", "'category'", "]", "[", "'index'", "]", "except", "KeyError", ":", "dim_label", "=", "get_dim_label", "(...
Get index from a given dimension. Args: js_dict (dict): dictionary containing dataset data and metadata. dim (string): dimension name obtained from JSON file. Returns: dim_index (pandas.DataFrame): DataFrame with index-based dimension data.
[ "Get", "index", "from", "a", "given", "dimension", "." ]
python
train
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/profilehooks.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/profilehooks.py#L137-L224
def profile(fn=None, skip=0, filename=None, immediate=False, dirs=False,
            sort=None, entries=40,
            profiler=('cProfile', 'profile', 'hotshot')):
    """Mark `fn` for profiling.

    If `skip` is > 0, the first `skip` calls to `fn` will not be profiled.

    If `immediate` is False, profiling results will be printed to
    sys.stdout on program termination.  Otherwise results will be printed
    after each call.

    If `dirs` is False only the name of the file will be printed.
    Otherwise the full path is used.

    `sort` can be a list of sort keys (defaulting to ['cumulative',
    'time', 'calls']).  The following ones are recognized::

        'calls'      -- call count
        'cumulative' -- cumulative time
        'file'       -- file name
        'line'       -- line number
        'module'     -- file name
        'name'       -- function name
        'nfl'        -- name/file/line
        'pcalls'     -- call count
        'stdname'    -- standard name
        'time'       -- internal time

    `entries` limits the output to the first N entries.

    `profiler` can be used to select the preferred profiler, or specify a
    sequence of them, in order of preference.  The default is
    ('cProfile', 'profile', 'hotshot').

    If `filename` is specified, the profile stats will be stored in the
    named file.  You can load them with pstats.Stats(filename).

    Usage::

        def fn(...):
            ...
        fn = profile(fn, skip=1)

    If you are using Python 2.4, you should be able to use the decorator
    syntax::

        @profile(skip=3)
        def fn(...):
            ...

    or just::

        @profile
        def fn(...):
            ...
    """
    if fn is None:
        # @profile() syntax -- we are a decorator maker
        def decorator(f):
            return profile(f, skip=skip, filename=filename,
                           immediate=immediate, dirs=dirs,
                           sort=sort, entries=entries, profiler=profiler)
        return decorator
    # @profile syntax -- we are a decorator.
    preferences = [profiler] if isinstance(profiler, str) else profiler
    profiler_class = None
    for candidate in preferences:
        if candidate in AVAILABLE_PROFILERS:
            profiler_class = AVAILABLE_PROFILERS[candidate]
            break
    if profiler_class is None:
        raise ValueError('only these profilers are available: %s'
                         % ', '.join(AVAILABLE_PROFILERS))
    fp = profiler_class(fn, skip=skip, filename=filename,
                        immediate=immediate, dirs=dirs,
                        sort=sort, entries=entries)
    # We cannot return fp or fp.__call__ directly as that would break
    # method definitions; instead we return a plain delegating function.
    def new_fn(*args, **kw):
        return fp(*args, **kw)
    new_fn.__doc__ = fn.__doc__
    new_fn.__name__ = fn.__name__
    new_fn.__dict__ = fn.__dict__
    new_fn.__module__ = fn.__module__
    return new_fn
[ "def", "profile", "(", "fn", "=", "None", ",", "skip", "=", "0", ",", "filename", "=", "None", ",", "immediate", "=", "False", ",", "dirs", "=", "False", ",", "sort", "=", "None", ",", "entries", "=", "40", ",", "profiler", "=", "(", "'cProfile'", ...
Mark `fn` for profiling. If `skip` is > 0, first `skip` calls to `fn` will not be profiled. If `immediate` is False, profiling results will be printed to sys.stdout on program termination. Otherwise results will be printed after each call. If `dirs` is False only the name of the file will be printed. Otherwise the full path is used. `sort` can be a list of sort keys (defaulting to ['cumulative', 'time', 'calls']). The following ones are recognized:: 'calls' -- call count 'cumulative' -- cumulative time 'file' -- file name 'line' -- line number 'module' -- file name 'name' -- function name 'nfl' -- name/file/line 'pcalls' -- call count 'stdname' -- standard name 'time' -- internal time `entries` limits the output to the first N entries. `profiler` can be used to select the preferred profiler, or specify a sequence of them, in order of preference. The default is ('cProfile'. 'profile', 'hotshot'). If `filename` is specified, the profile stats will be stored in the named file. You can load them pstats.Stats(filename). Usage:: def fn(...): ... fn = profile(fn, skip=1) If you are using Python 2.4, you should be able to use the decorator syntax:: @profile(skip=3) def fn(...): ... or just :: @profile def fn(...): ...
[ "Mark", "fn", "for", "profiling", "." ]
python
train
CiscoUcs/UcsPythonSDK
src/UcsSdk/utils/helper.py
https://github.com/CiscoUcs/UcsPythonSDK/blob/bf6b07d6abeacb922c92b198352eda4eb9e4629b/src/UcsSdk/utils/helper.py#L31-L41
def create_dn_wcard_filter(filter_class, filter_value):
    """ Creates wild card filter object for given class name, and values.

    :param filter_class: class name
    :param filter_value: filter property value
    :return WcardFilter: WcardFilter object
    """
    # Wildcard match on the "dn" property of the given class.
    dn_filter = WcardFilter()
    dn_filter.Class = filter_class
    dn_filter.Property = "dn"
    dn_filter.Value = filter_value
    return dn_filter
[ "def", "create_dn_wcard_filter", "(", "filter_class", ",", "filter_value", ")", ":", "wcard_filter", "=", "WcardFilter", "(", ")", "wcard_filter", ".", "Class", "=", "filter_class", "wcard_filter", ".", "Property", "=", "\"dn\"", "wcard_filter", ".", "Value", "=",...
Creates wild card filter object for given class name, and values. :param filter_class: class name :param filter_value: filter property value :return WcardFilter: WcardFilter object
[ "Creates", "wild", "card", "filter", "object", "for", "given", "class", "name", "and", "values", ".", ":", "param", "filter_class", ":", "class", "name", ":", "param", "filter_value", ":", "filter", "property", "value", ":", "return", "WcardFilter", ":", "Wc...
python
train
pmclanahan/django-celery-email
djcelery_email/utils.py
https://github.com/pmclanahan/django-celery-email/blob/6d0684b3d2d6751c4e5066f9215e130e6a91ea78/djcelery_email/utils.py#L10-L24
def chunked(iterator, chunksize):
    """
    Yields items from 'iterator' in chunks (lists) of size 'chunksize'.

    >>> list(chunked([1, 2, 3, 4, 5], chunksize=2))
    [[1, 2], [3, 4], [5]]
    """
    # Docstring fix: the original doctest showed tuples, but the
    # generator yields lists.
    chunk = []
    for idx, item in enumerate(iterator, 1):
        chunk.append(item)
        if idx % chunksize == 0:
            yield chunk
            chunk = []
    # Emit the final partial chunk, if any.
    if chunk:
        yield chunk
[ "def", "chunked", "(", "iterator", ",", "chunksize", ")", ":", "chunk", "=", "[", "]", "for", "idx", ",", "item", "in", "enumerate", "(", "iterator", ",", "1", ")", ":", "chunk", ".", "append", "(", "item", ")", "if", "idx", "%", "chunksize", "==",...
Yields items from 'iterator' in chunks (lists) of size 'chunksize'. >>> list(chunked([1, 2, 3, 4, 5], chunksize=2)) [[1, 2], [3, 4], [5]]
[ "Yields", "items", "from", "iterator", "in", "chunks", "of", "size", "chunksize", "." ]
python
train
pytroll/satpy
satpy/readers/hrit_jma.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/hrit_jma.py#L191-L204
def _check_sensor_platform_consistency(self, sensor):
    """Make sure sensor and platform are consistent

    Args:
        sensor (str) : Sensor name from YAML dataset definition

    Note:
        A mismatch is only logged as an error; no exception is raised.
        (The original docstring claimed ValueError was raised, but the
        code never raises.)
    """
    ref_sensor = SENSORS.get(self.platform, None)
    if ref_sensor and not sensor == ref_sensor:
        logger.error('Sensor-Platform mismatch: {} is not a payload '
                     'of {}. Did you choose the correct reader?'
                     .format(sensor, self.platform))
[ "def", "_check_sensor_platform_consistency", "(", "self", ",", "sensor", ")", ":", "ref_sensor", "=", "SENSORS", ".", "get", "(", "self", ".", "platform", ",", "None", ")", "if", "ref_sensor", "and", "not", "sensor", "==", "ref_sensor", ":", "logger", ".", ...
Make sure sensor and platform are consistent Args: sensor (str) : Sensor name from YAML dataset definition Raises: ValueError if they don't match
[ "Make", "sure", "sensor", "and", "platform", "are", "consistent" ]
python
train
fulfilio/fulfil-python-api
fulfil_client/client.py
https://github.com/fulfilio/fulfil-python-api/blob/180ac969c427b1292439a0371866aa5f169ffa6b/fulfil_client/client.py#L333-L343
def update(self, data=None, **kwargs):
    """
    Update the record right away.

    :param data: dictionary of changes (not modified by this call)
    :param kwargs: possibly a list of keyword args to change
    """
    # FIX: merge into a fresh dict so the caller's ``data`` mapping is
    # not mutated as a side effect (the original called data.update()).
    changes = dict(data) if data else {}
    changes.update(kwargs)
    return self.model.write([self.id], changes)
[ "def", "update", "(", "self", ",", "data", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "data", "is", "None", ":", "data", "=", "{", "}", "data", ".", "update", "(", "kwargs", ")", "return", "self", ".", "model", ".", "write", "(", "["...
Update the record right away. :param data: dictionary of changes :param kwargs: possibly a list of keyword args to change
[ "Update", "the", "record", "right", "away", "." ]
python
train
RJT1990/pyflux
pyflux/ssm/llm.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/ssm/llm.py#L264-L323
def plot_fit(self, intervals=True, **kwargs): """ Plots the fit of the model Parameters ---------- intervals : Boolean Whether to plot 95% confidence interval of states Returns ---------- None (plots data and the fit) """ import matplotlib.pyplot as plt import seaborn as sns figsize = kwargs.get('figsize',(10,7)) series_type = kwargs.get('series_type','Smoothed') if self.latent_variables.estimated is False: raise Exception("No latent variables estimated!") else: date_index = copy.deepcopy(self.index) date_index = date_index[self.integ:self.data_original.shape[0]+1] if series_type == 'Smoothed': mu, V = self.smoothed_state(self.data,self.latent_variables.get_z_values()) elif series_type == 'Filtered': mu, V, _, _, _ = self._model(self.data,self.latent_variables.get_z_values()) else: mu, V = self.smoothed_state(self.data,self.latent_variables.get_z_values()) mu = mu[0][:-1] V = V.ravel() plt.figure(figsize=figsize) plt.subplot(3, 1, 1) plt.title(self.data_name + " Raw and " + series_type) if intervals == True: alpha =[0.15*i/float(100) for i in range(50,12,-2)] plt.fill_between(date_index[2:], mu[2:] + 1.98*np.sqrt(V[:-1][2:]), mu[2:] - 1.98*np.sqrt(V[:-1][2:]), alpha=0.15,label='95% C.I.') plt.plot(date_index,self.data,label='Data') plt.plot(date_index,mu,label=series_type,c='black') plt.legend(loc=2) plt.subplot(3, 1, 2) plt.title(self.data_name + " Local Level") if intervals == True: alpha =[0.15*i/float(100) for i in range(50,12,-2)] plt.fill_between(date_index[2:], mu[2:] + 1.98*np.sqrt(V[:-1][2:]), mu[2:] - 1.98*np.sqrt(V[:-1][2:]), alpha=0.15,label='95% C.I.') plt.plot(date_index,mu,label='Local Level') plt.legend(loc=2) plt.subplot(3, 1, 3) plt.title("Measurement Noise") plt.plot(date_index,self.data-mu) plt.show()
[ "def", "plot_fit", "(", "self", ",", "intervals", "=", "True", ",", "*", "*", "kwargs", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "import", "seaborn", "as", "sns", "figsize", "=", "kwargs", ".", "get", "(", "'figsize'", ",", "(", ...
Plots the fit of the model Parameters ---------- intervals : Boolean Whether to plot 95% confidence interval of states Returns ---------- None (plots data and the fit)
[ "Plots", "the", "fit", "of", "the", "model" ]
python
train
googlefonts/ufo2ft
Lib/ufo2ft/postProcessor.py
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/postProcessor.py#L135-L168
def _build_production_name(self, glyph): """Build a production name for a single glyph.""" # use PostScript names from UFO lib if available if self._postscriptNames: production_name = self._postscriptNames.get(glyph.name) return production_name if production_name else glyph.name # use name derived from unicode value unicode_val = glyph.unicode if glyph.unicode is not None: return '%s%04X' % ( 'u' if unicode_val > 0xffff else 'uni', unicode_val) # use production name + last (non-script) suffix if possible parts = glyph.name.rsplit('.', 1) if len(parts) == 2 and parts[0] in self.glyphSet: return '%s.%s' % ( self._build_production_name(self.glyphSet[parts[0]]), parts[1]) # use ligature name, making sure to look up components with suffixes parts = glyph.name.split('.', 1) if len(parts) == 2: liga_parts = ['%s.%s' % (n, parts[1]) for n in parts[0].split('_')] else: liga_parts = glyph.name.split('_') if len(liga_parts) > 1 and all(n in self.glyphSet for n in liga_parts): unicode_vals = [self.glyphSet[n].unicode for n in liga_parts] if all(v and v <= 0xffff for v in unicode_vals): return 'uni' + ''.join('%04X' % v for v in unicode_vals) return '_'.join( self._build_production_name(self.glyphSet[n]) for n in liga_parts) return glyph.name
[ "def", "_build_production_name", "(", "self", ",", "glyph", ")", ":", "# use PostScript names from UFO lib if available", "if", "self", ".", "_postscriptNames", ":", "production_name", "=", "self", ".", "_postscriptNames", ".", "get", "(", "glyph", ".", "name", ")",...
Build a production name for a single glyph.
[ "Build", "a", "production", "name", "for", "a", "single", "glyph", "." ]
python
train
pacificclimate/cfmeta
cfmeta/cmipfile.py
https://github.com/pacificclimate/cfmeta/blob/a6eef78d0bce523bb44920ba96233f034b60316a/cfmeta/cmipfile.py#L176-L195
def get_var_name(nc): """Guesses the variable_name of an open NetCDF file """ non_variable_names = [ 'lat', 'lat_bnds', 'lon', 'lon_bnds', 'time', 'latitude', 'longitude', 'bnds' ] _vars = set(nc.variables.keys()) _vars.difference_update(set(non_variable_names)) if len(_vars) == 1: return _vars.pop() return None
[ "def", "get_var_name", "(", "nc", ")", ":", "non_variable_names", "=", "[", "'lat'", ",", "'lat_bnds'", ",", "'lon'", ",", "'lon_bnds'", ",", "'time'", ",", "'latitude'", ",", "'longitude'", ",", "'bnds'", "]", "_vars", "=", "set", "(", "nc", ".", "varia...
Guesses the variable_name of an open NetCDF file
[ "Guesses", "the", "variable_name", "of", "an", "open", "NetCDF", "file" ]
python
train
JasonKessler/scattertext
scattertext/representations/Word2VecFromParsedCorpus.py
https://github.com/JasonKessler/scattertext/blob/cacf1f687d218ee8cae3fc05cc901db824bb1b81/scattertext/representations/Word2VecFromParsedCorpus.py#L59-L77
def add_phrases(self, corpus): ''' Parameters ---------- corpus: Corpus for phrase augmentation Returns ------- New ParsedCorpus containing unigrams in corpus and new phrases ''' from gensim.models import Phrases assert isinstance(corpus, ParsedCorpus) self.phrases = [Phrases(CorpusAdapterForGensim.get_sentences(corpus), delimiter=' ')] for i in range(1, self.max_tokens_per_phrase): self.phrases.append(Phrases(self.phrases[-1][CorpusAdapterForGensim.get_sentences(corpus)])) return self
[ "def", "add_phrases", "(", "self", ",", "corpus", ")", ":", "from", "gensim", ".", "models", "import", "Phrases", "assert", "isinstance", "(", "corpus", ",", "ParsedCorpus", ")", "self", ".", "phrases", "=", "[", "Phrases", "(", "CorpusAdapterForGensim", "."...
Parameters ---------- corpus: Corpus for phrase augmentation Returns ------- New ParsedCorpus containing unigrams in corpus and new phrases
[ "Parameters", "----------", "corpus", ":", "Corpus", "for", "phrase", "augmentation" ]
python
train
dougalsutherland/skl-groups
skl_groups/kernels/transform.py
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/kernels/transform.py#L259-L290
def fit(self, X, y=None): ''' Learn the linear transformation to clipped eigenvalues. Note that if min_eig isn't zero and any of the original eigenvalues were exactly zero, this will leave those eigenvalues as zero. Parameters ---------- X : array, shape [n, n] The *symmetric* input similarities. If X is asymmetric, it will be treated as if it were symmetric based on its lower-triangular part. ''' n = X.shape[0] if X.shape != (n, n): raise TypeError("Input must be a square matrix.") # TODO: only get negative eigs somehow? memory = get_memory(self.memory) vals, vecs = memory.cache(scipy.linalg.eigh, ignore=['overwrite_a'])( X, overwrite_a=not self.copy) vals = vals.reshape(-1, 1) if self.min_eig == 0: inner = vals > self.min_eig else: with np.errstate(divide='ignore'): inner = np.where(vals >= self.min_eig, 1, np.where(vals == 0, 0, self.min_eig / vals)) self.clip_ = np.dot(vecs, inner * vecs.T) return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "n", "=", "X", ".", "shape", "[", "0", "]", "if", "X", ".", "shape", "!=", "(", "n", ",", "n", ")", ":", "raise", "TypeError", "(", "\"Input must be a square matrix.\"", ")", ...
Learn the linear transformation to clipped eigenvalues. Note that if min_eig isn't zero and any of the original eigenvalues were exactly zero, this will leave those eigenvalues as zero. Parameters ---------- X : array, shape [n, n] The *symmetric* input similarities. If X is asymmetric, it will be treated as if it were symmetric based on its lower-triangular part.
[ "Learn", "the", "linear", "transformation", "to", "clipped", "eigenvalues", "." ]
python
valid
timkpaine/pyEX
pyEX/marketdata/http.py
https://github.com/timkpaine/pyEX/blob/91cf751dafdb208a0c8b5377945e5808b99f94ba/pyEX/marketdata/http.py#L132-L149
def auction(symbol=None, token='', version=''): '''DEEP broadcasts an Auction Information Message every one second between the Lock-in Time and the auction match for Opening and Closing Auctions, and during the Display Only Period for IPO, Halt, and Volatility Auctions. Only IEX listed securities are eligible for IEX Auctions. https://iexcloud.io/docs/api/#deep-auction Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: dict: result ''' _raiseIfNotStr(symbol) if symbol: return _getJson('deep/auction?symbols=' + symbol, token, version) return _getJson('deep/auction', token, version)
[ "def", "auction", "(", "symbol", "=", "None", ",", "token", "=", "''", ",", "version", "=", "''", ")", ":", "_raiseIfNotStr", "(", "symbol", ")", "if", "symbol", ":", "return", "_getJson", "(", "'deep/auction?symbols='", "+", "symbol", ",", "token", ",",...
DEEP broadcasts an Auction Information Message every one second between the Lock-in Time and the auction match for Opening and Closing Auctions, and during the Display Only Period for IPO, Halt, and Volatility Auctions. Only IEX listed securities are eligible for IEX Auctions. https://iexcloud.io/docs/api/#deep-auction Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: dict: result
[ "DEEP", "broadcasts", "an", "Auction", "Information", "Message", "every", "one", "second", "between", "the", "Lock", "-", "in", "Time", "and", "the", "auction", "match", "for", "Opening", "and", "Closing", "Auctions", "and", "during", "the", "Display", "Only",...
python
valid
jalanb/pysyte
pysyte/paths.py
https://github.com/jalanb/pysyte/blob/4e278101943d1ceb1a6bcaf6ddc72052ecf13114/pysyte/paths.py#L855-L864
def pyc_to_py(path_to_file): """Change some file extensions to those which are more likely to be text >>> pyc_to_py('vim.pyc') == 'vim.py' True """ stem, ext = os.path.splitext(path_to_file) if ext == '.pyc': return '%s.py' % stem return path_to_file
[ "def", "pyc_to_py", "(", "path_to_file", ")", ":", "stem", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "path_to_file", ")", "if", "ext", "==", "'.pyc'", ":", "return", "'%s.py'", "%", "stem", "return", "path_to_file" ]
Change some file extensions to those which are more likely to be text >>> pyc_to_py('vim.pyc') == 'vim.py' True
[ "Change", "some", "file", "extensions", "to", "those", "which", "are", "more", "likely", "to", "be", "text" ]
python
train
harvard-nrg/yaxil
yaxil/__init__.py
https://github.com/harvard-nrg/yaxil/blob/af594082258e62d1904d6e6841fce0bb5c0bf309/yaxil/__init__.py#L151-L205
def subjects(auth, label=None, project=None): ''' Retrieve Subject tuples for subjects returned by this function. Example: >>> import yaxil >>> auth = yaxil.XnatAuth(url='...', username='...', password='...') >>> yaxil.subjects(auth, 'AB1234C') Subject(uri=u'/data/experiments/XNAT_S0001', label=u'AB1234C', id=u'XNAT_S0001', project=u'MyProject') :param auth: XNAT authentication :type auth: :mod:`yaxil.XnatAuth` :param label: XNAT Subject label :type label: str :param project: XNAT Subject Project :type project: str :returns: Subject objects :rtype: :mod:`yaxil.Subject` ''' url = '{0}/data/subjects'.format(auth.url.rstrip('/')) logger.debug('issuing http request %s', url) # compile query string columns = [ 'ID', 'label', 'project' ] payload = { 'columns': ','.join(columns) } if label: payload['label'] = label if project: payload['project'] = project # submit the request r = requests.get(url, params=payload, auth=(auth.username, auth.password), verify=CHECK_CERTIFICATE) # validate response if r.status_code != requests.codes.ok: raise AccessionError('response not ok ({0}) from {1}'.format(r.status_code, r.url)) try: results = r.json() __quick_validate(results) except ResultSetError as e: raise ResultSetError('{0} from {1}'.format(e.message, r.url)) results = results['ResultSet'] if int(results['totalRecords']) == 0: raise NoSubjectsError('no records returned from {0}'.format(r.url)) # start generating consumable results for the caller for item in results['Result']: yield Subject(uri=item['URI'], id=item['ID'], project=item['project'], label=item['label'])
[ "def", "subjects", "(", "auth", ",", "label", "=", "None", ",", "project", "=", "None", ")", ":", "url", "=", "'{0}/data/subjects'", ".", "format", "(", "auth", ".", "url", ".", "rstrip", "(", "'/'", ")", ")", "logger", ".", "debug", "(", "'issuing h...
Retrieve Subject tuples for subjects returned by this function. Example: >>> import yaxil >>> auth = yaxil.XnatAuth(url='...', username='...', password='...') >>> yaxil.subjects(auth, 'AB1234C') Subject(uri=u'/data/experiments/XNAT_S0001', label=u'AB1234C', id=u'XNAT_S0001', project=u'MyProject') :param auth: XNAT authentication :type auth: :mod:`yaxil.XnatAuth` :param label: XNAT Subject label :type label: str :param project: XNAT Subject Project :type project: str :returns: Subject objects :rtype: :mod:`yaxil.Subject`
[ "Retrieve", "Subject", "tuples", "for", "subjects", "returned", "by", "this", "function", ".", "Example", ":", ">>>", "import", "yaxil", ">>>", "auth", "=", "yaxil", ".", "XnatAuth", "(", "url", "=", "...", "username", "=", "...", "password", "=", "...", ...
python
train
ray-project/ray
python/ray/rllib/optimizers/rollout.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/rllib/optimizers/rollout.py#L43-L72
def collect_samples_straggler_mitigation(agents, train_batch_size): """Collects at least train_batch_size samples. This is the legacy behavior as of 0.6, and launches extra sample tasks to potentially improve performance but can result in many wasted samples. """ num_timesteps_so_far = 0 trajectories = [] agent_dict = {} for agent in agents: fut_sample = agent.sample.remote() agent_dict[fut_sample] = agent while num_timesteps_so_far < train_batch_size: # TODO(pcm): Make wait support arbitrary iterators and remove the # conversion to list here. [fut_sample], _ = ray.wait(list(agent_dict)) agent = agent_dict.pop(fut_sample) # Start task with next trajectory and record it in the dictionary. fut_sample2 = agent.sample.remote() agent_dict[fut_sample2] = agent next_sample = ray_get_and_free(fut_sample) num_timesteps_so_far += next_sample.count trajectories.append(next_sample) logger.info("Discarding {} sample tasks".format(len(agent_dict))) return SampleBatch.concat_samples(trajectories)
[ "def", "collect_samples_straggler_mitigation", "(", "agents", ",", "train_batch_size", ")", ":", "num_timesteps_so_far", "=", "0", "trajectories", "=", "[", "]", "agent_dict", "=", "{", "}", "for", "agent", "in", "agents", ":", "fut_sample", "=", "agent", ".", ...
Collects at least train_batch_size samples. This is the legacy behavior as of 0.6, and launches extra sample tasks to potentially improve performance but can result in many wasted samples.
[ "Collects", "at", "least", "train_batch_size", "samples", "." ]
python
train
troeger/opensubmit
web/opensubmit/models/submission.py
https://github.com/troeger/opensubmit/blob/384a95b7c6fa41e3f949a129d25dafd9a1c54859/web/opensubmit/models/submission.py#L234-L244
def qs_tobegraded(qs): ''' A filtering of the given Submission queryset for all submissions that are gradeable. This includes the following cases: - The submission was submitted and there are no tests. - The submission was successfully validity-tested, regardless of the full test status (not existent / failed / success). - The grading was already started, but not finished. The idea is to get a list of work to be done for the correctors. ''' return qs.filter(state__in=[Submission.SUBMITTED, Submission.SUBMITTED_TESTED, Submission.TEST_FULL_FAILED, Submission.GRADING_IN_PROGRESS])
[ "def", "qs_tobegraded", "(", "qs", ")", ":", "return", "qs", ".", "filter", "(", "state__in", "=", "[", "Submission", ".", "SUBMITTED", ",", "Submission", ".", "SUBMITTED_TESTED", ",", "Submission", ".", "TEST_FULL_FAILED", ",", "Submission", ".", "GRADING_IN_...
A filtering of the given Submission queryset for all submissions that are gradeable. This includes the following cases: - The submission was submitted and there are no tests. - The submission was successfully validity-tested, regardless of the full test status (not existent / failed / success). - The grading was already started, but not finished. The idea is to get a list of work to be done for the correctors.
[ "A", "filtering", "of", "the", "given", "Submission", "queryset", "for", "all", "submissions", "that", "are", "gradeable", ".", "This", "includes", "the", "following", "cases", ":" ]
python
train
chrippa/ds4drv
ds4drv/uinput.py
https://github.com/chrippa/ds4drv/blob/be7327fc3f5abb8717815f2a1a2ad3d335535d8a/ds4drv/uinput.py#L452-L469
def parse_uinput_mapping(name, mapping): """Parses a dict of mapping options.""" axes, buttons, mouse, mouse_options = {}, {}, {}, {} description = "ds4drv custom mapping ({0})".format(name) for key, attr in mapping.items(): key = key.upper() if key.startswith("BTN_") or key.startswith("KEY_"): buttons[key] = attr elif key.startswith("ABS_"): axes[key] = attr elif key.startswith("REL_"): mouse[key] = attr elif key.startswith("MOUSE_"): mouse_options[key] = attr create_mapping(name, description, axes=axes, buttons=buttons, mouse=mouse, mouse_options=mouse_options)
[ "def", "parse_uinput_mapping", "(", "name", ",", "mapping", ")", ":", "axes", ",", "buttons", ",", "mouse", ",", "mouse_options", "=", "{", "}", ",", "{", "}", ",", "{", "}", ",", "{", "}", "description", "=", "\"ds4drv custom mapping ({0})\"", ".", "for...
Parses a dict of mapping options.
[ "Parses", "a", "dict", "of", "mapping", "options", "." ]
python
train
gboeing/osmnx
osmnx/utils.py
https://github.com/gboeing/osmnx/blob/be59fd313bcb68af8fc79242c56194f1247e26e2/osmnx/utils.py#L863-L892
def geocode(query): """ Geocode a query string to (lat, lon) with the Nominatim geocoder. Parameters ---------- query : string the query string to geocode Returns ------- point : tuple the (lat, lon) coordinates returned by the geocoder """ # send the query to the nominatim geocoder and parse the json response url_template = 'https://nominatim.openstreetmap.org/search?format=json&limit=1&q={}' url = url_template.format(query) response = requests.get(url, timeout=60) results = response.json() # if results were returned, parse lat and long out of the result if len(results) > 0 and 'lat' in results[0] and 'lon' in results[0]: lat = float(results[0]['lat']) lon = float(results[0]['lon']) point = (lat, lon) log('Geocoded "{}" to {}'.format(query, point)) return point else: raise Exception('Nominatim geocoder returned no results for query "{}"'.format(query))
[ "def", "geocode", "(", "query", ")", ":", "# send the query to the nominatim geocoder and parse the json response", "url_template", "=", "'https://nominatim.openstreetmap.org/search?format=json&limit=1&q={}'", "url", "=", "url_template", ".", "format", "(", "query", ")", "respons...
Geocode a query string to (lat, lon) with the Nominatim geocoder. Parameters ---------- query : string the query string to geocode Returns ------- point : tuple the (lat, lon) coordinates returned by the geocoder
[ "Geocode", "a", "query", "string", "to", "(", "lat", "lon", ")", "with", "the", "Nominatim", "geocoder", "." ]
python
train
gabstopper/smc-python
smc/core/interfaces.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/core/interfaces.py#L229-L255
def set_backup_mgt(self, interface_id): """ Set this interface as a backup management interface. Backup management interfaces cannot be placed on an interface with only a CVI (requires node interface/s). To 'unset' the specified interface address, set interface id to None :: engine.interface_options.set_backup_mgt(2) Set backup on interface 1, VLAN 201:: engine.interface_options.set_backup_mgt('1.201') Remove management backup from engine:: engine.interface_options.set_backup_mgt(None) :param str,int interface_id: interface identifier to make the backup management server. :raises InterfaceNotFound: specified interface is not found :raises UpdateElementFailed: failure to make modification :return: None """ self.interface.set_unset(interface_id, 'backup_mgt') self._engine.update()
[ "def", "set_backup_mgt", "(", "self", ",", "interface_id", ")", ":", "self", ".", "interface", ".", "set_unset", "(", "interface_id", ",", "'backup_mgt'", ")", "self", ".", "_engine", ".", "update", "(", ")" ]
Set this interface as a backup management interface. Backup management interfaces cannot be placed on an interface with only a CVI (requires node interface/s). To 'unset' the specified interface address, set interface id to None :: engine.interface_options.set_backup_mgt(2) Set backup on interface 1, VLAN 201:: engine.interface_options.set_backup_mgt('1.201') Remove management backup from engine:: engine.interface_options.set_backup_mgt(None) :param str,int interface_id: interface identifier to make the backup management server. :raises InterfaceNotFound: specified interface is not found :raises UpdateElementFailed: failure to make modification :return: None
[ "Set", "this", "interface", "as", "a", "backup", "management", "interface", ".", "Backup", "management", "interfaces", "cannot", "be", "placed", "on", "an", "interface", "with", "only", "a", "CVI", "(", "requires", "node", "interface", "/", "s", ")", ".", ...
python
train
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/color/colormap.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/color/colormap.py#L31-L40
def _vector(x, type='row'): """Convert an object to a row or column vector.""" if isinstance(x, (list, tuple)): x = np.array(x, dtype=np.float32) elif not isinstance(x, np.ndarray): x = np.array([x], dtype=np.float32) assert x.ndim == 1 if type == 'column': x = x[:, None] return x
[ "def", "_vector", "(", "x", ",", "type", "=", "'row'", ")", ":", "if", "isinstance", "(", "x", ",", "(", "list", ",", "tuple", ")", ")", ":", "x", "=", "np", ".", "array", "(", "x", ",", "dtype", "=", "np", ".", "float32", ")", "elif", "not",...
Convert an object to a row or column vector.
[ "Convert", "an", "object", "to", "a", "row", "or", "column", "vector", "." ]
python
train
JoaoFelipe/pyposast
pyposast/visitor.py
https://github.com/JoaoFelipe/pyposast/blob/497c88c66b451ff2cd7354be1af070c92e119f41/pyposast/visitor.py#L1054-L1071
def visit_Print(self, node): """ Python 2 """ start_by_keyword(node, self.operators['print'], self.bytes_pos_to_utf8) node.op_pos = [ NodeWithPosition(node.uid, (node.first_line, node.first_col)) ] subnodes = [] if node.dest: min_first_max_last(node, node.dest) position = (node.dest.first_line, node.dest.first_col) last, first = self.operators['>>'].find_previous(position) node.op_pos.append(NodeWithPosition(last, first)) subnodes.append(node.dest) if node.values: min_first_max_last(node, node.values[-1]) subnodes.extend(node.values) self.comma_separated_list(node, subnodes)
[ "def", "visit_Print", "(", "self", ",", "node", ")", ":", "start_by_keyword", "(", "node", ",", "self", ".", "operators", "[", "'print'", "]", ",", "self", ".", "bytes_pos_to_utf8", ")", "node", ".", "op_pos", "=", "[", "NodeWithPosition", "(", "node", "...
Python 2
[ "Python", "2" ]
python
train
tdryer/hangups
hangups/client.py
https://github.com/tdryer/hangups/blob/85c0bf0a57698d077461283895707260f9dbf931/hangups/client.py#L636-L644
async def set_group_link_sharing_enabled( self, set_group_link_sharing_enabled_request ): """Set whether group link sharing is enabled for a conversation.""" response = hangouts_pb2.SetGroupLinkSharingEnabledResponse() await self._pb_request('conversations/setgrouplinksharingenabled', set_group_link_sharing_enabled_request, response) return response
[ "async", "def", "set_group_link_sharing_enabled", "(", "self", ",", "set_group_link_sharing_enabled_request", ")", ":", "response", "=", "hangouts_pb2", ".", "SetGroupLinkSharingEnabledResponse", "(", ")", "await", "self", ".", "_pb_request", "(", "'conversations/setgroupli...
Set whether group link sharing is enabled for a conversation.
[ "Set", "whether", "group", "link", "sharing", "is", "enabled", "for", "a", "conversation", "." ]
python
valid
sibirrer/lenstronomy
lenstronomy/LensModel/Profiles/p_jaffe.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/Profiles/p_jaffe.py#L14-L26
def density(self, r, rho0, Ra, Rs): """ computes the density :param x: :param y: :param rho0: :param Ra: :param Rs: :return: """ Ra, Rs = self._sort_ra_rs(Ra, Rs) rho = rho0 / ((1 + (r / Ra) ** 2) * (1 + (r / Rs) ** 2)) return rho
[ "def", "density", "(", "self", ",", "r", ",", "rho0", ",", "Ra", ",", "Rs", ")", ":", "Ra", ",", "Rs", "=", "self", ".", "_sort_ra_rs", "(", "Ra", ",", "Rs", ")", "rho", "=", "rho0", "/", "(", "(", "1", "+", "(", "r", "/", "Ra", ")", "**"...
computes the density :param x: :param y: :param rho0: :param Ra: :param Rs: :return:
[ "computes", "the", "density", ":", "param", "x", ":", ":", "param", "y", ":", ":", "param", "rho0", ":", ":", "param", "Ra", ":", ":", "param", "Rs", ":", ":", "return", ":" ]
python
train
dstufft/potpie
potpie/pseudo/__init__.py
https://github.com/dstufft/potpie/blob/1b12f25b77b8719418f88f49c45920c1eb8ee406/potpie/pseudo/__init__.py#L28-L50
def _skip_char_around(self, string, char='\n'): """ Custom pseudo method for skipping a given char around a string. The default char to be skipped is the new line (\n) one. Example: '\nHello\n' would call ``_base_compile`` with 'Hello' only. """ starts, ends = '', '' n = len(char) if string.startswith(char): starts = string[:n] string = string[n:] if string.endswith(char): ends = string[-n:] string = string[:-n] string = self._base_compile(string) if starts: string = starts + string if ends: string = string + ends return string
[ "def", "_skip_char_around", "(", "self", ",", "string", ",", "char", "=", "'\\n'", ")", ":", "starts", ",", "ends", "=", "''", ",", "''", "n", "=", "len", "(", "char", ")", "if", "string", ".", "startswith", "(", "char", ")", ":", "starts", "=", ...
Custom pseudo method for skipping a given char around a string. The default char to be skipped is the new line (\n) one. Example: '\nHello\n' would call ``_base_compile`` with 'Hello' only.
[ "Custom", "pseudo", "method", "for", "skipping", "a", "given", "char", "around", "a", "string", ".", "The", "default", "char", "to", "be", "skipped", "is", "the", "new", "line", "(", "\\", "n", ")", "one", ".", "Example", ":", "\\", "nHello", "\\", "...
python
train
craffel/mir_eval
mir_eval/segment.py
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/segment.py#L126-L173
def validate_structure(reference_intervals, reference_labels, estimated_intervals, estimated_labels): """Checks that the input annotations to a structure estimation metric (i.e. one that takes in both segment boundaries and their labels) look like valid segment times and labels, and throws helpful errors if not. Parameters ---------- reference_intervals : np.ndarray, shape=(n, 2) reference segment intervals, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. reference_labels : list, shape=(n,) reference segment labels, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. estimated_intervals : np.ndarray, shape=(m, 2) estimated segment intervals, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. estimated_labels : list, shape=(m,) estimated segment labels, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. """ for (intervals, labels) in [(reference_intervals, reference_labels), (estimated_intervals, estimated_labels)]: util.validate_intervals(intervals) if intervals.shape[0] != len(labels): raise ValueError('Number of intervals does not match number ' 'of labels') # Check only when intervals are non-empty if intervals.size > 0: # Make sure intervals start at 0 if not np.allclose(intervals.min(), 0.0): raise ValueError('Segment intervals do not start at 0') if reference_intervals.size == 0: warnings.warn("Reference intervals are empty.") if estimated_intervals.size == 0: warnings.warn("Estimated intervals are empty.") # Check only when intervals are non-empty if reference_intervals.size > 0 and estimated_intervals.size > 0: if not np.allclose(reference_intervals.max(), estimated_intervals.max()): raise ValueError('End times do not match')
[ "def", "validate_structure", "(", "reference_intervals", ",", "reference_labels", ",", "estimated_intervals", ",", "estimated_labels", ")", ":", "for", "(", "intervals", ",", "labels", ")", "in", "[", "(", "reference_intervals", ",", "reference_labels", ")", ",", ...
Checks that the input annotations to a structure estimation metric (i.e. one that takes in both segment boundaries and their labels) look like valid segment times and labels, and throws helpful errors if not. Parameters ---------- reference_intervals : np.ndarray, shape=(n, 2) reference segment intervals, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. reference_labels : list, shape=(n,) reference segment labels, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. estimated_intervals : np.ndarray, shape=(m, 2) estimated segment intervals, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. estimated_labels : list, shape=(m,) estimated segment labels, in the format returned by :func:`mir_eval.io.load_labeled_intervals`.
[ "Checks", "that", "the", "input", "annotations", "to", "a", "structure", "estimation", "metric", "(", "i", ".", "e", ".", "one", "that", "takes", "in", "both", "segment", "boundaries", "and", "their", "labels", ")", "look", "like", "valid", "segment", "tim...
python
train
fermiPy/fermipy
fermipy/utils.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/utils.py#L190-L200
def match_regex_list(patterns, string): """Perform a regex match of a string against a list of patterns. Returns true if the string matches at least one pattern in the list.""" for p in patterns: if re.findall(p, string): return True return False
[ "def", "match_regex_list", "(", "patterns", ",", "string", ")", ":", "for", "p", "in", "patterns", ":", "if", "re", ".", "findall", "(", "p", ",", "string", ")", ":", "return", "True", "return", "False" ]
Perform a regex match of a string against a list of patterns. Returns true if the string matches at least one pattern in the list.
[ "Perform", "a", "regex", "match", "of", "a", "string", "against", "a", "list", "of", "patterns", ".", "Returns", "true", "if", "the", "string", "matches", "at", "least", "one", "pattern", "in", "the", "list", "." ]
python
train
SHTOOLS/SHTOOLS
pyshtools/shclasses/shwindow.py
https://github.com/SHTOOLS/SHTOOLS/blob/9a115cf83002df2ddec6b7f41aeb6be688e285de/pyshtools/shclasses/shwindow.py#L469-L525
def multitaper_cross_spectrum(self, clm, slm, k, convention='power', unit='per_l', **kwargs): """ Return the multitaper cross-spectrum estimate and standard error. Usage ----- mtse, sd = x.multitaper_cross_spectrum(clm, slm, k, [convention, unit, lmax, taper_wt, clat, clon, coord_degrees]) Returns ------- mtse : ndarray, shape (lmax-lwin+1) The localized multitaper cross-spectrum estimate, where lmax is the smaller of the two spherical-harmonic bandwidths of clm and slm, and lwin is the spherical-harmonic bandwidth of the localization windows. sd : ndarray, shape (lmax-lwin+1) The standard error of the localized multitaper cross-spectrum estimate. Parameters ---------- clm : SHCoeffs class instance SHCoeffs class instance containing the spherical harmonic coefficients of the first global field to analyze. slm : SHCoeffs class instance SHCoeffs class instance containing the spherical harmonic coefficients of the second global field to analyze. k : int The number of tapers to be utilized in performing the multitaper spectral analysis. convention : str, optional, default = 'power' The type of output spectra: 'power' for power spectra, and 'energy' for energy spectra. unit : str, optional, default = 'per_l' The units of the output spectra. If 'per_l', the spectra contain the total contribution for each spherical harmonic degree l. If 'per_lm', the spectra contain the average contribution for each coefficient at spherical harmonic degree l. lmax : int, optional, default = min(clm.lmax, slm.lmax) The maximum spherical-harmonic degree of the input coefficients to use. taper_wt : ndarray, optional, default = None The weights used in calculating the multitaper cross-spectral estimates and standard error. clat, clon : float, optional, default = 90., 0. Latitude and longitude of the center of the spherical-cap localization windows. coord_degrees : bool, optional, default = True True if clat and clon are in degrees. 
""" return self._multitaper_cross_spectrum(clm, slm, k, convention=convention, unit=unit, **kwargs)
[ "def", "multitaper_cross_spectrum", "(", "self", ",", "clm", ",", "slm", ",", "k", ",", "convention", "=", "'power'", ",", "unit", "=", "'per_l'", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_multitaper_cross_spectrum", "(", "clm", ",", "s...
Return the multitaper cross-spectrum estimate and standard error. Usage ----- mtse, sd = x.multitaper_cross_spectrum(clm, slm, k, [convention, unit, lmax, taper_wt, clat, clon, coord_degrees]) Returns ------- mtse : ndarray, shape (lmax-lwin+1) The localized multitaper cross-spectrum estimate, where lmax is the smaller of the two spherical-harmonic bandwidths of clm and slm, and lwin is the spherical-harmonic bandwidth of the localization windows. sd : ndarray, shape (lmax-lwin+1) The standard error of the localized multitaper cross-spectrum estimate. Parameters ---------- clm : SHCoeffs class instance SHCoeffs class instance containing the spherical harmonic coefficients of the first global field to analyze. slm : SHCoeffs class instance SHCoeffs class instance containing the spherical harmonic coefficients of the second global field to analyze. k : int The number of tapers to be utilized in performing the multitaper spectral analysis. convention : str, optional, default = 'power' The type of output spectra: 'power' for power spectra, and 'energy' for energy spectra. unit : str, optional, default = 'per_l' The units of the output spectra. If 'per_l', the spectra contain the total contribution for each spherical harmonic degree l. If 'per_lm', the spectra contain the average contribution for each coefficient at spherical harmonic degree l. lmax : int, optional, default = min(clm.lmax, slm.lmax) The maximum spherical-harmonic degree of the input coefficients to use. taper_wt : ndarray, optional, default = None The weights used in calculating the multitaper cross-spectral estimates and standard error. clat, clon : float, optional, default = 90., 0. Latitude and longitude of the center of the spherical-cap localization windows. coord_degrees : bool, optional, default = True True if clat and clon are in degrees.
[ "Return", "the", "multitaper", "cross", "-", "spectrum", "estimate", "and", "standard", "error", "." ]
python
train
log2timeline/dfvfs
dfvfs/vfs/tsk_file_entry.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/tsk_file_entry.py#L119-L147
def CopyToDateTimeString(self): """Copies the date time value to a date and time string. Returns: str: date and time value formatted as: YYYY-MM-DD hh:mm:ss or YYYY-MM-DD hh:mm:ss.####### or YYYY-MM-DD hh:mm:ss.######### """ if self._timestamp is None: return None number_of_days, hours, minutes, seconds = self._GetTimeValues( self._timestamp) year, month, day_of_month = self._GetDateValues(number_of_days, 1970, 1, 1) if self.fraction_of_second is None: return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}'.format( year, month, day_of_month, hours, minutes, seconds) if pytsk3.TSK_VERSION_NUM >= 0x040200ff: return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}.{6:09d}'.format( year, month, day_of_month, hours, minutes, seconds, self.fraction_of_second) return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}.{6:07d}'.format( year, month, day_of_month, hours, minutes, seconds, self.fraction_of_second)
[ "def", "CopyToDateTimeString", "(", "self", ")", ":", "if", "self", ".", "_timestamp", "is", "None", ":", "return", "None", "number_of_days", ",", "hours", ",", "minutes", ",", "seconds", "=", "self", ".", "_GetTimeValues", "(", "self", ".", "_timestamp", ...
Copies the date time value to a date and time string. Returns: str: date and time value formatted as: YYYY-MM-DD hh:mm:ss or YYYY-MM-DD hh:mm:ss.####### or YYYY-MM-DD hh:mm:ss.#########
[ "Copies", "the", "date", "time", "value", "to", "a", "date", "and", "time", "string", "." ]
python
train
saltstack/salt
salt/modules/virt.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/virt.py#L767-L833
def _zfs_image_create(vm_name, pool, disk_name, hostname_property_name, sparse_volume, disk_size, disk_image_name): ''' Clones an existing image, or creates a new one. When cloning an image, disk_image_name refers to the source of the clone. If not specified, disk_size is used for creating a new zvol, and sparse_volume determines whether to create a thin provisioned volume. The cloned or new volume can have a ZFS property set containing the vm_name. Use hostname_property_name for specifying the key of this ZFS property. ''' if not disk_image_name and not disk_size: raise CommandExecutionError( 'Unable to create new disk {0}, please specify' ' the disk image name or disk size argument' .format(disk_name) ) if not pool: raise CommandExecutionError( 'Unable to create new disk {0}, please specify' ' the disk pool name'.format(disk_name)) destination_fs = os.path.join(pool, '{0}.{1}'.format(vm_name, disk_name)) log.debug('Image destination will be %s', destination_fs) existing_disk = __salt__['zfs.list'](name=pool) if 'error' in existing_disk: raise CommandExecutionError( 'Unable to create new disk {0}. {1}' .format(destination_fs, existing_disk['error']) ) elif destination_fs in existing_disk: log.info('ZFS filesystem %s already exists. Skipping creation', destination_fs) blockdevice_path = os.path.join('/dev/zvol', pool, vm_name) return blockdevice_path properties = {} if hostname_property_name: properties[hostname_property_name] = vm_name if disk_image_name: __salt__['zfs.clone']( name_a=disk_image_name, name_b=destination_fs, properties=properties) elif disk_size: __salt__['zfs.create']( name=destination_fs, properties=properties, volume_size=disk_size, sparse=sparse_volume) blockdevice_path = os.path.join('/dev/zvol', pool, '{0}.{1}' .format(vm_name, disk_name)) log.debug('Image path will be %s', blockdevice_path) return blockdevice_path
[ "def", "_zfs_image_create", "(", "vm_name", ",", "pool", ",", "disk_name", ",", "hostname_property_name", ",", "sparse_volume", ",", "disk_size", ",", "disk_image_name", ")", ":", "if", "not", "disk_image_name", "and", "not", "disk_size", ":", "raise", "CommandExe...
Clones an existing image, or creates a new one. When cloning an image, disk_image_name refers to the source of the clone. If not specified, disk_size is used for creating a new zvol, and sparse_volume determines whether to create a thin provisioned volume. The cloned or new volume can have a ZFS property set containing the vm_name. Use hostname_property_name for specifying the key of this ZFS property.
[ "Clones", "an", "existing", "image", "or", "creates", "a", "new", "one", "." ]
python
train
nicolargo/glances
glances/amps/glances_nginx.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/amps/glances_nginx.py#L76-L87
def update(self, process_list): """Update the AMP""" # Get the Nginx status logger.debug('{}: Update stats using status URL {}'.format(self.NAME, self.get('status_url'))) res = requests.get(self.get('status_url')) if res.ok: # u'Active connections: 1 \nserver accepts handled requests\n 1 1 1 \nReading: 0 Writing: 1 Waiting: 0 \n' self.set_result(res.text.rstrip()) else: logger.debug('{}: Can not grab status URL {} ({})'.format(self.NAME, self.get('status_url'), res.reason)) return self.result()
[ "def", "update", "(", "self", ",", "process_list", ")", ":", "# Get the Nginx status", "logger", ".", "debug", "(", "'{}: Update stats using status URL {}'", ".", "format", "(", "self", ".", "NAME", ",", "self", ".", "get", "(", "'status_url'", ")", ")", ")", ...
Update the AMP
[ "Update", "the", "AMP" ]
python
train
juju/charm-helpers
charmhelpers/contrib/hahelpers/cluster.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/hahelpers/cluster.py#L311-L351
def valid_hacluster_config(): ''' Check that either vip or dns-ha is set. If dns-ha then one of os-*-hostname must be set. Note: ha-bindiface and ha-macastport both have defaults and will always be set. We only care that either vip or dns-ha is set. :returns: boolean: valid config returns true. raises: HAIncompatibileConfig if settings conflict. raises: HAIncompleteConfig if settings are missing. ''' vip = config_get('vip') dns = config_get('dns-ha') if not(bool(vip) ^ bool(dns)): msg = ('HA: Either vip or dns-ha must be set but not both in order to ' 'use high availability') status_set('blocked', msg) raise HAIncorrectConfig(msg) # If dns-ha then one of os-*-hostname must be set if dns: dns_settings = ['os-internal-hostname', 'os-admin-hostname', 'os-public-hostname', 'os-access-hostname'] # At this point it is unknown if one or all of the possible # network spaces are in HA. Validate at least one is set which is # the minimum required. for setting in dns_settings: if config_get(setting): log('DNS HA: At least one hostname is set {}: {}' ''.format(setting, config_get(setting)), level=DEBUG) return True msg = ('DNS HA: At least one os-*-hostname(s) must be set to use ' 'DNS HA') status_set('blocked', msg) raise HAIncompleteConfig(msg) log('VIP HA: VIP is set {}'.format(vip), level=DEBUG) return True
[ "def", "valid_hacluster_config", "(", ")", ":", "vip", "=", "config_get", "(", "'vip'", ")", "dns", "=", "config_get", "(", "'dns-ha'", ")", "if", "not", "(", "bool", "(", "vip", ")", "^", "bool", "(", "dns", ")", ")", ":", "msg", "=", "(", "'HA: E...
Check that either vip or dns-ha is set. If dns-ha then one of os-*-hostname must be set. Note: ha-bindiface and ha-macastport both have defaults and will always be set. We only care that either vip or dns-ha is set. :returns: boolean: valid config returns true. raises: HAIncompatibileConfig if settings conflict. raises: HAIncompleteConfig if settings are missing.
[ "Check", "that", "either", "vip", "or", "dns", "-", "ha", "is", "set", ".", "If", "dns", "-", "ha", "then", "one", "of", "os", "-", "*", "-", "hostname", "must", "be", "set", "." ]
python
train
inonit/drf-haystack
drf_haystack/mixins.py
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/mixins.py#L47-L64
def facets(self, request): """ Sets up a list route for ``faceted`` results. This will add ie ^search/facets/$ to your existing ^search pattern. """ queryset = self.filter_facet_queryset(self.get_queryset()) for facet in request.query_params.getlist(self.facet_query_params_text): if ":" not in facet: continue field, value = facet.split(":", 1) if value: queryset = queryset.narrow('%s:"%s"' % (field, queryset.query.clean(value))) serializer = self.get_facet_serializer(queryset.facet_counts(), objects=queryset, many=False) return Response(serializer.data)
[ "def", "facets", "(", "self", ",", "request", ")", ":", "queryset", "=", "self", ".", "filter_facet_queryset", "(", "self", ".", "get_queryset", "(", ")", ")", "for", "facet", "in", "request", ".", "query_params", ".", "getlist", "(", "self", ".", "facet...
Sets up a list route for ``faceted`` results. This will add ie ^search/facets/$ to your existing ^search pattern.
[ "Sets", "up", "a", "list", "route", "for", "faceted", "results", ".", "This", "will", "add", "ie", "^search", "/", "facets", "/", "$", "to", "your", "existing", "^search", "pattern", "." ]
python
train
spyder-ide/spyder
spyder/preferences/languageserver.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/preferences/languageserver.py#L573-L579
def next_row(self): """Move to next row from currently selected row.""" row = self.currentIndex().row() rows = self.source_model.rowCount() if row + 1 == rows: row = -1 self.selectRow(row + 1)
[ "def", "next_row", "(", "self", ")", ":", "row", "=", "self", ".", "currentIndex", "(", ")", ".", "row", "(", ")", "rows", "=", "self", ".", "source_model", ".", "rowCount", "(", ")", "if", "row", "+", "1", "==", "rows", ":", "row", "=", "-", "...
Move to next row from currently selected row.
[ "Move", "to", "next", "row", "from", "currently", "selected", "row", "." ]
python
train
apache/airflow
airflow/utils/cli_action_loggers.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/cli_action_loggers.py#L57-L69
def on_pre_execution(**kwargs): """ Calls callbacks before execution. Note that any exception from callback will be logged but won't be propagated. :param kwargs: :return: None """ logging.debug("Calling callbacks: %s", __pre_exec_callbacks) for cb in __pre_exec_callbacks: try: cb(**kwargs) except Exception: logging.exception('Failed on pre-execution callback using %s', cb)
[ "def", "on_pre_execution", "(", "*", "*", "kwargs", ")", ":", "logging", ".", "debug", "(", "\"Calling callbacks: %s\"", ",", "__pre_exec_callbacks", ")", "for", "cb", "in", "__pre_exec_callbacks", ":", "try", ":", "cb", "(", "*", "*", "kwargs", ")", "except...
Calls callbacks before execution. Note that any exception from callback will be logged but won't be propagated. :param kwargs: :return: None
[ "Calls", "callbacks", "before", "execution", ".", "Note", "that", "any", "exception", "from", "callback", "will", "be", "logged", "but", "won", "t", "be", "propagated", ".", ":", "param", "kwargs", ":", ":", "return", ":", "None" ]
python
test
sibirrer/lenstronomy
lenstronomy/LensModel/numeric_lens_differentials.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/numeric_lens_differentials.py#L32-L39
def magnification(self, x, y, kwargs, diff=diff): """ computes the magnification :return: potential """ f_xx, f_xy, f_yx, f_yy = self.hessian(x, y, kwargs, diff=diff) det_A = (1 - f_xx) * (1 - f_yy) - f_xy*f_yx return 1/det_A
[ "def", "magnification", "(", "self", ",", "x", ",", "y", ",", "kwargs", ",", "diff", "=", "diff", ")", ":", "f_xx", ",", "f_xy", ",", "f_yx", ",", "f_yy", "=", "self", ".", "hessian", "(", "x", ",", "y", ",", "kwargs", ",", "diff", "=", "diff",...
computes the magnification :return: potential
[ "computes", "the", "magnification", ":", "return", ":", "potential" ]
python
train
Netflix-Skunkworks/cloudaux
cloudaux/aws/elbv2.py
https://github.com/Netflix-Skunkworks/cloudaux/blob/c4b0870c3ac68b1c69e71d33cf78b6a8bdf437ea/cloudaux/aws/elbv2.py#L104-L111
def describe_target_health(target_group_arn, targets=None, client=None): """ Permission: elasticloadbalancing:DescribeTargetHealth """ kwargs = dict(TargetGroupArn=target_group_arn) if targets: kwargs.update(Targets=targets) return client.describe_target_health(**kwargs)['TargetHealthDescriptions']
[ "def", "describe_target_health", "(", "target_group_arn", ",", "targets", "=", "None", ",", "client", "=", "None", ")", ":", "kwargs", "=", "dict", "(", "TargetGroupArn", "=", "target_group_arn", ")", "if", "targets", ":", "kwargs", ".", "update", "(", "Targ...
Permission: elasticloadbalancing:DescribeTargetHealth
[ "Permission", ":", "elasticloadbalancing", ":", "DescribeTargetHealth" ]
python
valid
BlueBrain/hpcbench
hpcbench/benchmark/standard.py
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/benchmark/standard.py#L286-L293
def metrics(self): """ :return: Description of metrics extracted by this class """ return dict( (name, getattr(Metrics, config['type'])) for name, config in six.iteritems(self._metrics) )
[ "def", "metrics", "(", "self", ")", ":", "return", "dict", "(", "(", "name", ",", "getattr", "(", "Metrics", ",", "config", "[", "'type'", "]", ")", ")", "for", "name", ",", "config", "in", "six", ".", "iteritems", "(", "self", ".", "_metrics", ")"...
:return: Description of metrics extracted by this class
[ ":", "return", ":", "Description", "of", "metrics", "extracted", "by", "this", "class" ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/policy/policy_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/policy/policy_client.py#L239-L255
def get_policy_type(self, project, type_id): """GetPolicyType. Retrieve a specific policy type by ID. :param str project: Project ID or project name :param str type_id: The policy ID. :rtype: :class:`<PolicyType> <azure.devops.v5_0.policy.models.PolicyType>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if type_id is not None: route_values['typeId'] = self._serialize.url('type_id', type_id, 'str') response = self._send(http_method='GET', location_id='44096322-2d3d-466a-bb30-d1b7de69f61f', version='5.0', route_values=route_values) return self._deserialize('PolicyType', response)
[ "def", "get_policy_type", "(", "self", ",", "project", ",", "type_id", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'"...
GetPolicyType. Retrieve a specific policy type by ID. :param str project: Project ID or project name :param str type_id: The policy ID. :rtype: :class:`<PolicyType> <azure.devops.v5_0.policy.models.PolicyType>`
[ "GetPolicyType", ".", "Retrieve", "a", "specific", "policy", "type", "by", "ID", ".", ":", "param", "str", "project", ":", "Project", "ID", "or", "project", "name", ":", "param", "str", "type_id", ":", "The", "policy", "ID", ".", ":", "rtype", ":", ":"...
python
train
ssut/py-googletrans
googletrans/client.py
https://github.com/ssut/py-googletrans/blob/4aebfb18faa45a7d7817fbd4b8fe8ff502bf9e81/googletrans/client.py#L211-L262
def detect(self, text): """Detect language of the input text :param text: The source text(s) whose language you want to identify. Batch detection is supported via sequence input. :type text: UTF-8 :class:`str`; :class:`unicode`; string sequence (list, tuple, iterator, generator) :rtype: Detected :rtype: :class:`list` (when a list is passed) Basic usage: >>> from googletrans import Translator >>> translator = Translator() >>> translator.detect('이 문장은 한글로 쓰여졌습니다.') <Detected lang=ko confidence=0.27041003> >>> translator.detect('この文章は日本語で書かれました。') <Detected lang=ja confidence=0.64889508> >>> translator.detect('This sentence is written in English.') <Detected lang=en confidence=0.22348526> >>> translator.detect('Tiu frazo estas skribita en Esperanto.') <Detected lang=eo confidence=0.10538048> Advanced usage: >>> langs = translator.detect(['한국어', '日本語', 'English', 'le français']) >>> for lang in langs: ... print(lang.lang, lang.confidence) ko 1 ja 0.92929292 en 0.96954316 fr 0.043500196 """ if isinstance(text, list): result = [] for item in text: lang = self.detect(item) result.append(lang) return result data = self._translate(text, dest='en', src='auto') # actual source language that will be recognized by Google Translator when the # src passed is equal to auto. src = '' confidence = 0.0 try: src = ''.join(data[8][0]) confidence = data[8][-2][0] except Exception: # pragma: nocover pass result = Detected(lang=src, confidence=confidence) return result
[ "def", "detect", "(", "self", ",", "text", ")", ":", "if", "isinstance", "(", "text", ",", "list", ")", ":", "result", "=", "[", "]", "for", "item", "in", "text", ":", "lang", "=", "self", ".", "detect", "(", "item", ")", "result", ".", "append",...
Detect language of the input text :param text: The source text(s) whose language you want to identify. Batch detection is supported via sequence input. :type text: UTF-8 :class:`str`; :class:`unicode`; string sequence (list, tuple, iterator, generator) :rtype: Detected :rtype: :class:`list` (when a list is passed) Basic usage: >>> from googletrans import Translator >>> translator = Translator() >>> translator.detect('이 문장은 한글로 쓰여졌습니다.') <Detected lang=ko confidence=0.27041003> >>> translator.detect('この文章は日本語で書かれました。') <Detected lang=ja confidence=0.64889508> >>> translator.detect('This sentence is written in English.') <Detected lang=en confidence=0.22348526> >>> translator.detect('Tiu frazo estas skribita en Esperanto.') <Detected lang=eo confidence=0.10538048> Advanced usage: >>> langs = translator.detect(['한국어', '日本語', 'English', 'le français']) >>> for lang in langs: ... print(lang.lang, lang.confidence) ko 1 ja 0.92929292 en 0.96954316 fr 0.043500196
[ "Detect", "language", "of", "the", "input", "text" ]
python
train
apache/airflow
airflow/models/variable.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/variable.py#L76-L99
def setdefault(cls, key, default, deserialize_json=False): """ Like a Python builtin dict object, setdefault returns the current value for a key, and if it isn't there, stores the default value and returns it. :param key: Dict key for this Variable :type key: str :param default: Default value to set and return if the variable isn't already in the DB :type default: Mixed :param deserialize_json: Store this as a JSON encoded value in the DB and un-encode it when retrieving a value :return: Mixed """ obj = Variable.get(key, default_var=None, deserialize_json=deserialize_json) if obj is None: if default is not None: Variable.set(key, default, serialize_json=deserialize_json) return default else: raise ValueError('Default Value must be set') else: return obj
[ "def", "setdefault", "(", "cls", ",", "key", ",", "default", ",", "deserialize_json", "=", "False", ")", ":", "obj", "=", "Variable", ".", "get", "(", "key", ",", "default_var", "=", "None", ",", "deserialize_json", "=", "deserialize_json", ")", "if", "o...
Like a Python builtin dict object, setdefault returns the current value for a key, and if it isn't there, stores the default value and returns it. :param key: Dict key for this Variable :type key: str :param default: Default value to set and return if the variable isn't already in the DB :type default: Mixed :param deserialize_json: Store this as a JSON encoded value in the DB and un-encode it when retrieving a value :return: Mixed
[ "Like", "a", "Python", "builtin", "dict", "object", "setdefault", "returns", "the", "current", "value", "for", "a", "key", "and", "if", "it", "isn", "t", "there", "stores", "the", "default", "value", "and", "returns", "it", "." ]
python
test
saltstack/salt
salt/modules/kubernetesmod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L1279-L1327
def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg)
[ "def", "replace_service", "(", "name", ",", "metadata", ",", "spec", ",", "source", ",", "template", ",", "old_service", ",", "saltenv", ",", "namespace", "=", "'default'", ",", "*", "*", "kwargs", ")", ":", "body", "=", "__create_object_body", "(", "kind"...
Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec.
[ "Replaces", "an", "existing", "service", "with", "a", "new", "one", "defined", "by", "name", "and", "namespace", "having", "the", "specificed", "metadata", "and", "spec", "." ]
python
train
googledatalab/pydatalab
datalab/bigquery/commands/_bigquery.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/commands/_bigquery.py#L646-L660
def _datasets_line(args): """Implements the BigQuery datasets magic used to display datasets in a project. The supported syntax is: %bigquery datasets [-f <filter>] [-p|--project <project_id>] Args: args: the arguments following '%bigquery datasets'. Returns: The HTML rendering for the table of datasets. """ filter_ = args['filter'] if args['filter'] else '*' return _render_list([str(dataset) for dataset in datalab.bigquery.Datasets(args['project']) if fnmatch.fnmatch(str(dataset), filter_)])
[ "def", "_datasets_line", "(", "args", ")", ":", "filter_", "=", "args", "[", "'filter'", "]", "if", "args", "[", "'filter'", "]", "else", "'*'", "return", "_render_list", "(", "[", "str", "(", "dataset", ")", "for", "dataset", "in", "datalab", ".", "bi...
Implements the BigQuery datasets magic used to display datasets in a project. The supported syntax is: %bigquery datasets [-f <filter>] [-p|--project <project_id>] Args: args: the arguments following '%bigquery datasets'. Returns: The HTML rendering for the table of datasets.
[ "Implements", "the", "BigQuery", "datasets", "magic", "used", "to", "display", "datasets", "in", "a", "project", "." ]
python
train
kristianfoerster/melodist
melodist/stationstatistics.py
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/stationstatistics.py#L111-L116
def calc_temperature_stats(self): """ Calculates statistics in order to derive diurnal patterns of temperature """ self.temp.max_delta = melodist.get_shift_by_data(self.data.temp, self._lon, self._lat, self._timezone) self.temp.mean_course = melodist.util.calculate_mean_daily_course_by_month(self.data.temp, normalize=True)
[ "def", "calc_temperature_stats", "(", "self", ")", ":", "self", ".", "temp", ".", "max_delta", "=", "melodist", ".", "get_shift_by_data", "(", "self", ".", "data", ".", "temp", ",", "self", ".", "_lon", ",", "self", ".", "_lat", ",", "self", ".", "_tim...
Calculates statistics in order to derive diurnal patterns of temperature
[ "Calculates", "statistics", "in", "order", "to", "derive", "diurnal", "patterns", "of", "temperature" ]
python
train
robhowley/nhlscrapi
nhlscrapi/scrapr/reportloader.py
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/scrapr/reportloader.py#L74-L86
def parse_matchup(self): """ Parse the banner matchup meta info for the game. :returns: ``self`` on success or ``None`` """ lx_doc = self.html_doc() try: if not self.matchup: self.matchup = self._fill_meta(lx_doc) return self except: return None
[ "def", "parse_matchup", "(", "self", ")", ":", "lx_doc", "=", "self", ".", "html_doc", "(", ")", "try", ":", "if", "not", "self", ".", "matchup", ":", "self", ".", "matchup", "=", "self", ".", "_fill_meta", "(", "lx_doc", ")", "return", "self", "exce...
Parse the banner matchup meta info for the game. :returns: ``self`` on success or ``None``
[ "Parse", "the", "banner", "matchup", "meta", "info", "for", "the", "game", ".", ":", "returns", ":", "self", "on", "success", "or", "None" ]
python
train
blink1073/oct2py
oct2py/thread_check.py
https://github.com/blink1073/oct2py/blob/bfc69d2168ae3d98258f95bbc55a858c21836b58/oct2py/thread_check.py#L39-L64
def thread_check(nthreads=3): """ Start a number of threads and verify each has a unique Octave session. Parameters ========== nthreads : int Number of threads to use. Raises ====== Oct2PyError If the thread does not sucessfully demonstrate independence. """ print("Starting {0} threads at {1}".format(nthreads, datetime.datetime.now())) threads = [] for i in range(nthreads): thread = ThreadClass() thread.setDaemon(True) thread.start() threads.append(thread) for thread in threads: thread.join() print('All threads closed at {0}'.format(datetime.datetime.now()))
[ "def", "thread_check", "(", "nthreads", "=", "3", ")", ":", "print", "(", "\"Starting {0} threads at {1}\"", ".", "format", "(", "nthreads", ",", "datetime", ".", "datetime", ".", "now", "(", ")", ")", ")", "threads", "=", "[", "]", "for", "i", "in", "...
Start a number of threads and verify each has a unique Octave session. Parameters ========== nthreads : int Number of threads to use. Raises ====== Oct2PyError If the thread does not sucessfully demonstrate independence.
[ "Start", "a", "number", "of", "threads", "and", "verify", "each", "has", "a", "unique", "Octave", "session", ".", "Parameters", "==========", "nthreads", ":", "int", "Number", "of", "threads", "to", "use", ".", "Raises", "======", "Oct2PyError", "If", "the",...
python
valid
Hackerfleet/hfos
hfos/database.py
https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/database.py#L102-L118
def clear_all(): """DANGER! *This command is a maintenance tool and clears the complete database.* """ sure = input("Are you sure to drop the complete database content? (Type " "in upppercase YES)") if not (sure == 'YES'): db_log('Not deleting the database.') sys.exit(5) client = pymongo.MongoClient(host=dbhost, port=dbport) db = client[dbname] for col in db.collection_names(include_system_collections=False): db_log("Dropping collection ", col, lvl=warn) db.drop_collection(col)
[ "def", "clear_all", "(", ")", ":", "sure", "=", "input", "(", "\"Are you sure to drop the complete database content? (Type \"", "\"in upppercase YES)\"", ")", "if", "not", "(", "sure", "==", "'YES'", ")", ":", "db_log", "(", "'Not deleting the database.'", ")", "sys",...
DANGER! *This command is a maintenance tool and clears the complete database.*
[ "DANGER!", "*", "This", "command", "is", "a", "maintenance", "tool", "and", "clears", "the", "complete", "database", ".", "*" ]
python
train
WoLpH/python-statsd
statsd/timer.py
https://github.com/WoLpH/python-statsd/blob/a757da04375c48d03d322246405b33382d37f03f/statsd/timer.py#L79-L89
def stop(self, subname='total'): '''Stop the timer and send the total since `start()` was run :keyword subname: The subname to report the data to (appended to the client name) :type subname: str ''' assert self._stop is None, ( 'Unable to stop, the timer is already stopped') self._stop = time.time() return self.send(subname, self._stop - self._start)
[ "def", "stop", "(", "self", ",", "subname", "=", "'total'", ")", ":", "assert", "self", ".", "_stop", "is", "None", ",", "(", "'Unable to stop, the timer is already stopped'", ")", "self", ".", "_stop", "=", "time", ".", "time", "(", ")", "return", "self",...
Stop the timer and send the total since `start()` was run :keyword subname: The subname to report the data to (appended to the client name) :type subname: str
[ "Stop", "the", "timer", "and", "send", "the", "total", "since", "start", "()", "was", "run" ]
python
train
shoebot/shoebot
lib/database/__init__.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/database/__init__.py#L363-L372
def remove(self, id, operator="=", key=None): """ Deletes the row with given id. """ if key == None: key = self._key try: id = unicode(id) except: pass sql = "delete from "+self._name+" where "+key+" "+operator+" ?" self._db._cur.execute(sql, (id,))
[ "def", "remove", "(", "self", ",", "id", ",", "operator", "=", "\"=\"", ",", "key", "=", "None", ")", ":", "if", "key", "==", "None", ":", "key", "=", "self", ".", "_key", "try", ":", "id", "=", "unicode", "(", "id", ")", "except", ":", "pass",...
Deletes the row with given id.
[ "Deletes", "the", "row", "with", "given", "id", "." ]
python
valid
brian-rose/climlab
climlab/dynamics/diffusion.py
https://github.com/brian-rose/climlab/blob/eae188a2ae9308229b8cbb8fe0b65f51b50ee1e6/climlab/dynamics/diffusion.py#L360-L381
def _solve_implicit_banded(current, banded_matrix): """Uses a banded solver for matrix inversion of a tridiagonal matrix. Converts the complete listed tridiagonal matrix *(nxn)* into a three row matrix *(3xn)* and calls :py:func:`scipy.linalg.solve_banded()`. :param array current: the current state of the variable for which matrix inversion should be computed :param array banded_matrix: complete diffusion matrix (*dimension: nxn*) :returns: output of :py:func:`scipy.linalg.solve_banded()` :rtype: array """ # can improve performance by storing the banded form once and not # recalculating it... # but whatever J = banded_matrix.shape[0] diag = np.zeros((3, J)) diag[1, :] = np.diag(banded_matrix, k=0) diag[0, 1:] = np.diag(banded_matrix, k=1) diag[2, :-1] = np.diag(banded_matrix, k=-1) return solve_banded((1, 1), diag, current)
[ "def", "_solve_implicit_banded", "(", "current", ",", "banded_matrix", ")", ":", "# can improve performance by storing the banded form once and not", "# recalculating it...", "# but whatever", "J", "=", "banded_matrix", ".", "shape", "[", "0", "]", "diag", "=", "np", "...
Uses a banded solver for matrix inversion of a tridiagonal matrix. Converts the complete listed tridiagonal matrix *(nxn)* into a three row matrix *(3xn)* and calls :py:func:`scipy.linalg.solve_banded()`. :param array current: the current state of the variable for which matrix inversion should be computed :param array banded_matrix: complete diffusion matrix (*dimension: nxn*) :returns: output of :py:func:`scipy.linalg.solve_banded()` :rtype: array
[ "Uses", "a", "banded", "solver", "for", "matrix", "inversion", "of", "a", "tridiagonal", "matrix", "." ]
python
train
emory-libraries/eulfedora
eulfedora/api.py
https://github.com/emory-libraries/eulfedora/blob/161826f3fdcdab4007f6fa7dfd9f1ecabc4bcbe4/eulfedora/api.py#L1045-L1054
def get_predicates(self, subject, object): """ Search for all subjects related to the specified subject and object. :param subject: :param object: :rtype: generator of RDF statements """ for statement in self.spo_search(subject=subject, object=object): yield str(statement[1])
[ "def", "get_predicates", "(", "self", ",", "subject", ",", "object", ")", ":", "for", "statement", "in", "self", ".", "spo_search", "(", "subject", "=", "subject", ",", "object", "=", "object", ")", ":", "yield", "str", "(", "statement", "[", "1", "]",...
Search for all subjects related to the specified subject and object. :param subject: :param object: :rtype: generator of RDF statements
[ "Search", "for", "all", "subjects", "related", "to", "the", "specified", "subject", "and", "object", "." ]
python
train
jjgomera/iapws
iapws/iapws97.py
https://github.com/jjgomera/iapws/blob/1e5812aab38212fb8a63736f61cdcfa427d223b1/iapws/iapws97.py#L2198-L2217
def _Backward3_v_Ph(P, h): """Backward equation for region 3, v=f(P,h) Parameters ---------- P : float Pressure, [MPa] h : float Specific enthalpy, [kJ/kg] Returns ------- v : float Specific volume, [m³/kg] """ hf = _h_3ab(P) if h <= hf: return _Backward3a_v_Ph(P, h) else: return _Backward3b_v_Ph(P, h)
[ "def", "_Backward3_v_Ph", "(", "P", ",", "h", ")", ":", "hf", "=", "_h_3ab", "(", "P", ")", "if", "h", "<=", "hf", ":", "return", "_Backward3a_v_Ph", "(", "P", ",", "h", ")", "else", ":", "return", "_Backward3b_v_Ph", "(", "P", ",", "h", ")" ]
Backward equation for region 3, v=f(P,h) Parameters ---------- P : float Pressure, [MPa] h : float Specific enthalpy, [kJ/kg] Returns ------- v : float Specific volume, [m³/kg]
[ "Backward", "equation", "for", "region", "3", "v", "=", "f", "(", "P", "h", ")" ]
python
train
theislab/anndata
anndata/base.py
https://github.com/theislab/anndata/blob/34f4eb63710628fbc15e7050e5efcac1d7806062/anndata/base.py#L2014-L2030
def write_csvs(self, dirname: PathLike, skip_data: bool = True, sep: str = ','): """Write annotation to ``.csv`` files. It is not possible to recover the full :class:`~anndata.AnnData` from the output of this function. Use :meth:`~anndata.AnnData.write` for this. Parameters ---------- dirname Name of directory to which to export. skip_data Skip the data matrix :attr:`X`. sep Separator for the data. """ from .readwrite.write import write_csvs write_csvs(dirname, self, skip_data=skip_data, sep=sep)
[ "def", "write_csvs", "(", "self", ",", "dirname", ":", "PathLike", ",", "skip_data", ":", "bool", "=", "True", ",", "sep", ":", "str", "=", "','", ")", ":", "from", ".", "readwrite", ".", "write", "import", "write_csvs", "write_csvs", "(", "dirname", "...
Write annotation to ``.csv`` files. It is not possible to recover the full :class:`~anndata.AnnData` from the output of this function. Use :meth:`~anndata.AnnData.write` for this. Parameters ---------- dirname Name of directory to which to export. skip_data Skip the data matrix :attr:`X`. sep Separator for the data.
[ "Write", "annotation", "to", ".", "csv", "files", "." ]
python
train
Gandi/gandi.cli
gandi/cli/modules/datacenter.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/datacenter.py#L100-L108
def from_dc_code(cls, dc_code): """Retrieve the datacenter id associated to a dc_code""" result = cls.list() dc_codes = {} for dc in result: if dc.get('dc_code'): dc_codes[dc['dc_code']] = dc['id'] return dc_codes.get(dc_code)
[ "def", "from_dc_code", "(", "cls", ",", "dc_code", ")", ":", "result", "=", "cls", ".", "list", "(", ")", "dc_codes", "=", "{", "}", "for", "dc", "in", "result", ":", "if", "dc", ".", "get", "(", "'dc_code'", ")", ":", "dc_codes", "[", "dc", "[",...
Retrieve the datacenter id associated to a dc_code
[ "Retrieve", "the", "datacenter", "id", "associated", "to", "a", "dc_code" ]
python
train
wadda/gps3
gps3/gps3.py
https://github.com/wadda/gps3/blob/91adcd7073b891b135b2a46d039ce2125cf09a09/gps3/gps3.py#L107-L124
def next(self, timeout=0): """Return empty unless new data is ready for the client. Arguments: timeout: Default timeout=0 range zero to float specifies a time-out as a floating point number in seconds. Will sit and wait for timeout seconds. When the timeout argument is omitted the function blocks until at least one file descriptor is ready. A time-out value of zero specifies a poll and never blocks. """ try: waitin, _waitout, _waiterror = select.select((self.streamSock,), (), (), timeout) if not waitin: return else: gpsd_response = self.streamSock.makefile() # '.makefile(buffering=4096)' In strictly Python3 self.response = gpsd_response.readline() return self.response except StopIteration as error: sys.stderr.write('The readline exception in GPSDSocket.next is--> {}'.format(error))
[ "def", "next", "(", "self", ",", "timeout", "=", "0", ")", ":", "try", ":", "waitin", ",", "_waitout", ",", "_waiterror", "=", "select", ".", "select", "(", "(", "self", ".", "streamSock", ",", ")", ",", "(", ")", ",", "(", ")", ",", "timeout", ...
Return empty unless new data is ready for the client. Arguments: timeout: Default timeout=0 range zero to float specifies a time-out as a floating point number in seconds. Will sit and wait for timeout seconds. When the timeout argument is omitted the function blocks until at least one file descriptor is ready. A time-out value of zero specifies a poll and never blocks.
[ "Return", "empty", "unless", "new", "data", "is", "ready", "for", "the", "client", ".", "Arguments", ":", "timeout", ":", "Default", "timeout", "=", "0", "range", "zero", "to", "float", "specifies", "a", "time", "-", "out", "as", "a", "floating", "point"...
python
train
dhain/potpy
potpy/wsgi.py
https://github.com/dhain/potpy/blob/e39a5a84f763fbf144b07a620afb02a5ff3741c9/potpy/wsgi.py#L136-L152
def match(self, methods, request_method): """Check for a method match. :param methods: A method or tuple of methods to match against. :param request_method: The method to check for a match. :returns: An empty :class:`dict` in the case of a match, or ``None`` if there is no matching handler for the given method. Example: >>> MethodRouter().match(('GET', 'HEAD'), 'HEAD') {} >>> MethodRouter().match('POST', 'DELETE') """ if isinstance(methods, basestring): return {} if request_method == methods else None return {} if request_method in methods else None
[ "def", "match", "(", "self", ",", "methods", ",", "request_method", ")", ":", "if", "isinstance", "(", "methods", ",", "basestring", ")", ":", "return", "{", "}", "if", "request_method", "==", "methods", "else", "None", "return", "{", "}", "if", "request...
Check for a method match. :param methods: A method or tuple of methods to match against. :param request_method: The method to check for a match. :returns: An empty :class:`dict` in the case of a match, or ``None`` if there is no matching handler for the given method. Example: >>> MethodRouter().match(('GET', 'HEAD'), 'HEAD') {} >>> MethodRouter().match('POST', 'DELETE')
[ "Check", "for", "a", "method", "match", "." ]
python
train
IdentityPython/pyop
src/pyop/storage.py
https://github.com/IdentityPython/pyop/blob/7b1385964f079c39752fce5f2dbcf458b8a92e56/src/pyop/storage.py#L173-L215
def _format_mongodb_uri(parsed_uri): """ Painstakingly reconstruct a MongoDB URI parsed using pymongo.uri_parser.parse_uri. :param parsed_uri: Result of pymongo.uri_parser.parse_uri :type parsed_uri: dict :return: New URI :rtype: str | unicode """ user_pass = '' if parsed_uri.get('username') and parsed_uri.get('password'): user_pass = '{username!s}:{password!s}@'.format(**parsed_uri) _nodes = [] for host, port in parsed_uri.get('nodelist'): if ':' in host and not host.endswith(']'): # IPv6 address without brackets host = '[{!s}]'.format(host) if port == 27017: _nodes.append(host) else: _nodes.append('{!s}:{!s}'.format(host, port)) nodelist = ','.join(_nodes) options = '' if parsed_uri.get('options'): _opt_list = [] for key, value in parsed_uri.get('options').items(): if isinstance(value, bool): value = str(value).lower() _opt_list.append('{!s}={!s}'.format(key, value)) options = '?' + '&'.join(_opt_list) db_name = parsed_uri.get('database') or '' res = "mongodb://{user_pass!s}{nodelist!s}/{db_name!s}{options!s}".format( user_pass=user_pass, nodelist=nodelist, db_name=db_name, # collection is ignored options=options) return res
[ "def", "_format_mongodb_uri", "(", "parsed_uri", ")", ":", "user_pass", "=", "''", "if", "parsed_uri", ".", "get", "(", "'username'", ")", "and", "parsed_uri", ".", "get", "(", "'password'", ")", ":", "user_pass", "=", "'{username!s}:{password!s}@'", ".", "for...
Painstakingly reconstruct a MongoDB URI parsed using pymongo.uri_parser.parse_uri. :param parsed_uri: Result of pymongo.uri_parser.parse_uri :type parsed_uri: dict :return: New URI :rtype: str | unicode
[ "Painstakingly", "reconstruct", "a", "MongoDB", "URI", "parsed", "using", "pymongo", ".", "uri_parser", ".", "parse_uri", "." ]
python
train
PredixDev/predixpy
predix/security/uaa.py
https://github.com/PredixDev/predixpy/blob/a0cb34cf40f716229351bb6d90d6ecace958c81f/predix/security/uaa.py#L413-L476
def update_client_grants(self, client_id, scope=[], authorities=[], grant_types=[], redirect_uri=[], replace=False): """ Will extend the client with additional scopes or authorities. Any existing scopes and authorities will be left as is unless asked to replace entirely. """ self.assert_has_permission('clients.write') client = self.get_client(client_id) if not client: raise ValueError("Must first create client: '%s'" % (client_id)) if replace: changes = { 'client_id': client_id, 'scope': scope, 'authorities': authorities, } else: changes = {'client_id': client_id} if scope: changes['scope'] = client['scope'] changes['scope'].extend(scope) if authorities: changes['authorities'] = client['authorities'] changes['authorities'].extend(authorities) if grant_types: if 'authorization_code' in grant_types and not redirect_uri: logging.warning("A redirect_uri is required for authorization_code.") changes['authorized_grant_types'] = client['authorized_grant_types'] changes['authorized_grant_types'].extend(grant_types) if redirect_uri: if 'redirect_uri' in client: changes['redirect_uri'] = client['redirect_uri'] changes['redirect_uri'].extend(redirect_uri) else: changes['redirect_uri'] = redirect_uri uri = self.uri + '/oauth/clients/' + client_id headers = { "pragma": "no-cache", "Cache-Control": "no-cache", "Content-Type": "application/json", "Accepts": "application/json", "Authorization": "Bearer " + self.get_token() } logging.debug("URI=" + str(uri)) logging.debug("HEADERS=" + str(headers)) logging.debug("BODY=" + json.dumps(changes)) response = requests.put(uri, headers=headers, data=json.dumps(changes)) logging.debug("STATUS=" + str(response.status_code)) if response.status_code == 200: return response else: logging.error(response.content) response.raise_for_status()
[ "def", "update_client_grants", "(", "self", ",", "client_id", ",", "scope", "=", "[", "]", ",", "authorities", "=", "[", "]", ",", "grant_types", "=", "[", "]", ",", "redirect_uri", "=", "[", "]", ",", "replace", "=", "False", ")", ":", "self", ".", ...
Will extend the client with additional scopes or authorities. Any existing scopes and authorities will be left as is unless asked to replace entirely.
[ "Will", "extend", "the", "client", "with", "additional", "scopes", "or", "authorities", ".", "Any", "existing", "scopes", "and", "authorities", "will", "be", "left", "as", "is", "unless", "asked", "to", "replace", "entirely", "." ]
python
train
nchopin/particles
particles/smc_samplers.py
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smc_samplers.py#L134-L155
def all_distinct(l, idx): """ Returns the list [l[i] for i in idx]  When needed, objects l[i] are replaced by a copy, to make sure that the elements of the list are all distinct Parameters --------- l: iterable idx: iterable that generates ints (e.g. ndarray of ints) Returns ------- a list """ out = [] deja_vu = [False for _ in l] for i in idx: to_add = cp.deepcopy(l[i]) if deja_vu[i] else l[i] out.append(to_add) deja_vu[i] = True return out
[ "def", "all_distinct", "(", "l", ",", "idx", ")", ":", "out", "=", "[", "]", "deja_vu", "=", "[", "False", "for", "_", "in", "l", "]", "for", "i", "in", "idx", ":", "to_add", "=", "cp", ".", "deepcopy", "(", "l", "[", "i", "]", ")", "if", "...
Returns the list [l[i] for i in idx]  When needed, objects l[i] are replaced by a copy, to make sure that the elements of the list are all distinct Parameters --------- l: iterable idx: iterable that generates ints (e.g. ndarray of ints) Returns ------- a list
[ "Returns", "the", "list", "[", "l", "[", "i", "]", "for", "i", "in", "idx", "]", "When", "needed", "objects", "l", "[", "i", "]", "are", "replaced", "by", "a", "copy", "to", "make", "sure", "that", "the", "elements", "of", "the", "list", "are", "...
python
train
StackStorm/pybind
pybind/slxos/v17s_1_02/overlay/access_list/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/overlay/access_list/__init__.py#L92-L113
def _set_type(self, v, load=False): """ Setter method for type, mapped from YANG variable /overlay/access_list/type (container) If this variable is read-only (config: false) in the source YANG file, then _set_type is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_type() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=type.type, is_container='container', presence=False, yang_name="type", rest_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'type'}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """type must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=type.type, is_container='container', presence=False, yang_name="type", rest_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'type'}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='container', is_config=True)""", }) self.__type = t if hasattr(self, '_set'): self._set()
[ "def", "_set_type", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", ...
Setter method for type, mapped from YANG variable /overlay/access_list/type (container) If this variable is read-only (config: false) in the source YANG file, then _set_type is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_type() directly.
[ "Setter", "method", "for", "type", "mapped", "from", "YANG", "variable", "/", "overlay", "/", "access_list", "/", "type", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source",...
python
train
yunojuno-archive/django-package-monitor
package_monitor/admin.py
https://github.com/yunojuno-archive/django-package-monitor/blob/534aa35ccfe187d2c55aeca0cb52b8278254e437/package_monitor/admin.py#L48-L72
def queryset(self, request, queryset): """Filter based on whether an update (of any sort) is available.""" if self.value() == '-1': return queryset.filter(latest_version__isnull=True) elif self.value() == '0': return ( queryset .filter( current_version__isnull=False, latest_version__isnull=False, latest_version=F('current_version') ) ) elif self.value() == '1': return ( queryset .filter( current_version__isnull=False, latest_version__isnull=False ).exclude( latest_version=F('current_version') ) ) else: return queryset
[ "def", "queryset", "(", "self", ",", "request", ",", "queryset", ")", ":", "if", "self", ".", "value", "(", ")", "==", "'-1'", ":", "return", "queryset", ".", "filter", "(", "latest_version__isnull", "=", "True", ")", "elif", "self", ".", "value", "(",...
Filter based on whether an update (of any sort) is available.
[ "Filter", "based", "on", "whether", "an", "update", "(", "of", "any", "sort", ")", "is", "available", "." ]
python
train
libyal/dtfabric
dtfabric/runtime/data_maps.py
https://github.com/libyal/dtfabric/blob/0d2b5719fa257f6e5c661a406737ebcf8c8db266/dtfabric/runtime/data_maps.py#L186-L197
def GetStructByteOrderString(self): """Retrieves the Python struct format string. Returns: str: format string as used by Python struct or None if format string cannot be determined. """ if not self._data_type_definition: return None return self._BYTE_ORDER_STRINGS.get( self._data_type_definition.byte_order, None)
[ "def", "GetStructByteOrderString", "(", "self", ")", ":", "if", "not", "self", ".", "_data_type_definition", ":", "return", "None", "return", "self", ".", "_BYTE_ORDER_STRINGS", ".", "get", "(", "self", ".", "_data_type_definition", ".", "byte_order", ",", "None...
Retrieves the Python struct format string. Returns: str: format string as used by Python struct or None if format string cannot be determined.
[ "Retrieves", "the", "Python", "struct", "format", "string", "." ]
python
train
pingali/dgit
dgitcore/contrib/representations/tableformat.py
https://github.com/pingali/dgit/blob/ecde01f40b98f0719dbcfb54452270ed2f86686d/dgitcore/contrib/representations/tableformat.py#L58-L86
def get_schema(self, filename): """ Guess schema using messytables """ table_set = self.read_file(filename) # Have I been able to read the filename if table_set is None: return [] # Get the first table as rowset row_set = table_set.tables[0] offset, headers = headers_guess(row_set.sample) row_set.register_processor(headers_processor(headers)) row_set.register_processor(offset_processor(offset + 1)) types = type_guess(row_set.sample, strict=True) # Get a sample as well.. sample = next(row_set.sample) clean = lambda v: str(v) if not isinstance(v, str) else v schema = [] for i, h in enumerate(headers): schema.append([h, str(types[i]), clean(sample[i].value)]) return schema
[ "def", "get_schema", "(", "self", ",", "filename", ")", ":", "table_set", "=", "self", ".", "read_file", "(", "filename", ")", "# Have I been able to read the filename", "if", "table_set", "is", "None", ":", "return", "[", "]", "# Get the first table as rowset", "...
Guess schema using messytables
[ "Guess", "schema", "using", "messytables" ]
python
valid
RJT1990/pyflux
pyflux/gas/gasllt.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/gas/gasllt.py#L450-L498
def _sim_prediction_bayes(self, h, simulations): """ Simulates a h-step ahead mean prediction Parameters ---------- h : int How many steps ahead for the prediction simulations : int How many simulations to perform Returns ---------- Matrix of simulations """ sim_vector = np.zeros([simulations,h]) for n in range(0,simulations): t_z = self.draw_latent_variables(nsims=1).T[0] theta, theta_t, Y, scores = self._model(t_z) t_z = np.array([self.latent_variables.z_list[k].prior.transform(t_z[k]) for k in range(t_z.shape[0])]) model_scale, model_shape, model_skewness = self._get_scale_and_shape(t_z) Y_exp = Y.copy() theta_exp = theta.copy() theta_t_exp = theta_t.copy() scores_exp = scores.copy() #(TODO: vectorize the inner construction here) for t in range(0,h): new_value1 = theta_t_exp[-1] + theta_exp[-1] + t_z[0]*scores_exp[-1] new_value2 = theta_t_exp[-1] + t_z[1]*scores_exp[-1] if self.model_name2 == "Exponential": rnd_value = self.family.draw_variable(1.0/self.link(new_value1),model_scale,model_shape,model_skewness,1)[0] else: rnd_value = self.family.draw_variable(self.link(new_value1),model_scale,model_shape,model_skewness,1)[0] Y_exp = np.append(Y_exp,[rnd_value]) theta_exp = np.append(theta_exp,[new_value1]) # For indexing consistency theta_t_exp = np.append(theta_t_exp,[new_value2]) scores_exp = np.append(scores_exp,scores[np.random.randint(scores.shape[0])]) # expectation of score is zero sim_vector[n] = Y_exp[-h:] return np.transpose(sim_vector)
[ "def", "_sim_prediction_bayes", "(", "self", ",", "h", ",", "simulations", ")", ":", "sim_vector", "=", "np", ".", "zeros", "(", "[", "simulations", ",", "h", "]", ")", "for", "n", "in", "range", "(", "0", ",", "simulations", ")", ":", "t_z", "=", ...
Simulates a h-step ahead mean prediction Parameters ---------- h : int How many steps ahead for the prediction simulations : int How many simulations to perform Returns ---------- Matrix of simulations
[ "Simulates", "a", "h", "-", "step", "ahead", "mean", "prediction" ]
python
train
dslackw/slpkg
slpkg/downloader.py
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/downloader.py#L91-L97
def _directory_prefix(self): """Downloader options for specific directory """ if self.downder == "wget": self.dir_prefix = "--directory-prefix=" elif self.downder == "aria2c": self.dir_prefix = "--dir="
[ "def", "_directory_prefix", "(", "self", ")", ":", "if", "self", ".", "downder", "==", "\"wget\"", ":", "self", ".", "dir_prefix", "=", "\"--directory-prefix=\"", "elif", "self", ".", "downder", "==", "\"aria2c\"", ":", "self", ".", "dir_prefix", "=", "\"--d...
Downloader options for specific directory
[ "Downloader", "options", "for", "specific", "directory" ]
python
train
liminspace/dju-common
dju_common/tcache.py
https://github.com/liminspace/dju-common/blob/c68860bb84d454a35e66275841c20f38375c2135/dju_common/tcache.py#L35-L65
def cache_invalidate_by_tags(tags, cache=None): """ Clear cache by tags. """ if isinstance(tags, basestring): tags = [tags] tag_keys = [CACHE_TAG_KEY % tag for tag in tags if tag] if not tag_keys: raise ValueError('Attr tags invalid') if cache is None: cache = default_cache tag_keys_for_delete = [] if cache.__class__.__name__ == 'RedisCache': from django_redis.exceptions import ConnectionInterrupted try: redis_client = cache.client.get_client() for tag_key in tag_keys: keys = redis_client.smembers(tag_key) if keys: cache.delete_many(keys) tag_keys_for_delete.append(tag_key) except ConnectionInterrupted: pass # todo add logging else: for tag_key in tag_keys: keys = cache.get(tag_key) if keys: cache.delete_many(keys) tag_keys_for_delete.append(tag_key) if tag_keys_for_delete: cache.delete_many(tag_keys_for_delete)
[ "def", "cache_invalidate_by_tags", "(", "tags", ",", "cache", "=", "None", ")", ":", "if", "isinstance", "(", "tags", ",", "basestring", ")", ":", "tags", "=", "[", "tags", "]", "tag_keys", "=", "[", "CACHE_TAG_KEY", "%", "tag", "for", "tag", "in", "ta...
Clear cache by tags.
[ "Clear", "cache", "by", "tags", "." ]
python
train
fifman/sockspy
sockspy/cli.py
https://github.com/fifman/sockspy/blob/9a925d5ce3ef63e1d8d2d56bf7cea0e5298d8c5b/sockspy/cli.py#L21-L44
def main(host, port, timeout, itimeout, qsize, backlog, maxtry, bsize, verbose, logfile=None, logcfgfile=None, cfgfile=None): """Simple python implementation of a socks5 proxy server. """ dict_cfg = {} if cfgfile: dict_cfg = app_config.get_config_by_file(cfgfile) def get_param(key, param, default): return param or dict_cfg.get(key, None) or default cfg = app_config.Config( address=(get_param("host", host, "localhost"), get_param("port", port, 3333)), timeout=get_param("timeout", timeout, 10), msg_size=get_param("bsize", bsize, 4096), max_try_turn=get_param("maxtry", maxtry, 3), backlog=get_param("backlog", backlog, 1024), max_queue_size=get_param("qsize", qsize, 100), endpoint_timeout=get_param("itimeout", itimeout, 60), verbose=get_param("verbose", verbose, 0), logfile=get_param("logfile", logfile, None), logcfgfile=get_param("logcfgfile", logcfgfile, None) ) click.echo("Starting sockspy...") sockspy_main.run(cfg)
[ "def", "main", "(", "host", ",", "port", ",", "timeout", ",", "itimeout", ",", "qsize", ",", "backlog", ",", "maxtry", ",", "bsize", ",", "verbose", ",", "logfile", "=", "None", ",", "logcfgfile", "=", "None", ",", "cfgfile", "=", "None", ")", ":", ...
Simple python implementation of a socks5 proxy server.
[ "Simple", "python", "implementation", "of", "a", "socks5", "proxy", "server", "." ]
python
train
edmondburnett/twitter-text-python
ttp/ttp.py
https://github.com/edmondburnett/twitter-text-python/blob/2a23ced35bfd34c4bc4b7148afd85771e9eb8669/ttp/ttp.py#L230-L253
def _parse_tags(self, match): '''Parse hashtags.''' mat = match.group(0) # Fix problems with the regex capturing stuff infront of the # tag = None for i in '#\uff03': pos = mat.rfind(i) if pos != -1: tag = i break pre, text = mat[:pos], mat[pos + 1:] if self._include_spans: span = match.span(0) # add an offset if pre is e.g. ' ' span = (span[0] + len(pre), span[1]) self._tags.append((text, span)) else: self._tags.append(text) if self._html: return '%s%s' % (pre, self.format_tag(tag, text))
[ "def", "_parse_tags", "(", "self", ",", "match", ")", ":", "mat", "=", "match", ".", "group", "(", "0", ")", "# Fix problems with the regex capturing stuff infront of the #", "tag", "=", "None", "for", "i", "in", "'#\\uff03'", ":", "pos", "=", "mat", ".", "r...
Parse hashtags.
[ "Parse", "hashtags", "." ]
python
train
Jajcus/pyxmpp2
pyxmpp2/streamsasl.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/streamsasl.py#L204-L227
def _process_sasl_challenge(self, stream, element): """Process incoming <sasl:challenge/> element. [initiating entity only] """ if not self.authenticator: logger.debug("Unexpected SASL challenge") return False content = element.text.encode("us-ascii") ret = self.authenticator.challenge(a2b_base64(content)) if isinstance(ret, sasl.Response): element = ElementTree.Element(RESPONSE_TAG) element.text = ret.encode() else: element = ElementTree.Element(ABORT_TAG) stream.write_element(element) if isinstance(ret, sasl.Failure): stream.disconnect() raise SASLAuthenticationFailed("SASL authentication failed") return True
[ "def", "_process_sasl_challenge", "(", "self", ",", "stream", ",", "element", ")", ":", "if", "not", "self", ".", "authenticator", ":", "logger", ".", "debug", "(", "\"Unexpected SASL challenge\"", ")", "return", "False", "content", "=", "element", ".", "text"...
Process incoming <sasl:challenge/> element. [initiating entity only]
[ "Process", "incoming", "<sasl", ":", "challenge", "/", ">", "element", "." ]
python
valid
PyMySQL/PyMySQL
pymysql/connections.py
https://github.com/PyMySQL/PyMySQL/blob/3674bc6fd064bf88524e839c07690e8c35223709/pymysql/connections.py#L344-L364
def close(self): """ Send the quit message and close the socket. See `Connection.close() <https://www.python.org/dev/peps/pep-0249/#Connection.close>`_ in the specification. :raise Error: If the connection is already closed. """ if self._closed: raise err.Error("Already closed") self._closed = True if self._sock is None: return send_data = struct.pack('<iB', 1, COMMAND.COM_QUIT) try: self._write_bytes(send_data) except Exception: pass finally: self._force_close()
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "_closed", ":", "raise", "err", ".", "Error", "(", "\"Already closed\"", ")", "self", ".", "_closed", "=", "True", "if", "self", ".", "_sock", "is", "None", ":", "return", "send_data", "=", "s...
Send the quit message and close the socket. See `Connection.close() <https://www.python.org/dev/peps/pep-0249/#Connection.close>`_ in the specification. :raise Error: If the connection is already closed.
[ "Send", "the", "quit", "message", "and", "close", "the", "socket", "." ]
python
train
pypa/pipenv
pipenv/vendor/click/utils.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/click/utils.py#L264-L277
def get_binary_stream(name): """Returns a system stream for byte processing. This essentially returns the stream from the sys module with the given name but it solves some compatibility issues between different Python versions. Primarily this function is necessary for getting binary streams on Python 3. :param name: the name of the stream to open. Valid names are ``'stdin'``, ``'stdout'`` and ``'stderr'`` """ opener = binary_streams.get(name) if opener is None: raise TypeError('Unknown standard stream %r' % name) return opener()
[ "def", "get_binary_stream", "(", "name", ")", ":", "opener", "=", "binary_streams", ".", "get", "(", "name", ")", "if", "opener", "is", "None", ":", "raise", "TypeError", "(", "'Unknown standard stream %r'", "%", "name", ")", "return", "opener", "(", ")" ]
Returns a system stream for byte processing. This essentially returns the stream from the sys module with the given name but it solves some compatibility issues between different Python versions. Primarily this function is necessary for getting binary streams on Python 3. :param name: the name of the stream to open. Valid names are ``'stdin'``, ``'stdout'`` and ``'stderr'``
[ "Returns", "a", "system", "stream", "for", "byte", "processing", ".", "This", "essentially", "returns", "the", "stream", "from", "the", "sys", "module", "with", "the", "given", "name", "but", "it", "solves", "some", "compatibility", "issues", "between", "diffe...
python
train
crs4/pydoop
pydoop/hdfs/file.py
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/file.py#L154-L163
def close(self): """ Close the file. """ if not self.closed: self.closed = True retval = self.f.close() if self.base_mode != "r": self.__size = self.fs.get_path_info(self.name)["size"] return retval
[ "def", "close", "(", "self", ")", ":", "if", "not", "self", ".", "closed", ":", "self", ".", "closed", "=", "True", "retval", "=", "self", ".", "f", ".", "close", "(", ")", "if", "self", ".", "base_mode", "!=", "\"r\"", ":", "self", ".", "__size"...
Close the file.
[ "Close", "the", "file", "." ]
python
train
MrYsLab/pymata-aio
pymata_aio/pymata_core.py
https://github.com/MrYsLab/pymata-aio/blob/015081a4628b9d47dfe3f8d6c698ff903f107810/pymata_aio/pymata_core.py#L758-L770
async def get_capability_report(self): """ This method requests and returns a Firmata capability query report :returns: A capability report in the form of a list """ if self.query_reply_data.get( PrivateConstants.CAPABILITY_RESPONSE) is None: await self._send_sysex(PrivateConstants.CAPABILITY_QUERY) while self.query_reply_data.get( PrivateConstants.CAPABILITY_RESPONSE) is None: await asyncio.sleep(self.sleep_tune) return self.query_reply_data.get(PrivateConstants.CAPABILITY_RESPONSE)
[ "async", "def", "get_capability_report", "(", "self", ")", ":", "if", "self", ".", "query_reply_data", ".", "get", "(", "PrivateConstants", ".", "CAPABILITY_RESPONSE", ")", "is", "None", ":", "await", "self", ".", "_send_sysex", "(", "PrivateConstants", ".", "...
This method requests and returns a Firmata capability query report :returns: A capability report in the form of a list
[ "This", "method", "requests", "and", "returns", "a", "Firmata", "capability", "query", "report" ]
python
train
google/flatbuffers
python/flatbuffers/builder.py
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/builder.py#L585-L594
def PrependUOffsetTRelativeSlot(self, o, x, d): """ PrependUOffsetTRelativeSlot prepends an UOffsetT onto the object at vtable slot `o`. If value `x` equals default `d`, then the slot will be set to zero and no other data will be written. """ if x != d: self.PrependUOffsetTRelative(x) self.Slot(o)
[ "def", "PrependUOffsetTRelativeSlot", "(", "self", ",", "o", ",", "x", ",", "d", ")", ":", "if", "x", "!=", "d", ":", "self", ".", "PrependUOffsetTRelative", "(", "x", ")", "self", ".", "Slot", "(", "o", ")" ]
PrependUOffsetTRelativeSlot prepends an UOffsetT onto the object at vtable slot `o`. If value `x` equals default `d`, then the slot will be set to zero and no other data will be written.
[ "PrependUOffsetTRelativeSlot", "prepends", "an", "UOffsetT", "onto", "the", "object", "at", "vtable", "slot", "o", ".", "If", "value", "x", "equals", "default", "d", "then", "the", "slot", "will", "be", "set", "to", "zero", "and", "no", "other", "data", "w...
python
train
iotile/coretools
iotilesensorgraph/iotile/sg/graph.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilesensorgraph/iotile/sg/graph.py#L199-L230
def initialize_remaining_constants(self, value=0): """Ensure that all constant streams referenced in the sensor graph have a value. Constant streams that are automatically created by the compiler are initialized as part of the compilation process but it's possible that the user references other constant streams but never assigns them an explicit initial value. This function will initialize them all to a default value (0 if not passed) and return the streams that were so initialized. Args: value (int): Optional value to use to initialize all uninitialized constants. Defaults to 0 if not passed. Returns: list(DataStream): A list of all of the constant streams that were not previously initialized and were initialized to the given value in this function. """ remaining = [] for node, _inputs, _outputs in self.iterate_bfs(): streams = node.input_streams() + [node.stream] for stream in streams: if stream.stream_type is not DataStream.ConstantType: continue if stream not in self.constant_database: self.add_constant(stream, value) remaining.append(stream) return remaining
[ "def", "initialize_remaining_constants", "(", "self", ",", "value", "=", "0", ")", ":", "remaining", "=", "[", "]", "for", "node", ",", "_inputs", ",", "_outputs", "in", "self", ".", "iterate_bfs", "(", ")", ":", "streams", "=", "node", ".", "input_strea...
Ensure that all constant streams referenced in the sensor graph have a value. Constant streams that are automatically created by the compiler are initialized as part of the compilation process but it's possible that the user references other constant streams but never assigns them an explicit initial value. This function will initialize them all to a default value (0 if not passed) and return the streams that were so initialized. Args: value (int): Optional value to use to initialize all uninitialized constants. Defaults to 0 if not passed. Returns: list(DataStream): A list of all of the constant streams that were not previously initialized and were initialized to the given value in this function.
[ "Ensure", "that", "all", "constant", "streams", "referenced", "in", "the", "sensor", "graph", "have", "a", "value", "." ]
python
train
andycasey/ads
examples/monthly-institute-publications/stromlo.py
https://github.com/andycasey/ads/blob/928415e202db80658cd8532fa4c3a00d0296b5c5/examples/monthly-institute-publications/stromlo.py#L22-L64
def get_pdf(article, debug=False): """ Download an article PDF from arXiv. :param article: The ADS article to retrieve. :type article: :class:`ads.search.Article` :returns: The binary content of the requested PDF. """ print('Retrieving {0}'.format(article)) identifier = [_ for _ in article.identifier if 'arXiv' in _] if identifier: url = 'http://arXiv.org/pdf/{0}.{1}'.format(identifier[0][9:13], ''.join(_ for _ in identifier[0][14:] if _.isdigit())) else: # No arXiv version. Ask ADS to redirect us to the journal article. params = { 'bibcode': article.bibcode, 'link_type': 'ARTICLE', 'db_key': 'AST' } url = requests.get('http://adsabs.harvard.edu/cgi-bin/nph-data_query', params=params).url q = requests.get(url) if not q.ok: print('Error retrieving {0}: {1} for {2}'.format( article, q.status_code, url)) if debug: q.raise_for_status() else: return None # Check if the journal has given back forbidden HTML. if q.content.endswith('</html>'): print('Error retrieving {0}: 200 (access denied?) for {1}'.format( article, url)) return None return q.content
[ "def", "get_pdf", "(", "article", ",", "debug", "=", "False", ")", ":", "print", "(", "'Retrieving {0}'", ".", "format", "(", "article", ")", ")", "identifier", "=", "[", "_", "for", "_", "in", "article", ".", "identifier", "if", "'arXiv'", "in", "_", ...
Download an article PDF from arXiv. :param article: The ADS article to retrieve. :type article: :class:`ads.search.Article` :returns: The binary content of the requested PDF.
[ "Download", "an", "article", "PDF", "from", "arXiv", "." ]
python
train
networks-lab/tidyextractors
tidyextractors/tidytwitter/twitter_extractor.py
https://github.com/networks-lab/tidyextractors/blob/658448ed533beecf32adcc188fc64d1068d15ca6/tidyextractors/tidytwitter/twitter_extractor.py#L180-L191
def _make_user_dict(self, username): """ Processes a Twitter User object, exporting as a nested dictionary. Complex values (i.e. objects that aren't int, bool, float, str, or a collection of such) are converted to strings (i.e. using __str__ or __repr__). To access user data only, use make_user_dict(username)['_json']. :param username: A Twitter username string. :return: A nested dictionary of user data. """ user = self._api.get_user(username) return self._make_object_dict(user)
[ "def", "_make_user_dict", "(", "self", ",", "username", ")", ":", "user", "=", "self", ".", "_api", ".", "get_user", "(", "username", ")", "return", "self", ".", "_make_object_dict", "(", "user", ")" ]
Processes a Twitter User object, exporting as a nested dictionary. Complex values (i.e. objects that aren't int, bool, float, str, or a collection of such) are converted to strings (i.e. using __str__ or __repr__). To access user data only, use make_user_dict(username)['_json']. :param username: A Twitter username string. :return: A nested dictionary of user data.
[ "Processes", "a", "Twitter", "User", "object", "exporting", "as", "a", "nested", "dictionary", ".", "Complex", "values", "(", "i", ".", "e", ".", "objects", "that", "aren", "t", "int", "bool", "float", "str", "or", "a", "collection", "of", "such", ")", ...
python
train
ivankorobkov/python-inject
src/inject.py
https://github.com/ivankorobkov/python-inject/blob/e2f04f91fbcfd0b38e628cbeda97bd8449038d36/src/inject.py#L108-L114
def configure_once(config=None, bind_in_runtime=True): """Create an injector with a callable config if not present, otherwise, do nothing.""" with _INJECTOR_LOCK: if _INJECTOR: return _INJECTOR return configure(config, bind_in_runtime=bind_in_runtime)
[ "def", "configure_once", "(", "config", "=", "None", ",", "bind_in_runtime", "=", "True", ")", ":", "with", "_INJECTOR_LOCK", ":", "if", "_INJECTOR", ":", "return", "_INJECTOR", "return", "configure", "(", "config", ",", "bind_in_runtime", "=", "bind_in_runtime"...
Create an injector with a callable config if not present, otherwise, do nothing.
[ "Create", "an", "injector", "with", "a", "callable", "config", "if", "not", "present", "otherwise", "do", "nothing", "." ]
python
train
GluuFederation/oxd-python
oxdpython/client.py
https://github.com/GluuFederation/oxd-python/blob/a0448cda03b4384bc50a8c20bd65eacd983bceb8/oxdpython/client.py#L538-L601
def setup_client(self): """The command registers the client for communication protection. This will be used to obtain an access token via the Get Client Token command. The access token will be passed as a protection_access_token parameter to other commands. Note: If you are using the oxd-https-extension, you must setup the client Returns: **dict:** the client setup information Example response:: { "oxd_id":"6F9619FF-8B86-D011-B42D-00CF4FC964FF", "op_host": "<op host>", "client_id":"<client id>", "client_secret":"<client secret>", "client_registration_access_token":"<Client registration access token>", "client_registration_client_uri":"<URI of client registration>", "client_id_issued_at":"<client_id issued at>", "client_secret_expires_at":"<client_secret expires at>" } """ # add required params for the command params = { "authorization_redirect_uri": self.authorization_redirect_uri, "oxd_rp_programming_language": "python", } # add other optional params if they exist in config for op in self.opt_params: if self.config.get("client", op): params[op] = self.config.get("client", op) for olp in self.opt_list_params: if self.config.get("client", olp): params[olp] = self.config.get("client", olp).split(",") logger.debug("Sending command `setup_client` with params %s", params) response = self.msgr.request("setup_client", **params) logger.debug("Received response: %s", response) if response['status'] == 'error': raise OxdServerError(response['data']) data = response["data"] self.oxd_id = data["oxd_id"] self.config.set("oxd", "id", data["oxd_id"]) self.config.set("client", "client_id", data["client_id"]) self.config.set("client", "client_secret", data["client_secret"]) if data["client_registration_access_token"]: self.config.set("client", "client_registration_access_token", data["client_registration_access_token"]) if data["client_registration_client_uri"]: self.config.set("client", "client_registration_client_uri", data["client_registration_client_uri"]) 
self.config.set("client", "client_id_issued_at", str(data["client_id_issued_at"])) return data
[ "def", "setup_client", "(", "self", ")", ":", "# add required params for the command", "params", "=", "{", "\"authorization_redirect_uri\"", ":", "self", ".", "authorization_redirect_uri", ",", "\"oxd_rp_programming_language\"", ":", "\"python\"", ",", "}", "# add other opt...
The command registers the client for communication protection. This will be used to obtain an access token via the Get Client Token command. The access token will be passed as a protection_access_token parameter to other commands. Note: If you are using the oxd-https-extension, you must setup the client Returns: **dict:** the client setup information Example response:: { "oxd_id":"6F9619FF-8B86-D011-B42D-00CF4FC964FF", "op_host": "<op host>", "client_id":"<client id>", "client_secret":"<client secret>", "client_registration_access_token":"<Client registration access token>", "client_registration_client_uri":"<URI of client registration>", "client_id_issued_at":"<client_id issued at>", "client_secret_expires_at":"<client_secret expires at>" }
[ "The", "command", "registers", "the", "client", "for", "communication", "protection", ".", "This", "will", "be", "used", "to", "obtain", "an", "access", "token", "via", "the", "Get", "Client", "Token", "command", ".", "The", "access", "token", "will", "be", ...
python
train
bio2bel/bio2bel
src/bio2bel/manager/namespace_manager.py
https://github.com/bio2bel/bio2bel/blob/d80762d891fa18b248709ff0b0f97ebb65ec64c2/src/bio2bel/manager/namespace_manager.py#L470-L490
def get_cli(cls) -> click.Group: """Get a :mod:`click` main function with added BEL namespace commands.""" main = super().get_cli() if cls.is_namespace: @main.group() def belns(): """Manage BEL namespace.""" cls._cli_add_to_bel_namespace(belns) cls._cli_add_clear_bel_namespace(belns) cls._cli_add_write_bel_namespace(belns) if cls.is_annotation: @main.group() def belanno(): """Manage BEL annotation.""" cls._cli_add_write_bel_annotation(belanno) return main
[ "def", "get_cli", "(", "cls", ")", "->", "click", ".", "Group", ":", "main", "=", "super", "(", ")", ".", "get_cli", "(", ")", "if", "cls", ".", "is_namespace", ":", "@", "main", ".", "group", "(", ")", "def", "belns", "(", ")", ":", "\"\"\"Manag...
Get a :mod:`click` main function with added BEL namespace commands.
[ "Get", "a", ":", "mod", ":", "click", "main", "function", "with", "added", "BEL", "namespace", "commands", "." ]
python
valid
mabuchilab/QNET
src/qnet/algebra/core/scalar_algebra.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/core/scalar_algebra.py#L898-L905
def create(cls, *operands, **kwargs): """Instantiate the product while applying simplification rules""" converted_operands = [] for op in operands: if not isinstance(op, Scalar): op = ScalarValue.create(op) converted_operands.append(op) return super().create(*converted_operands, **kwargs)
[ "def", "create", "(", "cls", ",", "*", "operands", ",", "*", "*", "kwargs", ")", ":", "converted_operands", "=", "[", "]", "for", "op", "in", "operands", ":", "if", "not", "isinstance", "(", "op", ",", "Scalar", ")", ":", "op", "=", "ScalarValue", ...
Instantiate the product while applying simplification rules
[ "Instantiate", "the", "product", "while", "applying", "simplification", "rules" ]
python
train
buriburisuri/sugartensor
sugartensor/sg_transform.py
https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_transform.py#L30-L45
def sg_cast(tensor, opt): r"""Casts a tensor to a new type. See `tf.cast()` in tensorflow. Args: tensor: A `Tensor` or `SparseTensor` (automatically given by chain). opt: dtype : The destination type. name : If provided, it replaces current tensor's name Returns: A `Tensor` or `SparseTensor` with same shape as `tensor`. """ assert opt.dtype is not None, 'dtype is mandatory.' return tf.cast(tensor, opt.dtype, name=opt.name)
[ "def", "sg_cast", "(", "tensor", ",", "opt", ")", ":", "assert", "opt", ".", "dtype", "is", "not", "None", ",", "'dtype is mandatory.'", "return", "tf", ".", "cast", "(", "tensor", ",", "opt", ".", "dtype", ",", "name", "=", "opt", ".", "name", ")" ]
r"""Casts a tensor to a new type. See `tf.cast()` in tensorflow. Args: tensor: A `Tensor` or `SparseTensor` (automatically given by chain). opt: dtype : The destination type. name : If provided, it replaces current tensor's name Returns: A `Tensor` or `SparseTensor` with same shape as `tensor`.
[ "r", "Casts", "a", "tensor", "to", "a", "new", "type", ".", "See", "tf", ".", "cast", "()", "in", "tensorflow", "." ]
python
train
quantopian/zipline
zipline/lib/labelarray.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/labelarray.py#L322-L338
def as_categorical(self): """ Coerce self into a pandas categorical. This is only defined on 1D arrays, since that's all pandas supports. """ if len(self.shape) > 1: raise ValueError("Can't convert a 2D array to a categorical.") with ignore_pandas_nan_categorical_warning(): return pd.Categorical.from_codes( self.as_int_array(), # We need to make a copy because pandas >= 0.17 fails if this # buffer isn't writeable. self.categories.copy(), ordered=False, )
[ "def", "as_categorical", "(", "self", ")", ":", "if", "len", "(", "self", ".", "shape", ")", ">", "1", ":", "raise", "ValueError", "(", "\"Can't convert a 2D array to a categorical.\"", ")", "with", "ignore_pandas_nan_categorical_warning", "(", ")", ":", "return",...
Coerce self into a pandas categorical. This is only defined on 1D arrays, since that's all pandas supports.
[ "Coerce", "self", "into", "a", "pandas", "categorical", "." ]
python
train
nephila/djangocms-helper
djangocms_helper/runner.py
https://github.com/nephila/djangocms-helper/blob/3fe53aee7b06922112c5e4445b74afeb86f6d836/djangocms_helper/runner.py#L28-L52
def cms(app, argv=sys.argv, extra_args=None): """ Run commands in a django cMS environment :param app: application :param argv: arguments (default to sys.argv) :param extra_args: list of extra arguments """ try: import cms # NOQA # nopyflakes except ImportError: print('runner.cms is available only if django CMS is installed') raise if app not in argv[:2]: # app is automatically added if not present argv.insert(1, app) if len(argv) < 3 and 'test' not in argv[:2]: # test argument is given if not argument is passed argv.insert(2, 'test') if '--cms' not in argv: # this is the cms runner, just add the cms argument argv.append('--cms') if extra_args: argv.extend(extra_args) return runner(argv)
[ "def", "cms", "(", "app", ",", "argv", "=", "sys", ".", "argv", ",", "extra_args", "=", "None", ")", ":", "try", ":", "import", "cms", "# NOQA # nopyflakes", "except", "ImportError", ":", "print", "(", "'runner.cms is available only if django CMS is installed'", ...
Run commands in a django cMS environment :param app: application :param argv: arguments (default to sys.argv) :param extra_args: list of extra arguments
[ "Run", "commands", "in", "a", "django", "cMS", "environment" ]
python
train
dcaune/perseus-lib-python-common
majormode/perseus/model/locale.py
https://github.com/dcaune/perseus-lib-python-common/blob/ba48fe0fd9bb4a75b53e7d10c41ada36a72d4496/majormode/perseus/model/locale.py#L254-L288
def decompose_locale(locale, strict=True): """ Return the decomposition of the specified locale into a language code and a country code. @param locale: a string representation of a locale, i.e., a ISO 639-3 alpha-3 code (or alpha-2 code), optionally followed by a dash character ``-`` and a ISO 3166-1 alpha-2 code. If ``None`` passed, the function returns the default locale, i.e., standard English ``('eng', None)``. @param strict: indicate whether the string representation of a locale has to be strictly compliant with RFC 4646, or whether a Java- style locale (character ``_`` instead of ``-``) is accepted. @return: a tuple ``(language_code, country_code)``, where the first code represents a ISO 639-3 alpha-3 code (or alpha-2 code), and the second code a ISO 3166-1 alpha-2 code. """ if locale is None: return ('eng', None) match = REGEX_LOCALE.match(locale) if match is None: if strict == True: raise Locale.MalformedLocaleException() match = REGEX_JAVA_LOCALE.match(locale) if match is None: raise Locale.MalformedLocaleException() (_, locale_language_code, locale_country_code, language_code) = match.groups() return (locale_language_code, locale_country_code) if language_code is None \ else (language_code, None)
[ "def", "decompose_locale", "(", "locale", ",", "strict", "=", "True", ")", ":", "if", "locale", "is", "None", ":", "return", "(", "'eng'", ",", "None", ")", "match", "=", "REGEX_LOCALE", ".", "match", "(", "locale", ")", "if", "match", "is", "None", ...
Return the decomposition of the specified locale into a language code and a country code. @param locale: a string representation of a locale, i.e., a ISO 639-3 alpha-3 code (or alpha-2 code), optionally followed by a dash character ``-`` and a ISO 3166-1 alpha-2 code. If ``None`` passed, the function returns the default locale, i.e., standard English ``('eng', None)``. @param strict: indicate whether the string representation of a locale has to be strictly compliant with RFC 4646, or whether a Java- style locale (character ``_`` instead of ``-``) is accepted. @return: a tuple ``(language_code, country_code)``, where the first code represents a ISO 639-3 alpha-3 code (or alpha-2 code), and the second code a ISO 3166-1 alpha-2 code.
[ "Return", "the", "decomposition", "of", "the", "specified", "locale", "into", "a", "language", "code", "and", "a", "country", "code", "." ]
python
train
pyblish/pyblish-nuke
pyblish_nuke/lib.py
https://github.com/pyblish/pyblish-nuke/blob/5fbd766774e999e5e3015201094a07a92d800c4f/pyblish_nuke/lib.py#L335-L379
def dock(window): """ Expecting a window to parent into a Nuke panel, that is dockable. """ # Deleting existing dock # There is a bug where existing docks are kept in-memory when closed via UI if self._dock: print("Deleting existing dock...") parent = self._dock dialog = None stacked_widget = None main_windows = [] # Getting dock parents while parent: if isinstance(parent, QtWidgets.QDialog): dialog = parent if isinstance(parent, QtWidgets.QStackedWidget): stacked_widget = parent if isinstance(parent, QtWidgets.QMainWindow): main_windows.append(parent) parent = parent.parent() dialog.deleteLater() if len(main_windows) > 1: # Then it's a floating window if stacked_widget.count() == 1: # Then it's empty and we can close it, # as is native Nuke UI behaviour main_windows[0].deleteLater() # Creating new dock pane = nuke.getPaneFor("Properties.1") widget_path = "pyblish_nuke.lib.pyblish_nuke_dockwidget" panel = nukescripts.panels.registerWidgetAsPanel(widget_path, window.windowTitle(), "pyblish_nuke.dock", True).addToPane(pane) panel_widget = panel.customKnob.getObject().widget panel_widget.layout().addWidget(window) _nuke_set_zero_margins(panel_widget) self._dock = panel_widget return self._dock
[ "def", "dock", "(", "window", ")", ":", "# Deleting existing dock", "# There is a bug where existing docks are kept in-memory when closed via UI", "if", "self", ".", "_dock", ":", "print", "(", "\"Deleting existing dock...\"", ")", "parent", "=", "self", ".", "_dock", "di...
Expecting a window to parent into a Nuke panel, that is dockable.
[ "Expecting", "a", "window", "to", "parent", "into", "a", "Nuke", "panel", "that", "is", "dockable", "." ]
python
train