repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
spacetelescope/pysynphot
commissioning/basecase.py
https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/commissioning/basecase.py#L163-L177
def savepysyn(self,wave,flux,fname,units=None): """ Cannot ever use the .writefits() method, because the array is frequently just sampled at the synphot waveset; plus, writefits is smart and does things like tapering.""" if units is None: ytype='throughput' units=' ' else: ytype='flux' col1=pyfits.Column(name='wavelength',format='D',array=wave) col2=pyfits.Column(name=ytype,format='D',array=flux) tbhdu=pyfits.BinTableHDU.from_columns(pyfits.ColDefs([col1,col2])) tbhdu.header.update('tunit1','angstrom') tbhdu.header.update('tunit2',units) tbhdu.writeto(fname.replace('.fits','_pysyn.fits'))
[ "def", "savepysyn", "(", "self", ",", "wave", ",", "flux", ",", "fname", ",", "units", "=", "None", ")", ":", "if", "units", "is", "None", ":", "ytype", "=", "'throughput'", "units", "=", "' '", "else", ":", "ytype", "=", "'flux'", "col1", "=", "py...
Cannot ever use the .writefits() method, because the array is frequently just sampled at the synphot waveset; plus, writefits is smart and does things like tapering.
[ "Cannot", "ever", "use", "the", ".", "writefits", "()", "method", "because", "the", "array", "is", "frequently", "just", "sampled", "at", "the", "synphot", "waveset", ";", "plus", "writefits", "is", "smart", "and", "does", "things", "like", "tapering", "." ]
python
train
Microsoft/LightGBM
python-package/lightgbm/basic.py
https://github.com/Microsoft/LightGBM/blob/8d2ec69f4f685b0ab1c4624d59ee2d3287bb3147/python-package/lightgbm/basic.py#L1846-L1892
def update(self, train_set=None, fobj=None): """Update Booster for one iteration. Parameters ---------- train_set : Dataset or None, optional (default=None) Training data. If None, last training data is used. fobj : callable or None, optional (default=None) Customized objective function. For multi-class task, the score is group by class_id first, then group by row_id. If you want to get i-th row score in j-th class, the access way is score[j * num_data + i] and you should group grad and hess in this way as well. Returns ------- is_finished : bool Whether the update was successfully finished. """ # need reset training data if train_set is not None and train_set is not self.train_set: if not isinstance(train_set, Dataset): raise TypeError('Training data should be Dataset instance, met {}' .format(type(train_set).__name__)) if train_set._predictor is not self.__init_predictor: raise LightGBMError("Replace training data failed, " "you should use same predictor for these data") self.train_set = train_set _safe_call(_LIB.LGBM_BoosterResetTrainingData( self.handle, self.train_set.construct().handle)) self.__inner_predict_buffer[0] = None is_finished = ctypes.c_int(0) if fobj is None: if self.__set_objective_to_none: raise LightGBMError('Cannot update due to null objective function.') _safe_call(_LIB.LGBM_BoosterUpdateOneIter( self.handle, ctypes.byref(is_finished))) self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)] return is_finished.value == 1 else: if not self.__set_objective_to_none: self.reset_parameter({"objective": "none"}).__set_objective_to_none = True grad, hess = fobj(self.__inner_predict(0), self.train_set) return self.__boost(grad, hess)
[ "def", "update", "(", "self", ",", "train_set", "=", "None", ",", "fobj", "=", "None", ")", ":", "# need reset training data", "if", "train_set", "is", "not", "None", "and", "train_set", "is", "not", "self", ".", "train_set", ":", "if", "not", "isinstance"...
Update Booster for one iteration. Parameters ---------- train_set : Dataset or None, optional (default=None) Training data. If None, last training data is used. fobj : callable or None, optional (default=None) Customized objective function. For multi-class task, the score is group by class_id first, then group by row_id. If you want to get i-th row score in j-th class, the access way is score[j * num_data + i] and you should group grad and hess in this way as well. Returns ------- is_finished : bool Whether the update was successfully finished.
[ "Update", "Booster", "for", "one", "iteration", "." ]
python
train
mozilla/treeherder
treeherder/log_parser/artifactbuilders.py
https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/log_parser/artifactbuilders.py#L61-L64
def get_artifact(self): """Return the job artifact built by the parser.""" self.artifact[self.parser.name] = self.parser.get_artifact() return self.artifact
[ "def", "get_artifact", "(", "self", ")", ":", "self", ".", "artifact", "[", "self", ".", "parser", ".", "name", "]", "=", "self", ".", "parser", ".", "get_artifact", "(", ")", "return", "self", ".", "artifact" ]
Return the job artifact built by the parser.
[ "Return", "the", "job", "artifact", "built", "by", "the", "parser", "." ]
python
train
sunt05/SuPy
docs/source/proc_var_info/gen_df_state_csv.py
https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/docs/source/proc_var_info/gen_df_state_csv.py#L56-L79
def extract_var_suews(dict_var_full: dict, var_supy: str)->list: '''extract related SUEWS variables for a supy variable `var_supy` Parameters ---------- dict_var_full : dict dict_var_full = sp.supy_load.exp_dict_full(sp.supy_load.dict_var2SiteSelect) var_supy : str supy variable name Returns ------- list related SUEWS variables for `var_supy` ''' x = sp.supy_load.flatten_list(dict_var_full[var_supy]) x = np.unique(x) x = [ xx for xx in x if xx not in ['base', 'const', '0.0'] + [str(x) for x in range(24)] ] x = [xx for xx in x if 'Code' not in xx] return x
[ "def", "extract_var_suews", "(", "dict_var_full", ":", "dict", ",", "var_supy", ":", "str", ")", "->", "list", ":", "x", "=", "sp", ".", "supy_load", ".", "flatten_list", "(", "dict_var_full", "[", "var_supy", "]", ")", "x", "=", "np", ".", "unique", "...
extract related SUEWS variables for a supy variable `var_supy` Parameters ---------- dict_var_full : dict dict_var_full = sp.supy_load.exp_dict_full(sp.supy_load.dict_var2SiteSelect) var_supy : str supy variable name Returns ------- list related SUEWS variables for `var_supy`
[ "extract", "related", "SUEWS", "variables", "for", "a", "supy", "variable", "var_supy" ]
python
train
morse-talk/morse-talk
morse_talk/plot.py
https://github.com/morse-talk/morse-talk/blob/71e09ace0aa554d28cada5ee658e43758305b8fa/morse_talk/plot.py#L31-L53
def _create_x_y(l, duration=1): """ Create 2 lists x: time (as unit of dot (dit) y: bits from a list of bit >>> l = [1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1] >>> x, y = _create_x_y(l) >>> x [-1, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28] >>> y [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0] """ l = [0] + l + [0] y = [] x = [] for i, bit in enumerate(l): y.append(bit) y.append(bit) x.append((i - 1) * duration) x.append(i * duration) return x, y
[ "def", "_create_x_y", "(", "l", ",", "duration", "=", "1", ")", ":", "l", "=", "[", "0", "]", "+", "l", "+", "[", "0", "]", "y", "=", "[", "]", "x", "=", "[", "]", "for", "i", ",", "bit", "in", "enumerate", "(", "l", ")", ":", "y", ".",...
Create 2 lists x: time (as unit of dot (dit) y: bits from a list of bit >>> l = [1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1] >>> x, y = _create_x_y(l) >>> x [-1, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28] >>> y [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0]
[ "Create", "2", "lists", "x", ":", "time", "(", "as", "unit", "of", "dot", "(", "dit", ")", "y", ":", "bits", "from", "a", "list", "of", "bit" ]
python
train
codeinn/vcs
vcs/backends/hg/changeset.py
https://github.com/codeinn/vcs/blob/e6cd94188e9c36d273411bf3adc0584ac6ab92a0/vcs/backends/hg/changeset.py#L97-L102
def parents(self): """ Returns list of parents changesets. """ return [self.repository.get_changeset(parent.rev()) for parent in self._ctx.parents() if parent.rev() >= 0]
[ "def", "parents", "(", "self", ")", ":", "return", "[", "self", ".", "repository", ".", "get_changeset", "(", "parent", ".", "rev", "(", ")", ")", "for", "parent", "in", "self", ".", "_ctx", ".", "parents", "(", ")", "if", "parent", ".", "rev", "("...
Returns list of parents changesets.
[ "Returns", "list", "of", "parents", "changesets", "." ]
python
train
IDSIA/sacred
sacred/ingredient.py
https://github.com/IDSIA/sacred/blob/72633776bed9b5bddf93ae7d215188e61970973a/sacred/ingredient.py#L328-L362
def get_experiment_info(self): """Get a dictionary with information about this experiment. Contains: * *name*: the name * *sources*: a list of sources (filename, md5) * *dependencies*: a list of package dependencies (name, version) :return: experiment information :rtype: dict """ dependencies = set() sources = set() for ing, _ in self.traverse_ingredients(): dependencies |= ing.dependencies sources |= ing.sources for dep in dependencies: dep.fill_missing_version() mainfile = (self.mainfile.to_json(self.base_dir)[0] if self.mainfile else None) def name_lower(d): return d.name.lower() return dict( name=self.path, base_dir=self.base_dir, sources=[s.to_json(self.base_dir) for s in sorted(sources)], dependencies=[d.to_json() for d in sorted(dependencies, key=name_lower)], repositories=collect_repositories(sources), mainfile=mainfile )
[ "def", "get_experiment_info", "(", "self", ")", ":", "dependencies", "=", "set", "(", ")", "sources", "=", "set", "(", ")", "for", "ing", ",", "_", "in", "self", ".", "traverse_ingredients", "(", ")", ":", "dependencies", "|=", "ing", ".", "dependencies"...
Get a dictionary with information about this experiment. Contains: * *name*: the name * *sources*: a list of sources (filename, md5) * *dependencies*: a list of package dependencies (name, version) :return: experiment information :rtype: dict
[ "Get", "a", "dictionary", "with", "information", "about", "this", "experiment", "." ]
python
train
spotify/luigi
luigi/worker.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/worker.py#L395-L404
def check_complete(task, out_queue): """ Checks if task is complete, puts the result to out_queue. """ logger.debug("Checking if %s is complete", task) try: is_complete = task.complete() except Exception: is_complete = TracebackWrapper(traceback.format_exc()) out_queue.put((task, is_complete))
[ "def", "check_complete", "(", "task", ",", "out_queue", ")", ":", "logger", ".", "debug", "(", "\"Checking if %s is complete\"", ",", "task", ")", "try", ":", "is_complete", "=", "task", ".", "complete", "(", ")", "except", "Exception", ":", "is_complete", "...
Checks if task is complete, puts the result to out_queue.
[ "Checks", "if", "task", "is", "complete", "puts", "the", "result", "to", "out_queue", "." ]
python
train
michael-lazar/rtv
rtv/packages/praw/__init__.py
https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/__init__.py#L2047-L2059
def get_edited(self, subreddit='mod', *args, **kwargs): """Return a get_content generator of edited items. :param subreddit: Either a Subreddit object or the name of the subreddit to return the edited items for. Defaults to `mod` which includes items for all the subreddits you moderate. The additional parameters are passed directly into :meth:`.get_content`. Note: the `url` parameter cannot be altered. """ url = self.config['edited'].format(subreddit=six.text_type(subreddit)) return self.get_content(url, *args, **kwargs)
[ "def", "get_edited", "(", "self", ",", "subreddit", "=", "'mod'", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "url", "=", "self", ".", "config", "[", "'edited'", "]", ".", "format", "(", "subreddit", "=", "six", ".", "text_type", "(", "sub...
Return a get_content generator of edited items. :param subreddit: Either a Subreddit object or the name of the subreddit to return the edited items for. Defaults to `mod` which includes items for all the subreddits you moderate. The additional parameters are passed directly into :meth:`.get_content`. Note: the `url` parameter cannot be altered.
[ "Return", "a", "get_content", "generator", "of", "edited", "items", "." ]
python
train
marvin-ai/marvin-python-toolbox
marvin_python_toolbox/common/http_client.py
https://github.com/marvin-ai/marvin-python-toolbox/blob/7c95cb2f9698b989150ab94c1285f3a9eaaba423/marvin_python_toolbox/common/http_client.py#L101-L103
def get_all(self, path, data=None, limit=100): """Encapsulates GET all requests""" return ListResultSet(path=path, data=data or {}, limit=limit)
[ "def", "get_all", "(", "self", ",", "path", ",", "data", "=", "None", ",", "limit", "=", "100", ")", ":", "return", "ListResultSet", "(", "path", "=", "path", ",", "data", "=", "data", "or", "{", "}", ",", "limit", "=", "limit", ")" ]
Encapsulates GET all requests
[ "Encapsulates", "GET", "all", "requests" ]
python
train
tcalmant/ipopo
pelix/shell/core.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/shell/core.py#L298-L312
def var_set(self, session, **kwargs): """ Sets the given variables or prints the current ones. "set answer=42" """ if not kwargs: session.write_line( self._utils.make_table( ("Name", "Value"), session.variables.items() ) ) else: for name, value in kwargs.items(): name = name.strip() session.set(name, value) session.write_line("{0}={1}", name, value)
[ "def", "var_set", "(", "self", ",", "session", ",", "*", "*", "kwargs", ")", ":", "if", "not", "kwargs", ":", "session", ".", "write_line", "(", "self", ".", "_utils", ".", "make_table", "(", "(", "\"Name\"", ",", "\"Value\"", ")", ",", "session", "....
Sets the given variables or prints the current ones. "set answer=42"
[ "Sets", "the", "given", "variables", "or", "prints", "the", "current", "ones", ".", "set", "answer", "=", "42" ]
python
train
taborlab/FlowCal
FlowCal/io.py
https://github.com/taborlab/FlowCal/blob/031a7af82acb1d46879a8e384a1a00f27f0bdc7a/FlowCal/io.py#L1395-L1545
def hist_bins(self, channels=None, nbins=None, scale='logicle', **kwargs): """ Get histogram bin edges for the specified channel(s). These cover the range specified in ``FCSData.range(channels)`` with a number of bins `nbins`, with linear, logarithmic, or logicle spacing. Parameters ---------- channels : int, str, list of int, list of str Channel(s) for which to generate histogram bins. If None, return a list with bins for all channels, in the order of ``FCSData.channels``. nbins : int or list of ints, optional The number of bins to calculate. If `channels` specifies a list of channels, `nbins` should be a list of integers. If `nbins` is None, use ``FCSData.resolution(channel)``. scale : str, optional Scale in which to generate bins. Can be either ``linear``, ``log``, or ``logicle``. kwargs : optional Keyword arguments specific to the selected bin scaling. Linear and logarithmic scaling do not use additional arguments. For logicle scaling, the following parameters can be provided: T : float, optional Maximum range of data. If not provided, use ``range[1]``. M : float, optional (Asymptotic) number of decades in scaled units. If not provided, calculate from the following:: max(4.5, 4.5 / np.log10(262144) * np.log10(T)) W : float, optional Width of linear range in scaled units. If not provided, calculate using the following relationship:: W = (M - log10(T / abs(r))) / 2 Where ``r`` is the minimum negative event. If no negative events are present, W is set to zero. Return ------ array or list of arrays Histogram bin edges for the specified channel(s). Notes ----- If ``range[0]`` is equal or less than zero and `scale` is ``log``, the lower limit of the range is replaced with one. Logicle scaling uses the LogicleTransform class in the plot module. References ---------- .. [1] D.R. Parks, M. Roederer, W.A. 
Moore, "A New Logicle Display Method Avoids Deceptive Effects of Logarithmic Scaling for Low Signals and Compensated Data," Cytometry Part A 69A:541-551, 2006, PMID 16604519. """ # Default: all channels if channels is None: channels = list(self._channels) # Get numerical indices of channels channels = self._name_to_index(channels) # Convert to list if necessary channel_list = channels if not isinstance(channel_list, list): channel_list = [channel_list] if not isinstance(nbins, list): nbins = [nbins]*len(channel_list) if not isinstance(scale, list): scale = [scale]*len(channel_list) # Iterate bins = [] for channel, nbins_channel, scale_channel in \ zip(channel_list, nbins, scale): # Get channel resolution res_channel = self.resolution(channel) # Get default nbins if nbins_channel is None: nbins_channel = res_channel # Get range of channel range_channel = self.range(channel) # Generate bins according to specified scale if scale_channel == 'linear': # We will now generate ``nbins`` uniformly spaced bins centered # at ``linspace(range_channel[0], range_channel[1], nbins)``. To # do so, we need to generate ``nbins + 1`` uniformly spaced # points. delta_res = (range_channel[1] - range_channel[0]) / \ (res_channel - 1) bins_channel = np.linspace(range_channel[0] - delta_res/2, range_channel[1] + delta_res/2, nbins_channel + 1) elif scale_channel == 'log': # Check if the lower limit is equal or less than zero. If so, # change the lower limit to one or some lower value, such that # the range covers at least five decades. if range_channel[0] <= 0: range_channel[0] = min(1., range_channel[1]/1e5) # Log range range_channel = [np.log10(range_channel[0]), np.log10(range_channel[1])] # We will now generate ``nbins`` uniformly spaced bins centered # at ``linspace(range_channel[0], range_channel[1], nbins)``. To # do so, we need to generate ``nbins + 1`` uniformly spaced # points. 
delta_res = (range_channel[1] - range_channel[0]) / \ (res_channel - 1) bins_channel = np.linspace(range_channel[0] - delta_res/2, range_channel[1] + delta_res/2, nbins_channel + 1) # Exponentiate bins bins_channel = 10**(bins_channel) elif scale_channel == 'logicle': # Create transform class # Use the LogicleTransform class from the plot module t = FlowCal.plot._LogicleTransform(data=self, channel=channel, **kwargs) # We now generate ``nbins`` uniformly spaced bins centered at # ``linspace(0, M, nbins)``. To do so, we need to generate # ``nbins + 1`` uniformly spaced points. delta_res = float(t.M) / (res_channel - 1) s = np.linspace(- delta_res/2., t.M + delta_res/2., nbins_channel + 1) # Finally, apply the logicle transformation to generate bins bins_channel = t.transform_non_affine(s) else: # Scale not supported raise ValueError('scale "{}" not supported'.format( scale_channel)) # Accumulate bins.append(bins_channel) # Extract from list if channels was not a list if not isinstance(channels, list): bins = bins[0] return bins
[ "def", "hist_bins", "(", "self", ",", "channels", "=", "None", ",", "nbins", "=", "None", ",", "scale", "=", "'logicle'", ",", "*", "*", "kwargs", ")", ":", "# Default: all channels", "if", "channels", "is", "None", ":", "channels", "=", "list", "(", "...
Get histogram bin edges for the specified channel(s). These cover the range specified in ``FCSData.range(channels)`` with a number of bins `nbins`, with linear, logarithmic, or logicle spacing. Parameters ---------- channels : int, str, list of int, list of str Channel(s) for which to generate histogram bins. If None, return a list with bins for all channels, in the order of ``FCSData.channels``. nbins : int or list of ints, optional The number of bins to calculate. If `channels` specifies a list of channels, `nbins` should be a list of integers. If `nbins` is None, use ``FCSData.resolution(channel)``. scale : str, optional Scale in which to generate bins. Can be either ``linear``, ``log``, or ``logicle``. kwargs : optional Keyword arguments specific to the selected bin scaling. Linear and logarithmic scaling do not use additional arguments. For logicle scaling, the following parameters can be provided: T : float, optional Maximum range of data. If not provided, use ``range[1]``. M : float, optional (Asymptotic) number of decades in scaled units. If not provided, calculate from the following:: max(4.5, 4.5 / np.log10(262144) * np.log10(T)) W : float, optional Width of linear range in scaled units. If not provided, calculate using the following relationship:: W = (M - log10(T / abs(r))) / 2 Where ``r`` is the minimum negative event. If no negative events are present, W is set to zero. Return ------ array or list of arrays Histogram bin edges for the specified channel(s). Notes ----- If ``range[0]`` is equal or less than zero and `scale` is ``log``, the lower limit of the range is replaced with one. Logicle scaling uses the LogicleTransform class in the plot module. References ---------- .. [1] D.R. Parks, M. Roederer, W.A. Moore, "A New Logicle Display Method Avoids Deceptive Effects of Logarithmic Scaling for Low Signals and Compensated Data," Cytometry Part A 69A:541-551, 2006, PMID 16604519.
[ "Get", "histogram", "bin", "edges", "for", "the", "specified", "channel", "(", "s", ")", "." ]
python
train
yunojuno/elasticsearch-django
elasticsearch_django/index.py
https://github.com/yunojuno/elasticsearch-django/blob/e8d98d32bcd77f1bedb8f1a22b6523ca44ffd489/elasticsearch_django/index.py#L119-L140
def scan_index(index, model): """ Yield all documents of model type in an index. This function calls the elasticsearch.helpers.scan function, and yields all the documents in the index that match the doc_type produced by a specific Django model. Args: index: string, the name of the index to scan, must be a configured index as returned from settings.get_index_names. model: a Django model type, used to filter the the documents that are scanned. Yields each document of type model in index, one at a time. """ # see https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-type-query.html query = {"query": {"type": {"value": model._meta.model_name}}} client = get_client() for hit in helpers.scan(client, index=index, query=query): yield hit
[ "def", "scan_index", "(", "index", ",", "model", ")", ":", "# see https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-type-query.html", "query", "=", "{", "\"query\"", ":", "{", "\"type\"", ":", "{", "\"value\"", ":", "model", ".", "_meta", ".", ...
Yield all documents of model type in an index. This function calls the elasticsearch.helpers.scan function, and yields all the documents in the index that match the doc_type produced by a specific Django model. Args: index: string, the name of the index to scan, must be a configured index as returned from settings.get_index_names. model: a Django model type, used to filter the the documents that are scanned. Yields each document of type model in index, one at a time.
[ "Yield", "all", "documents", "of", "model", "type", "in", "an", "index", "." ]
python
train
log2timeline/plaso
plaso/parsers/winreg_plugins/networks.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/winreg_plugins/networks.py#L138-L207
def ExtractEvents(self, parser_mediator, registry_key, **kwargs): """Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key. """ network_info = {} signatures = registry_key.GetSubkeyByName('Signatures') if signatures: network_info = self._GetNetworkInfo(signatures) profiles = registry_key.GetSubkeyByName('Profiles') if not profiles: return for subkey in profiles.GetSubkeys(): default_gateway_mac, dns_suffix = network_info.get( subkey.name, (None, None)) event_data = WindowsRegistryNetworkEventData() event_data.default_gateway_mac = default_gateway_mac event_data.dns_suffix = dns_suffix ssid_value = subkey.GetValueByName('ProfileName') if ssid_value: event_data.ssid = ssid_value.GetDataAsObject() description_value = subkey.GetValueByName('Description') if description_value: event_data.description = description_value.GetDataAsObject() connection_type_value = subkey.GetValueByName('NameType') if connection_type_value: connection_type = connection_type_value.GetDataAsObject() # TODO: move to formatter. 
connection_type = self._CONNECTION_TYPE.get( connection_type, 'unknown') event_data.connection_type = connection_type date_created_value = subkey.GetValueByName('DateCreated') if date_created_value: try: date_time = self._ParseSystemTime(date_created_value.data) except errors.ParseError as exception: date_time = None parser_mediator.ProduceExtractionWarning( 'unable to parse date created with error: {0!s}'.format( exception)) if date_time: event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data) date_last_connected_value = subkey.GetValueByName('DateLastConnected') if date_last_connected_value: try: date_time = self._ParseSystemTime(date_last_connected_value.data) except errors.ParseError as exception: date_time = None parser_mediator.ProduceExtractionWarning( 'unable to parse date last connected with error: {0!s}'.format( exception)) if date_time: event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_LAST_CONNECTED) parser_mediator.ProduceEventWithEventData(event, event_data)
[ "def", "ExtractEvents", "(", "self", ",", "parser_mediator", ",", "registry_key", ",", "*", "*", "kwargs", ")", ":", "network_info", "=", "{", "}", "signatures", "=", "registry_key", ".", "GetSubkeyByName", "(", "'Signatures'", ")", "if", "signatures", ":", ...
Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
[ "Extracts", "events", "from", "a", "Windows", "Registry", "key", "." ]
python
train
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/util.py
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/util.py#L127-L154
def get_queue_name(queue_name): """Determine which queue MR should run on. How to choose the queue: 1. If user provided one, use that. 2. If we are starting a mr from taskqueue, inherit that queue. If it's a special queue, fall back to the default queue. 3. Default queue. If user is using any MR pipeline interface, pipeline.start takes a "queue_name" argument. The pipeline will run on that queue and MR will simply inherit the queue_name. Args: queue_name: queue_name from user. Maybe None. Returns: The queue name to run on. """ if queue_name: return queue_name queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME", parameters.config.QUEUE_NAME) if len(queue_name) > 1 and queue_name[0:2] == "__": # We are currently in some special queue. E.g. __cron. return parameters.config.QUEUE_NAME else: return queue_name
[ "def", "get_queue_name", "(", "queue_name", ")", ":", "if", "queue_name", ":", "return", "queue_name", "queue_name", "=", "os", ".", "environ", ".", "get", "(", "\"HTTP_X_APPENGINE_QUEUENAME\"", ",", "parameters", ".", "config", ".", "QUEUE_NAME", ")", "if", "...
Determine which queue MR should run on. How to choose the queue: 1. If user provided one, use that. 2. If we are starting a mr from taskqueue, inherit that queue. If it's a special queue, fall back to the default queue. 3. Default queue. If user is using any MR pipeline interface, pipeline.start takes a "queue_name" argument. The pipeline will run on that queue and MR will simply inherit the queue_name. Args: queue_name: queue_name from user. Maybe None. Returns: The queue name to run on.
[ "Determine", "which", "queue", "MR", "should", "run", "on", "." ]
python
train
onnx/onnx-mxnet
onnx_mxnet/import_onnx.py
https://github.com/onnx/onnx-mxnet/blob/b602d75c5a01f5ed8f68b11150a06374f058a86b/onnx_mxnet/import_onnx.py#L302-L326
def _fix_channels(self, op, attrs, inputs): """A workaround for getting 'channels' or 'units' since onnx don't provide these attributes. We check the shape of weights provided to get the number. """ if op not in [mx.sym.Convolution, mx.sym.Deconvolution, mx.sym.FullyConnected]: return attrs weight_name = self._renames[inputs[1]] if not weight_name in self._params: raise ValueError("Unable to get channels/units attr from onnx graph.") else: wshape = self._params[weight_name].shape assert len(wshape) >= 2, "Weights shape is invalid: {}".format(wshape) if op in [mx.sym.FullyConnected]: attrs['num_hidden'] = wshape[0] else: if op == mx.sym.Convolution: # Weight shape for Conv and FC: (M x C x kH x kW) : M is number of # feature maps/hidden and C is number of channels attrs['num_filter'] = wshape[0] elif op == mx.sym.Deconvolution: # Weight shape for DeConv : (C x M x kH x kW) : M is number of # feature maps/filters and C is number of channels attrs['num_filter'] = wshape[1] return attrs
[ "def", "_fix_channels", "(", "self", ",", "op", ",", "attrs", ",", "inputs", ")", ":", "if", "op", "not", "in", "[", "mx", ".", "sym", ".", "Convolution", ",", "mx", ".", "sym", ".", "Deconvolution", ",", "mx", ".", "sym", ".", "FullyConnected", "]...
A workaround for getting 'channels' or 'units' since onnx don't provide these attributes. We check the shape of weights provided to get the number.
[ "A", "workaround", "for", "getting", "channels", "or", "units", "since", "onnx", "don", "t", "provide", "these", "attributes", ".", "We", "check", "the", "shape", "of", "weights", "provided", "to", "get", "the", "number", "." ]
python
train
davebridges/mousedb
mousedb/animal/views.py
https://github.com/davebridges/mousedb/blob/2a33f6d15d88b1540b05f7232b154fdbf8568580/mousedb/animal/views.py#L528-L537
def todo(request): """This view generates a summary of the todo lists. The login restricted view passes lists for ear tagging, genotyping and weaning and passes them to the template todo.html.""" eartag_list = Animal.objects.filter(Born__lt=(datetime.date.today() - datetime.timedelta(days=settings.WEAN_AGE))).filter(MouseID__isnull=True, Alive=True) genotype_list = Animal.objects.filter(Q(Genotype='N.D.')|Q(Genotype__icontains='?')).filter(Alive=True, Born__lt=(datetime.date.today() - datetime.timedelta(days=settings.GENOTYPE_AGE))) wean = datetime.date.today() - datetime.timedelta(days=settings.WEAN_AGE) wean_list = Animal.objects.filter(Born__lt=wean).filter(Weaned=None,Alive=True).exclude(Strain=2).order_by('Strain','Background','Rack','Cage') return render(request, 'todo.html', {'eartag_list':eartag_list, 'wean_list':wean_list, 'genotype_list':genotype_list})
[ "def", "todo", "(", "request", ")", ":", "eartag_list", "=", "Animal", ".", "objects", ".", "filter", "(", "Born__lt", "=", "(", "datetime", ".", "date", ".", "today", "(", ")", "-", "datetime", ".", "timedelta", "(", "days", "=", "settings", ".", "W...
This view generates a summary of the todo lists. The login restricted view passes lists for ear tagging, genotyping and weaning and passes them to the template todo.html.
[ "This", "view", "generates", "a", "summary", "of", "the", "todo", "lists", ".", "The", "login", "restricted", "view", "passes", "lists", "for", "ear", "tagging", "genotyping", "and", "weaning", "and", "passes", "them", "to", "the", "template", "todo", ".", ...
python
train
tensorflow/tensor2tensor
tensor2tensor/layers/common_layers.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L3844-L3855
def weight_targeting(w, k): """Weight-level magnitude pruning.""" k = tf.to_int32(k) w_shape = shape_list(w) size = tf.to_int32(tf.reduce_prod(w_shape[:-1])) w = tf.reshape(w, [size, w_shape[-1]]) transpose_w = tf.transpose(w) thres = tf.contrib.framework.sort(tf.abs(transpose_w), axis=1)[:, k] mask = to_float(thres[None, :] >= tf.abs(w)) return tf.reshape(mask, w_shape)
[ "def", "weight_targeting", "(", "w", ",", "k", ")", ":", "k", "=", "tf", ".", "to_int32", "(", "k", ")", "w_shape", "=", "shape_list", "(", "w", ")", "size", "=", "tf", ".", "to_int32", "(", "tf", ".", "reduce_prod", "(", "w_shape", "[", ":", "-"...
Weight-level magnitude pruning.
[ "Weight", "-", "level", "magnitude", "pruning", "." ]
python
train
Leeps-Lab/otree-redwood
otree_redwood/models.py
https://github.com/Leeps-Lab/otree-redwood/blob/59212f61a256ef77e0a9ed392ff497ea83ee6245/otree_redwood/models.py#L50-L58
def message(self): """Dictionary representation of the Event appropriate for JSON-encoding.""" return { 'timestamp': time.mktime(self.timestamp.timetuple())*1e3 + self.timestamp.microsecond/1e3, 'group': self.group_pk, 'participant': None if not self.participant else self.participant.code, 'channel': self.channel, 'value': self.value }
[ "def", "message", "(", "self", ")", ":", "return", "{", "'timestamp'", ":", "time", ".", "mktime", "(", "self", ".", "timestamp", ".", "timetuple", "(", ")", ")", "*", "1e3", "+", "self", ".", "timestamp", ".", "microsecond", "/", "1e3", ",", "'group...
Dictionary representation of the Event appropriate for JSON-encoding.
[ "Dictionary", "representation", "of", "the", "Event", "appropriate", "for", "JSON", "-", "encoding", "." ]
python
train
MarcoFavorito/flloat
flloat/parser/ldlf.py
https://github.com/MarcoFavorito/flloat/blob/5e6de1bea444b68d46d288834031860a8b2f8c2d/flloat/parser/ldlf.py#L63-L66
def t_ATOM(self, t): r'[a-zA-Z_][a-zA-Z_0-9]*' t.type = LDLfLexer.reserved.get(t.value, 'ATOM') # Check for reserved words return t
[ "def", "t_ATOM", "(", "self", ",", "t", ")", ":", "t", ".", "type", "=", "LDLfLexer", ".", "reserved", ".", "get", "(", "t", ".", "value", ",", "'ATOM'", ")", "# Check for reserved words", "return", "t" ]
r'[a-zA-Z_][a-zA-Z_0-9]*
[ "r", "[", "a", "-", "zA", "-", "Z_", "]", "[", "a", "-", "zA", "-", "Z_0", "-", "9", "]", "*" ]
python
train
tensorlayer/tensorlayer
tensorlayer/files/utils.py
https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/files/utils.py#L1838-L1899
def load_ckpt(sess=None, mode_name='model.ckpt', save_dir='checkpoint', var_list=None, is_latest=True, printable=False): """Load parameters from `ckpt` file. Parameters ------------ sess : Session TensorFlow Session. mode_name : str The name of the model, default is ``model.ckpt``. save_dir : str The path / file directory to the `ckpt`, default is ``checkpoint``. var_list : list of tensor The parameters / variables (tensor) to be saved. If empty, save all global variables (default). is_latest : boolean Whether to load the latest `ckpt`, if False, load the `ckpt` with the name of ```mode_name``. printable : boolean Whether to print all parameters information. Examples ---------- - Save all global parameters. >>> tl.files.save_ckpt(sess=sess, mode_name='model.ckpt', save_dir='model', printable=True) - Save specific parameters. >>> tl.files.save_ckpt(sess=sess, mode_name='model.ckpt', var_list=net.all_params, save_dir='model', printable=True) - Load latest ckpt. >>> tl.files.load_ckpt(sess=sess, var_list=net.all_params, save_dir='model', printable=True) - Load specific ckpt. >>> tl.files.load_ckpt(sess=sess, mode_name='model.ckpt', var_list=net.all_params, save_dir='model', is_latest=False, printable=True) """ if sess is None: raise ValueError("session is None.") if var_list is None: var_list = [] if is_latest: ckpt_file = tf.train.latest_checkpoint(save_dir) else: ckpt_file = os.path.join(save_dir, mode_name) if not var_list: var_list = tf.global_variables() logging.info("[*] load %s n_params: %d" % (ckpt_file, len(var_list))) if printable: for idx, v in enumerate(var_list): logging.info(" param {:3}: {:15} {}".format(idx, v.name, str(v.get_shape()))) try: saver = tf.train.Saver(var_list) saver.restore(sess, ckpt_file) except Exception as e: logging.info(e) logging.info("[*] load ckpt fail ...")
[ "def", "load_ckpt", "(", "sess", "=", "None", ",", "mode_name", "=", "'model.ckpt'", ",", "save_dir", "=", "'checkpoint'", ",", "var_list", "=", "None", ",", "is_latest", "=", "True", ",", "printable", "=", "False", ")", ":", "if", "sess", "is", "None", ...
Load parameters from `ckpt` file. Parameters ------------ sess : Session TensorFlow Session. mode_name : str The name of the model, default is ``model.ckpt``. save_dir : str The path / file directory to the `ckpt`, default is ``checkpoint``. var_list : list of tensor The parameters / variables (tensor) to be saved. If empty, save all global variables (default). is_latest : boolean Whether to load the latest `ckpt`, if False, load the `ckpt` with the name of ```mode_name``. printable : boolean Whether to print all parameters information. Examples ---------- - Save all global parameters. >>> tl.files.save_ckpt(sess=sess, mode_name='model.ckpt', save_dir='model', printable=True) - Save specific parameters. >>> tl.files.save_ckpt(sess=sess, mode_name='model.ckpt', var_list=net.all_params, save_dir='model', printable=True) - Load latest ckpt. >>> tl.files.load_ckpt(sess=sess, var_list=net.all_params, save_dir='model', printable=True) - Load specific ckpt. >>> tl.files.load_ckpt(sess=sess, mode_name='model.ckpt', var_list=net.all_params, save_dir='model', is_latest=False, printable=True)
[ "Load", "parameters", "from", "ckpt", "file", "." ]
python
valid
wtolson/gnsq
gnsq/nsqd.py
https://github.com/wtolson/gnsq/blob/0fd02578b2c9c5fa30626d78579db2a46c10edac/gnsq/nsqd.py#L453-L462
def requeue(self, message_id, timeout=0, backoff=True): """Re-queue a message (indicate failure to process).""" self.send(nsq.requeue(message_id, timeout)) self.finish_inflight() self.on_requeue.send( self, message_id=message_id, timeout=timeout, backoff=backoff )
[ "def", "requeue", "(", "self", ",", "message_id", ",", "timeout", "=", "0", ",", "backoff", "=", "True", ")", ":", "self", ".", "send", "(", "nsq", ".", "requeue", "(", "message_id", ",", "timeout", ")", ")", "self", ".", "finish_inflight", "(", ")",...
Re-queue a message (indicate failure to process).
[ "Re", "-", "queue", "a", "message", "(", "indicate", "failure", "to", "process", ")", "." ]
python
train
underworldcode/stripy
stripy-src/stripy/__init__.py
https://github.com/underworldcode/stripy/blob/d4c3480c3e58c88489ded695eadbe7cd5bf94b48/stripy-src/stripy/__init__.py#L25-L72
def weighted_average_to_nodes(x1, x2, data, interpolator ): """ Weighted average of scattered data to the nodal points of a triangulation using the barycentric coordinates as weightings. Parameters ---------- x1, x2 : 1D arrays arrays of x,y or lon, lat (radians) data : 1D array of data to be lumped to the node locations interpolator : a stripy.Triangulation or stripy.sTriangulation object which defines the node locations and their triangulation Returns ------- grid : 1D array containing the results of the weighted average norm : 1D array of the normalisation used to compute `grid` count : 1D int array of number of points that contribute anything to a given node """ import numpy as np gridded_data = np.zeros(interpolator.npoints) norm = np.zeros(interpolator.npoints) count = np.zeros(interpolator.npoints, dtype=np.int) bcc, nodes = interpolator.containing_simplex_and_bcc(x1, x2) # Beware vectorising the reduction operation !! for i in range(0, len(data)): grid[nodes[i][0]] += bcc[i][0] * data[i] grid[nodes[i][1]] += bcc[i][1] * data[i] grid[nodes[i][2]] += bcc[i][2] * data[i] norm[nodes[i][0]] += bcc[i][0] norm[nodes[i][1]] += bcc[i][1] norm[nodes[i][2]] += bcc[i][2] count[nodes[i][0]] += 1 count[nodes[i][1]] += 1 count[nodes[i][2]] += 1 grid[np.where(norm > 0.0)] /= norm[np.where(norm > 0.0)] return grid, norm, count
[ "def", "weighted_average_to_nodes", "(", "x1", ",", "x2", ",", "data", ",", "interpolator", ")", ":", "import", "numpy", "as", "np", "gridded_data", "=", "np", ".", "zeros", "(", "interpolator", ".", "npoints", ")", "norm", "=", "np", ".", "zeros", "(", ...
Weighted average of scattered data to the nodal points of a triangulation using the barycentric coordinates as weightings. Parameters ---------- x1, x2 : 1D arrays arrays of x,y or lon, lat (radians) data : 1D array of data to be lumped to the node locations interpolator : a stripy.Triangulation or stripy.sTriangulation object which defines the node locations and their triangulation Returns ------- grid : 1D array containing the results of the weighted average norm : 1D array of the normalisation used to compute `grid` count : 1D int array of number of points that contribute anything to a given node
[ "Weighted", "average", "of", "scattered", "data", "to", "the", "nodal", "points", "of", "a", "triangulation", "using", "the", "barycentric", "coordinates", "as", "weightings", "." ]
python
train
emin63/eyap
eyap/core/github_comments.py
https://github.com/emin63/eyap/blob/a610761973b478ca0e864e970be05ce29d5994a5/eyap/core/github_comments.py#L356-L374
def raw_pull(self, topic): """Do a raw pull of data for given topic down from github. :arg topic: String topic (i.e., issue title). ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- :returns: Result of request data from github API. ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- PURPOSE: Encapsulate call that gets raw data from github. """ assert topic is not None, 'A topic of None is not allowed' kwargs = {} if not self.user else {'auth': (self.user, self.token)} my_req = requests.get('%s/issues/%s' % ( self.base_url, topic), **kwargs) return my_req
[ "def", "raw_pull", "(", "self", ",", "topic", ")", ":", "assert", "topic", "is", "not", "None", ",", "'A topic of None is not allowed'", "kwargs", "=", "{", "}", "if", "not", "self", ".", "user", "else", "{", "'auth'", ":", "(", "self", ".", "user", ",...
Do a raw pull of data for given topic down from github. :arg topic: String topic (i.e., issue title). ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- :returns: Result of request data from github API. ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- PURPOSE: Encapsulate call that gets raw data from github.
[ "Do", "a", "raw", "pull", "of", "data", "for", "given", "topic", "down", "from", "github", "." ]
python
train
codelv/enaml-native
src/enamlnative/android/android_view.py
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_view.py#L140-L145
def create_widget(self): """ Create the underlying label widget. """ d = self.declaration self.widget = View(self.get_context(), None, d.style)
[ "def", "create_widget", "(", "self", ")", ":", "d", "=", "self", ".", "declaration", "self", ".", "widget", "=", "View", "(", "self", ".", "get_context", "(", ")", ",", "None", ",", "d", ".", "style", ")" ]
Create the underlying label widget.
[ "Create", "the", "underlying", "label", "widget", "." ]
python
train
datasift/datasift-python
datasift/pylon.py
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/pylon.py#L123-L155
def list(self, page=None, per_page=None, order_by='created_at', order_dir='DESC', service='facebook'): """ List pylon recordings :param page: page number for pagination :type page: int :param per_page: number of items per page, default 20 :type per_page: int :param order_by: field to order by, default request_time :type order_by: str :param order_dir: direction to order by, asc or desc, default desc :type order_dir: str :param service: The service for this API call (facebook, etc) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {} if page: params['page'] = page if per_page: params['per_page'] = per_page if order_by: params['order_by'] = order_by if order_dir: params['order_dir'] = order_dir return self.request.get(service + '/get', params)
[ "def", "list", "(", "self", ",", "page", "=", "None", ",", "per_page", "=", "None", ",", "order_by", "=", "'created_at'", ",", "order_dir", "=", "'DESC'", ",", "service", "=", "'facebook'", ")", ":", "params", "=", "{", "}", "if", "page", ":", "param...
List pylon recordings :param page: page number for pagination :type page: int :param per_page: number of items per page, default 20 :type per_page: int :param order_by: field to order by, default request_time :type order_by: str :param order_dir: direction to order by, asc or desc, default desc :type order_dir: str :param service: The service for this API call (facebook, etc) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "List", "pylon", "recordings" ]
python
train
rosenbrockc/acorn
acorn/acrn.py
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/acrn.py#L35-L49
def _parser_options(): """Parses the options and arguments from the command line.""" #We have two options: get some of the details from the config file, import argparse from acorn import base pdescr = "ACORN setup and custom configuration" parser = argparse.ArgumentParser(parents=[base.bparser], description=pdescr) for arg, options in script_options.items(): parser.add_argument(arg, **options) args = base.exhandler(examples, parser) if args is None: return return args
[ "def", "_parser_options", "(", ")", ":", "#We have two options: get some of the details from the config file,", "import", "argparse", "from", "acorn", "import", "base", "pdescr", "=", "\"ACORN setup and custom configuration\"", "parser", "=", "argparse", ".", "ArgumentParser", ...
Parses the options and arguments from the command line.
[ "Parses", "the", "options", "and", "arguments", "from", "the", "command", "line", "." ]
python
train
refenv/cijoe
modules/cij/ssh.py
https://github.com/refenv/cijoe/blob/21d7b2ed4ff68e0a1457e7df2db27f6334f1a379/modules/cij/ssh.py#L54-L88
def command(cmd, shell=True, echo=True, suffix=None): """SSH: Run the given command over SSH as defined in environment""" if env(): cij.err("cij.ssh.command: Invalid SSH environment") return 1 prefix = [] if cij.ENV.get("SSH_CMD_TIME") == "1": prefix.append("/usr/bin/time") if cij.ENV.get("SSH_CMD_TIMEOUT"): prefix.append("timeout") prefix.append(cij.ENV.get("SSH_CMD_TIMEOUT")) prefix.append("ssh") args = [] if cij.ENV.get("SSH_KEY"): args.append("-i") args.append(cij.ENV.get("SSH_KEY")) if cij.ENV.get("SSH_PORT"): args.append("-p") args.append(cij.ENV.get("SSH_PORT")) args.append("@".join([cij.ENV.get("SSH_USER"), cij.ENV.get("SSH_HOST")])) wrapped = prefix + args + ["'%s'" % " ".join(cmd)] if suffix: wrapped += suffix return cij.util.execute(wrapped, shell, echo)
[ "def", "command", "(", "cmd", ",", "shell", "=", "True", ",", "echo", "=", "True", ",", "suffix", "=", "None", ")", ":", "if", "env", "(", ")", ":", "cij", ".", "err", "(", "\"cij.ssh.command: Invalid SSH environment\"", ")", "return", "1", "prefix", "...
SSH: Run the given command over SSH as defined in environment
[ "SSH", ":", "Run", "the", "given", "command", "over", "SSH", "as", "defined", "in", "environment" ]
python
valid
collectiveacuity/labPack
labpack/databases/couchbase.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/databases/couchbase.py#L255-L364
def create_view(self, query_criteria=None, uid='_all_users'): ''' a method to add a view to a design document of a uid :param query_criteria: dictionary with valid jsonmodel query criteria :param uid: [optional] string with uid of design document to update :return: integer with status of operation an example of how to construct the query_criteria argument: query_criteria = { '.path.to.number': { 'min_value': 4.5 }, '.path.to.string': { 'discrete_values': [ 'pond', 'lake', 'stream', 'brook' ] } } NOTE: only fields specified in the document schema at class initialization can be used as fields in query_criteria. otherwise, an error will be thrown. uid is automatically added to all document schemas at initialization NOTE: the full list of all criteria are found in the reference page for the jsonmodel module as well as the query-rules.json file included in the module. http://collectiveacuity.github.io/jsonModel/reference/#query-criteria ''' # https://developer.couchbase.com/documentation/mobile/1.5/references/sync-gateway/admin-rest-api/index.html#/query/put__db___design__ddoc_ # https://developer.couchbase.com/documentation/server/3.x/admin/Views/views-writing.html title = '%s.create_view' % self.__class__.__name__ # validate inputs input_fields = { 'uid': uid } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # validate inputs if query_criteria: if not self.model: raise ValueError('%s(query_criteria={...} requires a document_schema.' % title) self.model.query(query_criteria) else: query_criteria = {} if uid != '_all_users' and self.public: raise ValueError('%s(uid="%s") user ids are not applicable for a public bucket. % title') # catch missing args if not query_criteria and not uid: raise IndexError('%s requires either a uid or query_criteria argument.' 
% title) # create a view of all user documents else: # retrieve the design document for the uid url = self.bucket_url + '/_design/%s' % uid design_details = { 'views': {} } response = requests.get(url) if response.status_code in (200, 201): design_details = response.json() design_details['views'] = self._clean_views(design_details['views']) # create a view of all docs for the uid if not query_criteria: if uid == '_all_users': return response.status_code else: function_string = 'function(doc, meta) { if (doc.uid == "%s") { emit(null, null); } }' % uid design_details['views']['_all_docs'] = { 'map': function_string } # construct a view for a query criteria else: # determine hashed key for criteria import hashlib import json from collections import OrderedDict ordered_criteria = OrderedDict(**query_criteria) hashed_criteria = hashlib.md5(json.dumps(query_criteria, sort_keys=True).encode('utf-8')).hexdigest() # determine function string for criteria uid_insert = 'emit();' if uid != '_all_users': uid_insert = 'if (doc.uid == "%s") { emit(); }' % uid function_string = 'function(doc, meta) { %s }' % uid_insert emit_insert = 'emit(null, [' count = 0 for key in ordered_criteria.keys(): if count: emit_insert += ',' emit_insert += 'doc%s' % key emit_insert += ']);' function_string = function_string.replace('emit();', emit_insert) # construct updated design details design_details['views'][hashed_criteria] = { 'map': function_string } # send update of design document response = requests.put(url, json=design_details) return response.status_code
[ "def", "create_view", "(", "self", ",", "query_criteria", "=", "None", ",", "uid", "=", "'_all_users'", ")", ":", "# https://developer.couchbase.com/documentation/mobile/1.5/references/sync-gateway/admin-rest-api/index.html#/query/put__db___design__ddoc_", "# https://developer.couchbas...
a method to add a view to a design document of a uid :param query_criteria: dictionary with valid jsonmodel query criteria :param uid: [optional] string with uid of design document to update :return: integer with status of operation an example of how to construct the query_criteria argument: query_criteria = { '.path.to.number': { 'min_value': 4.5 }, '.path.to.string': { 'discrete_values': [ 'pond', 'lake', 'stream', 'brook' ] } } NOTE: only fields specified in the document schema at class initialization can be used as fields in query_criteria. otherwise, an error will be thrown. uid is automatically added to all document schemas at initialization NOTE: the full list of all criteria are found in the reference page for the jsonmodel module as well as the query-rules.json file included in the module. http://collectiveacuity.github.io/jsonModel/reference/#query-criteria
[ "a", "method", "to", "add", "a", "view", "to", "a", "design", "document", "of", "a", "uid", ":", "param", "query_criteria", ":", "dictionary", "with", "valid", "jsonmodel", "query", "criteria", ":", "param", "uid", ":", "[", "optional", "]", "string", "w...
python
train
CivicSpleen/ambry
ambry/bundle/files.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/files.py#L549-L571
def record_to_objects(self): """Create config records to match the file metadata""" from ..util import AttrDict fr = self.record contents = fr.unpacked_contents if not contents: return ad = AttrDict(contents) # Get time that filessystem was synchronized to the File record. # Maybe use this to avoid overwriting configs that changed by bundle program. # fs_sync_time = self._dataset.config.sync[self.file_const][self.file_to_record] self._dataset.config.metadata.set(ad) self._dataset._database.commit() return ad
[ "def", "record_to_objects", "(", "self", ")", ":", "from", ".", ".", "util", "import", "AttrDict", "fr", "=", "self", ".", "record", "contents", "=", "fr", ".", "unpacked_contents", "if", "not", "contents", ":", "return", "ad", "=", "AttrDict", "(", "con...
Create config records to match the file metadata
[ "Create", "config", "records", "to", "match", "the", "file", "metadata" ]
python
train
kensho-technologies/graphql-compiler
graphql_compiler/query_formatting/gremlin_formatting.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/query_formatting/gremlin_formatting.py#L18-L48
def _safe_gremlin_string(value): """Sanitize and represent a string argument in Gremlin.""" if not isinstance(value, six.string_types): if isinstance(value, bytes): # should only happen in py3 value = value.decode('utf-8') else: raise GraphQLInvalidArgumentError(u'Attempting to convert a non-string into a string: ' u'{}'.format(value)) # Using JSON encoding means that all unicode literals and special chars # (e.g. newlines and backslashes) are replaced by appropriate escape sequences. # However, the quoted result is wrapped in double quotes, and $ signs are not escaped, # so that would allow arbitrary code execution in Gremlin. # We will therefore turn the double-quoted string into a single-quoted one to avoid this risk. escaped_and_quoted = json.dumps(value) # Double-quoted string literals in Gremlin/Groovy allow # arbitrary code execution via string interpolation and closures. # To avoid this, we perform the following steps: # - we strip the wrapping double quotes; # - we un-escape any double-quotes in the string, by replacing \" with "; # - we escape any single-quotes in the string, by replacing ' with \'; # - finally, we wrap the string in single quotes. # http://www.groovy-lang.org/syntax.html#_double_quoted_string if not escaped_and_quoted[0] == escaped_and_quoted[-1] == '"': raise AssertionError(u'Unreachable state reached: {} {}'.format(value, escaped_and_quoted)) no_quotes = escaped_and_quoted[1:-1] re_escaped = no_quotes.replace('\\"', '"').replace('\'', '\\\'') final_escaped_value = '\'' + re_escaped + '\'' return final_escaped_value
[ "def", "_safe_gremlin_string", "(", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", ":", "if", "isinstance", "(", "value", ",", "bytes", ")", ":", "# should only happen in py3", "value", "=", "value", ".", ...
Sanitize and represent a string argument in Gremlin.
[ "Sanitize", "and", "represent", "a", "string", "argument", "in", "Gremlin", "." ]
python
train
Ouranosinc/xclim
xclim/utils.py
https://github.com/Ouranosinc/xclim/blob/2080d139188bd8de2aeca097a025c2d89d6e0e09/xclim/utils.py#L888-L953
def daily_downsampler(da, freq='YS'): r"""Daily climate data downsampler Parameters ---------- da : xarray.DataArray freq : string Returns ------- xarray.DataArray Note ---- Usage Example grouper = daily_downsampler(da_std, freq='YS') x2 = grouper.mean() # add time coords to x2 and change dimension tags to time time1 = daily_downsampler(da_std.time, freq=freq).first() x2.coords['time'] = ('tags', time1.values) x2 = x2.swap_dims({'tags': 'time'}) x2 = x2.sortby('time') """ # generate tags from da.time and freq if isinstance(da.time.values[0], np.datetime64): years = ['{:04d}'.format(y) for y in da.time.dt.year.values] months = ['{:02d}'.format(m) for m in da.time.dt.month.values] else: # cannot use year, month, season attributes, not available for all calendars ... years = ['{:04d}'.format(v.year) for v in da.time.values] months = ['{:02d}'.format(v.month) for v in da.time.values] seasons = ['DJF DJF MAM MAM MAM JJA JJA JJA SON SON SON DJF'.split()[int(m) - 1] for m in months] n_t = da.time.size if freq == 'YS': # year start frequency l_tags = years elif freq == 'MS': # month start frequency l_tags = [years[i] + months[i] for i in range(n_t)] elif freq == 'QS-DEC': # DJF, MAM, JJA, SON seasons # construct tags from list of season+year, increasing year for December ys = [] for i in range(n_t): m = months[i] s = seasons[i] y = years[i] if m == '12': y = str(int(y) + 1) ys.append(y + s) l_tags = ys else: raise RuntimeError('freqency {:s} not implemented'.format(freq)) # add tags to buffer DataArray buffer = da.copy() buffer.coords['tags'] = ('time', l_tags) # return groupby according to tags return buffer.groupby('tags')
[ "def", "daily_downsampler", "(", "da", ",", "freq", "=", "'YS'", ")", ":", "# generate tags from da.time and freq", "if", "isinstance", "(", "da", ".", "time", ".", "values", "[", "0", "]", ",", "np", ".", "datetime64", ")", ":", "years", "=", "[", "'{:0...
r"""Daily climate data downsampler Parameters ---------- da : xarray.DataArray freq : string Returns ------- xarray.DataArray Note ---- Usage Example grouper = daily_downsampler(da_std, freq='YS') x2 = grouper.mean() # add time coords to x2 and change dimension tags to time time1 = daily_downsampler(da_std.time, freq=freq).first() x2.coords['time'] = ('tags', time1.values) x2 = x2.swap_dims({'tags': 'time'}) x2 = x2.sortby('time')
[ "r", "Daily", "climate", "data", "downsampler" ]
python
train
gem/oq-engine
openquake/hmtk/faults/tectonic_regionalisation.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/faults/tectonic_regionalisation.py#L111-L140
def populate_regions(self, tectonic_region_dict): ''' Populates the tectonic region from the list of dictionaries, where each region is a dictionary of with the following format:: region = {'Shear_Modulus': [(val1, weight1), (val2, weight2), ...], 'Displacement_Length_Ratio': [(val1, weight1), ...], 'Magnitude_Scaling_Relation': [(val1, weight1), ...]} ''' for tect_reg in tectonic_region_dict: if 'Shear_Modulus' in tect_reg.keys(): shear_modulus = tect_reg['Shear_Modulus'] else: shear_modulus = DEFAULT_SHEAR_MODULUS if 'Displacement_Length_Ratio' in tect_reg.keys(): disp_length_ratio = tect_reg['Displacement_Length_Ratio'] else: disp_length_ratio = DEFAULT_DLR if 'Magnitude_Scaling_Relation' in tect_reg.keys(): scaling_relation = tect_reg['Magnitude_Scaling_Relation'] else: scaling_relation = DEFAULT_MSR self.regionalisation.append( TectonicRegion( tect_reg['Code'], tect_reg['Name'], shear_modulus, disp_length_ratio, scaling_relation)) self.key_list.append(tect_reg['Name'])
[ "def", "populate_regions", "(", "self", ",", "tectonic_region_dict", ")", ":", "for", "tect_reg", "in", "tectonic_region_dict", ":", "if", "'Shear_Modulus'", "in", "tect_reg", ".", "keys", "(", ")", ":", "shear_modulus", "=", "tect_reg", "[", "'Shear_Modulus'", ...
Populates the tectonic region from the list of dictionaries, where each region is a dictionary of with the following format:: region = {'Shear_Modulus': [(val1, weight1), (val2, weight2), ...], 'Displacement_Length_Ratio': [(val1, weight1), ...], 'Magnitude_Scaling_Relation': [(val1, weight1), ...]}
[ "Populates", "the", "tectonic", "region", "from", "the", "list", "of", "dictionaries", "where", "each", "region", "is", "a", "dictionary", "of", "with", "the", "following", "format", "::" ]
python
train
UDST/orca
orca/orca.py
https://github.com/UDST/orca/blob/07b34aeef13cc87c966b2e30cbe7e76cc9d3622c/orca/orca.py#L295-L325
def column_type(self, column_name): """ Report column type as one of 'local', 'series', or 'function'. Parameters ---------- column_name : str Returns ------- col_type : {'local', 'series', 'function'} 'local' means that the column is part of the registered table, 'series' means the column is a registered Pandas Series, and 'function' means the column is a registered function providing a Pandas Series. """ extra_cols = list_columns_for_table(self.name) if column_name in extra_cols: col = _COLUMNS[(self.name, column_name)] if isinstance(col, _SeriesWrapper): return 'series' elif isinstance(col, _ColumnFuncWrapper): return 'function' elif column_name in self.local_columns: return 'local' raise KeyError('column {!r} not found'.format(column_name))
[ "def", "column_type", "(", "self", ",", "column_name", ")", ":", "extra_cols", "=", "list_columns_for_table", "(", "self", ".", "name", ")", "if", "column_name", "in", "extra_cols", ":", "col", "=", "_COLUMNS", "[", "(", "self", ".", "name", ",", "column_n...
Report column type as one of 'local', 'series', or 'function'. Parameters ---------- column_name : str Returns ------- col_type : {'local', 'series', 'function'} 'local' means that the column is part of the registered table, 'series' means the column is a registered Pandas Series, and 'function' means the column is a registered function providing a Pandas Series.
[ "Report", "column", "type", "as", "one", "of", "local", "series", "or", "function", "." ]
python
train
FlaskGuys/Flask-Imagine
flask_imagine/filters/watermark.py
https://github.com/FlaskGuys/Flask-Imagine/blob/f79c6517ecb5480b63a2b3b8554edb6e2ac8be8c/flask_imagine/filters/watermark.py#L169-L180
def _bottom_position(self, resource): """ Place watermark to bottom position :param resource: Image.Image :return: Image.Image """ image = self._get_scaled_image(resource) left = int(round(resource.size[0] // 2 - image.size[0] // 2)) upper = int(round(resource.size[1] - image.size[1])) return image, left, upper
[ "def", "_bottom_position", "(", "self", ",", "resource", ")", ":", "image", "=", "self", ".", "_get_scaled_image", "(", "resource", ")", "left", "=", "int", "(", "round", "(", "resource", ".", "size", "[", "0", "]", "//", "2", "-", "image", ".", "siz...
Place watermark to bottom position :param resource: Image.Image :return: Image.Image
[ "Place", "watermark", "to", "bottom", "position", ":", "param", "resource", ":", "Image", ".", "Image", ":", "return", ":", "Image", ".", "Image" ]
python
train
sryza/spark-timeseries
python/sparkts/timeseriesrdd.py
https://github.com/sryza/spark-timeseries/blob/280aa887dc08ab114411245268f230fdabb76eec/python/sparkts/timeseriesrdd.py#L141-L146
def to_pandas_series_rdd(self):
    """
    Returns an RDD of Pandas Series objects indexed with Pandas DatetimeIndexes
    """
    # Build the shared index once so every mapped record reuses it.
    shared_index = self.index().to_pandas_index()

    def as_series(record):
        key, values = record
        return (key, pd.Series(values, shared_index))

    return self.map(as_series)
[ "def", "to_pandas_series_rdd", "(", "self", ")", ":", "pd_index", "=", "self", ".", "index", "(", ")", ".", "to_pandas_index", "(", ")", "return", "self", ".", "map", "(", "lambda", "x", ":", "(", "x", "[", "0", "]", ",", "pd", ".", "Series", "(", ...
Returns an RDD of Pandas Series objects indexed with Pandas DatetimeIndexes
[ "Returns", "an", "RDD", "of", "Pandas", "Series", "objects", "indexed", "with", "Pandas", "DatetimeIndexes" ]
python
train
pyvisa/pyvisa-sim
pyvisa-sim/parser.py
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/parser.py#L82-L106
def _load(content_or_fp):
    """YAML Parse a file or str and check version.

    :param content_or_fp: YAML content as a string or an open file object.
    :return: the parsed document (BaseLoader keeps every scalar as a string).
    :raises ValueError: if the document does not declare a 'spec' key, the
        version is not 'X.Y' with integer parts, or the file's spec version
        is newer than this parser supports.
    """
    try:
        data = yaml.load(content_or_fp, Loader=yaml.loader.BaseLoader)
    except Exception as e:
        # Preserve the original exception type but attach the full traceback
        # text so malformed files are easy to diagnose.
        raise type(e)('Malformed yaml file:\n%r' % format_exc())

    try:
        ver = data['spec']
    except (TypeError, KeyError):
        # Narrowed from a bare `except:` (which also swallowed SystemExit /
        # KeyboardInterrupt). TypeError: document is not a mapping;
        # KeyError: no 'spec' key.
        raise ValueError('The file does not specify a spec version')

    try:
        ver = tuple(map(int, (ver.split("."))))
    except (AttributeError, ValueError):
        # Narrowed from a bare `except:`. AttributeError: 'spec' was not a
        # string; ValueError: a component is not an integer.
        raise ValueError("Invalid spec version format. Expect 'X.Y'"
                         " (X and Y integers), found %s" % ver)

    if ver > SPEC_VERSION_TUPLE:
        raise ValueError('The spec version of the file is '
                         '%s but the parser is %s. '
                         'Please update pyvisa-sim.' % (ver, SPEC_VERSION))

    return data
[ "def", "_load", "(", "content_or_fp", ")", ":", "try", ":", "data", "=", "yaml", ".", "load", "(", "content_or_fp", ",", "Loader", "=", "yaml", ".", "loader", ".", "BaseLoader", ")", "except", "Exception", "as", "e", ":", "raise", "type", "(", "e", "...
YAML Parse a file or str and check version.
[ "YAML", "Parse", "a", "file", "or", "str", "and", "check", "version", "." ]
python
train
mayfield/shellish
shellish/data.py
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/data.py#L151-L204
def make_hone_cache_wrapper(inner_func, maxsize, maxage, finder, store_partials):
    """ Keeps a cache of requests we've already made and uses that for
    generating results if possible.  If the user asked for a root prior to
    this call we can use it to skip a new lookup using `finder`.  A top-level
    lookup effectively serves as a global cache.

    `inner_func` is the uncached lookup; its last positional argument is the
    "radix" (a sliceable key, e.g. a string) used for prefix matching.
    `finder(radix, partial_radix, partial)` derives a result for `radix`
    from a cached result of a shorter prefix.  `maxsize`/`maxage` bound the
    TTL cache; `store_partials` controls whether finder-derived results are
    cached too.  TTLMapping is a project type — presumably a dict with
    size/age eviction; confirm its semantics in the surrounding module.
    """
    hits = misses = partials = 0
    cache = TTLMapping(maxsize, maxage)

    def wrapper(*args):
        nonlocal hits, misses, partials
        radix = args[-1]
        # Attempt fast cache hit first.
        try:
            r = cache[radix]
        except KeyError:
            pass
        else:
            hits += 1
            return r
        # Walk prefixes from longest to shortest looking for a cached
        # partial result the finder can extend.
        for i in range(len(radix) - 1, -1, -1):
            partial_radix = radix[:i]
            try:
                partial = cache[partial_radix]
            except KeyError:
                continue
            try:
                r = finder(radix, partial_radix, partial)
            except:
                break  # Treat any exception as a miss.
            partials += 1
            if store_partials:
                cache[radix] = r
            return r
        # No usable prefix: fall back to the real lookup and cache it.
        misses += 1
        cache[radix] = r = inner_func(*args)
        return r

    def cache_info():
        """ Emulate lru_cache so this is a low touch replacement. """
        return HoneCacheInfo(hits, misses, maxsize, len(cache), maxage,
                             partials, finder)

    def cache_clear():
        """ Clear cache and stats. """
        nonlocal hits, misses, partials
        hits = misses = partials = 0
        cache.clear()

    wrapper.cache_info = cache_info
    wrapper.cache_clear = cache_clear
    return functools.update_wrapper(wrapper, inner_func)
[ "def", "make_hone_cache_wrapper", "(", "inner_func", ",", "maxsize", ",", "maxage", ",", "finder", ",", "store_partials", ")", ":", "hits", "=", "misses", "=", "partials", "=", "0", "cache", "=", "TTLMapping", "(", "maxsize", ",", "maxage", ")", "def", "wr...
Keeps a cache of requests we've already made and use that for generating results if possible. If the user asked for a root prior to this call we can use it to skip a new lookup using `finder`. A top-level lookup will effectively serves as a global cache.
[ "Keeps", "a", "cache", "of", "requests", "we", "ve", "already", "made", "and", "use", "that", "for", "generating", "results", "if", "possible", ".", "If", "the", "user", "asked", "for", "a", "root", "prior", "to", "this", "call", "we", "can", "use", "i...
python
train
liip/taxi
taxi/timesheet/entry.py
https://github.com/liip/taxi/blob/269423c1f1ab571bd01a522819afe3e325bfbff6/taxi/timesheet/entry.py#L411-L509
def filter(self, date=None, regroup=False, ignored=None, pushed=None,
           unmapped=None, current_workday=None):
    """
    Return the entries as a dict of {:class:`datetime.date`:
    :class:`~taxi.timesheet.lines.Entry`} items.

    `date` can either be a single :class:`datetime.date` object to filter
    only entries from the given date, or a tuple of :class:`datetime.date`
    objects representing `(from, to)`; either bound may be ``None`` for an
    open-ended range.

    `ignored`, `pushed` and `unmapped` keep only entries whose matching
    attribute equals (or, for `unmapped`, differs from) the given value;
    ``None`` disables that filter.  `current_workday`, when set, keeps (or
    excludes) entries dated today or the previous working day, excluding
    weekends.

    If `regroup` is set to True, similar entries (i.e. having the same
    :meth:`~taxi.timesheet.lines.Entry.hash`) will be regrouped into a
    single :class:`~taxi.timesheet.entry.AggregatedTimesheetEntry`.
    """
    def entry_filter(entry_date, entry):
        """Return True if `entry` passes all active filters."""
        if ignored is not None and entry.ignored != ignored:
            return False
        if pushed is not None and entry.pushed != pushed:
            return False
        if unmapped is not None and entry.mapped == unmapped:
            return False
        if current_workday is not None:
            today = datetime.date.today()
            yesterday = date_utils.get_previous_working_day(today)
            # BUGFIX: strftime() returns a string, so the original
            # comparison `not in [6, 0]` (ints) was always True and the
            # weekend exclusion never applied. Compare against strings
            # ('%w': Sunday == '0', Saturday == '6').
            is_current_workday = (
                entry_date in (today, yesterday)
                and entry_date.strftime('%w') not in ('6', '0')
            )
            if current_workday != is_current_workday:
                return False

        return True

    # Date can either be a single date (only 1 day) or a tuple for a
    # date range
    if date is not None and not isinstance(date, tuple):
        date = (date, date)

    filtered_entries = collections.defaultdict(list)

    for (entries_date, entries) in six.iteritems(self):
        # Skip whole days that fall outside the requested range.
        if (date is not None and (
                (date[0] is not None and entries_date < date[0]) or
                (date[1] is not None and entries_date > date[1]))):
            continue

        entries_for_date = []

        if regroup:
            # Maps an entry hash to its position in entries_for_date.
            aggregated_entries = {}
            # Renamed from `id` to avoid shadowing the builtin.
            position = 0

            for entry in entries:
                if not entry_filter(entries_date, entry):
                    continue

                if entry.hash not in aggregated_entries:
                    # Common case: first occurrence. Store the entry as-is;
                    # it will get replaced by an AggregatedTimesheetEntry
                    # later if a duplicate shows up.
                    entries_for_date.append(entry)
                    aggregated_entries[entry.hash] = position
                    position += 1
                else:
                    # Get the first occurrence of the entry in the
                    # entries_for_date list.
                    existing_entry = entries_for_date[
                        aggregated_entries[entry.hash]
                    ]

                    if isinstance(existing_entry, Entry):
                        # Second occurrence: promote the plain Entry to an
                        # AggregatedTimesheetEntry holding both.
                        aggregated_entry = AggregatedTimesheetEntry()
                        aggregated_entry.entries.append(existing_entry)
                        aggregated_entry.entries.append(entry)
                        entries_for_date[
                            aggregated_entries[entry.hash]
                        ] = aggregated_entry
                    else:
                        # Already an AggregatedTimesheetEntry (3rd or later
                        # occurrence): just append to it.
                        aggregated_entry = existing_entry
                        aggregated_entry.entries.append(entry)
        else:
            entries_for_date = [
                entry for entry in entries
                if entry_filter(entries_date, entry)
            ]

        if entries_for_date:
            filtered_entries[entries_date].extend(entries_for_date)

    return filtered_entries
[ "def", "filter", "(", "self", ",", "date", "=", "None", ",", "regroup", "=", "False", ",", "ignored", "=", "None", ",", "pushed", "=", "None", ",", "unmapped", "=", "None", ",", "current_workday", "=", "None", ")", ":", "def", "entry_filter", "(", "e...
Return the entries as a dict of {:class:`datetime.date`: :class:`~taxi.timesheet.lines.Entry`} items. `date` can either be a single :class:`datetime.date` object to filter only entries from the given date, or a tuple of :class:`datetime.date` objects representing `(from, to)`. `filter_callback` is a function that, given a :class:`~taxi.timesheet.lines.Entry` object, should return True to include that line, or False to exclude it. If `regroup` is set to True, similar entries (i.e. having the same :meth:`~taxi.timesheet.lines.Entry.hash`) will be regrouped into a single :class:`~taxi.timesheet.entry.AggregatedTimesheetEntry`.
[ "Return", "the", "entries", "as", "a", "dict", "of", "{", ":", "class", ":", "datetime", ".", "date", ":", ":", "class", ":", "~taxi", ".", "timesheet", ".", "lines", ".", "Entry", "}", "items", "." ]
python
train
pennlabs/penn-sdk-python
penn/registrar.py
https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/registrar.py#L80-L91
def section(self, dept, course_number, sect_number):
    """Return a single section object for the given section.

    All arguments should be strings. Throws a `ValueError` if the
    section is not found.

    >>> lgst101_bfs = r.section('lgst', '101', '301')
    """
    section_id = ''.join((dept, course_number, sect_number))
    results = self.search({'course_id': section_id})
    try:
        return next(results)
    except StopIteration:
        raise ValueError('Section %s not found' % section_id)
[ "def", "section", "(", "self", ",", "dept", ",", "course_number", ",", "sect_number", ")", ":", "section_id", "=", "dept", "+", "course_number", "+", "sect_number", "sections", "=", "self", ".", "search", "(", "{", "'course_id'", ":", "section_id", "}", ")...
Return a single section object for the given section. All arguments should be strings. Throws a `ValueError` if the section is not found. >>> lgst101_bfs = r.course('lgst', '101', '301')
[ "Return", "a", "single", "section", "object", "for", "the", "given", "section", ".", "All", "arguments", "should", "be", "strings", ".", "Throws", "a", "ValueError", "if", "the", "section", "is", "not", "found", "." ]
python
train
basho/riak-python-client
riak/bucket.py
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/bucket.py#L116-L128
def get_decoder(self, content_type):
    """
    Get the decoding function for the provided content type for this
    bucket.

    :param content_type: the requested media type
    :type content_type: str
    :rtype: function
    """
    # Prefer a bucket-local decoder; fall back to the client's registry.
    try:
        return self._decoders[content_type]
    except KeyError:
        return self._client.get_decoder(content_type)
[ "def", "get_decoder", "(", "self", ",", "content_type", ")", ":", "if", "content_type", "in", "self", ".", "_decoders", ":", "return", "self", ".", "_decoders", "[", "content_type", "]", "else", ":", "return", "self", ".", "_client", ".", "get_decoder", "(...
Get the decoding function for the provided content type for this bucket. :param content_type: the requested media type :type content_type: str :rtype: function
[ "Get", "the", "decoding", "function", "for", "the", "provided", "content", "type", "for", "this", "bucket", "." ]
python
train
rackerlabs/python-lunrclient
lunrclient/lunr_shell.py
https://github.com/rackerlabs/python-lunrclient/blob/f26a450a422600f492480bfa42cbee50a5c7016f/lunrclient/lunr_shell.py#L324-L327
def create(self, id):
    """ Create a new tenant id """
    response = self.client.accounts.create(id=id)
    self.display(response)
[ "def", "create", "(", "self", ",", "id", ")", ":", "resp", "=", "self", ".", "client", ".", "accounts", ".", "create", "(", "id", "=", "id", ")", "self", ".", "display", "(", "resp", ")" ]
Create a new tenant id
[ "Create", "a", "new", "tenant", "id" ]
python
train
tanghaibao/jcvi
jcvi/apps/base.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/base.py#L1109-L1129
def timestamp(args):
    """
    %prog timestamp path > timestamp.info

    Record the timestamps for all files in the current folder.
    filename atime mtime

    This file can be used later to recover previous timestamps through touch().
    """
    p = OptionParser(timestamp.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    path, = args
    # Walk the tree and print one "<file> <atime> <mtime>" line per file.
    for dirpath, _dirnames, filenames in os.walk(path):
        for name in filenames:
            full_path = op.join(dirpath, name)
            atime, mtime = get_times(full_path)
            print(full_path, atime, mtime)
[ "def", "timestamp", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "timestamp", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "("...
%prog timestamp path > timestamp.info Record the timestamps for all files in the current folder. filename atime mtime This file can be used later to recover previous timestamps through touch().
[ "%prog", "timestamp", "path", ">", "timestamp", ".", "info" ]
python
train
saltstack/salt
salt/modules/file.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L1037-L1046
def _sed_esc(string, escape_all=False): ''' Escape single quotes and forward slashes ''' special_chars = "^.[$()|*+?{" string = string.replace("'", "'\"'\"'").replace("/", "\\/") if escape_all is True: for char in special_chars: string = string.replace(char, "\\" + char) return string
[ "def", "_sed_esc", "(", "string", ",", "escape_all", "=", "False", ")", ":", "special_chars", "=", "\"^.[$()|*+?{\"", "string", "=", "string", ".", "replace", "(", "\"'\"", ",", "\"'\\\"'\\\"'\"", ")", ".", "replace", "(", "\"/\"", ",", "\"\\\\/\"", ")", "...
Escape single quotes and forward slashes
[ "Escape", "single", "quotes", "and", "forward", "slashes" ]
python
train
googledatalab/pydatalab
google/datalab/bigquery/commands/_bigquery.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/commands/_bigquery.py#L645-L647
def _render_list(data):
    """ Helper to render a list of objects as an HTML list object. """
    markup = google.datalab.utils.commands.HtmlBuilder.render_list(data)
    return IPython.core.display.HTML(markup)
[ "def", "_render_list", "(", "data", ")", ":", "return", "IPython", ".", "core", ".", "display", ".", "HTML", "(", "google", ".", "datalab", ".", "utils", ".", "commands", ".", "HtmlBuilder", ".", "render_list", "(", "data", ")", ")" ]
Helper to render a list of objects as an HTML list object.
[ "Helper", "to", "render", "a", "list", "of", "objects", "as", "an", "HTML", "list", "object", "." ]
python
train
pavoni/pyvera
pyvera/__init__.py
https://github.com/pavoni/pyvera/blob/e05e3d13f76153444787d31948feb5419d77a8c8/pyvera/__init__.py#L471-L481
def call_service(self, service_id, action):
    """Call a Vera service.

    This will call the Vera api to change device state.
    """
    response = self.vera_request(
        id='action', serviceId=service_id, action=action)
    logger.debug(
        "call_service: result of vera_request with id %s: %s",
        service_id, response.text)
    return response
[ "def", "call_service", "(", "self", ",", "service_id", ",", "action", ")", ":", "result", "=", "self", ".", "vera_request", "(", "id", "=", "'action'", ",", "serviceId", "=", "service_id", ",", "action", "=", "action", ")", "logger", ".", "debug", "(", ...
Call a Vera service. This will call the Vera api to change device state.
[ "Call", "a", "Vera", "service", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/bam/fastq.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/fastq.py#L278-L292
def estimate_read_length(fastq_file, quality_format="fastq-sanger", nreads=1000):
    """
    estimate average read length of a fastq file

    Reads up to ``nreads + 1`` records from ``fastq_file`` and returns a
    length estimate.

    NOTE(review): the update ``average = (average + len) / 2`` is an
    exponentially-weighted mix dominated by the most recent reads, not an
    arithmetic mean — confirm this bias is acceptable to callers before
    relying on it as a true average.
    NOTE(review): an empty FASTQ file makes the first ``next()`` call raise
    StopIteration — verify callers never pass empty files.
    """
    in_handle = SeqIO.parse(open_fastq(fastq_file), quality_format)
    read = next(in_handle)
    average = len(read.seq)
    for _ in range(nreads):
        try:
            average = (average + len(next(in_handle).seq)) / 2
        except StopIteration:
            # Fewer than nreads + 1 records: stop with the estimate so far.
            break
    in_handle.close()
    return average
[ "def", "estimate_read_length", "(", "fastq_file", ",", "quality_format", "=", "\"fastq-sanger\"", ",", "nreads", "=", "1000", ")", ":", "in_handle", "=", "SeqIO", ".", "parse", "(", "open_fastq", "(", "fastq_file", ")", ",", "quality_format", ")", "read", "=",...
estimate average read length of a fastq file
[ "estimate", "average", "read", "length", "of", "a", "fastq", "file" ]
python
train
pgmpy/pgmpy
ez_setup.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/ez_setup.py#L161-L172
def download_file_powershell(url, target):
    """
    Download the file at url to target using Powershell (which will
    validate trust). Raise an exception if the command cannot complete.
    """
    target = os.path.abspath(target)
    script = (
        "(new-object System.Net.WebClient)"
        ".DownloadFile(%(url)r, %(target)r)"
        % dict(url=url, target=target)
    )
    _clean_check(['powershell', '-Command', script], target)
[ "def", "download_file_powershell", "(", "url", ",", "target", ")", ":", "target", "=", "os", ".", "path", ".", "abspath", "(", "target", ")", "cmd", "=", "[", "'powershell'", ",", "'-Command'", ",", "\"(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target...
Download the file at url to target using Powershell (which will validate trust). Raise an exception if the command cannot complete.
[ "Download", "the", "file", "at", "url", "to", "target", "using", "Powershell", "(", "which", "will", "validate", "trust", ")", ".", "Raise", "an", "exception", "if", "the", "command", "cannot", "complete", "." ]
python
train
allenai/allennlp
allennlp/data/iterators/data_iterator.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/iterators/data_iterator.py#L22-L28
def add_epoch_number(batch: Batch, epoch: int) -> Batch:
    """
    Add the epoch number to the batch instances as a MetadataField.
    """
    # One MetadataField per instance, mirroring the original behavior.
    for inst in batch.instances:
        inst.fields['epoch_num'] = MetadataField(epoch)
    return batch
[ "def", "add_epoch_number", "(", "batch", ":", "Batch", ",", "epoch", ":", "int", ")", "->", "Batch", ":", "for", "instance", "in", "batch", ".", "instances", ":", "instance", ".", "fields", "[", "'epoch_num'", "]", "=", "MetadataField", "(", "epoch", ")"...
Add the epoch number to the batch instances as a MetadataField.
[ "Add", "the", "epoch", "number", "to", "the", "batch", "instances", "as", "a", "MetadataField", "." ]
python
train
annoviko/pyclustering
pyclustering/cluster/ga.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/ga.py#L648-L671
def _get_best_chromosome(chromosomes, data, count_clusters):
    """!
    @brief Returns the current best chromosome.

    @param[in] chromosomes (list): Chromosomes that are used for searching.
    @param[in] data (list): Input data that is used for clustering process.
    @param[in] count_clusters (uint): Amount of clusters that should be allocated.

    @return (list, float, list) The best chromosome, its fitness function value and fitness function values for
             all chromosomes.

    """
    # Centers for every chromosome, then their fitness values.
    centres = ga_math.get_centres(chromosomes, data, count_clusters)
    fitness_functions = genetic_algorithm._calc_fitness_function(centres, data, chromosomes)

    # Lower fitness is better: pick the minimizer.
    best_idx = fitness_functions.argmin()
    return chromosomes[best_idx], fitness_functions[best_idx], fitness_functions
[ "def", "_get_best_chromosome", "(", "chromosomes", ",", "data", ",", "count_clusters", ")", ":", "# Calc centers", "centres", "=", "ga_math", ".", "get_centres", "(", "chromosomes", ",", "data", ",", "count_clusters", ")", "# Calc Fitness functions", "fitness_function...
! @brief Returns the current best chromosome. @param[in] chromosomes (list): Chromosomes that are used for searching. @param[in] data (list): Input data that is used for clustering process. @param[in] count_clusters (uint): Amount of clusters that should be allocated. @return (list, float, list) The best chromosome, its fitness function value and fitness function values for all chromosomes.
[ "!" ]
python
valid
googleapis/google-cloud-python
core/google/cloud/_helpers.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/core/google/cloud/_helpers.py#L603-L627
def make_insecure_stub(stub_class, host, port=None):
    """Makes an insecure stub for an RPC service.

    Uses / depends on gRPC.

    :type stub_class: type
    :param stub_class: A gRPC stub type for a given service.

    :type host: str
    :param host: The host for the service. May also include the port
                 if ``port`` is unspecified.

    :type port: int
    :param port: (Optional) The port for the service.

    :rtype: object, instance of ``stub_class``
    :returns: The stub object used to make gRPC requests to a given API.
    """
    # NOTE: This assumes port != http_client.HTTPS_PORT:
    target = host if port is None else "%s:%d" % (host, port)
    channel = grpc.insecure_channel(target)
    return stub_class(channel)
[ "def", "make_insecure_stub", "(", "stub_class", ",", "host", ",", "port", "=", "None", ")", ":", "if", "port", "is", "None", ":", "target", "=", "host", "else", ":", "# NOTE: This assumes port != http_client.HTTPS_PORT:", "target", "=", "\"%s:%d\"", "%", "(", ...
Makes an insecure stub for an RPC service. Uses / depends on gRPC. :type stub_class: type :param stub_class: A gRPC stub type for a given service. :type host: str :param host: The host for the service. May also include the port if ``port`` is unspecified. :type port: int :param port: (Optional) The port for the service. :rtype: object, instance of ``stub_class`` :returns: The stub object used to make gRPC requests to a given API.
[ "Makes", "an", "insecure", "stub", "for", "an", "RPC", "service", "." ]
python
train
TheOneHyer/arandomness
build/lib.linux-x86_64-3.6/arandomness/arandom/agenerator.py
https://github.com/TheOneHyer/arandomness/blob/ae9f630e9a1d67b0eb6d61644a49756de8a5268c/build/lib.linux-x86_64-3.6/arandomness/arandom/agenerator.py#L35-L44
def agenerator():
    """Arandom number generator"""
    # Snapshot the free-memory baseline before allocating anything.
    free_mem = psutil.virtual_memory().available
    mem_24 = 0.24 * free_mem  # ~24% of currently free memory
    mem_26 = 0.26 * free_mem  # ~26% of currently free memory
    # a and b look unused, but they intentionally hold the MemEater
    # allocations alive through the sleep below — do not remove.
    a = MemEater(int(mem_24))
    b = MemEater(int(mem_26))
    sleep(5)
    # Return (baseline, current) available memory, converted to MB.
    return free_mem/1000/1000, psutil.virtual_memory().available/1000/1000
[ "def", "agenerator", "(", ")", ":", "free_mem", "=", "psutil", ".", "virtual_memory", "(", ")", ".", "available", "mem_24", "=", "0.24", "*", "free_mem", "mem_26", "=", "0.26", "*", "free_mem", "a", "=", "MemEater", "(", "int", "(", "mem_24", ")", ")",...
Arandom number generator
[ "Arandom", "number", "generator" ]
python
train
erikvw/django-collect-offline
django_collect_offline/site_offline_models.py
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/site_offline_models.py#L71-L89
def site_models(self, app_label=None):
    """Returns a dictionary of registered models.
    """
    # Either all installed apps, or just the requested one.
    if app_label is None:
        app_configs = django_apps.get_app_configs()
    else:
        app_configs = [django_apps.get_app_config(app_label)]

    site_models = {}
    for app_config in app_configs:
        registered = [
            model for model in app_config.get_models()
            if model._meta.label_lower in self.registry
        ]
        if registered:
            registered.sort(key=lambda m: m._meta.verbose_name)
            site_models[app_config.name] = registered
    return site_models
[ "def", "site_models", "(", "self", ",", "app_label", "=", "None", ")", ":", "site_models", "=", "{", "}", "app_configs", "=", "(", "django_apps", ".", "get_app_configs", "(", ")", "if", "app_label", "is", "None", "else", "[", "django_apps", ".", "get_app_c...
Returns a dictionary of registered models.
[ "Returns", "a", "dictionary", "of", "registered", "models", "." ]
python
train
SKA-ScienceDataProcessor/integration-prototype
sip/science_pipeline_workflows/ingest_visibilities/send/async_send.py
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/science_pipeline_workflows/ingest_visibilities/send/async_send.py#L278-L304
def main():
    """Main function for SPEAD sender module."""
    # Exactly one argument: the JSON configuration string.
    if len(sys.argv) != 2:
        raise RuntimeError('Usage: python3 async_send.py <json config>')

    sip_logging.init_logger(show_thread=False)

    spead_config = json.loads(sys.argv[1])
    try:
        here = os.path.dirname(os.path.abspath(__file__))
        with open(os.path.join(here, 'config_schema.json')) as schema_file:
            schema = json.load(schema_file)
        validate(spead_config, schema)
    except ValidationError as error:
        print(error.cause)
        raise

    # Set up the SPEAD sender and run it.
    sender = SpeadSender(spead_config)
    sender.run()
[ "def", "main", "(", ")", ":", "# Check command line arguments.", "if", "len", "(", "sys", ".", "argv", ")", "!=", "2", ":", "raise", "RuntimeError", "(", "'Usage: python3 async_send.py <json config>'", ")", "# Set up logging.", "sip_logging", ".", "init_logger", "("...
Main function for SPEAD sender module.
[ "Main", "function", "for", "SPEAD", "sender", "module", "." ]
python
train
opencobra/memote
memote/support/basic.py
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/basic.py#L236-L242
def is_constrained_reaction(model, rxn):
    """Return whether a reaction has fixed constraints."""
    lb, ub = helpers.find_bounds(model)
    if rxn.reversibility:
        return rxn.lower_bound > lb or rxn.upper_bound < ub
    # Irreversible reactions are constrained below at zero by definition.
    return rxn.lower_bound > 0 or rxn.upper_bound < ub
[ "def", "is_constrained_reaction", "(", "model", ",", "rxn", ")", ":", "lower_bound", ",", "upper_bound", "=", "helpers", ".", "find_bounds", "(", "model", ")", "if", "rxn", ".", "reversibility", ":", "return", "rxn", ".", "lower_bound", ">", "lower_bound", "...
Return whether a reaction has fixed constraints.
[ "Return", "whether", "a", "reaction", "has", "fixed", "constraints", "." ]
python
train
PaloAltoNetworks/pancloud
pancloud/logging.py
https://github.com/PaloAltoNetworks/pancloud/blob/c51e4c8aca3c988c60f062291007534edcb55285/pancloud/logging.py#L199-L296
def xpoll(self, query_id=None, sequence_no=None, params=None,
          delete_query=True, **kwargs):  # pragma: no cover
    """Retrieve individual logs iteratively in a non-greedy manner.

    Generator function to return individual log entries from poll
    API request.

    Args:
        params (dict): Payload/request dictionary.
        query_id (str): Specifies the ID of the query job.
        sequence_no (int): Specifies the sequenceNo.
        delete_query (bool): True for delete, False otherwise.
        **kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request`
            parameters.

    Yields:
        dictionary with single log entry.
    """
    def _delete(query_id, **kwargs):
        # Delete the finished query job and validate the HTTP response.
        r = self.delete(query_id, **kwargs)
        try:
            r_json = r.json()
        except ValueError as e:
            raise PanCloudError('Invalid JSON: %s' % e)
        if not (200 <= r.status_code < 300):
            if 'errorCode' in r_json and 'errorMessage' in r_json:
                raise PanCloudError('%s: %s' % (r_json['errorCode'],
                                                r_json['errorMessage']))
            else:
                raise PanCloudError('%s %s' % (r.status_code, r.reason))
        if r.status_code == 200:
            return
        else:
            raise PanCloudError('delete: status_code: %d' % r.status_code)

    while True:
        r = self.poll(query_id, sequence_no, params, **kwargs)
        try:
            r_json = r.json()
        except ValueError as e:
            raise PanCloudError('Invalid JSON: %s' % e)
        if not (200 <= r.status_code < 300):
            if 'errorCode' in r_json and 'errorMessage' in r_json:
                raise PanCloudError('%s: %s' % (r_json['errorCode'],
                                                r_json['errorMessage']))
            else:
                raise PanCloudError('%s %s' % (r.status_code, r.reason))
        if 'queryStatus' not in r_json:
            self._debug(r_json)
            raise PanCloudError('no "queryStatus" in response')
        self._debug(r_json['queryStatus'])
        if r_json['queryStatus'] in ['FINISHED', 'JOB_FINISHED']:
            try:
                hits = r_json['result']['esResult']['hits']['hits']
            except KeyError as e:
                # BUGFIX: the original format string had no conversion
                # specifier ('no "hits" in response' % e), so the `%`
                # operation itself raised TypeError instead of the
                # intended PanCloudError.
                raise PanCloudError('no "hits" in response: %s' % e)
            self._debug('hits: %d', len(hits))
            for x in hits:
                yield x
            if r_json['queryStatus'] == 'JOB_FINISHED':
                if delete_query:
                    _delete(query_id, **kwargs)
                return
            # Advance to the next page of results.
            if sequence_no is not None:
                sequence_no += 1
            else:
                sequence_no = 1
        elif r_json['queryStatus'] == 'JOB_FAILED':
            e = '%s' % r_json['queryStatus']
            try:
                e += ': %s' % r_json['result']['esResult']['error']
            except KeyError:
                self._debug(r_json)
            raise PanCloudError(e)
        elif r_json['queryStatus'] == 'RUNNING':
            if params is not None and 'maxWaitTime' in params:
                # Server-side long polling is in effect; poll immediately.
                pass
            else:
                # XXX
                time.sleep(1)
        else:
            raise PanCloudError('Bad queryStatus: %s' %
                                r_json['queryStatus'])
[ "def", "xpoll", "(", "self", ",", "query_id", "=", "None", ",", "sequence_no", "=", "None", ",", "params", "=", "None", ",", "delete_query", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# pragma: no cover", "def", "_delete", "(", "query_id", ",", "...
Retrieve individual logs iteratively in a non-greedy manner. Generator function to return individual log entries from poll API request. Args: params (dict): Payload/request dictionary. query_id (str): Specifies the ID of the query job. sequence_no (int): Specifies the sequenceNo. delete_query (bool): True for delete, False otherwise. **kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters. Yields: dictionary with single log entry.
[ "Retrieve", "individual", "logs", "iteratively", "in", "a", "non", "-", "greedy", "manner", "." ]
python
train
pypa/pipenv
pipenv/vendor/click/core.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/click/core.py#L444-L454
def command_path(self):
    """The computed command path.  This is used for the ``usage``
    information on the help page.  It's automatically created by
    combining the info names of the chain of contexts to the root.
    """
    path = self.info_name if self.info_name is not None else ''
    if self.parent is not None:
        path = self.parent.command_path + ' ' + path
    return path.lstrip()
[ "def", "command_path", "(", "self", ")", ":", "rv", "=", "''", "if", "self", ".", "info_name", "is", "not", "None", ":", "rv", "=", "self", ".", "info_name", "if", "self", ".", "parent", "is", "not", "None", ":", "rv", "=", "self", ".", "parent", ...
The computed command path. This is used for the ``usage`` information on the help page. It's automatically created by combining the info names of the chain of contexts to the root.
[ "The", "computed", "command", "path", ".", "This", "is", "used", "for", "the", "usage", "information", "on", "the", "help", "page", ".", "It", "s", "automatically", "created", "by", "combining", "the", "info", "names", "of", "the", "chain", "of", "contexts...
python
train
kensho-technologies/graphql-compiler
graphql_compiler/compiler/expressions.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/expressions.py#L918-L945
def to_match(self):
    """Return a unicode object with the MATCH representation of this TernaryConditional."""
    self.validate()

    # For MATCH, an additional validation step is needed -- we currently do not support
    # emitting MATCH code for TernaryConditional that contains another TernaryConditional
    # anywhere within the predicate expression. This is because the predicate expression
    # must be surrounded in quotes, and it is unclear whether nested/escaped quotes would work.
    def visitor_fn(expression):
        """Visitor function that ensures the predicate does not contain TernaryConditionals."""
        if isinstance(expression, TernaryConditional):
            raise ValueError(u'Cannot emit MATCH code for TernaryConditional that contains '
                             u'in its predicate another TernaryConditional: '
                             u'{} {}'.format(expression, self))
        return expression

    # visit_and_update() is used purely for its traversal here: visitor_fn
    # returns every expression unchanged and only raises on violations.
    self.predicate.visit_and_update(visitor_fn)

    format_spec = u'if(eval("%(predicate)s"), %(if_true)s, %(if_false)s)'
    predicate_string = self.predicate.to_match()
    # The predicate is embedded inside the double quotes of if(eval("...")),
    # so any double quote in it would terminate the eval string early.
    if u'"' in predicate_string:
        raise AssertionError(u'Found a double-quote within the predicate string, this would '
                             u'have terminated the if(eval()) early and should be fixed: '
                             u'{} {}'.format(predicate_string, self))

    return format_spec % dict(predicate=predicate_string,
                              if_true=self.if_true.to_match(),
                              if_false=self.if_false.to_match())
[ "def", "to_match", "(", "self", ")", ":", "self", ".", "validate", "(", ")", "# For MATCH, an additional validation step is needed -- we currently do not support", "# emitting MATCH code for TernaryConditional that contains another TernaryConditional", "# anywhere within the predicate expr...
Return a unicode object with the MATCH representation of this TernaryConditional.
[ "Return", "a", "unicode", "object", "with", "the", "MATCH", "representation", "of", "this", "TernaryConditional", "." ]
python
train
susam/ice
ice.py
https://github.com/susam/ice/blob/532e685c504ea96f9e42833594585159ac1d2068/ice.py#L172-L194
def error(self, status=None):
    """Return a decorator that registers an error-page callback.

    The decorated callable is stored in the error-handler table keyed by
    *status*.  When *status* is omitted (``None``), the callable acts as
    the fallback handler, invoked for any HTTP error response whose code
    has no explicitly registered handler.

    Arguments:
      status(int, optional): HTTP response status code.

    Returns:
      function: Decorator that registers its argument and returns it
      unchanged.
    """
    def register(callback):
        # Key None doubles as the fallback-handler slot.
        self._error_handlers[status] = callback
        return callback
    return register
[ "def", "error", "(", "self", ",", "status", "=", "None", ")", ":", "def", "decorator", "(", "callback", ")", ":", "self", ".", "_error_handlers", "[", "status", "]", "=", "callback", "return", "callback", "return", "decorator" ]
Decorator to add a callback that generates error page. The *status* parameter specifies the HTTP response status code for which the decorated callback should be invoked. If the *status* argument is not specified, then the decorated callable is considered to be a fallback callback. A fallback callback, when defined, is invoked to generate the error page for any HTTP response representing an error when there is no error handler defined explicitly for the response code of the HTTP response. Arguments: status(int, optional): HTTP response status code. Returns: function: Decorator function to add error handler.
[ "Decorator", "to", "add", "a", "callback", "that", "generates", "error", "page", "." ]
python
test
KnightConan/sspdatatables
src/sspdatatables/utils/data_type_ensure.py
https://github.com/KnightConan/sspdatatables/blob/1179a11358734e5e472e5eee703e8d34fa49e9bf/src/sspdatatables/utils/data_type_ensure.py#L7-L27
def ensure(data_type, check_value, default_value=None):
    """Coerce ``check_value`` to ``data_type``, falling back to a default.

    If ``check_value`` is already an instance of ``data_type`` it is
    returned unchanged; otherwise a conversion via
    ``data_type(check_value)`` is attempted.  When the conversion fails,
    ``default_value`` is returned instead.

    :param data_type: target type: ``int``, ``str``, ``list``, ... or any
        user-defined type whose constructor accepts a single value
    :param check_value: the value to check/convert
    :param default_value: value returned when conversion fails; must itself
        be an instance of ``data_type`` (or ``None``)
    :return: the (possibly converted) value, or ``default_value``
    :raises ValueError: if ``default_value`` is neither ``None`` nor an
        instance of ``data_type``
    """
    if default_value is not None and not isinstance(default_value, data_type):
        raise ValueError("default_value must be the value in the given data "
                         "type.")
    if isinstance(check_value, data_type):
        return check_value
    try:
        return data_type(check_value)
    # Fix: was a bare ``except:``, which also swallowed SystemExit and
    # KeyboardInterrupt; catch only genuine conversion errors.
    except Exception:
        return default_value
[ "def", "ensure", "(", "data_type", ",", "check_value", ",", "default_value", "=", "None", ")", ":", "if", "default_value", "is", "not", "None", "and", "not", "isinstance", "(", "default_value", ",", "data_type", ")", ":", "raise", "ValueError", "(", "\"defau...
function to ensure the given check value is in the given data type, if yes, return the check value directly, otherwise return the default value :param data_type: different data type: can be int, str, list, tuple etc, must be python supportable data type or new defined data type :param check_value: different value: the value to check :param default_value: None/ different value: provide the default value :return: check value or default value
[ "function", "to", "ensure", "the", "given", "check", "value", "is", "in", "the", "given", "data", "type", "if", "yes", "return", "the", "check", "value", "directly", "otherwise", "return", "the", "default", "value" ]
python
train
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/fda3bb39181437b6b8a0aa0185f21ae5f14385dd/autopep8.py#L1882-L1948
def _shorten_line(tokens, source, indentation, indent_word,
                  aggressive=False, previous_line=''):
    """Separate line at OPERATOR.

    The input is expected to be free of newlines except for inside
    multiline strings and at the end.

    Multiple candidates will be yielded.
    """
    for (token_type,
         token_string,
         start_offset,
         end_offset) in token_offsets(tokens):
        if (
            token_type == tokenize.COMMENT and
            not is_probably_part_of_multiline(previous_line) and
            not is_probably_part_of_multiline(source) and
            # Directive comments (noqa/pragma/pylint) must stay on the
            # line they annotate.
            not source[start_offset + 1:].strip().lower().startswith(
                ('noqa', 'pragma:', 'pylint:'))
        ):
            # Move inline comments to previous line.
            first = source[:start_offset]
            second = source[start_offset:]
            yield (indentation + second.strip() + '\n' +
                   indentation + first.strip() + '\n')
        elif token_type == token.OP and token_string != '=':
            # Don't break on '=' after keyword as this violates PEP 8.
            assert token_type != token.INDENT

            first = source[:end_offset]

            # Pick the continuation indent: align with an open paren when
            # one is present, otherwise use one extra indent level.
            second_indent = indentation
            if (first.rstrip().endswith('(') and
                    source[end_offset:].lstrip().startswith(')')):
                pass
            elif first.rstrip().endswith('('):
                second_indent += indent_word
            elif '(' in first:
                second_indent += ' ' * (1 + first.find('('))
            else:
                second_indent += indent_word

            second = (second_indent + source[end_offset:].lstrip())
            if (
                not second.strip() or
                second.lstrip().startswith('#')
            ):
                continue

            # Do not begin a line with a comma
            if second.lstrip().startswith(','):
                continue
            # Do end a line with a dot
            if first.rstrip().endswith('.'):
                continue
            # Arithmetic operators need an explicit backslash continuation.
            if token_string in '+-*/':
                fixed = first + ' \\' + '\n' + second
            else:
                fixed = first + '\n' + second

            # Only fix if syntax is okay.
            if check_syntax(normalize_multiline(fixed)
                            if aggressive else fixed):
                yield indentation + fixed
[ "def", "_shorten_line", "(", "tokens", ",", "source", ",", "indentation", ",", "indent_word", ",", "aggressive", "=", "False", ",", "previous_line", "=", "''", ")", ":", "for", "(", "token_type", ",", "token_string", ",", "start_offset", ",", "end_offset", "...
Separate line at OPERATOR. The input is expected to be free of newlines except for inside multiline strings and at the end. Multiple candidates will be yielded.
[ "Separate", "line", "at", "OPERATOR", "." ]
python
train
Kronuz/pyScss
scss/compiler.py
https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/compiler.py#L427-L431
def _at_dump_functions(self, calculator, rule, scope, block): """ Implements @dump_functions """ sys.stderr.write("%s\n" % repr(rule.namespace._functions))
[ "def", "_at_dump_functions", "(", "self", ",", "calculator", ",", "rule", ",", "scope", ",", "block", ")", ":", "sys", ".", "stderr", ".", "write", "(", "\"%s\\n\"", "%", "repr", "(", "rule", ".", "namespace", ".", "_functions", ")", ")" ]
Implements @dump_functions
[ "Implements" ]
python
train
saltstack/salt
salt/modules/gpg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gpg.py#L1120-L1223
def encrypt(user=None,
            recipients=None,
            text=None,
            filename=None,
            output=None,
            sign=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Encrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    recipients
        The fingerprints for those recipient whom the data is being encrypted for.

    text
        The text to encrypt.

    filename
        The filename to encrypt.

    output
        The filename where the signed file will be written, default is standard out.

    sign
        Whether to sign, in addition to encrypt, the data. ``True`` to use
        default key or fingerprint to specify a different key to sign with.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    bare
        If ``True``, return the (armored) encrypted block as a string without
        the standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.encrypt text='Hello there.  How are you?'

        salt '*' gpg.encrypt filename='/path/to/important.file'

        salt '*' gpg.encrypt filename='/path/to/important.file' use_passphrase=True
    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)

    # Pull the signing passphrase from pillar when requested; fail loudly
    # if it is not configured there.
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None

    if text:
        result = gpg.encrypt(text, recipients, passphrase=gpg_passphrase)
    elif filename:
        if GPG_1_3_1:
            # This version does not allow us to encrypt using the
            # file stream -- have to read in the contents and encrypt.
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                _contents = _fp.read()
            result = gpg.encrypt(_contents, recipients, passphrase=gpg_passphrase, output=output)
        else:
            # This version allows encrypting the file stream
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                if output:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, output=output, sign=sign)
                else:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, sign=sign)
    else:
        raise SaltInvocationError('filename or text must be passed.')

    # Shape the return: ``bare`` callers get the raw armored data (or
    # False on failure); everyone else gets the res/comment dict.
    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Encrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
[ "def", "encrypt", "(", "user", "=", "None", ",", "recipients", "=", "None", ",", "text", "=", "None", ",", "filename", "=", "None", ",", "output", "=", "None", ",", "sign", "=", "None", ",", "use_passphrase", "=", "False", ",", "gnupghome", "=", "Non...
Encrypt a message or file user Which user's keychain to access, defaults to user Salt is running as. Passing the user as ``salt`` will set the GnuPG home directory to the ``/etc/salt/gpgkeys``. recipients The fingerprints for those recipient whom the data is being encrypted for. text The text to encrypt. filename The filename to encrypt. output The filename where the signed file will be written, default is standard out. sign Whether to sign, in addition to encrypt, the data. ``True`` to use default key or fingerprint to specify a different key to sign with. use_passphrase Whether to use a passphrase with the signing key. Passphrase is received from Pillar. gnupghome Specify the location where GPG keyring and related files are stored. bare If ``True``, return the (armored) encrypted block as a string without the standard comment/res dict. CLI Example: .. code-block:: bash salt '*' gpg.encrypt text='Hello there. How are you?' salt '*' gpg.encrypt filename='/path/to/important.file' salt '*' gpg.encrypt filename='/path/to/important.file' use_passphrase=True
[ "Encrypt", "a", "message", "or", "file" ]
python
train
Brightmd/TxPx
txpx/process.py
https://github.com/Brightmd/TxPx/blob/403c18b3006fc68842ec05b259e8611fe80763aa/txpx/process.py#L128-L153
def background(cl, proto=EchoProcess, **kw):
    """
    Use the reactor to run a process in the background. Keep the pid around.

    ``proto'' may be any callable which returns an instance of ProcessProtocol

    :param cl: command line -- either a string (split with shlex) or an
        argv-style list whose first element is the executable
    :param proto: callable producing the ProcessProtocol attached to the
        spawned process; it is called with ``name`` and ``deferred`` kwargs
    :param kw: extra keyword arguments forwarded to ``reactor.spawnProcess``
    :return: the Deferred handed to the protocol
    """
    # Accept a plain command string as well as a pre-split argv list.
    # NOTE(review): ``basestring`` implies this module targets Python 2.
    if isinstance(cl, basestring):
        cl = shlex.split(cl)

    # Resolve a bare program name to an absolute path via PATH lookup;
    # spawnProcess needs a concrete executable path.
    if not cl[0].startswith('/'):
        path = which(cl[0])
        assert path, '%s not found' % cl[0]
        cl[0] = path[0]

    d = Deferred()
    proc = reactor.spawnProcess(
        proto(name=basename(cl[0]), deferred=d),
        cl[0],
        cl,
        env=os.environ,
        **kw)
    # Remember the child pid ("keep the pid around") for later cleanup.
    daycare.add(proc.pid)
    return d
[ "def", "background", "(", "cl", ",", "proto", "=", "EchoProcess", ",", "*", "*", "kw", ")", ":", "if", "isinstance", "(", "cl", ",", "basestring", ")", ":", "cl", "=", "shlex", ".", "split", "(", "cl", ")", "if", "not", "cl", "[", "0", "]", "."...
Use the reactor to run a process in the background. Keep the pid around. ``proto'' may be any callable which returns an instance of ProcessProtocol
[ "Use", "the", "reactor", "to", "run", "a", "process", "in", "the", "background", "." ]
python
train
westurner/pyrpo
pyrpo/pyrpo.py
https://github.com/westurner/pyrpo/blob/2a910af055dc405b761571a52ef87842397ddadf/pyrpo/pyrpo.py#L1986-L2026
def get_option_parser():
    """
    Build an ``optparse.OptionParser`` for pyrpo commandline use
    """
    import optparse
    usage = ("$0 pyrpo [-h] [-v] [-q] [-s .] "
             "[-r <report>] [--thg]")
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('-s', '--scan', dest='scan', action='append',
                      default=[], help='Path(s) to scan for repositories')
    parser.add_option('-r', '--report', dest='reports', action='append',
                      default=[],
                      help=("""origin, status, full, gitmodule, json, sh, """
                            """str, pip, hgsub"""))
    parser.add_option('--thg', dest='thg_report', action='store_true',
                      help='Write a thg-reporegistry.xml file to stdout')
    parser.add_option('--template', dest='report_template', action='store',
                      help='Report template')
    parser.add_option('-v', '--verbose', dest='verbose', action='store_true')
    parser.add_option('-q', '--quiet', dest='quiet', action='store_true')
    return parser
[ "def", "get_option_parser", "(", ")", ":", "import", "optparse", "prs", "=", "optparse", ".", "OptionParser", "(", "usage", "=", "(", "\"$0 pyrpo [-h] [-v] [-q] [-s .] \"", "\"[-r <report>] [--thg]\"", ")", ")", "prs", ".", "add_option", "(", "'-s'", ",", "'--scan...
Build an ``optparse.OptionParser`` for pyrpo commandline use
[ "Build", "an", "optparse", ".", "OptionParser", "for", "pyrpo", "commandline", "use" ]
python
train
pmacosta/peng
peng/functions.py
https://github.com/pmacosta/peng/blob/976935377adaa3de26fc5677aceb2cdfbd6f93a7/peng/functions.py#L599-L629
def peng_power(snum):
    r"""
    Return engineering suffix and its floating point equivalent of a number.

    :py:func:`peng.peng` lists the correspondence between suffix and
    floating point exponent.

    :param snum: Number
    :type  snum: :ref:`EngineeringNotationNumber`

    :rtype: named tuple in which the first item is the engineering suffix
     (a space when the number carries no suffix) and the second item is the
     floating point equivalent of the suffix when the number is represented
     in engineering notation

    For example:

        >>> import peng
        >>> peng.peng_power(peng.peng(1235.6789E3, 3, False))
        EngPower(suffix='M', exp=1000000.0)
    """
    last_char = snum[-1]
    # A trailing digit means the number has no suffix; the blank suffix
    # maps to an exponent of 1 in the lookup table.
    suffix = last_char if not last_char.isdigit() else " "
    return EngPower(suffix, _SUFFIX_POWER_DICT[suffix])
[ "def", "peng_power", "(", "snum", ")", ":", "suffix", "=", "\" \"", "if", "snum", "[", "-", "1", "]", ".", "isdigit", "(", ")", "else", "snum", "[", "-", "1", "]", "return", "EngPower", "(", "suffix", ",", "_SUFFIX_POWER_DICT", "[", "suffix", "]", ...
r""" Return engineering suffix and its floating point equivalent of a number. :py:func:`peng.peng` lists the correspondence between suffix and floating point exponent. :param snum: Number :type snum: :ref:`EngineeringNotationNumber` :rtype: named tuple in which the first item is the engineering suffix and the second item is the floating point equivalent of the suffix when the number is represented in engineering notation. .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]] .. Auto-generated exceptions documentation for .. peng.functions.peng_power :raises: RuntimeError (Argument \`snum\` is not valid) .. [[[end]]] For example: >>> import peng >>> peng.peng_power(peng.peng(1235.6789E3, 3, False)) EngPower(suffix='M', exp=1000000.0)
[ "r", "Return", "engineering", "suffix", "and", "its", "floating", "point", "equivalent", "of", "a", "number", "." ]
python
test
google/grumpy
third_party/pythonparser/parser.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pythonparser/parser.py#L1296-L1300
def with_stmt__27(self, with_loc, items, colon_loc, body):
    """(2.7, 3.1-) with_stmt: 'with' with_item (',' with_item)* ':' suite"""
    # The node spans from the 'with' keyword through the end of the last
    # statement of the suite.
    full_loc = with_loc.join(body[-1].loc)
    return ast.With(items=items, body=body, keyword_loc=with_loc,
                    colon_loc=colon_loc, loc=full_loc)
[ "def", "with_stmt__27", "(", "self", ",", "with_loc", ",", "items", ",", "colon_loc", ",", "body", ")", ":", "return", "ast", ".", "With", "(", "items", "=", "items", ",", "body", "=", "body", ",", "keyword_loc", "=", "with_loc", ",", "colon_loc", "=",...
(2.7, 3.1-) with_stmt: 'with' with_item (',' with_item)* ':' suite
[ "(", "2", ".", "7", "3", ".", "1", "-", ")", "with_stmt", ":", "with", "with_item", "(", "with_item", ")", "*", ":", "suite" ]
python
valid
programa-stic/barf-project
barf/core/smt/smttranslator.py
https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/core/smt/smttranslator.py#L575-L586
def _translate_bisz(self, oprnd1, oprnd2, oprnd3):
    """Return a formula representation of a BISZ instruction.

    The destination gets constant 1 when the first source operand equals
    zero and constant 0 otherwise; ``oprnd2`` is unused here.

    :param oprnd1: source operand whose value is tested against zero
    :param oprnd2: unused by this instruction
    :param oprnd3: destination operand
    :return: list of SMT constraints binding the destination variable
    """
    assert oprnd1.size and oprnd3.size

    op1_var = self._translate_src_oprnd(oprnd1)
    op3_var, op3_var_constrs = self._translate_dst_oprnd(oprnd3)

    # ite(width, cond, then, else): destination-width constant 1 when the
    # source is zero, else constant 0.
    result = smtfunction.ite(oprnd3.size, op1_var == 0x0, smtsymbol.Constant(oprnd3.size, 0x1), smtsymbol.Constant(oprnd3.size, 0x0))

    # The destination equality plus any constraints produced while
    # translating the destination operand itself.
    return [op3_var == result] + op3_var_constrs
[ "def", "_translate_bisz", "(", "self", ",", "oprnd1", ",", "oprnd2", ",", "oprnd3", ")", ":", "assert", "oprnd1", ".", "size", "and", "oprnd3", ".", "size", "op1_var", "=", "self", ".", "_translate_src_oprnd", "(", "oprnd1", ")", "op3_var", ",", "op3_var_c...
Return a formula representation of a BISZ instruction.
[ "Return", "a", "formula", "representation", "of", "a", "BISZ", "instruction", "." ]
python
train
OpenHumans/open-humans-api
ohapi/public.py
https://github.com/OpenHumans/open-humans-api/blob/ca2a28cf5d55cfdae13dd222ba58c25565bdb86e/ohapi/public.py#L175-L183
def get_members_by_source(base_url=BASE_URL_API):
    """
    Return which members have joined each activity.

    :param base_url: It is URL:
        `https://www.openhumans.org/api/public-data`.
    """
    endpoint = '{}members-by-source/'.format(base_url)
    return get_page(endpoint)
[ "def", "get_members_by_source", "(", "base_url", "=", "BASE_URL_API", ")", ":", "url", "=", "'{}members-by-source/'", ".", "format", "(", "base_url", ")", "response", "=", "get_page", "(", "url", ")", "return", "response" ]
Function returns which members have joined each activity. :param base_url: It is URL: `https://www.openhumans.org/api/public-data`.
[ "Function", "returns", "which", "members", "have", "joined", "each", "activity", "." ]
python
train
adamcharnock/django-hordak
hordak/utilities/currency.py
https://github.com/adamcharnock/django-hordak/blob/0ffcad1d3b388b860c8c47fde12aa40df213066f/hordak/utilities/currency.py#L246-L260
def get_rate(self, currency, date):
    """Get the exchange rate for ``currency`` against ``_INTERNAL_CURRENCY``

    If implementing your own backend, you should probably override
    :meth:`_get_rate()` rather than this.
    """
    # The internal currency always trades 1:1 against itself.
    if str(currency) == defaults.INTERNAL_CURRENCY:
        return Decimal(1)
    cached_rate = cache.get(_cache_key(currency, date))
    if not cached_rate:
        # Cache miss -- expect self._get_rate() to implement caching
        return Decimal(self._get_rate(currency, date))
    return Decimal(cached_rate)
[ "def", "get_rate", "(", "self", ",", "currency", ",", "date", ")", ":", "if", "str", "(", "currency", ")", "==", "defaults", ".", "INTERNAL_CURRENCY", ":", "return", "Decimal", "(", "1", ")", "cached", "=", "cache", ".", "get", "(", "_cache_key", "(", ...
Get the exchange rate for ``currency`` against ``_INTERNAL_CURRENCY`` If implementing your own backend, you should probably override :meth:`_get_rate()` rather than this.
[ "Get", "the", "exchange", "rate", "for", "currency", "against", "_INTERNAL_CURRENCY" ]
python
train
rwl/pylon
pylon/solver.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/solver.py#L515-L539
def _f(self, x, user_data=None):
    """ Evaluates the objective function.

    :param x: full optimisation vector; generator set-points are read from
        the ``_Pg``/``_Qg`` index slices
    :param user_data: unused (IPOPT callback signature)
    :return: total generation cost (polynomial plus piecewise-linear terms)
    """
    p_gen = x[self._Pg.i1:self._Pg.iN + 1] # Active generation in p.u.
    q_gen = x[self._Qg.i1:self._Qg.iN + 1] # Reactive generation in p.u.

    # Polynomial cost of P and Q.
    # Costs are evaluated in MW/MVAr, so rescale from per-unit first.
    xx = r_[p_gen, q_gen] * self._base_mva
    if len(self._ipol) > 0:
        f = sum([g.total_cost(xx[i]) for i,g in enumerate(self._gn)])
    else:
        f = 0

    # Piecewise linear cost of P and Q.
    if self._ny:
        y = self.om.get_var("y")
        # Selector row vector picking the "y" cost variables out of x.
        # Cached on self for reuse by the gradient evaluation.
        self._ccost = csr_matrix((ones(self._ny),
            (range(y.i1, y.iN + 1), zeros(self._ny))),
            shape=(self._nxyz, 1)).T
        f = f + self._ccost * x
    else:
        self._ccost = zeros((1, self._nxyz))
    # TODO: Generalised cost term.

    return f
[ "def", "_f", "(", "self", ",", "x", ",", "user_data", "=", "None", ")", ":", "p_gen", "=", "x", "[", "self", ".", "_Pg", ".", "i1", ":", "self", ".", "_Pg", ".", "iN", "+", "1", "]", "# Active generation in p.u.", "q_gen", "=", "x", "[", "self", ...
Evaluates the objective function.
[ "Evaluates", "the", "objective", "function", "." ]
python
train
limix/chiscore
chiscore/_davies.py
https://github.com/limix/chiscore/blob/c3e774648b8166a5161df86e6bcad34af3cec95c/chiscore/_davies.py#L10-L34
def davies_pvalue(q, w):
    r"""Joint significance of statistics derived from chi2-squared distributions.

    Parameters
    ----------
    q : float
        Test statistics.
    w : array_like
        Weights of the linear combination.

    Returns
    -------
    float
        Estimated p-value.
    """
    # Normalise inputs to float ndarrays (q may be scalar).
    q = asarray(atleast_1d(q), float)
    w = asarray(w, float)

    re = _pvalue_lambda(_lambda(w), q)
    # NOTE(review): ``param`` is assembled but never returned or stored.
    # Its only effect is to dereference the Liu p-value and convergence
    # flag, which raises if those entries are missing -- confirm whether
    # that implicit validation is intentional before removing it.
    param = dict()
    param["liu_pval"] = re["p_val_liu"][0]
    param["Is_Converged"] = re["is_converge"][0]
    return re["p_value"][0]
[ "def", "davies_pvalue", "(", "q", ",", "w", ")", ":", "q", "=", "asarray", "(", "atleast_1d", "(", "q", ")", ",", "float", ")", "w", "=", "asarray", "(", "w", ",", "float", ")", "re", "=", "_pvalue_lambda", "(", "_lambda", "(", "w", ")", ",", "...
r"""Joint significance of statistics derived from chi2-squared distributions. Parameters ---------- q : float Test statistics. w : array_like Weights of the linear combination. Returns ------- float Estimated p-value.
[ "r", "Joint", "significance", "of", "statistics", "derived", "from", "chi2", "-", "squared", "distributions", "." ]
python
train
thombashi/SimpleSQLite
simplesqlite/core.py
https://github.com/thombashi/SimpleSQLite/blob/b16f212132b9b98773e68bf7395abc2f60f56fe5/simplesqlite/core.py#L1502-L1522
def close(self):
    """
    Commit and close the connection.

    .. seealso:: :py:meth:`sqlite3.Connection.close`
    """
    # A delayed-connection path with no live connection means nothing was
    # ever actually opened; just reset connection state and bail out.
    if self.__delayed_connection_path and self.__connection is None:
        self.__initialize_connection()
        return

    try:
        self.check_connection()
    except (SystemError, NullDatabaseConnectionError):
        # No usable connection -- nothing to commit or close.
        return

    logger.debug("close connection to a SQLite database: path='{}'".format(self.database_path))
    # Flush pending changes before closing, then reset internal state so
    # the object can be reused.
    self.commit()
    self.connection.close()
    self.__initialize_connection()
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "__delayed_connection_path", "and", "self", ".", "__connection", "is", "None", ":", "self", ".", "__initialize_connection", "(", ")", "return", "try", ":", "self", ".", "check_connection", "(", ")", ...
Commit and close the connection. .. seealso:: :py:meth:`sqlite3.Connection.close`
[ "Commit", "and", "close", "the", "connection", "." ]
python
train
toumorokoshi/sprinter
sprinter/core/globals.py
https://github.com/toumorokoshi/sprinter/blob/846697a7a087e69c61d075232e754d6975a64152/sprinter/core/globals.py#L117-L135
def _configure_shell(config):
    """ Checks and queries values for the shell

    Prompts the user for the shells/environments sprinter should inject
    into and records a 'true'/'false' flag per shell in the [shell]
    section of *config*.
    """
    # Short-circuit idiom: create the section only when it is missing.
    config.has_section('shell') or config.add_section('shell')
    logger.info(
        "What shells or environments would you like sprinter to work with?\n"
        "(Sprinter will not try to inject into environments not specified here.)\n"
        "If you specify 'gui', sprinter will attempt to inject it's state into graphical programs as well.\n"
        "i.e. environment variables sprinter set will affect programs as well, not just shells\n"
        "WARNING: injecting into the GUI can be very dangerous. it usually requires a restart\n"
        " to modify any environmental configuration."
    )
    # Menu entries are numbered from 1; answering "0" selects everything.
    environments = list(enumerate(sorted(SHELL_CONFIG), start=1))
    logger.info("[0]: All, " + ", ".join(["[%d]: %s" % (index, val) for index, val in environments]))
    desired_environments = lib.prompt("type the environment, comma-separated", default="0")
    # NOTE(review): membership is a substring test on the raw reply, so an
    # answer like "10" would also enable entry "1" -- confirm intended.
    for index, val in environments:
        if str(index) in desired_environments or "0" in desired_environments:
            config.set('shell', val, 'true')
        else:
            config.set('shell', val, 'false')
[ "def", "_configure_shell", "(", "config", ")", ":", "config", ".", "has_section", "(", "'shell'", ")", "or", "config", ".", "add_section", "(", "'shell'", ")", "logger", ".", "info", "(", "\"What shells or environments would you like sprinter to work with?\\n\"", "\"(...
Checks and queries values for the shell
[ "Checks", "and", "queries", "values", "for", "the", "shell" ]
python
train
nirum/descent
descent/objectives.py
https://github.com/nirum/descent/blob/074c8452f15a0da638668a4fe139fde06ccfae7f/descent/objectives.py#L209-L217
def styblinski_tang(theta):
    """Styblinski-Tang function

    Evaluates the two-dimensional Styblinski-Tang test function at
    ``theta = (x, y)`` and returns ``(objective, gradient)``, where the
    gradient is the analytic derivative as a numpy array.
    """
    x, y = theta

    def term(v):
        # Per-coordinate contribution before the global 0.5 scaling.
        return v ** 4 - 16 * v ** 2 + 5 * v

    obj = 0.5 * (term(x) + term(y))
    grad = np.array([
        2 * x ** 3 - 16 * x + 2.5,
        2 * y ** 3 - 16 * y + 2.5,
    ])
    return obj, grad
[ "def", "styblinski_tang", "(", "theta", ")", ":", "x", ",", "y", "=", "theta", "obj", "=", "0.5", "*", "(", "x", "**", "4", "-", "16", "*", "x", "**", "2", "+", "5", "*", "x", "+", "y", "**", "4", "-", "16", "*", "y", "**", "2", "+", "5...
Styblinski-Tang function
[ "Styblinski", "-", "Tang", "function" ]
python
valid
jeremylow/pyshk
pyshk/models.py
https://github.com/jeremylow/pyshk/blob/3ab92f6706397cde7a18367266eba9e0f1ada868/pyshk/models.py#L522-L551
def NewFromJSON(data):
    """
    Create a new SharedFile instance from a JSON dict.

        Args:
            data (dict): JSON dictionary representing a SharedFile.

        Returns:
            A SharedFile instance.
    """
    # Missing scalar fields default to None, counters to 0, flags to False.
    return SharedFile(
        sharekey=data.get('sharekey', None),
        name=data.get('name', None),
        # The nested 'user' object is itself deserialized into a User.
        user=User.NewFromJSON(data.get('user', None)),
        title=data.get('title', None),
        description=data.get('description', None),
        posted_at=data.get('posted_at', None),
        permalink=data.get('permalink', None),
        width=data.get('width', None),
        height=data.get('height', None),
        views=data.get('views', 0),
        likes=data.get('likes', 0),
        saves=data.get('saves', 0),
        comments=data.get('comments', None),
        nsfw=data.get('nsfw', False),
        image_url=data.get('image_url', None),
        source_url=data.get('source_url', None),
        saved=data.get('saved', False),
        liked=data.get('liked', False),
    )
[ "def", "NewFromJSON", "(", "data", ")", ":", "return", "SharedFile", "(", "sharekey", "=", "data", ".", "get", "(", "'sharekey'", ",", "None", ")", ",", "name", "=", "data", ".", "get", "(", "'name'", ",", "None", ")", ",", "user", "=", "User", "."...
Create a new SharedFile instance from a JSON dict. Args: data (dict): JSON dictionary representing a SharedFile. Returns: A SharedFile instance.
[ "Create", "a", "new", "SharedFile", "instance", "from", "a", "JSON", "dict", "." ]
python
train
streamlink/streamlink
src/streamlink_cli/main.py
https://github.com/streamlink/streamlink/blob/c8ed1daff14ac03195870238b9b900c1109dd5c1/src/streamlink_cli/main.py#L301-L333
def output_stream(plugin, stream):
    """Open stream, create output and finally write the stream to output.

    Retries opening the stream up to ``args.retry_open`` times; exits the
    process via ``console.exit`` when the stream or the output cannot be
    opened.  Returns True once the stream has been written to the output.
    """
    global output  # the opened output is published at module level

    # Try to open the stream, retrying on StreamError.
    success_open = False
    for i in range(args.retry_open):
        try:
            stream_fd, prebuffer = open_stream(stream)
            success_open = True
            break
        except StreamError as err:
            log.error("Try {0}/{1}: Could not open stream {2} ({3})", i + 1, args.retry_open, stream, err)

    if not success_open:
        console.exit("Could not open stream {0}, tried {1} times, exiting", stream, args.retry_open)

    output = create_output(plugin)

    try:
        output.open()
    except (IOError, OSError) as err:
        # Tailor the error message to the kind of output that failed.
        if isinstance(output, PlayerOutput):
            console.exit("Failed to start player: {0} ({1})", args.player, err)
        else:
            console.exit("Failed to open output: {0} ({1})", args.output, err)

    # Ensure the output is closed even if writing raises.
    with closing(output):
        log.debug("Writing stream to output")
        read_stream(stream_fd, output, prebuffer)

    return True
[ "def", "output_stream", "(", "plugin", ",", "stream", ")", ":", "global", "output", "success_open", "=", "False", "for", "i", "in", "range", "(", "args", ".", "retry_open", ")", ":", "try", ":", "stream_fd", ",", "prebuffer", "=", "open_stream", "(", "st...
Open stream, create output and finally write the stream to output.
[ "Open", "stream", "create", "output", "and", "finally", "write", "the", "stream", "to", "output", "." ]
python
test
wummel/patool
patoolib/__init__.py
https://github.com/wummel/patool/blob/d7e64d9fd60faaa4b3f824bd97c43ce59b185c40/patoolib/__init__.py#L725-L733
def diff_archives(archive1, archive2, verbosity=0, interactive=True):
    """Print differences between two archives.

    Both archive paths must name existing files.  With non-negative
    *verbosity* a progress message is logged, and a "no differences"
    message is logged when the underlying diff reports none.
    """
    for archive in (archive1, archive2):
        util.check_existing_filename(archive)
    quiet = verbosity < 0
    if not quiet:
        util.log_info("Comparing %s with %s ..." % (archive1, archive2))
    res = _diff_archives(archive1, archive2, verbosity=verbosity,
                         interactive=interactive)
    if res == 0 and not quiet:
        util.log_info("... no differences found.")
[ "def", "diff_archives", "(", "archive1", ",", "archive2", ",", "verbosity", "=", "0", ",", "interactive", "=", "True", ")", ":", "util", ".", "check_existing_filename", "(", "archive1", ")", "util", ".", "check_existing_filename", "(", "archive2", ")", "if", ...
Print differences between two archives.
[ "Print", "differences", "between", "two", "archives", "." ]
python
train
swharden/SWHLab
doc/oldcode/swhlab/core/common.py
https://github.com/swharden/SWHLab/blob/a86c3c65323cec809a4bd4f81919644927094bf5/doc/oldcode/swhlab/core/common.py#L845-L861
def version_upload(fname, username="nibjb"):
    """Only scott should do this. Upload new version to site.

    Prompts for the FTP password in a Tk dialog, logs in to swharden.com,
    and stores ``fname`` into /software/swhlab/versions.

    :param fname: path of the local file to upload
    :param username: FTP account name (default "nibjb")
    """
    print("popping up pasword window...")
    password = TK_askPassword("FTP LOGIN", "enter password for %s" % username)
    if not password:
        # User cancelled the dialog -- nothing to do.
        return
    print("username:", username)
    print("password:", "*" * (len(password)))
    print("connecting...")
    ftp = ftplib.FTP("swharden.com")
    ftp.login(username, password)
    print("successful login!")
    ftp.cwd("/software/swhlab/versions")  # IMMEDIATELY GO HERE!!!
    print("uploading", os.path.basename(fname))
    # FIX: the original passed open(fname, "rb") inline and never closed the
    # handle; use a context manager so the file is released after STOR.
    with open(fname, "rb") as payload:
        ftp.storbinary("STOR " + os.path.basename(fname), payload, 1024)  # for binary files
    print("disconnecting...")
    ftp.quit()
[ "def", "version_upload", "(", "fname", ",", "username", "=", "\"nibjb\"", ")", ":", "print", "(", "\"popping up pasword window...\"", ")", "password", "=", "TK_askPassword", "(", "\"FTP LOGIN\"", ",", "\"enter password for %s\"", "%", "username", ")", "if", "not", ...
Only scott should do this. Upload new version to site.
[ "Only", "scott", "should", "do", "this", ".", "Upload", "new", "version", "to", "site", "." ]
python
valid
yyuu/botornado
boto/s3/bucket.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/s3/bucket.py#L1015-L1063
def configure_versioning(self, versioning, mfa_delete=False, mfa_token=None,
                         headers=None):
    """
    Configure versioning for this bucket.

    ..note:: This feature is currently in beta.

    :type versioning: bool
    :param versioning: True to enable versioning, False to suspend it.

    :type mfa_delete: bool
    :param mfa_delete: True to enable Multi-Factor Authentication Delete;
        when enabled, Delete operations require the MFA token.

    :type mfa_token: tuple or list of strings
    :param mfa_token: (serial number, six-digit token) from the MFA device.
        Required when changing the MfaDelete status of the bucket.
    """
    ver = 'Enabled' if versioning else 'Suspended'
    mfa = 'Disabled' if not mfa_delete else 'Enabled'
    body = self.VersioningBody % (ver, mfa)
    if mfa_token:
        if not headers:
            headers = {}
        provider = self.connection.provider
        headers[provider.mfa_header] = ' '.join(mfa_token)
    response = self.connection.make_request('PUT', self.name, data=body,
                                            query_args='versioning',
                                            headers=headers)
    payload = response.read()
    if response.status == 200:
        return True
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, payload)
[ "def", "configure_versioning", "(", "self", ",", "versioning", ",", "mfa_delete", "=", "False", ",", "mfa_token", "=", "None", ",", "headers", "=", "None", ")", ":", "if", "versioning", ":", "ver", "=", "'Enabled'", "else", ":", "ver", "=", "'Suspended'", ...
Configure versioning for this bucket. ..note:: This feature is currently in beta. :type versioning: bool :param versioning: A boolean indicating whether version is enabled (True) or disabled (False). :type mfa_delete: bool :param mfa_delete: A boolean indicating whether the Multi-Factor Authentication Delete feature is enabled (True) or disabled (False). If mfa_delete is enabled then all Delete operations will require the token from your MFA device to be passed in the request. :type mfa_token: tuple or list of strings :param mfa_token: A tuple or list consisting of the serial number from the MFA device and the current value of the six-digit token associated with the device. This value is required when you are changing the status of the MfaDelete property of the bucket.
[ "Configure", "versioning", "for", "this", "bucket", ".", "..", "note", "::", "This", "feature", "is", "currently", "in", "beta", ".", ":", "type", "versioning", ":", "bool", ":", "param", "versioning", ":", "A", "boolean", "indicating", "whether", "version",...
python
train
raiden-network/raiden
raiden/transfer/mediated_transfer/mediator.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/transfer/mediated_transfer/mediator.py#L154-L171
def is_send_transfer_almost_equal(
        send_channel: NettingChannelState,
        send: LockedTransferUnsignedState,
        received: LockedTransferSignedState,
) -> bool:
    """ True if both transfers are for the same mediated transfer. """
    if not isinstance(send, LockedTransferUnsignedState):
        return False
    if not isinstance(received, LockedTransferSignedState):
        return False

    # The only thing that may change is the direction of the transfer: the
    # outgoing lock amount is the incoming amount minus the mediation fee.
    expected_amount = received.lock.amount - send_channel.mediation_fee
    return (
        send.payment_identifier == received.payment_identifier
        and send.token == received.token
        and send.lock.amount == expected_amount
        and send.lock.expiration == received.lock.expiration
        and send.lock.secrethash == received.lock.secrethash
        and send.initiator == received.initiator
        and send.target == received.target
    )
[ "def", "is_send_transfer_almost_equal", "(", "send_channel", ":", "NettingChannelState", ",", "send", ":", "LockedTransferUnsignedState", ",", "received", ":", "LockedTransferSignedState", ",", ")", "->", "bool", ":", "# The only thing that may change is the direction of the tr...
True if both transfers are for the same mediated transfer.
[ "True", "if", "both", "transfers", "are", "for", "the", "same", "mediated", "transfer", "." ]
python
train
foremast/gogo-utils
src/gogoutils/generator.py
https://github.com/foremast/gogo-utils/blob/3909c2d26e49baa8ad68e6be40977d4370d7c1ca/src/gogoutils/generator.py#L170-L175
def jenkins(self):
    """Generate jenkins job details.

    Formats the 'jenkins_job_name' template with this generator's data and
    returns it wrapped in a dict under the 'name' key.
    """
    name = self.format['jenkins_job_name'].format(**self.data)
    return {'name': name}
[ "def", "jenkins", "(", "self", ")", ":", "job_name", "=", "self", ".", "format", "[", "'jenkins_job_name'", "]", ".", "format", "(", "*", "*", "self", ".", "data", ")", "job", "=", "{", "'name'", ":", "job_name", "}", "return", "job" ]
Generate jenkins job details.
[ "Generate", "jenkins", "job", "details", "." ]
python
train
spyder-ide/spyder
spyder/plugins/editor/widgets/base.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/widgets/base.py#L1004-L1015
def extend_selection_to_complete_lines(self):
    """Extend current selection to complete lines"""
    cursor = self.textCursor()
    start_pos, end_pos = cursor.selectionStart(), cursor.selectionEnd()
    # Re-anchor the cursor so the selection runs from start_pos to end_pos.
    cursor.setPosition(start_pos)
    cursor.setPosition(end_pos, QTextCursor.KeepAnchor)
    if cursor.atBlockStart():
        # Selection ends exactly at a line start: pull the end back to the
        # end of the previous block so that trailing empty line is excluded.
        cursor.movePosition(QTextCursor.PreviousBlock,
                            QTextCursor.KeepAnchor)
        cursor.movePosition(QTextCursor.EndOfBlock,
                            QTextCursor.KeepAnchor)
    self.setTextCursor(cursor)
[ "def", "extend_selection_to_complete_lines", "(", "self", ")", ":", "cursor", "=", "self", ".", "textCursor", "(", ")", "start_pos", ",", "end_pos", "=", "cursor", ".", "selectionStart", "(", ")", ",", "cursor", ".", "selectionEnd", "(", ")", "cursor", ".", ...
Extend current selection to complete lines
[ "Extend", "current", "selection", "to", "complete", "lines" ]
python
train
RRZE-HPC/kerncraft
kerncraft/models/ecm.py
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/models/ecm.py#L167-L173
def analyze(self):
    """Run the complete analysis and return the results."""
    self.calculate_cache_access()
    self.calculate_cycles()
    flops = sum(self.kernel._flops.values())
    self.results['flops per iteration'] = flops
    return self.results
[ "def", "analyze", "(", "self", ")", ":", "self", ".", "calculate_cache_access", "(", ")", "self", ".", "calculate_cycles", "(", ")", "self", ".", "results", "[", "'flops per iteration'", "]", "=", "sum", "(", "self", ".", "kernel", ".", "_flops", ".", "v...
Run complete anaylysis and return results.
[ "Run", "complete", "anaylysis", "and", "return", "results", "." ]
python
test
jtwhite79/pyemu
pyemu/pst/pst_handler.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/pst/pst_handler.py#L2068-L2099
def from_io_files(cls, tpl_files, in_files, ins_files, out_files,
                  pst_filename=None):
    """create a Pst instance from model interface files. Assigns generic
    values for parameter info.  Tries to use INSCHEK to set somewhat
    meaningful observation values

    Parameters
    ----------
    tpl_files : list
        list of template file names
    in_files : list
        list of model input file names (pairs with template files)
    ins_files : list
        list of instruction file names
    out_files : list
        list of model output file names (pairs with instruction files)
    pst_filename : str
        name of control file to write.  If None, no file is written.
        Default is None

    Returns
    -------
    Pst : Pst

    Note
    ----
    calls pyemu.helpers.pst_from_io_files()
    """
    # Delegate entirely to the helper; imported lazily to avoid a cycle.
    from pyemu import helpers
    return helpers.pst_from_io_files(
        tpl_files=tpl_files,
        in_files=in_files,
        ins_files=ins_files,
        out_files=out_files,
        pst_filename=pst_filename,
    )
[ "def", "from_io_files", "(", "cls", ",", "tpl_files", ",", "in_files", ",", "ins_files", ",", "out_files", ",", "pst_filename", "=", "None", ")", ":", "from", "pyemu", "import", "helpers", "return", "helpers", ".", "pst_from_io_files", "(", "tpl_files", "=", ...
create a Pst instance from model interface files. Assigns generic values for parameter info. Tries to use INSCHEK to set somewhat meaningful observation values Parameters ---------- tpl_files : list list of template file names in_files : list list of model input file names (pairs with template files) ins_files : list list of instruction file names out_files : list list of model output file names (pairs with instruction files) pst_filename : str name of control file to write. If None, no file is written. Default is None Returns ------- Pst : Pst Note ---- calls pyemu.helpers.pst_from_io_files()
[ "create", "a", "Pst", "instance", "from", "model", "interface", "files", ".", "Assigns", "generic", "values", "for", "parameter", "info", ".", "Tries", "to", "use", "INSCHEK", "to", "set", "somewhat", "meaningful", "observation", "values" ]
python
train
saltstack/salt
salt/modules/virt.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/virt.py#L5290-L5298
def _get_all_volumes_paths(conn):
    '''
    Extract the path and backing stores path of all volumes.

    :param conn: libvirt connection to use
    '''
    backing_xpath = './/backingStore/path'
    result = {}
    for pool in conn.listAllStoragePools():
        for vol in pool.listAllVolumes():
            if not _is_valid_volume(vol):
                continue
            tree = ElementTree.fromstring(vol.XMLDesc())
            result[vol.path()] = [node.text
                                  for node in tree.findall(backing_xpath)]
    return result
[ "def", "_get_all_volumes_paths", "(", "conn", ")", ":", "volumes", "=", "[", "vol", "for", "l", "in", "[", "obj", ".", "listAllVolumes", "(", ")", "for", "obj", "in", "conn", ".", "listAllStoragePools", "(", ")", "]", "for", "vol", "in", "l", "]", "r...
Extract the path and backing stores path of all volumes. :param conn: libvirt connection to use
[ "Extract", "the", "path", "and", "backing", "stores", "path", "of", "all", "volumes", "." ]
python
train
jelmer/python-fastimport
fastimport/commands.py
https://github.com/jelmer/python-fastimport/blob/5cef9e037b7d7b37f58f522ac9ea4e343e6a1dff/fastimport/commands.py#L270-L275
def iter_files(self):
    """Iterate over files."""
    source = self.file_iter
    # file_iter may be a callable producing an iterator, or a plain iterable.
    return source() if callable(source) else iter(source)
[ "def", "iter_files", "(", "self", ")", ":", "# file_iter may be a callable or an iterator", "if", "callable", "(", "self", ".", "file_iter", ")", ":", "return", "self", ".", "file_iter", "(", ")", "return", "iter", "(", "self", ".", "file_iter", ")" ]
Iterate over files.
[ "Iterate", "over", "files", "." ]
python
train
dw/mitogen
mitogen/core.py
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/core.py#L391-L397
def listen(obj, name, func):
    """
    Arrange for `func(*args, **kwargs)` to be invoked when the named signal
    is fired by `obj`.
    """
    # Lazily create the per-object registry, then the per-signal list.
    registry = vars(obj).setdefault('_signals', {})
    listeners = registry.setdefault(name, [])
    listeners.append(func)
[ "def", "listen", "(", "obj", ",", "name", ",", "func", ")", ":", "signals", "=", "vars", "(", "obj", ")", ".", "setdefault", "(", "'_signals'", ",", "{", "}", ")", "signals", ".", "setdefault", "(", "name", ",", "[", "]", ")", ".", "append", "(",...
Arrange for `func(*args, **kwargs)` to be invoked when the named signal is fired by `obj`.
[ "Arrange", "for", "func", "(", "*", "args", "**", "kwargs", ")", "to", "be", "invoked", "when", "the", "named", "signal", "is", "fired", "by", "obj", "." ]
python
train
vaexio/vaex
packages/vaex-core/vaex/dataframe.py
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L3077-L3102
def add_virtual_columns_polar_velocities_to_cartesian(self, x='x', y='y', azimuth=None, vr='vr_polar', vazimuth='vphi_polar', vx_out='vx', vy_out='vy', propagate_uncertainties=False):
    """ Convert cylindrical polar velocities to Cartesian.

    :param x:
    :param y:
    :param azimuth: Optional expression for the azimuth in degrees , may lead to a better performance when given.
    :param vr:
    :param vazimuth:
    :param vx_out:
    :param vy_out:
    :param propagate_uncertainties: {propagate_uncertainties}
    """
    # Wrap all column names as symbolic vaex expressions; arithmetic below
    # builds expression trees, it does not evaluate anything eagerly.
    x = self._expr(x)
    y = self._expr(y)
    vr = self._expr(vr)
    vazimuth = self._expr(vazimuth)
    if azimuth is not None:
        # Caller-supplied azimuth is in degrees -> convert to radians.
        azimuth = self._expr(azimuth)
        azimuth = np.deg2rad(azimuth)
    else:
        # Derive the azimuth from the Cartesian position columns.
        azimuth = np.arctan2(y, x)
    azimuth = self._expr(azimuth)
    # Standard polar -> Cartesian velocity rotation.
    self[vx_out] = vr * np.cos(azimuth) - vazimuth * np.sin(azimuth)
    self[vy_out] = vr * np.sin(azimuth) + vazimuth * np.cos(azimuth)
    if propagate_uncertainties:
        self.propagate_uncertainties([self[vx_out], self[vy_out]])
[ "def", "add_virtual_columns_polar_velocities_to_cartesian", "(", "self", ",", "x", "=", "'x'", ",", "y", "=", "'y'", ",", "azimuth", "=", "None", ",", "vr", "=", "'vr_polar'", ",", "vazimuth", "=", "'vphi_polar'", ",", "vx_out", "=", "'vx'", ",", "vy_out", ...
Convert cylindrical polar velocities to Cartesian. :param x: :param y: :param azimuth: Optional expression for the azimuth in degrees , may lead to a better performance when given. :param vr: :param vazimuth: :param vx_out: :param vy_out: :param propagate_uncertainties: {propagate_uncertainties}
[ "Convert", "cylindrical", "polar", "velocities", "to", "Cartesian", "." ]
python
test
anjishnu/ask-alexa-pykit
ask/alexa_io.py
https://github.com/anjishnu/ask-alexa-pykit/blob/a47c278ca7a60532bbe1a9b789f6c37e609fea8b/ask/alexa_io.py#L103-L118
def create_response(self, message=None, end_session=False, card_obj=None,
                    reprompt_message=None, is_ssml=None):
    """
    message - text message to be spoken out by the Echo
    end_session - flag to determine whether this interaction should end the session
    card_obj = JSON card object to substitute the 'card' field in the raw_response
    """
    # Start from a shallow copy of the base response template.
    raw = dict(self.base_response)
    if message:
        raw['response'] = self.create_speech(message, is_ssml)
    raw['response']['shouldEndSession'] = end_session
    if card_obj:
        raw['response']['card'] = card_obj
    if reprompt_message:
        raw['response']['reprompt'] = self.create_speech(reprompt_message,
                                                         is_ssml)
    return Response(raw)
[ "def", "create_response", "(", "self", ",", "message", "=", "None", ",", "end_session", "=", "False", ",", "card_obj", "=", "None", ",", "reprompt_message", "=", "None", ",", "is_ssml", "=", "None", ")", ":", "response", "=", "dict", "(", "self", ".", ...
message - text message to be spoken out by the Echo end_session - flag to determine whether this interaction should end the session card_obj = JSON card object to substitute the 'card' field in the raw_response
[ "message", "-", "text", "message", "to", "be", "spoken", "out", "by", "the", "Echo", "end_session", "-", "flag", "to", "determine", "whether", "this", "interaction", "should", "end", "the", "session", "card_obj", "=", "JSON", "card", "object", "to", "substit...
python
train
TeamHG-Memex/eli5
eli5/sklearn/explain_weights.py
https://github.com/TeamHG-Memex/eli5/blob/371b402a0676295c05e582a2dd591f7af476b86b/eli5/sklearn/explain_weights.py#L396-L467
def explain_linear_regressor_weights(reg,
                                     vec=None,
                                     top=_TOP,
                                     target_names=None,
                                     targets=None,
                                     feature_names=None,
                                     coef_scale=None,
                                     feature_re=None,
                                     feature_filter=None,
                                     ):
    """
    Return an explanation of a linear regressor weights.

    See :func:`eli5.explain_weights` for description of
    ``top``, ``target_names``, ``targets``, ``feature_names``,
    ``feature_re`` and ``feature_filter`` parameters.

    ``vec`` is a vectorizer instance used to transform
    raw features to the input of the regressor ``reg``; you can
    pass it instead of ``feature_names``.

    ``coef_scale`` is a 1D np.ndarray with a scaling coefficient
    for each feature; coef[i] = coef[i] * coef_scale[i] if
    coef_scale[i] is not nan. Use it if you want to scale coefficients
    before displaying them, to take input feature sign or scale in account.
    """
    # Non-linear SVR kernels have no per-feature coefficients to explain.
    if isinstance(reg, (SVR, NuSVR)) and reg.kernel != 'linear':
        return explain_weights_sklearn_not_supported(reg)

    feature_names, coef_scale = handle_hashing_vec(vec, feature_names,
                                                   coef_scale)
    # Apply the feature_filter / feature_re filters; flt_indices maps the
    # kept features back into the coefficient vector.
    feature_names, flt_indices = get_feature_names_filtered(
        reg, vec,
        feature_names=feature_names,
        feature_filter=feature_filter,
        feature_re=feature_re,
    )

    _extra_caveats = "\n" + HASHING_CAVEATS if is_invhashing(vec) else ''

    def _features(target_id):
        # Top features for one regression target, scaled and filtered.
        coef = get_coef(reg, target_id, scale=coef_scale)
        if flt_indices is not None:
            coef = coef[flt_indices]
        return get_top_features(feature_names, coef, top)

    display_names = get_target_display_names(get_default_target_names(reg),
                                             target_names, targets)
    if is_multitarget_regressor(reg):
        # One TargetExplanation per regression target.
        return Explanation(
            targets=[
                TargetExplanation(
                    target=target_name,
                    feature_weights=_features(target_id)
                )
                for target_id, target_name in display_names
            ],
            description=DESCRIPTION_REGRESSION_MULTITARGET + _extra_caveats,
            estimator=repr(reg),
            method='linear model',
            is_regression=True,
        )
    else:
        # Single-target regressor: explain coefficient vector 0.
        return Explanation(
            targets=[TargetExplanation(
                target=display_names[0][1],
                feature_weights=_features(0),
            )],
            description=DESCRIPTION_REGRESSION + _extra_caveats,
            estimator=repr(reg),
            method='linear model',
            is_regression=True,
        )
[ "def", "explain_linear_regressor_weights", "(", "reg", ",", "vec", "=", "None", ",", "top", "=", "_TOP", ",", "target_names", "=", "None", ",", "targets", "=", "None", ",", "feature_names", "=", "None", ",", "coef_scale", "=", "None", ",", "feature_re", "=...
Return an explanation of a linear regressor weights. See :func:`eli5.explain_weights` for description of ``top``, ``target_names``, ``targets``, ``feature_names``, ``feature_re`` and ``feature_filter`` parameters. ``vec`` is a vectorizer instance used to transform raw features to the input of the regressor ``reg``; you can pass it instead of ``feature_names``. ``coef_scale`` is a 1D np.ndarray with a scaling coefficient for each feature; coef[i] = coef[i] * coef_scale[i] if coef_scale[i] is not nan. Use it if you want to scale coefficients before displaying them, to take input feature sign or scale in account.
[ "Return", "an", "explanation", "of", "a", "linear", "regressor", "weights", "." ]
python
train
shidenggui/easytrader
easytrader/helpers.py
https://github.com/shidenggui/easytrader/blob/e5ae4daeda4ea125763a95b280dd694c7f68257d/easytrader/helpers.py#L32-L53
def get_stock_type(stock_code):
    """Classify a stock code by its exchange market ('sh' or 'sz').

    Rules, checked in order on the string form of the code:
    an explicit 'sh'/'sz' prefix is returned as-is; otherwise known
    Shanghai numeric prefixes, then known Shenzhen prefixes; finally
    codes starting with '5', '6' or '9' are 'sh' and anything else 'sz'.

    :param stock_code: stock identifier; anything convertible to str
    :return: 'sh' or 'sz'
    """
    code = str(stock_code)
    prefix = code[:2]
    if prefix in ("sh", "sz"):
        return prefix
    sh_prefixes = ("50", "51", "60", "73", "90", "110", "113", "132", "204", "78")
    if code.startswith(sh_prefixes):
        return "sh"
    sz_prefixes = ("00", "13", "18", "15", "16", "18", "20", "30", "39", "115", "1318")
    if code.startswith(sz_prefixes):
        return "sz"
    return "sh" if code.startswith(("5", "6", "9")) else "sz"
[ "def", "get_stock_type", "(", "stock_code", ")", ":", "stock_code", "=", "str", "(", "stock_code", ")", "if", "stock_code", ".", "startswith", "(", "(", "\"sh\"", ",", "\"sz\"", ")", ")", ":", "return", "stock_code", "[", ":", "2", "]", "if", "stock_code...
判断股票ID对应的证券市场 匹配规则 ['50', '51', '60', '90', '110'] 为 sh ['00', '13', '18', '15', '16', '18', '20', '30', '39', '115'] 为 sz ['5', '6', '9'] 开头的为 sh, 其余为 sz :param stock_code:股票ID, 若以 'sz', 'sh' 开头直接返回对应类型,否则使用内置规则判断 :return 'sh' or 'sz
[ "判断股票ID对应的证券市场", "匹配规则", "[", "50", "51", "60", "90", "110", "]", "为", "sh", "[", "00", "13", "18", "15", "16", "18", "20", "30", "39", "115", "]", "为", "sz", "[", "5", "6", "9", "]", "开头的为", "sh,", "其余为", "sz", ":", "param", "stock_code", ":...
python
train
quiltdata/quilt
compiler/quilt/tools/store.py
https://github.com/quiltdata/quilt/blob/651853e7e89a8af86e0ff26167e752efa5878c12/compiler/quilt/tools/store.py#L327-L333
def team_path(self, team=None):
    """
    Returns the path to directory with the team's users' package repositories.
    """
    # A missing team falls back to the package-wide default.
    target_team = DEFAULT_TEAM if team is None else team
    return os.path.join(self._path, self.PKG_DIR, target_team)
[ "def", "team_path", "(", "self", ",", "team", "=", "None", ")", ":", "if", "team", "is", "None", ":", "team", "=", "DEFAULT_TEAM", "return", "os", ".", "path", ".", "join", "(", "self", ".", "_path", ",", "self", ".", "PKG_DIR", ",", "team", ")" ]
Returns the path to directory with the team's users' package repositories.
[ "Returns", "the", "path", "to", "directory", "with", "the", "team", "s", "users", "package", "repositories", "." ]
python
train
ratt-ru/PyMORESANE
pymoresane/main.py
https://github.com/ratt-ru/PyMORESANE/blob/b024591ad0bbb69320d08841f28a2c27f62ae1af/pymoresane/main.py#L601-L616
def restore(self):
    """
    This method constructs the restoring beam and then adds the convolution
    to the residual.

    Returns the parameters of the fitted clean beam.
    """
    clean_beam, beam_params = beam_fit(self.psf_data, self.cdelt1, self.cdelt2)

    if np.all(np.array(self.psf_data_shape) == 2 * np.array(self.dirty_data_shape)):
        # PSF is padded to twice the dirty-image size: convolve in the padded
        # domain, then crop the central region back to the dirty shape.
        self.restored = np.fft.fftshift(np.fft.irfft2(
            np.fft.rfft2(conv.pad_array(self.model)) * np.fft.rfft2(clean_beam)))
        # FIX: use floor division -- the original "/2" produces float slice
        # indices, which raises TypeError on Python 3.
        half_y = self.dirty_data_shape[0] // 2
        half_x = self.dirty_data_shape[1] // 2
        self.restored = self.restored[half_y:-half_y, half_x:-half_x]
    else:
        self.restored = np.fft.fftshift(np.fft.irfft2(
            np.fft.rfft2(self.model) * np.fft.rfft2(clean_beam)))

    self.restored += self.residual
    self.restored = self.restored.astype(np.float32)

    return beam_params
[ "def", "restore", "(", "self", ")", ":", "clean_beam", ",", "beam_params", "=", "beam_fit", "(", "self", ".", "psf_data", ",", "self", ".", "cdelt1", ",", "self", ".", "cdelt2", ")", "if", "np", ".", "all", "(", "np", ".", "array", "(", "self", "."...
This method constructs the restoring beam and then adds the convolution to the residual.
[ "This", "method", "constructs", "the", "restoring", "beam", "and", "then", "adds", "the", "convolution", "to", "the", "residual", "." ]
python
train
tornadoweb/tornado
tornado/iostream.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/iostream.py#L349-L384
def read_until_regex(self, regex: bytes, max_bytes: int = None) -> Awaitable[bytes]:
    """Asynchronously read until we have matched the given regex.

    The result includes the data that matches the regex and anything
    that came before it.

    If ``max_bytes`` is not None, the connection will be closed
    if more than ``max_bytes`` bytes have been read and the regex is
    not satisfied.

    .. versionchanged:: 4.0
        Added the ``max_bytes`` argument.  The ``callback`` argument is
        now optional and a `.Future` will be returned if it is omitted.

    .. versionchanged:: 6.0

       The ``callback`` argument was removed. Use the returned
       `.Future` instead.

    """
    # NOTE(review): max_bytes is effectively Optional[int]; annotation kept
    # as-is to avoid depending on a typing import not visible here.
    future = self._start_read()
    # Stash the compiled pattern and limit; the read machinery consumes them.
    self._read_regex = re.compile(regex)
    self._read_max_bytes = max_bytes
    try:
        self._try_inline_read()
    except UnsatisfiableReadError as e:
        # Handle this the same way as in _handle_events.
        gen_log.info("Unsatisfiable read, closing connection: %s" % e)
        self.close(exc_info=e)
        return future
    except:
        # Ensure that the future doesn't log an error because its
        # failure was never examined.
        future.add_done_callback(lambda f: f.exception())
        raise
    return future
[ "def", "read_until_regex", "(", "self", ",", "regex", ":", "bytes", ",", "max_bytes", ":", "int", "=", "None", ")", "->", "Awaitable", "[", "bytes", "]", ":", "future", "=", "self", ".", "_start_read", "(", ")", "self", ".", "_read_regex", "=", "re", ...
Asynchronously read until we have matched the given regex. The result includes the data that matches the regex and anything that came before it. If ``max_bytes`` is not None, the connection will be closed if more than ``max_bytes`` bytes have been read and the regex is not satisfied. .. versionchanged:: 4.0 Added the ``max_bytes`` argument. The ``callback`` argument is now optional and a `.Future` will be returned if it is omitted. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned `.Future` instead.
[ "Asynchronously", "read", "until", "we", "have", "matched", "the", "given", "regex", "." ]
python
train
spacetelescope/drizzlepac
drizzlepac/imageObject.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/imageObject.py#L734-L779
def buildIVMmask(self ,chip, dqarr, scale):
    """ Builds a weight mask from an input DQ array and either an IVM array
    provided by the user or a self-generated IVM array derived from the
    flat-field reference file associated with the input image.
    """
    sci_chip = self._image[self.scienceExt,chip]
    ivmname = self.outputNames['ivmFile']

    if ivmname is not None:
        log.info("Applying user supplied IVM files for chip %s" % chip)
        #Parse the input file name to get the extension we are working on
        extn = "IVM,{}".format(chip)
        #Open the mask image for updating and the IVM image
        ivm =  fileutil.openImage(ivmname, mode='readonly', memmap=False)
        ivmfile = fileutil.getExtn(ivm, extn)

        # Multiply the IVM file by the input mask in place.
        ivmarr = ivmfile.data * dqarr
        ivm.close()
    else:
        log.info("Automatically creating IVM files for chip %s" % chip)
        # If no IVM files were provided by the user we will
        # need to automatically generate them based upon
        # instrument specific information.
        flat = self.getflat(chip)
        RN = self.getReadNoiseImage(chip)
        darkimg = self.getdarkimg(chip)
        skyimg = self.getskyimg(chip)
        # Inverse-variance estimate: signal terms (dark + sky*flat) plus
        # read-noise variance in the denominator.
        #exptime = self.getexptimeimg(chip)
        #exptime = sci_chip._exptime
        #ivm = (flat*exptime)**2/(darkimg+(skyimg*flat)+RN**2)
        ivm = (flat)**2/(darkimg+(skyimg*flat)+RN**2)

        # Multiply the IVM file by the input mask in place.
        ivmarr = ivm * dqarr

    # Update 'wt_scl' parameter to match use of IVM file
    sci_chip._wtscl = pow(sci_chip._exptime,2)/pow(scale,4)
    #sci_chip._wtscl = 1.0/pow(scale,4)

    return ivmarr.astype(np.float32)
[ "def", "buildIVMmask", "(", "self", ",", "chip", ",", "dqarr", ",", "scale", ")", ":", "sci_chip", "=", "self", ".", "_image", "[", "self", ".", "scienceExt", ",", "chip", "]", "ivmname", "=", "self", ".", "outputNames", "[", "'ivmFile'", "]", "if", ...
Builds a weight mask from an input DQ array and either an IVM array provided by the user or a self-generated IVM array derived from the flat-field reference file associated with the input image.
[ "Builds", "a", "weight", "mask", "from", "an", "input", "DQ", "array", "and", "either", "an", "IVM", "array", "provided", "by", "the", "user", "or", "a", "self", "-", "generated", "IVM", "array", "derived", "from", "the", "flat", "-", "field", "reference...
python
train
log2timeline/dfvfs
dfvfs/mount/manager.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/mount/manager.py#L61-L74
def RegisterMountPoint(cls, mount_point, path_spec):
    """Registers a path specification mount point.

    Args:
      mount_point (str): mount point identifier.
      path_spec (PathSpec): path specification of the mount point.

    Raises:
      KeyError: if the corresponding mount point is already set.
    """
    registry = cls._mount_points
    # Refuse to silently overwrite an existing registration.
    if mount_point in registry:
        raise KeyError('Mount point: {0:s} already set.'.format(mount_point))
    registry[mount_point] = path_spec
[ "def", "RegisterMountPoint", "(", "cls", ",", "mount_point", ",", "path_spec", ")", ":", "if", "mount_point", "in", "cls", ".", "_mount_points", ":", "raise", "KeyError", "(", "'Mount point: {0:s} already set.'", ".", "format", "(", "mount_point", ")", ")", "cls...
Registers a path specification mount point. Args: mount_point (str): mount point identifier. path_spec (PathSpec): path specification of the mount point. Raises: KeyError: if the corresponding mount point is already set.
[ "Registers", "a", "path", "specification", "mount", "point", "." ]
python
train
thebigmunch/google-music-utils
src/google_music_utils/misc.py
https://github.com/thebigmunch/google-music-utils/blob/2e8873defe7d5aab7321b9d5ec8a80d72687578e/src/google_music_utils/misc.py#L54-L138
def template_to_filepath(template, metadata, template_patterns=None):
    """Create directory structure and file name based on metadata template.

    Note:
        A template meant to be a base directory for suggested
        names should have a trailing slash or backslash.

    Parameters:
        template (str or ~os.PathLike): A filepath which can include
            template patterns as defined by :param template_patterns:.

        metadata (~collections.abc.Mapping): A metadata dict.

        template_patterns (~collections.abc.Mapping): A dict of pattern
            replacements. Default: :const:`~google_music_utils.constants.TEMPLATE_PATTERNS`

    Returns:
        ~pathlib.Path: A filepath.
    """
    path = Path(template)

    if template_patterns is None:
        template_patterns = TEMPLATE_PATTERNS

    suggested_filename = suggest_filename(metadata)

    if (
        path == Path.cwd()
        or path == Path('%suggested%')
    ):
        # Bare/implicit template: just use the suggested file name.
        filepath = Path(suggested_filename)
    elif any(template_pattern in path.parts for template_pattern in template_patterns):
        # Template contains pattern tokens: substitute metadata per part.
        if template.endswith(('/', '\\')):
            template += suggested_filename

        path = Path(template.replace('%suggested%', suggested_filename))

        parts = []
        for part in path.parts:
            if part == path.anchor:
                # Drive/root anchor is copied through untouched.
                parts.append(part)
            else:
                for key in template_patterns:
                    if (  # pragma: no branch
                        key in part
                        and any(field in metadata for field in template_patterns[key])
                    ):
                        # First metadata field that exists for this pattern.
                        field = more_itertools.first_true(
                            template_patterns[key],
                            pred=lambda k: k in metadata
                        )

                        if key.startswith(('%disc', '%track')):
                            # Normalize disc/track numbers; '...2%' patterns
                            # are zero-padded to two digits.
                            # NOTE: this mutates the caller's metadata dict.
                            number = _split_number_field(
                                str(
                                    list_to_single_value(
                                        metadata[field]
                                    )
                                )
                            )

                            if key.endswith('2%'):
                                metadata[field] = number.zfill(2)
                            else:
                                metadata[field] = number

                        part = part.replace(
                            key,
                            list_to_single_value(
                                metadata[field]
                            )
                        )

                parts.append(_replace_invalid_characters(part))

        filepath = Path(*parts)
    elif '%suggested%' in template:
        filepath = Path(template.replace('%suggested%', suggested_filename))
    elif template.endswith(('/', '\\')):
        # Trailing separator marks a base directory for the suggested name.
        filepath = path / suggested_filename
    else:
        filepath = path

    return filepath
[ "def", "template_to_filepath", "(", "template", ",", "metadata", ",", "template_patterns", "=", "None", ")", ":", "path", "=", "Path", "(", "template", ")", "if", "template_patterns", "is", "None", ":", "template_patterns", "=", "TEMPLATE_PATTERNS", "suggested_fil...
Create directory structure and file name based on metadata template. Note: A template meant to be a base directory for suggested names should have a trailing slash or backslash. Parameters: template (str or ~os.PathLike): A filepath which can include template patterns as defined by :param template_patterns:. metadata (~collections.abc.Mapping): A metadata dict. template_patterns (~collections.abc.Mapping): A dict of ``pattern: field`` pairs used to replace patterns with metadata field values. Default: :const:`~google_music_utils.constants.TEMPLATE_PATTERNS` Returns: ~pathlib.Path: A filepath.
[ "Create", "directory", "structure", "and", "file", "name", "based", "on", "metadata", "template", "." ]
python
train
ktbyers/netmiko
netmiko/base_connection.py
https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/base_connection.py#L935-L948
def _build_ssh_client(self):
        """Prepare for Paramiko SSH connection.

        Builds and returns a ``paramiko.SSHClient`` configured from the
        host-key settings and missing-host-key policy carried on ``self``.
        """
        client = paramiko.SSHClient()

        # Host-key sources are opt-in via connection attributes; the
        # alternate key file is consulted only when it actually exists.
        if self.system_host_keys:
            client.load_system_host_keys()
        if self.alt_host_keys and path.isfile(self.alt_key_file):
            client.load_host_keys(self.alt_key_file)

        # Policy for hosts missing from known_hosts (default auto-adds
        # untrusted hosts; make sure that is appropriate for your env).
        client.set_missing_host_key_policy(self.key_policy)

        return client
[ "def", "_build_ssh_client", "(", "self", ")", ":", "# Create instance of SSHClient object", "remote_conn_pre", "=", "paramiko", ".", "SSHClient", "(", ")", "# Load host_keys for better SSH security", "if", "self", ".", "system_host_keys", ":", "remote_conn_pre", ".", "loa...
Prepare for Paramiko SSH connection.
[ "Prepare", "for", "Paramiko", "SSH", "connection", "." ]
python
train
sdispater/pendulum
pendulum/date.py
https://github.com/sdispater/pendulum/blob/94d28b0d3cb524ae02361bd1ed7ea03e2e655e4e/pendulum/date.py#L244-L262
def subtract(self, years=0, months=0, weeks=0, days=0):
        """
        Remove duration from the instance.

        :param years: The number of years
        :type years: int

        :param months: The number of months
        :type months: int

        :param weeks: The number of weeks
        :type weeks: int

        :param days: The number of days
        :type days: int

        :rtype: Date
        """
        # Subtracting a duration is simply adding its negation.
        negated = {
            'years': -years,
            'months': -months,
            'weeks': -weeks,
            'days': -days,
        }

        return self.add(**negated)
[ "def", "subtract", "(", "self", ",", "years", "=", "0", ",", "months", "=", "0", ",", "weeks", "=", "0", ",", "days", "=", "0", ")", ":", "return", "self", ".", "add", "(", "years", "=", "-", "years", ",", "months", "=", "-", "months", ",", "...
Remove duration from the instance. :param years: The number of years :type years: int :param months: The number of months :type months: int :param weeks: The number of weeks :type weeks: int :param days: The number of days :type days: int :rtype: Date
[ "Remove", "duration", "from", "the", "instance", "." ]
python
train