repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
lago-project/lago
lago/lago_ansible.py
https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/lago_ansible.py#L144-L167
def get_inventory_temp_file(self, keys=None): """ Context manager which returns the inventory written on a tempfile. The tempfile will be deleted as soon as this context manger ends. Args: keys (list of str): Path to the keys that will be used to create groups. Yields: tempfile.NamedTemporaryFile: Temp file containing the inventory """ temp_file = tempfile.NamedTemporaryFile(mode='r+t') inventory = self.get_inventory_str(keys) LOGGER.debug( 'Writing inventory to temp file {} \n{}'.format( temp_file.name, inventory ) ) temp_file.write(inventory) temp_file.flush() temp_file.seek(0) yield temp_file temp_file.close()
[ "def", "get_inventory_temp_file", "(", "self", ",", "keys", "=", "None", ")", ":", "temp_file", "=", "tempfile", ".", "NamedTemporaryFile", "(", "mode", "=", "'r+t'", ")", "inventory", "=", "self", ".", "get_inventory_str", "(", "keys", ")", "LOGGER", ".", ...
Context manager which returns the inventory written on a tempfile. The tempfile will be deleted as soon as this context manger ends. Args: keys (list of str): Path to the keys that will be used to create groups. Yields: tempfile.NamedTemporaryFile: Temp file containing the inventory
[ "Context", "manager", "which", "returns", "the", "inventory", "written", "on", "a", "tempfile", ".", "The", "tempfile", "will", "be", "deleted", "as", "soon", "as", "this", "context", "manger", "ends", "." ]
python
train
Microsoft/ApplicationInsights-Python
applicationinsights/channel/contracts/Device.py
https://github.com/Microsoft/ApplicationInsights-Python/blob/8452ab7126f9bb6964637d4aa1258c2af17563d6/applicationinsights/channel/contracts/Device.py#L128-L137
def os_version(self, value): """The os_version property. Args: value (string). the property value. """ if value == self._defaults['ai.device.osVersion'] and 'ai.device.osVersion' in self._values: del self._values['ai.device.osVersion'] else: self._values['ai.device.osVersion'] = value
[ "def", "os_version", "(", "self", ",", "value", ")", ":", "if", "value", "==", "self", ".", "_defaults", "[", "'ai.device.osVersion'", "]", "and", "'ai.device.osVersion'", "in", "self", ".", "_values", ":", "del", "self", ".", "_values", "[", "'ai.device.osV...
The os_version property. Args: value (string). the property value.
[ "The", "os_version", "property", ".", "Args", ":", "value", "(", "string", ")", ".", "the", "property", "value", "." ]
python
train
authomatic/liveandletdie
liveandletdie/__init__.py
https://github.com/authomatic/liveandletdie/blob/bf3bcdbd679452ec7c248e9910d85c7fcdca586b/liveandletdie/__init__.py#L87-L143
def port_in_use(port, kill=False, logging=False): """ Checks whether a port is free or not. :param int port: The port number to check for. :param bool kill: If ``True`` the process will be killed. :returns: The process id as :class:`int` if in use, otherwise ``False`` . """ command_template = 'lsof -iTCP:{0} -sTCP:LISTEN' process = subprocess.Popen(command_template.format(port).split(), stdout=subprocess.PIPE) headers = process.stdout.readline().decode().split() if 'PID' not in headers: _log(logging, 'Port {0} is free.'.format(port)) return False index_pid = headers.index('PID') index_cmd = headers.index('COMMAND') row = process.stdout.readline().decode().split() if len(row) < index_pid: _log(logging, 'Port {0} is free.'.format(port)) return False pid = int(row[index_pid]) command = row[index_cmd] if pid and command.startswith('python'): _log(logging, 'Port {0} is already being used by process {1}!' .format(port, pid)) if kill: _log(logging, 'Killing process with id {0} listening on port {1}!' .format(pid, port)) os.kill(pid, signal.SIGKILL) # Check whether it was really killed. try: # If still alive kill_process(pid, logging) # call me again _log(logging, 'Process {0} is still alive! checking again...' .format(pid)) return port_in_use(port, kill) except OSError: # If killed return False else: return pid
[ "def", "port_in_use", "(", "port", ",", "kill", "=", "False", ",", "logging", "=", "False", ")", ":", "command_template", "=", "'lsof -iTCP:{0} -sTCP:LISTEN'", "process", "=", "subprocess", ".", "Popen", "(", "command_template", ".", "format", "(", "port", ")"...
Checks whether a port is free or not. :param int port: The port number to check for. :param bool kill: If ``True`` the process will be killed. :returns: The process id as :class:`int` if in use, otherwise ``False`` .
[ "Checks", "whether", "a", "port", "is", "free", "or", "not", ".", ":", "param", "int", "port", ":", "The", "port", "number", "to", "check", "for", ".", ":", "param", "bool", "kill", ":", "If", "True", "the", "process", "will", "be", "killed", ".", ...
python
train
timofurrer/tag-expressions
tagexpressions/models.py
https://github.com/timofurrer/tag-expressions/blob/9b58c34296b31530f31517ffefc8715516c73da3/tagexpressions/models.py#L34-L40
def evaluate(self, values): """Evaluate the "OR" expression Check if the left "or" right expression evaluate to True. """ return self.left.evaluate(values) or self.right.evaluate(values)
[ "def", "evaluate", "(", "self", ",", "values", ")", ":", "return", "self", ".", "left", ".", "evaluate", "(", "values", ")", "or", "self", ".", "right", ".", "evaluate", "(", "values", ")" ]
Evaluate the "OR" expression Check if the left "or" right expression evaluate to True.
[ "Evaluate", "the", "OR", "expression" ]
python
train
fracpete/python-weka-wrapper3
python/weka/flow/source.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/flow/source.py#L598-L611
def to_config(self, k, v): """ Hook method that allows conversion of individual options. :param k: the key of the option :type k: str :param v: the value :type v: object :return: the potentially processed value :rtype: object """ if k == "setup": return base.to_commandline(v) return super(DataGenerator, self).to_config(k, v)
[ "def", "to_config", "(", "self", ",", "k", ",", "v", ")", ":", "if", "k", "==", "\"setup\"", ":", "return", "base", ".", "to_commandline", "(", "v", ")", "return", "super", "(", "DataGenerator", ",", "self", ")", ".", "to_config", "(", "k", ",", "v...
Hook method that allows conversion of individual options. :param k: the key of the option :type k: str :param v: the value :type v: object :return: the potentially processed value :rtype: object
[ "Hook", "method", "that", "allows", "conversion", "of", "individual", "options", "." ]
python
train
corpusops/pdbclone
lib/pdb_clone/bootstrappdb_gdb.py
https://github.com/corpusops/pdbclone/blob/f781537c243a4874b246d43dbdef8c4279f0094d/lib/pdb_clone/bootstrappdb_gdb.py#L80-L103
def get_curline(): """Return the current python source line.""" if Frame: frame = Frame.get_selected_python_frame() if frame: line = '' f = frame.get_pyop() if f and not f.is_optimized_out(): cwd = os.path.join(os.getcwd(), '') fname = f.filename() if cwd in fname: fname = fname[len(cwd):] try: line = f.current_line() except IOError: pass if line: # Use repr(line) to avoid UnicodeDecodeError on the # following print invocation. line = repr(line).strip("'") line = line[:-2] if line.endswith(r'\n') else line return ('-> %s(%s): %s' % (fname, f.current_line_num(), line)) return ''
[ "def", "get_curline", "(", ")", ":", "if", "Frame", ":", "frame", "=", "Frame", ".", "get_selected_python_frame", "(", ")", "if", "frame", ":", "line", "=", "''", "f", "=", "frame", ".", "get_pyop", "(", ")", "if", "f", "and", "not", "f", ".", "is_...
Return the current python source line.
[ "Return", "the", "current", "python", "source", "line", "." ]
python
train
projectatomic/atomic-reactor
atomic_reactor/plugins/pre_add_yum_repo_by_url.py
https://github.com/projectatomic/atomic-reactor/blob/fd31c01b964097210bf169960d051e5f04019a80/atomic_reactor/plugins/pre_add_yum_repo_by_url.py#L50-L76
def run(self): """ run the plugin """ if self.workflow.builder.base_from_scratch and not self.workflow.builder.parent_images: self.log.info("Skipping add yum repo by url: unsupported for FROM-scratch images") return if self.repourls: for repourl in self.repourls: yumrepo = YumRepo(repourl) self.log.info("fetching yum repo from '%s'", yumrepo.repourl) try: yumrepo.fetch() except Exception as e: msg = "Failed to fetch yum repo {repo}: {exc}".format( repo=yumrepo.repourl, exc=e) raise RuntimeError(msg) else: self.log.info("fetched yum repo from '%s'", yumrepo.repourl) if self.inject_proxy: if yumrepo.is_valid(): yumrepo.set_proxy_for_all_repos(self.inject_proxy) self.workflow.files[yumrepo.dst_filename] = yumrepo.content self.log.debug("saving yum repo '%s', length %d", yumrepo.dst_filename, len(yumrepo.content))
[ "def", "run", "(", "self", ")", ":", "if", "self", ".", "workflow", ".", "builder", ".", "base_from_scratch", "and", "not", "self", ".", "workflow", ".", "builder", ".", "parent_images", ":", "self", ".", "log", ".", "info", "(", "\"Skipping add yum repo b...
run the plugin
[ "run", "the", "plugin" ]
python
train
jtwhite79/pyemu
pyemu/utils/gw_utils.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/utils/gw_utils.py#L1855-L1935
def load_sfr_out(sfr_out_file, selection=None): """load an ASCII SFR output file into a dictionary of kper: dataframes. aggregates flow to aquifer for segments and returns and flow out at downstream end of segment. Parameters ---------- sfr_out_file : str SFR ASCII output file Returns ------- sfr_dict : dict dictionary of {kper:dataframe} """ assert os.path.exists(sfr_out_file),"couldn't find sfr out file {0}".\ format(sfr_out_file) tag = " stream listing" lcount = 0 sfr_dict = {} if selection is None: pass elif isinstance(selection, str): assert selection == 'all', "If string passed as selection only 'all' allowed: {}".format(selection) else: assert isinstance( selection, pd.DataFrame), "'selection needs to be pandas Dataframe. Type {} passed.".format(type(selection)) assert np.all([sr in selection.columns for sr in ['segment', 'reach']] ), "Either 'segment' or 'reach' not in selection columns" with open(sfr_out_file) as f: while True: line = f.readline().lower() lcount += 1 if line == '': break if line.startswith(tag): raw = line.strip().split() kper = int(raw[3]) - 1 kstp = int(raw[5]) - 1 [f.readline() for _ in range(4)] #skip to where the data starts lcount += 4 dlines = [] while True: dline = f.readline() lcount += 1 if dline.strip() == '': break draw = dline.strip().split() dlines.append(draw) df = pd.DataFrame(data=np.array(dlines)).iloc[:, [3, 4, 6, 7]] df.columns = ["segment", "reach", "flaqx", "flout"] df.loc[:, "segment"] = df.segment.apply(np.int) df.loc[:, "reach"] = df.reach.apply(np.int) df.loc[:, "flaqx"] = df.flaqx.apply(np.float) df.loc[:, "flout"] = df.flout.apply(np.float) df.index = df.apply(lambda x: "{0:03d}_{1:03d}".format(int(x.segment), int(x.reach)), axis=1) if selection is None: # setup for all segs, aggregate gp = df.groupby(df.segment) bot_reaches = gp[['reach']].max().apply( lambda x: "{0:03d}_{1:03d}".format(int(x.name), int(x.reach)), axis=1) df2 = pd.DataFrame(index=gp.groups.keys(), columns=['flaqx', 'flout']) df2['flaqx'] = 
gp.flaqx.sum() # only sum distributed output df2['flout'] = df.loc[bot_reaches, 'flout'].values # take flow out of seg # df = df.groupby(df.segment).sum() df2.loc[:,"segment"] = df2.index elif isinstance(selection, str) and selection == 'all': df2 = df else: seg_reach_id = selection.apply(lambda x: "{0:03d}_{1:03d}". format(int(x.segment), int(x.reach)), axis=1).values for sr in seg_reach_id: if sr not in df.index: s, r = [x.lstrip('0') for x in sr.split('_')] warnings.warn("Requested segment reach pair ({0},{1}) is not in sfr output. Dropping...". format(int(r), int(s)), PyemuWarning) seg_reach_id = np.delete(seg_reach_id, np.where(seg_reach_id == sr), axis=0) df2 = df.loc[seg_reach_id].copy() if kper in sfr_dict.keys(): print("multiple entries found for kper {0}, replacing...".format(kper)) sfr_dict[kper] = df2 return sfr_dict
[ "def", "load_sfr_out", "(", "sfr_out_file", ",", "selection", "=", "None", ")", ":", "assert", "os", ".", "path", ".", "exists", "(", "sfr_out_file", ")", ",", "\"couldn't find sfr out file {0}\"", ".", "format", "(", "sfr_out_file", ")", "tag", "=", "\" strea...
load an ASCII SFR output file into a dictionary of kper: dataframes. aggregates flow to aquifer for segments and returns and flow out at downstream end of segment. Parameters ---------- sfr_out_file : str SFR ASCII output file Returns ------- sfr_dict : dict dictionary of {kper:dataframe}
[ "load", "an", "ASCII", "SFR", "output", "file", "into", "a", "dictionary", "of", "kper", ":", "dataframes", ".", "aggregates", "flow", "to", "aquifer", "for", "segments", "and", "returns", "and", "flow", "out", "at", "downstream", "end", "of", "segment", "...
python
train
linkedin/naarad
src/naarad/metrics/metric.py
https://github.com/linkedin/naarad/blob/261e2c0760fd6a6b0ee59064180bd8e3674311fe/src/naarad/metrics/metric.py#L494-L547
def plot_timeseries(self, graphing_library='matplotlib'): """ plot timeseries for sub-metrics """ if self.groupby: plot_data = {} # plot time series data for submetrics for out_csv in sorted(self.csv_files, reverse=True): csv_filename = os.path.basename(out_csv) transaction_name = ".".join(csv_filename.split('.')[1:-1]) if transaction_name in self.anomalies.keys(): highlight_regions = self.anomalies[transaction_name] else: highlight_regions = None # The last element is .csv, don't need that in the name of the chart column = csv_filename.split('.')[-2] transaction_name = ' '.join(csv_filename.split('.')[1:-2]) plot = PD(input_csv=out_csv, csv_column=1, series_name=transaction_name + '.' + column, y_label=column + ' (' + self.sub_metric_description[column] + ')', precision=None, graph_height=500, graph_width=1200, graph_type='line', highlight_regions=highlight_regions) if transaction_name in plot_data: plot_data[transaction_name].append(plot) else: plot_data[transaction_name] = [plot] for transaction in plot_data: graphed, div_file = Metric.graphing_modules[graphing_library].graph_data(plot_data[transaction], self.resource_directory, self.resource_path, self.label + '.' 
+ transaction) if graphed: self.plot_files.append(div_file) else: graphed = False for out_csv in self.csv_files: csv_filename = os.path.basename(out_csv) transaction_name = ".".join(csv_filename.split('.')[1:-1]) if transaction_name in self.anomalies.keys(): highlight_regions = self.anomalies[transaction_name] else: highlight_regions = None # The last element is .csv, don't need that in the name of the chart column = self.csv_column_map[out_csv] column = naarad.utils.sanitize_string(column) graph_title = '.'.join(csv_filename.split('.')[0:-1]) if self.sub_metric_description and column in self.sub_metric_description.keys(): graph_title += ' (' + self.sub_metric_description[column] + ')' if self.sub_metric_unit and column in self.sub_metric_unit.keys(): plot_data = [PD(input_csv=out_csv, csv_column=1, series_name=graph_title, y_label=column + ' (' + self.sub_metric_unit[column] + ')', precision=None, graph_height=600, graph_width=1200, graph_type='line', highlight_regions=highlight_regions)] else: plot_data = [PD(input_csv=out_csv, csv_column=1, series_name=graph_title, y_label=column, precision=None, graph_height=600, graph_width=1200, graph_type='line', highlight_regions=highlight_regions)] graphed, div_file = Metric.graphing_modules[graphing_library].graph_data(plot_data, self.resource_directory, self.resource_path, graph_title) if graphed: self.plot_files.append(div_file) return True
[ "def", "plot_timeseries", "(", "self", ",", "graphing_library", "=", "'matplotlib'", ")", ":", "if", "self", ".", "groupby", ":", "plot_data", "=", "{", "}", "# plot time series data for submetrics", "for", "out_csv", "in", "sorted", "(", "self", ".", "csv_files...
plot timeseries for sub-metrics
[ "plot", "timeseries", "for", "sub", "-", "metrics" ]
python
valid
mk-fg/txboxdotnet
txboxdotnet/api_v2.py
https://github.com/mk-fg/txboxdotnet/blob/4a3e48fbe1388c5e2a17e808aaaf6b2460e61f48/txboxdotnet/api_v2.py#L737-L757
def listdir( self, folder_id='0', type_filter=None, offset=None, limit=None, **listdir_kwz ): '''Return a list of objects in the specified folder_id. limit is passed to the API, so might be used as optimization. None means "fetch all items, with several requests, if necessary". type_filter can be set to type (str) or sequence of object types to return, post-api-call processing.''' res = yield super(txBox, self).listdir( folder_id=folder_id, offset=offset, limit=limit if limit is not None else 900, **listdir_kwz ) lst = res['entries'] if limit is None: # treat it as "no limit", using several requests to fetch all items while res['total_count'] > res['offset'] + res['limit']: offset = res['offset'] + res['limit'] res = yield super(txBox, self).listdir( folder_id=folder_id, offset=offset, limit=900, **listdir_kwz ) lst.extend(res['entries']) if type_filter: if isinstance(type_filter, types.StringTypes): type_filter = {type_filter} lst = list(obj for obj in lst if obj['type'] in type_filter) defer.returnValue(lst)
[ "def", "listdir", "(", "self", ",", "folder_id", "=", "'0'", ",", "type_filter", "=", "None", ",", "offset", "=", "None", ",", "limit", "=", "None", ",", "*", "*", "listdir_kwz", ")", ":", "res", "=", "yield", "super", "(", "txBox", ",", "self", ")...
Return a list of objects in the specified folder_id. limit is passed to the API, so might be used as optimization. None means "fetch all items, with several requests, if necessary". type_filter can be set to type (str) or sequence of object types to return, post-api-call processing.
[ "Return", "a", "list", "of", "objects", "in", "the", "specified", "folder_id", ".", "limit", "is", "passed", "to", "the", "API", "so", "might", "be", "used", "as", "optimization", ".", "None", "means", "fetch", "all", "items", "with", "several", "requests"...
python
train
pypa/pipenv
pipenv/vendor/six.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/six.py#L892-L908
def ensure_text(s, encoding='utf-8', errors='strict'): """Coerce *s* to six.text_type. For Python 2: - `unicode` -> `unicode` - `str` -> `unicode` For Python 3: - `str` -> `str` - `bytes` -> decoded to `str` """ if isinstance(s, binary_type): return s.decode(encoding, errors) elif isinstance(s, text_type): return s else: raise TypeError("not expecting type '%s'" % type(s))
[ "def", "ensure_text", "(", "s", ",", "encoding", "=", "'utf-8'", ",", "errors", "=", "'strict'", ")", ":", "if", "isinstance", "(", "s", ",", "binary_type", ")", ":", "return", "s", ".", "decode", "(", "encoding", ",", "errors", ")", "elif", "isinstanc...
Coerce *s* to six.text_type. For Python 2: - `unicode` -> `unicode` - `str` -> `unicode` For Python 3: - `str` -> `str` - `bytes` -> decoded to `str`
[ "Coerce", "*", "s", "*", "to", "six", ".", "text_type", "." ]
python
train
Telefonica/toolium
toolium/utils.py
https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/utils.py#L418-L459
def get_remote_node(self): """Return the remote node that it's executing the actual test session :returns: tuple with server type (local, grid, ggr, selenium) and remote node name """ logging.getLogger("requests").setLevel(logging.WARNING) remote_node = None server_type = 'local' if self.driver_wrapper.config.getboolean_optional('Server', 'enabled'): # Request session info from grid hub session_id = self.driver_wrapper.driver.session_id self.logger.debug("Trying to identify remote node") try: # Request session info from grid hub and extract remote node url = '{}/grid/api/testsession?session={}'.format(self.get_server_url(), session_id) proxy_id = requests.get(url).json()['proxyId'] remote_node = urlparse(proxy_id).hostname if urlparse(proxy_id).hostname else proxy_id server_type = 'grid' self.logger.debug("Test running in remote node %s", remote_node) except (ValueError, KeyError): try: # Request session info from GGR and extract remote node from toolium.selenoid import Selenoid remote_node = Selenoid(self.driver_wrapper).get_selenoid_info()['Name'] server_type = 'ggr' self.logger.debug("Test running in a GGR remote node %s", remote_node) except Exception: try: # The remote node is a Selenoid node url = '{}/status'.format(self.get_server_url()) requests.get(url).json()['total'] remote_node = self.driver_wrapper.config.get('Server', 'host') server_type = 'selenoid' self.logger.debug("Test running in a Selenoid node %s", remote_node) except Exception: # The remote node is not a grid node or the session has been closed remote_node = self.driver_wrapper.config.get('Server', 'host') server_type = 'selenium' self.logger.debug("Test running in a Selenium node %s", remote_node) return server_type, remote_node
[ "def", "get_remote_node", "(", "self", ")", ":", "logging", ".", "getLogger", "(", "\"requests\"", ")", ".", "setLevel", "(", "logging", ".", "WARNING", ")", "remote_node", "=", "None", "server_type", "=", "'local'", "if", "self", ".", "driver_wrapper", ".",...
Return the remote node that it's executing the actual test session :returns: tuple with server type (local, grid, ggr, selenium) and remote node name
[ "Return", "the", "remote", "node", "that", "it", "s", "executing", "the", "actual", "test", "session" ]
python
train
pymupdf/PyMuPDF
fitz/fitz.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L624-L629
def includePoint(self, p): """Extend rectangle to include point p.""" if not len(p) == 2: raise ValueError("bad sequ. length") self.x0, self.y0, self.x1, self.y1 = TOOLS._include_point_in_rect(self, p) return self
[ "def", "includePoint", "(", "self", ",", "p", ")", ":", "if", "not", "len", "(", "p", ")", "==", "2", ":", "raise", "ValueError", "(", "\"bad sequ. length\"", ")", "self", ".", "x0", ",", "self", ".", "y0", ",", "self", ".", "x1", ",", "self", "....
Extend rectangle to include point p.
[ "Extend", "rectangle", "to", "include", "point", "p", "." ]
python
train
StanfordBioinformatics/loom
server/loomengine_server/api/async.py
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/server/loomengine_server/api/async.py#L66-L81
def check_for_missed_cleanup(): """Check for TaskAttempts that were never cleaned up """ if get_setting('PRESERVE_ALL'): return from api.models.tasks import TaskAttempt if get_setting('PRESERVE_ON_FAILURE'): for task_attempt in TaskAttempt.objects.filter( status_is_running=False).filter( status_is_cleaned_up=False).exclude( status_is_failed=True): task_attempt.cleanup() else: for task_attempt in TaskAttempt.objects.filter( status_is_running=False).filter(status_is_cleaned_up=False): task_attempt.cleanup()
[ "def", "check_for_missed_cleanup", "(", ")", ":", "if", "get_setting", "(", "'PRESERVE_ALL'", ")", ":", "return", "from", "api", ".", "models", ".", "tasks", "import", "TaskAttempt", "if", "get_setting", "(", "'PRESERVE_ON_FAILURE'", ")", ":", "for", "task_attem...
Check for TaskAttempts that were never cleaned up
[ "Check", "for", "TaskAttempts", "that", "were", "never", "cleaned", "up" ]
python
train
saltstack/salt
salt/output/no_return.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/output/no_return.py#L33-L56
def display(self, ret, indent, prefix, out): ''' Recursively iterate down through data structures to determine output ''' if isinstance(ret, six.string_types): lines = ret.split('\n') for line in lines: out += '{0}{1}{2}{3}{4}\n'.format( self.colors['RED'], ' ' * indent, prefix, line, self.colors['ENDC']) elif isinstance(ret, dict): for key in sorted(ret): val = ret[key] out += '{0}{1}{2}{3}{4}:\n'.format( self.colors['CYAN'], ' ' * indent, prefix, key, self.colors['ENDC']) out = self.display(val, indent + 4, '', out) return out
[ "def", "display", "(", "self", ",", "ret", ",", "indent", ",", "prefix", ",", "out", ")", ":", "if", "isinstance", "(", "ret", ",", "six", ".", "string_types", ")", ":", "lines", "=", "ret", ".", "split", "(", "'\\n'", ")", "for", "line", "in", "...
Recursively iterate down through data structures to determine output
[ "Recursively", "iterate", "down", "through", "data", "structures", "to", "determine", "output" ]
python
train
Yubico/python-pyhsm
pyhsm/util.py
https://github.com/Yubico/python-pyhsm/blob/b6e2744d1ea15c352a0fc1d6ebc5950026b71311/pyhsm/util.py#L152-L164
def validate_cmd_response_nonce(got, used): """ Check that the returned nonce matches nonce used in request. A request nonce of 000000000000 means the HSM should generate a nonce internally though, so if 'used' is all zeros we actually check that 'got' does NOT match 'used'. """ if used == '000000000000'.decode('hex'): if got == used: raise(pyhsm.exception.YHSM_Error("Bad nonce in response (got %s, expected HSM generated nonce)" \ % (got.encode('hex')))) return got return validate_cmd_response_str('nonce', got, used)
[ "def", "validate_cmd_response_nonce", "(", "got", ",", "used", ")", ":", "if", "used", "==", "'000000000000'", ".", "decode", "(", "'hex'", ")", ":", "if", "got", "==", "used", ":", "raise", "(", "pyhsm", ".", "exception", ".", "YHSM_Error", "(", "\"Bad ...
Check that the returned nonce matches nonce used in request. A request nonce of 000000000000 means the HSM should generate a nonce internally though, so if 'used' is all zeros we actually check that 'got' does NOT match 'used'.
[ "Check", "that", "the", "returned", "nonce", "matches", "nonce", "used", "in", "request", "." ]
python
train
hugapi/hug
hug/output_format.py
https://github.com/hugapi/hug/blob/080901c81576657f82e2432fd4a82f1d0d2f370c/hug/output_format.py#L369-L393
def prefix(handlers, default=None, error='The requested prefix does not match any of those allowed'): """Returns a content in a different format based on the prefix placed at the end of the URL route should pass in a dict with the following format: {'[prefix]': action, ... } """ def output_type(data, request, response): path = request.path handler = default for prefix_test, prefix_handler in handlers.items(): if path.startswith(prefix_test): handler = prefix_handler break if not handler: raise falcon.HTTPNotAcceptable(error) response.content_type = handler.content_type return handler(data, request=request, response=response) output_type.__doc__ = 'Supports any of the following formats: {0}'.format(', '.join(function.__doc__ for function in handlers.values())) output_type.content_type = ', '.join(handlers.keys()) return output_type
[ "def", "prefix", "(", "handlers", ",", "default", "=", "None", ",", "error", "=", "'The requested prefix does not match any of those allowed'", ")", ":", "def", "output_type", "(", "data", ",", "request", ",", "response", ")", ":", "path", "=", "request", ".", ...
Returns a content in a different format based on the prefix placed at the end of the URL route should pass in a dict with the following format: {'[prefix]': action, ... }
[ "Returns", "a", "content", "in", "a", "different", "format", "based", "on", "the", "prefix", "placed", "at", "the", "end", "of", "the", "URL", "route", "should", "pass", "in", "a", "dict", "with", "the", "following", "format", ":" ]
python
train
mitsei/dlkit
dlkit/records/assessment/basic/simple_records.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/basic/simple_records.py#L453-L465
def set_min_string_length(self, length=None): """stub""" if self.get_min_string_length_metadata().is_read_only(): raise NoAccess() if not self.my_osid_object_form._is_valid_cardinal( length, self.get_min_string_length_metadata()): raise InvalidArgument() if self.my_osid_object_form.max_string_length is not None and \ length > self.my_osid_object_form.max_string_length - 1: raise InvalidArgument() self.my_osid_object_form._my_map['minStringLength'] = length self._min_string_length = length
[ "def", "set_min_string_length", "(", "self", ",", "length", "=", "None", ")", ":", "if", "self", ".", "get_min_string_length_metadata", "(", ")", ".", "is_read_only", "(", ")", ":", "raise", "NoAccess", "(", ")", "if", "not", "self", ".", "my_osid_object_for...
stub
[ "stub" ]
python
train
Scille/mestr
mestr/mestr.py
https://github.com/Scille/mestr/blob/344092aac77ad222fb6cd62927c7801c2ece33fc/mestr/mestr.py#L61-L109
def authenticate(realm, authid, details): """ application_name : name of your application version : version of your application required_components dictionary of components required for you application and their version required { "component" : "1.1", "component2" : "0.1", ... } when all the different component required has been register your component will be allow to authenticate with a role build only for your application with only the right right for it to works """ global _start global _waiting import json ticket = json.loads(details['ticket'] ) if 'application_name' not in ticket and 'version' not in ticket: raise ApplicationError( 'could not start the authentication of an app,\ field application_name or version is missing') application_name = ticket['application_name'] version = ticket['version'] required_components = ticket[ 'required_components'] if 'required_components' in ticket else {} if not _try_to_start_app(application_name, version, required_components): ready_defered = defer.Deferred() ready_defered.addCallback(defer_try_start_app, application_name=application_name, version=version, required_components=required_components) _waiting[application_name]['defer'] = ready_defered yield ready_defered print("[MESTR] start app: ", _start) print("[MESTR] waiting app: ", _waiting) for k in _start: if k in _waiting: _waiting = remove_element(_waiting, k) # backend role must be contains in the config.json # since we can't create them dynamically for the moment returnValue("backend")
[ "def", "authenticate", "(", "realm", ",", "authid", ",", "details", ")", ":", "global", "_start", "global", "_waiting", "import", "json", "ticket", "=", "json", ".", "loads", "(", "details", "[", "'ticket'", "]", ")", "if", "'application_name'", "not", "in...
application_name : name of your application version : version of your application required_components dictionary of components required for you application and their version required { "component" : "1.1", "component2" : "0.1", ... } when all the different component required has been register your component will be allow to authenticate with a role build only for your application with only the right right for it to works
[ "application_name", ":", "name", "of", "your", "application", "version", ":", "version", "of", "your", "application", "required_components", "dictionary", "of", "components", "required", "for", "you", "application", "and", "their", "version", "required" ]
python
train
tadashi-aikawa/jumeaux
jumeaux/executor.py
https://github.com/tadashi-aikawa/jumeaux/blob/23389bde3e9b27b3a646d99289f8b5ced411f6f0/jumeaux/executor.py#L248-L398
def challenge(arg: ChallengeArg) -> dict: """ Response is dict like `Trial` because Status(OwlEnum) can't be pickled. """ name: str = arg.req.name.get_or(str(arg.seq)) log_prefix = f"[{arg.seq} / {arg.number_of_request}]" logger.info_lv3(f"{log_prefix} {'-'*80}") logger.info_lv3(f"{log_prefix} {arg.seq}. {arg.req.name.get_or(arg.req.path)}") logger.info_lv3(f"{log_prefix} {'-'*80}") path_str_one = arg.path_one.map(lambda x: re.sub(x.before, x.after, arg.req.path)).get_or(arg.req.path) path_str_other = arg.path_other.map(lambda x: re.sub(x.before, x.after, arg.req.path)).get_or(arg.req.path) qs_str_one = create_query_string(arg.req.qs, arg.query_one, arg.req.url_encoding) qs_str_other = create_query_string(arg.req.qs, arg.query_other, arg.req.url_encoding) url_one = f'{arg.host_one}{path_str_one}?{qs_str_one}' url_other = f'{arg.host_other}{path_str_other}?{qs_str_other}' # Get two responses req_time = now() try: logger.info_lv3(f"{log_prefix} One URL: {url_one}") logger.debug(f"{log_prefix} One PROXY: {arg.proxy_one.map(lambda x: x.to_dict()).get()}") logger.info_lv3(f"{log_prefix} Other URL: {url_other}") logger.debug(f"{log_prefix} Other PROXY: {arg.proxy_other.map(lambda x: x.to_dict()).get()}") r_one, r_other = concurrent_request(arg.session, arg.req.headers, url_one, url_other, arg.proxy_one, arg.proxy_other) logger.info_lv3( f"{log_prefix} One: {r_one.status_code} / {to_sec(r_one.elapsed)}s / {len(r_one.content)}b / {r_one.headers.get('content-type')}" # noqa ) logger.info_lv3( f"{log_prefix} Other: {r_other.status_code} / {to_sec(r_other.elapsed)}s / {len(r_other.content)}b / {r_other.headers.get('content-type')}" # noqa ) except ConnectionError: logger.info_lv1(f"{log_prefix} 💀 {arg.req.name.get()}") # TODO: Integrate logic into create_trial return { "seq": arg.seq, "name": name, "tags": [], "request_time": req_time.isoformat(), "status": 'failure', "path": arg.req.path, "queries": arg.req.qs, "headers": arg.req.headers, "one": { "url": url_one, "type": 
"unknown", }, "other": { "url": url_other, "type": "unknown", } } res_one: Response = res2res(Response.from_requests(r_one, arg.default_response_encoding_one), arg.req) res_other: Response = res2res(Response.from_requests(r_other, arg.default_response_encoding_other), arg.req) dict_one: TOption[dict] = res2dict(res_one) dict_other: TOption[dict] = res2dict(res_other) # Create diff # Either dict_one or dic_other is None, it means that it can't be analyzed, therefore return None ddiff = None if dict_one.is_none() or dict_other.is_none() \ else {} if res_one.body == res_other.body \ else DeepDiff(dict_one.get(), dict_other.get()) initial_diffs_by_cognition: Optional[TDict[DiffKeys]] = TDict({ "unknown": DiffKeys.from_dict({ "changed": TList(ddiff.get('type_changes', {}).keys() | ddiff.get('values_changed', {}).keys()) .map(to_jumeaux_xpath) .order_by(_), "added": TList(ddiff.get('dictionary_item_added', {}) | ddiff.get('iterable_item_added', {}).keys()) .map(to_jumeaux_xpath) .order_by(_), "removed": TList(ddiff.get('dictionary_item_removed', {}) | ddiff.get('iterable_item_removed', {}).keys()) .map(to_jumeaux_xpath) .order_by(_) }) }) if ddiff is not None else None # Judgement status, diffs_by_cognition = judgement(res_one, res_other, dict_one, dict_other, name, arg.req.path, arg.req.qs, arg.req.headers, initial_diffs_by_cognition) status_symbol = "O" if status == Status.SAME else "X" log_msg = f"{log_prefix} {status_symbol} ({res_one.status_code} - {res_other.status_code}) <{res_one.elapsed_sec}s - {res_other.elapsed_sec}s> {arg.req.name.get_or(arg.req.path)}" # noqa (logger.info_lv2 if status == Status.SAME else logger.info_lv1)(log_msg) file_one: Optional[str] = None file_other: Optional[str] = None prop_file_one: Optional[str] = None prop_file_other: Optional[str] = None if store_criterion(status, name, arg.req, res_one, res_other): dir = f'{arg.res_dir}/{arg.key}' file_one = f'one/({arg.seq}){name}' file_other = f'other/({arg.seq}){name}' write_to_file(file_one, 
dir, dump(res_one)) write_to_file(file_other, dir, dump(res_other)) if not dict_one.is_none(): prop_file_one = f'one-props/({arg.seq}){name}.json' write_to_file(prop_file_one, dir, TDict(dict_one.get()).to_json().encode('utf-8', errors='replace')) if not dict_other.is_none(): prop_file_other = f'other-props/({arg.seq}){name}.json' write_to_file(prop_file_other, dir, TDict(dict_other.get()).to_json().encode('utf-8', errors='replace')) return global_addon_executor.apply_did_challenge( DidChallengeAddOnPayload.from_dict({ "trial": Trial.from_dict({ "seq": arg.seq, "name": name, "tags": [], # TODO: tags created by reqs2reqs "request_time": req_time.isoformat(), "status": status, "path": arg.req.path, "queries": arg.req.qs, "headers": arg.req.headers, "diffs_by_cognition": diffs_by_cognition, "one": { "url": res_one.url, "type": res_one.type, "status_code": res_one.status_code, "byte": res_one.byte, "response_sec": res_one.elapsed_sec, "content_type": res_one.content_type, "mime_type": res_one.mime_type, "encoding": res_one.encoding, "file": file_one, "prop_file": prop_file_one, }, "other": { "url": res_other.url, "type": res_other.type, "status_code": res_other.status_code, "byte": res_other.byte, "response_sec": res_other.elapsed_sec, "content_type": res_other.content_type, "mime_type": res_other.mime_type, "encoding": res_other.encoding, "file": file_other, "prop_file": prop_file_other, } }) }), DidChallengeAddOnReference.from_dict({ "res_one": res_one, "res_other": res_other, "res_one_props": dict_one, "res_other_props": dict_other, }) ).trial.to_dict()
[ "def", "challenge", "(", "arg", ":", "ChallengeArg", ")", "->", "dict", ":", "name", ":", "str", "=", "arg", ".", "req", ".", "name", ".", "get_or", "(", "str", "(", "arg", ".", "seq", ")", ")", "log_prefix", "=", "f\"[{arg.seq} / {arg.number_of_request}...
Response is dict like `Trial` because Status(OwlEnum) can't be pickled.
[ "Response", "is", "dict", "like", "Trial", "because", "Status", "(", "OwlEnum", ")", "can", "t", "be", "pickled", "." ]
python
train
dswah/pyGAM
pygam/datasets/load_datasets.py
https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/datasets/load_datasets.py#L17-L20
def _clean_X_y(X, y): """ensure that X and y data are float and correct shapes """ return make_2d(X, verbose=False).astype('float'), y.astype('float')
[ "def", "_clean_X_y", "(", "X", ",", "y", ")", ":", "return", "make_2d", "(", "X", ",", "verbose", "=", "False", ")", ".", "astype", "(", "'float'", ")", ",", "y", ".", "astype", "(", "'float'", ")" ]
ensure that X and y data are float and correct shapes
[ "ensure", "that", "X", "and", "y", "data", "are", "float", "and", "correct", "shapes" ]
python
train
nerdvegas/rez
src/rez/vendor/pygraph/readwrite/dot.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pygraph/readwrite/dot.py#L107-L176
def write(G, weighted=False): """ Return a string specifying the given graph in Dot language. @type G: graph @param G: Graph. @type weighted: boolean @param weighted: Whether edges should be labelled with their weight. @rtype: string @return: String specifying the graph in Dot Language. """ dotG = pydot.Dot() if not 'name' in dir(G): dotG.set_name('graphname') else: dotG.set_name(G.name) if (isinstance(G, graph)): dotG.set_type('graph') directed = False elif (isinstance(G, digraph)): dotG.set_type('digraph') directed = True elif (isinstance(G, hypergraph)): return write_hypergraph(G) else: raise InvalidGraphType("Expected graph or digraph, got %s" % repr(G) ) for node in G.nodes(): attr_list = {} for attr in G.node_attributes(node): attr_list[str(attr[0])] = str(attr[1]) newNode = pydot.Node(str(node), **attr_list) dotG.add_node(newNode) # Pydot doesn't work properly with the get_edge, so we use # our own set to keep track of what's been added or not. seen_edges = set([]) for edge_from, edge_to in G.edges(): if (str(edge_from) + "-" + str(edge_to)) in seen_edges: continue if (not directed) and (str(edge_to) + "-" + str(edge_from)) in seen_edges: continue attr_list = {} for attr in G.edge_attributes((edge_from, edge_to)): attr_list[str(attr[0])] = str(attr[1]) if str(G.edge_label((edge_from, edge_to))): attr_list['label'] = str(G.edge_label((edge_from, edge_to))) elif weighted: attr_list['label'] = str(G.edge_weight((edge_from, edge_to))) if weighted: attr_list['weight'] = str(G.edge_weight((edge_from, edge_to))) newEdge = pydot.Edge(str(edge_from), str(edge_to), **attr_list) dotG.add_edge(newEdge) seen_edges.add(str(edge_from) + "-" + str(edge_to)) return dotG.to_string()
[ "def", "write", "(", "G", ",", "weighted", "=", "False", ")", ":", "dotG", "=", "pydot", ".", "Dot", "(", ")", "if", "not", "'name'", "in", "dir", "(", "G", ")", ":", "dotG", ".", "set_name", "(", "'graphname'", ")", "else", ":", "dotG", ".", "...
Return a string specifying the given graph in Dot language. @type G: graph @param G: Graph. @type weighted: boolean @param weighted: Whether edges should be labelled with their weight. @rtype: string @return: String specifying the graph in Dot Language.
[ "Return", "a", "string", "specifying", "the", "given", "graph", "in", "Dot", "language", ".", "@type", "G", ":", "graph", "@param", "G", ":", "Graph", "." ]
python
train
thewca/wca-regulations-compiler
wrc/codegen/cghtml.py
https://github.com/thewca/wca-regulations-compiler/blob/3ebbd8fe8fec7c9167296f59b2677696fe61a954/wrc/codegen/cghtml.py#L70-L74
def link2html(text): ''' Turns md links to html ''' match = r'\[([^\]]+)\]\(([^)]+)\)' replace = r'<a href="\2">\1</a>' return re.sub(match, replace, text)
[ "def", "link2html", "(", "text", ")", ":", "match", "=", "r'\\[([^\\]]+)\\]\\(([^)]+)\\)'", "replace", "=", "r'<a href=\"\\2\">\\1</a>'", "return", "re", ".", "sub", "(", "match", ",", "replace", ",", "text", ")" ]
Turns md links to html
[ "Turns", "md", "links", "to", "html" ]
python
train
bitcraze/crazyflie-lib-python
cflib/crtp/radiodriver.py
https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/cflib/crtp/radiodriver.py#L273-L288
def close(self): """ Close the link. """ # Stop the comm thread self._thread.stop() # Close the USB dongle if self._radio_manager: self._radio_manager.close() self._radio_manager = None while not self.out_queue.empty(): self.out_queue.get() # Clear callbacks self.link_error_callback = None self.link_quality_callback = None
[ "def", "close", "(", "self", ")", ":", "# Stop the comm thread", "self", ".", "_thread", ".", "stop", "(", ")", "# Close the USB dongle", "if", "self", ".", "_radio_manager", ":", "self", ".", "_radio_manager", ".", "close", "(", ")", "self", ".", "_radio_ma...
Close the link.
[ "Close", "the", "link", "." ]
python
train
cloudmesh/cloudmesh-common
cloudmesh/common/Shell.py
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Shell.py#L435-L446
def ping(cls, host=None, count=1): """ execute ping :param host: the host to ping :param count: the number of pings :return: """ option = '-n' if platform.system().lower() == 'windows' else '-c' return cls.execute('ping', "{option} {count} {host}".format(option=option, count=count, host=host))
[ "def", "ping", "(", "cls", ",", "host", "=", "None", ",", "count", "=", "1", ")", ":", "option", "=", "'-n'", "if", "platform", ".", "system", "(", ")", ".", "lower", "(", ")", "==", "'windows'", "else", "'-c'", "return", "cls", ".", "execute", "...
execute ping :param host: the host to ping :param count: the number of pings :return:
[ "execute", "ping", ":", "param", "host", ":", "the", "host", "to", "ping", ":", "param", "count", ":", "the", "number", "of", "pings", ":", "return", ":" ]
python
train
JelteF/PyLaTeX
pylatex/lists.py
https://github.com/JelteF/PyLaTeX/blob/62d9d9912ce8445e6629cdbcb80ad86143a1ed23/pylatex/lists.py#L74-L85
def add_item(self, label, s): """Add an item to the list. Args ---- label: str Description of the item. s: str or `~.LatexObject` The item itself. """ self.append(Command('item', options=label)) self.append(s)
[ "def", "add_item", "(", "self", ",", "label", ",", "s", ")", ":", "self", ".", "append", "(", "Command", "(", "'item'", ",", "options", "=", "label", ")", ")", "self", ".", "append", "(", "s", ")" ]
Add an item to the list. Args ---- label: str Description of the item. s: str or `~.LatexObject` The item itself.
[ "Add", "an", "item", "to", "the", "list", "." ]
python
train
fbcotter/py3nvml
py3nvml/py3nvml.py
https://github.com/fbcotter/py3nvml/blob/47f0f2c0eee56dec4e4beebec26b734e01d357b7/py3nvml/py3nvml.py#L1104-L1138
def _LoadNvmlLibrary(): """ Load the library if it isn't loaded already """ global nvmlLib if (nvmlLib is None): # lock to ensure only one caller loads the library libLoadLock.acquire() try: # ensure the library still isn't loaded if (nvmlLib is None): try: if (sys.platform[:3] == "win"): searchPaths = [ os.path.join(os.getenv("ProgramFiles", r"C:\Program Files"), r"NVIDIA Corporation\NVSMI\nvml.dll"), os.path.join(os.getenv("WinDir", r"C:\Windows"), r"System32\nvml.dll"), ] nvmlPath = next((x for x in searchPaths if os.path.isfile(x)), None) if (nvmlPath == None): _nvmlCheckReturn(NVML_ERROR_LIBRARY_NOT_FOUND) else: # cdecl calling convention nvmlLib = CDLL(nvmlPath) else: # assume linux nvmlLib = CDLL("libnvidia-ml.so.1") except OSError as ose: _nvmlCheckReturn(NVML_ERROR_LIBRARY_NOT_FOUND) if (nvmlLib == None): _nvmlCheckReturn(NVML_ERROR_LIBRARY_NOT_FOUND) finally: # lock is always freed libLoadLock.release()
[ "def", "_LoadNvmlLibrary", "(", ")", ":", "global", "nvmlLib", "if", "(", "nvmlLib", "is", "None", ")", ":", "# lock to ensure only one caller loads the library", "libLoadLock", ".", "acquire", "(", ")", "try", ":", "# ensure the library still isn't loaded", "if", "("...
Load the library if it isn't loaded already
[ "Load", "the", "library", "if", "it", "isn", "t", "loaded", "already" ]
python
train
thunder-project/thunder
thunder/images/readers.py
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/readers.py#L159-L221
def frompath(path, accessor=None, ext=None, start=None, stop=None, recursive=False, npartitions=None, dims=None, dtype=None, labels=None, recount=False, engine=None, credentials=None): """ Load images from a path using the given accessor. Supports both local and remote filesystems. Parameters ---------- accessor : function Apply to each item after loading to yield an image. ext : str, optional, default=None File extension. npartitions : int, optional, default=None Number of partitions for computational engine, if None will use default for engine. dims : tuple, optional, default=None Dimensions of images. dtype : str, optional, default=None Numerical type of images. labels : array, optional, default = None Labels for records. If provided, should be one-dimensional. start, stop : nonnegative int, optional, default=None Indices of files to load, interpreted using Python slicing conventions. recursive : boolean, optional, default=False If true, will recursively descend directories from path, loading all files with an extension matching 'ext'. recount : boolean, optional, default=False Force subsequent record counting. """ from thunder.readers import get_parallel_reader reader = get_parallel_reader(path)(engine, credentials=credentials) data = reader.read(path, ext=ext, start=start, stop=stop, recursive=recursive, npartitions=npartitions) if spark and isinstance(engine, spark): if accessor: data = data.flatMap(accessor) if recount: nrecords = None def switch(record): ary, idx = record return (idx,), ary data = data.values().zipWithIndex().map(switch) else: nrecords = reader.nfiles return fromrdd(data, nrecords=nrecords, dims=dims, dtype=dtype, labels=labels, ordered=True) else: if accessor: data = [accessor(d) for d in data] flattened = list(itertools.chain(*data)) values = [kv[1] for kv in flattened] return fromarray(values, labels=labels)
[ "def", "frompath", "(", "path", ",", "accessor", "=", "None", ",", "ext", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "recursive", "=", "False", ",", "npartitions", "=", "None", ",", "dims", "=", "None", ",", "dtype", "="...
Load images from a path using the given accessor. Supports both local and remote filesystems. Parameters ---------- accessor : function Apply to each item after loading to yield an image. ext : str, optional, default=None File extension. npartitions : int, optional, default=None Number of partitions for computational engine, if None will use default for engine. dims : tuple, optional, default=None Dimensions of images. dtype : str, optional, default=None Numerical type of images. labels : array, optional, default = None Labels for records. If provided, should be one-dimensional. start, stop : nonnegative int, optional, default=None Indices of files to load, interpreted using Python slicing conventions. recursive : boolean, optional, default=False If true, will recursively descend directories from path, loading all files with an extension matching 'ext'. recount : boolean, optional, default=False Force subsequent record counting.
[ "Load", "images", "from", "a", "path", "using", "the", "given", "accessor", "." ]
python
train
aiortc/aioice
aioice/ice.py
https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/ice.py#L299-L319
def add_remote_candidate(self, remote_candidate): """ Add a remote candidate or signal end-of-candidates. To signal end-of-candidates, pass `None`. """ if self._remote_candidates_end: raise ValueError('Cannot add remote candidate after end-of-candidates.') if remote_candidate is None: self._prune_components() self._remote_candidates_end = True return self._remote_candidates.append(remote_candidate) for protocol in self._protocols: if (protocol.local_candidate.can_pair_with(remote_candidate) and not self._find_pair(protocol, remote_candidate)): pair = CandidatePair(protocol, remote_candidate) self._check_list.append(pair) self.sort_check_list()
[ "def", "add_remote_candidate", "(", "self", ",", "remote_candidate", ")", ":", "if", "self", ".", "_remote_candidates_end", ":", "raise", "ValueError", "(", "'Cannot add remote candidate after end-of-candidates.'", ")", "if", "remote_candidate", "is", "None", ":", "self...
Add a remote candidate or signal end-of-candidates. To signal end-of-candidates, pass `None`.
[ "Add", "a", "remote", "candidate", "or", "signal", "end", "-", "of", "-", "candidates", "." ]
python
train
cloudant/python-cloudant
src/cloudant/document.py
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/document.py#L196-L211
def list_field_append(doc, field, value): """ Appends a value to a list field in a locally cached Document object. If a field does not exist it will be created first. :param Document doc: Locally cached Document object that can be a Document, DesignDocument or dict. :param str field: Name of the field list to append to. :param value: Value to append to the field list. """ if doc.get(field) is None: doc[field] = [] if not isinstance(doc[field], list): raise CloudantDocumentException(102, field) if value is not None: doc[field].append(value)
[ "def", "list_field_append", "(", "doc", ",", "field", ",", "value", ")", ":", "if", "doc", ".", "get", "(", "field", ")", "is", "None", ":", "doc", "[", "field", "]", "=", "[", "]", "if", "not", "isinstance", "(", "doc", "[", "field", "]", ",", ...
Appends a value to a list field in a locally cached Document object. If a field does not exist it will be created first. :param Document doc: Locally cached Document object that can be a Document, DesignDocument or dict. :param str field: Name of the field list to append to. :param value: Value to append to the field list.
[ "Appends", "a", "value", "to", "a", "list", "field", "in", "a", "locally", "cached", "Document", "object", ".", "If", "a", "field", "does", "not", "exist", "it", "will", "be", "created", "first", "." ]
python
train
prometheus/client_python
prometheus_client/exposition.py
https://github.com/prometheus/client_python/blob/31f5557e2e84ca4ffa9a03abf6e3f4d0c8b8c3eb/prometheus_client/exposition.py#L229-L245
def basic_auth_handler(url, method, timeout, headers, data, username=None, password=None): """Handler that implements HTTP/HTTPS connections with Basic Auth. Sets auth headers using supplied 'username' and 'password', if set. Used by the push_to_gateway functions. Can be re-used by other handlers.""" def handle(): """Handler that implements HTTP Basic Auth. """ if username is not None and password is not None: auth_value = '{0}:{1}'.format(username, password).encode('utf-8') auth_token = base64.b64encode(auth_value) auth_header = b'Basic ' + auth_token headers.append(['Authorization', auth_header]) default_handler(url, method, timeout, headers, data)() return handle
[ "def", "basic_auth_handler", "(", "url", ",", "method", ",", "timeout", ",", "headers", ",", "data", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "def", "handle", "(", ")", ":", "\"\"\"Handler that implements HTTP Basic Auth.\n \"...
Handler that implements HTTP/HTTPS connections with Basic Auth. Sets auth headers using supplied 'username' and 'password', if set. Used by the push_to_gateway functions. Can be re-used by other handlers.
[ "Handler", "that", "implements", "HTTP", "/", "HTTPS", "connections", "with", "Basic", "Auth", "." ]
python
train
QuantEcon/QuantEcon.py
quantecon/markov/ddp.py
https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/markov/ddp.py#L797-L834
def policy_iteration(self, v_init=None, max_iter=None): """ Solve the optimization problem by policy iteration. See the `solve` method. """ if self.beta == 1: raise NotImplementedError(self._error_msg_no_discounting) if max_iter is None: max_iter = self.max_iter # What for initial condition? if v_init is None: v_init = self.s_wise_max(self.R) sigma = self.compute_greedy(v_init) new_sigma = np.empty(self.num_states, dtype=int) for i in range(max_iter): # Policy evaluation v_sigma = self.evaluate_policy(sigma) # Policy improvement self.compute_greedy(v_sigma, sigma=new_sigma) if np.array_equal(new_sigma, sigma): break sigma[:] = new_sigma num_iter = i + 1 res = DPSolveResult(v=v_sigma, sigma=sigma, num_iter=num_iter, mc=self.controlled_mc(sigma), method='policy iteration', max_iter=max_iter) return res
[ "def", "policy_iteration", "(", "self", ",", "v_init", "=", "None", ",", "max_iter", "=", "None", ")", ":", "if", "self", ".", "beta", "==", "1", ":", "raise", "NotImplementedError", "(", "self", ".", "_error_msg_no_discounting", ")", "if", "max_iter", "is...
Solve the optimization problem by policy iteration. See the `solve` method.
[ "Solve", "the", "optimization", "problem", "by", "policy", "iteration", ".", "See", "the", "solve", "method", "." ]
python
train
Fuyukai/asyncwebsockets
asyncwebsockets/client.py
https://github.com/Fuyukai/asyncwebsockets/blob/e33e75fd51ce5ae0feac244e8407d2672c5b4745/asyncwebsockets/client.py#L65-L76
async def open_websocket_client(sock: anyio.abc.SocketStream, addr, path: str, headers: Optional[list] = None, subprotocols: Optional[list] = None): """Create a websocket on top of a socket.""" ws = await create_websocket_client( sock, addr=addr, path=path, headers=headers, subprotocols=subprotocols) try: yield ws finally: await ws.close()
[ "async", "def", "open_websocket_client", "(", "sock", ":", "anyio", ".", "abc", ".", "SocketStream", ",", "addr", ",", "path", ":", "str", ",", "headers", ":", "Optional", "[", "list", "]", "=", "None", ",", "subprotocols", ":", "Optional", "[", "list", ...
Create a websocket on top of a socket.
[ "Create", "a", "websocket", "on", "top", "of", "a", "socket", "." ]
python
train
rgs1/zk_shell
zk_shell/xclient.py
https://github.com/rgs1/zk_shell/blob/bbf34fdfcf1f81100e2a5816fad8af6afc782a54/zk_shell/xclient.py#L147-L157
def get(self, *args, **kwargs): """ wraps the default get() and deals with encoding """ value, stat = super(XClient, self).get(*args, **kwargs) try: if value is not None: value = value.decode(encoding="utf-8") except UnicodeDecodeError: pass return (value, stat)
[ "def", "get", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "value", ",", "stat", "=", "super", "(", "XClient", ",", "self", ")", ".", "get", "(", "*", "args", ",", "*", "*", "kwargs", ")", "try", ":", "if", "value", "is",...
wraps the default get() and deals with encoding
[ "wraps", "the", "default", "get", "()", "and", "deals", "with", "encoding" ]
python
train
yyuu/botornado
boto/cloudfront/invalidation.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/cloudfront/invalidation.py#L70-L79
def to_xml(self): """Get this batch as XML""" assert self.connection != None s = '<?xml version="1.0" encoding="UTF-8"?>\n' s += '<InvalidationBatch xmlns="http://cloudfront.amazonaws.com/doc/%s/">\n' % self.connection.Version for p in self.paths: s += ' <Path>%s</Path>\n' % self.escape(p) s += ' <CallerReference>%s</CallerReference>\n' % self.caller_reference s += '</InvalidationBatch>\n' return s
[ "def", "to_xml", "(", "self", ")", ":", "assert", "self", ".", "connection", "!=", "None", "s", "=", "'<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n'", "s", "+=", "'<InvalidationBatch xmlns=\"http://cloudfront.amazonaws.com/doc/%s/\">\\n'", "%", "self", ".", "connection", ...
Get this batch as XML
[ "Get", "this", "batch", "as", "XML" ]
python
train
libtcod/python-tcod
tcod/libtcodpy.py
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/libtcodpy.py#L1029-L1049
def console_map_ascii_codes_to_font( firstAsciiCode: int, nbCodes: int, fontCharX: int, fontCharY: int ) -> None: """Remap a contiguous set of codes to a contiguous set of tiles. Both the tile-set and character codes must be contiguous to use this function. If this is not the case you may want to use :any:`console_map_ascii_code_to_font`. Args: firstAsciiCode (int): The starting character code. nbCodes (int): The length of the contiguous set. fontCharX (int): The starting X tile coordinate on the loaded tileset. 0 is the leftmost tile. fontCharY (int): The starting Y tile coordinate on the loaded tileset. 0 is the topmost tile. """ lib.TCOD_console_map_ascii_codes_to_font( _int(firstAsciiCode), nbCodes, fontCharX, fontCharY )
[ "def", "console_map_ascii_codes_to_font", "(", "firstAsciiCode", ":", "int", ",", "nbCodes", ":", "int", ",", "fontCharX", ":", "int", ",", "fontCharY", ":", "int", ")", "->", "None", ":", "lib", ".", "TCOD_console_map_ascii_codes_to_font", "(", "_int", "(", "...
Remap a contiguous set of codes to a contiguous set of tiles. Both the tile-set and character codes must be contiguous to use this function. If this is not the case you may want to use :any:`console_map_ascii_code_to_font`. Args: firstAsciiCode (int): The starting character code. nbCodes (int): The length of the contiguous set. fontCharX (int): The starting X tile coordinate on the loaded tileset. 0 is the leftmost tile. fontCharY (int): The starting Y tile coordinate on the loaded tileset. 0 is the topmost tile.
[ "Remap", "a", "contiguous", "set", "of", "codes", "to", "a", "contiguous", "set", "of", "tiles", "." ]
python
train
googledatalab/pydatalab
datalab/bigquery/_query.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_query.py#L434-L494
def execute_async(self, table_name=None, table_mode='create', use_cache=True, priority='interactive', allow_large_results=False, dialect=None, billing_tier=None): """ Initiate the query and return a QueryJob. Args: table_name: the result table name as a string or TableName; if None (the default), then a temporary table will be used. table_mode: one of 'create', 'overwrite' or 'append'. If 'create' (the default), the request will fail if the table exists. use_cache: whether to use past query results or ignore cache. Has no effect if destination is specified (default True). priority:one of 'batch' or 'interactive' (default). 'interactive' jobs should be scheduled to run quickly but are subject to rate limits; 'batch' jobs could be delayed by as much as three hours but are not rate-limited. allow_large_results: whether to allow large results; i.e. compressed data over 100MB. This is slower and requires a table_name to be specified) (default False). dialect : {'legacy', 'standard'}, default 'legacy' 'legacy' : Use BigQuery's legacy SQL dialect. 'standard' : Use BigQuery's standard SQL (beta), which is compliant with the SQL 2011 standard. billing_tier: Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default. This can also be used to override your project-wide default billing tier on a per-query basis. Returns: A QueryJob. Raises: Exception if query could not be executed. 
""" batch = priority == 'low' append = table_mode == 'append' overwrite = table_mode == 'overwrite' if table_name is not None: table_name = _utils.parse_table_name(table_name, self._api.project_id) try: query_result = self._api.jobs_insert_query(self._sql, self._code, self._imports, table_name=table_name, append=append, overwrite=overwrite, use_cache=use_cache, batch=batch, allow_large_results=allow_large_results, table_definitions=self._external_tables, dialect=dialect, billing_tier=billing_tier) except Exception as e: raise e if 'jobReference' not in query_result: raise Exception('Unexpected response from server') job_id = query_result['jobReference']['jobId'] if not table_name: try: destination = query_result['configuration']['query']['destinationTable'] table_name = (destination['projectId'], destination['datasetId'], destination['tableId']) except KeyError: # The query was in error raise Exception(_utils.format_query_errors(query_result['status']['errors'])) return _query_job.QueryJob(job_id, table_name, self._sql, context=self._context)
[ "def", "execute_async", "(", "self", ",", "table_name", "=", "None", ",", "table_mode", "=", "'create'", ",", "use_cache", "=", "True", ",", "priority", "=", "'interactive'", ",", "allow_large_results", "=", "False", ",", "dialect", "=", "None", ",", "billin...
Initiate the query and return a QueryJob. Args: table_name: the result table name as a string or TableName; if None (the default), then a temporary table will be used. table_mode: one of 'create', 'overwrite' or 'append'. If 'create' (the default), the request will fail if the table exists. use_cache: whether to use past query results or ignore cache. Has no effect if destination is specified (default True). priority:one of 'batch' or 'interactive' (default). 'interactive' jobs should be scheduled to run quickly but are subject to rate limits; 'batch' jobs could be delayed by as much as three hours but are not rate-limited. allow_large_results: whether to allow large results; i.e. compressed data over 100MB. This is slower and requires a table_name to be specified) (default False). dialect : {'legacy', 'standard'}, default 'legacy' 'legacy' : Use BigQuery's legacy SQL dialect. 'standard' : Use BigQuery's standard SQL (beta), which is compliant with the SQL 2011 standard. billing_tier: Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default. This can also be used to override your project-wide default billing tier on a per-query basis. Returns: A QueryJob. Raises: Exception if query could not be executed.
[ "Initiate", "the", "query", "and", "return", "a", "QueryJob", "." ]
python
train
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L932-L959
def _AddEqualsMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def __eq__(self, other): if (not isinstance(other, message_mod.Message) or other.DESCRIPTOR != self.DESCRIPTOR): return False if self is other: return True if self.DESCRIPTOR.full_name == _AnyFullTypeName: any_a = _InternalUnpackAny(self) any_b = _InternalUnpackAny(other) if any_a and any_b: return any_a == any_b if not self.ListFields() == other.ListFields(): return False # Sort unknown fields because their order shouldn't affect equality test. unknown_fields = list(self._unknown_fields) unknown_fields.sort() other_unknown_fields = list(other._unknown_fields) other_unknown_fields.sort() return unknown_fields == other_unknown_fields cls.__eq__ = __eq__
[ "def", "_AddEqualsMethod", "(", "message_descriptor", ",", "cls", ")", ":", "def", "__eq__", "(", "self", ",", "other", ")", ":", "if", "(", "not", "isinstance", "(", "other", ",", "message_mod", ".", "Message", ")", "or", "other", ".", "DESCRIPTOR", "!=...
Helper for _AddMessageMethods().
[ "Helper", "for", "_AddMessageMethods", "()", "." ]
python
train
PSU-OIT-ARC/elasticmodels
elasticmodels/analysis.py
https://github.com/PSU-OIT-ARC/elasticmodels/blob/67870508096f66123ef10b89789bbac06571cc80/elasticmodels/analysis.py#L74-L102
def is_analysis_compatible(using): """ Returns True if the analysis defined in Python land and ES for the connection `using` are compatible """ python_analysis = collect_analysis(using) es_analysis = existing_analysis(using) if es_analysis == DOES_NOT_EXIST: return True # we want to ensure everything defined in Python land is exactly matched in ES land for section in python_analysis: # there is an analysis section (analysis, tokenizers, filters, etc) defined in Python that isn't in ES if section not in es_analysis: return False # for this section of analysis (analysis, tokenizer, filter, etc), get # all the items defined in that section, and make sure they exist, and # are equal in Python land subdict_python = python_analysis[section] subdict_es = es_analysis[section] for name in subdict_python: # this analyzer, filter, etc isn't defined in ES if name not in subdict_es: return False # this analyzer, filter etc doesn't match what is in ES if subdict_python[name] != subdict_es[name]: return False return True
[ "def", "is_analysis_compatible", "(", "using", ")", ":", "python_analysis", "=", "collect_analysis", "(", "using", ")", "es_analysis", "=", "existing_analysis", "(", "using", ")", "if", "es_analysis", "==", "DOES_NOT_EXIST", ":", "return", "True", "# we want to ensu...
Returns True if the analysis defined in Python land and ES for the connection `using` are compatible
[ "Returns", "True", "if", "the", "analysis", "defined", "in", "Python", "land", "and", "ES", "for", "the", "connection", "using", "are", "compatible" ]
python
train
dereneaton/ipyrad
ipyrad/assemble/cluster_within.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_within.py#L1323-L1350
def reconcat(data, sample): """ takes aligned chunks (usually 10) and concatenates them """ try: ## get chunks chunks = glob.glob(os.path.join(data.tmpdir, sample.name+"_chunk_[0-9].aligned")) ## sort by chunk number, cuts off last 8 =(aligned) chunks.sort(key=lambda x: int(x.rsplit("_", 1)[-1][:-8])) LOGGER.info("chunk %s", chunks) ## concatenate finished reads sample.files.clusters = os.path.join(data.dirs.clusts, sample.name+".clustS.gz") ## reconcats aligned clusters with gzip.open(sample.files.clusters, 'wb') as out: for fname in chunks: with open(fname) as infile: dat = infile.read() ## avoids mess if last chunk was empty if dat.endswith("\n"): out.write(dat+"//\n//\n") else: out.write(dat+"\n//\n//\n") os.remove(fname) except Exception as inst: LOGGER.error("Error in reconcat {}".format(inst)) raise
[ "def", "reconcat", "(", "data", ",", "sample", ")", ":", "try", ":", "## get chunks", "chunks", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "data", ".", "tmpdir", ",", "sample", ".", "name", "+", "\"_chunk_[0-9].aligned\"", ")",...
takes aligned chunks (usually 10) and concatenates them
[ "takes", "aligned", "chunks", "(", "usually", "10", ")", "and", "concatenates", "them" ]
python
valid
jtwhite79/pyemu
pyemu/utils/helpers.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/utils/helpers.py#L809-L877
def zero_order_tikhonov(pst, parbounds=True,par_groups=None, reset=True): """setup preferred-value regularization Parameters ---------- pst : pyemu.Pst the control file instance parbounds : bool flag to weight the prior information equations according to parameter bound width - approx the KL transform. Default is True par_groups : list parameter groups to build PI equations for. If None, all adjustable parameters are used. Default is None reset : bool flag to reset the prior_information attribute of the pst instance. Default is True Example ------- ``>>>import pyemu`` ``>>>pst = pyemu.Pst("pest.pst")`` ``>>>pyemu.helpers.zero_order_tikhonov(pst)`` """ if par_groups is None: par_groups = pst.par_groups pilbl, obgnme, weight, equation = [], [], [], [] for idx, row in pst.parameter_data.iterrows(): pt = row["partrans"].lower() try: pt = pt.decode() except: pass if pt not in ["tied", "fixed"] and\ row["pargp"] in par_groups: pilbl.append(row["parnme"]) weight.append(1.0) ogp_name = "regul"+row["pargp"] obgnme.append(ogp_name[:12]) parnme = row["parnme"] parval1 = row["parval1"] if pt == "log": parnme = "log(" + parnme + ")" parval1 = np.log10(parval1) eq = "1.0 * " + parnme + " ={0:15.6E}".format(parval1) equation.append(eq) if reset: pst.prior_information = pd.DataFrame({"pilbl": pilbl, "equation": equation, "obgnme": obgnme, "weight": weight}) else: pi = pd.DataFrame({"pilbl": pilbl, "equation": equation, "obgnme": obgnme, "weight": weight}) pst.prior_information = pst.prior_information.append(pi) if parbounds: regweight_from_parbound(pst) if pst.control_data.pestmode == "estimation": pst.control_data.pestmode = "regularization"
[ "def", "zero_order_tikhonov", "(", "pst", ",", "parbounds", "=", "True", ",", "par_groups", "=", "None", ",", "reset", "=", "True", ")", ":", "if", "par_groups", "is", "None", ":", "par_groups", "=", "pst", ".", "par_groups", "pilbl", ",", "obgnme", ",",...
setup preferred-value regularization Parameters ---------- pst : pyemu.Pst the control file instance parbounds : bool flag to weight the prior information equations according to parameter bound width - approx the KL transform. Default is True par_groups : list parameter groups to build PI equations for. If None, all adjustable parameters are used. Default is None reset : bool flag to reset the prior_information attribute of the pst instance. Default is True Example ------- ``>>>import pyemu`` ``>>>pst = pyemu.Pst("pest.pst")`` ``>>>pyemu.helpers.zero_order_tikhonov(pst)``
[ "setup", "preferred", "-", "value", "regularization" ]
python
train
PrefPy/prefpy
prefpy/mechanismMcmcSampleGenerator.py
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmcSampleGenerator.py#L125-L157
def getNextSample(self, V): """ We generate a new ranking based on a Mallows-based jumping distribution. The algorithm is described in "Bayesian Ordinal Peer Grading" by Raman and Joachims. :ivar list<int> V: Contains integer representations of each candidate in order of their ranking in a vote, from first to last. """ phi = self.phi wmg = self.wmg W = [] W.append(V[0]) for j in range(2, len(V)+1): randomSelect = random.random() threshold = 0.0 denom = 1.0 for k in range(1, j): denom = denom + phi**k for k in range(1, j+1): numerator = phi**(j - k) threshold = threshold + numerator/denom if randomSelect <= threshold: W.insert(k-1,V[j-1]) break # Check whether we should change to the new ranking. acceptanceRatio = self.calcAcceptanceRatio(V, W) prob = min(1.0,acceptanceRatio) if random.random() <= prob: V = W return V
[ "def", "getNextSample", "(", "self", ",", "V", ")", ":", "phi", "=", "self", ".", "phi", "wmg", "=", "self", ".", "wmg", "W", "=", "[", "]", "W", ".", "append", "(", "V", "[", "0", "]", ")", "for", "j", "in", "range", "(", "2", ",", "len", ...
We generate a new ranking based on a Mallows-based jumping distribution. The algorithm is described in "Bayesian Ordinal Peer Grading" by Raman and Joachims. :ivar list<int> V: Contains integer representations of each candidate in order of their ranking in a vote, from first to last.
[ "We", "generate", "a", "new", "ranking", "based", "on", "a", "Mallows", "-", "based", "jumping", "distribution", ".", "The", "algorithm", "is", "described", "in", "Bayesian", "Ordinal", "Peer", "Grading", "by", "Raman", "and", "Joachims", "." ]
python
train
etingof/pysnmp
pysnmp/smi/mibs/SNMPv2-SMI.py
https://github.com/etingof/pysnmp/blob/cde062dd42f67dfd2d7686286a322d40e9c3a4b7/pysnmp/smi/mibs/SNMPv2-SMI.py#L3308-L3329
def getInstIdFromIndices(self, *indices): """Return column instance identification from indices""" try: return self._idxToIdCache[indices] except TypeError: cacheable = False except KeyError: cacheable = True idx = 0 instId = () parentIndices = [] for impliedFlag, modName, symName in self._indexNames: if idx >= len(indices): break mibObj, = mibBuilder.importSymbols(modName, symName) syntax = mibObj.syntax.clone(indices[idx]) instId += self.valueToOid(syntax, impliedFlag, parentIndices) parentIndices.append(syntax) idx += 1 if cacheable: self._idxToIdCache[indices] = instId return instId
[ "def", "getInstIdFromIndices", "(", "self", ",", "*", "indices", ")", ":", "try", ":", "return", "self", ".", "_idxToIdCache", "[", "indices", "]", "except", "TypeError", ":", "cacheable", "=", "False", "except", "KeyError", ":", "cacheable", "=", "True", ...
Return column instance identification from indices
[ "Return", "column", "instance", "identification", "from", "indices" ]
python
train
maas/python-libmaas
maas/client/viscera/boot_source_selections.py
https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/boot_source_selections.py#L105-L108
async def delete(self): """Delete boot source selection.""" await self._handler.delete( boot_source_id=self.boot_source.id, id=self.id)
[ "async", "def", "delete", "(", "self", ")", ":", "await", "self", ".", "_handler", ".", "delete", "(", "boot_source_id", "=", "self", ".", "boot_source", ".", "id", ",", "id", "=", "self", ".", "id", ")" ]
Delete boot source selection.
[ "Delete", "boot", "source", "selection", "." ]
python
train
pyviz/holoviews
holoviews/core/options.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/options.py#L204-L212
def options_policy(skip_invalid, warn_on_skip): """ Context manager to temporarily set the skip_invalid and warn_on_skip class parameters on Options. """ settings = (Options.skip_invalid, Options.warn_on_skip) (Options.skip_invalid, Options.warn_on_skip) = (skip_invalid, warn_on_skip) yield (Options.skip_invalid, Options.warn_on_skip) = settings
[ "def", "options_policy", "(", "skip_invalid", ",", "warn_on_skip", ")", ":", "settings", "=", "(", "Options", ".", "skip_invalid", ",", "Options", ".", "warn_on_skip", ")", "(", "Options", ".", "skip_invalid", ",", "Options", ".", "warn_on_skip", ")", "=", "...
Context manager to temporarily set the skip_invalid and warn_on_skip class parameters on Options.
[ "Context", "manager", "to", "temporarily", "set", "the", "skip_invalid", "and", "warn_on_skip", "class", "parameters", "on", "Options", "." ]
python
train
SheffieldML/GPy
GPy/util/datasets.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/util/datasets.py#L114-L169
def download_url(url, store_directory, save_name=None, messages=True, suffix=''): """Download a file from a url and save it to disk.""" i = url.rfind('/') file = url[i+1:] print(file) dir_name = os.path.join(data_path, store_directory) if save_name is None: save_name = os.path.join(dir_name, file) else: save_name = os.path.join(dir_name, save_name) if suffix is None: suffix='' print("Downloading ", url, "->", save_name) if not os.path.exists(dir_name): os.makedirs(dir_name) try: response = urlopen(url+suffix) except URLError as e: if not hasattr(e, "code"): raise response = e if response.code > 399 and response.code<500: raise ValueError('Tried url ' + url + suffix + ' and received client error ' + str(response.code)) elif response.code > 499: raise ValueError('Tried url ' + url + suffix + ' and received server error ' + str(response.code)) with open(save_name, 'wb') as f: meta = response.info() content_length_str = meta.get("Content-Length") if content_length_str: file_size = int(content_length_str) else: file_size = None status = "" file_size_dl = 0 block_sz = 8192 line_length=30 while True: buff = response.read(block_sz) if not buff: break file_size_dl += len(buff) f.write(buff) sys.stdout.write(" "*(len(status)) + "\r") if file_size: status = r"[{perc: <{ll}}] {dl:7.3f}/{full:.3f}MB".format(dl=file_size_dl/(1048576.), full=file_size/(1048576.), ll=line_length, perc="="*int(line_length*float(file_size_dl)/file_size)) else: status = r"[{perc: <{ll}}] {dl:7.3f}MB".format(dl=file_size_dl/(1048576.), ll=line_length, perc="."*int(line_length*float(file_size_dl/(10*1048576.)))) sys.stdout.write(status) sys.stdout.flush() sys.stdout.write(" "*(len(status)) + "\r") print(status)
[ "def", "download_url", "(", "url", ",", "store_directory", ",", "save_name", "=", "None", ",", "messages", "=", "True", ",", "suffix", "=", "''", ")", ":", "i", "=", "url", ".", "rfind", "(", "'/'", ")", "file", "=", "url", "[", "i", "+", "1", ":...
Download a file from a url and save it to disk.
[ "Download", "a", "file", "from", "a", "url", "and", "save", "it", "to", "disk", "." ]
python
train
cltk/cltk
cltk/corpus/readers.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/corpus/readers.py#L211-L232
def docs(self, fileids=None) -> Generator[str, str, None]: """ Returns the complete text of an Text document, closing the document after we are done reading it and yielding it in a memory safe fashion. """ if not fileids: fileids = self.fileids() # Create a generator, loading one document into memory at a time. for path, encoding in self.abspaths(fileids, include_encoding=True): with codecs.open(path, 'r', encoding=encoding) as reader: if self.skip_keywords: tmp_data = [] for line in reader: skip = False for keyword in self.skip_keywords: if keyword in line: skip = True if not skip: tmp_data.append(line) yield ''.join(tmp_data) else: yield reader.read()
[ "def", "docs", "(", "self", ",", "fileids", "=", "None", ")", "->", "Generator", "[", "str", ",", "str", ",", "None", "]", ":", "if", "not", "fileids", ":", "fileids", "=", "self", ".", "fileids", "(", ")", "# Create a generator, loading one document into ...
Returns the complete text of an Text document, closing the document after we are done reading it and yielding it in a memory safe fashion.
[ "Returns", "the", "complete", "text", "of", "an", "Text", "document", "closing", "the", "document", "after", "we", "are", "done", "reading", "it", "and", "yielding", "it", "in", "a", "memory", "safe", "fashion", "." ]
python
train
deepmipt/DeepPavlov
deeppavlov/core/layers/tf_csoftmax_attention.py
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/layers/tf_csoftmax_attention.py#L142-L173
def attention_gen_block(hidden_for_sketch, hidden_for_attn_alignment, key, attention_depth): """ It is a implementation of the Luong et al. attention mechanism with general score and the constrained softmax (csoftmax). Based on the papers: https://arxiv.org/abs/1508.04025 "Effective Approaches to Attention-based Neural Machine Translation" https://andre-martins.github.io/docs/emnlp2017_final.pdf "Learning What's Easy: Fully Differentiable Neural Easy-First Taggers" Args: hidden_for_sketch: A tensorflow tensor for a sketch computing. This tensor have dimensionality [None, max_num_tokens, sketch_hidden_size] hidden_for_attn_alignment: A tensorflow tensor is aligned for output during a performing. This tensor have dimensionality [None, max_num_tokens, hidden_size_for_attn_alignment] key: A tensorflow tensor with dimensionality [None, None, key_size] attention_depth: Number of usage csoftmax Returns: final_aligned_hiddens: Tensor at the output with dimensionality [1, attention_depth, hidden_size_for_attn_alignment] """ with tf.name_scope('attention_block'): sketch_dims = tf.shape(hidden_for_sketch) batch_size = sketch_dims[0] num_tokens = sketch_dims[1] hidden_size = sketch_dims[2] attn_alignment_dims = tf.shape(hidden_for_attn_alignment) attn_alignment_hidden_size = attn_alignment_dims[2] sketches = [tf.zeros(shape=[batch_size, hidden_size], dtype=tf.float32)] aligned_hiddens = [] cum_att = tf.zeros(shape=[batch_size, num_tokens]) # cumulative attention for i in range(attention_depth): sketch, cum_att_, aligned_hidden = attention_gen_step(hidden_for_sketch, hidden_for_attn_alignment, sketches[-1], key, cum_att) sketches.append(sketch) #sketch aligned_hiddens.append(aligned_hidden) #sketch cum_att += cum_att_ final_aligned_hiddens = tf.reshape(tf.transpose(tf.stack(aligned_hiddens), [1, 0, 2]),[1, attention_depth, attn_alignment_hidden_size]) return final_aligned_hiddens
[ "def", "attention_gen_block", "(", "hidden_for_sketch", ",", "hidden_for_attn_alignment", ",", "key", ",", "attention_depth", ")", ":", "with", "tf", ".", "name_scope", "(", "'attention_block'", ")", ":", "sketch_dims", "=", "tf", ".", "shape", "(", "hidden_for_sk...
It is a implementation of the Luong et al. attention mechanism with general score and the constrained softmax (csoftmax). Based on the papers: https://arxiv.org/abs/1508.04025 "Effective Approaches to Attention-based Neural Machine Translation" https://andre-martins.github.io/docs/emnlp2017_final.pdf "Learning What's Easy: Fully Differentiable Neural Easy-First Taggers" Args: hidden_for_sketch: A tensorflow tensor for a sketch computing. This tensor have dimensionality [None, max_num_tokens, sketch_hidden_size] hidden_for_attn_alignment: A tensorflow tensor is aligned for output during a performing. This tensor have dimensionality [None, max_num_tokens, hidden_size_for_attn_alignment] key: A tensorflow tensor with dimensionality [None, None, key_size] attention_depth: Number of usage csoftmax Returns: final_aligned_hiddens: Tensor at the output with dimensionality [1, attention_depth, hidden_size_for_attn_alignment]
[ "It", "is", "a", "implementation", "of", "the", "Luong", "et", "al", ".", "attention", "mechanism", "with", "general", "score", "and", "the", "constrained", "softmax", "(", "csoftmax", ")", ".", "Based", "on", "the", "papers", ":", "https", ":", "//", "a...
python
test
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/memory_profiler.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/memory_profiler.py#L248-L257
def trace_memory_usage(self, frame, event, arg): """Callback for sys.settrace""" if event in ('line', 'return') and frame.f_code in self.code_map: lineno = frame.f_lineno if event == 'return': lineno += 1 entry = self.code_map[frame.f_code].setdefault(lineno, []) entry.append(_get_memory(os.getpid())) return self.trace_memory_usage
[ "def", "trace_memory_usage", "(", "self", ",", "frame", ",", "event", ",", "arg", ")", ":", "if", "event", "in", "(", "'line'", ",", "'return'", ")", "and", "frame", ".", "f_code", "in", "self", ".", "code_map", ":", "lineno", "=", "frame", ".", "f_l...
Callback for sys.settrace
[ "Callback", "for", "sys", ".", "settrace" ]
python
train
Kortemme-Lab/klab
klab/bio/pdb.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdb.py#L2490-L2502
def get_residue_id_to_type_map(self): '''Returns a dictionary mapping 6-character residue IDs (Chain, residue number, insertion code e.g. "A 123B") to the corresponding one-letter amino acid. Caveat: This function ignores occupancy - this function should be called once occupancy has been dealt with appropriately.''' resid2type = {} atomlines = self.parsed_lines['ATOM '] for line in atomlines: resname = line[17:20] if resname in allowed_PDB_residues_types and line[13:16] == 'CA ': resid2type[line[21:27]] = residue_type_3to1_map.get(resname) or protonated_residue_type_3to1_map.get(resname) return resid2type
[ "def", "get_residue_id_to_type_map", "(", "self", ")", ":", "resid2type", "=", "{", "}", "atomlines", "=", "self", ".", "parsed_lines", "[", "'ATOM '", "]", "for", "line", "in", "atomlines", ":", "resname", "=", "line", "[", "17", ":", "20", "]", "if", ...
Returns a dictionary mapping 6-character residue IDs (Chain, residue number, insertion code e.g. "A 123B") to the corresponding one-letter amino acid. Caveat: This function ignores occupancy - this function should be called once occupancy has been dealt with appropriately.
[ "Returns", "a", "dictionary", "mapping", "6", "-", "character", "residue", "IDs", "(", "Chain", "residue", "number", "insertion", "code", "e", ".", "g", ".", "A", "123B", ")", "to", "the", "corresponding", "one", "-", "letter", "amino", "acid", "." ]
python
train
respeaker/respeaker_python_library
respeaker/usb_hid/pyusb_backend.py
https://github.com/respeaker/respeaker_python_library/blob/905a5334ccdc2d474ad973caf6a23d05c65bbb25/respeaker/usb_hid/pyusb_backend.py#L119-L144
def write(self, data): """ write data on the OUT endpoint associated to the HID interface """ # report_size = 64 # if self.ep_out: # report_size = self.ep_out.wMaxPacketSize # # for _ in range(report_size - len(data)): # data.append(0) self.read_sem.release() if not self.ep_out: bmRequestType = 0x21 #Host to device request of type Class of Recipient Interface bmRequest = 0x09 #Set_REPORT (HID class-specific request for transferring data over EP0) wValue = 0x200 #Issuing an OUT report wIndex = self.intf_number #mBed Board interface number for HID self.dev.ctrl_transfer(bmRequestType, bmRequest, wValue, wIndex, data) return #raise ValueError('EP_OUT endpoint is NULL') self.ep_out.write(data) #logging.debug('sent: %s', data) return
[ "def", "write", "(", "self", ",", "data", ")", ":", "# report_size = 64", "# if self.ep_out:", "# report_size = self.ep_out.wMaxPacketSize", "#", "# for _ in range(report_size - len(data)):", "# data.append(0)", "self", ".", "read_sem", ".", "release", "(", ")", "if"...
write data on the OUT endpoint associated to the HID interface
[ "write", "data", "on", "the", "OUT", "endpoint", "associated", "to", "the", "HID", "interface" ]
python
train
Cornices/cornice.ext.sphinx
cornice_sphinx/__init__.py
https://github.com/Cornices/cornice.ext.sphinx/blob/f73fdcc94d78fb5c94262adb9adc187c96378a53/cornice_sphinx/__init__.py#L117-L128
def _get_attributes(schema, location): """Return the schema's children, filtered by location.""" schema = DottedNameResolver(__name__).maybe_resolve(schema) def _filter(attr): if not hasattr(attr, "location"): valid_location = 'body' in location else: valid_location = attr.location in to_list(location) return valid_location return list(filter(_filter, schema().children))
[ "def", "_get_attributes", "(", "schema", ",", "location", ")", ":", "schema", "=", "DottedNameResolver", "(", "__name__", ")", ".", "maybe_resolve", "(", "schema", ")", "def", "_filter", "(", "attr", ")", ":", "if", "not", "hasattr", "(", "attr", ",", "\...
Return the schema's children, filtered by location.
[ "Return", "the", "schema", "s", "children", "filtered", "by", "location", "." ]
python
train
snare/voltron
voltron/plugin.py
https://github.com/snare/voltron/blob/4ee3cbe6f7c1e38303f5dc6114c48b60217253c3/voltron/plugin.py#L91-L104
def valid_api_plugin(self, plugin): """ Validate an API plugin, ensuring it is an API plugin and has the necessary fields present. `plugin` is a subclass of scruffy's Plugin class. """ if (issubclass(plugin, APIPlugin) and hasattr(plugin, 'plugin_type') and plugin.plugin_type == 'api' and hasattr(plugin, 'request') and plugin.request != None and hasattr(plugin, 'request_class') and plugin.request_class != None and hasattr(plugin, 'response_class') and plugin.response_class != None): return True return False
[ "def", "valid_api_plugin", "(", "self", ",", "plugin", ")", ":", "if", "(", "issubclass", "(", "plugin", ",", "APIPlugin", ")", "and", "hasattr", "(", "plugin", ",", "'plugin_type'", ")", "and", "plugin", ".", "plugin_type", "==", "'api'", "and", "hasattr"...
Validate an API plugin, ensuring it is an API plugin and has the necessary fields present. `plugin` is a subclass of scruffy's Plugin class.
[ "Validate", "an", "API", "plugin", "ensuring", "it", "is", "an", "API", "plugin", "and", "has", "the", "necessary", "fields", "present", "." ]
python
train
openearth/bmi-python
bmi/runner.py
https://github.com/openearth/bmi-python/blob/2f53f24d45515eb0711c2d28ddd6c1582045248f/bmi/runner.py#L82-L126
def main(): """main bmi runner program""" arguments = docopt.docopt(__doc__, version=__version__) colorlogs() # Read input file file wrapper = BMIWrapper( engine=arguments['<engine>'], configfile=arguments['<config>'] or '' ) # add logger if required if not arguments['--disable-logger']: logging.root.setLevel(logging.DEBUG) wrapper.set_logger(logging.root) with wrapper as model: # if siginfo is supported by OS (BSD) def handler(signum, frame): """report progress information""" t_start = model.get_start_time() t_end = model.get_end_time() t_current = model.get_current_time() total = (t_end - t_start) now = (t_current - t_start) if total > 0: logging.info("progress: %s%%", 100.0 * now / total) else: logging.info("progress: unknown") if hasattr(signal, 'SIGINFO'): # attach a siginfo handler (CTRL-t) to print progress signal.signal(signal.SIGINFO, handler) if arguments['--info']: logging.info("%s", trace(model)) t_end = model.get_end_time() t = model.get_start_time() while t < t_end: model.update(-1) t = model.get_current_time() if arguments['--info']: logging.info("%s", trace(model))
[ "def", "main", "(", ")", ":", "arguments", "=", "docopt", ".", "docopt", "(", "__doc__", ",", "version", "=", "__version__", ")", "colorlogs", "(", ")", "# Read input file file", "wrapper", "=", "BMIWrapper", "(", "engine", "=", "arguments", "[", "'<engine>'...
main bmi runner program
[ "main", "bmi", "runner", "program" ]
python
train
rocky/python3-trepan
trepan/processor/frame.py
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/processor/frame.py#L45-L81
def adjust_frame(proc_obj, name, pos, absolute_pos): """Adjust stack frame by pos positions. If absolute_pos then pos is an absolute number. Otherwise it is a relative number. A negative number indexes from the other end.""" if not proc_obj.curframe: proc_obj.errmsg("No stack.") return # Below we remove any negativity. At the end, pos will be # the new value of proc_obj.curindex. if absolute_pos: if pos >= 0: pos = frame_num(proc_obj, pos) else: pos = -pos - 1 pass else: pos += proc_obj.curindex pass if pos < 0: proc_obj.errmsg("Adjusting would put us beyond the oldest frame.") return elif pos >= len(proc_obj.stack): proc_obj.errmsg("Adjusting would put us beyond the newest frame.") return proc_obj.curindex = pos proc_obj.curframe = proc_obj.stack[proc_obj.curindex][0] proc_obj.location() proc_obj.list_lineno = None proc_obj.list_offset = proc_obj.curframe.f_lasti proc_obj.list_object = proc_obj.curframe proc_obj.list_filename = proc_obj.curframe.f_code.co_filename return
[ "def", "adjust_frame", "(", "proc_obj", ",", "name", ",", "pos", ",", "absolute_pos", ")", ":", "if", "not", "proc_obj", ".", "curframe", ":", "proc_obj", ".", "errmsg", "(", "\"No stack.\"", ")", "return", "# Below we remove any negativity. At the end, pos will be"...
Adjust stack frame by pos positions. If absolute_pos then pos is an absolute number. Otherwise it is a relative number. A negative number indexes from the other end.
[ "Adjust", "stack", "frame", "by", "pos", "positions", ".", "If", "absolute_pos", "then", "pos", "is", "an", "absolute", "number", ".", "Otherwise", "it", "is", "a", "relative", "number", "." ]
python
test
pvlib/pvlib-python
pvlib/forecast.py
https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/forecast.py#L149-L166
def set_dataset(self): ''' Retrieves the designated dataset, creates NCSS object, and creates a NCSS query object. ''' keys = list(self.model.datasets.keys()) labels = [item.split()[0].lower() for item in keys] if self.set_type == 'best': self.dataset = self.model.datasets[keys[labels.index('best')]] elif self.set_type == 'latest': self.dataset = self.model.datasets[keys[labels.index('latest')]] elif self.set_type == 'full': self.dataset = self.model.datasets[keys[labels.index('full')]] self.access_url = self.dataset.access_urls[self.access_url_key] self.ncss = NCSS(self.access_url) self.query = self.ncss.query()
[ "def", "set_dataset", "(", "self", ")", ":", "keys", "=", "list", "(", "self", ".", "model", ".", "datasets", ".", "keys", "(", ")", ")", "labels", "=", "[", "item", ".", "split", "(", ")", "[", "0", "]", ".", "lower", "(", ")", "for", "item", ...
Retrieves the designated dataset, creates NCSS object, and creates a NCSS query object.
[ "Retrieves", "the", "designated", "dataset", "creates", "NCSS", "object", "and", "creates", "a", "NCSS", "query", "object", "." ]
python
train
luckydonald/pytgbot
code_generation/code_generator_template.py
https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/code_generation/code_generator_template.py#L235-L249
def class_name_teleflask_message(self) -> str: """ If it starts with `Send` remove that. """ # strip leading "Send" name = self.class_name # "sendPhoto" -> "SendPhoto" name = name[4:] if name.startswith('Send') else name # "SendPhoto" -> "Photo" name = name + "Message" # "Photo" -> "PhotoMessage" # e.g. "MessageMessage" will be replaced as "TextMessage" # b/c "sendMessage" -> "SendMessage" -> "Message" -> "MessageMessage" ==> "TextMessage" if name in MESSAGE_CLASS_OVERRIDES: return MESSAGE_CLASS_OVERRIDES[name] # end if return name
[ "def", "class_name_teleflask_message", "(", "self", ")", "->", "str", ":", "# strip leading \"Send\"", "name", "=", "self", ".", "class_name", "# \"sendPhoto\" -> \"SendPhoto\"", "name", "=", "name", "[", "4", ":", "]", "if", "name", ".", "startswith", "(", "'Se...
If it starts with `Send` remove that.
[ "If", "it", "starts", "with", "Send", "remove", "that", "." ]
python
train
aarongarrett/inspyred
inspyred/ec/observers.py
https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/inspyred/ec/observers.py#L337-L407
def plot_observer(population, num_generations, num_evaluations, args): """Plot the output of the evolutionary computation as a graph. This function plots the performance of the EC as a line graph using matplotlib and numpy. The graph consists of a blue line representing the best fitness, a green line representing the average fitness, and a red line representing the median fitness. It modifies the keyword arguments variable 'args' by including an entry called 'plot_data'. If this observer is used, the calling script should also import the matplotlib library and should end the script with:: matplotlib.pyplot.show() Otherwise, the program may generate a runtime error. .. note:: This function makes use of the matplotlib and numpy libraries. .. Arguments: population -- the population of Individuals num_generations -- the number of elapsed generations num_evaluations -- the number of candidate solution evaluations args -- a dictionary of keyword arguments """ import matplotlib.pyplot as plt import numpy stats = inspyred.ec.analysis.fitness_statistics(population) best_fitness = stats['best'] worst_fitness = stats['worst'] median_fitness = stats['median'] average_fitness = stats['mean'] colors = ['black', 'blue', 'green', 'red'] labels = ['average', 'median', 'best', 'worst'] data = [] if num_generations == 0: plt.ion() data = [[num_evaluations], [average_fitness], [median_fitness], [best_fitness], [worst_fitness]] lines = [] for i in range(4): line, = plt.plot(data[0], data[i+1], color=colors[i], label=labels[i]) lines.append(line) # Add the legend when the first data is added. 
plt.legend(loc='lower right') args['plot_data'] = data args['plot_lines'] = lines plt.xlabel('Evaluations') plt.ylabel('Fitness') else: data = args['plot_data'] data[0].append(num_evaluations) data[1].append(average_fitness) data[2].append(median_fitness) data[3].append(best_fitness) data[4].append(worst_fitness) lines = args['plot_lines'] for i, line in enumerate(lines): line.set_xdata(numpy.array(data[0])) line.set_ydata(numpy.array(data[i+1])) args['plot_data'] = data args['plot_lines'] = lines ymin = min([min(d) for d in data[1:]]) ymax = max([max(d) for d in data[1:]]) yrange = ymax - ymin plt.xlim((0, num_evaluations)) plt.ylim((ymin - 0.1*yrange, ymax + 0.1*yrange)) plt.draw()
[ "def", "plot_observer", "(", "population", ",", "num_generations", ",", "num_evaluations", ",", "args", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "import", "numpy", "stats", "=", "inspyred", ".", "ec", ".", "analysis", ".", "fitness_statist...
Plot the output of the evolutionary computation as a graph. This function plots the performance of the EC as a line graph using matplotlib and numpy. The graph consists of a blue line representing the best fitness, a green line representing the average fitness, and a red line representing the median fitness. It modifies the keyword arguments variable 'args' by including an entry called 'plot_data'. If this observer is used, the calling script should also import the matplotlib library and should end the script with:: matplotlib.pyplot.show() Otherwise, the program may generate a runtime error. .. note:: This function makes use of the matplotlib and numpy libraries. .. Arguments: population -- the population of Individuals num_generations -- the number of elapsed generations num_evaluations -- the number of candidate solution evaluations args -- a dictionary of keyword arguments
[ "Plot", "the", "output", "of", "the", "evolutionary", "computation", "as", "a", "graph", ".", "This", "function", "plots", "the", "performance", "of", "the", "EC", "as", "a", "line", "graph", "using", "matplotlib", "and", "numpy", ".", "The", "graph", "con...
python
train
pandas-dev/pandas
pandas/core/groupby/generic.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/generic.py#L1202-L1213
def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None): """Calcuate pct_change of each value to previous entry in group""" # TODO: Remove this conditional when #23918 is fixed if freq: return self.apply(lambda x: x.pct_change(periods=periods, fill_method=fill_method, limit=limit, freq=freq)) filled = getattr(self, fill_method)(limit=limit) fill_grp = filled.groupby(self.grouper.labels) shifted = fill_grp.shift(periods=periods, freq=freq) return (filled / shifted) - 1
[ "def", "pct_change", "(", "self", ",", "periods", "=", "1", ",", "fill_method", "=", "'pad'", ",", "limit", "=", "None", ",", "freq", "=", "None", ")", ":", "# TODO: Remove this conditional when #23918 is fixed", "if", "freq", ":", "return", "self", ".", "ap...
Calcuate pct_change of each value to previous entry in group
[ "Calcuate", "pct_change", "of", "each", "value", "to", "previous", "entry", "in", "group" ]
python
train
scanny/python-pptx
pptx/parts/slide.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/parts/slide.py#L89-L97
def _new_theme_part(cls, package): """ Create and return a default theme part suitable for use with a notes master. """ partname = package.next_partname('/ppt/theme/theme%d.xml') content_type = CT.OFC_THEME theme = CT_OfficeStyleSheet.new_default() return XmlPart(partname, content_type, theme, package)
[ "def", "_new_theme_part", "(", "cls", ",", "package", ")", ":", "partname", "=", "package", ".", "next_partname", "(", "'/ppt/theme/theme%d.xml'", ")", "content_type", "=", "CT", ".", "OFC_THEME", "theme", "=", "CT_OfficeStyleSheet", ".", "new_default", "(", ")"...
Create and return a default theme part suitable for use with a notes master.
[ "Create", "and", "return", "a", "default", "theme", "part", "suitable", "for", "use", "with", "a", "notes", "master", "." ]
python
train
johnnoone/json-spec
src/jsonspec/pointer/bases.py
https://github.com/johnnoone/json-spec/blob/f91981724cea0c366bd42a6670eb07bbe31c0e0c/src/jsonspec/pointer/bases.py#L96-L121
def parse(self, pointer): """parse pointer into tokens""" if isinstance(pointer, Pointer): return pointer.tokens[:] elif pointer == '': return [] tokens = [] staged, _, children = pointer.partition('/') if staged: try: token = StagesToken(staged) token.last = False tokens.append(token) except ValueError: raise ParseError('pointer must start with / or int', pointer) if _: for part in children.split('/'): part = part.replace('~1', '/') part = part.replace('~0', '~') token = ChildToken(part) token.last = False tokens.append(token) return tokens
[ "def", "parse", "(", "self", ",", "pointer", ")", ":", "if", "isinstance", "(", "pointer", ",", "Pointer", ")", ":", "return", "pointer", ".", "tokens", "[", ":", "]", "elif", "pointer", "==", "''", ":", "return", "[", "]", "tokens", "=", "[", "]",...
parse pointer into tokens
[ "parse", "pointer", "into", "tokens" ]
python
train
brian-rose/climlab
climlab/process/process.py
https://github.com/brian-rose/climlab/blob/eae188a2ae9308229b8cbb8fe0b65f51b50ee1e6/climlab/process/process.py#L407-L441
def add_diagnostic(self, name, value=None): """Create a new diagnostic variable called ``name`` for this process and initialize it with the given ``value``. Quantity is accessible in two ways: * as a process attribute, i.e. ``proc.name`` * as a member of the diagnostics dictionary, i.e. ``proc.diagnostics['name']`` Use attribute method to set values, e.g. ```proc.name = value ``` :param str name: name of diagnostic quantity to be initialized :param array value: initial value for quantity [default: None] :Example: Add a diagnostic CO2 variable to an energy balance model:: >>> import climlab >>> model = climlab.EBM() >>> # initialize CO2 variable with value 280 ppm >>> model.add_diagnostic('CO2',280.) >>> # access variable directly or through diagnostic dictionary >>> model.CO2 280 >>> model.diagnostics.keys() ['ASR', 'CO2', 'net_radiation', 'icelat', 'OLR', 'albedo'] """ self._diag_vars.append(name) self.__setattr__(name, value)
[ "def", "add_diagnostic", "(", "self", ",", "name", ",", "value", "=", "None", ")", ":", "self", ".", "_diag_vars", ".", "append", "(", "name", ")", "self", ".", "__setattr__", "(", "name", ",", "value", ")" ]
Create a new diagnostic variable called ``name`` for this process and initialize it with the given ``value``. Quantity is accessible in two ways: * as a process attribute, i.e. ``proc.name`` * as a member of the diagnostics dictionary, i.e. ``proc.diagnostics['name']`` Use attribute method to set values, e.g. ```proc.name = value ``` :param str name: name of diagnostic quantity to be initialized :param array value: initial value for quantity [default: None] :Example: Add a diagnostic CO2 variable to an energy balance model:: >>> import climlab >>> model = climlab.EBM() >>> # initialize CO2 variable with value 280 ppm >>> model.add_diagnostic('CO2',280.) >>> # access variable directly or through diagnostic dictionary >>> model.CO2 280 >>> model.diagnostics.keys() ['ASR', 'CO2', 'net_radiation', 'icelat', 'OLR', 'albedo']
[ "Create", "a", "new", "diagnostic", "variable", "called", "name", "for", "this", "process", "and", "initialize", "it", "with", "the", "given", "value", "." ]
python
train
CTPUG/wafer
wafer/compare/admin.py
https://github.com/CTPUG/wafer/blob/a20af3c399267f76373dc342f4d542a9bc457c35/wafer/compare/admin.py#L156-L177
def comparelist_view(self, request, object_id, extra_context=None): """Allow selecting versions to compare.""" opts = self.model._meta object_id = unquote(object_id) current = get_object_or_404(self.model, pk=object_id) # As done by reversion's history_view action_list = [ { "revision": version.revision, "url": reverse("%s:%s_%s_compare" % (self.admin_site.name, opts.app_label, opts.model_name), args=(quote(version.object_id), version.id)), } for version in self._reversion_order_version_queryset(Version.objects.get_for_object_reference( self.model, object_id).select_related("revision__user"))] context = {"action_list": action_list, "opts": opts, "object_id": quote(object_id), "original": current, } extra_context = extra_context or {} context.update(extra_context) return render(request, self.compare_list_template or self._get_template_list("compare_list.html"), context)
[ "def", "comparelist_view", "(", "self", ",", "request", ",", "object_id", ",", "extra_context", "=", "None", ")", ":", "opts", "=", "self", ".", "model", ".", "_meta", "object_id", "=", "unquote", "(", "object_id", ")", "current", "=", "get_object_or_404", ...
Allow selecting versions to compare.
[ "Allow", "selecting", "versions", "to", "compare", "." ]
python
train
has2k1/plotnine
plotnine/stats/stat_ellipse.py
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/stats/stat_ellipse.py#L88-L230
def cov_trob(x, wt=None, cor=False, center=True, nu=5, maxit=25, tol=0.01): """ Covariance Estimation for Multivariate t Distribution Estimates a covariance or correlation matrix assuming the data came from a multivariate t distribution: this provides some degree of robustness to outlier without giving a high breakdown point. **credit**: This function a port of the R function ``MASS::cov.trob``. Parameters ---------- x : array data matrix. Missing values (NaNs) are not allowed. wt : array A vector of weights for each case: these are treated as if the case i actually occurred ``wt[i]`` times. cor : bool Flag to choose between returning the correlation (``cor=True``) or covariance (``cor=False``) matrix. center : array or bool A logical value or a numeric vector providing the location about which the covariance is to be taken. If ``center=False``, no centering is done; if ``center=True`` the MLE of the location vector is used. nu : int 'degrees of freedom' for the multivariate t distribution. Must exceed 2 (so that the covariance matrix is finite). maxit : int Maximum number of iterations in fitting. tol : float Convergence tolerance for fitting. Returns ------- out : dict A dictionary with with the following key-value - ``cov`` : the fitted covariance matrix. - ``center`` : the estimated or specified location vector. - ``wt`` : the specified weights: only returned if the wt argument was given. - ``n_obs`` : the number of cases used in the fitting. - ``cor`` : the fitted correlation matrix: only returned if ``cor=True``. - ``call`` : The matched call. - ``iter`` : The number of iterations used. References ---------- - J. T. Kent, D. E. Tyler and Y. Vardi (1994) A curious likelihood identity for the multivariate t-distribution. *Communications in Statistics-Simulation and Computation* **23**, 441-453. - Venables, W. N. and Ripley, B. D. (1999) *Modern Applied Statistics with S-PLUS*. Third Edition. Springer. 
""" def test_values(x): if pd.isnull(x).any() or np.isinf(x).any(): raise ValueError("Missing or infinite values in 'x'") def scale_simp(x, center, n, p): return x - np.repeat([center], n, axis=0) x = np.asarray(x) n, p = x.shape test_values(x) # wt miss_wt = wt is None if not miss_wt: wt = np.asarray(wt) wt0 = wt if len(wt) != n: raise ValueError( "length of 'wt' must equal number of observations.") if any(wt < 0): raise ValueError("Negative weights not allowed.") if not np.sum(wt): raise ValueError("No positive weights.") x = x[wt > 0, :] wt = wt[wt > 0] n, _ = x.shape else: wt = np.ones(n) wt = wt[:, np.newaxis] # loc loc = np.sum(wt*x, axis=0) / wt.sum() try: _len = len(center) except TypeError: if isinstance(center, bool) and not center: loc = np.zeros(p) else: if _len != p: raise ValueError("'center' is not the right length") loc = p use_loc = isinstance(center, bool) and center w = wt * (1 + p/nu) for iteration in range(maxit): w0 = w X = scale_simp(x, loc, n, p) _, s, v = linalg.svd(np.sqrt(w/np.sum(w)) * X) # wX = X @ v.T @ np.diag(np.full(p, 1/s)) wX = np.dot(np.dot(X, v.T), np.diag(np.full(p, 1/s))) # Q = np.squeeze((wX**2) @ np.ones(p)) Q = np.squeeze(np.dot(wX**2, np.ones(p))) w = (wt * (nu + p)) / (nu + Q)[:, np.newaxis] if use_loc: loc = np.sum(w*x, axis=0) / w.sum() if all(np.abs(w-w0) < tol): break else: if ((np.mean(w) - np.mean(wt) > tol) or (np.abs(np.mean(w * Q)/p - 1) > tol)): warn("Probable convergence failure.", PlotnineWarning) _a = np.sqrt(w) * X # cov = (_a.T @ _a) / np.sum(wt) cov = np.dot(_a.T, _a) / np.sum(wt) if miss_wt: ans = dict(cov=cov, center=loc, n_obs=n) else: ans = dict(cov=cov, center=loc, wt=wt0, n_obs=n) if cor: sd = np.sqrt(np.diag(cov)) cor = (cov/sd)/np.repeat([sd], p, axis=0).T ans['cor'] = cor ans['iter'] = iteration return ans
[ "def", "cov_trob", "(", "x", ",", "wt", "=", "None", ",", "cor", "=", "False", ",", "center", "=", "True", ",", "nu", "=", "5", ",", "maxit", "=", "25", ",", "tol", "=", "0.01", ")", ":", "def", "test_values", "(", "x", ")", ":", "if", "pd", ...
Covariance Estimation for Multivariate t Distribution Estimates a covariance or correlation matrix assuming the data came from a multivariate t distribution: this provides some degree of robustness to outlier without giving a high breakdown point. **credit**: This function a port of the R function ``MASS::cov.trob``. Parameters ---------- x : array data matrix. Missing values (NaNs) are not allowed. wt : array A vector of weights for each case: these are treated as if the case i actually occurred ``wt[i]`` times. cor : bool Flag to choose between returning the correlation (``cor=True``) or covariance (``cor=False``) matrix. center : array or bool A logical value or a numeric vector providing the location about which the covariance is to be taken. If ``center=False``, no centering is done; if ``center=True`` the MLE of the location vector is used. nu : int 'degrees of freedom' for the multivariate t distribution. Must exceed 2 (so that the covariance matrix is finite). maxit : int Maximum number of iterations in fitting. tol : float Convergence tolerance for fitting. Returns ------- out : dict A dictionary with with the following key-value - ``cov`` : the fitted covariance matrix. - ``center`` : the estimated or specified location vector. - ``wt`` : the specified weights: only returned if the wt argument was given. - ``n_obs`` : the number of cases used in the fitting. - ``cor`` : the fitted correlation matrix: only returned if ``cor=True``. - ``call`` : The matched call. - ``iter`` : The number of iterations used. References ---------- - J. T. Kent, D. E. Tyler and Y. Vardi (1994) A curious likelihood identity for the multivariate t-distribution. *Communications in Statistics-Simulation and Computation* **23**, 441-453. - Venables, W. N. and Ripley, B. D. (1999) *Modern Applied Statistics with S-PLUS*. Third Edition. Springer.
[ "Covariance", "Estimation", "for", "Multivariate", "t", "Distribution" ]
python
train
saltstack/salt
salt/cloud/clouds/softlayer_hw.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/softlayer_hw.py#L646-L663
def show_all_categories(call=None): ''' Return a dict of all available categories on the cloud provider. .. versionadded:: 2016.3.0 ''' if call == 'action': raise SaltCloudSystemExit( 'The show_all_categories function must be called with -f or --function.' ) conn = get_conn(service='SoftLayer_Product_Package') categories = [] for category in conn.getCategories(id=50): categories.append(category['categoryCode']) return {'category_codes': categories}
[ "def", "show_all_categories", "(", "call", "=", "None", ")", ":", "if", "call", "==", "'action'", ":", "raise", "SaltCloudSystemExit", "(", "'The show_all_categories function must be called with -f or --function.'", ")", "conn", "=", "get_conn", "(", "service", "=", "...
Return a dict of all available categories on the cloud provider. .. versionadded:: 2016.3.0
[ "Return", "a", "dict", "of", "all", "available", "categories", "on", "the", "cloud", "provider", "." ]
python
train
CxAalto/gtfspy
gtfspy/gtfs.py
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/gtfs.py#L1113-L1131
def increment_day_start_ut(self, day_start_ut, n_days=1): """Increment the GTFS-definition of "day start". Parameters ---------- day_start_ut : int unixtime of the previous start of day. If this time is between 12:00 or greater, there *will* be bugs. To solve this, run the input through day_start_ut first. n_days: int number of days to increment """ old_tz = self.set_current_process_time_zone() day0 = time.localtime(day_start_ut + 43200) # time of noon dayN = time.mktime(day0[:2] + # YYYY, MM (day0[2] + n_days,) + # DD (12, 00, 0, 0, 0, -1)) - 43200 # HHMM, etc. Minus 12 hours. set_process_timezone(old_tz) return dayN
[ "def", "increment_day_start_ut", "(", "self", ",", "day_start_ut", ",", "n_days", "=", "1", ")", ":", "old_tz", "=", "self", ".", "set_current_process_time_zone", "(", ")", "day0", "=", "time", ".", "localtime", "(", "day_start_ut", "+", "43200", ")", "# tim...
Increment the GTFS-definition of "day start". Parameters ---------- day_start_ut : int unixtime of the previous start of day. If this time is between 12:00 or greater, there *will* be bugs. To solve this, run the input through day_start_ut first. n_days: int number of days to increment
[ "Increment", "the", "GTFS", "-", "definition", "of", "day", "start", "." ]
python
valid
berkeley-cocosci/Wallace
wallace/experiments.py
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/experiments.py#L230-L234
def log(self, text, key="?????", force=False): """Print a string to the logs.""" if force or self.verbose: print ">>>> {} {}".format(key, text) sys.stdout.flush()
[ "def", "log", "(", "self", ",", "text", ",", "key", "=", "\"?????\"", ",", "force", "=", "False", ")", ":", "if", "force", "or", "self", ".", "verbose", ":", "print", "\">>>> {} {}\"", ".", "format", "(", "key", ",", "text", ")", "sys", ".", "stdou...
Print a string to the logs.
[ "Print", "a", "string", "to", "the", "logs", "." ]
python
train
funilrys/PyFunceble
PyFunceble/expiration_date.py
https://github.com/funilrys/PyFunceble/blob/cdf69cbde120199171f7158e1c33635753e6e2f5/PyFunceble/expiration_date.py#L434-L593
def _extract(self): # pragma: no cover """ Extract the expiration date from the whois record. :return: The status of the domain. :rtype: str """ # We try to get the expiration date from the database. expiration_date_from_database = Whois().get_expiration_date() if expiration_date_from_database: # The hash of the current whois record did not changed and the # expiration date from the database is not empty not equal to # None or False. # We generate the files and print the status. # It's an active element! Generate( PyFunceble.STATUS["official"]["up"], "WHOIS", expiration_date_from_database, ).status_file() # We handle und return the official up status. return PyFunceble.STATUS["official"]["up"] # We get the whois record. self.whois_record = Lookup().whois(PyFunceble.INTERN["referer"]) # We list the list of regex which will help us get an unformatted expiration date. to_match = [ r"expire:(.*)", r"expire on:(.*)", r"Expiry Date:(.*)", r"free-date(.*)", r"expires:(.*)", r"Expiration date:(.*)", r"Expiry date:(.*)", r"Expire Date:(.*)", r"renewal date:(.*)", r"Expires:(.*)", r"validity:(.*)", r"Expiration Date :(.*)", r"Expiry :(.*)", r"expires at:(.*)", r"domain_datebilleduntil:(.*)", r"Data de expiração \/ Expiration Date \(dd\/mm\/yyyy\):(.*)", r"Fecha de expiración \(Expiration date\):(.*)", r"\[Expires on\](.*)", r"Record expires on(.*)(\(YYYY-MM-DD\))", r"status: OK-UNTIL(.*)", r"renewal:(.*)", r"expires............:(.*)", r"expire-date:(.*)", r"Exp date:(.*)", r"Valid-date(.*)", r"Expires On:(.*)", r"Fecha de vencimiento:(.*)", r"Expiration:.........(.*)", r"Fecha de Vencimiento:(.*)", r"Registry Expiry Date:(.*)", r"Expires on..............:(.*)", r"Expiration Time:(.*)", r"Expiration Date:(.*)", r"Expired:(.*)", r"Date d'expiration:(.*)", r"expiration date:(.*)", ] if self.whois_record: # The whois record is not empty. if "current_test_data" in PyFunceble.INTERN: # The end-user want more information whith his test. # We update the whois_record index. 
PyFunceble.INTERN["current_test_data"][ "whois_record" ] = self.whois_record for string in to_match: # We loop through the list of regex. # We try tro extract the expiration date from the WHOIS record. expiration_date = Regex( self.whois_record, string, return_data=True, rematch=True, group=0 ).match() if expiration_date: # The expiration date could be extracted. # We get the extracted expiration date. self.expiration_date = expiration_date[0].strip() # We initate a regex which will help us know if a number # is present into the extracted expiration date. regex_rumbers = r"[0-9]" if Regex( self.expiration_date, regex_rumbers, return_data=False ).match(): # The extracted expiration date has a number. # We format the extracted expiration date. self.expiration_date = self._format() if ( self.expiration_date and not Regex( self.expiration_date, r"[0-9]{2}\-[a-z]{3}\-2[0-9]{3}", return_data=False, ).match() ): # The formatted expiration date does not match our unified format. # We log the problem. Logs().expiration_date(self.expiration_date) # We log the whois record. Logs().whois(self.whois_record) if "current_test_data" in PyFunceble.INTERN: # The end-user want more information whith his test. # We update the expiration_date index. PyFunceble.INTERN["current_test_data"][ "expiration_date" ] = self.expiration_date # We generate the files and print the status. # It's an active element! Generate( PyFunceble.STATUS["official"]["up"], "WHOIS", self.expiration_date, ).status_file() # We log the whois record. Logs().whois(self.whois_record) # We save the whois record into the database. Whois(expiration_date=self.expiration_date).add() # We handle und return the official up status. return PyFunceble.STATUS["official"]["up"] # The extracted expiration date does not have a number. # We log the whois record. Logs().whois(self.whois_record) # We return None, we could not get the expiration date. return None # The whois record is empty. 
# We return None, we could not get the whois record. return None
[ "def", "_extract", "(", "self", ")", ":", "# pragma: no cover", "# We try to get the expiration date from the database.", "expiration_date_from_database", "=", "Whois", "(", ")", ".", "get_expiration_date", "(", ")", "if", "expiration_date_from_database", ":", "# The hash of ...
Extract the expiration date from the whois record. :return: The status of the domain. :rtype: str
[ "Extract", "the", "expiration", "date", "from", "the", "whois", "record", "." ]
python
test
datasift/datasift-python
datasift/managed_sources.py
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/managed_sources.py#L220-L247
def get(self, source_id=None, source_type=None, page=None, per_page=None): """ Get a specific managed source or a list of them. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourceget :param source_id: (optional) target Source ID :type source_id: str :param source_type: (optional) data source name e.g. facebook_page, googleplus, instagram, yammer :type source_type: str :param page: (optional) page number for pagination, default 1 :type page: int :param per_page: (optional) number of items per page, default 20 :type per_page: int :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {} if source_type: params['source_type'] = source_type if source_id: params['id'] = source_id if page: params['page'] = page if per_page: params['per_page'] = per_page return self.request.get('get', params=params)
[ "def", "get", "(", "self", ",", "source_id", "=", "None", ",", "source_type", "=", "None", ",", "page", "=", "None", ",", "per_page", "=", "None", ")", ":", "params", "=", "{", "}", "if", "source_type", ":", "params", "[", "'source_type'", "]", "=", ...
Get a specific managed source or a list of them. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourceget :param source_id: (optional) target Source ID :type source_id: str :param source_type: (optional) data source name e.g. facebook_page, googleplus, instagram, yammer :type source_type: str :param page: (optional) page number for pagination, default 1 :type page: int :param per_page: (optional) number of items per page, default 20 :type per_page: int :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Get", "a", "specific", "managed", "source", "or", "a", "list", "of", "them", "." ]
python
train
taskcluster/taskcluster-client.py
taskcluster/aio/awsprovisioner.py
https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/awsprovisioner.py#L251-L265
async def removeSecret(self, *args, **kwargs): """ Remove a Secret Remove a secret. After this call, a call to `getSecret` with the given token will return no information. It is very important that the consumer of a secret delete the secret from storage before handing over control to untrusted processes to prevent credential and/or secret leakage. This method is ``stable`` """ return await self._makeApiCall(self.funcinfo["removeSecret"], *args, **kwargs)
[ "async", "def", "removeSecret", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "await", "self", ".", "_makeApiCall", "(", "self", ".", "funcinfo", "[", "\"removeSecret\"", "]", ",", "*", "args", ",", "*", "*", "kwargs", "...
Remove a Secret Remove a secret. After this call, a call to `getSecret` with the given token will return no information. It is very important that the consumer of a secret delete the secret from storage before handing over control to untrusted processes to prevent credential and/or secret leakage. This method is ``stable``
[ "Remove", "a", "Secret" ]
python
train
savvastj/nbashots
nbashots/api.py
https://github.com/savvastj/nbashots/blob/76ece28d717f10b25eb0fc681b317df6ef6b5157/nbashots/api.py#L361-L383
def get_team_id(team_name): """ Returns the team ID associated with the team name that is passed in. Parameters ---------- team_name : str The team name whose ID we want. NOTE: Only pass in the team name (e.g. "Lakers"), not the city, or city and team name, or the team abbreviation. Returns ------- team_id : int The team ID associated with the team name. """ df = get_all_team_ids() df = df[df.TEAM_NAME == team_name] if len(df) == 0: er = "Invalid team name or there is no team with that name." raise ValueError(er) team_id = df.TEAM_ID.iloc[0] return team_id
[ "def", "get_team_id", "(", "team_name", ")", ":", "df", "=", "get_all_team_ids", "(", ")", "df", "=", "df", "[", "df", ".", "TEAM_NAME", "==", "team_name", "]", "if", "len", "(", "df", ")", "==", "0", ":", "er", "=", "\"Invalid team name or there is no t...
Returns the team ID associated with the team name that is passed in. Parameters ---------- team_name : str The team name whose ID we want. NOTE: Only pass in the team name (e.g. "Lakers"), not the city, or city and team name, or the team abbreviation. Returns ------- team_id : int The team ID associated with the team name.
[ "Returns", "the", "team", "ID", "associated", "with", "the", "team", "name", "that", "is", "passed", "in", "." ]
python
train
voidpp/python-tools
voidpp_tools/terminal.py
https://github.com/voidpp/python-tools/blob/0fc7460c827b02d8914411cedddadc23ccb3cc73/voidpp_tools/terminal.py#L10-L29
def get_size(): """ - get width and height of console - works on linux,os x,windows,cygwin(windows) originally retrieved from: http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python """ current_os = platform.system() tuple_xy = None if current_os == 'Windows': tuple_xy = _get_terminal_size_windows() if tuple_xy is None: tuple_xy = _get_terminal_size_tput() # needed for window's python in cygwin's xterm! if current_os in ['Linux', 'Darwin'] or current_os.startswith('CYGWIN'): tuple_xy = _get_terminal_size_linux() if tuple_xy is None: logger.debug("Cannot determine terminal size, use default") tuple_xy = (80, 25) # default value return dict(cols = tuple_xy[0], rows = tuple_xy[1])
[ "def", "get_size", "(", ")", ":", "current_os", "=", "platform", ".", "system", "(", ")", "tuple_xy", "=", "None", "if", "current_os", "==", "'Windows'", ":", "tuple_xy", "=", "_get_terminal_size_windows", "(", ")", "if", "tuple_xy", "is", "None", ":", "tu...
- get width and height of console - works on linux,os x,windows,cygwin(windows) originally retrieved from: http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
[ "-", "get", "width", "and", "height", "of", "console", "-", "works", "on", "linux", "os", "x", "windows", "cygwin", "(", "windows", ")", "originally", "retrieved", "from", ":", "http", ":", "//", "stackoverflow", ".", "com", "/", "questions", "/", "56674...
python
train
APSL/transmanager
transmanager/manager.py
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L362-L371
def get_main_language(): """ returns the main language :return: """ try: main_language = TransLanguage.objects.filter(main_language=True).get() return main_language.code except TransLanguage.DoesNotExist: return TM_DEFAULT_LANGUAGE_CODE
[ "def", "get_main_language", "(", ")", ":", "try", ":", "main_language", "=", "TransLanguage", ".", "objects", ".", "filter", "(", "main_language", "=", "True", ")", ".", "get", "(", ")", "return", "main_language", ".", "code", "except", "TransLanguage", ".",...
returns the main language :return:
[ "returns", "the", "main", "language", ":", "return", ":" ]
python
train
KoffeinFlummi/Chronyk
chronyk/chronyk.py
https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L541-L552
def ctime(self, timezone=None): """Returns a ctime string. :param timezone = self.timezone The timezone (in seconds west of UTC) to return the value in. By default, the timezone used when constructing the class is used (local one by default). To use UTC, use timezone = 0. To use the local tz, use timezone = chronyk.LOCALTZ. """ if timezone is None: timezone = self.timezone return time.ctime(self.__timestamp__ - timezone)
[ "def", "ctime", "(", "self", ",", "timezone", "=", "None", ")", ":", "if", "timezone", "is", "None", ":", "timezone", "=", "self", ".", "timezone", "return", "time", ".", "ctime", "(", "self", ".", "__timestamp__", "-", "timezone", ")" ]
Returns a ctime string. :param timezone = self.timezone The timezone (in seconds west of UTC) to return the value in. By default, the timezone used when constructing the class is used (local one by default). To use UTC, use timezone = 0. To use the local tz, use timezone = chronyk.LOCALTZ.
[ "Returns", "a", "ctime", "string", "." ]
python
train
raphaelm/python-sepaxml
sepaxml/utils.py
https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/utils.py#L64-L76
def int_to_decimal_str(integer): """ Helper to convert integers (representing cents) into decimal currency string. WARNING: DO NOT TRY TO DO THIS BY DIVISION, FLOATING POINT ERRORS ARE NO FUN IN FINANCIAL SYSTEMS. @param integer The amount in cents @return string The amount in currency with full stop decimal separator """ int_string = str(integer) if len(int_string) < 2: return "0." + int_string.zfill(2) else: return int_string[:-2] + "." + int_string[-2:]
[ "def", "int_to_decimal_str", "(", "integer", ")", ":", "int_string", "=", "str", "(", "integer", ")", "if", "len", "(", "int_string", ")", "<", "2", ":", "return", "\"0.\"", "+", "int_string", ".", "zfill", "(", "2", ")", "else", ":", "return", "int_st...
Helper to convert integers (representing cents) into decimal currency string. WARNING: DO NOT TRY TO DO THIS BY DIVISION, FLOATING POINT ERRORS ARE NO FUN IN FINANCIAL SYSTEMS. @param integer The amount in cents @return string The amount in currency with full stop decimal separator
[ "Helper", "to", "convert", "integers", "(", "representing", "cents", ")", "into", "decimal", "currency", "string", ".", "WARNING", ":", "DO", "NOT", "TRY", "TO", "DO", "THIS", "BY", "DIVISION", "FLOATING", "POINT", "ERRORS", "ARE", "NO", "FUN", "IN", "FINA...
python
train
davgeo/clear
clear/renamer.py
https://github.com/davgeo/clear/blob/5ec85d27efd28afddfcd4c3f44df17f0115a77aa/clear/renamer.py#L150-L261
def _GetShowID(self, stringSearch, origStringSearch = None): """ Search for given string as an existing entry in the database file name table or, if no match is found, as a show name from the TV guide. If an exact match is not found in the database the user can accept or decline the best match from the TV guide or can provide an alternate match to lookup. Parameters ---------- stringSearch : string String to look up in database or guide. origStringSearch : string [optional: default = None] Original search string, used by recusive function calls. Returns ---------- tvfile.ShowInfo or None If no show id could be found this returns None, otherwise it returns a tvfile.ShowInfo object containing show name and show id. """ showInfo = tvfile.ShowInfo() if origStringSearch is None: goodlogging.Log.Info("RENAMER", "Looking up show ID for: {0}".format(stringSearch)) origStringSearch = stringSearch goodlogging.Log.IncreaseIndent() showInfo.showID = self._db.SearchFileNameTable(stringSearch) if showInfo.showID is None: goodlogging.Log.Info("RENAMER", "No show ID match found for '{0}' in database".format(stringSearch)) showNameList = self._guide.ShowNameLookUp(stringSearch) if self._skipUserInput is True: if len(showNameList) == 1: showName = showNameList[0] goodlogging.Log.Info("RENAMER", "Automatic selection of showname: {0}".format(showName)) else: showName = None goodlogging.Log.Info("RENAMER", "Show skipped - could not make automatic selection of showname") else: showName = util.UserAcceptance(showNameList) if showName in showNameList: libEntry = self._db.SearchTVLibrary(showName = showName) if libEntry is None: if self._skipUserInput is True: response = 'y' else: goodlogging.Log.Info("RENAMER", "No show by this name found in TV library database. 
Is this a new show for the database?") response = goodlogging.Log.Input("RENAMER", "Enter 'y' (yes), 'n' (no) or 'ls' (list existing shows): ") response = util.ValidUserResponse(response, ('y', 'n', 'ls')) if response.lower() == 'ls': dbLibList = self._db.SearchTVLibrary() if dbLibList is None: goodlogging.Log.Info("RENAMER", "TV library is empty") response = 'y' else: dbShowNameList = [i[1] for i in dbLibList] dbShowNameStr = ', '.join(dbShowNameList) goodlogging.Log.Info("RENAMER", "Existing shows in database are: {0}".format(dbShowNameStr)) response = goodlogging.Log.Input("RENAMER", "Is this a new show? [y/n]: ") response = util.ValidUserResponse(response, ('y', 'n')) if response.lower() == 'y': showInfo.showID = self._db.AddShowToTVLibrary(showName) showInfo.showName = showName else: try: dbShowNameList except NameError: dbLibList = self._db.SearchTVLibrary() if dbLibList is None: goodlogging.Log.Info("RENAMER", "No show ID found - TV library is empty") return None dbShowNameList = [i[1] for i in dbLibList] while showInfo.showID is None: matchShowList = util.GetBestMatch(showName, dbShowNameList) showName = util.UserAcceptance(matchShowList) if showName is None: goodlogging.Log.Info("RENAMER", "No show ID found - could not match to existing show") return None elif showName in matchShowList: showInfo.showID = self._db.SearchTVLibrary(showName = showName)[0][0] showInfo.showName = showName else: showInfo.showID = libEntry[0][0] self._db.AddToFileNameTable(origStringSearch, showInfo.showID) goodlogging.Log.DecreaseIndent() return showInfo elif showName is None: goodlogging.Log.DecreaseIndent() return None else: goodlogging.Log.DecreaseIndent() return self._GetShowID(showName, origStringSearch) else: goodlogging.Log.Info("RENAMER", "Match found: show ID = {0}".format(showInfo.showID)) if origStringSearch != stringSearch: self._db.AddToFileNameTable(origStringSearch, showInfo.showID) goodlogging.Log.DecreaseIndent() return showInfo
[ "def", "_GetShowID", "(", "self", ",", "stringSearch", ",", "origStringSearch", "=", "None", ")", ":", "showInfo", "=", "tvfile", ".", "ShowInfo", "(", ")", "if", "origStringSearch", "is", "None", ":", "goodlogging", ".", "Log", ".", "Info", "(", "\"RENAME...
Search for given string as an existing entry in the database file name table or, if no match is found, as a show name from the TV guide. If an exact match is not found in the database the user can accept or decline the best match from the TV guide or can provide an alternate match to lookup. Parameters ---------- stringSearch : string String to look up in database or guide. origStringSearch : string [optional: default = None] Original search string, used by recusive function calls. Returns ---------- tvfile.ShowInfo or None If no show id could be found this returns None, otherwise it returns a tvfile.ShowInfo object containing show name and show id.
[ "Search", "for", "given", "string", "as", "an", "existing", "entry", "in", "the", "database", "file", "name", "table", "or", "if", "no", "match", "is", "found", "as", "a", "show", "name", "from", "the", "TV", "guide", "." ]
python
train
tansey/gfl
pygfl/utils.py
https://github.com/tansey/gfl/blob/ae0f078bab57aba9e827ed6162f247ff9dc2aa19/pygfl/utils.py#L160-L163
def vector_str(p, decimal_places=2, print_zero=True): '''Pretty-print the vector values.''' style = '{0:.' + str(decimal_places) + 'f}' return '[{0}]'.format(", ".join([' ' if not print_zero and a == 0 else style.format(a) for a in p]))
[ "def", "vector_str", "(", "p", ",", "decimal_places", "=", "2", ",", "print_zero", "=", "True", ")", ":", "style", "=", "'{0:.'", "+", "str", "(", "decimal_places", ")", "+", "'f}'", "return", "'[{0}]'", ".", "format", "(", "\", \"", ".", "join", "(", ...
Pretty-print the vector values.
[ "Pretty", "-", "print", "the", "vector", "values", "." ]
python
train
tcalmant/ipopo
pelix/shell/report.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/shell/report.py#L503-L511
def pelix_infos(self): """ Basic information about the Pelix framework instance """ framework = self.__context.get_framework() return { "version": framework.get_version(), "properties": framework.get_properties(), }
[ "def", "pelix_infos", "(", "self", ")", ":", "framework", "=", "self", ".", "__context", ".", "get_framework", "(", ")", "return", "{", "\"version\"", ":", "framework", ".", "get_version", "(", ")", ",", "\"properties\"", ":", "framework", ".", "get_properti...
Basic information about the Pelix framework instance
[ "Basic", "information", "about", "the", "Pelix", "framework", "instance" ]
python
train
datajoint/datajoint-python
datajoint/table.py
https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/table.py#L571-L604
def lookup_class_name(name, context, depth=3): """ given a table name in the form `schema_name`.`table_name`, find its class in the context. :param name: `schema_name`.`table_name` :param context: dictionary representing the namespace :param depth: search depth into imported modules, helps avoid infinite recursion. :return: class name found in the context or None if not found """ # breadth-first search nodes = [dict(context=context, context_name='', depth=depth)] while nodes: node = nodes.pop(0) for member_name, member in node['context'].items(): if not member_name.startswith('_'): # skip IPython's implicit variables if inspect.isclass(member) and issubclass(member, Table): if member.full_table_name == name: # found it! return '.'.join([node['context_name'], member_name]).lstrip('.') try: # look for part tables parts = member._ordered_class_members except AttributeError: pass # not a UserTable -- cannot have part tables. else: for part in (getattr(member, p) for p in parts if p[0].isupper() and hasattr(member, p)): if inspect.isclass(part) and issubclass(part, Table) and part.full_table_name == name: return '.'.join([node['context_name'], member_name, part.__name__]).lstrip('.') elif node['depth'] > 0 and inspect.ismodule(member) and member.__name__ != 'datajoint': try: nodes.append( dict(context=dict(inspect.getmembers(member)), context_name=node['context_name'] + '.' + member_name, depth=node['depth']-1)) except ImportError: pass # could not import, so do not attempt return None
[ "def", "lookup_class_name", "(", "name", ",", "context", ",", "depth", "=", "3", ")", ":", "# breadth-first search", "nodes", "=", "[", "dict", "(", "context", "=", "context", ",", "context_name", "=", "''", ",", "depth", "=", "depth", ")", "]", "while",...
given a table name in the form `schema_name`.`table_name`, find its class in the context. :param name: `schema_name`.`table_name` :param context: dictionary representing the namespace :param depth: search depth into imported modules, helps avoid infinite recursion. :return: class name found in the context or None if not found
[ "given", "a", "table", "name", "in", "the", "form", "schema_name", ".", "table_name", "find", "its", "class", "in", "the", "context", ".", ":", "param", "name", ":", "schema_name", ".", "table_name", ":", "param", "context", ":", "dictionary", "representing"...
python
train
clalancette/pycdlib
pycdlib/udf.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/udf.py#L1503-L1538
def new(self): # type: () -> None ''' A method to create a new UDF Partition Volume Descriptor. Parameters: None. Returns: Nothing. ''' if self._initialized: raise pycdlibexception.PyCdlibInternalError('UDF Partition Volume Descriptor already initialized') self.desc_tag = UDFTag() self.desc_tag.new(5) # FIXME: we should let the user set serial_number self.vol_desc_seqnum = 2 self.part_flags = 1 # FIXME: how should we set this? self.part_num = 0 # FIXME: how should we set this? self.part_contents = UDFEntityID() self.part_contents.new(2, b'+NSR02') self.part_contents_use = UDFPartitionHeaderDescriptor() self.part_contents_use.new() self.access_type = 1 self.part_start_location = 0 # This will get set later self.part_length = 3 # This will get set later self.impl_ident = UDFEntityID() self.impl_ident.new(0, b'*pycdlib') self.implementation_use = b'\x00' * 128 # FIXME: we should let the user set this self._initialized = True
[ "def", "new", "(", "self", ")", ":", "# type: () -> None", "if", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'UDF Partition Volume Descriptor already initialized'", ")", "self", ".", "desc_tag", "=", "UDFTag", "(",...
A method to create a new UDF Partition Volume Descriptor. Parameters: None. Returns: Nothing.
[ "A", "method", "to", "create", "a", "new", "UDF", "Partition", "Volume", "Descriptor", "." ]
python
train
futursolo/magichttp
magichttp/readers.py
https://github.com/futursolo/magichttp/blob/84445d21d6829a43132da6d50a72501739d64ca4/magichttp/readers.py#L342-L400
async def read_until( self, separator: bytes=b"\n", *, keep_separator: bool=True) -> bytes: """ Read until the separator has been found. When the max size of the buffer has been reached, and the separator is not found, this method will raise a :class:`MaxBufferLengthReachedError`. Similarly, if the end has been reached before found the separator it will raise a :class:`SeparatorNotFoundError`. When :method:`.finished()` is `True`, this method will raise any errors occurred during the read or a :class:`ReadFinishedError`. """ async with self._read_lock: self._raise_exc_if_finished() start_pos = 0 while True: separator_pos = self._buf.find(separator, start_pos) if separator_pos != -1: break if len(self) > self.max_buf_len: raise MaxBufferLengthReachedError try: await self._wait_for_data() except asyncio.CancelledError: # pragma: no cover raise except Exception as e: if len(self) > 0: raise SeparatorNotFoundError from e else: raise new_start_pos = len(self) - len(separator) if new_start_pos > 0: start_pos = new_start_pos full_pos = separator_pos + len(separator) if keep_separator: data_pos = full_pos else: data_pos = separator_pos data = bytes(self._buf[0:data_pos]) del self._buf[0:full_pos] return data
[ "async", "def", "read_until", "(", "self", ",", "separator", ":", "bytes", "=", "b\"\\n\"", ",", "*", ",", "keep_separator", ":", "bool", "=", "True", ")", "->", "bytes", ":", "async", "with", "self", ".", "_read_lock", ":", "self", ".", "_raise_exc_if_f...
Read until the separator has been found. When the max size of the buffer has been reached, and the separator is not found, this method will raise a :class:`MaxBufferLengthReachedError`. Similarly, if the end has been reached before found the separator it will raise a :class:`SeparatorNotFoundError`. When :method:`.finished()` is `True`, this method will raise any errors occurred during the read or a :class:`ReadFinishedError`.
[ "Read", "until", "the", "separator", "has", "been", "found", "." ]
python
train
cloud-custodian/cloud-custodian
c7n/filters/revisions.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/filters/revisions.py#L156-L159
def transform_revision(self, revision): """make config revision look like describe output.""" config = self.manager.get_source('config') return config.load_resource(revision)
[ "def", "transform_revision", "(", "self", ",", "revision", ")", ":", "config", "=", "self", ".", "manager", ".", "get_source", "(", "'config'", ")", "return", "config", ".", "load_resource", "(", "revision", ")" ]
make config revision look like describe output.
[ "make", "config", "revision", "look", "like", "describe", "output", "." ]
python
train
UCSBarchlab/PyRTL
pyrtl/inputoutput.py
https://github.com/UCSBarchlab/PyRTL/blob/0988e5c9c10ededd5e1f58d5306603f9edf4b3e2/pyrtl/inputoutput.py#L544-L551
def block_to_svg(block=None): """ Return an SVG for the block. """ block = working_block(block) try: from graphviz import Source return Source(block_to_graphviz_string())._repr_svg_() except ImportError: raise PyrtlError('need graphviz installed (try "pip install graphviz")')
[ "def", "block_to_svg", "(", "block", "=", "None", ")", ":", "block", "=", "working_block", "(", "block", ")", "try", ":", "from", "graphviz", "import", "Source", "return", "Source", "(", "block_to_graphviz_string", "(", ")", ")", ".", "_repr_svg_", "(", ")...
Return an SVG for the block.
[ "Return", "an", "SVG", "for", "the", "block", "." ]
python
train
tjcsl/ion
intranet/apps/emailfwd/views.py
https://github.com/tjcsl/ion/blob/5d722b0725d572039bb0929fd5715a4070c82c72/intranet/apps/emailfwd/views.py#L14-L43
def senior_email_forward_view(request): """Add a forwarding address for graduating seniors.""" if not request.user.is_senior: messages.error(request, "Only seniors can set their forwarding address.") return redirect("index") try: forward = SeniorEmailForward.objects.get(user=request.user) except SeniorEmailForward.DoesNotExist: forward = None if request.method == "POST": if forward: form = SeniorEmailForwardForm(request.POST, instance=forward) else: form = SeniorEmailForwardForm(request.POST) logger.debug(form) if form.is_valid(): obj = form.save(commit=False) obj.user = request.user obj.save() messages.success(request, "Successfully added forwarding address.") return redirect("index") else: messages.error(request, "Error adding forwarding address.") else: if forward: form = SeniorEmailForwardForm(instance=forward) else: form = SeniorEmailForwardForm() return render(request, "emailfwd/senior_forward.html", {"form": form, "forward": forward})
[ "def", "senior_email_forward_view", "(", "request", ")", ":", "if", "not", "request", ".", "user", ".", "is_senior", ":", "messages", ".", "error", "(", "request", ",", "\"Only seniors can set their forwarding address.\"", ")", "return", "redirect", "(", "\"index\""...
Add a forwarding address for graduating seniors.
[ "Add", "a", "forwarding", "address", "for", "graduating", "seniors", "." ]
python
train
Julius2342/pyvlx
pyvlx/house_status_monitor.py
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/house_status_monitor.py#L39-L44
async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if not isinstance(frame, FrameHouseStatusMonitorDisableConfirmation): return False self.success = True return True
[ "async", "def", "handle_frame", "(", "self", ",", "frame", ")", ":", "if", "not", "isinstance", "(", "frame", ",", "FrameHouseStatusMonitorDisableConfirmation", ")", ":", "return", "False", "self", ".", "success", "=", "True", "return", "True" ]
Handle incoming API frame, return True if this was the expected frame.
[ "Handle", "incoming", "API", "frame", "return", "True", "if", "this", "was", "the", "expected", "frame", "." ]
python
train
gbowerman/azurerm
azurerm/keyvault.py
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/keyvault.py#L169-L183
def delete_keyvault_secret(access_token, vault_uri, secret_name): '''Deletes a secret from a key vault using the key vault URI. Args: access_token (str): A valid Azure authentication token. vault_uri (str): Vault URI e.g. https://myvault.azure.net. secret_name (str): Name of the secret to add. Returns: HTTP response. 200 OK. ''' endpoint = ''.join([vault_uri, '/secrets/', secret_name, '?api-version=', '7.0']) return do_delete(endpoint, access_token)
[ "def", "delete_keyvault_secret", "(", "access_token", ",", "vault_uri", ",", "secret_name", ")", ":", "endpoint", "=", "''", ".", "join", "(", "[", "vault_uri", ",", "'/secrets/'", ",", "secret_name", ",", "'?api-version='", ",", "'7.0'", "]", ")", "return", ...
Deletes a secret from a key vault using the key vault URI. Args: access_token (str): A valid Azure authentication token. vault_uri (str): Vault URI e.g. https://myvault.azure.net. secret_name (str): Name of the secret to add. Returns: HTTP response. 200 OK.
[ "Deletes", "a", "secret", "from", "a", "key", "vault", "using", "the", "key", "vault", "URI", "." ]
python
train
apache/incubator-heron
heron/tools/tracker/src/python/utils.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/tracker/src/python/utils.py#L137-L143
def get_heron_tracker_dir(): """ This will extract heron tracker directory from .pex file. :return: root location for heron-tools. """ path = "/".join(os.path.realpath(__file__).split('/')[:-8]) return normalized_class_path(path)
[ "def", "get_heron_tracker_dir", "(", ")", ":", "path", "=", "\"/\"", ".", "join", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ".", "split", "(", "'/'", ")", "[", ":", "-", "8", "]", ")", "return", "normalized_class_path", "(", "pat...
This will extract heron tracker directory from .pex file. :return: root location for heron-tools.
[ "This", "will", "extract", "heron", "tracker", "directory", "from", ".", "pex", "file", ".", ":", "return", ":", "root", "location", "for", "heron", "-", "tools", "." ]
python
valid
vsoch/helpme
helpme/action/record.py
https://github.com/vsoch/helpme/blob/e609172260b10cddadb2d2023ab26da8082a9feb/helpme/action/record.py#L80-L101
def record_asciinema(): '''a wrapper around generation of an asciinema.api.Api and a custom recorder to pull out the input arguments to the Record from argparse. The function generates a filename in advance and a return code so we can check the final status. ''' import asciinema.config as aconfig from asciinema.api import Api # Load the API class cfg = aconfig.load() api = Api(cfg.api_url, os.environ.get("USER"), cfg.install_id) # Create dummy class to pass in as args recorder = HelpMeRecord(api) code = recorder.execute() if code == 0 and os.path.exists(recorder.filename): return recorder.filename print('Problem generating %s, return code %s' %(recorder.filename, code))
[ "def", "record_asciinema", "(", ")", ":", "import", "asciinema", ".", "config", "as", "aconfig", "from", "asciinema", ".", "api", "import", "Api", "# Load the API class", "cfg", "=", "aconfig", ".", "load", "(", ")", "api", "=", "Api", "(", "cfg", ".", "...
a wrapper around generation of an asciinema.api.Api and a custom recorder to pull out the input arguments to the Record from argparse. The function generates a filename in advance and a return code so we can check the final status.
[ "a", "wrapper", "around", "generation", "of", "an", "asciinema", ".", "api", ".", "Api", "and", "a", "custom", "recorder", "to", "pull", "out", "the", "input", "arguments", "to", "the", "Record", "from", "argparse", ".", "The", "function", "generates", "a"...
python
train
inasafe/inasafe
safe/utilities/profiling.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/utilities/profiling.py#L56-L65
def elapsed_time(self): """To know the duration of the function. This property might return None if the function is still running. """ if self._end_time: elapsed_time = round(self._end_time - self._start_time, 3) return elapsed_time else: return None
[ "def", "elapsed_time", "(", "self", ")", ":", "if", "self", ".", "_end_time", ":", "elapsed_time", "=", "round", "(", "self", ".", "_end_time", "-", "self", ".", "_start_time", ",", "3", ")", "return", "elapsed_time", "else", ":", "return", "None" ]
To know the duration of the function. This property might return None if the function is still running.
[ "To", "know", "the", "duration", "of", "the", "function", "." ]
python
train
jonhadfield/python-hosts
python_hosts/utils.py
https://github.com/jonhadfield/python-hosts/blob/9ccaa8edc63418a91f10bf732b26070f21dd2ad0/python_hosts/utils.py#L63-L70
def dedupe_list(seq): """ Utility function to remove duplicates from a list :param seq: The sequence (list) to deduplicate :return: A list with original duplicates removed """ seen = set() return [x for x in seq if not (x in seen or seen.add(x))]
[ "def", "dedupe_list", "(", "seq", ")", ":", "seen", "=", "set", "(", ")", "return", "[", "x", "for", "x", "in", "seq", "if", "not", "(", "x", "in", "seen", "or", "seen", ".", "add", "(", "x", ")", ")", "]" ]
Utility function to remove duplicates from a list :param seq: The sequence (list) to deduplicate :return: A list with original duplicates removed
[ "Utility", "function", "to", "remove", "duplicates", "from", "a", "list", ":", "param", "seq", ":", "The", "sequence", "(", "list", ")", "to", "deduplicate", ":", "return", ":", "A", "list", "with", "original", "duplicates", "removed" ]
python
train
ninuxorg/nodeshot
nodeshot/networking/net/views.py
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/networking/net/views.py#L145-L165
def initial(self, request, *args, **kwargs): """ Custom initial method: * ensure device exists and store it in an instance attribute * change queryset to return only devices of current node """ super(BaseInterfaceList, self).initial(request, *args, **kwargs) # ensure device exists try: self.device = Device.objects.accessible_to(request.user)\ .get(pk=self.kwargs.get('pk', None)) except Device.DoesNotExist: raise Http404(_('Device not found.')) # check permissions on device (for interface creation) self.check_object_permissions(request, self.device) # return only interfaces of current device self.queryset = self.model.objects.filter(device_id=self.device.id)\ .accessible_to(self.request.user)
[ "def", "initial", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "super", "(", "BaseInterfaceList", ",", "self", ")", ".", "initial", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "# ensure device ...
Custom initial method: * ensure device exists and store it in an instance attribute * change queryset to return only devices of current node
[ "Custom", "initial", "method", ":", "*", "ensure", "device", "exists", "and", "store", "it", "in", "an", "instance", "attribute", "*", "change", "queryset", "to", "return", "only", "devices", "of", "current", "node" ]
python
train
choderalab/pymbar
pymbar/old_mbar.py
https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/old_mbar.py#L1106-L1212
def computePerturbedFreeEnergies(self, u_ln, compute_uncertainty=True, uncertainty_method=None, warning_cutoff=1.0e-10, return_theta=False): """Compute the free energies for a new set of states. Here, we desire the free energy differences among a set of new states, as well as the uncertainty estimates in these differences. Parameters ---------- u_ln : np.ndarray, float, shape=(L, Nmax) u_ln[l,n] is the reduced potential energy of uncorrelated configuration n evaluated at new state k. Can be completely indepednent of the original number of states. compute_uncertainty : bool, optional If False, the uncertainties will not be computed (default: True) uncertainty_method : string, optional Choice of method used to compute asymptotic covariance method, or None to use default See help for computeAsymptoticCovarianceMatrix() for more information on various methods. (default: None) warning_cutoff : float, optional Warn if squared-uncertainty is negative and larger in magnitude than this number (default: 1.0e-10) Returns ------- Deltaf_ij : np.ndarray, float, shape=(L, L) Deltaf_ij[i,j] = f_j - f_i, the dimensionless free energy difference between new states i and j dDeltaf_ij : np.ndarray, float, shape=(L, L) dDeltaf_ij[i,j] is the estimated statistical uncertainty in Deltaf_ij[i,j] Examples -------- >>> from pymbar import testsystems >>> (x_n, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn') >>> mbar = MBAR(u_kn, N_k) >>> [Deltaf_ij, dDeltaf_ij] = mbar.computePerturbedFreeEnergies(u_kn) """ # Convert to np matrix. u_ln = np.array(u_ln, dtype=np.float64) # Get the dimensions of the matrix of reduced potential energies, and convert if necessary if len(np.shape(u_ln)) == 3: u_ln = kln_to_kn(u_ln, N_k=self.N_k) [L, N] = u_ln.shape # Check dimensions. if (N < self.N): raise DataError("There seems to be too few samples in u_kn. You must evaluate at the new potential with all of the samples used originally.") # Retrieve N and K for convenience. 
N = self.N K = self.K # Augment W_nk, N_k, and c_k for the new states. W_nk = np.zeros([N, K + L], dtype=np.float64) # weight matrix N_k = np.zeros([K + L], dtype=np.int32) # counts f_k = np.zeros([K + L], dtype=np.float64) # free energies # Fill in first half of matrix with existing q_k(x) from states. W_nk[:, 0:K] = np.exp(self.Log_W_nk) N_k[0:K] = self.N_k f_k[0:K] = self.f_k # Compute normalized weights. for l in range(0, L): # Compute unnormalized log weights. log_w_n = self._computeUnnormalizedLogWeights(u_ln[l, :]) # Compute free energies f_k[K + l] = - _logsum(log_w_n) # Store normalized weights. Keep in exponential not log form # because we will not store W_nk W_nk[:, K + l] = np.exp(log_w_n + f_k[K + l]) if (compute_uncertainty or return_theta): # Compute augmented asymptotic covariance matrix. Theta_ij = self._computeAsymptoticCovarianceMatrix( W_nk, N_k, method = uncertainty_method) # Compute matrix of free energy differences between states and # associated uncertainties. # makes matrix operations easier to recast f_k = np.matrix(f_k[K:K + L]) Deltaf_ij = f_k - f_k.transpose() returns = [] returns.append(Deltaf_ij) if (compute_uncertainty): diag = Theta_ij.diagonal() dii = diag[0, K:K + L] d2DeltaF = dii + dii.transpose() - 2 * Theta_ij[K:K + L, K:K + L] # check for any numbers below zero. if (np.any(d2DeltaF < 0.0)): if(np.any(d2DeltaF) < warning_cutoff): print("A squared uncertainty is negative. d2DeltaF = %e" % d2DeltaF[(np.any(d2DeltaF) < warning_cutoff)]) else: d2DeltaF[(np.any(d2DeltaF) < warning_cutoff)] = 0.0 # take the square root of entries of the matrix dDeltaf_ij = np.sqrt(d2DeltaF) returns.append(dDeltaf_ij) if (return_theta): returns.append(Theta_ij) # Return matrix of free energy differences and uncertainties. return returns
[ "def", "computePerturbedFreeEnergies", "(", "self", ",", "u_ln", ",", "compute_uncertainty", "=", "True", ",", "uncertainty_method", "=", "None", ",", "warning_cutoff", "=", "1.0e-10", ",", "return_theta", "=", "False", ")", ":", "# Convert to np matrix.", "u_ln", ...
Compute the free energies for a new set of states. Here, we desire the free energy differences among a set of new states, as well as the uncertainty estimates in these differences. Parameters ---------- u_ln : np.ndarray, float, shape=(L, Nmax) u_ln[l,n] is the reduced potential energy of uncorrelated configuration n evaluated at new state k. Can be completely indepednent of the original number of states. compute_uncertainty : bool, optional If False, the uncertainties will not be computed (default: True) uncertainty_method : string, optional Choice of method used to compute asymptotic covariance method, or None to use default See help for computeAsymptoticCovarianceMatrix() for more information on various methods. (default: None) warning_cutoff : float, optional Warn if squared-uncertainty is negative and larger in magnitude than this number (default: 1.0e-10) Returns ------- Deltaf_ij : np.ndarray, float, shape=(L, L) Deltaf_ij[i,j] = f_j - f_i, the dimensionless free energy difference between new states i and j dDeltaf_ij : np.ndarray, float, shape=(L, L) dDeltaf_ij[i,j] is the estimated statistical uncertainty in Deltaf_ij[i,j] Examples -------- >>> from pymbar import testsystems >>> (x_n, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn') >>> mbar = MBAR(u_kn, N_k) >>> [Deltaf_ij, dDeltaf_ij] = mbar.computePerturbedFreeEnergies(u_kn)
[ "Compute", "the", "free", "energies", "for", "a", "new", "set", "of", "states", "." ]
python
train
maaku/python-bitcoin
bitcoin/authtree.py
https://github.com/maaku/python-bitcoin/blob/1b80c284170fd3f547cc45f4700ce169f3f99641/bitcoin/authtree.py#L61-L65
def count(self):
    """The number of items, pruned or otherwise, contained by this branch."""
    # Lazily compute and memoize the count on first access.
    cached = getattr(self, '_count', None)
    if cached is None:
        # Fall back to 0 when the underlying node exposes no count.
        cached = getattr(self.node, 'count', 0)
        self._count = cached
    return cached
[ "def", "count", "(", "self", ")", ":", "if", "getattr", "(", "self", ",", "'_count'", ",", "None", ")", "is", "None", ":", "self", ".", "_count", "=", "getattr", "(", "self", ".", "node", ",", "'count'", ",", "0", ")", "return", "self", ".", "_co...
The number of items, pruned or otherwise, contained by this branch.
[ "The", "number", "of", "items", "pruned", "or", "otherwise", "contained", "by", "this", "branch", "." ]
python
train
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L15270-L15285
def vzerog(v, ndim):
    """
    Indicate whether a general-dimensional vector is the zero vector.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vzerog_c.html

    :param v: Vector to be tested
    :type v: Array of floats
    :param ndim: Dimension of v
    :type ndim: int
    :return: true if and only if v is the zero vector
    :rtype: bool
    """
    # Marshal the Python inputs into the ctypes forms CSPICE expects.
    c_vec = stypes.toDoubleVector(v)
    c_dim = ctypes.c_int(ndim)
    return bool(libspice.vzerog_c(c_vec, c_dim))
[ "def", "vzerog", "(", "v", ",", "ndim", ")", ":", "v", "=", "stypes", ".", "toDoubleVector", "(", "v", ")", "ndim", "=", "ctypes", ".", "c_int", "(", "ndim", ")", "return", "bool", "(", "libspice", ".", "vzerog_c", "(", "v", ",", "ndim", ")", ")"...
Indicate whether a general-dimensional vector is the zero vector. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vzerog_c.html :param v: Vector to be tested :type v: Array of floats :param ndim: Dimension of v :type ndim: int :return: true if and only if v is the zero vector :rtype: bool
[ "Indicate", "whether", "a", "general", "-", "dimensional", "vector", "is", "the", "zero", "vector", "." ]
python
train
radjkarl/appBase
appbase/Session.py
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Session.py#L397-L401
def open(self):
    """open a session to define in a dialog in an extra window"""
    # Ask the user for a session file; an empty result means "cancelled".
    chosen = self.dialogs.getOpenFileName(filter="*.%s" % self.FTYPE)
    if chosen:
        self.new(chosen)
[ "def", "open", "(", "self", ")", ":", "filename", "=", "self", ".", "dialogs", ".", "getOpenFileName", "(", "filter", "=", "\"*.%s\"", "%", "self", ".", "FTYPE", ")", "if", "filename", ":", "self", ".", "new", "(", "filename", ")" ]
open a session to define in a dialog in an extra window
[ "open", "a", "session", "to", "define", "in", "a", "dialog", "in", "an", "extra", "window" ]
python
train
voicecom/pgtool
pgtool/pgtool.py
https://github.com/voicecom/pgtool/blob/36b8682bfca614d784fe58451e0cbc41315bc72e/pgtool/pgtool.py#L321-L328
def cmd_kill():
    """Kills all active connections to the specified database(s)."""
    db = connect()
    killed = terminate(db, args.databases)
    if killed == 0:
        log.error("No connections could be killed")
        # Return status 1, like killall
        sys.exit(1)
[ "def", "cmd_kill", "(", ")", ":", "db", "=", "connect", "(", ")", "count", "=", "terminate", "(", "db", ",", "args", ".", "databases", ")", "if", "count", "==", "0", ":", "log", ".", "error", "(", "\"No connections could be killed\"", ")", "# Return stat...
Kills all active connections to the specified database(s).
[ "Kills", "all", "active", "connections", "to", "the", "specified", "database", "(", "s", ")", "." ]
python
train
pywbem/pywbem
pywbem/cim_obj.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/cim_obj.py#L1302-L1310
def namespace(self, namespace):
    """Setter method; for a description see the getter method."""
    # pylint: disable=attribute-defined-outside-init
    ns = _ensure_unicode(namespace)
    if ns is not None:
        # Strip only after the unicode conversion: in Python 3 a byte
        # string cannot be stripped by a unicode character.
        ns = ns.strip('/')
    self._namespace = ns
[ "def", "namespace", "(", "self", ",", "namespace", ")", ":", "# pylint: disable=attribute-defined-outside-init", "self", ".", "_namespace", "=", "_ensure_unicode", "(", "namespace", ")", "if", "self", ".", "_namespace", "is", "not", "None", ":", "# In Python 3, a by...
Setter method; for a description see the getter method.
[ "Setter", "method", ";", "for", "a", "description", "see", "the", "getter", "method", "." ]
python
train
saltstack/salt
salt/modules/syslog_ng.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/syslog_ng.py#L738-L745
def _run_command(cmd, options=(), env=None):
    '''
    Runs the command cmd with options as its CLI parameters and returns
    the result as a dictionary.
    '''
    # Build the full argv: executable first, then its CLI options.
    argv = [cmd] + list(options)
    return __salt__['cmd.run_all'](argv, env=env, python_shell=False)
[ "def", "_run_command", "(", "cmd", ",", "options", "=", "(", ")", ",", "env", "=", "None", ")", ":", "params", "=", "[", "cmd", "]", "params", ".", "extend", "(", "options", ")", "return", "__salt__", "[", "'cmd.run_all'", "]", "(", "params", ",", ...
Runs the command cmd with options as its CLI parameters and returns the result as a dictionary.
[ "Runs", "the", "command", "cmd", "with", "options", "as", "its", "CLI", "parameters", "and", "returns", "the", "result", "as", "a", "dictionary", "." ]
python
train
docker/docker-py
docker/api/image.py
https://github.com/docker/docker-py/blob/613d6aad83acc9931ff2ecfd6a6c7bd8061dc125/docker/api/image.py#L61-L93
def images(self, name=None, quiet=False, all=False, filters=None):
    """
    List images. Similar to the ``docker images`` command.

    Args:
        name (str): Only show images belonging to the repository ``name``
        quiet (bool): Only return numeric IDs as a list.
        all (bool): Show intermediate image layers. By default, these are
            filtered out.
        filters (dict): Filters to be processed on the image list.
            Available filters:
            - ``dangling`` (bool)
            - ``label`` (str): format either ``key`` or ``key=value``

    Returns:
        (dict or list): A list if ``quiet=True``, otherwise a dict.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    # The API expects 0/1 flags rather than booleans.
    params = {
        'filter': name,
        'only_ids': 1 if quiet else 0,
        'all': 1 if all else 0,
    }
    if filters:
        params['filters'] = utils.convert_filters(filters)
    images_json = self._result(
        self._get(self._url("/images/json"), params=params), True
    )
    if not quiet:
        return images_json
    # quiet mode: reduce each image record to its ID.
    return [image['Id'] for image in images_json]
[ "def", "images", "(", "self", ",", "name", "=", "None", ",", "quiet", "=", "False", ",", "all", "=", "False", ",", "filters", "=", "None", ")", ":", "params", "=", "{", "'filter'", ":", "name", ",", "'only_ids'", ":", "1", "if", "quiet", "else", ...
List images. Similar to the ``docker images`` command. Args: name (str): Only show images belonging to the repository ``name`` quiet (bool): Only return numeric IDs as a list. all (bool): Show intermediate image layers. By default, these are filtered out. filters (dict): Filters to be processed on the image list. Available filters: - ``dangling`` (bool) - ``label`` (str): format either ``key`` or ``key=value`` Returns: (dict or list): A list if ``quiet=True``, otherwise a dict. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
[ "List", "images", ".", "Similar", "to", "the", "docker", "images", "command", "." ]
python
train
Microsoft/ApplicationInsights-Python
applicationinsights/channel/contracts/PageViewPerfData.py
https://github.com/Microsoft/ApplicationInsights-Python/blob/8452ab7126f9bb6964637d4aa1258c2af17563d6/applicationinsights/channel/contracts/PageViewPerfData.py#L93-L102
def perf_total(self, value):
    """The perf_total property.
    
    Args:
        value (string). the property value.
    """
    # Storing the default value is redundant, so drop the stored entry
    # instead; any other value is recorded explicitly.
    is_default = value == self._defaults['perfTotal']
    if is_default and 'perfTotal' in self._values:
        del self._values['perfTotal']
    else:
        self._values['perfTotal'] = value
[ "def", "perf_total", "(", "self", ",", "value", ")", ":", "if", "value", "==", "self", ".", "_defaults", "[", "'perfTotal'", "]", "and", "'perfTotal'", "in", "self", ".", "_values", ":", "del", "self", ".", "_values", "[", "'perfTotal'", "]", "else", "...
The perf_total property. Args: value (string). the property value.
[ "The", "perf_total", "property", ".", "Args", ":", "value", "(", "string", ")", ".", "the", "property", "value", "." ]
python
train