repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
IvanMalison/okcupyd
okcupyd/profile.py
https://github.com/IvanMalison/okcupyd/blob/46f4eaa9419098f6c299738ce148af55c64deb64/okcupyd/profile.py#L220-L229
def age(self):
    """
    :returns: The age of the user associated with this profile.
    """
    if self.is_logged_in_user:
        # The logged-in user's own profile exposes age via a different
        # xpath builder, and its text needs whitespace stripped.
        raw_age = self._user_age_xpb.get_text_(self.profile_tree).strip()
    else:
        raw_age = self._age_xpb.get_text_(self.profile_tree)
    return int(raw_age)
[ "def", "age", "(", "self", ")", ":", "if", "self", ".", "is_logged_in_user", ":", "# Retrieve the logged-in user's profile age", "return", "int", "(", "self", ".", "_user_age_xpb", ".", "get_text_", "(", "self", ".", "profile_tree", ")", ".", "strip", "(", ")"...
:returns: The age of the user associated with this profile.
[ ":", "returns", ":", "The", "age", "of", "the", "user", "associated", "with", "this", "profile", "." ]
python
train
mikedh/trimesh
trimesh/util.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/util.py#L303-L324
def make_sequence(obj):
    """
    Given an object, if it is a sequence return, otherwise
    add it to a length 1 sequence and return.

    Useful for wrapping functions which sometimes return single
    objects and other times return lists of objects.

    Parameters
    --------------
    obj : object
      An object to be made a sequence

    Returns
    --------------
    as_sequence : (n,) sequence
       Contains input value
    """
    # Non-sequences are wrapped in a singleton list before conversion.
    values = list(obj) if is_sequence(obj) else [obj]
    return np.array(values)
[ "def", "make_sequence", "(", "obj", ")", ":", "if", "is_sequence", "(", "obj", ")", ":", "return", "np", ".", "array", "(", "list", "(", "obj", ")", ")", "else", ":", "return", "np", ".", "array", "(", "[", "obj", "]", ")" ]
Given an object, if it is a sequence return, otherwise add it to a length 1 sequence and return. Useful for wrapping functions which sometimes return single objects and other times return lists of objects. Parameters -------------- obj : object An object to be made a sequence Returns -------------- as_sequence : (n,) sequence Contains input value
[ "Given", "an", "object", "if", "it", "is", "a", "sequence", "return", "otherwise", "add", "it", "to", "a", "length", "1", "sequence", "and", "return", "." ]
python
train
totalgood/twip
twip/scripts/clean.py
https://github.com/totalgood/twip/blob/5c0411d2acfbe5b421841072814c9152591c03f7/twip/scripts/clean.py#L107-L146
def dropna(df, nonnull_rows=100, nonnull_cols=50, nanstrs=('nan', 'NaN', ''), nullstr=''):
    """Drop columns/rows with too many NaNs and replace NaNs in columns of strings with ''

    Thresholds in the open interval (0, 1) are treated as fractions of the
    table size; otherwise they are absolute minimum counts of non-null values.

    >>> df = pd.DataFrame([['nan',np.nan,'str'],[np.nan,0.1,'and'],[2.0,None,np.nan]])
    >>> dropna(df)
    Empty DataFrame
    Columns: []
    Index: []
    >>> dropna(df, nonnull_cols=0, nonnull_rows=0)
         0    1    2
    0  NaN  str
    1  0.1  and
    2    2  NaN
    """
    # Convert fractional thresholds into absolute counts.
    if 0 < nonnull_rows < 1:
        nonnull_rows = int(nonnull_rows * len(df))
    if 0 < nonnull_cols < 1:
        nonnull_cols = int(nonnull_cols * len(df.columns))
    # Normalize string spellings of NaN (from ``nanstrs``) to real NaN in
    # object/string-dtype columns so the dropna thresholds see them.
    for label in df.columns:
        series = df[label].copy()
        if series.dtype in (np.dtype('O'), np.dtype('U'), np.dtype('S')):
            for nanstr in nanstrs:
                series[series == nanstr] = np.nan
            df[label] = series
    # in iPython Notebook, try dropping with lower thresholds, checking column and row count each time
    print('The raw table shape is {}'.format(df.shape))
    df = df.dropna(axis=1, thresh=nonnull_rows)
    print('After dropping columns with fewer than {} nonnull values, the table shape is {}'.format(nonnull_rows, df.shape))
    df = df.dropna(axis=0, thresh=nonnull_cols)
    print('After dropping rows with fewer than {} nonnull values, the table shape is {}'.format(nonnull_cols, df.shape))
    # Second pass: fill remaining NaNs in genuinely-string columns with
    # ``nullstr``; object columns whose non-null values share a concrete
    # dtype are downcast to that dtype instead.
    for label in df.columns:
        series = df[label].copy()
        if series.dtype == np.dtype('O'):
            nonnull_dtype = series.dropna(inplace=False).values.dtype
            if nonnull_dtype == np.dtype('O'):
                series[series.isnull()] = nullstr
                df[label] = series
            else:
                df[label] = series.astype(nonnull_dtype)
    return df
[ "def", "dropna", "(", "df", ",", "nonnull_rows", "=", "100", ",", "nonnull_cols", "=", "50", ",", "nanstrs", "=", "(", "'nan'", ",", "'NaN'", ",", "''", ")", ",", "nullstr", "=", "''", ")", ":", "if", "0", "<", "nonnull_rows", "<", "1", ":", "non...
Drop columns/rows with too many NaNs and replace NaNs in columns of strings with '' >>> df = pd.DataFrame([['nan',np.nan,'str'],[np.nan,0.1,'and'],[2.0,None,np.nan]]) >>> dropna(df) Empty DataFrame Columns: [] Index: [] >>> dropna(df, nonnull_cols=0, nonnull_rows=0) 0 1 2 0 NaN str 1 0.1 and 2 2 NaN
[ "Drop", "columns", "/", "rows", "with", "too", "many", "NaNs", "and", "replace", "NaNs", "in", "columns", "of", "strings", "with" ]
python
train
hydraplatform/hydra-base
hydra_base/lib/scenario.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/scenario.py#L220-L286
def update_scenario(scenario, update_data=True, update_groups=True, flush=True, **kwargs):
    """
    Update a single scenario as all resources already exist, there is no need
    to worry about negative IDS.

    flush = True flushes to the DB at the end of the function.
    flush = False does not flush, assuming that it will happen as part of
    another process, like update_network.

    :raises PermissionError: if the scenario is locked.
    """
    user_id = kwargs.get('user_id')

    scen = _get_scenario(scenario.id, user_id)

    # Locked scenarios must not be edited.
    if scen.locked == 'Y':
        raise PermissionError('Scenario is locked. Unlock before editing.')

    # Start/end times may arrive as floats (stored verbatim as text) or as
    # timestamps (converted to ordinals, then to text).
    start_time = None
    if isinstance(scenario.start_time, float):
        start_time = six.text_type(scenario.start_time)
    else:
        start_time = timestamp_to_ordinal(scenario.start_time)
        if start_time is not None:
            start_time = six.text_type(start_time)

    end_time = None
    if isinstance(scenario.end_time, float):
        end_time = six.text_type(scenario.end_time)
    else:
        end_time = timestamp_to_ordinal(scenario.end_time)
        if end_time is not None:
            end_time = six.text_type(end_time)

    # Copy the simple scalar fields across to the DB object.
    scen.name = scenario.name
    scen.description = scenario.description
    scen.layout = scenario.get_layout()
    scen.start_time = start_time
    scen.end_time = end_time
    scen.time_step = scenario.time_step

    if scenario.resourcescenarios == None:
        scenario.resourcescenarios = []

    if scenario.resourcegroupitems == None:
        scenario.resourcegroupitems = []

    #lazy load resourcescenarios from the DB
    scen.resourcescenarios

    if update_data is True:
        # Bulk-insert all datasets first, then attach each one to its
        # resource scenario (updated_datasets is index-aligned).
        datasets = [rs.dataset for rs in scenario.resourcescenarios]
        updated_datasets = data._bulk_insert_data(datasets, user_id, kwargs.get('app_name'))
        for i, r_scen in enumerate(scenario.resourcescenarios):
            _update_resourcescenario(scen, r_scen,
                                     dataset=updated_datasets[i],
                                     user_id=user_id,
                                     source=kwargs.get('app_name'))

    #lazy load resource group items from the DB
    scen.resourcegroupitems

    if update_groups is True:
        #Get all the existing resource group items for this scenario.
        #Then process all the items sent to this handler.
        #Any in the DB that are not passed in here are removed.
        for group_item in scenario.resourcegroupitems:
            _add_resourcegroupitem(group_item, scenario.id)

    if flush is True:
        db.DBSession.flush()

    return scen
[ "def", "update_scenario", "(", "scenario", ",", "update_data", "=", "True", ",", "update_groups", "=", "True", ",", "flush", "=", "True", ",", "*", "*", "kwargs", ")", ":", "user_id", "=", "kwargs", ".", "get", "(", "'user_id'", ")", "scen", "=", "_get...
Update a single scenario as all resources already exist, there is no need to worry about negative IDS flush = True flushes to the DB at the end of the function. flush = False does not flush, assuming that it will happen as part of another process, like update_network.
[ "Update", "a", "single", "scenario", "as", "all", "resources", "already", "exist", "there", "is", "no", "need", "to", "worry", "about", "negative", "IDS" ]
python
train
toomore/grs
grs/best_buy_or_sell.py
https://github.com/toomore/grs/blob/a1285cb57878284a886952968be9e31fbfa595dd/grs/best_buy_or_sell.py#L50-L57
def best_buy_1(self):
    """Heavy volume with a close above the open (bullish close).

    :rtype: bool
    """
    volume_increased = self.data.value[-1] > self.data.value[-2]
    closed_above_open = self.data.price[-1] > self.data.openprice[-1]
    return volume_increased and closed_above_open
[ "def", "best_buy_1", "(", "self", ")", ":", "result", "=", "self", ".", "data", ".", "value", "[", "-", "1", "]", ">", "self", ".", "data", ".", "value", "[", "-", "2", "]", "and", "self", ".", "data", ".", "price", "[", "-", "1", "]", ">", ...
量大收紅 :rtype: bool
[ "量大收紅" ]
python
train
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/functionprofiler.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/functionprofiler.py#L104-L129
def parseprofile(profilelog, out): ''' Parse a profile log and print the result on screen ''' file = open(out, 'w') # opening the output file print('Opening the profile in %s...' % profilelog) p = pstats.Stats(profilelog, stream=file) # parsing the profile with pstats, and output everything to the file print('Generating the stats, please wait...') file.write("=== All stats:\n") p.strip_dirs().sort_stats(-1).print_stats() file.write("=== Cumulative time:\n") p.sort_stats('cumulative').print_stats(100) file.write("=== Time:\n") p.sort_stats('time').print_stats(100) file.write("=== Time + cumulative time:\n") p.sort_stats('time', 'cum').print_stats(.5, 'init') file.write("=== Callees:\n") p.print_callees() file.write("=== Callers:\n") p.print_callers() #p.print_callers(.5, 'init') #p.add('fooprof') file.close() print('Stats generated and saved to %s.' % out) print('Everything is done. Exiting')
[ "def", "parseprofile", "(", "profilelog", ",", "out", ")", ":", "file", "=", "open", "(", "out", ",", "'w'", ")", "# opening the output file", "print", "(", "'Opening the profile in %s...'", "%", "profilelog", ")", "p", "=", "pstats", ".", "Stats", "(", "pro...
Parse a profile log and print the result on screen
[ "Parse", "a", "profile", "log", "and", "print", "the", "result", "on", "screen" ]
python
train
hobson/aima
aima/logic.py
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/logic.py#L301-L308
def parse_definite_clause(s):
    "Return the antecedents and the consequent of a definite clause."
    assert is_definite_clause(s)
    if is_symbol(s.op):
        # An atomic clause: no antecedents, the clause itself is the consequent.
        return [], s
    antecedent, consequent = s.args
    return conjuncts(antecedent), consequent
[ "def", "parse_definite_clause", "(", "s", ")", ":", "assert", "is_definite_clause", "(", "s", ")", "if", "is_symbol", "(", "s", ".", "op", ")", ":", "return", "[", "]", ",", "s", "else", ":", "antecedent", ",", "consequent", "=", "s", ".", "args", "r...
Return the antecedents and the consequent of a definite clause.
[ "Return", "the", "antecedents", "and", "the", "consequent", "of", "a", "definite", "clause", "." ]
python
valid
project-ncl/pnc-cli
pnc_cli/swagger_client/apis/builds_api.py
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/swagger_client/apis/builds_api.py#L377-L401
def get_ssh_credentials(self, id, **kwargs):
    """
    Gets ssh credentials for a build This GET request is for authenticated users only. The path for the endpoint is not restful to be able to authenticate this GET request only.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_ssh_credentials(id, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int id: BuildRecord id (required)
    :return: SshCredentialsSingleton
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always request just the deserialized data, never the full HTTP info.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        # Asynchronous mode: the underlying call returns the request thread.
        return self.get_ssh_credentials_with_http_info(id, **kwargs)
    # Synchronous mode: the underlying call returns the data directly.
    response_data = self.get_ssh_credentials_with_http_info(id, **kwargs)
    return response_data
[ "def", "get_ssh_credentials", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'callback'", ")", ":", "return", "self", ".", "get_ssh_credentials_with_ht...
Gets ssh credentials for a build This GET request is for authenticated users only. The path for the endpoint is not restful to be able to authenticate this GET request only. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_ssh_credentials(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: BuildRecord id (required) :return: SshCredentialsSingleton If the method is called asynchronously, returns the request thread.
[ "Gets", "ssh", "credentials", "for", "a", "build", "This", "GET", "request", "is", "for", "authenticated", "users", "only", ".", "The", "path", "for", "the", "endpoint", "is", "not", "restful", "to", "be", "able", "to", "authenticate", "this", "GET", "requ...
python
train
ns1/ns1-python
ns1/zones.py
https://github.com/ns1/ns1-python/blob/f3e1d90a3b76a1bd18f855f2c622a8a49d4b585e/ns1/zones.py#L228-L238
def loadRecord(self, domain, rtype, callback=None, errback=None):
    """
    Load a high level Record object from a domain within this Zone.

    :param str domain: The name of the record to load
    :param str rtype: The DNS record type
    :rtype: ns1.records.Record
    :return: new Record
    """
    # Build the record handle first, then trigger the (possibly async) load.
    record = Record(self, domain, rtype)
    return record.load(callback=callback, errback=errback)
[ "def", "loadRecord", "(", "self", ",", "domain", ",", "rtype", ",", "callback", "=", "None", ",", "errback", "=", "None", ")", ":", "rec", "=", "Record", "(", "self", ",", "domain", ",", "rtype", ")", "return", "rec", ".", "load", "(", "callback", ...
Load a high level Record object from a domain within this Zone. :param str domain: The name of the record to load :param str rtype: The DNS record type :rtype: ns1.records.Record :return: new Record
[ "Load", "a", "high", "level", "Record", "object", "from", "a", "domain", "within", "this", "Zone", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/core/state_machine.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/state_machine.py#L148-L157
def join(self):
    """Wait for root state to finish execution"""
    self._root_state.join()
    # Execution finished: close the execution-history log file, if any.
    if self._execution_histories:
        latest_history = self._execution_histories[-1]
        if latest_history.execution_history_storage is not None:
            set_read_and_writable_for_all = global_config.get_config_value(
                "EXECUTION_LOG_SET_READ_AND_WRITABLE_FOR_ALL", False)
            latest_history.execution_history_storage.close(set_read_and_writable_for_all)
    from rafcon.core.states.state import StateExecutionStatus
    self._root_state.state_execution_status = StateExecutionStatus.INACTIVE
[ "def", "join", "(", "self", ")", ":", "self", ".", "_root_state", ".", "join", "(", ")", "# execution finished, close execution history log file (if present)", "if", "len", "(", "self", ".", "_execution_histories", ")", ">", "0", ":", "if", "self", ".", "_execut...
Wait for root state to finish execution
[ "Wait", "for", "root", "state", "to", "finish", "execution" ]
python
train
ranaroussi/ezibpy
ezibpy/ezibpy.py
https://github.com/ranaroussi/ezibpy/blob/1a9d4bf52018abd2a01af7c991d7cf00cda53e0c/ezibpy/ezibpy.py#L1217-L1299
def triggerTrailingStops(self, tickerId):
    """ trigger waiting trailing stops

    Checks whether the pending trailing stop for the ticker's symbol should
    fire at the current last price; if so, moves the existing stop order and
    registers an active trailing stop.

    :param tickerId: id of the ticker whose last price just updated
    :returns: the modified stop order id on success, 0 if neither a trail
        amount nor a trail percent is set, otherwise None
    """
    # print('.') # test
    symbol = self.tickerSymbol(tickerId)
    price = self.marketData[tickerId]['last'][0]
    # contract = self.contracts[tickerId]

    if symbol in self.triggerableTrailingStops.keys():
        pendingOrder = self.triggerableTrailingStops[symbol]
        parentId = pendingOrder["parentId"]
        stopOrderId = pendingOrder["stopOrderId"]
        triggerPrice = pendingOrder["triggerPrice"]
        trailAmount = pendingOrder["trailAmount"]
        trailPercent = pendingOrder["trailPercent"]
        quantity = pendingOrder["quantity"]
        ticksize = pendingOrder["ticksize"]

        # print(">>>>>>>", pendingOrder)
        # print(">>>>>>>", parentId)
        # print(">>>>>>>", self.orders)

        # abort: drop the pending stop if its parent order disappeared,
        # or wait silently until the parent order is filled.
        if parentId not in self.orders.keys():
            # print("DELETING")
            del self.triggerableTrailingStops[symbol]
            return None
        else:
            if self.orders[parentId]["status"] != "FILLED":
                return None

        # print("\n\n", quantity, triggerPrice, price, "\n\n")

        # create the order once price crosses the trigger in the direction
        # of the position (long: price fell to trigger, short: price rose).
        if ((quantity > 0) & (triggerPrice >= price)) | ((quantity < 0) & (triggerPrice <= price)):

            newStop = price

            # Offset the new stop from the current price by the absolute
            # trail amount, or by a percentage of price; with neither set
            # the pending stop is discarded.
            if trailAmount > 0:
                if quantity > 0:
                    newStop += trailAmount
                else:
                    newStop -= trailAmount
            elif trailPercent > 0:
                if quantity > 0:
                    newStop += price * (trailPercent / 100)
                else:
                    newStop -= price * (trailPercent / 100)
            else:
                del self.triggerableTrailingStops[symbol]
                return 0

            # print("------", stopOrderId, parentId, newStop, quantity, "------")

            # use valid newStop: snap to the instrument's tick size
            newStop = self.roundClosestValid(newStop, ticksize)

            trailingStopOrderId = self.modifyStopOrder(
                orderId = stopOrderId,
                parentId = parentId,
                newStop = newStop,
                quantity = quantity
            )

            if trailingStopOrderId:
                # print(">>> TRAILING STOP")
                # Stop successfully moved: the pending entry becomes an
                # active, registered trailing stop.
                del self.triggerableTrailingStops[symbol]

                # register trailing stop
                tickerId = self.tickerId(symbol)
                self.registerTrailingStop(
                    tickerId = tickerId,
                    parentId = parentId,
                    orderId = stopOrderId,
                    lastPrice = price,
                    trailAmount = trailAmount,
                    trailPercent = trailPercent,
                    quantity = quantity,
                    ticksize = ticksize
                )

                return trailingStopOrderId

    return None
[ "def", "triggerTrailingStops", "(", "self", ",", "tickerId", ")", ":", "# print('.')", "# test", "symbol", "=", "self", ".", "tickerSymbol", "(", "tickerId", ")", "price", "=", "self", ".", "marketData", "[", "tickerId", "]", "[", "'last'", "]", "[", "0", ...
trigger waiting trailing stops
[ "trigger", "waiting", "trailing", "stops" ]
python
train
persephone-tools/persephone
persephone/preprocess/feat_extract.py
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/feat_extract.py#L188-L230
def kaldi_pitch(wav_dir: str, feat_dir: str) -> None:
    """ Extract Kaldi pitch features. Assumes 16k mono wav files."""

    logger.debug("Make wav.scp and pitch.scp files")

    # Collect the basenames of every wav file in the input directory.
    prefixes = [os.path.splitext(fn)[0]
                for fn in os.listdir(wav_dir)
                if os.path.splitext(fn)[1] == ".wav"]

    # Kaldi script file mapping utterance ids to wav paths.
    wav_scp_path = os.path.join(feat_dir, "wavs.scp")
    with open(wav_scp_path, "w") as wav_scp:
        for prefix in prefixes:
            wav_path = os.path.join(wav_dir, prefix + ".wav")
            logger.info("Writing wav file: %s", wav_path)
            print(prefix, wav_path, file=wav_scp)

    # Kaldi script file mapping utterance ids to pitch output paths.
    pitch_scp_path = os.path.join(feat_dir, "pitch_feats.scp")
    with open(pitch_scp_path, "w") as pitch_scp:
        for prefix in prefixes:
            pitch_path = os.path.join(feat_dir, prefix + ".pitch.txt")
            logger.info("Writing scp file: %s", pitch_path)
            print(prefix, pitch_path, file=pitch_scp)

    # Call Kaldi pitch feat extraction over everything listed in wavs.scp.
    args = [os.path.join(config.KALDI_ROOT, "src/featbin/compute-kaldi-pitch-feats"),
            "scp:%s" % (wav_scp_path),
            "scp,t:%s" % pitch_scp_path]
    logger.info("Extracting pitch features from wavs listed in {}".format(
        wav_scp_path))
    subprocess.run(args)

    # Convert the Kaldi pitch *.txt files to numpy arrays.
    for fn in os.listdir(feat_dir):
        if not fn.endswith(".pitch.txt"):
            continue
        pitch_feats = []
        with open(os.path.join(feat_dir, fn)) as f:
            for line in f:
                fields = line.split()
                # Lines with fewer than two fields carry no (nccf, pitch) pair.
                if len(fields) > 1:
                    pitch_feats.append([float(fields[0]), float(fields[1])])
        out_fn = os.path.splitext(fn)[0] + ".npy"
        np.save(os.path.join(feat_dir, out_fn), np.array(pitch_feats))
[ "def", "kaldi_pitch", "(", "wav_dir", ":", "str", ",", "feat_dir", ":", "str", ")", "->", "None", ":", "logger", ".", "debug", "(", "\"Make wav.scp and pitch.scp files\"", ")", "# Make wav.scp and pitch.scp files", "prefixes", "=", "[", "]", "for", "fn", "in", ...
Extract Kaldi pitch features. Assumes 16k mono wav files.
[ "Extract", "Kaldi", "pitch", "features", ".", "Assumes", "16k", "mono", "wav", "files", "." ]
python
train
dwkim78/upsilon
upsilon/extract_features/is_period_alias.py
https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/is_period_alias.py#L1-L57
def is_period_alias(period):
    """
    Check if a given period is possibly an alias.

    Parameters
    ----------
    period : float
        A period to test if it is a possible alias or not.

    Returns
    -------
    is_alias : boolean
        True if the given period is in a range of period alias.
    """
    # Based on the period vs periodSN plot of EROS-2 dataset (Kim+ 2014).
    # Period alias occurs mostly at ~1 and ~30 days. Each (lower, upper)
    # window below is also checked at 1/2 .. 1/5 of its value. The windows
    # comprise: one-day and one-month aliases; candidates from the two
    # fields 01, 08 (all close to one-day or sidereal aliases); and
    # candidates from the entire LMC fields (some overlap the above).
    alias_windows = (
        (0.99, 1.004),
        (1.03, 1.04),
        (29.2, 29.9),
        (0.96465, 0.96485),
        (0.96725, 0.96745),
        (0.98190, 0.98230),
        (1.01034, 1.01076),
        (1.01568, 1.01604),
        (1.01718, 1.01742),
        (0.50776, 0.50861),
        (0.96434, 0.9652),
        (0.96688, 0.96731),
        (1.0722, 1.0729),
        (27.1, 27.5),
    )

    # Check each 1, 2, 3, 4, 5 factor of every window.
    for factor in range(1, 6):
        for lower, upper in alias_windows:
            if lower / factor < period < upper / factor:
                return True

    # Not in the range of any alias.
    return False
[ "def", "is_period_alias", "(", "period", ")", ":", "# Based on the period vs periodSN plot of EROS-2 dataset (Kim+ 2014).", "# Period alias occurs mostly at ~1 and ~30.", "# Check each 1, 2, 3, 4, 5 factors.", "for", "i", "in", "range", "(", "1", ",", "6", ")", ":", "# One-day ...
Check if a given period is possibly an alias. Parameters ---------- period : float A period to test if it is a possible alias or not. Returns ------- is_alias : boolean True if the given period is in a range of period alias.
[ "Check", "if", "a", "given", "period", "is", "possibly", "an", "alias", "." ]
python
train
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/input_readers.py
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/input_readers.py#L1063-L1087
def _split_input_from_params(cls, app, namespaces, entity_kind_name, params, shard_count): """Return input reader objects. Helper for split_input.""" # pylint: disable=redefined-outer-name key_ranges = [] # KeyRanges for all namespaces for namespace in namespaces: key_ranges.extend( cls._split_input_from_namespace(app, namespace, entity_kind_name, shard_count)) # Divide the KeyRanges into shard_count shards. The KeyRanges for different # namespaces might be very different in size so the assignment of KeyRanges # to shards is done round-robin. shared_ranges = [[] for _ in range(shard_count)] for i, k_range in enumerate(key_ranges): shared_ranges[i % shard_count].append(k_range) batch_size = int(params.get(cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE)) return [cls(entity_kind_name, key_ranges=key_ranges, ns_range=None, batch_size=batch_size) for key_ranges in shared_ranges if key_ranges]
[ "def", "_split_input_from_params", "(", "cls", ",", "app", ",", "namespaces", ",", "entity_kind_name", ",", "params", ",", "shard_count", ")", ":", "# pylint: disable=redefined-outer-name", "key_ranges", "=", "[", "]", "# KeyRanges for all namespaces", "for", "namespace...
Return input reader objects. Helper for split_input.
[ "Return", "input", "reader", "objects", ".", "Helper", "for", "split_input", "." ]
python
train
the01/python-paps
paps/si/app/sensorServer.py
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/si/app/sensorServer.py#L392-L405
def stop(self):
    """
    Stop the sensor server (soft stop - signal packet loop to stop)

    Warning: Is non blocking (server might still do something after this!)

    :rtype: None
    """
    self.debug("()")
    super(SensorServer, self).stop()
    # Shut the multicast socket first so no new clients can join.
    if self._multicast_socket is not None:
        self._shutdown_multicast_socket()
    # Signal the packet-processing loop that it should wind down.
    self._is_stopped.set()
[ "def", "stop", "(", "self", ")", ":", "self", ".", "debug", "(", "\"()\"", ")", "super", "(", "SensorServer", ",", "self", ")", ".", "stop", "(", ")", "# No new clients", "if", "self", ".", "_multicast_socket", "is", "not", "None", ":", "self", ".", ...
Stop the sensor server (soft stop - signal packet loop to stop) Warning: Is non blocking (server might still do something after this!) :rtype: None
[ "Stop", "the", "sensor", "server", "(", "soft", "stop", "-", "signal", "packet", "loop", "to", "stop", ")", "Warning", ":", "Is", "non", "blocking", "(", "server", "might", "still", "do", "something", "after", "this!", ")" ]
python
train
UCSBarchlab/PyRTL
pyrtl/passes.py
https://github.com/UCSBarchlab/PyRTL/blob/0988e5c9c10ededd5e1f58d5306603f9edf4b3e2/pyrtl/passes.py#L299-L325
def _remove_unlistened_nets(block):
    """ Removes all nets that are not connected to an output wirevector """

    kept_nets = set()
    kept_wires = set()

    def keep(net):
        # Keeping a net also marks its argument wires as live, which can
        # make upstream nets live on the next sweep.
        kept_nets.add(net)
        kept_wires.update(net.args)

    # Seed with the sinks: memory-write nets ('@') and nets that drive an
    # Output wirevector.
    for net in block.logic:
        if net.op == '@' or any(isinstance(dest_wire, Output)
                                for dest_wire in net.dests):
            keep(net)

    # Propagate liveness backwards until a fixed point is reached.
    previous_count = -1
    while len(kept_nets) != previous_count:
        previous_count = len(kept_nets)
        for net in block.logic - kept_nets:
            if any(dest_wire in kept_wires for dest_wire in net.dests):
                keep(net)

    block.logic = kept_nets
    _remove_unused_wires(block)
[ "def", "_remove_unlistened_nets", "(", "block", ")", ":", "listened_nets", "=", "set", "(", ")", "listened_wires", "=", "set", "(", ")", "prev_listened_net_count", "=", "0", "def", "add_to_listened", "(", "net", ")", ":", "listened_nets", ".", "add", "(", "n...
Removes all nets that are not connected to an output wirevector
[ "Removes", "all", "nets", "that", "are", "not", "connected", "to", "an", "output", "wirevector" ]
python
train
cltk/cltk
cltk/phonology/orthophonology.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/phonology/orthophonology.py#L733-L744
def transcribe(self, text, as_phonemes = False):
    '''
    Transcribe a text: the text is first tokenized into words, then each
    word is transcribed individually.

    If as_phonemes is true, returns a list of lists of phoneme objects;
    otherwise returns a string concatenation of the IPA symbols of the
    phonemes, with words separated by spaces.
    '''
    phoneme_words = [self.transcribe_word(token) for token in self._tokenize(text)]
    if as_phonemes:
        return phoneme_words
    return ' '.join(''.join(phoneme.ipa for phoneme in word)
                    for word in phoneme_words)
[ "def", "transcribe", "(", "self", ",", "text", ",", "as_phonemes", "=", "False", ")", ":", "phoneme_words", "=", "[", "self", ".", "transcribe_word", "(", "word", ")", "for", "word", "in", "self", ".", "_tokenize", "(", "text", ")", "]", "if", "not", ...
Trascribes a text, which is first tokenized for words, then each word is transcribed. If as_phonemes is true, returns a list of list of phoneme objects, else returns a string concatenation of the IPA symbols of the phonemes.
[ "Trascribes", "a", "text", "which", "is", "first", "tokenized", "for", "words", "then", "each", "word", "is", "transcribed", ".", "If", "as_phonemes", "is", "true", "returns", "a", "list", "of", "list", "of", "phoneme", "objects", "else", "returns", "a", "...
python
train
aws/aws-dynamodb-encryption-python
src/dynamodb_encryption_sdk/internal/crypto/jce_bridge/primitives.py
https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/crypto/jce_bridge/primitives.py#L434-L460
def load_rsa_key(key, key_type, key_encoding):
    # (bytes, EncryptionKeyType, KeyEncodingType) -> Any
    # TODO: narrow down the output type
    """Load an RSA key object from the provided raw key bytes.

    :param bytes key: Raw key bytes to load
    :param EncryptionKeyType key_type: Type of key to load
    :param KeyEncodingType key_encoding: Encoding used to serialize ``key``
    :returns: Loaded key
    :rtype: TODO:
    :raises ValueError: if ``key_type`` and ``key_encoding`` are not a valid pairing
    """
    try:
        loader = _RSA_KEY_LOADING[key_type][key_encoding]
    except KeyError:
        raise ValueError("Invalid key type and encoding: {} and {}".format(key_type, key_encoding))

    kwargs = dict(data=key, backend=default_backend())
    # Private keys are stored unencrypted, so no password is supplied.
    if key_type is EncryptionKeyType.PRIVATE:
        kwargs["password"] = None

    loaded_key = loader(**kwargs)

    if loaded_key.key_size < MinimumKeySizes.RSA.value:
        # Fix: use lazy %-style logging arguments instead of eager string
        # interpolation, so formatting only happens if the record is emitted.
        _LOGGER.warning("RSA keys smaller than %d bits are unsafe", MinimumKeySizes.RSA.value)

    return loaded_key
[ "def", "load_rsa_key", "(", "key", ",", "key_type", ",", "key_encoding", ")", ":", "# (bytes, EncryptionKeyType, KeyEncodingType) -> Any", "# TODO: narrow down the output type", "try", ":", "loader", "=", "_RSA_KEY_LOADING", "[", "key_type", "]", "[", "key_encoding", "]",...
Load an RSA key object from the provided raw key bytes. :param bytes key: Raw key bytes to load :param EncryptionKeyType key_type: Type of key to load :param KeyEncodingType key_encoding: Encoding used to serialize ``key`` :returns: Loaded key :rtype: TODO: :raises ValueError: if ``key_type`` and ``key_encoding`` are not a valid pairing
[ "Load", "an", "RSA", "key", "object", "from", "the", "provided", "raw", "key", "bytes", "." ]
python
train
Cadasta/django-jsonattrs
jsonattrs/fields.py
https://github.com/Cadasta/django-jsonattrs/blob/5149e08ec84da00dd73bd3fe548bc52fd361667c/jsonattrs/fields.py#L123-L129
def _check_key(self, key): """ Ensure key is either in schema's attributes or already set on self. """ self.setup_schema() if key not in self._attrs and key not in self: raise KeyError(key)
[ "def", "_check_key", "(", "self", ",", "key", ")", ":", "self", ".", "setup_schema", "(", ")", "if", "key", "not", "in", "self", ".", "_attrs", "and", "key", "not", "in", "self", ":", "raise", "KeyError", "(", "key", ")" ]
Ensure key is either in schema's attributes or already set on self.
[ "Ensure", "key", "is", "either", "in", "schema", "s", "attributes", "or", "already", "set", "on", "self", "." ]
python
train
django-parler/django-parler
parler/views.py
https://github.com/django-parler/django-parler/blob/11ae4af5e8faddb74c69c848870122df4006a54e/parler/views.py#L214-L218
def get_language(self):
    """
    Get the language parameter from the current request.
    """
    # Bug fix: the original passed the *builtin* ``object`` type as the
    # ``object`` keyword argument. Pass the view's current object (if the
    # view has resolved one yet) so per-object language defaults work.
    return get_language_parameter(
        self.request,
        self.query_language_key,
        default=self.get_default_language(object=getattr(self, 'object', None)),
    )
[ "def", "get_language", "(", "self", ")", ":", "return", "get_language_parameter", "(", "self", ".", "request", ",", "self", ".", "query_language_key", ",", "default", "=", "self", ".", "get_default_language", "(", "object", "=", "object", ")", ")" ]
Get the language parameter from the current request.
[ "Get", "the", "language", "parameter", "from", "the", "current", "request", "." ]
python
train
materialsproject/pymatgen
pymatgen/analysis/graphs.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/graphs.py#L424-L465
def alter_edge(self, from_index, to_index, to_jimage=None, new_weight=None, new_edge_properties=None): """ Alters either the weight or the edge_properties of an edge in the StructureGraph. :param from_index: int :param to_index: int :param to_jimage: tuple :param new_weight: alter_edge does not require that weight be altered. As such, by default, this is None. If weight is to be changed, it should be a float. :param new_edge_properties: alter_edge does not require that edge_properties be altered. As such, by default, this is None. If any edge properties are to be changed, it should be a dictionary of edge properties to be changed. :return: """ existing_edges = self.graph.get_edge_data(from_index, to_index) # ensure that edge exists before attempting to change it if not existing_edges: raise ValueError("Edge between {} and {} cannot be altered;\ no edge exists between those sites.".format( from_index, to_index )) if to_jimage is None: edge_index = 0 else: for i, properties in existing_edges.items(): if properties["to_jimage"] == to_jimage: edge_index = i if new_weight is not None: self.graph[from_index][to_index][edge_index]['weight'] = new_weight if new_edge_properties is not None: for prop in list(new_edge_properties.keys()): self.graph[from_index][to_index][edge_index][prop] = new_edge_properties[prop]
[ "def", "alter_edge", "(", "self", ",", "from_index", ",", "to_index", ",", "to_jimage", "=", "None", ",", "new_weight", "=", "None", ",", "new_edge_properties", "=", "None", ")", ":", "existing_edges", "=", "self", ".", "graph", ".", "get_edge_data", "(", ...
Alters either the weight or the edge_properties of an edge in the StructureGraph. :param from_index: int :param to_index: int :param to_jimage: tuple :param new_weight: alter_edge does not require that weight be altered. As such, by default, this is None. If weight is to be changed, it should be a float. :param new_edge_properties: alter_edge does not require that edge_properties be altered. As such, by default, this is None. If any edge properties are to be changed, it should be a dictionary of edge properties to be changed. :return:
[ "Alters", "either", "the", "weight", "or", "the", "edge_properties", "of", "an", "edge", "in", "the", "StructureGraph", "." ]
python
train
aliyun/aliyun-odps-python-sdk
odps/ml/metrics/regression.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/ml/metrics/regression.py#L221-L237
def residual_histogram(df, col_true, col_pred=None): """ Compute histogram of residuals of a predicted DataFrame. Note that this method will trigger the defined flow to execute. :param df: predicted data frame :type df: DataFrame :param col_true: column name of true value :type col_true: str :param col_true: column name of predicted value, 'prediction_score' by default. :type col_pred: str :return: histograms for every columns, containing histograms and bins. """ if not col_pred: col_pred = get_field_name_by_role(df, FieldRole.PREDICTED_VALUE) return _run_evaluation_node(df, col_true, col_pred)['hist']
[ "def", "residual_histogram", "(", "df", ",", "col_true", ",", "col_pred", "=", "None", ")", ":", "if", "not", "col_pred", ":", "col_pred", "=", "get_field_name_by_role", "(", "df", ",", "FieldRole", ".", "PREDICTED_VALUE", ")", "return", "_run_evaluation_node", ...
Compute histogram of residuals of a predicted DataFrame. Note that this method will trigger the defined flow to execute. :param df: predicted data frame :type df: DataFrame :param col_true: column name of true value :type col_true: str :param col_true: column name of predicted value, 'prediction_score' by default. :type col_pred: str :return: histograms for every columns, containing histograms and bins.
[ "Compute", "histogram", "of", "residuals", "of", "a", "predicted", "DataFrame", "." ]
python
train
vmlaker/coils
coils/SortedList.py
https://github.com/vmlaker/coils/blob/a3a613b3d661dec010e5879c86e62cbff2519dd0/coils/SortedList.py#L23-L26
def getCountGT(self, item): """Return number of elements greater than *item*.""" index = bisect.bisect_right(self._list, item) return len(self._list) - index
[ "def", "getCountGT", "(", "self", ",", "item", ")", ":", "index", "=", "bisect", ".", "bisect_right", "(", "self", ".", "_list", ",", "item", ")", "return", "len", "(", "self", ".", "_list", ")", "-", "index" ]
Return number of elements greater than *item*.
[ "Return", "number", "of", "elements", "greater", "than", "*", "item", "*", "." ]
python
train
jaraco/irc
irc/client.py
https://github.com/jaraco/irc/blob/571c1f448d5d5bb92bbe2605c33148bf6e698413/irc/client.py#L403-L439
def cap(self, subcommand, *args): """ Send a CAP command according to `the spec <http://ircv3.atheme.org/specification/capability-negotiation-3.1>`_. Arguments: subcommand -- LS, LIST, REQ, ACK, CLEAR, END args -- capabilities, if required for given subcommand Example: .cap('LS') .cap('REQ', 'multi-prefix', 'sasl') .cap('END') """ cap_subcommands = set('LS LIST REQ ACK NAK CLEAR END'.split()) client_subcommands = set(cap_subcommands) - {'NAK'} assert subcommand in client_subcommands, "invalid subcommand" def _multi_parameter(args): """ According to the spec:: If more than one capability is named, the RFC1459 designated sentinel (:) for a multi-parameter argument must be present. It's not obvious where the sentinel should be present or if it must be omitted for a single parameter, so follow convention and only include the sentinel prefixed to the first parameter if more than one parameter is present. """ if len(args) > 1: return (':' + args[0],) + args[1:] return args self.send_items('CAP', subcommand, *_multi_parameter(args))
[ "def", "cap", "(", "self", ",", "subcommand", ",", "*", "args", ")", ":", "cap_subcommands", "=", "set", "(", "'LS LIST REQ ACK NAK CLEAR END'", ".", "split", "(", ")", ")", "client_subcommands", "=", "set", "(", "cap_subcommands", ")", "-", "{", "'NAK'", ...
Send a CAP command according to `the spec <http://ircv3.atheme.org/specification/capability-negotiation-3.1>`_. Arguments: subcommand -- LS, LIST, REQ, ACK, CLEAR, END args -- capabilities, if required for given subcommand Example: .cap('LS') .cap('REQ', 'multi-prefix', 'sasl') .cap('END')
[ "Send", "a", "CAP", "command", "according", "to", "the", "spec", "<http", ":", "//", "ircv3", ".", "atheme", ".", "org", "/", "specification", "/", "capability", "-", "negotiation", "-", "3", ".", "1", ">", "_", "." ]
python
train
pahaz/sshtunnel
sshtunnel.py
https://github.com/pahaz/sshtunnel/blob/66a923e4c6c8e41b8348420523fbf5ddfd53176c/sshtunnel.py#L263-L266
def _remove_none_values(dictionary): """ Remove dictionary keys whose value is None """ return list(map(dictionary.pop, [i for i in dictionary if dictionary[i] is None]))
[ "def", "_remove_none_values", "(", "dictionary", ")", ":", "return", "list", "(", "map", "(", "dictionary", ".", "pop", ",", "[", "i", "for", "i", "in", "dictionary", "if", "dictionary", "[", "i", "]", "is", "None", "]", ")", ")" ]
Remove dictionary keys whose value is None
[ "Remove", "dictionary", "keys", "whose", "value", "is", "None" ]
python
train
chop-dbhi/varify-data-warehouse
vdw/pipeline/load.py
https://github.com/chop-dbhi/varify-data-warehouse/blob/1600ee1bc5fae6c68fd03b23624467298570cca8/vdw/pipeline/load.py#L34-L55
def batch_stream(buff, stream, size=DEFAULT_BATCH_SIZE): """Writes a batch of `size` lines to `buff`. Returns boolean of whether the stream has been exhausted. """ buff.truncate(0) for _ in xrange(size): if hasattr(stream, 'readline'): line = stream.readline() else: try: line = next(stream) except StopIteration: line = '' # No more lines, return the tmp if line == '': buff.seek(0) return True buff.write(line) buff.seek(0) return False
[ "def", "batch_stream", "(", "buff", ",", "stream", ",", "size", "=", "DEFAULT_BATCH_SIZE", ")", ":", "buff", ".", "truncate", "(", "0", ")", "for", "_", "in", "xrange", "(", "size", ")", ":", "if", "hasattr", "(", "stream", ",", "'readline'", ")", ":...
Writes a batch of `size` lines to `buff`. Returns boolean of whether the stream has been exhausted.
[ "Writes", "a", "batch", "of", "size", "lines", "to", "buff", "." ]
python
train
klmitch/turnstile
turnstile/limits.py
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/limits.py#L146-L150
def _encode(cls, value): """Encode the given value, taking care of '%' and '/'.""" value = json.dumps(value) return cls._ENC_RE.sub(lambda x: '%%%2x' % ord(x.group(0)), value)
[ "def", "_encode", "(", "cls", ",", "value", ")", ":", "value", "=", "json", ".", "dumps", "(", "value", ")", "return", "cls", ".", "_ENC_RE", ".", "sub", "(", "lambda", "x", ":", "'%%%2x'", "%", "ord", "(", "x", ".", "group", "(", "0", ")", ")"...
Encode the given value, taking care of '%' and '/'.
[ "Encode", "the", "given", "value", "taking", "care", "of", "%", "and", "/", "." ]
python
train
keon/algorithms
algorithms/heap/sliding_window_max.py
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/heap/sliding_window_max.py#L23-L41
def max_sliding_window(nums, k): """ :type nums: List[int] :type k: int :rtype: List[int] """ if not nums: return nums queue = collections.deque() res = [] for num in nums: if len(queue) < k: queue.append(num) else: res.append(max(queue)) queue.popleft() queue.append(num) res.append(max(queue)) return res
[ "def", "max_sliding_window", "(", "nums", ",", "k", ")", ":", "if", "not", "nums", ":", "return", "nums", "queue", "=", "collections", ".", "deque", "(", ")", "res", "=", "[", "]", "for", "num", "in", "nums", ":", "if", "len", "(", "queue", ")", ...
:type nums: List[int] :type k: int :rtype: List[int]
[ ":", "type", "nums", ":", "List", "[", "int", "]", ":", "type", "k", ":", "int", ":", "rtype", ":", "List", "[", "int", "]" ]
python
train
aaugustin/websockets
src/websockets/headers.py
https://github.com/aaugustin/websockets/blob/17b3f47549b6f752a1be07fa1ba3037cb59c7d56/src/websockets/headers.py#L95-L107
def parse_quoted_string(header: str, pos: int, header_name: str) -> Tuple[str, int]: """ Parse a quoted string from ``header`` at the given position. Return the unquoted value and the new position. Raise :exc:`~websockets.exceptions.InvalidHeaderFormat` on invalid inputs. """ match = _quoted_string_re.match(header, pos) if match is None: raise InvalidHeaderFormat(header_name, "expected quoted string", header, pos) return _unquote_re.sub(r"\1", match.group()[1:-1]), match.end()
[ "def", "parse_quoted_string", "(", "header", ":", "str", ",", "pos", ":", "int", ",", "header_name", ":", "str", ")", "->", "Tuple", "[", "str", ",", "int", "]", ":", "match", "=", "_quoted_string_re", ".", "match", "(", "header", ",", "pos", ")", "i...
Parse a quoted string from ``header`` at the given position. Return the unquoted value and the new position. Raise :exc:`~websockets.exceptions.InvalidHeaderFormat` on invalid inputs.
[ "Parse", "a", "quoted", "string", "from", "header", "at", "the", "given", "position", "." ]
python
train
xtrementl/focus
focus/plugin/modules/apps.py
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/plugin/modules/apps.py#L24-L47
def _get_process_cwd(pid): """ Returns the working directory for the provided process identifier. `pid` System process identifier. Returns string or ``None``. Note this is used as a workaround, since `psutil` isn't consistent on being able to provide this path in all cases, especially MacOS X. """ cmd = 'lsof -a -p {0} -d cwd -Fn'.format(pid) data = common.shell_process(cmd) if not data is None: lines = str(data).split('\n') # the cwd is the second line with 'n' prefix removed from value if len(lines) > 1: return lines[1][1:] or None return None
[ "def", "_get_process_cwd", "(", "pid", ")", ":", "cmd", "=", "'lsof -a -p {0} -d cwd -Fn'", ".", "format", "(", "pid", ")", "data", "=", "common", ".", "shell_process", "(", "cmd", ")", "if", "not", "data", "is", "None", ":", "lines", "=", "str", "(", ...
Returns the working directory for the provided process identifier. `pid` System process identifier. Returns string or ``None``. Note this is used as a workaround, since `psutil` isn't consistent on being able to provide this path in all cases, especially MacOS X.
[ "Returns", "the", "working", "directory", "for", "the", "provided", "process", "identifier", "." ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/rnc_db.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_db.py#L322-L327
def get_datatype(cls, db: DATABASE_SUPPORTER_FWD_REF, table: str, column: str) -> str: """Returns database SQL datatype for a column: e.g. VARCHAR.""" raise RuntimeError(_MSG_NO_FLAVOUR)
[ "def", "get_datatype", "(", "cls", ",", "db", ":", "DATABASE_SUPPORTER_FWD_REF", ",", "table", ":", "str", ",", "column", ":", "str", ")", "->", "str", ":", "raise", "RuntimeError", "(", "_MSG_NO_FLAVOUR", ")" ]
Returns database SQL datatype for a column: e.g. VARCHAR.
[ "Returns", "database", "SQL", "datatype", "for", "a", "column", ":", "e", ".", "g", ".", "VARCHAR", "." ]
python
train
tanghaibao/goatools
goatools/anno/annoreader_base.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/anno/annoreader_base.py#L164-L175
def chk_qualifiers(self): """Check format of qualifier""" if self.name == 'id2gos': return for ntd in self.associations: # print(ntd) qual = ntd.Qualifier assert isinstance(qual, set), '{NAME}: QUALIFIER MUST BE A LIST: {NT}'.format( NAME=self.name, NT=ntd) assert qual != set(['']), ntd assert qual != set(['-']), ntd assert 'always' not in qual, 'SPEC SAID IT WOULD BE THERE'
[ "def", "chk_qualifiers", "(", "self", ")", ":", "if", "self", ".", "name", "==", "'id2gos'", ":", "return", "for", "ntd", "in", "self", ".", "associations", ":", "# print(ntd)", "qual", "=", "ntd", ".", "Qualifier", "assert", "isinstance", "(", "qual", "...
Check format of qualifier
[ "Check", "format", "of", "qualifier" ]
python
train
RJT1990/pyflux
pyflux/arma/nnar.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/arma/nnar.py#L136-L154
def _create_latent_variables(self): """ Creates the model's latent variables Returns ---------- None (changes model attributes) """ no_of_features = self.ar self.latent_variables.create(name='Bias', dim=[self.layers, self.units], prior=fam.Cauchy(0, 1, transform=None), q=fam.Normal(0, 3)) self.latent_variables.create(name='Output bias', dim=[1], prior=fam.Cauchy(0, 1, transform=None), q=fam.Normal(0, 3)) self.latent_variables.create(name='Input weight', dim=[self.units, self.ar], prior=fam.Cauchy(0, 1, transform=None), q=fam.Normal(0, 3)) self.latent_variables.create(name='Hidden weight', dim=[self.layers-1, self.units, self.units], prior=fam.Cauchy(0, 1, transform=None), q=fam.Normal(0, 3)) self.latent_variables.create(name='Output weight', dim=[self.units], prior=fam.Cauchy(0, 1, transform=None), q=fam.Normal(0, 3))
[ "def", "_create_latent_variables", "(", "self", ")", ":", "no_of_features", "=", "self", ".", "ar", "self", ".", "latent_variables", ".", "create", "(", "name", "=", "'Bias'", ",", "dim", "=", "[", "self", ".", "layers", ",", "self", ".", "units", "]", ...
Creates the model's latent variables Returns ---------- None (changes model attributes)
[ "Creates", "the", "model", "s", "latent", "variables" ]
python
train
xtrinch/fcm-django
fcm_django/admin.py
https://github.com/xtrinch/fcm-django/blob/8480d1cf935bfb28e2ad6d86a0abf923c2ecb266/fcm_django/admin.py#L24-L75
def send_messages(self, request, queryset, bulk=False, data=False): """ Provides error handling for DeviceAdmin send_message and send_bulk_message methods. """ ret = [] errors = [] total_failure = 0 for device in queryset: if bulk: if data: response = queryset.send_message( data={"Nick": "Mario"} ) else: response = queryset.send_message( title="Test notification", body="Test bulk notification" ) else: if data: response = device.send_message(data={"Nick": "Mario"}) else: response = device.send_message( title="Test notification", body="Test single notification" ) if response: ret.append(response) failure = int(response['failure']) total_failure += failure errors.append(str(response)) if bulk: break if ret: if errors: msg = _("Some messages were sent: %s" % (ret)) else: msg = _("All messages were sent: %s" % (ret)) self.message_user(request, msg) if total_failure > 0: self.message_user( request, _("Some messages failed to send. %d devices were marked as " "inactive." % total_failure), level=messages.WARNING )
[ "def", "send_messages", "(", "self", ",", "request", ",", "queryset", ",", "bulk", "=", "False", ",", "data", "=", "False", ")", ":", "ret", "=", "[", "]", "errors", "=", "[", "]", "total_failure", "=", "0", "for", "device", "in", "queryset", ":", ...
Provides error handling for DeviceAdmin send_message and send_bulk_message methods.
[ "Provides", "error", "handling", "for", "DeviceAdmin", "send_message", "and", "send_bulk_message", "methods", "." ]
python
train
NiklasRosenstein/py-localimport
localimport.py
https://github.com/NiklasRosenstein/py-localimport/blob/69af71c37f8bd3b2121ec39083dff18a9a2d04a1/localimport.py#L73-L107
def eval_pth(filename, sitedir, dest=None, imports=None): ''' Evaluates a `.pth` file (including support for `import` statements), and appends the result to the list *dest*. If *dest* is #None, it will fall back to `sys.path`. If *imports* is specified, it must be a list. `import` statements will not executed but instead appended to that list in tuples of (*filename*, *line*, *stmt*). Returns a tuple of (*dest*, *imports*). ''' if dest is None: dest = sys.path if not os.path.isfile(filename): return with open(filename, 'r') as fp: for index, line in enumerate(fp): if line.startswith('import'): if imports is None: exec_pth_import(filename, index+1, line) else: imports.append((filename, index+1, line)) else: index = line.find('#') if index > 0: line = line[:index] line = line.strip() if not os.path.isabs(line): line = os.path.join(os.path.dirname(filename), line) line = os.path.normpath(line) if line and line not in dest: dest.insert(0, line) return dest
[ "def", "eval_pth", "(", "filename", ",", "sitedir", ",", "dest", "=", "None", ",", "imports", "=", "None", ")", ":", "if", "dest", "is", "None", ":", "dest", "=", "sys", ".", "path", "if", "not", "os", ".", "path", ".", "isfile", "(", "filename", ...
Evaluates a `.pth` file (including support for `import` statements), and appends the result to the list *dest*. If *dest* is #None, it will fall back to `sys.path`. If *imports* is specified, it must be a list. `import` statements will not executed but instead appended to that list in tuples of (*filename*, *line*, *stmt*). Returns a tuple of (*dest*, *imports*).
[ "Evaluates", "a", ".", "pth", "file", "(", "including", "support", "for", "import", "statements", ")", "and", "appends", "the", "result", "to", "the", "list", "*", "dest", "*", ".", "If", "*", "dest", "*", "is", "#None", "it", "will", "fall", "back", ...
python
train
msoulier/tftpy
tftpy/TftpPacketTypes.py
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpPacketTypes.py#L399-L408
def encode(self): """Encode the DAT packet based on instance variables, populating self.buffer, returning self.""" fmt = b"!HH%dsx" % len(self.errmsgs[self.errorcode]) log.debug("encoding ERR packet with fmt %s", fmt) self.buffer = struct.pack(fmt, self.opcode, self.errorcode, self.errmsgs[self.errorcode]) return self
[ "def", "encode", "(", "self", ")", ":", "fmt", "=", "b\"!HH%dsx\"", "%", "len", "(", "self", ".", "errmsgs", "[", "self", ".", "errorcode", "]", ")", "log", ".", "debug", "(", "\"encoding ERR packet with fmt %s\"", ",", "fmt", ")", "self", ".", "buffer",...
Encode the DAT packet based on instance variables, populating self.buffer, returning self.
[ "Encode", "the", "DAT", "packet", "based", "on", "instance", "variables", "populating", "self", ".", "buffer", "returning", "self", "." ]
python
train
gwpy/gwpy
gwpy/types/index.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/index.py#L68-L77
def regular(self): """`True` if this index is linearly increasing """ try: return self.info.meta['regular'] except (TypeError, KeyError): if self.info.meta is None: self.info.meta = {} self.info.meta['regular'] = self.is_regular() return self.info.meta['regular']
[ "def", "regular", "(", "self", ")", ":", "try", ":", "return", "self", ".", "info", ".", "meta", "[", "'regular'", "]", "except", "(", "TypeError", ",", "KeyError", ")", ":", "if", "self", ".", "info", ".", "meta", "is", "None", ":", "self", ".", ...
`True` if this index is linearly increasing
[ "True", "if", "this", "index", "is", "linearly", "increasing" ]
python
train
IEMLdev/ieml
ieml/grammar/paths/parser/parser.py
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/ieml/grammar/paths/parser/parser.py#L98-L105
def p_coordinate(self, p): """ coordinate : COORD_KIND | COORD_KIND COORD_INDEX""" if len(p) == 2: p[0] = Coordinate(p[1]) else: p[0] = Coordinate(p[1], int(p[2]))
[ "def", "p_coordinate", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", ")", "==", "2", ":", "p", "[", "0", "]", "=", "Coordinate", "(", "p", "[", "1", "]", ")", "else", ":", "p", "[", "0", "]", "=", "Coordinate", "(", "p", "[", "1...
coordinate : COORD_KIND | COORD_KIND COORD_INDEX
[ "coordinate", ":", "COORD_KIND", "|", "COORD_KIND", "COORD_INDEX" ]
python
test
bioidiap/gridtk
gridtk/script/jman.py
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/script/jman.py#L236-L241
def run_job(args): """Starts the wrapper script to execute a job, interpreting the JOB_ID and SGE_TASK_ID keywords that are set by the grid or by us.""" jm = setup(args) job_id = int(os.environ['JOB_ID']) array_id = int(os.environ['SGE_TASK_ID']) if os.environ['SGE_TASK_ID'] != 'undefined' else None jm.run_job(job_id, array_id)
[ "def", "run_job", "(", "args", ")", ":", "jm", "=", "setup", "(", "args", ")", "job_id", "=", "int", "(", "os", ".", "environ", "[", "'JOB_ID'", "]", ")", "array_id", "=", "int", "(", "os", ".", "environ", "[", "'SGE_TASK_ID'", "]", ")", "if", "o...
Starts the wrapper script to execute a job, interpreting the JOB_ID and SGE_TASK_ID keywords that are set by the grid or by us.
[ "Starts", "the", "wrapper", "script", "to", "execute", "a", "job", "interpreting", "the", "JOB_ID", "and", "SGE_TASK_ID", "keywords", "that", "are", "set", "by", "the", "grid", "or", "by", "us", "." ]
python
train
google/grr
grr/server/grr_response_server/sequential_collection.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/sequential_collection.py#L122-L154
def Scan(self, after_timestamp=None, include_suffix=False, max_records=None): """Scans for stored records. Scans through the collection, returning stored values ordered by timestamp. Args: after_timestamp: If set, only returns values recorded after timestamp. include_suffix: If true, the timestamps returned are pairs of the form (micros_since_epoc, suffix) where suffix is a 24 bit random refinement to avoid collisions. Otherwise only micros_since_epoc is returned. max_records: The maximum number of records to return. Defaults to unlimited. Yields: Pairs (timestamp, rdf_value), indicating that rdf_value was stored at timestamp. """ suffix = None if isinstance(after_timestamp, tuple): suffix = after_timestamp[1] after_timestamp = after_timestamp[0] for item, timestamp, suffix in data_store.DB.CollectionScanItems( self.collection_id, self.RDF_TYPE, after_timestamp=after_timestamp, after_suffix=suffix, limit=max_records): if include_suffix: yield ((timestamp, suffix), item) else: yield (timestamp, item)
[ "def", "Scan", "(", "self", ",", "after_timestamp", "=", "None", ",", "include_suffix", "=", "False", ",", "max_records", "=", "None", ")", ":", "suffix", "=", "None", "if", "isinstance", "(", "after_timestamp", ",", "tuple", ")", ":", "suffix", "=", "af...
Scans for stored records. Scans through the collection, returning stored values ordered by timestamp. Args: after_timestamp: If set, only returns values recorded after timestamp. include_suffix: If true, the timestamps returned are pairs of the form (micros_since_epoc, suffix) where suffix is a 24 bit random refinement to avoid collisions. Otherwise only micros_since_epoc is returned. max_records: The maximum number of records to return. Defaults to unlimited. Yields: Pairs (timestamp, rdf_value), indicating that rdf_value was stored at timestamp.
[ "Scans", "for", "stored", "records", "." ]
python
train
lucapinello/Haystack
haystack/external.py
https://github.com/lucapinello/Haystack/blob/cc080d741f36cd77b07c0b59d08ea6a4cf0ef2f7/haystack/external.py#L794-L799
def score(self, seq, fwd='Y'): """ m.score(seq, fwd='Y') -- Returns the score of the first w-bases of the sequence, where w is the motif width. """ matches, endpoints, scores = self._scan(seq,threshold=-100000,forw_only=fwd) return scores[0]
[ "def", "score", "(", "self", ",", "seq", ",", "fwd", "=", "'Y'", ")", ":", "matches", ",", "endpoints", ",", "scores", "=", "self", ".", "_scan", "(", "seq", ",", "threshold", "=", "-", "100000", ",", "forw_only", "=", "fwd", ")", "return", "scores...
m.score(seq, fwd='Y') -- Returns the score of the first w-bases of the sequence, where w is the motif width.
[ "m", ".", "score", "(", "seq", "fwd", "=", "Y", ")", "--", "Returns", "the", "score", "of", "the", "first", "w", "-", "bases", "of", "the", "sequence", "where", "w", "is", "the", "motif", "width", "." ]
python
train
spyder-ide/spyder
spyder/utils/syntaxhighlighters.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/syntaxhighlighters.py#L733-L755
def highlight_block(self, text): """Implement highlight specific for Fortran.""" text = to_text_string(text) self.setFormat(0, len(text), self.formats["normal"]) match = self.PROG.search(text) index = 0 while match: for key, value in list(match.groupdict().items()): if value: start, end = match.span(key) index += end-start self.setFormat(start, end-start, self.formats[key]) if value.lower() in ("subroutine", "module", "function"): match1 = self.IDPROG.match(text, end) if match1: start1, end1 = match1.span(1) self.setFormat(start1, end1-start1, self.formats["definition"]) match = self.PROG.search(text, match.end()) self.highlight_spaces(text)
[ "def", "highlight_block", "(", "self", ",", "text", ")", ":", "text", "=", "to_text_string", "(", "text", ")", "self", ".", "setFormat", "(", "0", ",", "len", "(", "text", ")", ",", "self", ".", "formats", "[", "\"normal\"", "]", ")", "match", "=", ...
Implement highlight specific for Fortran.
[ "Implement", "highlight", "specific", "for", "Fortran", "." ]
python
train
mwouts/jupytext
jupytext/jupytext.py
https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/jupytext.py#L237-L263
def writes(notebook, fmt, version=nbformat.NO_CONVERT, **kwargs): """Write a notebook to a string""" metadata = deepcopy(notebook.metadata) rearrange_jupytext_metadata(metadata) fmt = copy(fmt) fmt = long_form_one_format(fmt, metadata) ext = fmt['extension'] format_name = fmt.get('format_name') jupytext_metadata = metadata.get('jupytext', {}) if ext == '.ipynb': # Remove jupytext section if empty jupytext_metadata.pop('text_representation', {}) if not jupytext_metadata: metadata.pop('jupytext', {}) return nbformat.writes(new_notebook(cells=notebook.cells, metadata=metadata), version, **kwargs) if not format_name: format_name = format_name_for_ext(metadata, ext, explicit_default=False) if format_name: fmt['format_name'] = format_name update_jupytext_formats_metadata(metadata, fmt) writer = TextNotebookConverter(fmt) return writer.writes(notebook, metadata)
[ "def", "writes", "(", "notebook", ",", "fmt", ",", "version", "=", "nbformat", ".", "NO_CONVERT", ",", "*", "*", "kwargs", ")", ":", "metadata", "=", "deepcopy", "(", "notebook", ".", "metadata", ")", "rearrange_jupytext_metadata", "(", "metadata", ")", "f...
Write a notebook to a string
[ "Write", "a", "notebook", "to", "a", "string" ]
python
train
curious-containers/cc-core
cc_core/commons/files.py
https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/files.py#L155-L163
def make_file_read_only(file_path): """ Removes the write permissions for the given file for owner, groups and others. :param file_path: The file whose privileges are revoked. :raise FileNotFoundError: If the given file does not exist. """ old_permissions = os.stat(file_path).st_mode os.chmod(file_path, old_permissions & ~WRITE_PERMISSIONS)
[ "def", "make_file_read_only", "(", "file_path", ")", ":", "old_permissions", "=", "os", ".", "stat", "(", "file_path", ")", ".", "st_mode", "os", ".", "chmod", "(", "file_path", ",", "old_permissions", "&", "~", "WRITE_PERMISSIONS", ")" ]
Removes the write permissions for the given file for owner, groups and others. :param file_path: The file whose privileges are revoked. :raise FileNotFoundError: If the given file does not exist.
[ "Removes", "the", "write", "permissions", "for", "the", "given", "file", "for", "owner", "groups", "and", "others", "." ]
python
train
dbcli/athenacli
athenacli/sqlexecute.py
https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/sqlexecute.py#L100-L105
def tables(self): '''Yields table names.''' with self.conn.cursor() as cur: cur.execute(self.TABLES_QUERY) for row in cur: yield row
[ "def", "tables", "(", "self", ")", ":", "with", "self", ".", "conn", ".", "cursor", "(", ")", "as", "cur", ":", "cur", ".", "execute", "(", "self", ".", "TABLES_QUERY", ")", "for", "row", "in", "cur", ":", "yield", "row" ]
Yields table names.
[ "Yields", "table", "names", "." ]
python
train
eighthave/pyvendapin
vendapin.py
https://github.com/eighthave/pyvendapin/blob/270c4da5c31ab4a0435660b25b655692fdffcf01/vendapin.py#L225-L234
def request_status(self): '''request the status of the card dispenser and return the status code''' self.sendcommand(Vendapin.REQUEST_STATUS) # wait for the reply time.sleep(1) response = self.receivepacket() if self.was_packet_accepted(response): return Vendapin.READY else: return self.parsedata(response)[0]
[ "def", "request_status", "(", "self", ")", ":", "self", ".", "sendcommand", "(", "Vendapin", ".", "REQUEST_STATUS", ")", "# wait for the reply", "time", ".", "sleep", "(", "1", ")", "response", "=", "self", ".", "receivepacket", "(", ")", "if", "self", "."...
request the status of the card dispenser and return the status code
[ "request", "the", "status", "of", "the", "card", "dispenser", "and", "return", "the", "status", "code" ]
python
train
twilio/twilio-python
twilio/rest/video/v1/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/video/v1/__init__.py#L46-L52
def composition_settings(self): """ :rtype: twilio.rest.video.v1.composition_settings.CompositionSettingsList """ if self._composition_settings is None: self._composition_settings = CompositionSettingsList(self) return self._composition_settings
[ "def", "composition_settings", "(", "self", ")", ":", "if", "self", ".", "_composition_settings", "is", "None", ":", "self", ".", "_composition_settings", "=", "CompositionSettingsList", "(", "self", ")", "return", "self", ".", "_composition_settings" ]
:rtype: twilio.rest.video.v1.composition_settings.CompositionSettingsList
[ ":", "rtype", ":", "twilio", ".", "rest", ".", "video", ".", "v1", ".", "composition_settings", ".", "CompositionSettingsList" ]
python
train
pyviz/holoviews
holoviews/core/spaces.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/spaces.py#L65-L82
def grid(self, dimensions=None, **kwargs): """Group by supplied dimension(s) and lay out groups in grid Groups data by supplied dimension(s) laying the groups along the dimension(s) out in a GridSpace. Args: dimensions: Dimension/str or list Dimension or list of dimensions to group by Returns: GridSpace with supplied dimensions """ dimensions = self._valid_dimensions(dimensions) if len(dimensions) == self.ndims: with item_check(False): return GridSpace(self, **kwargs).reindex(dimensions) return self.groupby(dimensions, container_type=GridSpace, **kwargs)
[ "def", "grid", "(", "self", ",", "dimensions", "=", "None", ",", "*", "*", "kwargs", ")", ":", "dimensions", "=", "self", ".", "_valid_dimensions", "(", "dimensions", ")", "if", "len", "(", "dimensions", ")", "==", "self", ".", "ndims", ":", "with", ...
Group by supplied dimension(s) and lay out groups in grid Groups data by supplied dimension(s) laying the groups along the dimension(s) out in a GridSpace. Args: dimensions: Dimension/str or list Dimension or list of dimensions to group by Returns: GridSpace with supplied dimensions
[ "Group", "by", "supplied", "dimension", "(", "s", ")", "and", "lay", "out", "groups", "in", "grid" ]
python
train
gwastro/pycbc-glue
pycbc_glue/gpstime.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/gpstime.py#L67-L72
def julianDay(year, month, day): "returns julian day=day since Jan 1 of year" hr = 12 #make sure you fall into right day, middle is save t = time.mktime((year, month, day, hr, 0, 0.0, 0, 0, -1)) julDay = time.localtime(t)[7] return julDay
[ "def", "julianDay", "(", "year", ",", "month", ",", "day", ")", ":", "hr", "=", "12", "#make sure you fall into right day, middle is save", "t", "=", "time", ".", "mktime", "(", "(", "year", ",", "month", ",", "day", ",", "hr", ",", "0", ",", "0.0", ",...
returns julian day=day since Jan 1 of year
[ "returns", "julian", "day", "=", "day", "since", "Jan", "1", "of", "year" ]
python
train
ambitioninc/django-query-builder
querybuilder/query.py
https://github.com/ambitioninc/django-query-builder/blob/113a7d845d3ddc6a45621b9880308e756f87c5bf/querybuilder/query.py#L1415-L1432
def build_joins(self): """ Generates the sql for the JOIN portion of the query :return: the JOIN portion of the query :rtype: str """ join_parts = [] # get the sql for each join object for join_item in self.joins: join_parts.append(join_item.get_sql()) # if there are any joins, combine them if len(join_parts): combined_joins = ' '.join(join_parts) return '{0} '.format(combined_joins) return ''
[ "def", "build_joins", "(", "self", ")", ":", "join_parts", "=", "[", "]", "# get the sql for each join object", "for", "join_item", "in", "self", ".", "joins", ":", "join_parts", ".", "append", "(", "join_item", ".", "get_sql", "(", ")", ")", "# if there are a...
Generates the sql for the JOIN portion of the query :return: the JOIN portion of the query :rtype: str
[ "Generates", "the", "sql", "for", "the", "JOIN", "portion", "of", "the", "query" ]
python
train
deontologician/restnavigator
restnavigator/halnav.py
https://github.com/deontologician/restnavigator/blob/453b9de4e70e602009d3e3ffafcf77d23c8b07c5/restnavigator/halnav.py#L63-L71
def cache(self, link, nav): '''Stores a navigator in the identity map for the current api. Can take a link or a bare uri''' if link is None: return # We don't cache navigators without a Link elif hasattr(link, 'uri'): self.id_map[link.uri] = nav else: self.id_map[link] = nav
[ "def", "cache", "(", "self", ",", "link", ",", "nav", ")", ":", "if", "link", "is", "None", ":", "return", "# We don't cache navigators without a Link", "elif", "hasattr", "(", "link", ",", "'uri'", ")", ":", "self", ".", "id_map", "[", "link", ".", "uri...
Stores a navigator in the identity map for the current api. Can take a link or a bare uri
[ "Stores", "a", "navigator", "in", "the", "identity", "map", "for", "the", "current", "api", ".", "Can", "take", "a", "link", "or", "a", "bare", "uri" ]
python
train
tensorflow/hub
tensorflow_hub/native_module.py
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/native_module.py#L773-L814
def register_ops_if_needed(graph_ops): """Register graph ops absent in op_def_registry, if present in c++ registry. Args: graph_ops: set with graph op names to register. Raises: RuntimeError: if `graph_ops` contains ops that are not in either python or c++ registry. """ missing_ops = graph_ops - set(op_def_registry.get_registered_ops().keys()) if not missing_ops: return p_buffer = c_api.TF_GetAllOpList() cpp_op_list = op_def_pb2.OpList() cpp_op_list.ParseFromString(c_api.TF_GetBuffer(p_buffer)) cpp_registry_ops = {op.name: op for op in cpp_op_list.op} missing_op_list = op_def_pb2.OpList() for missing_op in missing_ops: if missing_op not in cpp_registry_ops: logging.info( "Op %s is missing from both the python and C++ registry.", missing_op) else: missing_op_list.op.extend([cpp_registry_ops[missing_op]]) logging.info( "Adding op %s from c++ registry to python registry.", missing_op) op_def_registry.register_op_list(missing_op_list) # Note: Only raise missing op ValueError after trying to load ops. # This allows the test to exercise all the calls into TensorFlow # without having to write a C + python test. if not missing_ops <= set(cpp_registry_ops.keys()): raise RuntimeError( "Graph ops missing from the python registry (%s) are also absent from " "the c++ registry." % missing_ops.difference(set(cpp_registry_ops.keys())))
[ "def", "register_ops_if_needed", "(", "graph_ops", ")", ":", "missing_ops", "=", "graph_ops", "-", "set", "(", "op_def_registry", ".", "get_registered_ops", "(", ")", ".", "keys", "(", ")", ")", "if", "not", "missing_ops", ":", "return", "p_buffer", "=", "c_...
Register graph ops absent in op_def_registry, if present in c++ registry. Args: graph_ops: set with graph op names to register. Raises: RuntimeError: if `graph_ops` contains ops that are not in either python or c++ registry.
[ "Register", "graph", "ops", "absent", "in", "op_def_registry", "if", "present", "in", "c", "++", "registry", "." ]
python
train
CivicSpleen/ambry
ambry/library/filesystem.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/filesystem.py#L86-L92
def database_dsn(self): """Substitute the root dir into the database DSN, for Sqlite""" if not self._config.library.database: return 'sqlite:///{root}/library.db'.format(root=self._root) return self._config.library.database.format(root=self._root)
[ "def", "database_dsn", "(", "self", ")", ":", "if", "not", "self", ".", "_config", ".", "library", ".", "database", ":", "return", "'sqlite:///{root}/library.db'", ".", "format", "(", "root", "=", "self", ".", "_root", ")", "return", "self", ".", "_config"...
Substitute the root dir into the database DSN, for Sqlite
[ "Substitute", "the", "root", "dir", "into", "the", "database", "DSN", "for", "Sqlite" ]
python
train
saltstack/salt
salt/modules/boto_elb.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elb.py#L357-L380
def apply_security_groups(name, security_groups, region=None, key=None, keyid=None, profile=None): ''' Apply security groups to ELB. CLI example: .. code-block:: bash salt myminion boto_elb.apply_security_groups myelb '["mysecgroup1"]' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if isinstance(security_groups, six.string_types): security_groups = salt.utils.json.loads(security_groups) try: conn.apply_security_groups_to_lb(name, security_groups) log.info('Applied security_groups on ELB %s', name) return True except boto.exception.BotoServerError as e: log.debug(e) log.error('Failed to appply security_groups on ELB %s: %s', name, e.message) return False
[ "def", "apply_security_groups", "(", "name", ",", "security_groups", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "ke...
Apply security groups to ELB. CLI example: .. code-block:: bash salt myminion boto_elb.apply_security_groups myelb '["mysecgroup1"]'
[ "Apply", "security", "groups", "to", "ELB", "." ]
python
train
taborlab/FlowCal
FlowCal/plot.py
https://github.com/taborlab/FlowCal/blob/031a7af82acb1d46879a8e384a1a00f27f0bdc7a/FlowCal/plot.py#L1259-L1405
def scatter2d(data_list, channels=[0,1], xscale='logicle', yscale='logicle', xlabel=None, ylabel=None, xlim=None, ylim=None, title=None, color=None, savefig=None, **kwargs): """ Plot 2D scatter plot from one or more FCSData objects or numpy arrays. Parameters ---------- data_list : array or FCSData or list of array or list of FCSData Flow cytometry data to plot. channels : list of int, list of str Two channels to use for the plot. savefig : str, optional The name of the file to save the figure to. If None, do not save. Other parameters ---------------- xscale : str, optional Scale of the x axis, either ``linear``, ``log``, or ``logicle``. yscale : str, optional Scale of the y axis, either ``linear``, ``log``, or ``logicle``. xlabel : str, optional Label to use on the x axis. If None, attempts to extract channel name from last data object. ylabel : str, optional Label to use on the y axis. If None, attempts to extract channel name from last data object. xlim : tuple, optional Limits for the x axis. If None, attempts to extract limits from the range of the last data object. ylim : tuple, optional Limits for the y axis. If None, attempts to extract limits from the range of the last data object. title : str, optional Plot title. color : matplotlib color or list of matplotlib colors, optional Color for the scatter plot. It can be a list with the same length as `data_list`. If `color` is not specified, elements from `data_list` are plotted with colors taken from the module-level variable `cmap_default`. kwargs : dict, optional Additional parameters passed directly to matploblib's ``scatter``. Notes ----- `scatter2d` calls matplotlib's ``scatter`` function for each object in data_list. Additional keyword arguments provided to `scatter2d` are passed directly to ``plt.scatter``. 
""" # Check appropriate number of channels if len(channels) != 2: raise ValueError('two channels need to be specified') # Convert to list if necessary if not isinstance(data_list, list): data_list = [data_list] # Default colors if color is None: color = [cmap_default(i) for i in np.linspace(0, 1, len(data_list))] # Convert color to list, if necessary if not isinstance(color, list): color = [color]*len(data_list) # Iterate through data_list for i, data in enumerate(data_list): # Get channels to plot data_plot = data[:, channels] # Make scatter plot plt.scatter(data_plot[:,0], data_plot[:,1], s=5, alpha=0.25, color=color[i], **kwargs) # Set labels if specified, else try to extract channel names if xlabel is not None: plt.xlabel(xlabel) elif hasattr(data_plot, 'channels'): plt.xlabel(data_plot.channels[0]) if ylabel is not None: plt.ylabel(ylabel) elif hasattr(data_plot, 'channels'): plt.ylabel(data_plot.channels[1]) # Set scale of axes if xscale=='logicle': plt.gca().set_xscale(xscale, data=data_list, channel=channels[0]) else: plt.gca().set_xscale(xscale) if yscale=='logicle': plt.gca().set_yscale(yscale, data=data_list, channel=channels[1]) else: plt.gca().set_yscale(yscale) # Set plot limits if specified, else extract range from data_list. # ``.hist_bins`` with one bin works better for visualization that # ``.range``, because it deals with two issues. First, it automatically # deals with range values that are outside the domain of the current scaling # (e.g. when the lower range value is zero and the scaling is logarithmic). # Second, it takes into account events that are outside the limits specified # by .range (e.g. negative events will be shown with logicle scaling, even # when the lower range is zero). 
if xlim is None: xlim = [np.inf, -np.inf] for data in data_list: if hasattr(data, 'hist_bins') and \ hasattr(data.hist_bins, '__call__'): xlim_data = data.hist_bins(channels=channels[0], nbins=1, scale=xscale) xlim[0] = xlim_data[0] if xlim_data[0] < xlim[0] else xlim[0] xlim[1] = xlim_data[1] if xlim_data[1] > xlim[1] else xlim[1] plt.xlim(xlim) if ylim is None: ylim = [np.inf, -np.inf] for data in data_list: if hasattr(data, 'hist_bins') and \ hasattr(data.hist_bins, '__call__'): ylim_data = data.hist_bins(channels=channels[1], nbins=1, scale=yscale) ylim[0] = ylim_data[0] if ylim_data[0] < ylim[0] else ylim[0] ylim[1] = ylim_data[1] if ylim_data[1] > ylim[1] else ylim[1] plt.ylim(ylim) # Title if title is not None: plt.title(title) # Save if necessary if savefig is not None: plt.tight_layout() plt.savefig(savefig, dpi=savefig_dpi) plt.close()
[ "def", "scatter2d", "(", "data_list", ",", "channels", "=", "[", "0", ",", "1", "]", ",", "xscale", "=", "'logicle'", ",", "yscale", "=", "'logicle'", ",", "xlabel", "=", "None", ",", "ylabel", "=", "None", ",", "xlim", "=", "None", ",", "ylim", "=...
Plot 2D scatter plot from one or more FCSData objects or numpy arrays. Parameters ---------- data_list : array or FCSData or list of array or list of FCSData Flow cytometry data to plot. channels : list of int, list of str Two channels to use for the plot. savefig : str, optional The name of the file to save the figure to. If None, do not save. Other parameters ---------------- xscale : str, optional Scale of the x axis, either ``linear``, ``log``, or ``logicle``. yscale : str, optional Scale of the y axis, either ``linear``, ``log``, or ``logicle``. xlabel : str, optional Label to use on the x axis. If None, attempts to extract channel name from last data object. ylabel : str, optional Label to use on the y axis. If None, attempts to extract channel name from last data object. xlim : tuple, optional Limits for the x axis. If None, attempts to extract limits from the range of the last data object. ylim : tuple, optional Limits for the y axis. If None, attempts to extract limits from the range of the last data object. title : str, optional Plot title. color : matplotlib color or list of matplotlib colors, optional Color for the scatter plot. It can be a list with the same length as `data_list`. If `color` is not specified, elements from `data_list` are plotted with colors taken from the module-level variable `cmap_default`. kwargs : dict, optional Additional parameters passed directly to matploblib's ``scatter``. Notes ----- `scatter2d` calls matplotlib's ``scatter`` function for each object in data_list. Additional keyword arguments provided to `scatter2d` are passed directly to ``plt.scatter``.
[ "Plot", "2D", "scatter", "plot", "from", "one", "or", "more", "FCSData", "objects", "or", "numpy", "arrays", "." ]
python
train
materialsproject/pymatgen
pymatgen/io/abinit/works.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/works.py#L192-L200
def disconnect_signals(self): """ Disable the signals within the work. This function reverses the process of `connect_signals` """ for task in self: try: dispatcher.disconnect(self.on_ok, signal=task.S_OK, sender=task) except dispatcher.errors.DispatcherKeyError as exc: logger.debug(str(exc))
[ "def", "disconnect_signals", "(", "self", ")", ":", "for", "task", "in", "self", ":", "try", ":", "dispatcher", ".", "disconnect", "(", "self", ".", "on_ok", ",", "signal", "=", "task", ".", "S_OK", ",", "sender", "=", "task", ")", "except", "dispatche...
Disable the signals within the work. This function reverses the process of `connect_signals`
[ "Disable", "the", "signals", "within", "the", "work", ".", "This", "function", "reverses", "the", "process", "of", "connect_signals" ]
python
train
ethereum/pyethereum
ethereum/tools/keys.py
https://github.com/ethereum/pyethereum/blob/b704a5c6577863edc539a1ec3d2620a443b950fb/ethereum/tools/keys.py#L161-L184
def check_keystore_json(jsondata): """Check if ``jsondata`` has the structure of a keystore file version 3. Note that this test is not complete, e.g. it doesn't check key derivation or cipher parameters. :param jsondata: dictionary containing the data from the json file :returns: `True` if the data appears to be valid, otherwise `False` """ if 'crypto' not in jsondata and 'Crypto' not in jsondata: return False if 'version' not in jsondata: return False if jsondata['version'] != 3: return False crypto = jsondata.get('crypto', jsondata.get('Crypto')) if 'cipher' not in crypto: return False if 'ciphertext' not in crypto: return False if 'kdf' not in crypto: return False if 'mac' not in crypto: return False return True
[ "def", "check_keystore_json", "(", "jsondata", ")", ":", "if", "'crypto'", "not", "in", "jsondata", "and", "'Crypto'", "not", "in", "jsondata", ":", "return", "False", "if", "'version'", "not", "in", "jsondata", ":", "return", "False", "if", "jsondata", "[",...
Check if ``jsondata`` has the structure of a keystore file version 3. Note that this test is not complete, e.g. it doesn't check key derivation or cipher parameters. :param jsondata: dictionary containing the data from the json file :returns: `True` if the data appears to be valid, otherwise `False`
[ "Check", "if", "jsondata", "has", "the", "structure", "of", "a", "keystore", "file", "version", "3", "." ]
python
train
pantsbuild/pants
contrib/go/src/python/pants/contrib/go/tasks/go_workspace_task.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/contrib/go/src/python/pants/contrib/go/tasks/go_workspace_task.py#L32-L34
def get_gopath(self, target): """Returns the $GOPATH for the given target.""" return os.path.join(self.workdir, target.id)
[ "def", "get_gopath", "(", "self", ",", "target", ")", ":", "return", "os", ".", "path", ".", "join", "(", "self", ".", "workdir", ",", "target", ".", "id", ")" ]
Returns the $GOPATH for the given target.
[ "Returns", "the", "$GOPATH", "for", "the", "given", "target", "." ]
python
train
hthiery/python-fritzhome
pyfritzhome/fritzhome.py
https://github.com/hthiery/python-fritzhome/blob/c74bd178d08a305028f316f7da35202da3526f61/pyfritzhome/fritzhome.py#L88-L106
def _aha_request(self, cmd, ain=None, param=None, rf=str): """Send an AHA request.""" url = 'http://' + self._host + '/webservices/homeautoswitch.lua' params = { 'switchcmd': cmd, 'sid': self._sid } if param: params['param'] = param if ain: params['ain'] = ain plain = self._request(url, params) if plain == 'inval': raise InvalidError if rf == bool: return bool(int(plain)) return rf(plain)
[ "def", "_aha_request", "(", "self", ",", "cmd", ",", "ain", "=", "None", ",", "param", "=", "None", ",", "rf", "=", "str", ")", ":", "url", "=", "'http://'", "+", "self", ".", "_host", "+", "'/webservices/homeautoswitch.lua'", "params", "=", "{", "'swi...
Send an AHA request.
[ "Send", "an", "AHA", "request", "." ]
python
train
Kozea/cairocffi
cairocffi/patterns.py
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/patterns.py#L352-L363
def get_radial_circles(self): """Return this radial gradient’s endpoint circles, each specified as a center coordinate and a radius. :returns: A ``(cx0, cy0, radius0, cx1, cy1, radius1)`` tuple of floats. """ circles = ffi.new('double[6]') _check_status(cairo.cairo_pattern_get_radial_circles( self._pointer, circles + 0, circles + 1, circles + 2, circles + 3, circles + 4, circles + 5)) return tuple(circles)
[ "def", "get_radial_circles", "(", "self", ")", ":", "circles", "=", "ffi", ".", "new", "(", "'double[6]'", ")", "_check_status", "(", "cairo", ".", "cairo_pattern_get_radial_circles", "(", "self", ".", "_pointer", ",", "circles", "+", "0", ",", "circles", "+...
Return this radial gradient’s endpoint circles, each specified as a center coordinate and a radius. :returns: A ``(cx0, cy0, radius0, cx1, cy1, radius1)`` tuple of floats.
[ "Return", "this", "radial", "gradient’s", "endpoint", "circles", "each", "specified", "as", "a", "center", "coordinate", "and", "a", "radius", "." ]
python
train
romana/vpc-router
vpcrouter/watcher/plugins/configfile.py
https://github.com/romana/vpc-router/blob/d696c2e023f1111ceb61f9c6fbabfafed8e14040/vpcrouter/watcher/plugins/configfile.py#L167-L184
def get_info(self): """ Return plugin information. """ return { self.get_plugin_name() : { "version" : self.get_version(), "params" : { "file" : self.conf['file'] }, "stats" : { "last_route_spec_update" : self.last_route_spec_update.isoformat() if self.last_route_spec_update else "(no update, yet)" } } }
[ "def", "get_info", "(", "self", ")", ":", "return", "{", "self", ".", "get_plugin_name", "(", ")", ":", "{", "\"version\"", ":", "self", ".", "get_version", "(", ")", ",", "\"params\"", ":", "{", "\"file\"", ":", "self", ".", "conf", "[", "'file'", "...
Return plugin information.
[ "Return", "plugin", "information", "." ]
python
train
coleifer/walrus
walrus/containers.py
https://github.com/coleifer/walrus/blob/82bf15a6613487b5b5fefeb488f186d7e0106547/walrus/containers.py#L540-L550
def add(self, _mapping=None, **kwargs): """ Add the given item/score pairs to the ZSet. Arguments are specified as ``item1, score1, item2, score2...``. """ if _mapping is not None: _mapping.update(kwargs) mapping = _mapping else: mapping = _mapping return self.database.zadd(self.key, mapping)
[ "def", "add", "(", "self", ",", "_mapping", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "_mapping", "is", "not", "None", ":", "_mapping", ".", "update", "(", "kwargs", ")", "mapping", "=", "_mapping", "else", ":", "mapping", "=", "_mapping"...
Add the given item/score pairs to the ZSet. Arguments are specified as ``item1, score1, item2, score2...``.
[ "Add", "the", "given", "item", "/", "score", "pairs", "to", "the", "ZSet", ".", "Arguments", "are", "specified", "as", "item1", "score1", "item2", "score2", "...", "." ]
python
train
LionelR/pyair
pyair/xair.py
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/xair.py#L132-L144
def _connect(self): """ Connexion à la base XAIR """ try: # On passe par Oracle Instant Client avec le TNS ORA_FULL self.conn = cx_Oracle.connect(self._ORA_FULL) self.cursor = self.conn.cursor() print('XAIR: Connexion établie') except cx_Oracle.Error as e: print("Erreur: %s" % (e)) raise cx_Oracle.Error('Echec de connexion')
[ "def", "_connect", "(", "self", ")", ":", "try", ":", "# On passe par Oracle Instant Client avec le TNS ORA_FULL", "self", ".", "conn", "=", "cx_Oracle", ".", "connect", "(", "self", ".", "_ORA_FULL", ")", "self", ".", "cursor", "=", "self", ".", "conn", ".", ...
Connexion à la base XAIR
[ "Connexion", "à", "la", "base", "XAIR" ]
python
valid
mehmetg/streak_client
streak_client/streak_client.py
https://github.com/mehmetg/streak_client/blob/46575510b4e4163a4a3cc06f7283a1ae377cdce6/streak_client/streak_client.py#L764-L776
def _get_newsfeeds(self, uri, detail_level = None): '''General purpose function to get newsfeeds Args: uri uri for the feed base detail_level arguments for req str ['ALL', 'CONDENSED'] return list of feed dicts parse at your convenience ''' if detail_level: if detail_level not in ['ALL', 'CONDENSED']: return requests.codes.bad_request, {'success' : 'False', 'error': 'detailLevel needs to be provided and field_type needs to be \'ALL\' or \'CONDENSED\''} uri += self.detail_level_suffix + detail_level return self._req('get', uri)
[ "def", "_get_newsfeeds", "(", "self", ",", "uri", ",", "detail_level", "=", "None", ")", ":", "if", "detail_level", ":", "if", "detail_level", "not", "in", "[", "'ALL'", ",", "'CONDENSED'", "]", ":", "return", "requests", ".", "codes", ".", "bad_request", ...
General purpose function to get newsfeeds Args: uri uri for the feed base detail_level arguments for req str ['ALL', 'CONDENSED'] return list of feed dicts parse at your convenience
[ "General", "purpose", "function", "to", "get", "newsfeeds", "Args", ":", "uri", "uri", "for", "the", "feed", "base", "detail_level", "arguments", "for", "req", "str", "[", "ALL", "CONDENSED", "]", "return", "list", "of", "feed", "dicts", "parse", "at", "yo...
python
train
miguelgrinberg/python-engineio
engineio/client.py
https://github.com/miguelgrinberg/python-engineio/blob/261fd67103cb5d9a44369415748e66fdf62de6fb/engineio/client.py#L483-L500
def _ping_loop(self): """This background task sends a PING to the server at the requested interval. """ self.pong_received = True self.ping_loop_event.clear() while self.state == 'connected': if not self.pong_received: self.logger.info( 'PONG response has not been received, aborting') if self.ws: self.ws.close() self.queue.put(None) break self.pong_received = False self._send_packet(packet.Packet(packet.PING)) self.ping_loop_event.wait(timeout=self.ping_interval) self.logger.info('Exiting ping task')
[ "def", "_ping_loop", "(", "self", ")", ":", "self", ".", "pong_received", "=", "True", "self", ".", "ping_loop_event", ".", "clear", "(", ")", "while", "self", ".", "state", "==", "'connected'", ":", "if", "not", "self", ".", "pong_received", ":", "self"...
This background task sends a PING to the server at the requested interval.
[ "This", "background", "task", "sends", "a", "PING", "to", "the", "server", "at", "the", "requested", "interval", "." ]
python
train
LionelAuroux/pyrser
pyrser/ast/state.py
https://github.com/LionelAuroux/pyrser/blob/f153a97ef2b6bf915a1ed468c0252a9a59b754d5/pyrser/ast/state.py#L65-L83
def to_dot(self) -> str: """ Provide a '.dot' representation of all State in the register. """ txt = "" txt += "digraph S%d {\n" % id(self) if self.label is not None: txt += '\tlabel="%s";\n' % (self.label + '\l').replace('\n', '\l') txt += "\trankdir=LR;\n" #txt += '\tlabelloc="t";\n' txt += '\tgraph [labeljust=l, labelloc=t, nojustify=true];\n' txt += "\tesep=1;\n" txt += '\tranksep="equally";\n' txt += "\tnode [shape = circle];\n" txt += "\tsplines = ortho;\n" for s in self.states.values(): txt += s[1].to_dot() txt += "}\n" return txt
[ "def", "to_dot", "(", "self", ")", "->", "str", ":", "txt", "=", "\"\"", "txt", "+=", "\"digraph S%d {\\n\"", "%", "id", "(", "self", ")", "if", "self", ".", "label", "is", "not", "None", ":", "txt", "+=", "'\\tlabel=\"%s\";\\n'", "%", "(", "self", "...
Provide a '.dot' representation of all State in the register.
[ "Provide", "a", ".", "dot", "representation", "of", "all", "State", "in", "the", "register", "." ]
python
test
ska-sa/katcp-python
katcp/resource.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/resource.py#L893-L964
def wait(self, condition_or_value, timeout=None): """Wait for the sensor to satisfy a condition. Parameters ---------- condition_or_value : obj or callable, or seq of objs or callables If obj, sensor.value is compared with obj. If callable, condition_or_value(reading) is called, and must return True if its condition is satisfied. Since the reading is passed in, the value, status, timestamp or received_timestamp attributes can all be used in the check. TODO: Sequences of conditions (use SensorTransitionWaiter thingum?) timeout : float or None The timeout in seconds (None means wait forever) Returns ------- This command returns a tornado Future that resolves with True when the sensor value satisfies the condition. It will never resolve with False; if a timeout is given a TimeoutError happens instead. Raises ------ :class:`KATCPSensorError` If the sensor does not have a strategy set :class:`tornado.gen.TimeoutError` If the sensor condition still fails after a stated timeout period """ if (isinstance(condition_or_value, collections.Sequence) and not isinstance(condition_or_value, basestring)): raise NotImplementedError( 'Currently only single conditions are supported') condition_test = (condition_or_value if callable(condition_or_value) else lambda s: s.value == condition_or_value) ioloop = tornado.ioloop.IOLoop.current() f = Future() if self.sampling_strategy == ('none', ): raise KATCPSensorError( 'Cannot wait on a sensor that does not have a strategy set') def handle_update(sensor, reading): # This handler is called whenever a sensor update is received try: assert sensor is self if condition_test(reading): self.unregister_listener(handle_update) # Try and be idempotent if called multiple times after the # condition is matched. This should not happen unless the # sensor object is being updated in a thread outside of the # ioloop. 
if not f.done(): ioloop.add_callback(f.set_result, True) except Exception: f.set_exc_info(sys.exc_info()) self.unregister_listener(handle_update) self.register_listener(handle_update, reading=True) # Handle case where sensor is already at the desired value ioloop.add_callback(handle_update, self, self._reading) if timeout: to = ioloop.time() + timeout timeout_f = with_timeout(to, f) # Make sure we stop listening if the wait times out to prevent a # buildup of listeners timeout_f.add_done_callback( lambda f: self.unregister_listener(handle_update)) return timeout_f else: return f
[ "def", "wait", "(", "self", ",", "condition_or_value", ",", "timeout", "=", "None", ")", ":", "if", "(", "isinstance", "(", "condition_or_value", ",", "collections", ".", "Sequence", ")", "and", "not", "isinstance", "(", "condition_or_value", ",", "basestring"...
Wait for the sensor to satisfy a condition. Parameters ---------- condition_or_value : obj or callable, or seq of objs or callables If obj, sensor.value is compared with obj. If callable, condition_or_value(reading) is called, and must return True if its condition is satisfied. Since the reading is passed in, the value, status, timestamp or received_timestamp attributes can all be used in the check. TODO: Sequences of conditions (use SensorTransitionWaiter thingum?) timeout : float or None The timeout in seconds (None means wait forever) Returns ------- This command returns a tornado Future that resolves with True when the sensor value satisfies the condition. It will never resolve with False; if a timeout is given a TimeoutError happens instead. Raises ------ :class:`KATCPSensorError` If the sensor does not have a strategy set :class:`tornado.gen.TimeoutError` If the sensor condition still fails after a stated timeout period
[ "Wait", "for", "the", "sensor", "to", "satisfy", "a", "condition", "." ]
python
train
instacart/jardin
jardin/tools.py
https://github.com/instacart/jardin/blob/007e283b9ccd621b60b86679148cacd9eab7c4e3/jardin/tools.py#L72-L110
def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None): """Retry calling the decorated function using an exponential backoff. original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry :param ExceptionToCheck: the exception to check. may be a tuple of exceptions to check :type ExceptionToCheck: Exception or tuple :param tries: number of times to try (not retry) before giving up :type tries: int :param delay: initial delay between retries in seconds :type delay: int :param backoff: backoff multiplier e.g. value of 2 will double the delay each retry :type backoff: int :param logger: logger to use. If None, print :type logger: logging.Logger instance """ def deco_retry(f): @wraps(f) def f_retry(*args, **kwargs): mtries, mdelay = tries, delay while mtries > 1: try: return f(*args, **kwargs) except ExceptionToCheck as e: msg = "%s, Retrying in %d seconds..." % (str(e), mdelay) if logger: logger.warning(msg) else: print(msg) time.sleep(mdelay) mtries -= 1 mdelay *= backoff return f(*args, **kwargs) return f_retry # true decorator return deco_retry
[ "def", "retry", "(", "ExceptionToCheck", ",", "tries", "=", "4", ",", "delay", "=", "3", ",", "backoff", "=", "2", ",", "logger", "=", "None", ")", ":", "def", "deco_retry", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "f_retry", "(", ...
Retry calling the decorated function using an exponential backoff. original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry :param ExceptionToCheck: the exception to check. may be a tuple of exceptions to check :type ExceptionToCheck: Exception or tuple :param tries: number of times to try (not retry) before giving up :type tries: int :param delay: initial delay between retries in seconds :type delay: int :param backoff: backoff multiplier e.g. value of 2 will double the delay each retry :type backoff: int :param logger: logger to use. If None, print :type logger: logging.Logger instance
[ "Retry", "calling", "the", "decorated", "function", "using", "an", "exponential", "backoff", ".", "original", "from", ":", "http", ":", "//", "wiki", ".", "python", ".", "org", "/", "moin", "/", "PythonDecoratorLibrary#Retry" ]
python
train
thombashi/pytablewriter
pytablewriter/writer/text/_text_writer.py
https://github.com/thombashi/pytablewriter/blob/52ea85ed8e89097afa64f137c6a1b3acdfefdbda/pytablewriter/writer/text/_text_writer.py#L179-L201
def dump(self, output, close_after_write=True): """Write data to the output with tabular format. Args: output (file descriptor or str): file descriptor or path to the output file. close_after_write (bool, optional): Close the output after write. Defaults to |True|. """ try: output.write self.stream = output except AttributeError: self.stream = io.open(output, "w", encoding="utf-8") try: self.write_table() finally: if close_after_write: self.stream.close() self.stream = sys.stdout
[ "def", "dump", "(", "self", ",", "output", ",", "close_after_write", "=", "True", ")", ":", "try", ":", "output", ".", "write", "self", ".", "stream", "=", "output", "except", "AttributeError", ":", "self", ".", "stream", "=", "io", ".", "open", "(", ...
Write data to the output with tabular format. Args: output (file descriptor or str): file descriptor or path to the output file. close_after_write (bool, optional): Close the output after write. Defaults to |True|.
[ "Write", "data", "to", "the", "output", "with", "tabular", "format", "." ]
python
train
eventable/vobject
vobject/base.py
https://github.com/eventable/vobject/blob/498555a553155ea9b26aace93332ae79365ecb31/vobject/base.py#L242-L258
def serialize(self, buf=None, lineLength=75, validate=True, behavior=None): """ Serialize to buf if it exists, otherwise return a string. Use self.behavior.serialize if behavior exists. """ if not behavior: behavior = self.behavior if behavior: if DEBUG: logger.debug("serializing {0!s} with behavior {1!s}".format(self.name, behavior)) return behavior.serialize(self, buf, lineLength, validate) else: if DEBUG: logger.debug("serializing {0!s} without behavior".format(self.name)) return defaultSerialize(self, buf, lineLength)
[ "def", "serialize", "(", "self", ",", "buf", "=", "None", ",", "lineLength", "=", "75", ",", "validate", "=", "True", ",", "behavior", "=", "None", ")", ":", "if", "not", "behavior", ":", "behavior", "=", "self", ".", "behavior", "if", "behavior", ":...
Serialize to buf if it exists, otherwise return a string. Use self.behavior.serialize if behavior exists.
[ "Serialize", "to", "buf", "if", "it", "exists", "otherwise", "return", "a", "string", "." ]
python
train
rocky/python3-trepan
trepan/processor/command/info_subcmd/pc.py
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/processor/command/info_subcmd/pc.py#L42-L73
def run(self, args): """Program counter.""" mainfile = self.core.filename(None) if self.core.is_running(): curframe = self.proc.curframe if curframe: line_no = inspect.getlineno(curframe) offset = curframe.f_lasti self.msg("PC offset is %d." % offset) offset = max(offset, 0) code = curframe.f_code co_code = code.co_code disassemble_bytes(self.msg, self.msg_nocr, co_code, offset, line_no, line_no-1, line_no+1, constants=code.co_consts, cells=code.co_cellvars, varnames=code.co_varnames, freevars=code.co_freevars, linestarts=dict(findlinestarts(code)), end_offset=offset+10) pass pass else: if mainfile: part1 = "Python program '%s'" % mainfile msg = "is not currently running. " self.msg(Mmisc.wrapped_lines(part1, msg, self.settings['width'])) else: self.msg('No Python program is currently running.') pass self.msg(self.core.execution_status) pass return False
[ "def", "run", "(", "self", ",", "args", ")", ":", "mainfile", "=", "self", ".", "core", ".", "filename", "(", "None", ")", "if", "self", ".", "core", ".", "is_running", "(", ")", ":", "curframe", "=", "self", ".", "proc", ".", "curframe", "if", "...
Program counter.
[ "Program", "counter", "." ]
python
test
lucaskjaero/PyCasia
pycasia/CASIA.py
https://github.com/lucaskjaero/PyCasia/blob/511ddb7809d788fc2c7bc7c1e8600db60bac8152/pycasia/CASIA.py#L57-L70
def get_all_datasets(self): """ Make sure the datasets are present. If not, downloads and extracts them. Attempts the download five times because the file hosting is unreliable. :return: True if successful, false otherwise """ success = True for dataset in tqdm(self.datasets): individual_success = self.get_dataset(dataset) if not individual_success: success = False return success
[ "def", "get_all_datasets", "(", "self", ")", ":", "success", "=", "True", "for", "dataset", "in", "tqdm", "(", "self", ".", "datasets", ")", ":", "individual_success", "=", "self", ".", "get_dataset", "(", "dataset", ")", "if", "not", "individual_success", ...
Make sure the datasets are present. If not, downloads and extracts them. Attempts the download five times because the file hosting is unreliable. :return: True if successful, false otherwise
[ "Make", "sure", "the", "datasets", "are", "present", ".", "If", "not", "downloads", "and", "extracts", "them", ".", "Attempts", "the", "download", "five", "times", "because", "the", "file", "hosting", "is", "unreliable", ".", ":", "return", ":", "True", "i...
python
train
nugget/python-insteonplm
insteonplm/tools.py
https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/tools.py#L340-L344
def kpl_off(self, address, group): """Get the status of a KPL button.""" addr = Address(address) device = self.plm.devices[addr.id] device.states[group].off()
[ "def", "kpl_off", "(", "self", ",", "address", ",", "group", ")", ":", "addr", "=", "Address", "(", "address", ")", "device", "=", "self", ".", "plm", ".", "devices", "[", "addr", ".", "id", "]", "device", ".", "states", "[", "group", "]", ".", "...
Get the status of a KPL button.
[ "Get", "the", "status", "of", "a", "KPL", "button", "." ]
python
train
NLeSC/noodles
noodles/run/xenon/dynamic_pool.py
https://github.com/NLeSC/noodles/blob/3759e24e6e54a3a1a364431309dbb1061f617c04/noodles/run/xenon/dynamic_pool.py#L130-L143
def wait_until_running(self, callback=None): """Waits until the remote worker is running, then calls the callback. Usually, this method is passed to a different thread; the callback is then a function patching results through to the result queue.""" status = self.machine.scheduler.wait_until_running( self.job, self.worker_config.time_out) if status.running: self.online = True if callback: callback(self) else: raise TimeoutError("Timeout while waiting for worker to run: " + self.worker_config.name)
[ "def", "wait_until_running", "(", "self", ",", "callback", "=", "None", ")", ":", "status", "=", "self", ".", "machine", ".", "scheduler", ".", "wait_until_running", "(", "self", ".", "job", ",", "self", ".", "worker_config", ".", "time_out", ")", "if", ...
Waits until the remote worker is running, then calls the callback. Usually, this method is passed to a different thread; the callback is then a function patching results through to the result queue.
[ "Waits", "until", "the", "remote", "worker", "is", "running", "then", "calls", "the", "callback", ".", "Usually", "this", "method", "is", "passed", "to", "a", "different", "thread", ";", "the", "callback", "is", "then", "a", "function", "patching", "results"...
python
train
iterative/dvc
dvc/config.py
https://github.com/iterative/dvc/blob/8bb21261e34c9632453e09090de7ebe50e38d341/dvc/config.py#L32-L43
def supported_cache_type(types): """Checks if link type config option has a valid value. Args: types (list/string): type(s) of links that dvc should try out. """ if isinstance(types, str): types = [typ.strip() for typ in types.split(",")] for typ in types: if typ not in ["reflink", "hardlink", "symlink", "copy"]: return False return True
[ "def", "supported_cache_type", "(", "types", ")", ":", "if", "isinstance", "(", "types", ",", "str", ")", ":", "types", "=", "[", "typ", ".", "strip", "(", ")", "for", "typ", "in", "types", ".", "split", "(", "\",\"", ")", "]", "for", "typ", "in", ...
Checks if link type config option has a valid value. Args: types (list/string): type(s) of links that dvc should try out.
[ "Checks", "if", "link", "type", "config", "option", "has", "a", "valid", "value", "." ]
python
train
pawelad/pymonzo
src/pymonzo/monzo_api.py
https://github.com/pawelad/pymonzo/blob/b5c8d4f46dcb3a2f475797a8b8ef1c15f6493fb9/src/pymonzo/monzo_api.py#L119-L133
def _save_token_on_disk(self): """Helper function that saves the token on disk""" token = self._token.copy() # Client secret is needed for token refreshing and isn't returned # as a pared of OAuth token by default token.update(client_secret=self._client_secret) with codecs.open(config.TOKEN_FILE_PATH, 'w', 'utf8') as f: json.dump( token, f, ensure_ascii=False, sort_keys=True, indent=4, )
[ "def", "_save_token_on_disk", "(", "self", ")", ":", "token", "=", "self", ".", "_token", ".", "copy", "(", ")", "# Client secret is needed for token refreshing and isn't returned", "# as a pared of OAuth token by default", "token", ".", "update", "(", "client_secret", "=...
Helper function that saves the token on disk
[ "Helper", "function", "that", "saves", "the", "token", "on", "disk" ]
python
train
helixyte/everest
everest/repositories/rdb/utils.py
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/repositories/rdb/utils.py#L228-L240
def inspect(orm_class, attribute_name): """ :param attribute_name: name of the mapped attribute to inspect. :returns: list of 2-tuples containing information about the inspected attribute (first element: mapped entity attribute kind; second attribute: mapped entity attribute) """ key = (orm_class, attribute_name) elems = OrmAttributeInspector.__cache.get(key) if elems is None: elems = OrmAttributeInspector.__inspect(key) OrmAttributeInspector.__cache[key] = elems return elems
[ "def", "inspect", "(", "orm_class", ",", "attribute_name", ")", ":", "key", "=", "(", "orm_class", ",", "attribute_name", ")", "elems", "=", "OrmAttributeInspector", ".", "__cache", ".", "get", "(", "key", ")", "if", "elems", "is", "None", ":", "elems", ...
:param attribute_name: name of the mapped attribute to inspect. :returns: list of 2-tuples containing information about the inspected attribute (first element: mapped entity attribute kind; second attribute: mapped entity attribute)
[ ":", "param", "attribute_name", ":", "name", "of", "the", "mapped", "attribute", "to", "inspect", ".", ":", "returns", ":", "list", "of", "2", "-", "tuples", "containing", "information", "about", "the", "inspected", "attribute", "(", "first", "element", ":",...
python
train
jepegit/cellpy
cellpy/readers/dbreader.py
https://github.com/jepegit/cellpy/blob/9f4a84cdd11f72cfa02cda8c2d7b5174abbb7370/cellpy/readers/dbreader.py#L430-L470
def filter_by_col_value(self, column_name, min_val=None, max_val=None): """filters sheet/table by column. The routine returns the serial-numbers with min_val <= values >= max_val in the selected column. Args: column_name (str): column name. min_val (int): minimum value of serial number. max_val (int): maximum value of serial number. Returns: pandas.DataFrame """ sheet = self.table identity = self.db_sheet_cols.id exists_col_number = self.db_sheet_cols.exists exists = sheet.loc[:, exists_col_number] > 0 if min_val is not None and max_val is not None: criterion1 = sheet.loc[:, column_name] >= min_val criterion2 = sheet.loc[:, column_name] <= max_val sheet = sheet[criterion1 & criterion2 & exists] elif min_val is not None or max_val is not None: if min_val is not None: criterion = sheet.loc[:, column_name] >= min_val if max_val is not None: criterion = sheet.loc[:, column_name] <= max_val # noinspection PyUnboundLocalVariable sheet = sheet[criterion & exists] else: sheet = sheet[exists] return sheet.loc[:, identity].values.astype(int)
[ "def", "filter_by_col_value", "(", "self", ",", "column_name", ",", "min_val", "=", "None", ",", "max_val", "=", "None", ")", ":", "sheet", "=", "self", ".", "table", "identity", "=", "self", ".", "db_sheet_cols", ".", "id", "exists_col_number", "=", "self...
filters sheet/table by column. The routine returns the serial-numbers with min_val <= values >= max_val in the selected column. Args: column_name (str): column name. min_val (int): minimum value of serial number. max_val (int): maximum value of serial number. Returns: pandas.DataFrame
[ "filters", "sheet", "/", "table", "by", "column", "." ]
python
train
globocom/GloboNetworkAPI-client-python
networkapiclient/Ambiente.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/Ambiente.py#L228-L259
def listar_por_equip(self, equip_id): """Lista todos os ambientes por equipamento especifico. :return: Dicionário com a seguinte estrutura: :: {'ambiente': {'id': < id_ambiente >, 'link': < link >, 'id_divisao': < id_divisao >, 'nome_divisao': < nome_divisao >, 'id_ambiente_logico': < id_ambiente_logico >, 'nome_ambiente_logico': < nome_ambiente_logico >, 'id_grupo_l3': < id_grupo_l3 >, 'nome_grupo_l3': < nome_grupo_l3 >, 'id_filter': < id_filter >, 'filter_name': < filter_name >, 'ambiente_rede': < ambiente_rede >}} :raise DataBaseError: Falha na networkapi ao acessar o banco de dados. :raise XMLError: Falha na networkapi ao gerar o XML de resposta. """ if equip_id is None: raise InvalidParameterError( u'O id do equipamento não foi informado.') url = 'ambiente/equip/' + str(equip_id) + '/' code, xml = self.submit(None, 'GET', url) return self.response(code, xml)
[ "def", "listar_por_equip", "(", "self", ",", "equip_id", ")", ":", "if", "equip_id", "is", "None", ":", "raise", "InvalidParameterError", "(", "u'O id do equipamento não foi informado.')", "", "url", "=", "'ambiente/equip/'", "+", "str", "(", "equip_id", ")", "+",...
Lista todos os ambientes por equipamento especifico. :return: Dicionário com a seguinte estrutura: :: {'ambiente': {'id': < id_ambiente >, 'link': < link >, 'id_divisao': < id_divisao >, 'nome_divisao': < nome_divisao >, 'id_ambiente_logico': < id_ambiente_logico >, 'nome_ambiente_logico': < nome_ambiente_logico >, 'id_grupo_l3': < id_grupo_l3 >, 'nome_grupo_l3': < nome_grupo_l3 >, 'id_filter': < id_filter >, 'filter_name': < filter_name >, 'ambiente_rede': < ambiente_rede >}} :raise DataBaseError: Falha na networkapi ao acessar o banco de dados. :raise XMLError: Falha na networkapi ao gerar o XML de resposta.
[ "Lista", "todos", "os", "ambientes", "por", "equipamento", "especifico", "." ]
python
train
ShawnClake/Apitax
apitax/ah/api/util.py
https://github.com/ShawnClake/Apitax/blob/2eb9c6990d4088b2503c7f13c2a76f8e59606e6d/apitax/ah/api/util.py#L130-L141
def _deserialize_dict(data, boxed_type): """Deserializes a dict and its elements. :param data: dict to deserialize. :type data: dict :param boxed_type: class literal. :return: deserialized dict. :rtype: dict """ return {k: _deserialize(v, boxed_type) for k, v in six.iteritems(data)}
[ "def", "_deserialize_dict", "(", "data", ",", "boxed_type", ")", ":", "return", "{", "k", ":", "_deserialize", "(", "v", ",", "boxed_type", ")", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "data", ")", "}" ]
Deserializes a dict and its elements. :param data: dict to deserialize. :type data: dict :param boxed_type: class literal. :return: deserialized dict. :rtype: dict
[ "Deserializes", "a", "dict", "and", "its", "elements", "." ]
python
train
scottrice/pysteam
pysteam/steam.py
https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/steam.py#L12-L43
def get_steam(): """ Returns a Steam object representing the current Steam installation on the users computer. If the user doesn't have Steam installed, returns None. """ # Helper function which checks if the potential userdata directory exists # and returns a new Steam instance with that userdata directory if it does. # If the directory doesnt exist it returns None instead helper = lambda udd: Steam(udd) if os.path.exists(udd) else None # For both OS X and Linux, Steam stores it's userdata in a consistent # location. plat = platform.system() if plat == 'Darwin': return helper(paths.default_osx_userdata_path()) if plat == 'Linux': return helper(paths.default_linux_userdata_path()) # Windows is a bit trickier. The userdata directory is stored in the Steam # installation directory, meaning that theoretically it could be anywhere. # Luckily, Valve stores the installation directory in the registry, so its # still possible for us to figure out automatically if plat == 'Windows': possible_dir = winutils.find_userdata_directory() # Unlike the others, `possible_dir` might be None (if something odd # happened with the registry) return helper(possible_dir) if possible_dir is not None else None # This should never be hit. Windows, OS X, and Linux should be the only # supported platforms. # TODO: Add logging here so that the user (developer) knows that something # odd happened. return None
[ "def", "get_steam", "(", ")", ":", "# Helper function which checks if the potential userdata directory exists", "# and returns a new Steam instance with that userdata directory if it does.", "# If the directory doesnt exist it returns None instead", "helper", "=", "lambda", "udd", ":", "St...
Returns a Steam object representing the current Steam installation on the users computer. If the user doesn't have Steam installed, returns None.
[ "Returns", "a", "Steam", "object", "representing", "the", "current", "Steam", "installation", "on", "the", "users", "computer", ".", "If", "the", "user", "doesn", "t", "have", "Steam", "installed", "returns", "None", "." ]
python
train
openstack/networking-cisco
networking_cisco/apps/saf/db/dfa_db_models.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/db/dfa_db_models.py#L1129-L1141
def query_topology_db(self, dict_convert=False, **req): """Query an entry to the topology DB. """ session = db.get_session() with session.begin(subtransactions=True): try: # Check if entry exists. topo_disc = session.query(DfaTopologyDb).filter_by(**req).all() except orm_exc.NoResultFound: LOG.info("No Topology results found for %s", req) return None if dict_convert: return self._convert_topo_obj_dict(topo_disc) return topo_disc
[ "def", "query_topology_db", "(", "self", ",", "dict_convert", "=", "False", ",", "*", "*", "req", ")", ":", "session", "=", "db", ".", "get_session", "(", ")", "with", "session", ".", "begin", "(", "subtransactions", "=", "True", ")", ":", "try", ":", ...
Query an entry to the topology DB.
[ "Query", "an", "entry", "to", "the", "topology", "DB", "." ]
python
train
lcharleux/argiope
argiope/mesh.py
https://github.com/lcharleux/argiope/blob/8170e431362dc760589f7d141090fd133dece259/argiope/mesh.py#L327-L332
def nvert(self): """ Returns the number of vertices of eache element according to its type/ """ return self.elements.type.argiope.map( lambda t: ELEMENTS[t].nvert)
[ "def", "nvert", "(", "self", ")", ":", "return", "self", ".", "elements", ".", "type", ".", "argiope", ".", "map", "(", "lambda", "t", ":", "ELEMENTS", "[", "t", "]", ".", "nvert", ")" ]
Returns the number of vertices of eache element according to its type/
[ "Returns", "the", "number", "of", "vertices", "of", "eache", "element", "according", "to", "its", "type", "/" ]
python
test
waqasbhatti/astrobase
astrobase/checkplot/png.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/checkplot/png.py#L108-L364
def _make_periodogram(axes, lspinfo, objectinfo, findercmap, finderconvolve, verbose=True, findercachedir='~/.astrobase/stamp-cache'): '''Makes periodogram, objectinfo, and finder tile for `checkplot_png` and `twolsp_checkplot_png`. Parameters ---------- axes : matplotlib.axes.Axes object The Axes object which will contain the plot being made. lspinfo : dict Dict containing results from a period-finder in `astrobase.periodbase` or a dict that corresponds to that format. objectinfo : dict Dict containing basic info about the object being processed. findercmap : matplotlib Colormap object or str The Colormap object to use for the finder chart image. finderconvolve : astropy.convolution.Kernel object or None If not None, the Kernel object to use for convolving the finder image. verbose : bool If True, indicates progress. findercachedir : str The directory where the FITS finder images are downloaded and cached. Returns ------- Does not return anything, works on the input Axes object directly. 
''' # get the appropriate plot ylabel pgramylabel = PLOTYLABELS[lspinfo['method']] # get the periods and lspvals from lspinfo periods = lspinfo['periods'] lspvals = lspinfo['lspvals'] bestperiod = lspinfo['bestperiod'] nbestperiods = lspinfo['nbestperiods'] nbestlspvals = lspinfo['nbestlspvals'] # make the LSP plot on the first subplot axes.plot(periods,lspvals) axes.set_xscale('log',basex=10) axes.set_xlabel('Period [days]') axes.set_ylabel(pgramylabel) plottitle = '%s - %.6f d' % (METHODLABELS[lspinfo['method']], bestperiod) axes.set_title(plottitle) # show the best five peaks on the plot for bestperiod, bestpeak in zip(nbestperiods, nbestlspvals): axes.annotate('%.6f' % bestperiod, xy=(bestperiod, bestpeak), xycoords='data', xytext=(0.0,25.0), textcoords='offset points', arrowprops=dict(arrowstyle="->"),fontsize='14.0') # make a grid axes.grid(color='#a9a9a9', alpha=0.9, zorder=0, linewidth=1.0, linestyle=':') # if objectinfo is present, get things from it if (objectinfo and isinstance(objectinfo, dict) and ('objectid' in objectinfo or 'hatid' in objectinfo) and 'ra' in objectinfo and 'decl' in objectinfo and objectinfo['ra'] and objectinfo['decl']): if 'objectid' not in objectinfo: objectid = objectinfo['hatid'] else: objectid = objectinfo['objectid'] if verbose: LOGINFO('adding in object information and ' 'finder chart for %s at RA: %.3f, DEC: %.3f' % (objectid, objectinfo['ra'], objectinfo['decl'])) # calculate colors if ('bmag' in objectinfo and 'vmag' in objectinfo and 'jmag' in objectinfo and 'kmag' in objectinfo and 'sdssi' in objectinfo and objectinfo['bmag'] and objectinfo['vmag'] and objectinfo['jmag'] and objectinfo['kmag'] and objectinfo['sdssi']): bvcolor = objectinfo['bmag'] - objectinfo['vmag'] jkcolor = objectinfo['jmag'] - objectinfo['kmag'] ijcolor = objectinfo['sdssi'] - objectinfo['jmag'] else: bvcolor = None jkcolor = None ijcolor = None if ('teff' in objectinfo and 'gmag' in objectinfo and objectinfo['teff'] and objectinfo['gmag']): # Gaia 
data input teff_val = objectinfo['teff'] gmag = objectinfo['gmag'] # bump the ylim of the LSP plot so that the overplotted finder and # objectinfo can fit in this axes plot lspylim = axes.get_ylim() axes.set_ylim(lspylim[0], lspylim[1]+0.75*(lspylim[1]-lspylim[0])) # get the stamp try: dss, dssheader = skyview_stamp(objectinfo['ra'], objectinfo['decl'], convolvewith=finderconvolve, flip=False, cachedir=findercachedir, verbose=verbose) stamp = dss # inset plot it on the current axes inset = inset_axes(axes, width="40%", height="40%", loc=1) inset.imshow(stamp, cmap=findercmap, origin='lower') inset.set_xticks([]) inset.set_yticks([]) inset.set_frame_on(False) # grid lines pointing to the center of the frame inset.axvline(x=150,ymin=0.375,ymax=0.45,linewidth=2.0,color='b') inset.axhline(y=150,xmin=0.375,xmax=0.45,linewidth=2.0,color='b') except OSError as e: LOGERROR('downloaded FITS appears to be corrupt, retrying...') dss, dssheader = skyview_stamp(objectinfo['ra'], objectinfo['decl'], convolvewith=finderconvolve, flip=False, forcefetch=True, cachedir=findercachedir, verbose=verbose) stamp = dss # inset plot it on the current axes inset = inset_axes(axes, width="40%", height="40%", loc=1) inset.imshow(stamp, cmap=findercmap, origin='lower') inset.set_xticks([]) inset.set_yticks([]) inset.set_frame_on(False) # grid lines pointing to the center of the frame inset.axvline(x=150,ymin=0.375,ymax=0.45,linewidth=2.0,color='b') inset.axhline(y=150,xmin=0.375,xmax=0.45,linewidth=2.0,color='b') except Exception as e: LOGEXCEPTION('could not fetch a DSS stamp for this ' 'object %s using coords (%.3f,%.3f)' % (objectid, objectinfo['ra'], objectinfo['decl'])) # annotate with objectinfo axes.text( 0.05,0.95, '%s' % objectid, ha='left',va='center',transform=axes.transAxes, fontsize=18.0 ) axes.text( 0.05,0.91, 'RA = %.3f, DEC = %.3f' % (objectinfo['ra'], objectinfo['decl']), ha='left',va='center',transform=axes.transAxes, fontsize=18.0 ) if bvcolor: axes.text(0.05,0.87, '$B - V$ 
= %.3f, $V$ = %.3f' % (bvcolor, objectinfo['vmag']), ha='left',va='center',transform=axes.transAxes, fontsize=18.0) elif 'vmag' in objectinfo and objectinfo['vmag']: axes.text(0.05,0.87, '$V$ = %.3f' % (objectinfo['vmag'],), ha='left',va='center',transform=axes.transAxes, fontsize=18.0) if ijcolor: axes.text(0.05,0.83, '$i - J$ = %.3f, $J$ = %.3f' % (ijcolor, objectinfo['jmag']), ha='left',va='center',transform=axes.transAxes, fontsize=18.0) elif 'jmag' in objectinfo and objectinfo['jmag']: axes.text(0.05,0.83, '$J$ = %.3f' % (objectinfo['jmag'],), ha='left',va='center',transform=axes.transAxes, fontsize=18.0) if jkcolor: axes.text(0.05,0.79, '$J - K$ = %.3f, $K$ = %.3f' % (jkcolor, objectinfo['kmag']), ha='left',va='center',transform=axes.transAxes, fontsize=18.0) elif 'kmag' in objectinfo and objectinfo['kmag']: axes.text(0.05,0.79, '$K$ = %.3f' % (objectinfo['kmag'],), ha='left',va='center',transform=axes.transAxes, fontsize=18.0) if 'sdssr' in objectinfo and objectinfo['sdssr']: axes.text(0.05,0.75,'SDSS $r$ = %.3f' % objectinfo['sdssr'], ha='left',va='center',transform=axes.transAxes, fontsize=18.0) if ('teff' in objectinfo and 'gmag' in objectinfo and objectinfo['teff'] and objectinfo['gmag']): # gaia data available axes.text(0.05,0.87, r'$G$ = %.1f, $T_\mathrm{eff}$ = %d' % ( gmag, int(teff_val)), ha='left',va='center',transform=axes.transAxes, fontsize=18.0) # add in proper motion stuff if available in objectinfo if ('pmra' in objectinfo and objectinfo['pmra'] and 'pmdecl' in objectinfo and objectinfo['pmdecl']): pm = total_proper_motion(objectinfo['pmra'], objectinfo['pmdecl'], objectinfo['decl']) axes.text(0.05,0.67,r'$\mu$ = %.2f mas yr$^{-1}$' % pm, ha='left',va='center',transform=axes.transAxes, fontsize=18.0) if 'jmag' in objectinfo and objectinfo['jmag']: rpm = reduced_proper_motion(objectinfo['jmag'],pm) axes.text(0.05,0.63,'$H_J$ = %.2f' % rpm, ha='left',va='center',transform=axes.transAxes, fontsize=18.0)
[ "def", "_make_periodogram", "(", "axes", ",", "lspinfo", ",", "objectinfo", ",", "findercmap", ",", "finderconvolve", ",", "verbose", "=", "True", ",", "findercachedir", "=", "'~/.astrobase/stamp-cache'", ")", ":", "# get the appropriate plot ylabel", "pgramylabel", "...
Makes periodogram, objectinfo, and finder tile for `checkplot_png` and `twolsp_checkplot_png`. Parameters ---------- axes : matplotlib.axes.Axes object The Axes object which will contain the plot being made. lspinfo : dict Dict containing results from a period-finder in `astrobase.periodbase` or a dict that corresponds to that format. objectinfo : dict Dict containing basic info about the object being processed. findercmap : matplotlib Colormap object or str The Colormap object to use for the finder chart image. finderconvolve : astropy.convolution.Kernel object or None If not None, the Kernel object to use for convolving the finder image. verbose : bool If True, indicates progress. findercachedir : str The directory where the FITS finder images are downloaded and cached. Returns ------- Does not return anything, works on the input Axes object directly.
[ "Makes", "periodogram", "objectinfo", "and", "finder", "tile", "for", "checkplot_png", "and", "twolsp_checkplot_png", "." ]
python
valid
apple/turicreate
src/unity/python/turicreate/util/__init__.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/util/__init__.py#L47-L70
def _get_aws_credentials(): """ Returns the values stored in the AWS credential environment variables. Returns the value stored in the AWS_ACCESS_KEY_ID environment variable and the value stored in the AWS_SECRET_ACCESS_KEY environment variable. Returns ------- out : tuple [string] The first string of the tuple is the value of the AWS_ACCESS_KEY_ID environment variable. The second string of the tuple is the value of the AWS_SECRET_ACCESS_KEY environment variable. Examples -------- >>> turicreate.aws.get_credentials() ('RBZH792CTQPP7T435BGQ', '7x2hMqplWsLpU/qQCN6xAPKcmWo46TlPJXYTvKcv') """ if (not 'AWS_ACCESS_KEY_ID' in _os.environ): raise KeyError('No access key found. Please set the environment variable AWS_ACCESS_KEY_ID.') if (not 'AWS_SECRET_ACCESS_KEY' in _os.environ): raise KeyError('No secret key found. Please set the environment variable AWS_SECRET_ACCESS_KEY.') return (_os.environ['AWS_ACCESS_KEY_ID'], _os.environ['AWS_SECRET_ACCESS_KEY'])
[ "def", "_get_aws_credentials", "(", ")", ":", "if", "(", "not", "'AWS_ACCESS_KEY_ID'", "in", "_os", ".", "environ", ")", ":", "raise", "KeyError", "(", "'No access key found. Please set the environment variable AWS_ACCESS_KEY_ID.'", ")", "if", "(", "not", "'AWS_SECRET_A...
Returns the values stored in the AWS credential environment variables. Returns the value stored in the AWS_ACCESS_KEY_ID environment variable and the value stored in the AWS_SECRET_ACCESS_KEY environment variable. Returns ------- out : tuple [string] The first string of the tuple is the value of the AWS_ACCESS_KEY_ID environment variable. The second string of the tuple is the value of the AWS_SECRET_ACCESS_KEY environment variable. Examples -------- >>> turicreate.aws.get_credentials() ('RBZH792CTQPP7T435BGQ', '7x2hMqplWsLpU/qQCN6xAPKcmWo46TlPJXYTvKcv')
[ "Returns", "the", "values", "stored", "in", "the", "AWS", "credential", "environment", "variables", ".", "Returns", "the", "value", "stored", "in", "the", "AWS_ACCESS_KEY_ID", "environment", "variable", "and", "the", "value", "stored", "in", "the", "AWS_SECRET_ACC...
python
train
dropbox/stone
stone/frontend/ir_generator.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/frontend/ir_generator.py#L841-L850
def _populate_route_attributes(self): """ Converts all routes from forward references to complete definitions. """ route_schema = self._validate_stone_cfg() self.api.add_route_schema(route_schema) for namespace in self.api.namespaces.values(): env = self._get_or_create_env(namespace.name) for route in namespace.routes: self._populate_route_attributes_helper(env, route, route_schema)
[ "def", "_populate_route_attributes", "(", "self", ")", ":", "route_schema", "=", "self", ".", "_validate_stone_cfg", "(", ")", "self", ".", "api", ".", "add_route_schema", "(", "route_schema", ")", "for", "namespace", "in", "self", ".", "api", ".", "namespaces...
Converts all routes from forward references to complete definitions.
[ "Converts", "all", "routes", "from", "forward", "references", "to", "complete", "definitions", "." ]
python
train
LabKey/labkey-api-python
labkey/utils.py
https://github.com/LabKey/labkey-api-python/blob/3c8d393384d7cbb2785f8a7f5fe34007b17a76b8/labkey/utils.py#L114-L137
def create_server_context(domain, container_path, context_path=None, use_ssl=True, verify_ssl=True, api_key=None): # type: (str, str, str, bool, bool, str) -> ServerContext """ Create a LabKey server context. This context is used to encapsulate properties about the LabKey server that is being requested against. This includes, but is not limited to, the domain, container_path, if the server is using SSL, and CSRF token request. :param domain: :param container_path: :param context_path: :param use_ssl: :param verify_ssl: :param api_key: :return: """ config = dict( domain=domain, container_path=container_path, context_path=context_path, use_ssl=use_ssl, verify_ssl=verify_ssl, api_key=api_key ) return ServerContext(**config)
[ "def", "create_server_context", "(", "domain", ",", "container_path", ",", "context_path", "=", "None", ",", "use_ssl", "=", "True", ",", "verify_ssl", "=", "True", ",", "api_key", "=", "None", ")", ":", "# type: (str, str, str, bool, bool, str) -> ServerContext", "...
Create a LabKey server context. This context is used to encapsulate properties about the LabKey server that is being requested against. This includes, but is not limited to, the domain, container_path, if the server is using SSL, and CSRF token request. :param domain: :param container_path: :param context_path: :param use_ssl: :param verify_ssl: :param api_key: :return:
[ "Create", "a", "LabKey", "server", "context", ".", "This", "context", "is", "used", "to", "encapsulate", "properties", "about", "the", "LabKey", "server", "that", "is", "being", "requested", "against", ".", "This", "includes", "but", "is", "not", "limited", ...
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/ietf_netconf_monitoring.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/ietf_netconf_monitoring.py#L509-L520
def get_schema_input_version(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_schema = ET.Element("get_schema") config = get_schema input = ET.SubElement(get_schema, "input") version = ET.SubElement(input, "version") version.text = kwargs.pop('version') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_schema_input_version", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_schema", "=", "ET", ".", "Element", "(", "\"get_schema\"", ")", "config", "=", "get_schema", "input", "=", ...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
BD2KGenomics/toil-scripts
src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py#L479-L524
def mapsplice(job, job_vars): """ Maps RNA-Seq reads to a reference genome. job_vars: tuple Tuple of dictionaries: input_args and ids """ # Unpack variables input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() cores = input_args['cpu_count'] sudo = input_args['sudo'] single_end_reads = input_args['single_end_reads'] files_to_delete = ['R1.fastq'] # I/O return_input_paths(job, work_dir, ids, 'ebwt.zip', 'chromosomes.zip') if single_end_reads: return_input_paths(job, work_dir, ids, 'R1.fastq') else: return_input_paths(job, work_dir, ids, 'R1.fastq', 'R2.fastq') files_to_delete.extend(['R2.fastq']) for fname in ['chromosomes.zip', 'ebwt.zip']: subprocess.check_call(['unzip', '-o', os.path.join(work_dir, fname), '-d', work_dir]) # Command and call parameters = ['-p', str(cores), '-s', '25', '--bam', '--min-map-len', '50', '-x', '/data/ebwt', '-c', '/data/chromosomes', '-1', '/data/R1.fastq', '-o', '/data'] if not single_end_reads: parameters.extend(['-2', '/data/R2.fastq']) docker_call(tool='quay.io/ucsc_cgl/mapsplice:2.1.8--dd5ac549b95eb3e5d166a5e310417ef13651994e', tool_parameters=parameters, work_dir=work_dir, sudo=sudo) # Write to FileStore for fname in ['alignments.bam', 'stats.txt']: ids[fname] = job.fileStore.writeGlobalFile(os.path.join(work_dir, fname)) for fname in files_to_delete: job.fileStore.deleteGlobalFile(ids[fname]) # Run child job # map_id = job.addChildJobFn(mapping_stats, job_vars).rv() if input_args['upload_bam_to_s3'] and input_args['s3_dir']: job.addChildJobFn(upload_bam_to_s3, job_vars) output_ids = job.addChildJobFn(add_read_groups, job_vars, disk='30 G').rv() return output_ids
[ "def", "mapsplice", "(", "job", ",", "job_vars", ")", ":", "# Unpack variables", "input_args", ",", "ids", "=", "job_vars", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "cores", "=", "input_args", "[", "'cpu_count'", "]", "sudo...
Maps RNA-Seq reads to a reference genome. job_vars: tuple Tuple of dictionaries: input_args and ids
[ "Maps", "RNA", "-", "Seq", "reads", "to", "a", "reference", "genome", "." ]
python
train
benhoff/vexbot
vexbot/extensions/log.py
https://github.com/benhoff/vexbot/blob/9b844eb20e84eea92a0e7db7d86a90094956c38f/vexbot/extensions/log.py#L5-L25
def log_level(self, level: typing.Union[str, int]=None, *args, **kwargs) -> typing.Union[None, str]: """ Args: level: Returns: The log level if a `level` is passed in """ if level is None: return self.root_logger.getEffectiveLevel() # NOTE: `setLevel` takes both string and integers. Try to cast to an integer first try: value = int(level) # if we can't cast to an int, it's probably a string except ValueError: pass self.root_logger.setLevel(value)
[ "def", "log_level", "(", "self", ",", "level", ":", "typing", ".", "Union", "[", "str", ",", "int", "]", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", "->", "typing", ".", "Union", "[", "None", ",", "str", "]", ":", "if", "level"...
Args: level: Returns: The log level if a `level` is passed in
[ "Args", ":", "level", ":" ]
python
train
chrisjrn/registrasion
registrasion/controllers/credit_note.py
https://github.com/chrisjrn/registrasion/blob/461d5846c6f9f3b7099322a94f5d9911564448e4/registrasion/controllers/credit_note.py#L18-L31
def generate_from_invoice(cls, invoice, value): ''' Generates a credit note of the specified value and pays it against the given invoice. You need to call InvoiceController.update_status() to set the status correctly, if appropriate. ''' credit_note = commerce.CreditNote.objects.create( invoice=invoice, amount=0-value, # Credit notes start off as a payment against inv. reference="ONE MOMENT", ) credit_note.reference = "Generated credit note %d" % credit_note.id credit_note.save() return cls(credit_note)
[ "def", "generate_from_invoice", "(", "cls", ",", "invoice", ",", "value", ")", ":", "credit_note", "=", "commerce", ".", "CreditNote", ".", "objects", ".", "create", "(", "invoice", "=", "invoice", ",", "amount", "=", "0", "-", "value", ",", "# Credit note...
Generates a credit note of the specified value and pays it against the given invoice. You need to call InvoiceController.update_status() to set the status correctly, if appropriate.
[ "Generates", "a", "credit", "note", "of", "the", "specified", "value", "and", "pays", "it", "against", "the", "given", "invoice", ".", "You", "need", "to", "call", "InvoiceController", ".", "update_status", "()", "to", "set", "the", "status", "correctly", "i...
python
test
cloud9ers/gurumate
environment/lib/python2.7/site-packages/nose/plugins/prof.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/nose/plugins/prof.py#L60-L66
def begin(self): """Create profile stats file and load profiler. """ if not self.available(): return self._create_pfile() self.prof = hotshot.Profile(self.pfile)
[ "def", "begin", "(", "self", ")", ":", "if", "not", "self", ".", "available", "(", ")", ":", "return", "self", ".", "_create_pfile", "(", ")", "self", ".", "prof", "=", "hotshot", ".", "Profile", "(", "self", ".", "pfile", ")" ]
Create profile stats file and load profiler.
[ "Create", "profile", "stats", "file", "and", "load", "profiler", "." ]
python
test
thebigmunch/gmusicapi-wrapper
gmusicapi_wrapper/utils.py
https://github.com/thebigmunch/gmusicapi-wrapper/blob/8708683cd33955def1378fc28319ef37805b851d/gmusicapi_wrapper/utils.py#L398-L418
def walk_depth(path, max_depth=float('inf')): """Walk a directory tree with configurable depth. Parameters: path (str): A directory path to walk. max_depth (int): The depth in the directory tree to walk. A depth of '0' limits the walk to the top directory. Default: No limit. """ start_level = os.path.abspath(path).count(os.path.sep) for dir_entry in os.walk(path): root, dirs, _ = dir_entry level = root.count(os.path.sep) - start_level yield dir_entry if level >= max_depth: dirs[:] = []
[ "def", "walk_depth", "(", "path", ",", "max_depth", "=", "float", "(", "'inf'", ")", ")", ":", "start_level", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", ".", "count", "(", "os", ".", "path", ".", "sep", ")", "for", "dir_entry", "in",...
Walk a directory tree with configurable depth. Parameters: path (str): A directory path to walk. max_depth (int): The depth in the directory tree to walk. A depth of '0' limits the walk to the top directory. Default: No limit.
[ "Walk", "a", "directory", "tree", "with", "configurable", "depth", "." ]
python
valid
gwastro/pycbc-glue
pycbc_glue/ligolw/ilwd.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/ligolw/ilwd.py#L157-L227
def get_ilwdchar_class(tbl_name, col_name, namespace = globals()): """ Searches this module's namespace for a subclass of _ilwd.ilwdchar whose table_name and column_name attributes match those provided. If a matching subclass is found it is returned; otherwise a new class is defined, added to this module's namespace, and returned. Example: >>> process_id = get_ilwdchar_class("process", "process_id") >>> x = process_id(10) >>> str(type(x)) "<class 'pycbc_glue.ligolw.ilwd.process_process_id_class'>" >>> str(x) 'process:process_id:10' Retrieving and storing the class provides a convenient mechanism for quickly constructing new ID objects. Example: >>> for i in range(10): ... print str(process_id(i)) ... process:process_id:0 process:process_id:1 process:process_id:2 process:process_id:3 process:process_id:4 process:process_id:5 process:process_id:6 process:process_id:7 process:process_id:8 process:process_id:9 """ # # if the class already exists, retrieve and return it # key = (str(tbl_name), str(col_name)) cls_name = "%s_%s_class" % key assert cls_name != "get_ilwdchar_class" try: return namespace[cls_name] except KeyError: pass # # otherwise define a new class, and add it to the cache # class new_class(_ilwd.ilwdchar): __slots__ = () table_name, column_name = key index_offset = len("%s:%s:" % key) new_class.__name__ = cls_name namespace[cls_name] = new_class # # pickle support # copy_reg.pickle(new_class, lambda x: (ilwdchar, (unicode(x),))) # # return the new class # return new_class
[ "def", "get_ilwdchar_class", "(", "tbl_name", ",", "col_name", ",", "namespace", "=", "globals", "(", ")", ")", ":", "#", "# if the class already exists, retrieve and return it", "#", "key", "=", "(", "str", "(", "tbl_name", ")", ",", "str", "(", "col_name", "...
Searches this module's namespace for a subclass of _ilwd.ilwdchar whose table_name and column_name attributes match those provided. If a matching subclass is found it is returned; otherwise a new class is defined, added to this module's namespace, and returned. Example: >>> process_id = get_ilwdchar_class("process", "process_id") >>> x = process_id(10) >>> str(type(x)) "<class 'pycbc_glue.ligolw.ilwd.process_process_id_class'>" >>> str(x) 'process:process_id:10' Retrieving and storing the class provides a convenient mechanism for quickly constructing new ID objects. Example: >>> for i in range(10): ... print str(process_id(i)) ... process:process_id:0 process:process_id:1 process:process_id:2 process:process_id:3 process:process_id:4 process:process_id:5 process:process_id:6 process:process_id:7 process:process_id:8 process:process_id:9
[ "Searches", "this", "module", "s", "namespace", "for", "a", "subclass", "of", "_ilwd", ".", "ilwdchar", "whose", "table_name", "and", "column_name", "attributes", "match", "those", "provided", ".", "If", "a", "matching", "subclass", "is", "found", "it", "is", ...
python
train
dpgaspar/Flask-AppBuilder
flask_appbuilder/views.py
https://github.com/dpgaspar/Flask-AppBuilder/blob/c293734c1b86e176a3ba57ee2deab6676d125576/flask_appbuilder/views.py#L268-L276
def show_item_dict(self, item): """Returns a json-able dict for show""" d = {} for col in self.show_columns: v = getattr(item, col) if not isinstance(v, (int, float, string_types)): v = str(v) d[col] = v return d
[ "def", "show_item_dict", "(", "self", ",", "item", ")", ":", "d", "=", "{", "}", "for", "col", "in", "self", ".", "show_columns", ":", "v", "=", "getattr", "(", "item", ",", "col", ")", "if", "not", "isinstance", "(", "v", ",", "(", "int", ",", ...
Returns a json-able dict for show
[ "Returns", "a", "json", "-", "able", "dict", "for", "show" ]
python
train
shaypal5/strct
strct/sortedlists/sortedlist.py
https://github.com/shaypal5/strct/blob/f3a301692d052ddb79331230b3c00625db1d83fc/strct/sortedlists/sortedlist.py#L56-L107
def find_range_ix_in_section_list(start, end, section_list): """Returns the index range all sections belonging to the given range. The given list is assumed to contain start points of consecutive sections, except for the final point, assumed to be the end point of the last section. For example, the list [5, 8, 30, 31] is interpreted as the following list of sections: [5-8), [8-30), [30-31]. As such, this function will return [5,8] for the range (7,9) and [5,8,30] while for (7, 30). Parameters --------- start : float The start of the desired range. end : float The end of the desired range. section_list : sortedcontainers.SortedList A list of start points of consecutive sections. Returns ------- iterable The index range of all sections belonging to the given range. Example ------- >>> from sortedcontainers import SortedList >>> seclist = SortedList([5, 8, 30, 31]) >>> find_range_ix_in_section_list(3, 4, seclist) [0, 0] >>> find_range_ix_in_section_list(6, 7, seclist) [0, 1] >>> find_range_ix_in_section_list(7, 9, seclist) [0, 2] >>> find_range_ix_in_section_list(7, 30, seclist) [0, 3] >>> find_range_ix_in_section_list(7, 321, seclist) [0, 3] >>> find_range_ix_in_section_list(4, 321, seclist) [0, 3] """ if start > section_list[-1] or end < section_list[0]: return [0, 0] if start < section_list[0]: start_section = section_list[0] else: start_section = find_point_in_section_list(start, section_list) if end > section_list[-1]: end_section = section_list[-2] else: end_section = find_point_in_section_list(end, section_list) return [ section_list.index(start_section), section_list.index(end_section)+1]
[ "def", "find_range_ix_in_section_list", "(", "start", ",", "end", ",", "section_list", ")", ":", "if", "start", ">", "section_list", "[", "-", "1", "]", "or", "end", "<", "section_list", "[", "0", "]", ":", "return", "[", "0", ",", "0", "]", "if", "s...
Returns the index range all sections belonging to the given range. The given list is assumed to contain start points of consecutive sections, except for the final point, assumed to be the end point of the last section. For example, the list [5, 8, 30, 31] is interpreted as the following list of sections: [5-8), [8-30), [30-31]. As such, this function will return [5,8] for the range (7,9) and [5,8,30] while for (7, 30). Parameters --------- start : float The start of the desired range. end : float The end of the desired range. section_list : sortedcontainers.SortedList A list of start points of consecutive sections. Returns ------- iterable The index range of all sections belonging to the given range. Example ------- >>> from sortedcontainers import SortedList >>> seclist = SortedList([5, 8, 30, 31]) >>> find_range_ix_in_section_list(3, 4, seclist) [0, 0] >>> find_range_ix_in_section_list(6, 7, seclist) [0, 1] >>> find_range_ix_in_section_list(7, 9, seclist) [0, 2] >>> find_range_ix_in_section_list(7, 30, seclist) [0, 3] >>> find_range_ix_in_section_list(7, 321, seclist) [0, 3] >>> find_range_ix_in_section_list(4, 321, seclist) [0, 3]
[ "Returns", "the", "index", "range", "all", "sections", "belonging", "to", "the", "given", "range", "." ]
python
train
zetaops/zengine
zengine/engine.py
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/engine.py#L410-L418
def _clear_current_task(self): """ Clear tasks related attributes, checks permissions While switching WF to WF, authentication and permissions are checked for new WF. """ self.current.task_name = None self.current.task_type = None self.current.task = None
[ "def", "_clear_current_task", "(", "self", ")", ":", "self", ".", "current", ".", "task_name", "=", "None", "self", ".", "current", ".", "task_type", "=", "None", "self", ".", "current", ".", "task", "=", "None" ]
Clear tasks related attributes, checks permissions While switching WF to WF, authentication and permissions are checked for new WF.
[ "Clear", "tasks", "related", "attributes", "checks", "permissions", "While", "switching", "WF", "to", "WF", "authentication", "and", "permissions", "are", "checked", "for", "new", "WF", "." ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/DFReader.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/DFReader.py#L166-L171
def find_time_base(self, gps, first_us_stamp): '''work out time basis for the log - even newer style''' t = self._gpsTimeToTime(gps.GWk, gps.GMS) self.set_timebase(t - gps.TimeUS*0.000001) # this ensures FMT messages get appropriate timestamp: self.timestamp = self.timebase + first_us_stamp*0.000001
[ "def", "find_time_base", "(", "self", ",", "gps", ",", "first_us_stamp", ")", ":", "t", "=", "self", ".", "_gpsTimeToTime", "(", "gps", ".", "GWk", ",", "gps", ".", "GMS", ")", "self", ".", "set_timebase", "(", "t", "-", "gps", ".", "TimeUS", "*", ...
work out time basis for the log - even newer style
[ "work", "out", "time", "basis", "for", "the", "log", "-", "even", "newer", "style" ]
python
train
hayd/ctox
ctox/subst.py
https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/subst.py#L188-L197
def _replace_config(s, env): """[sectionname]optionname""" m = re.match(r"\[(.*?)\](.*)", s) if m: section, option = m.groups() expanded = env.config.get(section, option) return '\n'.join([expand_factor_conditions(e, env) for e in expanded.split("\n")]) else: raise ValueError()
[ "def", "_replace_config", "(", "s", ",", "env", ")", ":", "m", "=", "re", ".", "match", "(", "r\"\\[(.*?)\\](.*)\"", ",", "s", ")", "if", "m", ":", "section", ",", "option", "=", "m", ".", "groups", "(", ")", "expanded", "=", "env", ".", "config", ...
[sectionname]optionname
[ "[", "sectionname", "]", "optionname" ]
python
train
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_firmware.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_firmware.py#L971-L984
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_blade_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status") config = logical_chassis_fwdl_status output = ET.SubElement(logical_chassis_fwdl_status, "output") cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries") fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries") blade_name = ET.SubElement(fwdl_entries, "blade-name") blade_name.text = kwargs.pop('blade_name') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_blade_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "logical_chassis_fwdl_status", "=", "ET", ".", "Element", "(", "\"log...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train