repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
msikma/kanaconv
kanaconv/converter.py
https://github.com/msikma/kanaconv/blob/194f142e616ab5dd6d13a687b96b9f8abd1b4ea8/kanaconv/converter.py#L455-L463
def _set_digraph_b(self, char): ''' Sets the second part of a digraph. ''' self.has_digraph_b = True # Change the active vowel to the one provided by the second part # of the digraph. self.active_vowel_ro = di_b_lt[char][0] self.active_dgr_b_info = di_b_lt[char]
[ "def", "_set_digraph_b", "(", "self", ",", "char", ")", ":", "self", ".", "has_digraph_b", "=", "True", "# Change the active vowel to the one provided by the second part", "# of the digraph.", "self", ".", "active_vowel_ro", "=", "di_b_lt", "[", "char", "]", "[", "0",...
Sets the second part of a digraph.
[ "Sets", "the", "second", "part", "of", "a", "digraph", "." ]
python
train
ArangoDB-Community/pyArango
pyArango/document.py
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/document.py#L103-L120
def set(self, dct) : """Set the store using a dictionary""" # if not self.mustValidate : # self.store = dct # self.patchStore = dct # return for field, value in dct.items() : if field not in self.collection.arangoPrivates : if isinstance(value, dict) : if field in self.validators and isinstance(self.validators[field], dict): vals = self.validators[field] else : vals = {} self[field] = DocumentStore(self.collection, validators = vals, initDct = value, patch = self.patching, subStore=True, validateInit=self.validateInit) self.subStores[field] = self.store[field] else : self[field] = value
[ "def", "set", "(", "self", ",", "dct", ")", ":", "# if not self.mustValidate :", "# self.store = dct", "# self.patchStore = dct", "# return", "for", "field", ",", "value", "in", "dct", ".", "items", "(", ")", ":", "if", "field", "not", "in", "self", ...
Set the store using a dictionary
[ "Set", "the", "store", "using", "a", "dictionary" ]
python
train
chrisrink10/basilisp
src/basilisp/lang/runtime.py
https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/runtime.py#L1246-L1253
def _basilisp_fn(f): """Create a Basilisp function, setting meta and supplying a with_meta method implementation.""" assert not hasattr(f, "meta") f._basilisp_fn = True f.meta = None f.with_meta = partial(_fn_with_meta, f) return f
[ "def", "_basilisp_fn", "(", "f", ")", ":", "assert", "not", "hasattr", "(", "f", ",", "\"meta\"", ")", "f", ".", "_basilisp_fn", "=", "True", "f", ".", "meta", "=", "None", "f", ".", "with_meta", "=", "partial", "(", "_fn_with_meta", ",", "f", ")", ...
Create a Basilisp function, setting meta and supplying a with_meta method implementation.
[ "Create", "a", "Basilisp", "function", "setting", "meta", "and", "supplying", "a", "with_meta", "method", "implementation", "." ]
python
test
dshean/pygeotools
pygeotools/lib/malib.py
https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/malib.py#L1630-L1655
def get_stats_dict(a_in, full=True): """Compute and print statistics for input array """ d = {} a = checkma(a_in) d['count'] = a.count() thresh = 4E6 if not full and d['count'] > thresh: a = a.compressed() stride = int(np.around(a.size / thresh)) #a = np.ma.array(a[::stride]) a = a[::stride] d['min'] = a.min() d['max'] = a.max() d['ptp'] = d['max'] - d['min'] d['mean'] = a.mean(dtype='float64') d['std'] = a.std(dtype='float64') d['nmad'], d['med'] = mad(a, return_med=True) d['median'] = d['med'] d['p16'], d['p84'], d['spread'] = robust_spread(a) from scipy.stats.mstats import mode d['mode'] = mode(a, axis=None)[0] for i in d: d[i] = float(d[i]) d['count'] = int(d['count']) return d
[ "def", "get_stats_dict", "(", "a_in", ",", "full", "=", "True", ")", ":", "d", "=", "{", "}", "a", "=", "checkma", "(", "a_in", ")", "d", "[", "'count'", "]", "=", "a", ".", "count", "(", ")", "thresh", "=", "4E6", "if", "not", "full", "and", ...
Compute and print statistics for input array
[ "Compute", "and", "print", "statistics", "for", "input", "array" ]
python
train
lago-project/lago
lago/config.py
https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/config.py#L122-L146
def load(self): """ Load all configurations from available resources, skip if empty: 1. :attr:`default`` dict passed to :func:`ConfigLoad.__init__`. 2. Custom paths as defined in :attr:`CONFS_PATH` in :class:`~lago.constants`. 3. XDG standard paths. 4. Environment variables. Returns: dict: dict of dicts. """ configp = configparser.ConfigParser() configp.read_dict(self._defaults) for path in _get_configs_path(): try: with open(path, 'r') as config_file: configp.read_file(config_file) except IOError: pass configp.read_dict(get_env_dict(self.root_section)) return {s: dict(configp.items(s)) for s in configp.sections()}
[ "def", "load", "(", "self", ")", ":", "configp", "=", "configparser", ".", "ConfigParser", "(", ")", "configp", ".", "read_dict", "(", "self", ".", "_defaults", ")", "for", "path", "in", "_get_configs_path", "(", ")", ":", "try", ":", "with", "open", "...
Load all configurations from available resources, skip if empty: 1. :attr:`default`` dict passed to :func:`ConfigLoad.__init__`. 2. Custom paths as defined in :attr:`CONFS_PATH` in :class:`~lago.constants`. 3. XDG standard paths. 4. Environment variables. Returns: dict: dict of dicts.
[ "Load", "all", "configurations", "from", "available", "resources", "skip", "if", "empty", ":" ]
python
train
dmulcahey/zha-device-handlers
zhaquirks/xiaomi/__init__.py
https://github.com/dmulcahey/zha-device-handlers/blob/bab2a53724c6fb5caee2e796dd46ebcb45400f93/zhaquirks/xiaomi/__init__.py#L93-L98
def _calculate_remaining_battery_percentage(self, voltage): """Calculate percentage.""" min_voltage = 2500 max_voltage = 3000 percent = (voltage - min_voltage) / (max_voltage - min_voltage) * 200 return min(200, percent)
[ "def", "_calculate_remaining_battery_percentage", "(", "self", ",", "voltage", ")", ":", "min_voltage", "=", "2500", "max_voltage", "=", "3000", "percent", "=", "(", "voltage", "-", "min_voltage", ")", "/", "(", "max_voltage", "-", "min_voltage", ")", "*", "20...
Calculate percentage.
[ "Calculate", "percentage", "." ]
python
train
horazont/aioxmpp
aioxmpp/pep/service.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/pep/service.py#L90-L146
def claim_pep_node(self, node_namespace, *, register_feature=True, notify=False): """ Claim node `node_namespace`. :param node_namespace: the pubsub node whose events shall be handled. :param register_feature: Whether to publish the `node_namespace` as feature. :param notify: Whether to register the ``+notify`` feature to receive notification without explicit subscription. :raises RuntimeError: if a handler for `node_namespace` is already set. :returns: a :class:`~aioxmpp.pep.service.RegisteredPEPNode` instance representing the claim. .. seealso:: :class:`aioxmpp.pep.register_pep_node` a descriptor which can be used with :class:`~aioxmpp.service.Service` subclasses to claim a PEP node automatically. This registers `node_namespace` as feature for service discovery unless ``register_feature=False`` is passed. .. note:: For `notify` to work, it is required that :class:`aioxmpp.EntityCapsService` is loaded and that presence is re-sent soon after :meth:`~aioxmpp.EntityCapsService.on_ver_changed` fires. See the documentation of the class and the signal for details. """ if node_namespace in self._pep_node_claims: raise RuntimeError( "claiming already claimed node" ) registered_node = RegisteredPEPNode( self, node_namespace, register_feature=register_feature, notify=notify, ) finalizer = weakref.finalize( registered_node, weakref.WeakMethod(registered_node._unregister) ) # we cannot guarantee that disco is not cleared up already, # so we do not unclaim the feature on exit finalizer.atexit = False self._pep_node_claims[node_namespace] = registered_node return registered_node
[ "def", "claim_pep_node", "(", "self", ",", "node_namespace", ",", "*", ",", "register_feature", "=", "True", ",", "notify", "=", "False", ")", ":", "if", "node_namespace", "in", "self", ".", "_pep_node_claims", ":", "raise", "RuntimeError", "(", "\"claiming al...
Claim node `node_namespace`. :param node_namespace: the pubsub node whose events shall be handled. :param register_feature: Whether to publish the `node_namespace` as feature. :param notify: Whether to register the ``+notify`` feature to receive notification without explicit subscription. :raises RuntimeError: if a handler for `node_namespace` is already set. :returns: a :class:`~aioxmpp.pep.service.RegisteredPEPNode` instance representing the claim. .. seealso:: :class:`aioxmpp.pep.register_pep_node` a descriptor which can be used with :class:`~aioxmpp.service.Service` subclasses to claim a PEP node automatically. This registers `node_namespace` as feature for service discovery unless ``register_feature=False`` is passed. .. note:: For `notify` to work, it is required that :class:`aioxmpp.EntityCapsService` is loaded and that presence is re-sent soon after :meth:`~aioxmpp.EntityCapsService.on_ver_changed` fires. See the documentation of the class and the signal for details.
[ "Claim", "node", "node_namespace", "." ]
python
train
Ouranosinc/xclim
xclim/indices.py
https://github.com/Ouranosinc/xclim/blob/2080d139188bd8de2aeca097a025c2d89d6e0e09/xclim/indices.py#L1971-L1998
def tx_min(tasmax, freq='YS'): r"""Lowest max temperature The minimum of daily maximum temperature. Parameters ---------- tasmax : xarray.DataArray Maximum daily temperature [℃] or [K] freq : str, optional Resampling frequency Returns ------- xarray.DataArray Minimum of daily maximum temperature. Notes ----- Let :math:`TX_{ij}` be the maximum temperature at day :math:`i` of period :math:`j`. Then the minimum daily maximum temperature for period :math:`j` is: .. math:: TXn_j = min(TX_{ij}) """ return tasmax.resample(time=freq).min(dim='time', keep_attrs=True)
[ "def", "tx_min", "(", "tasmax", ",", "freq", "=", "'YS'", ")", ":", "return", "tasmax", ".", "resample", "(", "time", "=", "freq", ")", ".", "min", "(", "dim", "=", "'time'", ",", "keep_attrs", "=", "True", ")" ]
r"""Lowest max temperature The minimum of daily maximum temperature. Parameters ---------- tasmax : xarray.DataArray Maximum daily temperature [℃] or [K] freq : str, optional Resampling frequency Returns ------- xarray.DataArray Minimum of daily maximum temperature. Notes ----- Let :math:`TX_{ij}` be the maximum temperature at day :math:`i` of period :math:`j`. Then the minimum daily maximum temperature for period :math:`j` is: .. math:: TXn_j = min(TX_{ij})
[ "r", "Lowest", "max", "temperature" ]
python
train
coldfix/udiskie
udiskie/udisks2.py
https://github.com/coldfix/udiskie/blob/804c9d27df6f7361fec3097c432398f2d702f911/udiskie/udisks2.py#L357-L366
def drive(self): """Get wrapper to the drive containing this device.""" if self.is_drive: return self cleartext = self.luks_cleartext_slave if cleartext: return cleartext.drive if self.is_block: return self._daemon[self._P.Block.Drive] return None
[ "def", "drive", "(", "self", ")", ":", "if", "self", ".", "is_drive", ":", "return", "self", "cleartext", "=", "self", ".", "luks_cleartext_slave", "if", "cleartext", ":", "return", "cleartext", ".", "drive", "if", "self", ".", "is_block", ":", "return", ...
Get wrapper to the drive containing this device.
[ "Get", "wrapper", "to", "the", "drive", "containing", "this", "device", "." ]
python
train
jciskey/pygraph
pygraph/functions/planarity/kocay_algorithm.py
https://github.com/jciskey/pygraph/blob/037bb2f32503fecb60d62921f9766d54109f15e2/pygraph/functions/planarity/kocay_algorithm.py#L403-L436
def __do_case_5_work(d_w, d_u, case_1, case_2, case_3, dfs_data): """Encapsulates the work that will be done for case 5 of __embed_frond, since it gets used in more than one place.""" # --We should only ever see u-cases 1 and 2 if case_3: # --We should never get here return False comp_d_w = abs(d_w) #if case_1: # --Add the frond to the left side __insert_frond_LF(d_w, d_u, dfs_data) # --Add uw to Lm m = dfs_data['FG']['m'] Lm = L(m, dfs_data) if comp_d_w < Lm['u']: Lm['u'] = d_w if d_u > Lm['v']: Lm['v'] = d_u # --Case 2 requires a bit of extra work if case_2: Lm['u'] = d_w x_m1 = fn_x(m-1, dfs_data) while comp_d_w < x_m1: merge_Fm(dfs_data) m = dfs_data['FG']['m'] x_m1 = fn_x(m-1, dfs_data) #else: #print "Case 5 work, u-case 1" return True
[ "def", "__do_case_5_work", "(", "d_w", ",", "d_u", ",", "case_1", ",", "case_2", ",", "case_3", ",", "dfs_data", ")", ":", "# --We should only ever see u-cases 1 and 2", "if", "case_3", ":", "# --We should never get here", "return", "False", "comp_d_w", "=", "abs", ...
Encapsulates the work that will be done for case 5 of __embed_frond, since it gets used in more than one place.
[ "Encapsulates", "the", "work", "that", "will", "be", "done", "for", "case", "5", "of", "__embed_frond", "since", "it", "gets", "used", "in", "more", "than", "one", "place", "." ]
python
train
CalebBell/fluids
fluids/fittings.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/fittings.py#L2579-L2629
def diffuser_conical_staged(Di1, Di2, DEs, ls, fd=None, method='Rennels'): r'''Returns loss coefficient for any series of staged conical pipe expansions as shown in [1]_. Five different formulas are used, depending on the angle and the ratio of diameters. This function calls diffuser_conical. Parameters ---------- Di1 : float Inside diameter of original pipe (smaller), [m] Di2 : float Inside diameter of following pipe (larger), [m] DEs : array Diameters of intermediate sections, [m] ls : array Lengths of the various sections, [m] fd : float Darcy friction factor [-] method : str The method to use for the calculation; one of 'Rennels', 'Crane', 'Miller', 'Swamee', or 'Idelchik' [-] Returns ------- K : float Loss coefficient [-] Notes ----- Only lengths of sections currently allowed. This could be changed to understand angles also. Formula doesn't make much sense, as observed by the example comparing a series of conical sections. Use only for small numbers of segments of highly differing angles. Examples -------- >>> diffuser_conical(Di1=1., Di2=10.,l=9, fd=0.01) 0.973137914861591 References ---------- .. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012. ''' K = 0 DEs.insert(0, Di1) DEs.append(Di2) for i in range(len(ls)): K += diffuser_conical(Di1=float(DEs[i]), Di2=float(DEs[i+1]), l=float(ls[i]), fd=fd, method=method) return K
[ "def", "diffuser_conical_staged", "(", "Di1", ",", "Di2", ",", "DEs", ",", "ls", ",", "fd", "=", "None", ",", "method", "=", "'Rennels'", ")", ":", "K", "=", "0", "DEs", ".", "insert", "(", "0", ",", "Di1", ")", "DEs", ".", "append", "(", "Di2", ...
r'''Returns loss coefficient for any series of staged conical pipe expansions as shown in [1]_. Five different formulas are used, depending on the angle and the ratio of diameters. This function calls diffuser_conical. Parameters ---------- Di1 : float Inside diameter of original pipe (smaller), [m] Di2 : float Inside diameter of following pipe (larger), [m] DEs : array Diameters of intermediate sections, [m] ls : array Lengths of the various sections, [m] fd : float Darcy friction factor [-] method : str The method to use for the calculation; one of 'Rennels', 'Crane', 'Miller', 'Swamee', or 'Idelchik' [-] Returns ------- K : float Loss coefficient [-] Notes ----- Only lengths of sections currently allowed. This could be changed to understand angles also. Formula doesn't make much sense, as observed by the example comparing a series of conical sections. Use only for small numbers of segments of highly differing angles. Examples -------- >>> diffuser_conical(Di1=1., Di2=10.,l=9, fd=0.01) 0.973137914861591 References ---------- .. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
[ "r", "Returns", "loss", "coefficient", "for", "any", "series", "of", "staged", "conical", "pipe", "expansions", "as", "shown", "in", "[", "1", "]", "_", ".", "Five", "different", "formulas", "are", "used", "depending", "on", "the", "angle", "and", "the", ...
python
train
pypa/pipenv
pipenv/vendor/distlib/_backport/tarfile.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L2328-L2341
def makedev(self, tarinfo, targetpath): """Make a character or block device called targetpath. """ if not hasattr(os, "mknod") or not hasattr(os, "makedev"): raise ExtractError("special devices not supported by system") mode = tarinfo.mode if tarinfo.isblk(): mode |= stat.S_IFBLK else: mode |= stat.S_IFCHR os.mknod(targetpath, mode, os.makedev(tarinfo.devmajor, tarinfo.devminor))
[ "def", "makedev", "(", "self", ",", "tarinfo", ",", "targetpath", ")", ":", "if", "not", "hasattr", "(", "os", ",", "\"mknod\"", ")", "or", "not", "hasattr", "(", "os", ",", "\"makedev\"", ")", ":", "raise", "ExtractError", "(", "\"special devices not supp...
Make a character or block device called targetpath.
[ "Make", "a", "character", "or", "block", "device", "called", "targetpath", "." ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/thread.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/thread.py#L228-L242
def __get_pid_by_scanning(self): 'Internally used by get_pid().' dwProcessId = None dwThreadId = self.get_tid() with win32.CreateToolhelp32Snapshot(win32.TH32CS_SNAPTHREAD) as hSnapshot: te = win32.Thread32First(hSnapshot) while te is not None: if te.th32ThreadID == dwThreadId: dwProcessId = te.th32OwnerProcessID break te = win32.Thread32Next(hSnapshot) if dwProcessId is None: msg = "Cannot find thread ID %d in any process" % dwThreadId raise RuntimeError(msg) return dwProcessId
[ "def", "__get_pid_by_scanning", "(", "self", ")", ":", "dwProcessId", "=", "None", "dwThreadId", "=", "self", ".", "get_tid", "(", ")", "with", "win32", ".", "CreateToolhelp32Snapshot", "(", "win32", ".", "TH32CS_SNAPTHREAD", ")", "as", "hSnapshot", ":", "te",...
Internally used by get_pid().
[ "Internally", "used", "by", "get_pid", "()", "." ]
python
train
Esri/ArcREST
src/arcresthelper/publishingtools.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcresthelper/publishingtools.py#L2004-L2044
def publishApp(self, app_info, map_info=None, fsInfo=None): """Publishes apps to AGOL/Portal Args: app_info (list): A list of JSON configuration apps to publish. map_info (list): Defaults to ``None``. fsInfo (list): Defaults to ``None``. Returns: dict: A dictionary of results objects. """ if self.securityhandler is None: print ("Security handler required") return appDet = None try: app_results = [] if isinstance(app_info, list): for appDet in app_info: app_results.append(self._publishAppLogic(appDet=appDet,map_info=map_info,fsInfo=fsInfo)) else: app_results.append(self._publishAppLogic(appDet=app_info,map_info=map_info,fsInfo=fsInfo)) return app_results except (common.ArcRestHelperError) as e: raise e except Exception as e: line, filename, synerror = trace() raise common.ArcRestHelperError({ "function": "publishApp", "line": line, "filename": filename, "synerror": synerror, }) finally: appDet = None del appDet gc.collect()
[ "def", "publishApp", "(", "self", ",", "app_info", ",", "map_info", "=", "None", ",", "fsInfo", "=", "None", ")", ":", "if", "self", ".", "securityhandler", "is", "None", ":", "print", "(", "\"Security handler required\"", ")", "return", "appDet", "=", "No...
Publishes apps to AGOL/Portal Args: app_info (list): A list of JSON configuration apps to publish. map_info (list): Defaults to ``None``. fsInfo (list): Defaults to ``None``. Returns: dict: A dictionary of results objects.
[ "Publishes", "apps", "to", "AGOL", "/", "Portal", "Args", ":", "app_info", "(", "list", ")", ":", "A", "list", "of", "JSON", "configuration", "apps", "to", "publish", ".", "map_info", "(", "list", ")", ":", "Defaults", "to", "None", ".", "fsInfo", "(",...
python
train
pypa/pipenv
pipenv/vendor/ptyprocess/ptyprocess.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/ptyprocess/ptyprocess.py#L564-L590
def sendcontrol(self, char): '''Helper method that wraps send() with mnemonic access for sending control character to the child (such as Ctrl-C or Ctrl-D). For example, to send Ctrl-G (ASCII 7, bell, '\a'):: child.sendcontrol('g') See also, sendintr() and sendeof(). ''' char = char.lower() a = ord(char) if 97 <= a <= 122: a = a - ord('a') + 1 byte = _byte(a) return self._writeb(byte), byte d = {'@': 0, '`': 0, '[': 27, '{': 27, '\\': 28, '|': 28, ']': 29, '}': 29, '^': 30, '~': 30, '_': 31, '?': 127} if char not in d: return 0, b'' byte = _byte(d[char]) return self._writeb(byte), byte
[ "def", "sendcontrol", "(", "self", ",", "char", ")", ":", "char", "=", "char", ".", "lower", "(", ")", "a", "=", "ord", "(", "char", ")", "if", "97", "<=", "a", "<=", "122", ":", "a", "=", "a", "-", "ord", "(", "'a'", ")", "+", "1", "byte",...
Helper method that wraps send() with mnemonic access for sending control character to the child (such as Ctrl-C or Ctrl-D). For example, to send Ctrl-G (ASCII 7, bell, '\a'):: child.sendcontrol('g') See also, sendintr() and sendeof().
[ "Helper", "method", "that", "wraps", "send", "()", "with", "mnemonic", "access", "for", "sending", "control", "character", "to", "the", "child", "(", "such", "as", "Ctrl", "-", "C", "or", "Ctrl", "-", "D", ")", ".", "For", "example", "to", "send", "Ctr...
python
train
ironfroggy/django-better-cache
bettercache/objects.py
https://github.com/ironfroggy/django-better-cache/blob/5350e8c646cef1c1ca74eab176f856ddd9eaf5c3/bettercache/objects.py#L163-L177
def from_miss(self, **kwargs): """Called to initialize an instance when it is not found in the cache. For example, if your CacheModel should pull data from the database to populate the cache, ... def from_miss(self, username): user = User.objects.get(username=username) self.email = user.email self.full_name = user.get_full_name() """ raise type(self).Missing(type(self)(**kwargs).key())
[ "def", "from_miss", "(", "self", ",", "*", "*", "kwargs", ")", ":", "raise", "type", "(", "self", ")", ".", "Missing", "(", "type", "(", "self", ")", "(", "*", "*", "kwargs", ")", ".", "key", "(", ")", ")" ]
Called to initialize an instance when it is not found in the cache. For example, if your CacheModel should pull data from the database to populate the cache, ... def from_miss(self, username): user = User.objects.get(username=username) self.email = user.email self.full_name = user.get_full_name()
[ "Called", "to", "initialize", "an", "instance", "when", "it", "is", "not", "found", "in", "the", "cache", "." ]
python
train
boriel/zxbasic
arch/zx48k/backend/__32bit.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__32bit.py#L439-L457
def _gtu32(ins): """ Compares & pops top 2 operands out of the stack, and checks if the 1st operand > 2nd operand (top of the stack). Pushes 0 if False, 1 if True. 32 bit unsigned version """ op1, op2 = tuple(ins.quad[2:]) rev = op1[0] != 't' and not is_int(op1) and op2[0] == 't' output = _32bit_oper(op1, op2, rev) output.append('pop bc') output.append('or a') output.append('sbc hl, bc') output.append('ex de, hl') output.append('pop de') output.append('sbc hl, de') output.append('sbc a, a') output.append('push af') return output
[ "def", "_gtu32", "(", "ins", ")", ":", "op1", ",", "op2", "=", "tuple", "(", "ins", ".", "quad", "[", "2", ":", "]", ")", "rev", "=", "op1", "[", "0", "]", "!=", "'t'", "and", "not", "is_int", "(", "op1", ")", "and", "op2", "[", "0", "]", ...
Compares & pops top 2 operands out of the stack, and checks if the 1st operand > 2nd operand (top of the stack). Pushes 0 if False, 1 if True. 32 bit unsigned version
[ "Compares", "&", "pops", "top", "2", "operands", "out", "of", "the", "stack", "and", "checks", "if", "the", "1st", "operand", ">", "2nd", "operand", "(", "top", "of", "the", "stack", ")", ".", "Pushes", "0", "if", "False", "1", "if", "True", "." ]
python
train
robertmartin8/PyPortfolioOpt
pypfopt/cla.py
https://github.com/robertmartin8/PyPortfolioOpt/blob/dfad1256cb6995c7fbd7a025eedb54b1ca04b2fc/pypfopt/cla.py#L347-L363
def efficient_frontier(self, points): """Get the efficient frontier""" mu, sigma, weights = [], [], [] # remove the 1, to avoid duplications a = np.linspace(0, 1, points / len(self.w))[:-1] b = list(range(len(self.w) - 1)) for i in b: w0, w1 = self.w[i], self.w[i + 1] if i == b[-1]: # include the 1 in the last iteration a = np.linspace(0, 1, points / len(self.w)) for j in a: w = w1 * j + (1 - j) * w0 weights.append(np.copy(w)) mu.append(np.dot(w.T, self.mean)[0, 0]) sigma.append(np.dot(np.dot(w.T, self.cov_matrix), w)[0, 0] ** 0.5) return mu, sigma, weights
[ "def", "efficient_frontier", "(", "self", ",", "points", ")", ":", "mu", ",", "sigma", ",", "weights", "=", "[", "]", ",", "[", "]", ",", "[", "]", "# remove the 1, to avoid duplications", "a", "=", "np", ".", "linspace", "(", "0", ",", "1", ",", "po...
Get the efficient frontier
[ "Get", "the", "efficient", "frontier" ]
python
train
loli/medpy
medpy/metric/binary.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/metric/binary.py#L723-L775
def obj_assd(result, reference, voxelspacing=None, connectivity=1): """ Average symmetric surface distance. Computes the average symmetric surface distance (ASSD) between the binary objects in two images. Parameters ---------- result : array_like Input data containing objects. Can be any type but will be converted into binary: background where 0, object everywhere else. reference : array_like Input data containing objects. Can be any type but will be converted into binary: background where 0, object everywhere else. voxelspacing : float or sequence of floats, optional The voxelspacing in a distance unit i.e. spacing of elements along each dimension. If a sequence, must be of length equal to the input rank; if a single number, this is used for all axes. If not specified, a grid spacing of unity is implied. connectivity : int The neighbourhood/connectivity considered when determining what accounts for a distinct binary object as well as when determining the surface of the binary objects. This value is passed to `scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`. The decision on the connectivity is important, as it can influence the results strongly. If in doubt, leave it as it is. Returns ------- assd : float The average symmetric surface distance between all mutually existing distinct binary object(s) in ``result`` and ``reference``. The distance unit is the same as for the spacing of elements along each dimension, which is usually given in mm. See also -------- :func:`obj_asd` Notes ----- This is a real metric, obtained by calling and averaging >>> obj_asd(result, reference) and >>> obj_asd(reference, result) The binary images can therefore be supplied in any order. """ assd = numpy.mean( (obj_asd(result, reference, voxelspacing, connectivity), obj_asd(reference, result, voxelspacing, connectivity)) ) return assd
[ "def", "obj_assd", "(", "result", ",", "reference", ",", "voxelspacing", "=", "None", ",", "connectivity", "=", "1", ")", ":", "assd", "=", "numpy", ".", "mean", "(", "(", "obj_asd", "(", "result", ",", "reference", ",", "voxelspacing", ",", "connectivit...
Average symmetric surface distance. Computes the average symmetric surface distance (ASSD) between the binary objects in two images. Parameters ---------- result : array_like Input data containing objects. Can be any type but will be converted into binary: background where 0, object everywhere else. reference : array_like Input data containing objects. Can be any type but will be converted into binary: background where 0, object everywhere else. voxelspacing : float or sequence of floats, optional The voxelspacing in a distance unit i.e. spacing of elements along each dimension. If a sequence, must be of length equal to the input rank; if a single number, this is used for all axes. If not specified, a grid spacing of unity is implied. connectivity : int The neighbourhood/connectivity considered when determining what accounts for a distinct binary object as well as when determining the surface of the binary objects. This value is passed to `scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`. The decision on the connectivity is important, as it can influence the results strongly. If in doubt, leave it as it is. Returns ------- assd : float The average symmetric surface distance between all mutually existing distinct binary object(s) in ``result`` and ``reference``. The distance unit is the same as for the spacing of elements along each dimension, which is usually given in mm. See also -------- :func:`obj_asd` Notes ----- This is a real metric, obtained by calling and averaging >>> obj_asd(result, reference) and >>> obj_asd(reference, result) The binary images can therefore be supplied in any order.
[ "Average", "symmetric", "surface", "distance", ".", "Computes", "the", "average", "symmetric", "surface", "distance", "(", "ASSD", ")", "between", "the", "binary", "objects", "in", "two", "images", ".", "Parameters", "----------", "result", ":", "array_like", "I...
python
train
MJL85/natlas
natlas/mac.py
https://github.com/MJL85/natlas/blob/5e7ae3cc7b5dd7ad884fa2b8b93bbdd9275474c4/natlas/mac.py#L64-L97
def get_macs(self, ip, display_progress): ''' Return array of MAC addresses from single node at IP ''' if (ip == '0.0.0.0'): return None ret_macs = [] snmpobj = natlas_snmp(ip) # find valid credentials for this node if (snmpobj.get_cred(self.config.snmp_creds) == 0): return None system_name = util.shorten_host_name(snmpobj.get_val(OID_SYSNAME), self.config.host_domains) # cache some common MIB trees vlan_vbtbl = snmpobj.get_bulk(OID_VLANS) ifname_vbtbl = snmpobj.get_bulk(OID_IFNAME) for vlan_row in vlan_vbtbl: for vlan_n, vlan_v in vlan_row: # get VLAN ID from OID vlan = natlas_snmp.get_last_oid_token(vlan_n) if (vlan >= 1002): continue vmacs = self.get_macs_for_vlan(ip, vlan, display_progress, snmpobj, system_name, ifname_vbtbl) if (vmacs != None): ret_macs.extend(vmacs) if (display_progress == 1): print('') return ret_macs
[ "def", "get_macs", "(", "self", ",", "ip", ",", "display_progress", ")", ":", "if", "(", "ip", "==", "'0.0.0.0'", ")", ":", "return", "None", "ret_macs", "=", "[", "]", "snmpobj", "=", "natlas_snmp", "(", "ip", ")", "# find valid credentials for this node", ...
Return array of MAC addresses from single node at IP
[ "Return", "array", "of", "MAC", "addresses", "from", "single", "node", "at", "IP" ]
python
train
CxAalto/gtfspy
gtfspy/routing/node_profile_simple.py
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/routing/node_profile_simple.py#L76-L97
def evaluate_earliest_arrival_time_at_target(self, dep_time, transfer_margin): """ Get the earliest arrival time at the target, given a departure time. Parameters ---------- dep_time : float, int time in unix seconds transfer_margin: float, int transfer margin in seconds Returns ------- arrival_time : float Arrival time in the given time unit (seconds after unix epoch). """ minimum = dep_time + self._walk_to_target_duration dep_time_plus_transfer_margin = dep_time + transfer_margin for label in self._labels: if label.departure_time >= dep_time_plus_transfer_margin and label.arrival_time_target < minimum: minimum = label.arrival_time_target return float(minimum)
[ "def", "evaluate_earliest_arrival_time_at_target", "(", "self", ",", "dep_time", ",", "transfer_margin", ")", ":", "minimum", "=", "dep_time", "+", "self", ".", "_walk_to_target_duration", "dep_time_plus_transfer_margin", "=", "dep_time", "+", "transfer_margin", "for", ...
Get the earliest arrival time at the target, given a departure time. Parameters ---------- dep_time : float, int time in unix seconds transfer_margin: float, int transfer margin in seconds Returns ------- arrival_time : float Arrival time in the given time unit (seconds after unix epoch).
[ "Get", "the", "earliest", "arrival", "time", "at", "the", "target", "given", "a", "departure", "time", "." ]
python
valid
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/setuptools/command/easy_install.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/setuptools/command/easy_install.py#L1716-L1745
def _update_zipimporter_cache(normalized_path, cache, updater=None): """ Update zipimporter cache data for a given normalized path. Any sub-path entries are processed as well, i.e. those corresponding to zip archives embedded in other zip archives. Given updater is a callable taking a cache entry key and the original entry (after already removing the entry from the cache), and expected to update the entry and possibly return a new one to be inserted in its place. Returning None indicates that the entry should not be replaced with a new one. If no updater is given, the cache entries are simply removed without any additional processing, the same as if the updater simply returned None. """ for p in _collect_zipimporter_cache_entries(normalized_path, cache): # N.B. pypy's custom zipimport._zip_directory_cache implementation does # not support the complete dict interface: # * Does not support item assignment, thus not allowing this function # to be used only for removing existing cache entries. # * Does not support the dict.pop() method, forcing us to use the # get/del patterns instead. For more detailed information see the # following links: # https://bitbucket.org/pypa/setuptools/issue/202/more-robust-zipimporter-cache-invalidation#comment-10495960 # https://bitbucket.org/pypy/pypy/src/dd07756a34a41f674c0cacfbc8ae1d4cc9ea2ae4/pypy/module/zipimport/interp_zipimport.py#cl-99 old_entry = cache[p] del cache[p] new_entry = updater and updater(p, old_entry) if new_entry is not None: cache[p] = new_entry
[ "def", "_update_zipimporter_cache", "(", "normalized_path", ",", "cache", ",", "updater", "=", "None", ")", ":", "for", "p", "in", "_collect_zipimporter_cache_entries", "(", "normalized_path", ",", "cache", ")", ":", "# N.B. pypy's custom zipimport._zip_directory_cache im...
Update zipimporter cache data for a given normalized path. Any sub-path entries are processed as well, i.e. those corresponding to zip archives embedded in other zip archives. Given updater is a callable taking a cache entry key and the original entry (after already removing the entry from the cache), and expected to update the entry and possibly return a new one to be inserted in its place. Returning None indicates that the entry should not be replaced with a new one. If no updater is given, the cache entries are simply removed without any additional processing, the same as if the updater simply returned None.
[ "Update", "zipimporter", "cache", "data", "for", "a", "given", "normalized", "path", "." ]
python
test
eleme/meepo
meepo/apps/eventsourcing/event_store.py
https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/apps/eventsourcing/event_store.py#L190-L210
def replay(self, event, ts=0, end_ts=None, with_ts=False): """Replay events based on timestamp. If you split namespace with ts, the replay will only return events within the same namespace. :param event: event name :param ts: replay events after ts, default from 0. :param end_ts: replay events to ts, default to "+inf". :param with_ts: return timestamp with events, default to False. :return: list of pks when with_ts set to False, list of (pk, ts) tuples when with_ts is True. """ key = self._keygen(event, ts) end_ts = end_ts if end_ts else "+inf" elements = self.r.zrangebyscore(key, ts, end_ts, withscores=with_ts) if not with_ts: return [s(e) for e in elements] else: return [(s(e[0]), int(e[1])) for e in elements]
[ "def", "replay", "(", "self", ",", "event", ",", "ts", "=", "0", ",", "end_ts", "=", "None", ",", "with_ts", "=", "False", ")", ":", "key", "=", "self", ".", "_keygen", "(", "event", ",", "ts", ")", "end_ts", "=", "end_ts", "if", "end_ts", "else"...
Replay events based on timestamp. If you split namespace with ts, the replay will only return events within the same namespace. :param event: event name :param ts: replay events after ts, default from 0. :param end_ts: replay events to ts, default to "+inf". :param with_ts: return timestamp with events, default to False. :return: list of pks when with_ts set to False, list of (pk, ts) tuples when with_ts is True.
[ "Replay", "events", "based", "on", "timestamp", "." ]
python
train
gazpachoking/jsonref
jsonref.py
https://github.com/gazpachoking/jsonref/blob/066132e527f8115f75bcadfd0eca12f8973a6309/jsonref.py#L324-L345
def load(fp, base_uri="", loader=None, jsonschema=False, load_on_repr=True, **kwargs): """ Drop in replacement for :func:`json.load`, where JSON references are proxied to their referent data. :param fp: File-like object containing JSON document :param kwargs: This function takes any of the keyword arguments from :meth:`JsonRef.replace_refs`. Any other keyword arguments will be passed to :func:`json.load` """ if loader is None: loader = functools.partial(jsonloader, **kwargs) return JsonRef.replace_refs( json.load(fp, **kwargs), base_uri=base_uri, loader=loader, jsonschema=jsonschema, load_on_repr=load_on_repr, )
[ "def", "load", "(", "fp", ",", "base_uri", "=", "\"\"", ",", "loader", "=", "None", ",", "jsonschema", "=", "False", ",", "load_on_repr", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "loader", "is", "None", ":", "loader", "=", "functools", ...
Drop in replacement for :func:`json.load`, where JSON references are proxied to their referent data. :param fp: File-like object containing JSON document :param kwargs: This function takes any of the keyword arguments from :meth:`JsonRef.replace_refs`. Any other keyword arguments will be passed to :func:`json.load`
[ "Drop", "in", "replacement", "for", ":", "func", ":", "json", ".", "load", "where", "JSON", "references", "are", "proxied", "to", "their", "referent", "data", "." ]
python
train
noahgoldman/adbpy
adbpy/socket.py
https://github.com/noahgoldman/adbpy/blob/ecbff8a8f151852b5c36847dc812582a8674a503/adbpy/socket.py#L102-L131
def receive_until_end(self, timeout=None): """ Reads and blocks until the socket closes Used for the "shell" command, where STDOUT and STDERR are just redirected to the terminal with no length """ if self.receive_fixed_length(4) != "OKAY": raise SocketError("Socket communication failed: " "the server did not return a valid response") # The time at which the receive starts start_time = time.clock() output = "" while True: if timeout is not None: self.socket.settimeout(timeout - (time.clock() - start_time)) chunk = '' try: chunk = self.socket.recv(4096).decode("ascii") except socket.timeout: return output if not chunk: return output output += chunk
[ "def", "receive_until_end", "(", "self", ",", "timeout", "=", "None", ")", ":", "if", "self", ".", "receive_fixed_length", "(", "4", ")", "!=", "\"OKAY\"", ":", "raise", "SocketError", "(", "\"Socket communication failed: \"", "\"the server did not return a valid resp...
Reads and blocks until the socket closes Used for the "shell" command, where STDOUT and STDERR are just redirected to the terminal with no length
[ "Reads", "and", "blocks", "until", "the", "socket", "closes" ]
python
train
google/apitools
apitools/base/protorpclite/util.py
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/util.py#L55-L155
def positional(max_positional_args): """A decorator that declares only the first N arguments may be positional. This decorator makes it easy to support Python 3 style keyword-only parameters. For example, in Python 3 it is possible to write: def fn(pos1, *, kwonly1=None, kwonly1=None): ... All named parameters after * must be a keyword: fn(10, 'kw1', 'kw2') # Raises exception. fn(10, kwonly1='kw1') # Ok. Example: To define a function like above, do: @positional(1) def fn(pos1, kwonly1=None, kwonly2=None): ... If no default value is provided to a keyword argument, it becomes a required keyword argument: @positional(0) def fn(required_kw): ... This must be called with the keyword parameter: fn() # Raises exception. fn(10) # Raises exception. fn(required_kw=10) # Ok. When defining instance or class methods always remember to account for 'self' and 'cls': class MyClass(object): @positional(2) def my_method(self, pos1, kwonly1=None): ... @classmethod @positional(2) def my_method(cls, pos1, kwonly1=None): ... One can omit the argument to 'positional' altogether, and then no arguments with default values may be passed positionally. This would be equivalent to placing a '*' before the first argument with a default value in Python 3. If there are no arguments with default values, and no argument is given to 'positional', an error is raised. @positional def fn(arg1, arg2, required_kw1=None, required_kw2=0): ... fn(1, 3, 5) # Raises exception. fn(1, 3) # Ok. fn(1, 3, required_kw1=5) # Ok. Args: max_positional_arguments: Maximum number of positional arguments. All parameters after the this index must be keyword only. Returns: A decorator that prevents using arguments after max_positional_args from being used as positional parameters. Raises: TypeError if a keyword-only argument is provided as a positional parameter. ValueError if no maximum number of arguments is provided and the function has no arguments with default values. 
""" def positional_decorator(wrapped): """Creates a function wraper to enforce number of arguments.""" @functools.wraps(wrapped) def positional_wrapper(*args, **kwargs): if len(args) > max_positional_args: plural_s = '' if max_positional_args != 1: plural_s = 's' raise TypeError('%s() takes at most %d positional argument%s ' '(%d given)' % (wrapped.__name__, max_positional_args, plural_s, len(args))) return wrapped(*args, **kwargs) return positional_wrapper if isinstance(max_positional_args, six.integer_types): return positional_decorator else: args, _, _, defaults = inspect.getargspec(max_positional_args) if defaults is None: raise ValueError( 'Functions with no keyword arguments must specify ' 'max_positional_args') return positional(len(args) - len(defaults))(max_positional_args)
[ "def", "positional", "(", "max_positional_args", ")", ":", "def", "positional_decorator", "(", "wrapped", ")", ":", "\"\"\"Creates a function wraper to enforce number of arguments.\"\"\"", "@", "functools", ".", "wraps", "(", "wrapped", ")", "def", "positional_wrapper", "...
A decorator that declares only the first N arguments may be positional. This decorator makes it easy to support Python 3 style keyword-only parameters. For example, in Python 3 it is possible to write: def fn(pos1, *, kwonly1=None, kwonly1=None): ... All named parameters after * must be a keyword: fn(10, 'kw1', 'kw2') # Raises exception. fn(10, kwonly1='kw1') # Ok. Example: To define a function like above, do: @positional(1) def fn(pos1, kwonly1=None, kwonly2=None): ... If no default value is provided to a keyword argument, it becomes a required keyword argument: @positional(0) def fn(required_kw): ... This must be called with the keyword parameter: fn() # Raises exception. fn(10) # Raises exception. fn(required_kw=10) # Ok. When defining instance or class methods always remember to account for 'self' and 'cls': class MyClass(object): @positional(2) def my_method(self, pos1, kwonly1=None): ... @classmethod @positional(2) def my_method(cls, pos1, kwonly1=None): ... One can omit the argument to 'positional' altogether, and then no arguments with default values may be passed positionally. This would be equivalent to placing a '*' before the first argument with a default value in Python 3. If there are no arguments with default values, and no argument is given to 'positional', an error is raised. @positional def fn(arg1, arg2, required_kw1=None, required_kw2=0): ... fn(1, 3, 5) # Raises exception. fn(1, 3) # Ok. fn(1, 3, required_kw1=5) # Ok. Args: max_positional_arguments: Maximum number of positional arguments. All parameters after the this index must be keyword only. Returns: A decorator that prevents using arguments after max_positional_args from being used as positional parameters. Raises: TypeError if a keyword-only argument is provided as a positional parameter. ValueError if no maximum number of arguments is provided and the function has no arguments with default values.
[ "A", "decorator", "that", "declares", "only", "the", "first", "N", "arguments", "may", "be", "positional", "." ]
python
train
EpistasisLab/tpot
tpot/builtins/stacking_estimator.py
https://github.com/EpistasisLab/tpot/blob/b626271e6b5896a73fb9d7d29bebc7aa9100772e/tpot/builtins/stacking_estimator.py#L70-L92
def transform(self, X): """Transform data by adding two synthetic feature(s). Parameters ---------- X: numpy ndarray, {n_samples, n_components} New data, where n_samples is the number of samples and n_components is the number of components. Returns ------- X_transformed: array-like, shape (n_samples, n_features + 1) or (n_samples, n_features + 1 + n_classes) for classifier with predict_proba attribute The transformed feature set. """ X = check_array(X) X_transformed = np.copy(X) # add class probabilities as a synthetic feature if issubclass(self.estimator.__class__, ClassifierMixin) and hasattr(self.estimator, 'predict_proba'): X_transformed = np.hstack((self.estimator.predict_proba(X), X)) # add class prodiction as a synthetic feature X_transformed = np.hstack((np.reshape(self.estimator.predict(X), (-1, 1)), X_transformed)) return X_transformed
[ "def", "transform", "(", "self", ",", "X", ")", ":", "X", "=", "check_array", "(", "X", ")", "X_transformed", "=", "np", ".", "copy", "(", "X", ")", "# add class probabilities as a synthetic feature", "if", "issubclass", "(", "self", ".", "estimator", ".", ...
Transform data by adding two synthetic feature(s). Parameters ---------- X: numpy ndarray, {n_samples, n_components} New data, where n_samples is the number of samples and n_components is the number of components. Returns ------- X_transformed: array-like, shape (n_samples, n_features + 1) or (n_samples, n_features + 1 + n_classes) for classifier with predict_proba attribute The transformed feature set.
[ "Transform", "data", "by", "adding", "two", "synthetic", "feature", "(", "s", ")", "." ]
python
train
ace0/pyrelic
pyrelic/vpopProfile.py
https://github.com/ace0/pyrelic/blob/f23d4e6586674675f72304d5938548267d6413bf/pyrelic/vpopProfile.py#L52-L72
def proveGt(x,tTilde,kw,y): """ Generate a zero-knowledge proof that DL(g^kw) == DL(e(x,t)^kw) where g,e(..) \in Gt. @return pi = (p,c,u) """ # Verify types assertType(x, G1Element) assertType(tTilde, G2Element) # Compute the proof. beta = pair(x,tTilde) g = generatorGt() p = g**kw v = randomZ(orderGt()) t1 = g**v t2 = beta**v c = hashZ(g,p,beta,y,t1,t2) u = (v- (c*kw)) % orderGt() return (p,c,u)
[ "def", "proveGt", "(", "x", ",", "tTilde", ",", "kw", ",", "y", ")", ":", "# Verify types", "assertType", "(", "x", ",", "G1Element", ")", "assertType", "(", "tTilde", ",", "G2Element", ")", "# Compute the proof.", "beta", "=", "pair", "(", "x", ",", "...
Generate a zero-knowledge proof that DL(g^kw) == DL(e(x,t)^kw) where g,e(..) \in Gt. @return pi = (p,c,u)
[ "Generate", "a", "zero", "-", "knowledge", "proof", "that", "DL", "(", "g^kw", ")", "==", "DL", "(", "e", "(", "x", "t", ")", "^kw", ")", "where", "g", "e", "(", "..", ")", "\\", "in", "Gt", "." ]
python
train
spry-group/python-vultr
vultr/v1_dns.py
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_dns.py#L38-L46
def delete_domain(self, domain, params=None): ''' /v1/dns/delete_domain POST - account Delete a domain name (and all associated records) Link: https://www.vultr.com/api/#dns_delete_domain ''' params = update_params(params, {'domain': domain}) return self.request('/v1/dns/delete_domain', params, 'POST')
[ "def", "delete_domain", "(", "self", ",", "domain", ",", "params", "=", "None", ")", ":", "params", "=", "update_params", "(", "params", ",", "{", "'domain'", ":", "domain", "}", ")", "return", "self", ".", "request", "(", "'/v1/dns/delete_domain'", ",", ...
/v1/dns/delete_domain POST - account Delete a domain name (and all associated records) Link: https://www.vultr.com/api/#dns_delete_domain
[ "/", "v1", "/", "dns", "/", "delete_domain", "POST", "-", "account", "Delete", "a", "domain", "name", "(", "and", "all", "associated", "records", ")" ]
python
train
nesaro/pydsl
pydsl/file/python.py
https://github.com/nesaro/pydsl/blob/00b4fffd72036b80335e1a44a888fac57917ab41/pydsl/file/python.py#L34-L63
def load_python_file(moduleobject): """ Try to create an indexable instance from a module""" if isinstance(moduleobject, str): moduleobject = load_module(moduleobject) if not hasattr(moduleobject, "iclass"): raise KeyError("Element" + str(moduleobject)) iclass = getattr(moduleobject, "iclass") mylist = getattr(moduleobject, "__all__", None) or list(filter(lambda x:x[:1] != "_", (dir(moduleobject)))) mylist.remove('iclass') resultdic = {} for x in mylist: resultdic[x] = getattr(moduleobject, x) if iclass == "SymbolGrammar": from pydsl.grammar.BNF import BNFGrammar return BNFGrammar(**resultdic) elif iclass == "PLY": from pydsl.grammar.definition import PLYGrammar return PLYGrammar(moduleobject) elif iclass in ["PythonGrammar"]: from pydsl.grammar.definition import PythonGrammar return PythonGrammar(resultdic) elif iclass == "PythonTranslator": return resultdic elif iclass == "parsley": from pydsl.grammar.parsley import ParsleyGrammar return ParsleyGrammar(**resultdic) elif iclass == "pyparsing": return resultdic['root_symbol'] else: raise ValueError(str(moduleobject))
[ "def", "load_python_file", "(", "moduleobject", ")", ":", "if", "isinstance", "(", "moduleobject", ",", "str", ")", ":", "moduleobject", "=", "load_module", "(", "moduleobject", ")", "if", "not", "hasattr", "(", "moduleobject", ",", "\"iclass\"", ")", ":", "...
Try to create an indexable instance from a module
[ "Try", "to", "create", "an", "indexable", "instance", "from", "a", "module" ]
python
train
base4sistemas/satcfe
satcfe/clientesathub.py
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/clientesathub.py#L271-L284
def trocar_codigo_de_ativacao(self, novo_codigo_ativacao, opcao=constantes.CODIGO_ATIVACAO_REGULAR, codigo_emergencia=None): """Sobrepõe :meth:`~satcfe.base.FuncoesSAT.trocar_codigo_de_ativacao`. :return: Uma resposta SAT padrão. :rtype: satcfe.resposta.padrao.RespostaSAT """ resp = self._http_post('trocarcodigodeativacao', novo_codigo_ativacao=novo_codigo_ativacao, opcao=opcao, codigo_emergencia=codigo_emergencia) conteudo = resp.json() return RespostaSAT.trocar_codigo_de_ativacao(conteudo.get('retorno'))
[ "def", "trocar_codigo_de_ativacao", "(", "self", ",", "novo_codigo_ativacao", ",", "opcao", "=", "constantes", ".", "CODIGO_ATIVACAO_REGULAR", ",", "codigo_emergencia", "=", "None", ")", ":", "resp", "=", "self", ".", "_http_post", "(", "'trocarcodigodeativacao'", "...
Sobrepõe :meth:`~satcfe.base.FuncoesSAT.trocar_codigo_de_ativacao`. :return: Uma resposta SAT padrão. :rtype: satcfe.resposta.padrao.RespostaSAT
[ "Sobrepõe", ":", "meth", ":", "~satcfe", ".", "base", ".", "FuncoesSAT", ".", "trocar_codigo_de_ativacao", "." ]
python
train
nickoala/telepot
telepot/helper.py
https://github.com/nickoala/telepot/blob/3792fde251d0f1d5a6ca16c8ad1a71f89360c41d/telepot/helper.py#L77-L88
def wait(self): """ Block until a matched message appears. """ if not self._patterns: raise RuntimeError('Listener has nothing to capture') while 1: msg = self._queue.get(block=True) if any(map(lambda p: filtering.match_all(msg, p), self._patterns)): return msg
[ "def", "wait", "(", "self", ")", ":", "if", "not", "self", ".", "_patterns", ":", "raise", "RuntimeError", "(", "'Listener has nothing to capture'", ")", "while", "1", ":", "msg", "=", "self", ".", "_queue", ".", "get", "(", "block", "=", "True", ")", ...
Block until a matched message appears.
[ "Block", "until", "a", "matched", "message", "appears", "." ]
python
train
NICTA/revrand
revrand/basis_functions.py
https://github.com/NICTA/revrand/blob/4c1881b6c1772d2b988518e49dde954f165acfb6/revrand/basis_functions.py#L1599-L1627
def transform(self, X, *params): """ Return the basis function applied to X. I.e. Phi(X, params), where params can also optionally be used and learned. Parameters ---------- X : ndarray (N, d) array of observations where N is the number of samples, and d is the dimensionality of X. *params : optional parameter aguments, these are the parameters of the concatenated bases `in the order` they were concatenated. Returns ------- ndarray : of shape (N, D) where D is the number of basis functions. """ Phi = [] args = list(params) for base in self.bases: phi, args = base._transform_popargs(X, *args) Phi.append(phi) return np.hstack(Phi)
[ "def", "transform", "(", "self", ",", "X", ",", "*", "params", ")", ":", "Phi", "=", "[", "]", "args", "=", "list", "(", "params", ")", "for", "base", "in", "self", ".", "bases", ":", "phi", ",", "args", "=", "base", ".", "_transform_popargs", "(...
Return the basis function applied to X. I.e. Phi(X, params), where params can also optionally be used and learned. Parameters ---------- X : ndarray (N, d) array of observations where N is the number of samples, and d is the dimensionality of X. *params : optional parameter aguments, these are the parameters of the concatenated bases `in the order` they were concatenated. Returns ------- ndarray : of shape (N, D) where D is the number of basis functions.
[ "Return", "the", "basis", "function", "applied", "to", "X", "." ]
python
train
manns/pyspread
pyspread/src/lib/vlc.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/vlc.py#L6882-L6890
def debug_callback(event, *args, **kwds): '''Example callback, useful for debugging. ''' l = ['event %s' % (event.type,)] if args: l.extend(map(str, args)) if kwds: l.extend(sorted('%s=%s' % t for t in kwds.items())) print('Debug callback (%s)' % ', '.join(l))
[ "def", "debug_callback", "(", "event", ",", "*", "args", ",", "*", "*", "kwds", ")", ":", "l", "=", "[", "'event %s'", "%", "(", "event", ".", "type", ",", ")", "]", "if", "args", ":", "l", ".", "extend", "(", "map", "(", "str", ",", "args", ...
Example callback, useful for debugging.
[ "Example", "callback", "useful", "for", "debugging", "." ]
python
train
biolink/ontobio
ontobio/golr/golr_query.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/golr/golr_query.py#L1263-L1375
def exec(self, **kwargs): """ Execute solr query Result object is a dict with the following keys: - raw - associations : list - compact_associations : list - facet_counts - facet_pivot """ params = self.solr_params() logging.info("PARAMS="+str(params)) results = self.solr.search(**params) n_docs = len(results.docs) logging.info("Docs found: {}".format(results.hits)) if self.iterate: docs = results.docs start = n_docs while n_docs >= self.rows: logging.info("Iterating; start={}".format(start)) next_results = self.solr.search(start=start, **params) next_docs = next_results.docs n_docs = len(next_docs) docs += next_docs start += self.rows results.docs = docs fcs = results.facets payload = { 'facet_counts': translate_facet_field(fcs, self.invert_subject_object), 'pagination': {}, 'numFound': results.hits } include_raw=self.include_raw if include_raw: # note: this is not JSON serializable, do not send via REST payload['raw'] = results # TODO - check if truncated logging.info("COMPACT={} INV={}".format(self.use_compact_associations, self.invert_subject_object)) if self.use_compact_associations: payload['compact_associations'] = self.translate_docs_compact(results.docs, field_mapping=self.field_mapping, slim=self.slim, invert_subject_object=self.invert_subject_object, map_identifiers=self.map_identifiers, **kwargs) else: payload['associations'] = self.translate_docs(results.docs, field_mapping=self.field_mapping, map_identifiers=self.map_identifiers, **kwargs) if 'facet_pivot' in fcs: payload['facet_pivot'] = fcs['facet_pivot'] if 'facets' in results.raw_response: payload['facets'] = results.raw_response['facets'] # For solr, we implement this by finding all facets # TODO: no need to do 2nd query, see https://wiki.apache.org/solr/SimpleFacetParameters#Parameters fetch_objects=self.fetch_objects if fetch_objects: core_object_field = M.OBJECT if self.slim is not None and len(self.slim)>0: core_object_field = M.OBJECT_CLOSURE object_field = map_field(core_object_field, 
self.field_mapping) if self.invert_subject_object: object_field = map_field(M.SUBJECT, self.field_mapping) oq_params = params.copy() oq_params['fl'] = [] oq_params['facet.field'] = [object_field] oq_params['facet.limit'] = -1 oq_params['rows'] = 0 oq_params['facet.mincount'] = 1 oq_results = self.solr.search(**oq_params) ff = oq_results.facets['facet_fields'] ofl = ff.get(object_field) # solr returns facets counts as list, every 2nd element is number, we don't need the numbers here payload['objects'] = ofl[0::2] fetch_subjects=self.fetch_subjects if fetch_subjects: core_subject_field = M.SUBJECT if self.slim is not None and len(self.slim)>0: core_subject_field = M.SUBJECT_CLOSURE subject_field = map_field(core_subject_field, self.field_mapping) if self.invert_subject_object: subject_field = map_field(M.SUBJECT, self.field_mapping) oq_params = params.copy() oq_params['fl'] = [] oq_params['facet.field'] = [subject_field] oq_params['facet.limit'] = self.max_rows oq_params['rows'] = 0 oq_params['facet.mincount'] = 1 oq_results = self.solr.search(**oq_params) ff = oq_results.facets['facet_fields'] ofl = ff.get(subject_field) # solr returns facets counts as list, every 2nd element is number, we don't need the numbers here payload['subjects'] = ofl[0::2] if len(payload['subjects']) == self.max_rows: payload['is_truncated'] = True if self.slim is not None and len(self.slim)>0: if 'objects' in payload: payload['objects'] = [x for x in payload['objects'] if x in self.slim] if 'associations' in payload: for a in payload['associations']: a['slim'] = [x for x in a['object_closure'] if x in self.slim] del a['object_closure'] return payload
[ "def", "exec", "(", "self", ",", "*", "*", "kwargs", ")", ":", "params", "=", "self", ".", "solr_params", "(", ")", "logging", ".", "info", "(", "\"PARAMS=\"", "+", "str", "(", "params", ")", ")", "results", "=", "self", ".", "solr", ".", "search",...
Execute solr query Result object is a dict with the following keys: - raw - associations : list - compact_associations : list - facet_counts - facet_pivot
[ "Execute", "solr", "query" ]
python
train
sibirrer/lenstronomy
lenstronomy/SimulationAPI/observation_api.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/SimulationAPI/observation_api.py#L140-L151
def scaled_exposure_time(self): """ scaled "effective" exposure time of IID counts. This can be used by lenstronomy to estimate the Poisson errors keeping the assumption that the counts are IIDs (even if they are not). :return: scaled exposure time """ if self._data_count_unit == 'ADU': exp_time = self.ccd_gain * self.exposure_time else: exp_time = self.exposure_time return exp_time
[ "def", "scaled_exposure_time", "(", "self", ")", ":", "if", "self", ".", "_data_count_unit", "==", "'ADU'", ":", "exp_time", "=", "self", ".", "ccd_gain", "*", "self", ".", "exposure_time", "else", ":", "exp_time", "=", "self", ".", "exposure_time", "return"...
scaled "effective" exposure time of IID counts. This can be used by lenstronomy to estimate the Poisson errors keeping the assumption that the counts are IIDs (even if they are not). :return: scaled exposure time
[ "scaled", "effective", "exposure", "time", "of", "IID", "counts", ".", "This", "can", "be", "used", "by", "lenstronomy", "to", "estimate", "the", "Poisson", "errors", "keeping", "the", "assumption", "that", "the", "counts", "are", "IIDs", "(", "even", "if", ...
python
train
iotile/coretools
iotilecore/iotile/core/scripts/virtualdev_script.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/scripts/virtualdev_script.py#L49-L162
def main(argv=None, loop=SharedLoop): """Serve access to a virtual IOTile device using a virtual iotile interface.""" if argv is None: argv = sys.argv[1:] list_parser = argparse.ArgumentParser(add_help=False) list_parser.add_argument('-l', '--list', action='store_true', help="List all known installed interfaces and devices and then exit") list_parser.add_argument('-v', '--verbose', action="count", default=0, help="Increase logging level (goes error, warn, info, debug)") parser = argparse.ArgumentParser(description="Serve acess to a virtual IOTile device using a virtual IOTile interface") parser.add_argument('interface', help="The name of the virtual device interface to use") parser.add_argument('device', help="The name of the virtual device to create") parser.add_argument('-c', '--config', help="An optional JSON config file with arguments for the interface and device") parser.add_argument('-l', '--list', action='store_true', help="List all known installed interfaces and devices and then exit") parser.add_argument('-n', '--scenario', help="Load a test scenario from the given file") parser.add_argument('-s', '--state', help="Load a given state into the device before starting to serve it. Only works with emulated devices.") parser.add_argument('-d', '--dump', help="Dump the device's state when we exit the program. Only works with emulated devices.") parser.add_argument('-t', '--track', help="Track all changes to the device's state. 
Only works with emulated devices.") parser.add_argument('-v', '--verbose', action="count", default=0, help="Increase logging level (goes error, warn, info, debug)") args, _rest = list_parser.parse_known_args(argv) if args.list: configure_logging(args.verbose) reg = ComponentRegistry() print("Installed Device Servers:") for name, _iface in reg.load_extensions('iotile.device_server', class_filter=AbstractDeviceServer): print('- {}'.format(name)) print("\nInstalled Virtual Devices:") for name, dev in reg.load_extensions('iotile.virtual_device', class_filter=VirtualIOTileDevice, product_name="virtual_device"): print('- {}: {}'.format(name, one_line_desc(dev))) return 0 args = parser.parse_args(argv) configure_logging(args.verbose) config = {} if args.config is not None: with open(args.config, "r") as conf_file: config = json.load(conf_file) started = False device = None stop_immediately = args.interface == 'null' try: server = instantiate_interface(args.interface, config, loop) device = instantiate_device(args.device, config, loop) if args.state is not None: print("Loading device state from file %s" % args.state) device.load_state(args.state) if args.scenario is not None: print("Loading scenario from file %s" % args.scenario) with open(args.scenario, "r") as infile: scenario = json.load(infile) # load_metascenario expects a list of scenarios even when there is only one if isinstance(scenario, dict): scenario = [scenario] device.load_metascenario(scenario) if args.track is not None: print("Tracking all state changes to device") device.state_history.enable() adapter = VirtualDeviceAdapter(devices=[device], loop=loop) server.adapter = adapter loop.run_coroutine(adapter.start()) try: loop.run_coroutine(server.start()) except: loop.run_coroutine(adapter.stop()) adapter = None raise started = True print("Starting to serve virtual IOTile device") if stop_immediately: return 0 # We need to periodically process events that are queued up in the interface while True: 
time.sleep(0.5) except KeyboardInterrupt: print("Break received, cleanly exiting...") finally: if args.dump is not None and device is not None: print("Dumping final device state to %s" % args.dump) device.save_state(args.dump) if started: loop.run_coroutine(server.stop()) loop.run_coroutine(adapter.stop()) if args.track is not None and device is not None: print("Saving state history to file %s" % args.track) device.state_history.dump(args.track) return 0
[ "def", "main", "(", "argv", "=", "None", ",", "loop", "=", "SharedLoop", ")", ":", "if", "argv", "is", "None", ":", "argv", "=", "sys", ".", "argv", "[", "1", ":", "]", "list_parser", "=", "argparse", ".", "ArgumentParser", "(", "add_help", "=", "F...
Serve access to a virtual IOTile device using a virtual iotile interface.
[ "Serve", "access", "to", "a", "virtual", "IOTile", "device", "using", "a", "virtual", "iotile", "interface", "." ]
python
train
gagneurlab/concise
concise/hyopt.py
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/hyopt.py#L354-L389
def eval_model(model, test, add_eval_metrics={}): """Evaluate model's performance on the test-set. # Arguments model: Keras model test: test-dataset. Tuple of inputs `x` and target `y` - `(x, y)`. add_eval_metrics: Additional evaluation metrics to use. Can be a dictionary or a list of functions accepting arguments: `y_true`, `y_predicted`. Alternatively, you can provide names of functions from the `concise.eval_metrics` module. # Returns dictionary with evaluation metrics """ # evaluate the model logger.info("Evaluate...") # - model_metrics model_metrics_values = model.evaluate(test[0], test[1], verbose=0, batch_size=test[1].shape[0]) # evaluation is done in a single pass to have more precise metics model_metrics = dict(zip(_listify(model.metrics_names), _listify(model_metrics_values))) # - eval_metrics y_true = test[1] y_pred = model.predict(test[0], verbose=0) eval_metrics = {k: v(y_true, y_pred) for k, v in add_eval_metrics.items()} # handle the case where the two metrics names intersect # - omit duplicates from eval_metrics intersected_keys = set(model_metrics).intersection(set(eval_metrics)) if len(intersected_keys) > 0: logger.warning("Some metric names intersect: {0}. Ignoring the add_eval_metrics ones". format(intersected_keys)) eval_metrics = _delete_keys(eval_metrics, intersected_keys) return merge_dicts(model_metrics, eval_metrics)
[ "def", "eval_model", "(", "model", ",", "test", ",", "add_eval_metrics", "=", "{", "}", ")", ":", "# evaluate the model", "logger", ".", "info", "(", "\"Evaluate...\"", ")", "# - model_metrics", "model_metrics_values", "=", "model", ".", "evaluate", "(", "test",...
Evaluate model's performance on the test-set. # Arguments model: Keras model test: test-dataset. Tuple of inputs `x` and target `y` - `(x, y)`. add_eval_metrics: Additional evaluation metrics to use. Can be a dictionary or a list of functions accepting arguments: `y_true`, `y_predicted`. Alternatively, you can provide names of functions from the `concise.eval_metrics` module. # Returns dictionary with evaluation metrics
[ "Evaluate", "model", "s", "performance", "on", "the", "test", "-", "set", "." ]
python
train
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L375-L454
def service_configuration_check(config): """Perform a sanity check against options for each service check. Arguments: config (obj): A configparser object which holds our configuration. Returns: None if all sanity checks are successfully passed otherwise raises a ValueError exception. """ ipv4_enabled = config.getboolean('daemon', 'ipv4') ipv6_enabled = config.getboolean('daemon', 'ipv6') services = config.sections() # we don't need it during sanity check for services check services.remove('daemon') ip_prefixes = [] for service in services: for option, getter in SERVICE_OPTIONS_TYPE.items(): try: getattr(config, getter)(service, option) except configparser.NoOptionError as error: if option not in SERVICE_OPTIONAL_OPTIONS: raise ValueError(error) except configparser.Error as error: raise ValueError(error) except ValueError as exc: msg = ("invalid data for '{opt}' option in service check " "{name}: {err}" .format(opt=option, name=service, err=exc)) raise ValueError(msg) if (config.get(service, 'on_disabled') != 'withdraw' and config.get(service, 'on_disabled') != 'advertise'): msg = ("'on_disabled' option has invalid value ({val}) for " "service check {name}, 'on_disabled option should be set " "either to 'withdraw' or to 'advertise'" .format(name=service, val=config.get(service, 'on_disabled'))) raise ValueError(msg) ip_prefixes.append(config.get(service, 'ip_prefix')) if not valid_ip_prefix(config.get(service, 'ip_prefix')): msg = ("invalid value ({val}) for 'ip_prefix' option in service " "check {name}. It should be an IP PREFIX in form of " "ip/prefixlen." 
.format(name=service, val=config.get(service, 'ip_prefix'))) raise ValueError(msg) _ip_prefix = ipaddress.ip_network(config.get(service, 'ip_prefix')) if not ipv6_enabled and _ip_prefix.version == 6: raise ValueError("IPv6 support is disabled in " "anycast-healthchecker while there is an IPv6 " "prefix configured for {name} service check" .format(name=service)) if not ipv4_enabled and _ip_prefix.version == 4: raise ValueError("IPv4 support is disabled in " "anycast-healthchecker while there is an IPv4 " "prefix configured for {name} service check" .format(name=service)) cmd = shlex.split(config.get(service, 'check_cmd')) try: proc = subprocess.Popen(cmd) proc.kill() except (OSError, subprocess.SubprocessError) as exc: msg = ("failed to run check command '{cmd}' for service check " "{name}: {err}" .format(name=service, cmd=config.get(service, 'check_cmd'), err=exc)) raise ValueError(msg) occurrences_of_ip_prefixes = Counter(ip_prefixes) for ip_prefix, counter in occurrences_of_ip_prefixes.items(): if counter > 1: raise ValueError("{ip} is used by {c} service checks" .format(ip=ip_prefix, c=counter))
[ "def", "service_configuration_check", "(", "config", ")", ":", "ipv4_enabled", "=", "config", ".", "getboolean", "(", "'daemon'", ",", "'ipv4'", ")", "ipv6_enabled", "=", "config", ".", "getboolean", "(", "'daemon'", ",", "'ipv6'", ")", "services", "=", "confi...
Perform a sanity check against options for each service check. Arguments: config (obj): A configparser object which holds our configuration. Returns: None if all sanity checks are successfully passed otherwise raises a ValueError exception.
[ "Perform", "a", "sanity", "check", "against", "options", "for", "each", "service", "check", "." ]
python
train
RockFeng0/rtsf-web
webuidriver/actions.py
https://github.com/RockFeng0/rtsf-web/blob/ceabcf62ddf1c969a97b5c7a4a4c547198b6ea71/webuidriver/actions.py#L637-L643
def Ctrl(cls, key): """ 在指定元素上执行ctrl组合键事件 @note: key event -> control + key @param key: 如'X' """ element = cls._element() element.send_keys(Keys.CONTROL, key)
[ "def", "Ctrl", "(", "cls", ",", "key", ")", ":", "element", "=", "cls", ".", "_element", "(", ")", "element", ".", "send_keys", "(", "Keys", ".", "CONTROL", ",", "key", ")" ]
在指定元素上执行ctrl组合键事件 @note: key event -> control + key @param key: 如'X'
[ "在指定元素上执行ctrl组合键事件" ]
python
train
saltstack/salt
salt/cloud/clouds/qingcloud.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/qingcloud.py#L204-L232
def avail_locations(call=None): ''' Return a dict of all available locations on the provider with relevant data. CLI Examples: .. code-block:: bash salt-cloud --list-locations my-qingcloud ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) params = { 'action': 'DescribeZones', } items = query(params=params) result = {} for region in items['zone_set']: result[region['zone_id']] = {} for key in region: result[region['zone_id']][key] = six.text_type(region[key]) return result
[ "def", "avail_locations", "(", "call", "=", "None", ")", ":", "if", "call", "==", "'action'", ":", "raise", "SaltCloudSystemExit", "(", "'The avail_locations function must be called with '", "'-f or --function, or with the --list-locations option'", ")", "params", "=", "{",...
Return a dict of all available locations on the provider with relevant data. CLI Examples: .. code-block:: bash salt-cloud --list-locations my-qingcloud
[ "Return", "a", "dict", "of", "all", "available", "locations", "on", "the", "provider", "with", "relevant", "data", "." ]
python
train
idmillington/layout
layout/managers/grid.py
https://github.com/idmillington/layout/blob/c452d1d7a74c9a74f7639c1b49e2a41c4e354bb5/layout/managers/grid.py#L143-L218
def render(self, rect, data): """Draws the cells in grid.""" size = self.get_minimum_size(data) # Find how much extra space we have. extra_width = rect.w - size.x extra_height = rect.h - size.y # Distribute the extra space into the correct rows and columns. if self.scaling_col is None or not 0 <= self.scaling_col < self.cols: width_per_col = extra_width / float(self.cols) col_widths = [ width + width_per_col for width in self.col_widths ] else: col_widths = self.col_widths[:] col_widths[self.scaling_col] += extra_width if self.scaling_row is None or not 0 <= self.scaling_row < self.rows: height_per_row = extra_height / float(self.rows) row_heights = [ height + height_per_row for height in self.row_heights ] else: row_heights = self.row_heights[:] row_heights[self.scaling_row] += extra_height # Find the (start, end) positions of each row and column. col_xs = [] last_x = rect.left + self.outside_margin for width in col_widths: col_xs.append((last_x, last_x + width)) last_x += width + self.margin row_ys = [] last_y = rect.top - self.outside_margin for height in row_heights: row_ys.append((last_y, last_y - height)) last_y -= height + self.margin # Now we can loop over the elements and have them rendered. for col, row, cols, rows, element in self.elements: x_start = col_xs[col][0] y_start = row_ys[row][0] x_end = col_xs[col+cols-1][1] y_end = row_ys[row+rows-1][1] element.render(datatypes.Rectangle( x_start, y_end, x_end-x_start, y_start-y_end ), data) # And finally we can draw the rules def _get_value(array, index, sign): """Returns the value of the index in the given array, where the array (like col_xs and row_ys), consists of start-end pairs of values.""" if index <= 0: # Special case, it is the start of the first range return array[0][0]-self.outside_margin*sign elif index >= len(array): # Special case, it is the end of the last range return array[-1][1]+self.outside_margin*sign else: # Otherwise it is the blend of a start and end. 
return (array[index-1][1] + array[index][0])*0.5 for start_col, start_row, end_col, end_row, width, color in self.rules: x_start = _get_value(col_xs, start_col, 1) y_start = _get_value(row_ys, start_row, -1) x_end = _get_value(col_xs, end_col, 1) y_end = _get_value(row_ys, end_row, -1) data['output'].line( x_start, y_start, x_end, y_end, stroke=color, stroke_width=width )
[ "def", "render", "(", "self", ",", "rect", ",", "data", ")", ":", "size", "=", "self", ".", "get_minimum_size", "(", "data", ")", "# Find how much extra space we have.", "extra_width", "=", "rect", ".", "w", "-", "size", ".", "x", "extra_height", "=", "rec...
Draws the cells in grid.
[ "Draws", "the", "cells", "in", "grid", "." ]
python
train
postmanlabs/httpbin
httpbin/filters.py
https://github.com/postmanlabs/httpbin/blob/f8ec666b4d1b654e4ff6aedd356f510dcac09f83/httpbin/filters.py#L71-L92
def deflate(f, *args, **kwargs): """Deflate Flask Response Decorator.""" data = f(*args, **kwargs) if isinstance(data, Response): content = data.data else: content = data deflater = zlib.compressobj() deflated_data = deflater.compress(content) deflated_data += deflater.flush() if isinstance(data, Response): data.data = deflated_data data.headers['Content-Encoding'] = 'deflate' data.headers['Content-Length'] = str(len(data.data)) return data return deflated_data
[ "def", "deflate", "(", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "data", "=", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "isinstance", "(", "data", ",", "Response", ")", ":", "content", "=", "data", ".", "data", ...
Deflate Flask Response Decorator.
[ "Deflate", "Flask", "Response", "Decorator", "." ]
python
train
lemieuxl/pyGenClean
pyGenClean/DupSNPs/duplicated_snps.py
https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/DupSNPs/duplicated_snps.py#L181-L228
def createFinalTPEDandTFAM(tped, toReadPrefix, prefix, snpToRemove): """Creates the final TPED and TFAM. :param tped: a representation of the ``tped`` of duplicated markers. :param toReadPrefix: the prefix of the unique files. :param prefix: the prefix of the output files. :param snpToRemove: the markers to remove. :type tped: numpy.array :type toReadPrefix: str :type prefix: str :type snpToRemove: set Starts by copying the unique markers' ``tfam`` file to ``prefix.final.tfam``. Then, it copies the unique markers' ``tped`` file, in which the chosen markers will be appended. The final data set will include the unique markers, the chosen markers which were completed, and the problematic duplicated markers (for further analysis). The markers that were used to complete the chosen ones are not present in the final data set. """ # First, copying the tfam try: shutil.copy(toReadPrefix + ".tfam", prefix + ".final.tfam") except IOError: msg = "%(toReadPrefix)s.tfam: can't copy file to " \ "%(prefix)s.final.tfam" % locals() raise ProgramError(msg) # Next, copy the tped, and append at the end try: shutil.copy(toReadPrefix + ".tped", prefix + ".final.tped") except IOError: msg = "%(toReadPrefix)s.tped: can't copy fil to " \ "%(prefix)s.final.tped" % locals() raise ProgramError(msg) tpedFile = None try: tpedFile = open(prefix + ".final.tped", "a") except IOError: msg = "%(prefix)s.final.tped: can't append to file" % locals() raise ProgramError(msg) for i, row in enumerate(tped): if i not in snpToRemove: print >>tpedFile, "\t".join(row) tpedFile.close()
[ "def", "createFinalTPEDandTFAM", "(", "tped", ",", "toReadPrefix", ",", "prefix", ",", "snpToRemove", ")", ":", "# First, copying the tfam", "try", ":", "shutil", ".", "copy", "(", "toReadPrefix", "+", "\".tfam\"", ",", "prefix", "+", "\".final.tfam\"", ")", "ex...
Creates the final TPED and TFAM. :param tped: a representation of the ``tped`` of duplicated markers. :param toReadPrefix: the prefix of the unique files. :param prefix: the prefix of the output files. :param snpToRemove: the markers to remove. :type tped: numpy.array :type toReadPrefix: str :type prefix: str :type snpToRemove: set Starts by copying the unique markers' ``tfam`` file to ``prefix.final.tfam``. Then, it copies the unique markers' ``tped`` file, in which the chosen markers will be appended. The final data set will include the unique markers, the chosen markers which were completed, and the problematic duplicated markers (for further analysis). The markers that were used to complete the chosen ones are not present in the final data set.
[ "Creates", "the", "final", "TPED", "and", "TFAM", "." ]
python
train
nocarryr/python-dispatch
pydispatch/dispatch.py
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/dispatch.py#L236-L251
def emit(self, name, *args, **kwargs): """Dispatches an event to any subscribed listeners Note: If a listener returns :obj:`False`, the event will stop dispatching to other listeners. Any other return value is ignored. Args: name (str): The name of the :class:`Event` to dispatch *args (Optional): Positional arguments to be sent to listeners **kwargs (Optional): Keyword arguments to be sent to listeners """ e = self.__property_events.get(name) if e is None: e = self.__events[name] return e(*args, **kwargs)
[ "def", "emit", "(", "self", ",", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "e", "=", "self", ".", "__property_events", ".", "get", "(", "name", ")", "if", "e", "is", "None", ":", "e", "=", "self", ".", "__events", "[", "name"...
Dispatches an event to any subscribed listeners Note: If a listener returns :obj:`False`, the event will stop dispatching to other listeners. Any other return value is ignored. Args: name (str): The name of the :class:`Event` to dispatch *args (Optional): Positional arguments to be sent to listeners **kwargs (Optional): Keyword arguments to be sent to listeners
[ "Dispatches", "an", "event", "to", "any", "subscribed", "listeners" ]
python
train
xu2243051/easyui-menu
easyui/mixins/view_mixins.py
https://github.com/xu2243051/easyui-menu/blob/4da0b50cf2d3ddb0f1ec7a4da65fd3c4339f8dfb/easyui/mixins/view_mixins.py#L21-L27
def get_template_names(self): """ datagrid的默认模板 """ names = super(EasyUIDatagridView, self).get_template_names() names.append('easyui/datagrid.html') return names
[ "def", "get_template_names", "(", "self", ")", ":", "names", "=", "super", "(", "EasyUIDatagridView", ",", "self", ")", ".", "get_template_names", "(", ")", "names", ".", "append", "(", "'easyui/datagrid.html'", ")", "return", "names" ]
datagrid的默认模板
[ "datagrid的默认模板" ]
python
valid
yamcs/yamcs-python
yamcs-client/examples/archive_retrieval.py
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/examples/archive_retrieval.py#L38-L47
def iterate_specific_event_range(): """Count the number of events in a specific range.""" now = datetime.utcnow() start = now - timedelta(hours=1) total = 0 for event in archive.list_events(start=start, stop=now): total += 1 # print(event) print('Found', total, 'events in range')
[ "def", "iterate_specific_event_range", "(", ")", ":", "now", "=", "datetime", ".", "utcnow", "(", ")", "start", "=", "now", "-", "timedelta", "(", "hours", "=", "1", ")", "total", "=", "0", "for", "event", "in", "archive", ".", "list_events", "(", "sta...
Count the number of events in a specific range.
[ "Count", "the", "number", "of", "events", "in", "a", "specific", "range", "." ]
python
train
DataMedSci/mcpartools
mcpartools/mcengine/shieldhit.py
https://github.com/DataMedSci/mcpartools/blob/84f869094d05bf70f09e8aaeca671ddaa1c56ec4/mcpartools/mcengine/shieldhit.py#L225-L244
def _rewrite_paths_in_file(config_file, paths_to_replace): """ Rewrite paths in config files to match convention job_xxxx/symlink Requires path to run_xxxx/input/config_file and a list of paths_to_replace """ lines = [] # make a copy of config import shutil shutil.copyfile(config_file, str(config_file + '_original')) with open(config_file) as infile: for line in infile: for old_path in paths_to_replace: if old_path in line: new_path = os.path.split(old_path)[-1] line = line.replace(old_path, new_path) logger.debug("Changed path {0} ---> {1} in file {2}".format(old_path, new_path, config_file)) lines.append(line) with open(config_file, 'w') as outfile: for line in lines: outfile.write(line)
[ "def", "_rewrite_paths_in_file", "(", "config_file", ",", "paths_to_replace", ")", ":", "lines", "=", "[", "]", "# make a copy of config", "import", "shutil", "shutil", ".", "copyfile", "(", "config_file", ",", "str", "(", "config_file", "+", "'_original'", ")", ...
Rewrite paths in config files to match convention job_xxxx/symlink Requires path to run_xxxx/input/config_file and a list of paths_to_replace
[ "Rewrite", "paths", "in", "config", "files", "to", "match", "convention", "job_xxxx", "/", "symlink", "Requires", "path", "to", "run_xxxx", "/", "input", "/", "config_file", "and", "a", "list", "of", "paths_to_replace" ]
python
train
flo-compbio/goparser
goparser/parser.py
https://github.com/flo-compbio/goparser/blob/5e27d7d04a26a70a1d9dc113357041abff72be3f/goparser/parser.py#L246-L286
def get_term_by_name(self, name): """Get the GO term with the given GO term name. If the given name is not associated with any GO term, the function will search for it among synonyms. Parameters ---------- name: str The name of the GO term. Returns ------- GOTerm The GO term with the given name. Raises ------ ValueError If the given name is found neither among the GO term names, nor among synonyms. """ term = None func_name = 'get_term_by_name' try: term = self.terms[self._name2id[name]] except KeyError: try: term = self.terms[self._syn2id[name]] except KeyError: pass else: logger.warning( '%s: GO term name "%s" is a synonym for "%s".', func_name, name, term.name) if term is None: raise ValueError('%s : GO term name "%s" not found!' % (func_name, name)) return term
[ "def", "get_term_by_name", "(", "self", ",", "name", ")", ":", "term", "=", "None", "func_name", "=", "'get_term_by_name'", "try", ":", "term", "=", "self", ".", "terms", "[", "self", ".", "_name2id", "[", "name", "]", "]", "except", "KeyError", ":", "...
Get the GO term with the given GO term name. If the given name is not associated with any GO term, the function will search for it among synonyms. Parameters ---------- name: str The name of the GO term. Returns ------- GOTerm The GO term with the given name. Raises ------ ValueError If the given name is found neither among the GO term names, nor among synonyms.
[ "Get", "the", "GO", "term", "with", "the", "given", "GO", "term", "name", "." ]
python
train
ulf1/oxyba
oxyba/leland94.py
https://github.com/ulf1/oxyba/blob/b3043116050de275124365cb11e7df91fb40169d/oxyba/leland94.py#L2-L225
def leland94(V, s, r, a, t, C=None, d=None, PosEq=False): """Leland94 Capital Structure model, Corporate Bond valuation model Parameters: ----------- V : float Asset Value of the unlevered firm s : float Volatility s of the asset value V of the unlevered firm r : float Risk free rate a : float Bankruptcy cost t : float Corporate tax rate C : float (option, default C=None) The Coupon in $ per $100. - If C>0 then exogenous bancruptcy case, i.e. a failure to pay credit event is triggered when the firm cannot pay the coupon C - If C=None then an endogenous bankcruptcy case, i.e. the management can set endogenously an 'optimal' coupon: min VB, max W=E+D, E>=0 (see pp.1222). The internally computed 'optimal' coupon is retured as output argument. d : float (optional, default d=None) Required dividend by investors, or resp the net cash payout by the firm. - if d=None then 100% retained profits - if d>0 then d is the fixed dividend rate proportional to the firm's asset value. The intermediate result 'X' dependends on 'd'. PosEq : bool (optional, default PosEq=False) If True, then enforce a positive net worth, i.e. obligors demand a "protected bond covenant with positive net worth requirement" (pp.1233) [dt. Positive Eigenkapitalbasis] Returns: -------- D : float Value of debt (p.1219) [dt. Wert des Fremdkapital] E : float Value of equity Wert (p.1221) [dt. Eigenkapitalwert] W : float Value of levered company, or Total value of the firm (p.1221) [dt. Firmenwert] W = V + T - B W = D + E T : float Value of tax benefit (p.1220) [dt. Steuervorteil] B : float Value of bankruptcy costs (p.1220) [dt. Insolvenzkosten] VB : float Level of bankruptcy, i.e. the asset value V at which bankruptcy is declared [dt. Restwert bei Insolvenz] - if PosEq=False then formula in pp.1222 - if PosEq=True then the covenant "VB - D = 0" is applied to protect creditors (pp.1233) PV : float PV of $1 if bankruptcy (p.1219) [dt. 
Kapitalwert 1 GE bei Insolvenz] Returns (shiny financial metrics): ---------------------------------- lr : float Leverage Ratio [dt. Kredithebel] i.e. value of debt divided by value of levered firm value D / W yld : float Yield on Debt [dt. Fremdkapitalrendite] i.e. coupon in $ divided by value of debt C / D sprd : float Yield Spread in bp [dt. Kreditspread in bp] i.e. yield on debt minus riskfree rate converted to bps (C/D - r) * 10000 Returns (intermediate results): ------------------------------- X : float Net Cash Payout X will differ depending on the dividend policy. - If d=None, then 100% retained profits (p.1218) [dt. Thesaurierend] - If d>0, then fixed dividend per firm value (p.1241) [dt. Prozentuale Dividendenausschüttung] (intermediate result) C : float The Coupon in $ per $100. - If input argument is C>0 then the input argument C is returned as is (exogenous brankruptcy case). - If input argument C=None, then the internally computed 'optimal' coupon the the endogenous brankruptcy case is returned (pp.1222) (intermediate result) A : float Annuity value (Wert der Annuitaet), "A=C/r", The coupon (in $) divded by the risk-free rate. (intermediate result) Examples: --------- PosEq: No (False), Pos Net Worth covenant (True) Coupon: Endo (C=None), Exo (C>0) Source: ------- Leland, Hayne E. 1994. "Corporate Debt Value, Bond Covenants, and Optimal Capital Structure." The Journal of Finance 49 (4): 1213–52. https://doi.org/10.1111/j.1540-6261.1994.tb02452.x. 
""" # subfunction for def netcashpayout_by_dividend(r, d, s): """net cash payout proportional to the firm's asset value for a given required dividend rate (p.1241) """ import math s2 = s * s tmp = r - d - 0.5 * s2 return (tmp + math.sqrt(tmp * tmp + 2.0 * s2 * r)) / s2 def optimal_coupon(V, r, a, t, X): """Coupon for the endogenous bankcruptcy case (pp.1222)""" m = ((1.0 - t) * X / (r * (1.0 + X)))**X / (1.0 + X) h = (1.0 + X + a * (1 - t) * X / t) * m return V * ((1.0 + X) * h)**(-1.0 / X) def positivenetworth_target(VB, V, a, A, X): """protected bond covenant with positive net worth requirement""" return VB - A - ((1.0 - a) * VB - A) * (VB / V)**X # (1a) Net Cash Payout 'X' if d is None: # Net cash Payout if 100% retained profits (p.1218) X = (2.0 * r) / (s * s) else: # net cash payout proportional to the firm's asset value # for a given required dividend rate (p.1241) X = netcashpayout_by_dividend(r, d, s) # (1b) Optimal coupon of the endogenous bankruptcy # case (p.1222ff.) if C is None: C = optimal_coupon(V, r, a, t, X) # (1c) Wert der Annuitaet A = C / r # (2a) Level of bankruptcy VB (pp.1222) VB = (1.0 - t) * C / (r + 0.5 * s * s) # (2b) protected bond covenant with positive net worth # requirement (pp.1233) if PosEq: from scipy.optimize import fsolve VB = fsolve(func=positivenetworth_target, x0=VB, args=(V, a, A, X)) VB = float(VB) # (3a) PV of $1 if bankruptcy (p.1219) PV = (VB / V)**X # (3b) Value of debt (p.1219) D = A + ((1.0 - a) * VB - A) * PV # (3c) Value of bankruptcy costs (p.1220) B = a * VB * PV # (3d) Value of tax benefit (p.1220) T = t * A * (1.0 - PV) # (3e) Total value of the firm, or Value of levered company (p.1221) W = V + T - B # (3f) Value of equity (p.1221) E = W - D # (4a) Leverage Ratio lr = D / W # (4b) Yield on Debt yld = C / D # (4c) Yield Spread in bp sprd = (yld - r) * 10000.0 # return results return D, E, W, T, B, VB, PV, lr, yld, sprd, X, C, A
[ "def", "leland94", "(", "V", ",", "s", ",", "r", ",", "a", ",", "t", ",", "C", "=", "None", ",", "d", "=", "None", ",", "PosEq", "=", "False", ")", ":", "# subfunction for", "def", "netcashpayout_by_dividend", "(", "r", ",", "d", ",", "s", ")", ...
Leland94 Capital Structure model, Corporate Bond valuation model Parameters: ----------- V : float Asset Value of the unlevered firm s : float Volatility s of the asset value V of the unlevered firm r : float Risk free rate a : float Bankruptcy cost t : float Corporate tax rate C : float (option, default C=None) The Coupon in $ per $100. - If C>0 then exogenous bancruptcy case, i.e. a failure to pay credit event is triggered when the firm cannot pay the coupon C - If C=None then an endogenous bankcruptcy case, i.e. the management can set endogenously an 'optimal' coupon: min VB, max W=E+D, E>=0 (see pp.1222). The internally computed 'optimal' coupon is retured as output argument. d : float (optional, default d=None) Required dividend by investors, or resp the net cash payout by the firm. - if d=None then 100% retained profits - if d>0 then d is the fixed dividend rate proportional to the firm's asset value. The intermediate result 'X' dependends on 'd'. PosEq : bool (optional, default PosEq=False) If True, then enforce a positive net worth, i.e. obligors demand a "protected bond covenant with positive net worth requirement" (pp.1233) [dt. Positive Eigenkapitalbasis] Returns: -------- D : float Value of debt (p.1219) [dt. Wert des Fremdkapital] E : float Value of equity Wert (p.1221) [dt. Eigenkapitalwert] W : float Value of levered company, or Total value of the firm (p.1221) [dt. Firmenwert] W = V + T - B W = D + E T : float Value of tax benefit (p.1220) [dt. Steuervorteil] B : float Value of bankruptcy costs (p.1220) [dt. Insolvenzkosten] VB : float Level of bankruptcy, i.e. the asset value V at which bankruptcy is declared [dt. Restwert bei Insolvenz] - if PosEq=False then formula in pp.1222 - if PosEq=True then the covenant "VB - D = 0" is applied to protect creditors (pp.1233) PV : float PV of $1 if bankruptcy (p.1219) [dt. Kapitalwert 1 GE bei Insolvenz] Returns (shiny financial metrics): ---------------------------------- lr : float Leverage Ratio [dt. 
Kredithebel] i.e. value of debt divided by value of levered firm value D / W yld : float Yield on Debt [dt. Fremdkapitalrendite] i.e. coupon in $ divided by value of debt C / D sprd : float Yield Spread in bp [dt. Kreditspread in bp] i.e. yield on debt minus riskfree rate converted to bps (C/D - r) * 10000 Returns (intermediate results): ------------------------------- X : float Net Cash Payout X will differ depending on the dividend policy. - If d=None, then 100% retained profits (p.1218) [dt. Thesaurierend] - If d>0, then fixed dividend per firm value (p.1241) [dt. Prozentuale Dividendenausschüttung] (intermediate result) C : float The Coupon in $ per $100. - If input argument is C>0 then the input argument C is returned as is (exogenous brankruptcy case). - If input argument C=None, then the internally computed 'optimal' coupon the the endogenous brankruptcy case is returned (pp.1222) (intermediate result) A : float Annuity value (Wert der Annuitaet), "A=C/r", The coupon (in $) divded by the risk-free rate. (intermediate result) Examples: --------- PosEq: No (False), Pos Net Worth covenant (True) Coupon: Endo (C=None), Exo (C>0) Source: ------- Leland, Hayne E. 1994. "Corporate Debt Value, Bond Covenants, and Optimal Capital Structure." The Journal of Finance 49 (4): 1213–52. https://doi.org/10.1111/j.1540-6261.1994.tb02452.x.
[ "Leland94", "Capital", "Structure", "model", "Corporate", "Bond", "valuation", "model" ]
python
train
mozilla/taar
taar/flask_app.py
https://github.com/mozilla/taar/blob/4002eb395f0b7ad837f1578e92d590e2cf82bdca/taar/flask_app.py#L41-L86
def flaskrun(app, default_host="127.0.0.1", default_port="8000"): """ Takes a flask.Flask instance and runs it. Parses command-line flags to configure the app. """ # Set up the command-line options parser = optparse.OptionParser() parser.add_option( "-H", "--host", help="Hostname of the Flask app " + "[default %s]" % default_host, default=default_host, ) parser.add_option( "-P", "--port", help="Port for the Flask app " + "[default %s]" % default_port, default=default_port, ) # Two options useful for debugging purposes, but # a bit dangerous so not exposed in the help message. parser.add_option( "-d", "--debug", action="store_true", dest="debug", help=optparse.SUPPRESS_HELP ) parser.add_option( "-p", "--profile", action="store_true", dest="profile", help=optparse.SUPPRESS_HELP, ) options, _ = parser.parse_args() # If the user selects the profiling option, then we need # to do a little extra setup if options.profile: from werkzeug.contrib.profiler import ProfilerMiddleware app.config["PROFILE"] = True app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30]) options.debug = True app.run(debug=options.debug, host=options.host, port=int(options.port))
[ "def", "flaskrun", "(", "app", ",", "default_host", "=", "\"127.0.0.1\"", ",", "default_port", "=", "\"8000\"", ")", ":", "# Set up the command-line options", "parser", "=", "optparse", ".", "OptionParser", "(", ")", "parser", ".", "add_option", "(", "\"-H\"", "...
Takes a flask.Flask instance and runs it. Parses command-line flags to configure the app.
[ "Takes", "a", "flask", ".", "Flask", "instance", "and", "runs", "it", ".", "Parses", "command", "-", "line", "flags", "to", "configure", "the", "app", "." ]
python
train
LISE-B26/pylabcontrol
build/lib/pylabcontrol/gui/windows_and_widgets/widgets.py
https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/gui/windows_and_widgets/widgets.py#L416-L421
def sizeHint(self): """ gives qt a starting point for widget size during window resizing """ w, h = self.get_width_height() return QtCore.QSize(w, h)
[ "def", "sizeHint", "(", "self", ")", ":", "w", ",", "h", "=", "self", ".", "get_width_height", "(", ")", "return", "QtCore", ".", "QSize", "(", "w", ",", "h", ")" ]
gives qt a starting point for widget size during window resizing
[ "gives", "qt", "a", "starting", "point", "for", "widget", "size", "during", "window", "resizing" ]
python
train
shad7/tvrenamer
tvrenamer/core/formatter.py
https://github.com/shad7/tvrenamer/blob/7fb59cb02669357e73b7acb92dcb6d74fdff4654/tvrenamer/core/formatter.py#L153-L239
def _make_valid_filename(value): """Takes a string and makes it into a valid filename. replaces accented characters with ASCII equivalent, and removes characters that cannot be converted sensibly to ASCII. additional characters that will removed. This will not touch the extension separator: >>> _make_valid_filename("T.est.avi") 'T_est.avi' """ sysname = platform.system() # If the filename starts with a . prepend it with an underscore, so it # doesn't become hidden. # This is done before calling splitext to handle filename of ".", as # splitext acts differently in python 2.5 and 2.6 - 2.5 returns ('', '.') # and 2.6 returns ('.', ''), so rather than special case '.', this # special-cases all files starting with "." equally (since dotfiles have # no extension) if value.startswith('.'): value = cfg.CONF.replacement_character + value # Treat extension seperatly value, extension = os.path.splitext(value) # Remove any null bytes value = value.replace('\0', '') # Blacklist of characters if sysname == 'Darwin': # : is technically allowed, but Finder will treat it as / and will # generally cause weird behaviour, so treat it as invalid. blacklist = r'/:' elif sysname in ['Linux', 'FreeBSD']: blacklist = r'/' else: # platform.system docs say it could also return "Windows" or "Java". # Failsafe and use Windows sanitisation for Java, as it could be any # operating system. blacklist = r'\/:*?\"<>|' # Append custom blacklisted characters blacklist += cfg.CONF.filename_character_blacklist # Replace every blacklisted character with a underscore value = re.sub('[%s]' % re.escape(blacklist), cfg.CONF.replacement_character, value) # Remove any trailing whitespace value = value.strip() # There are a bunch of filenames that are not allowed on Windows. 
# As with character blacklist, treat non Darwin/Linux platforms as Windows if sysname not in ['Darwin', 'Linux']: invalid_filenames = ['CON', 'PRN', 'AUX', 'NUL', 'COM1', 'COM2', 'COM3', 'COM4', 'COM5', 'COM6', 'COM7', 'COM8', 'COM9', 'LPT1', 'LPT2', 'LPT3', 'LPT4', 'LPT5', 'LPT6', 'LPT7', 'LPT8', 'LPT9'] if value in invalid_filenames: value = cfg.CONF.replacement_character + value # Replace accented characters with ASCII equivalent value = encodeutils.safe_encode(value, encoding='ascii', errors='ignore') extension = encodeutils.safe_encode(extension, encoding='ascii', errors='ignore') # Truncate filenames to valid/sane length. # NTFS is limited to 255 characters, HFS+ and EXT3 don't seem to have # limits, FAT32 is 254. I doubt anyone will take issue with losing that # one possible character, and files over 254 are pointlessly unweidly max_len = 254 if len(value + extension) > max_len: if len(extension) > len(value): # Truncate extension instead of filename, no extension should be # this long.. new_length = max_len - len(value) extension = extension[:new_length] else: # File name is longer than extension, truncate filename. new_length = max_len - len(extension) value = value[:new_length] return encodeutils.safe_decode(value + extension, incoming='ascii')
[ "def", "_make_valid_filename", "(", "value", ")", ":", "sysname", "=", "platform", ".", "system", "(", ")", "# If the filename starts with a . prepend it with an underscore, so it", "# doesn't become hidden.", "# This is done before calling splitext to handle filename of \".\", as", ...
Takes a string and makes it into a valid filename. replaces accented characters with ASCII equivalent, and removes characters that cannot be converted sensibly to ASCII. additional characters that will removed. This will not touch the extension separator: >>> _make_valid_filename("T.est.avi") 'T_est.avi'
[ "Takes", "a", "string", "and", "makes", "it", "into", "a", "valid", "filename", "." ]
python
train
msztolcman/versionner
versionner/commands/files_management.py
https://github.com/msztolcman/versionner/blob/78fca02859e3e3eb71c9eb7ea230758944177c54/versionner/commands/files_management.py#L14-L76
def update_project_files(cfg, proj_version): """ Update version string in project files :rtype : dict :param cfg:project configuration :param proj_version:current version :return:dict :raise ValueError: """ counters = {'files': 0, 'changes': 0} for project_file in cfg.files: if not project_file.file.exists(): print("File \"%s\" not found" % project_file.filename, file=sys.stderr) continue # prepare data date_format = project_file.date_format or cfg.date_format rxp = re.compile(project_file.search, project_file.search_flags) replace = project_file.replace % { "date": time.strftime(date_format), "major": proj_version.major, "minor": proj_version.minor, "patch": proj_version.patch, "prerelease": proj_version.prerelease, "version": str(proj_version), "build": proj_version.build, } # update project files with \ project_file.file.open(mode="r", encoding=project_file.encoding) as fh_in, \ tempfile.NamedTemporaryFile(mode="w", encoding=project_file.encoding, delete=False) as fh_out: if project_file.match == 'line': changes = 0 for line in fh_in: (line, cnt) = rxp.subn(replace, line) if cnt: changes += cnt fh_out.write(line) if changes: counters['files'] += 1 counters['changes'] += changes elif project_file.match == 'file': data = fh_in.read() (data, cnt) = rxp.subn(replace, data) if cnt: counters['files'] += 1 counters['changes'] += cnt fh_out.write(data) else: raise ConfigError("Unknown match type: \"%s\"" % project_file.match) fh_out.close() shutil.copystat(project_file.filename, fh_out.name) pathlib.Path(fh_out.name).rename(project_file.filename) return counters
[ "def", "update_project_files", "(", "cfg", ",", "proj_version", ")", ":", "counters", "=", "{", "'files'", ":", "0", ",", "'changes'", ":", "0", "}", "for", "project_file", "in", "cfg", ".", "files", ":", "if", "not", "project_file", ".", "file", ".", ...
Update version string in project files :rtype : dict :param cfg:project configuration :param proj_version:current version :return:dict :raise ValueError:
[ "Update", "version", "string", "in", "project", "files" ]
python
train
cloudnull/turbolift
turbolift/clouderator/actions.py
https://github.com/cloudnull/turbolift/blob/da33034e88959226529ce762e2895e6f6356c448/turbolift/clouderator/actions.py#L346-L384
def _list_getter(self, uri, headers, last_obj=None, spr=False): """Get a list of all objects in a container. :param uri: :param headers: :return list: :param spr: "single page return" Limit the returned data to one page :type spr: ``bol`` """ # Quote the file path. base_path = marked_path = ('%s?limit=10000&format=json' % uri.path) if last_obj: marked_path = self._last_marker( base_path=base_path, last_object=cloud_utils.quoter(last_obj) ) file_list = self._obj_index( uri=uri, base_path=base_path, marked_path=marked_path, headers=headers, spr=spr ) LOG.debug( 'Found [ %d ] entries(s) at [ %s ]', len(file_list), uri.geturl() ) if spr: return file_list else: return cloud_utils.unique_list_dicts( dlist=file_list, key='name' )
[ "def", "_list_getter", "(", "self", ",", "uri", ",", "headers", ",", "last_obj", "=", "None", ",", "spr", "=", "False", ")", ":", "# Quote the file path.", "base_path", "=", "marked_path", "=", "(", "'%s?limit=10000&format=json'", "%", "uri", ".", "path", ")...
Get a list of all objects in a container. :param uri: :param headers: :return list: :param spr: "single page return" Limit the returned data to one page :type spr: ``bol``
[ "Get", "a", "list", "of", "all", "objects", "in", "a", "container", "." ]
python
train
pysal/giddy
giddy/markov.py
https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/markov.py#L1212-L1333
def spillover(self, quadrant=1, neighbors_on=False): """ Detect spillover locations for diffusion in LISA Markov. Parameters ---------- quadrant : int which quadrant in the scatterplot should form the core of a cluster. neighbors_on : binary If false, then only the 1st order neighbors of a core location are included in the cluster. If true, neighbors of cluster core 1st order neighbors are included in the cluster. Returns ------- results : dictionary two keys - values pairs: 'components' - array (n, t) values are integer ids (starting at 1) indicating which component/cluster observation i in period t belonged to. 'spillover' - array (n, t-1) binary values indicating if the location was a spill-over location that became a new member of a previously existing cluster. Examples -------- >>> import libpysal >>> from giddy.markov import LISA_Markov >>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv")) >>> years = list(range(1929, 2010)) >>> pci = np.array([f.by_col[str(y)] for y in years]).transpose() >>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read() >>> np.random.seed(10) >>> lm_random = LISA_Markov(pci, w, permutations=99) >>> r = lm_random.spillover() >>> (r['components'][:, 12] > 0).sum() 17 >>> (r['components'][:, 13]>0).sum() 23 >>> (r['spill_over'][:,12]>0).sum() 6 Including neighbors of core neighbors >>> rn = lm_random.spillover(neighbors_on=True) >>> (rn['components'][:, 12] > 0).sum() 26 >>> (rn["components"][:, 13] > 0).sum() 34 >>> (rn["spill_over"][:, 12] > 0).sum() 8 """ n, k = self.q.shape if self.permutations: spill_over = np.zeros((n, k - 1)) components = np.zeros((n, k)) i2id = {} # handle string keys for key in list(self.w.neighbors.keys()): idx = self.w.id2i[key] i2id[idx] = key sig_lisas = (self.q == quadrant) \ * (self.p_values <= self.significance_level) sig_ids = [np.nonzero( sig_lisas[:, i])[0].tolist() for i in range(k)] neighbors = self.w.neighbors for t in range(k - 1): s1 = sig_ids[t] s2 = sig_ids[t + 
1] g1 = Graph(undirected=True) for i in s1: for neighbor in neighbors[i2id[i]]: g1.add_edge(i2id[i], neighbor, 1.0) if neighbors_on: for nn in neighbors[neighbor]: g1.add_edge(neighbor, nn, 1.0) components1 = g1.connected_components(op=gt) components1 = [list(c.nodes) for c in components1] g2 = Graph(undirected=True) for i in s2: for neighbor in neighbors[i2id[i]]: g2.add_edge(i2id[i], neighbor, 1.0) if neighbors_on: for nn in neighbors[neighbor]: g2.add_edge(neighbor, nn, 1.0) components2 = g2.connected_components(op=gt) components2 = [list(c.nodes) for c in components2] c2 = [] c1 = [] for c in components2: c2.extend(c) for c in components1: c1.extend(c) new_ids = [j for j in c2 if j not in c1] spill_ids = [] for j in new_ids: # find j's component in period 2 cj = [c for c in components2 if j in c][0] # for members of j's component in period 2, check if they # belonged to any components in period 1 for i in cj: if i in c1: spill_ids.append(j) break for spill_id in spill_ids: id = self.w.id2i[spill_id] spill_over[id, t] = 1 for c, component in enumerate(components1): for i in component: ii = self.w.id2i[i] components[ii, t] = c + 1 results = {} results['components'] = components results['spill_over'] = spill_over return results else: return None
[ "def", "spillover", "(", "self", ",", "quadrant", "=", "1", ",", "neighbors_on", "=", "False", ")", ":", "n", ",", "k", "=", "self", ".", "q", ".", "shape", "if", "self", ".", "permutations", ":", "spill_over", "=", "np", ".", "zeros", "(", "(", ...
Detect spillover locations for diffusion in LISA Markov. Parameters ---------- quadrant : int which quadrant in the scatterplot should form the core of a cluster. neighbors_on : binary If false, then only the 1st order neighbors of a core location are included in the cluster. If true, neighbors of cluster core 1st order neighbors are included in the cluster. Returns ------- results : dictionary two keys - values pairs: 'components' - array (n, t) values are integer ids (starting at 1) indicating which component/cluster observation i in period t belonged to. 'spillover' - array (n, t-1) binary values indicating if the location was a spill-over location that became a new member of a previously existing cluster. Examples -------- >>> import libpysal >>> from giddy.markov import LISA_Markov >>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv")) >>> years = list(range(1929, 2010)) >>> pci = np.array([f.by_col[str(y)] for y in years]).transpose() >>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read() >>> np.random.seed(10) >>> lm_random = LISA_Markov(pci, w, permutations=99) >>> r = lm_random.spillover() >>> (r['components'][:, 12] > 0).sum() 17 >>> (r['components'][:, 13]>0).sum() 23 >>> (r['spill_over'][:,12]>0).sum() 6 Including neighbors of core neighbors >>> rn = lm_random.spillover(neighbors_on=True) >>> (rn['components'][:, 12] > 0).sum() 26 >>> (rn["components"][:, 13] > 0).sum() 34 >>> (rn["spill_over"][:, 12] > 0).sum() 8
[ "Detect", "spillover", "locations", "for", "diffusion", "in", "LISA", "Markov", "." ]
python
train
senaite/senaite.core
bika/lims/browser/publish/emailview.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/publish/emailview.py#L605-L645
def get_recipients(self, ar): """Return the AR recipients in the same format like the AR Report expects in the records field `Recipients` """ plone_utils = api.get_tool("plone_utils") def is_email(email): if not plone_utils.validateSingleEmailAddress(email): return False return True def recipient_from_contact(contact): if not contact: return None email = contact.getEmailAddress() return { "UID": api.get_uid(contact), "Username": contact.getUsername(), "Fullname": to_utf8(contact.Title()), "EmailAddress": email, } def recipient_from_email(email): if not is_email(email): return None return { "UID": "", "Username": "", "Fullname": email, "EmailAddress": email, } # Primary Contacts to = filter(None, [recipient_from_contact(ar.getContact())]) # CC Contacts cc = filter(None, map(recipient_from_contact, ar.getCCContact())) # CC Emails cc_emails = map(lambda x: x.strip(), ar.getCCEmails().split(",")) cc_emails = filter(None, map(recipient_from_email, cc_emails)) return to + cc + cc_emails
[ "def", "get_recipients", "(", "self", ",", "ar", ")", ":", "plone_utils", "=", "api", ".", "get_tool", "(", "\"plone_utils\"", ")", "def", "is_email", "(", "email", ")", ":", "if", "not", "plone_utils", ".", "validateSingleEmailAddress", "(", "email", ")", ...
Return the AR recipients in the same format like the AR Report expects in the records field `Recipients`
[ "Return", "the", "AR", "recipients", "in", "the", "same", "format", "like", "the", "AR", "Report", "expects", "in", "the", "records", "field", "Recipients" ]
python
train
SpectoLabs/hoverpy
hoverpy/hp.py
https://github.com/SpectoLabs/hoverpy/blob/e153ec57f80634019d827d378f184c01fedc5a0e/hoverpy/hp.py#L292-L304
def __writepid(self, pid): """ HoverFly fails to launch if it's already running on the same ports. So we have to keep track of them using temp files with the proxy port and admin port, containing the processe's PID. """ import tempfile d = tempfile.gettempdir() name = os.path.join(d, "hoverpy.%i.%i"%(self._proxyPort, self._adminPort)) with open(name, 'w') as f: f.write(str(pid)) logging.debug("writing to %s"%name)
[ "def", "__writepid", "(", "self", ",", "pid", ")", ":", "import", "tempfile", "d", "=", "tempfile", ".", "gettempdir", "(", ")", "name", "=", "os", ".", "path", ".", "join", "(", "d", ",", "\"hoverpy.%i.%i\"", "%", "(", "self", ".", "_proxyPort", ","...
HoverFly fails to launch if it's already running on the same ports. So we have to keep track of them using temp files with the proxy port and admin port, containing the processe's PID.
[ "HoverFly", "fails", "to", "launch", "if", "it", "s", "already", "running", "on", "the", "same", "ports", ".", "So", "we", "have", "to", "keep", "track", "of", "them", "using", "temp", "files", "with", "the", "proxy", "port", "and", "admin", "port", "c...
python
train
kimvais/ike
ike/protocol.py
https://github.com/kimvais/ike/blob/4a5622c878a43a3d3cc19c54aa7cc7be29318eae/ike/protocol.py#L130-L173
def init_recv(self): """ Parses the IKE_INIT response packet received from Responder. Assigns the correct values of rSPI and Nr Calculates Diffie-Hellman exchange and assigns all keys to self. """ assert len(self.packets) == 2 packet = self.packets[-1] for p in packet.payloads: if p._type == payloads.Type.Nr: self.Nr = p._data logger.debug(u"Responder nonce {}".format(binascii.hexlify(self.Nr))) elif p._type == payloads.Type.KE: int_from_bytes = int.from_bytes(p.kex_data, 'big') self.diffie_hellman.derivate(int_from_bytes) else: logger.debug('Ignoring: {}'.format(p)) logger.debug('Nonce I: {}\nNonce R: {}'.format(binascii.hexlify(self.Ni), binascii.hexlify(self.Nr))) logger.debug('DH shared secret: {}'.format(binascii.hexlify(self.diffie_hellman.shared_secret))) SKEYSEED = prf(self.Ni + self.Nr, self.diffie_hellman.shared_secret) logger.debug(u"SKEYSEED is: {0!r:s}\n".format(binascii.hexlify(SKEYSEED))) keymat = prfplus(SKEYSEED, (self.Ni + self.Nr + to_bytes(self.iSPI) + to_bytes(self.rSPI)), 32 * 7) #3 * 32 + 2 * 32 + 2 * 32) logger.debug("Got %d bytes of key material" % len(keymat)) # get keys from material ( self.SK_d, self.SK_ai, self.SK_ar, self.SK_ei, self.SK_er, self.SK_pi, self.SK_pr ) = unpack("32s" * 7, keymat) # XXX: Should support other than 256-bit algorithms, really. logger.debug("SK_ai: {}".format(dump(self.SK_ai))) logger.debug("SK_ei: {}".format(dump(self.SK_ei)))
[ "def", "init_recv", "(", "self", ")", ":", "assert", "len", "(", "self", ".", "packets", ")", "==", "2", "packet", "=", "self", ".", "packets", "[", "-", "1", "]", "for", "p", "in", "packet", ".", "payloads", ":", "if", "p", ".", "_type", "==", ...
Parses the IKE_INIT response packet received from Responder. Assigns the correct values of rSPI and Nr Calculates Diffie-Hellman exchange and assigns all keys to self.
[ "Parses", "the", "IKE_INIT", "response", "packet", "received", "from", "Responder", "." ]
python
train
napalm-automation/napalm
napalm/base/helpers.py
https://github.com/napalm-automation/napalm/blob/c11ae8bb5ce395698704a0051cdf8d144fbb150d/napalm/base/helpers.py#L353-L380
def canonical_interface_name(interface, addl_name_map=None): """Function to return an interface's canonical name (fully expanded name). Use of explicit matches used to indicate a clear understanding on any potential match. Regex and other looser matching methods were not implmented to avoid false positive matches. As an example, it would make sense to do "[P|p][O|o]" which would incorrectly match PO = POS and Po = Port-channel, leading to a false positive, not easily troubleshot, found, or known. :param interface: The interface you are attempting to expand. :param addl_name_map (optional): A dict containing key/value pairs that updates the base mapping. Used if an OS has specific differences. e.g. {"Po": "PortChannel"} vs {"Po": "Port-Channel"} """ name_map = {} name_map.update(base_interfaces) interface_type, interface_number = split_interface(interface) if isinstance(addl_name_map, dict): name_map.update(addl_name_map) # check in dict for mapping if name_map.get(interface_type): long_int = name_map.get(interface_type) return long_int + py23_compat.text_type(interface_number) # if nothing matched, return the original name else: return interface
[ "def", "canonical_interface_name", "(", "interface", ",", "addl_name_map", "=", "None", ")", ":", "name_map", "=", "{", "}", "name_map", ".", "update", "(", "base_interfaces", ")", "interface_type", ",", "interface_number", "=", "split_interface", "(", "interface"...
Function to return an interface's canonical name (fully expanded name). Use of explicit matches used to indicate a clear understanding on any potential match. Regex and other looser matching methods were not implmented to avoid false positive matches. As an example, it would make sense to do "[P|p][O|o]" which would incorrectly match PO = POS and Po = Port-channel, leading to a false positive, not easily troubleshot, found, or known. :param interface: The interface you are attempting to expand. :param addl_name_map (optional): A dict containing key/value pairs that updates the base mapping. Used if an OS has specific differences. e.g. {"Po": "PortChannel"} vs {"Po": "Port-Channel"}
[ "Function", "to", "return", "an", "interface", "s", "canonical", "name", "(", "fully", "expanded", "name", ")", "." ]
python
train
apache/incubator-mxnet
example/gluon/house_prices/kaggle_k_fold_cross_validation.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/house_prices/kaggle_k_fold_cross_validation.py#L104-L135
def k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train, learning_rate, weight_decay, batch_size): """Conducts k-fold cross validation for the model.""" assert k > 1 fold_size = X_train.shape[0] // k train_loss_sum = 0.0 test_loss_sum = 0.0 for test_idx in range(k): X_val_test = X_train[test_idx * fold_size: (test_idx + 1) * fold_size, :] y_val_test = y_train[test_idx * fold_size: (test_idx + 1) * fold_size] val_train_defined = False for i in range(k): if i != test_idx: X_cur_fold = X_train[i * fold_size: (i + 1) * fold_size, :] y_cur_fold = y_train[i * fold_size: (i + 1) * fold_size] if not val_train_defined: X_val_train = X_cur_fold y_val_train = y_cur_fold val_train_defined = True else: X_val_train = nd.concat(X_val_train, X_cur_fold, dim=0) y_val_train = nd.concat(y_val_train, y_cur_fold, dim=0) net = get_net() train_loss = train(net, X_val_train, y_val_train, epochs, verbose_epoch, learning_rate, weight_decay, batch_size) train_loss_sum += train_loss test_loss = get_rmse_log(net, X_val_test, y_val_test) print("Test loss: %f" % test_loss) test_loss_sum += test_loss return train_loss_sum / k, test_loss_sum / k
[ "def", "k_fold_cross_valid", "(", "k", ",", "epochs", ",", "verbose_epoch", ",", "X_train", ",", "y_train", ",", "learning_rate", ",", "weight_decay", ",", "batch_size", ")", ":", "assert", "k", ">", "1", "fold_size", "=", "X_train", ".", "shape", "[", "0"...
Conducts k-fold cross validation for the model.
[ "Conducts", "k", "-", "fold", "cross", "validation", "for", "the", "model", "." ]
python
train
sentinel-hub/sentinelhub-py
sentinelhub/geometry.py
https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/geometry.py#L431-L446
def transform(self, crs): """ Transforms Geometry from current CRS to target CRS :param crs: target CRS :type crs: constants.CRS :return: Geometry in target CRS :rtype: Geometry """ new_crs = CRS(crs) geometry = self.geometry if new_crs is not self.crs: project = functools.partial(pyproj.transform, self.crs.projection(), new_crs.projection()) geometry = shapely.ops.transform(project, geometry) return Geometry(geometry, crs=new_crs)
[ "def", "transform", "(", "self", ",", "crs", ")", ":", "new_crs", "=", "CRS", "(", "crs", ")", "geometry", "=", "self", ".", "geometry", "if", "new_crs", "is", "not", "self", ".", "crs", ":", "project", "=", "functools", ".", "partial", "(", "pyproj"...
Transforms Geometry from current CRS to target CRS :param crs: target CRS :type crs: constants.CRS :return: Geometry in target CRS :rtype: Geometry
[ "Transforms", "Geometry", "from", "current", "CRS", "to", "target", "CRS" ]
python
train
pandas-dev/pandas
pandas/core/indexes/multi.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L2191-L2252
def reindex(self, target, method=None, level=None, limit=None, tolerance=None): """ Create index with target's values (move/add/delete values as necessary) Returns ------- new_index : pd.MultiIndex Resulting index indexer : np.ndarray or None Indices of output values in original index. """ # GH6552: preserve names when reindexing to non-named target # (i.e. neither Index nor Series). preserve_names = not hasattr(target, 'names') if level is not None: if method is not None: raise TypeError('Fill method not supported if level passed') # GH7774: preserve dtype/tz if target is empty and not an Index. # target may be an iterator target = ibase._ensure_has_len(target) if len(target) == 0 and not isinstance(target, Index): idx = self.levels[level] attrs = idx._get_attributes_dict() attrs.pop('freq', None) # don't preserve freq target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype), **attrs) else: target = ensure_index(target) target, indexer, _ = self._join_level(target, level, how='right', return_indexers=True, keep_order=False) else: target = ensure_index(target) if self.equals(target): indexer = None else: if self.is_unique: indexer = self.get_indexer(target, method=method, limit=limit, tolerance=tolerance) else: raise ValueError("cannot handle a non-unique multi-index!") if not isinstance(target, MultiIndex): if indexer is None: target = self elif (indexer >= 0).all(): target = self.take(indexer) else: # hopefully? target = MultiIndex.from_tuples(target) if (preserve_names and target.nlevels == self.nlevels and target.names != self.names): target = target.copy(deep=False) target.names = self.names return target, indexer
[ "def", "reindex", "(", "self", ",", "target", ",", "method", "=", "None", ",", "level", "=", "None", ",", "limit", "=", "None", ",", "tolerance", "=", "None", ")", ":", "# GH6552: preserve names when reindexing to non-named target", "# (i.e. neither Index nor Series...
Create index with target's values (move/add/delete values as necessary) Returns ------- new_index : pd.MultiIndex Resulting index indexer : np.ndarray or None Indices of output values in original index.
[ "Create", "index", "with", "target", "s", "values", "(", "move", "/", "add", "/", "delete", "values", "as", "necessary", ")" ]
python
train
futursolo/magichttp
magichttp/writers.py
https://github.com/futursolo/magichttp/blob/84445d21d6829a43132da6d50a72501739d64ca4/magichttp/writers.py#L137-L160
def finish(self, data: bytes=b"") -> None: """ Finish the stream. """ if self.finished(): if self._exc: raise self._exc if data: raise WriteAfterFinishedError return try: self._delegate.write_data(data, finished=True) except BaseWriteException as e: if self._exc is None: self._exc = e raise finally: self._finished.set()
[ "def", "finish", "(", "self", ",", "data", ":", "bytes", "=", "b\"\"", ")", "->", "None", ":", "if", "self", ".", "finished", "(", ")", ":", "if", "self", ".", "_exc", ":", "raise", "self", ".", "_exc", "if", "data", ":", "raise", "WriteAfterFinish...
Finish the stream.
[ "Finish", "the", "stream", "." ]
python
train
eofs/aws
aws/main.py
https://github.com/eofs/aws/blob/479cbe27a9f289b43f32f8e3de7d048a4a8993fe/aws/main.py#L222-L330
def main(): """ AWS support script's main method """ p = argparse.ArgumentParser(description='Manage Amazon AWS services', prog='aws', version=__version__) subparsers = p.add_subparsers(help='Select Amazon AWS service to use') # Auto Scaling as_service = subparsers.add_parser('as', help='Amazon Auto Scaling') as_subparsers = as_service.add_subparsers(help='Perform action') as_service_list = as_subparsers.add_parser('list', help='List Auto Scaling groups') as_service_list.set_defaults(func=as_list_handler) # Elastic Cloud Computing ec2_service = subparsers.add_parser('ec2', help='Amazon Elastic Compute Cloud') ec2_subparsers = ec2_service.add_subparsers(help='Perform action') ec2_service_list = ec2_subparsers.add_parser('list', help='List items') ec2_service_list.add_argument('--elb', '-e', help='Filter instances inside this ELB instance') ec2_service_list.add_argument('--instances', '-i', nargs='*', metavar=('id', 'id'), help='List of instance IDs to use as filter') ec2_service_list.add_argument('--type', default='instances', choices=['instances', 'regions', 'images'], help='List items of this type') ec2_service_list.set_defaults(func=ec2_list_handler) ec2_service_fab = ec2_subparsers.add_parser('fab', help='Run Fabric commands') ec2_service_fab.add_argument('--elb', '-e', help='Run against EC2 instances for this ELB') ec2_service_fab.add_argument('--instances', '-i', nargs='*', metavar=('id', 'id'), help='List of instance IDs to use as filter') ec2_service_fab.add_argument('--file', '-f', nargs='+', help='Define fabfile to use') ec2_service_fab.add_argument('methods', metavar='method:arg1,arg2=val2,host=foo,hosts=\'h1;h2\',', nargs='+', help='Specify one or more methods to execute.') ec2_service_fab.set_defaults(func=ec2_fab_handler) ec2_service_create = ec2_subparsers.add_parser('create', help='Create and start new instances') ec2_service_create.set_defaults(func=ec2_create_handler) ec2_service_start = ec2_subparsers.add_parser('start', help='Start existing 
instances') ec2_service_start.add_argument('instance', nargs='+', help='ID of an instance to start') ec2_service_start.set_defaults(func=ec2_start_handler) ec2_service_stop = ec2_subparsers.add_parser('stop', help='Stop instances') ec2_service_stop.add_argument('instance', nargs='+', help='ID of an instance to stop') ec2_service_stop.add_argument('--force', '-f', action='store_true', help='Force stop') ec2_service_stop.set_defaults(func=ec2_stop_handler) ec2_service_terminate = ec2_subparsers.add_parser('terminate', help='Terminate instances') ec2_service_terminate.add_argument('instance', nargs='+', help='ID of an instance to terminate') ec2_service_terminate.set_defaults(func=ec2_terminate_handler) ec2_service_images = ec2_subparsers.add_parser('images', help='List AMI images') ec2_service_images.add_argument('image', nargs='*', help='Image ID to use as filter') ec2_service_images.set_defaults(func=ec2_images_handler) ec2_service_create_image = ec2_subparsers.add_parser('create-image', help='Create AMI image from instance') ec2_service_create_image.add_argument('instance', help='ID of an instance to image') ec2_service_create_image.add_argument('name', help='The name of the image') ec2_service_create_image.add_argument('--description', '-d', help='Optional description for the image') ec2_service_create_image.add_argument('--noreboot', action='store_true', default=False, help='Do not shutdown the instance before creating image. 
' + 'Note: System integrity might suffer if used.') ec2_service_create_image.set_defaults(func=ec2_create_image_handler) # Elastic Load Balancing elb_service = subparsers.add_parser('elb', help='Amazon Elastic Load Balancing') elb_subparsers = elb_service.add_subparsers(help='Perform action') elb_service_list = elb_subparsers.add_parser('list', help='List items') elb_service_list.add_argument('--type', default='balancers', choices=['balancers', 'regions'], help='List items of this type') elb_service_list.set_defaults(func=elb_list_handler) elb_service_instances = elb_subparsers.add_parser('instances', help='List registered instances') elb_service_instances.add_argument('balancer', help='Name of the Load Balancer') elb_service_instances.set_defaults(func=elb_instances_handler) elb_service_register = elb_subparsers.add_parser('register', help='Register instances to balancer') elb_service_register.add_argument('balancer', help='Name of the load balancer') elb_service_register.add_argument('instance', nargs='+', help='ID of an instance to register') elb_service_register.set_defaults(func=elb_register_handler) elb_service_deregister = elb_subparsers.add_parser('deregister', help='Deregister instances of balancer') elb_service_deregister.add_argument('balancer', help='Name of the Load Balancer') elb_service_deregister.add_argument('instance', nargs='+', help='ID of an instance to deregister') elb_service_deregister.set_defaults(func=elb_deregister_handler) elb_service_zones = elb_subparsers.add_parser('zones', help='Enable or disable availability zones') elb_service_zones.add_argument('balancer', help='Name of the Load Balancer') elb_service_zones.add_argument('zone', nargs='+', help='Name of the availability zone') elb_service_zones.add_argument('status', help='Disable of enable zones', choices=['enable', 'disable']) elb_service_zones.set_defaults(func=elb_zones_handler) elb_service_delete = elb_subparsers.add_parser('delete', help='Delete Load Balancer') 
elb_service_delete.add_argument('balancer', help='Name of the Load Balancer') elb_service_delete.set_defaults(func=elb_delete_handler) # elb_service_create = elb_subparsers.add_parser('create', help='Create new Load Balancer') # elb_service_delete = elb_subparsers.add_parser('delete', help='Delete Load Balancer') # elb_service_register = elb_subparsers.add_parser('register', help='Register EC2 instance') # elb_service_zone = elb_subparsers.add_parser('zone', help='Enable or disable region') arguments = p.parse_args() arguments.func(p, arguments)
[ "def", "main", "(", ")", ":", "p", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Manage Amazon AWS services'", ",", "prog", "=", "'aws'", ",", "version", "=", "__version__", ")", "subparsers", "=", "p", ".", "add_subparsers", "(", "help"...
AWS support script's main method
[ "AWS", "support", "script", "s", "main", "method" ]
python
train
getsentry/raven-python
raven/utils/serializer/base.py
https://github.com/getsentry/raven-python/blob/d891c20f0f930153f508e9d698d9de42e910face/raven/utils/serializer/base.py#L51-L68
def recurse(self, value, max_depth=6, _depth=0, **kwargs): """ Given ``value``, recurse (using the parent serializer) to handle coercing of newly defined values. """ string_max_length = kwargs.get('string_max_length', None) _depth += 1 if _depth >= max_depth: try: value = text_type(repr(value))[:string_max_length] except Exception as e: import traceback traceback.print_exc() self.manager.logger.exception(e) return text_type(type(value)) return self.manager.transform(value, max_depth=max_depth, _depth=_depth, **kwargs)
[ "def", "recurse", "(", "self", ",", "value", ",", "max_depth", "=", "6", ",", "_depth", "=", "0", ",", "*", "*", "kwargs", ")", ":", "string_max_length", "=", "kwargs", ".", "get", "(", "'string_max_length'", ",", "None", ")", "_depth", "+=", "1", "i...
Given ``value``, recurse (using the parent serializer) to handle coercing of newly defined values.
[ "Given", "value", "recurse", "(", "using", "the", "parent", "serializer", ")", "to", "handle", "coercing", "of", "newly", "defined", "values", "." ]
python
train
diffeo/rejester
rejester/_task_master.py
https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/_task_master.py#L786-L798
def workers(self, alive=True): '''Get a listing of all workers. This returns a dictionary mapping worker ID to the mode constant for their last observed mode. :param bool alive: if true (default), only include workers that have called :meth:`Worker.heartbeat` sufficiently recently ''' return self.registry.filter( WORKER_OBSERVED_MODE, priority_min=alive and time.time() or '-inf')
[ "def", "workers", "(", "self", ",", "alive", "=", "True", ")", ":", "return", "self", ".", "registry", ".", "filter", "(", "WORKER_OBSERVED_MODE", ",", "priority_min", "=", "alive", "and", "time", ".", "time", "(", ")", "or", "'-inf'", ")" ]
Get a listing of all workers. This returns a dictionary mapping worker ID to the mode constant for their last observed mode. :param bool alive: if true (default), only include workers that have called :meth:`Worker.heartbeat` sufficiently recently
[ "Get", "a", "listing", "of", "all", "workers", "." ]
python
train
tcalmant/ipopo
pelix/misc/ssl_wrap.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/misc/ssl_wrap.py#L54-L139
def wrap_socket(socket, certfile, keyfile, password=None): """ Wraps an existing TCP socket and returns an SSLSocket object :param socket: The socket to wrap :param certfile: The server certificate file :param keyfile: The server private key file :param password: Password for the private key file (Python >= 3.3) :return: The wrapped socket :raise SSLError: Error wrapping the socket / loading the certificate :raise OSError: A password has been given, but ciphered key files are not supported by the current version of Python """ # Log warnings when some logger = logging.getLogger("ssl_wrap") def _password_support_error(): """ Logs a warning and raises an OSError if a password has been given but Python doesn't support ciphered key files. :raise OSError: If a password has been given """ if password: logger.error( "The ssl.wrap_socket() fallback method doesn't " "support key files with a password." ) raise OSError( "Can't decode the SSL key file: " "this version of Python doesn't support it" ) try: # Prefer the default context factory, as it will be updated to reflect # security issues (Python >= 2.7.9 and >= 3.4) default_context = ssl.create_default_context() except AttributeError: default_context = None try: # Try to equivalent to create_default_context() in Python 3.5 # Create an SSL context and set its options context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) if default_context is not None: # Copy options context.options = default_context.options else: # Set up the context as create_default_context() does in Python 3.5 # SSLv2 considered harmful # SSLv3 has problematic security context.options |= ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 # disallow ciphers with known vulnerabilities context.set_ciphers(_RESTRICTED_SERVER_CIPHERS) try: # Load the certificate, with a password context.load_cert_chain(certfile, keyfile, password) except TypeError: # The "password" argument isn't supported # Check support for key file password _password_support_error() # Load the certificate, without 
the password argument context.load_cert_chain(certfile, keyfile) # Return the wrapped socket return context.wrap_socket(socket, server_side=True) except AttributeError as ex: # Log a warning to advise the user of possible security holes logger.warning( "Can't create a custom SSLContext. " "The server should be considered insecure." ) logger.debug("Missing attribute: %s", ex) # Check support for key file password _password_support_error() # Fall back to the "old" wrap_socket method return ssl.wrap_socket( socket, server_side=True, certfile=certfile, keyfile=keyfile )
[ "def", "wrap_socket", "(", "socket", ",", "certfile", ",", "keyfile", ",", "password", "=", "None", ")", ":", "# Log warnings when some", "logger", "=", "logging", ".", "getLogger", "(", "\"ssl_wrap\"", ")", "def", "_password_support_error", "(", ")", ":", "\"...
Wraps an existing TCP socket and returns an SSLSocket object :param socket: The socket to wrap :param certfile: The server certificate file :param keyfile: The server private key file :param password: Password for the private key file (Python >= 3.3) :return: The wrapped socket :raise SSLError: Error wrapping the socket / loading the certificate :raise OSError: A password has been given, but ciphered key files are not supported by the current version of Python
[ "Wraps", "an", "existing", "TCP", "socket", "and", "returns", "an", "SSLSocket", "object" ]
python
train
UDST/urbansim
urbansim/models/dcm.py
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L278-L320
def from_yaml(cls, yaml_str=None, str_or_buffer=None): """ Create a DiscreteChoiceModel instance from a saved YAML configuration. Arguments are mutally exclusive. Parameters ---------- yaml_str : str, optional A YAML string from which to load model. str_or_buffer : str or file like, optional File name or buffer from which to load YAML. Returns ------- MNLDiscreteChoiceModel """ cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer) model = cls( cfg['model_expression'], cfg['sample_size'], probability_mode=cfg.get('probability_mode', 'full_product'), choice_mode=cfg.get('choice_mode', 'individual'), choosers_fit_filters=cfg.get('choosers_fit_filters', None), choosers_predict_filters=cfg.get('choosers_predict_filters', None), alts_fit_filters=cfg.get('alts_fit_filters', None), alts_predict_filters=cfg.get('alts_predict_filters', None), interaction_predict_filters=cfg.get( 'interaction_predict_filters', None), estimation_sample_size=cfg.get('estimation_sample_size', None), prediction_sample_size=cfg.get('prediction_sample_size', None), choice_column=cfg.get('choice_column', None), name=cfg.get('name', None) ) if cfg.get('log_likelihoods', None): model.log_likelihoods = cfg['log_likelihoods'] if cfg.get('fit_parameters', None): model.fit_parameters = pd.DataFrame(cfg['fit_parameters']) logger.debug('loaded LCM model {} from YAML'.format(model.name)) return model
[ "def", "from_yaml", "(", "cls", ",", "yaml_str", "=", "None", ",", "str_or_buffer", "=", "None", ")", ":", "cfg", "=", "yamlio", ".", "yaml_to_dict", "(", "yaml_str", ",", "str_or_buffer", ")", "model", "=", "cls", "(", "cfg", "[", "'model_expression'", ...
Create a DiscreteChoiceModel instance from a saved YAML configuration. Arguments are mutally exclusive. Parameters ---------- yaml_str : str, optional A YAML string from which to load model. str_or_buffer : str or file like, optional File name or buffer from which to load YAML. Returns ------- MNLDiscreteChoiceModel
[ "Create", "a", "DiscreteChoiceModel", "instance", "from", "a", "saved", "YAML", "configuration", ".", "Arguments", "are", "mutally", "exclusive", "." ]
python
train
pkkid/python-plexapi
plexapi/myplex.py
https://github.com/pkkid/python-plexapi/blob/9efbde96441c2bfbf410eacfb46e811e108e8bbc/plexapi/myplex.py#L853-L864
def _chooseConnection(ctype, name, results): """ Chooses the first (best) connection from the given _connect results. """ # At this point we have a list of result tuples containing (url, token, PlexServer, runtime) # or (url, token, None, runtime) in the case a connection could not be established. for url, token, result, runtime in results: okerr = 'OK' if result else 'ERR' log.info('%s connection %s (%ss): %s?X-Plex-Token=%s', ctype, okerr, runtime, url, token) results = [r[2] for r in results if r and r[2] is not None] if results: log.info('Connecting to %s: %s?X-Plex-Token=%s', ctype, results[0]._baseurl, results[0]._token) return results[0] raise NotFound('Unable to connect to %s: %s' % (ctype.lower(), name))
[ "def", "_chooseConnection", "(", "ctype", ",", "name", ",", "results", ")", ":", "# At this point we have a list of result tuples containing (url, token, PlexServer, runtime)", "# or (url, token, None, runtime) in the case a connection could not be established.", "for", "url", ",", "to...
Chooses the first (best) connection from the given _connect results.
[ "Chooses", "the", "first", "(", "best", ")", "connection", "from", "the", "given", "_connect", "results", "." ]
python
train
google/grr
grr/core/grr_response_core/lib/parsers/firefox3_history.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/parsers/firefox3_history.py#L54-L60
def Parse(self): """Iterator returning dict for each entry in history.""" for timestamp, url, title in self.Query(self.VISITS_QUERY): if not isinstance(timestamp, (long, int)): timestamp = 0 yield [timestamp, "FIREFOX3_VISIT", url, title]
[ "def", "Parse", "(", "self", ")", ":", "for", "timestamp", ",", "url", ",", "title", "in", "self", ".", "Query", "(", "self", ".", "VISITS_QUERY", ")", ":", "if", "not", "isinstance", "(", "timestamp", ",", "(", "long", ",", "int", ")", ")", ":", ...
Iterator returning dict for each entry in history.
[ "Iterator", "returning", "dict", "for", "each", "entry", "in", "history", "." ]
python
train
DataONEorg/d1_python
lib_common/src/d1_common/cert/subject_info_renderer.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/cert/subject_info_renderer.py#L72-L105
def render_to_image_file( self, image_out_path, width_pixels=None, height_pixels=None, dpi=90 ): """Render the SubjectInfo to an image file. Args: image_out_path : str Path to where image image will be written. Valid extensions are ``.svg,`` ``.pdf``, and ``.png``. width_pixels : int Width of image to write. height_pixels : int Height of image to write, in pixels. dpi: Dots Per Inch to declare in image file. This does not change the resolution of the image but may change the size of the image when rendered. Returns: None """ self._render_type = "file" self._tree.render( file_name=image_out_path, w=width_pixels, h=height_pixels, dpi=dpi, units="px", tree_style=self._get_tree_style(), )
[ "def", "render_to_image_file", "(", "self", ",", "image_out_path", ",", "width_pixels", "=", "None", ",", "height_pixels", "=", "None", ",", "dpi", "=", "90", ")", ":", "self", ".", "_render_type", "=", "\"file\"", "self", ".", "_tree", ".", "render", "(",...
Render the SubjectInfo to an image file. Args: image_out_path : str Path to where image image will be written. Valid extensions are ``.svg,`` ``.pdf``, and ``.png``. width_pixels : int Width of image to write. height_pixels : int Height of image to write, in pixels. dpi: Dots Per Inch to declare in image file. This does not change the resolution of the image but may change the size of the image when rendered. Returns: None
[ "Render", "the", "SubjectInfo", "to", "an", "image", "file", "." ]
python
train
vatlab/SoS
src/sos/utils.py
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/utils.py#L933-L939
def natural_keys(text): ''' alist.sort(key=natural_keys) sorts in human order http://nedbatchelder.com/blog/200712/human_sorting.html (See Toothy's implementation in the comments) ''' return [int(c) if c.isdigit() else c for c in re.split(r'(\d+)', text)]
[ "def", "natural_keys", "(", "text", ")", ":", "return", "[", "int", "(", "c", ")", "if", "c", ".", "isdigit", "(", ")", "else", "c", "for", "c", "in", "re", ".", "split", "(", "r'(\\d+)'", ",", "text", ")", "]" ]
alist.sort(key=natural_keys) sorts in human order http://nedbatchelder.com/blog/200712/human_sorting.html (See Toothy's implementation in the comments)
[ "alist", ".", "sort", "(", "key", "=", "natural_keys", ")", "sorts", "in", "human", "order", "http", ":", "//", "nedbatchelder", ".", "com", "/", "blog", "/", "200712", "/", "human_sorting", ".", "html", "(", "See", "Toothy", "s", "implementation", "in",...
python
train
portfors-lab/sparkle
sparkle/gui/stim/stimulusview.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/stimulusview.py#L338-L349
def grabImage(self, index): """Gets an image of the item at *index* :param index: index of an item in the view :type index: :qtdoc:`QModelIndex` :returns: :qtdoc:`QPixmap` """ # rect = self._rects[index.row()][index.column()] rect = self.visualRect(index) pixmap = QtGui.QPixmap() pixmap = pixmap.grabWidget(self, rect) return pixmap
[ "def", "grabImage", "(", "self", ",", "index", ")", ":", "# rect = self._rects[index.row()][index.column()]", "rect", "=", "self", ".", "visualRect", "(", "index", ")", "pixmap", "=", "QtGui", ".", "QPixmap", "(", ")", "pixmap", "=", "pixmap", ".", "grabWidget...
Gets an image of the item at *index* :param index: index of an item in the view :type index: :qtdoc:`QModelIndex` :returns: :qtdoc:`QPixmap`
[ "Gets", "an", "image", "of", "the", "item", "at", "*", "index", "*" ]
python
train
Azure/azure-sdk-for-python
azure-keyvault/azure/keyvault/v2016_10_01/key_vault_client.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-keyvault/azure/keyvault/v2016_10_01/key_vault_client.py#L1566-L1644
def set_secret( self, vault_base_url, secret_name, value, tags=None, content_type=None, secret_attributes=None, custom_headers=None, raw=False, **operation_config): """Sets a secret in a specified key vault. The SET operation adds a secret to the Azure Key Vault. If the named secret already exists, Azure Key Vault creates a new version of that secret. This operation requires the secrets/set permission. :param vault_base_url: The vault name, for example https://myvault.vault.azure.net. :type vault_base_url: str :param secret_name: The name of the secret. :type secret_name: str :param value: The value of the secret. :type value: str :param tags: Application specific metadata in the form of key-value pairs. :type tags: dict[str, str] :param content_type: Type of the secret value such as a password. :type content_type: str :param secret_attributes: The secret management attributes. :type secret_attributes: ~azure.keyvault.v2016_10_01.models.SecretAttributes :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: SecretBundle or ClientRawResponse if raw=true :rtype: ~azure.keyvault.v2016_10_01.models.SecretBundle or ~msrest.pipeline.ClientRawResponse :raises: :class:`KeyVaultErrorException<azure.keyvault.v2016_10_01.models.KeyVaultErrorException>` """ parameters = models.SecretSetParameters(value=value, tags=tags, content_type=content_type, secret_attributes=secret_attributes) # Construct URL url = self.set_secret.metadata['url'] path_format_arguments = { 'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True), 'secret-name': self._serialize.url("secret_name", secret_name, 'str', pattern=r'^[0-9a-zA-Z-]+$') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct body body_content = self._serialize.body(parameters, 'SecretSetParameters') # Construct and send request request = self._client.put(url, query_parameters) response = self._client.send( request, header_parameters, body_content, stream=False, **operation_config) if response.status_code not in [200]: raise models.KeyVaultErrorException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('SecretBundle', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
[ "def", "set_secret", "(", "self", ",", "vault_base_url", ",", "secret_name", ",", "value", ",", "tags", "=", "None", ",", "content_type", "=", "None", ",", "secret_attributes", "=", "None", ",", "custom_headers", "=", "None", ",", "raw", "=", "False", ",",...
Sets a secret in a specified key vault. The SET operation adds a secret to the Azure Key Vault. If the named secret already exists, Azure Key Vault creates a new version of that secret. This operation requires the secrets/set permission. :param vault_base_url: The vault name, for example https://myvault.vault.azure.net. :type vault_base_url: str :param secret_name: The name of the secret. :type secret_name: str :param value: The value of the secret. :type value: str :param tags: Application specific metadata in the form of key-value pairs. :type tags: dict[str, str] :param content_type: Type of the secret value such as a password. :type content_type: str :param secret_attributes: The secret management attributes. :type secret_attributes: ~azure.keyvault.v2016_10_01.models.SecretAttributes :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: SecretBundle or ClientRawResponse if raw=true :rtype: ~azure.keyvault.v2016_10_01.models.SecretBundle or ~msrest.pipeline.ClientRawResponse :raises: :class:`KeyVaultErrorException<azure.keyvault.v2016_10_01.models.KeyVaultErrorException>`
[ "Sets", "a", "secret", "in", "a", "specified", "key", "vault", "." ]
python
test
pudo/normality
normality/cleaning.py
https://github.com/pudo/normality/blob/b53cc2c6e5c6205573d2010f72d90808710a4b58/normality/cleaning.py#L17-L28
def decompose_nfkd(text): """Perform unicode compatibility decomposition. This will replace some non-standard value representations in unicode and normalise them, while also separating characters and their diacritics into two separate codepoints. """ if text is None: return None if not hasattr(decompose_nfkd, '_tr'): decompose_nfkd._tr = Transliterator.createInstance('Any-NFKD') return decompose_nfkd._tr.transliterate(text)
[ "def", "decompose_nfkd", "(", "text", ")", ":", "if", "text", "is", "None", ":", "return", "None", "if", "not", "hasattr", "(", "decompose_nfkd", ",", "'_tr'", ")", ":", "decompose_nfkd", ".", "_tr", "=", "Transliterator", ".", "createInstance", "(", "'Any...
Perform unicode compatibility decomposition. This will replace some non-standard value representations in unicode and normalise them, while also separating characters and their diacritics into two separate codepoints.
[ "Perform", "unicode", "compatibility", "decomposition", "." ]
python
train
blockstack-packages/blockstack-gpg
blockstack_gpg/gpg.py
https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L142-L172
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ): """ Store a key locally to our app keyring. Does NOT put it into a blockchain ID Return the key ID on success Return None on error """ assert is_valid_appname(appname) key_bin = str(key_bin) assert len(key_bin) > 0 if gpghome is None: config_dir = get_config_dir( config_dir ) keydir = make_gpg_home( appname, config_dir=config_dir ) else: keydir = gpghome gpg = gnupg.GPG( homedir=keydir ) res = gpg.import_keys( key_bin ) try: assert res.count == 1, "Failed to store key (%s)" % res except AssertionError, e: log.exception(e) log.error("Failed to store key to %s" % keydir) log.debug("res: %s" % res.__dict__) log.debug("(%s)\n%s" % (len(key_bin), key_bin)) return None return res.fingerprints[0]
[ "def", "gpg_stash_key", "(", "appname", ",", "key_bin", ",", "config_dir", "=", "None", ",", "gpghome", "=", "None", ")", ":", "assert", "is_valid_appname", "(", "appname", ")", "key_bin", "=", "str", "(", "key_bin", ")", "assert", "len", "(", "key_bin", ...
Store a key locally to our app keyring. Does NOT put it into a blockchain ID Return the key ID on success Return None on error
[ "Store", "a", "key", "locally", "to", "our", "app", "keyring", ".", "Does", "NOT", "put", "it", "into", "a", "blockchain", "ID", "Return", "the", "key", "ID", "on", "success", "Return", "None", "on", "error" ]
python
train
gwastro/pycbc
pycbc/inference/option_utils.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/option_utils.py#L436-L463
def prior_from_config(cp, prior_section='prior'): """Loads a prior distribution from the given config file. Parameters ---------- cp : pycbc.workflow.WorkflowConfigParser The config file to read. sections : list of str, optional The sections to retrieve the prior from. If ``None`` (the default), will look in sections starting with 'prior'. Returns ------- distributions.JointDistribution The prior distribution. """ # Read variable and static parameters from the config file variable_params, _ = distributions.read_params_from_config( cp, prior_section=prior_section, vargs_section='variable_params', sargs_section='static_params') # Read constraints to apply to priors from the config file constraints = distributions.read_constraints_from_config(cp) # Get PyCBC distribution instances for each variable parameter in the # config file dists = distributions.read_distributions_from_config(cp, prior_section) # construct class that will return draws from the prior return distributions.JointDistribution(variable_params, *dists, **{"constraints": constraints})
[ "def", "prior_from_config", "(", "cp", ",", "prior_section", "=", "'prior'", ")", ":", "# Read variable and static parameters from the config file", "variable_params", ",", "_", "=", "distributions", ".", "read_params_from_config", "(", "cp", ",", "prior_section", "=", ...
Loads a prior distribution from the given config file. Parameters ---------- cp : pycbc.workflow.WorkflowConfigParser The config file to read. sections : list of str, optional The sections to retrieve the prior from. If ``None`` (the default), will look in sections starting with 'prior'. Returns ------- distributions.JointDistribution The prior distribution.
[ "Loads", "a", "prior", "distribution", "from", "the", "given", "config", "file", "." ]
python
train
Jajcus/pyxmpp2
pyxmpp2/ext/component.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/component.py#L160-L203
def _process_node(self,node): """Process first level element of the stream. Handle component handshake (authentication) element, and treat elements in "jabber:component:accept", "jabber:client" and "jabber:server" equally (pass to `self.process_stanza`). All other elements are passed to `Stream._process_node`. :Parameters: - `node`: XML node describing the element """ ns=node.ns() if ns: ns_uri=node.ns().getContent() if (not ns or ns_uri=="jabber:component:accept") and node.name=="handshake": if self.initiator and not self.authenticated: self.authenticated=1 self.state_change("authenticated",self.me) self._post_auth() return elif not self.authenticated and node.getContent()==self._compute_handshake(): self.peer=self.me n=common_doc.newChild(None,"handshake",None) self._write_node(n) n.unlinkNode() n.freeNode() self.peer_authenticated=1 self.state_change("authenticated",self.peer) self._post_auth() return else: self._send_stream_error("not-authorized") raise FatalComponentStreamError("Hanshake error.") if ns_uri in ("jabber:component:accept","jabber:client","jabber:server"): stanza=stanza_factory(node) self.lock.release() try: self.process_stanza(stanza) finally: self.lock.acquire() stanza.free() return return Stream._process_node(self,node)
[ "def", "_process_node", "(", "self", ",", "node", ")", ":", "ns", "=", "node", ".", "ns", "(", ")", "if", "ns", ":", "ns_uri", "=", "node", ".", "ns", "(", ")", ".", "getContent", "(", ")", "if", "(", "not", "ns", "or", "ns_uri", "==", "\"jabbe...
Process first level element of the stream. Handle component handshake (authentication) element, and treat elements in "jabber:component:accept", "jabber:client" and "jabber:server" equally (pass to `self.process_stanza`). All other elements are passed to `Stream._process_node`. :Parameters: - `node`: XML node describing the element
[ "Process", "first", "level", "element", "of", "the", "stream", "." ]
python
valid
collectiveacuity/labPack
labpack/parsing/conversion.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/parsing/conversion.py#L105-L123
def camelcase_to_lowercase(camelcase_input, python_input=None): ''' a function to recursively convert data with camelcase key names into lowercase keys :param camelcase_input: list or dictionary with camelcase keys :param python_input: [optional] list or dictionary with default lowercase keys in output :return: dictionary with lowercase key names ''' if python_input: if camelcase_input.__class__ != python_input.__class__: raise ValueError('python_input type %s does not match camelcase_input type %s' % (python_input.__class__, camelcase_input.__class__)) if isinstance(camelcase_input, dict): return _to_python_dict(camelcase_input, python_input) elif isinstance(camelcase_input, list): return _ingest_list(camelcase_input, _to_python_dict, python_input) else: return camelcase_input
[ "def", "camelcase_to_lowercase", "(", "camelcase_input", ",", "python_input", "=", "None", ")", ":", "if", "python_input", ":", "if", "camelcase_input", ".", "__class__", "!=", "python_input", ".", "__class__", ":", "raise", "ValueError", "(", "'python_input type %s...
a function to recursively convert data with camelcase key names into lowercase keys :param camelcase_input: list or dictionary with camelcase keys :param python_input: [optional] list or dictionary with default lowercase keys in output :return: dictionary with lowercase key names
[ "a", "function", "to", "recursively", "convert", "data", "with", "camelcase", "key", "names", "into", "lowercase", "keys", ":", "param", "camelcase_input", ":", "list", "or", "dictionary", "with", "camelcase", "keys", ":", "param", "python_input", ":", "[", "o...
python
train
dropbox/stone
stone/backends/python_client.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/backends/python_client.py#L529-L540
def _format_type_in_doc(self, namespace, data_type): """ Returns a string that can be recognized by Sphinx as a type reference in a docstring. """ if is_void_type(data_type): return 'None' elif is_user_defined_type(data_type): return ':class:`{}.{}.{}`'.format( self.args.types_package, namespace.name, fmt_type(data_type)) else: return fmt_type(data_type)
[ "def", "_format_type_in_doc", "(", "self", ",", "namespace", ",", "data_type", ")", ":", "if", "is_void_type", "(", "data_type", ")", ":", "return", "'None'", "elif", "is_user_defined_type", "(", "data_type", ")", ":", "return", "':class:`{}.{}.{}`'", ".", "form...
Returns a string that can be recognized by Sphinx as a type reference in a docstring.
[ "Returns", "a", "string", "that", "can", "be", "recognized", "by", "Sphinx", "as", "a", "type", "reference", "in", "a", "docstring", "." ]
python
train
UDST/urbansim
scripts/cache_to_hdf5.py
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/scripts/cache_to_hdf5.py#L72-L130
def convert_dirs(base_dir, hdf_name, complib=None, complevel=0): """ Convert nested set of directories to """ print('Converting directories in {}'.format(base_dir)) dirs = glob.glob(os.path.join(base_dir, '*')) dirs = {d for d in dirs if os.path.basename(d) in DIRECTORIES} if not dirs: raise RuntimeError('No direcotries found matching known data.') store = pd.HDFStore( hdf_name, mode='w', complevel=complevel, complib=complib) for dirpath in dirs: dirname = os.path.basename(dirpath) print(dirname) df = cache_to_df(dirpath) if dirname == 'travel_data': keys = ['from_zone_id', 'to_zone_id'] elif dirname == 'annual_employment_control_totals': keys = ['sector_id', 'year', 'home_based_status'] elif dirname == 'annual_job_relocation_rates': keys = ['sector_id'] elif dirname == 'annual_household_control_totals': keys = ['year'] elif dirname == 'annual_household_relocation_rates': keys = ['age_of_head_max', 'age_of_head_min', 'income_min', 'income_max'] elif dirname == 'building_sqft_per_job': keys = ['zone_id', 'building_type_id'] elif dirname == 'counties': keys = ['county_id'] elif dirname == 'development_event_history': keys = ['building_id'] elif dirname == 'target_vacancies': keys = ['building_type_id', 'year'] else: keys = [dirname[:-1] + '_id'] if dirname != 'annual_household_relocation_rates': df = df.set_index(keys) for colname in df.columns: if df[colname].dtype == np.float64: df[colname] = df[colname].astype(np.float32) elif df[colname].dtype == np.int64: df[colname] = df[colname].astype(np.int32) else: df[colname] = df[colname] df.info() print(os.linesep) store.put(dirname, df) store.close()
[ "def", "convert_dirs", "(", "base_dir", ",", "hdf_name", ",", "complib", "=", "None", ",", "complevel", "=", "0", ")", ":", "print", "(", "'Converting directories in {}'", ".", "format", "(", "base_dir", ")", ")", "dirs", "=", "glob", ".", "glob", "(", "...
Convert nested set of directories to
[ "Convert", "nested", "set", "of", "directories", "to" ]
python
train
pandas-dev/pandas
pandas/core/arrays/categorical.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L2549-L2582
def _get_codes_for_values(values, categories): """ utility routine to turn values into codes given the specified categories """ from pandas.core.algorithms import _get_data_algo, _hashtables dtype_equal = is_dtype_equal(values.dtype, categories.dtype) if dtype_equal: # To prevent erroneous dtype coercion in _get_data_algo, retrieve # the underlying numpy array. gh-22702 values = getattr(values, '_ndarray_values', values) categories = getattr(categories, '_ndarray_values', categories) elif (is_extension_array_dtype(categories.dtype) and is_object_dtype(values)): # Support inferring the correct extension dtype from an array of # scalar objects. e.g. # Categorical(array[Period, Period], categories=PeriodIndex(...)) try: values = ( categories.dtype.construct_array_type()._from_sequence(values) ) except Exception: # but that may fail for any reason, so fall back to object values = ensure_object(values) categories = ensure_object(categories) else: values = ensure_object(values) categories = ensure_object(categories) (hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables) (_, _), cats = _get_data_algo(categories, _hashtables) t = hash_klass(len(cats)) t.map_locations(cats) return coerce_indexer_dtype(t.lookup(vals), cats)
[ "def", "_get_codes_for_values", "(", "values", ",", "categories", ")", ":", "from", "pandas", ".", "core", ".", "algorithms", "import", "_get_data_algo", ",", "_hashtables", "dtype_equal", "=", "is_dtype_equal", "(", "values", ".", "dtype", ",", "categories", "....
utility routine to turn values into codes given the specified categories
[ "utility", "routine", "to", "turn", "values", "into", "codes", "given", "the", "specified", "categories" ]
python
train
ronaldguillen/wave
wave/views.py
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/views.py#L355-L367
def initialize_request(self, request, *args, **kwargs): """ Returns the initial request object. """ parser_context = self.get_parser_context(request) return Request( request, parsers=self.get_parsers(), authenticators=self.get_authenticators(), negotiator=self.get_content_negotiator(), parser_context=parser_context )
[ "def", "initialize_request", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "parser_context", "=", "self", ".", "get_parser_context", "(", "request", ")", "return", "Request", "(", "request", ",", "parsers", "=", "self", ...
Returns the initial request object.
[ "Returns", "the", "initial", "request", "object", "." ]
python
train
SetBased/py-etlt
etlt/condition/InListCondition.py
https://github.com/SetBased/py-etlt/blob/1c5b8ea60293c14f54d7845a9fe5c595021f66f2/etlt/condition/InListCondition.py#L73-L88
def match(self, row): """ Returns True if the field is in the list of conditions. Returns False otherwise. :param dict row: The row. :rtype: bool """ if row[self._field] in self._values: return True for condition in self._conditions: if condition.match(row): return True return False
[ "def", "match", "(", "self", ",", "row", ")", ":", "if", "row", "[", "self", ".", "_field", "]", "in", "self", ".", "_values", ":", "return", "True", "for", "condition", "in", "self", ".", "_conditions", ":", "if", "condition", ".", "match", "(", "...
Returns True if the field is in the list of conditions. Returns False otherwise. :param dict row: The row. :rtype: bool
[ "Returns", "True", "if", "the", "field", "is", "in", "the", "list", "of", "conditions", ".", "Returns", "False", "otherwise", "." ]
python
train
belbio/bel
bel/lang/bel_utils.py
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/bel_utils.py#L450-L457
def _dump_spec(spec): """Dump bel specification dictionary using YAML Formats this with an extra indentation for lists to make it easier to use cold folding on the YAML version of the spec dictionary. """ with open("spec.yaml", "w") as f: yaml.dump(spec, f, Dumper=MyDumper, default_flow_style=False)
[ "def", "_dump_spec", "(", "spec", ")", ":", "with", "open", "(", "\"spec.yaml\"", ",", "\"w\"", ")", "as", "f", ":", "yaml", ".", "dump", "(", "spec", ",", "f", ",", "Dumper", "=", "MyDumper", ",", "default_flow_style", "=", "False", ")" ]
Dump bel specification dictionary using YAML Formats this with an extra indentation for lists to make it easier to use cold folding on the YAML version of the spec dictionary.
[ "Dump", "bel", "specification", "dictionary", "using", "YAML" ]
python
train
bwohlberg/sporco
sporco/admm/tvl2.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/tvl2.py#L319-L324
def cnst_c(self): r"""Compute constant component :math:`\mathbf{c}` of ADMM problem constraint. In this case :math:`\mathbf{c} = \mathbf{0}`. """ return np.zeros(self.S.shape + (len(self.axes),), self.dtype)
[ "def", "cnst_c", "(", "self", ")", ":", "return", "np", ".", "zeros", "(", "self", ".", "S", ".", "shape", "+", "(", "len", "(", "self", ".", "axes", ")", ",", ")", ",", "self", ".", "dtype", ")" ]
r"""Compute constant component :math:`\mathbf{c}` of ADMM problem constraint. In this case :math:`\mathbf{c} = \mathbf{0}`.
[ "r", "Compute", "constant", "component", ":", "math", ":", "\\", "mathbf", "{", "c", "}", "of", "ADMM", "problem", "constraint", ".", "In", "this", "case", ":", "math", ":", "\\", "mathbf", "{", "c", "}", "=", "\\", "mathbf", "{", "0", "}", "." ]
python
train
miyakogi/wdom
wdom/element.py
https://github.com/miyakogi/wdom/blob/a21bcd23e94baceee71161829f6897bee3fd39c1/wdom/element.py#L276-L286
def removeNamedItem(self, item: Attr) -> Optional[Attr]: """Set ``Attr`` object and return it (if exists).""" from wdom.web_node import WdomElement if not isinstance(item, Attr): raise TypeError('item must be an instance of Attr') if isinstance(self._owner, WdomElement): self._owner.js_exec('removeAttribute', item.name) removed_item = self._dict.pop(item.name, None) if removed_item: removed_item._owner = self._owner return removed_item
[ "def", "removeNamedItem", "(", "self", ",", "item", ":", "Attr", ")", "->", "Optional", "[", "Attr", "]", ":", "from", "wdom", ".", "web_node", "import", "WdomElement", "if", "not", "isinstance", "(", "item", ",", "Attr", ")", ":", "raise", "TypeError", ...
Set ``Attr`` object and return it (if exists).
[ "Set", "Attr", "object", "and", "return", "it", "(", "if", "exists", ")", "." ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/platformfunc.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/platformfunc.py#L293-L307
def contains_unquoted_target(x: str, quote: str = '"', target: str = '&') -> bool: """ Checks if ``target`` exists in ``x`` outside quotes (as defined by ``quote``). Principal use: from :func:`contains_unquoted_ampersand_dangerous_to_windows`. """ in_quote = False for c in x: if c == quote: in_quote = not in_quote elif c == target: if not in_quote: return True return False
[ "def", "contains_unquoted_target", "(", "x", ":", "str", ",", "quote", ":", "str", "=", "'\"'", ",", "target", ":", "str", "=", "'&'", ")", "->", "bool", ":", "in_quote", "=", "False", "for", "c", "in", "x", ":", "if", "c", "==", "quote", ":", "i...
Checks if ``target`` exists in ``x`` outside quotes (as defined by ``quote``). Principal use: from :func:`contains_unquoted_ampersand_dangerous_to_windows`.
[ "Checks", "if", "target", "exists", "in", "x", "outside", "quotes", "(", "as", "defined", "by", "quote", ")", ".", "Principal", "use", ":", "from", ":", "func", ":", "contains_unquoted_ampersand_dangerous_to_windows", "." ]
python
train
EntilZha/PyFunctional
functional/transformations.py
https://github.com/EntilZha/PyFunctional/blob/ac04e4a8552b0c464a7f492f7c9862424867b63e/functional/transformations.py#L634-L647
def sliding_impl(wrap, size, step, sequence): """ Implementation for sliding_t :param wrap: wrap children values with this :param size: size of window :param step: step size :param sequence: sequence to create sliding windows from :return: sequence of sliding windows """ i = 0 n = len(sequence) while i + size <= n or (step != 1 and i < n): yield wrap(sequence[i: i + size]) i += step
[ "def", "sliding_impl", "(", "wrap", ",", "size", ",", "step", ",", "sequence", ")", ":", "i", "=", "0", "n", "=", "len", "(", "sequence", ")", "while", "i", "+", "size", "<=", "n", "or", "(", "step", "!=", "1", "and", "i", "<", "n", ")", ":",...
Implementation for sliding_t :param wrap: wrap children values with this :param size: size of window :param step: step size :param sequence: sequence to create sliding windows from :return: sequence of sliding windows
[ "Implementation", "for", "sliding_t", ":", "param", "wrap", ":", "wrap", "children", "values", "with", "this", ":", "param", "size", ":", "size", "of", "window", ":", "param", "step", ":", "step", "size", ":", "param", "sequence", ":", "sequence", "to", ...
python
train
cloudendpoints/endpoints-python
endpoints/users_id_token.py
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/users_id_token.py#L365-L375
def _are_scopes_sufficient(authorized_scopes, sufficient_scopes): """Check if a list of authorized scopes satisfies any set of sufficient scopes. Args: authorized_scopes: a list of strings, return value from oauth.get_authorized_scopes sufficient_scopes: a set of sets of strings, return value from _process_scopes """ for sufficient_scope_set in sufficient_scopes: if sufficient_scope_set.issubset(authorized_scopes): return True return False
[ "def", "_are_scopes_sufficient", "(", "authorized_scopes", ",", "sufficient_scopes", ")", ":", "for", "sufficient_scope_set", "in", "sufficient_scopes", ":", "if", "sufficient_scope_set", ".", "issubset", "(", "authorized_scopes", ")", ":", "return", "True", "return", ...
Check if a list of authorized scopes satisfies any set of sufficient scopes. Args: authorized_scopes: a list of strings, return value from oauth.get_authorized_scopes sufficient_scopes: a set of sets of strings, return value from _process_scopes
[ "Check", "if", "a", "list", "of", "authorized", "scopes", "satisfies", "any", "set", "of", "sufficient", "scopes", "." ]
python
train
saltstack/salt
salt/utils/stringutils.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/stringutils.py#L594-L609
def camel_to_snake_case(camel_input): ''' Converts camelCase (or CamelCase) to snake_case. From https://codereview.stackexchange.com/questions/185966/functions-to-convert-camelcase-strings-to-snake-case :param str camel_input: The camelcase or CamelCase string to convert to snake_case :return str ''' res = camel_input[0].lower() for i, letter in enumerate(camel_input[1:], 1): if letter.isupper(): if camel_input[i-1].islower() or (i != len(camel_input)-1 and camel_input[i+1].islower()): res += '_' res += letter.lower() return res
[ "def", "camel_to_snake_case", "(", "camel_input", ")", ":", "res", "=", "camel_input", "[", "0", "]", ".", "lower", "(", ")", "for", "i", ",", "letter", "in", "enumerate", "(", "camel_input", "[", "1", ":", "]", ",", "1", ")", ":", "if", "letter", ...
Converts camelCase (or CamelCase) to snake_case. From https://codereview.stackexchange.com/questions/185966/functions-to-convert-camelcase-strings-to-snake-case :param str camel_input: The camelcase or CamelCase string to convert to snake_case :return str
[ "Converts", "camelCase", "(", "or", "CamelCase", ")", "to", "snake_case", ".", "From", "https", ":", "//", "codereview", ".", "stackexchange", ".", "com", "/", "questions", "/", "185966", "/", "functions", "-", "to", "-", "convert", "-", "camelcase", "-", ...
python
train
thefab/tornadis
tornadis/client.py
https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/client.py#L218-L259
def async_call(self, *args, **kwargs): """Calls a redis command, waits for the reply and call a callback. Following options are available (not part of the redis command itself): - callback Function called (with the result as argument) when the result is available. If not set, the reply is silently discarded. In case of errors, the callback is called with a TornadisException object as argument. Args: *args: full redis command as variable length argument list or a Pipeline object (as a single argument). **kwargs: options as keyword parameters. Examples: >>> def cb(result): pass >>> client.async_call("HSET", "key", "field", "val", callback=cb) """ def after_autoconnect_callback(future): if self.is_connected(): self._call(*args, **kwargs) else: # FIXME pass if 'callback' not in kwargs: kwargs['callback'] = discard_reply_cb if not self.is_connected(): if self.autoconnect: connect_future = self.connect() cb = after_autoconnect_callback self.__connection._ioloop.add_future(connect_future, cb) else: error = ConnectionError("you are not connected and " "autoconnect=False") kwargs['callback'](error) else: self._call(*args, **kwargs)
[ "def", "async_call", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "after_autoconnect_callback", "(", "future", ")", ":", "if", "self", ".", "is_connected", "(", ")", ":", "self", ".", "_call", "(", "*", "args", ",", "*", ...
Calls a redis command, waits for the reply and call a callback. Following options are available (not part of the redis command itself): - callback Function called (with the result as argument) when the result is available. If not set, the reply is silently discarded. In case of errors, the callback is called with a TornadisException object as argument. Args: *args: full redis command as variable length argument list or a Pipeline object (as a single argument). **kwargs: options as keyword parameters. Examples: >>> def cb(result): pass >>> client.async_call("HSET", "key", "field", "val", callback=cb)
[ "Calls", "a", "redis", "command", "waits", "for", "the", "reply", "and", "call", "a", "callback", "." ]
python
train
not-na/peng3d
peng3d/keybind.py
https://github.com/not-na/peng3d/blob/1151be665b26cc8a479f6307086ba919e4d32d85/peng3d/keybind.py#L144-L152
def mod_is_held(self,modname,modifiers): """ Helper method to simplify checking if a modifier is held. :param str modname: Name of the modifier, see :py:data:`MODNAME2MODIFIER` :param int modifiers: Bitmask to check in, same as the modifiers argument of the on_key_press etc. handlers """ modifier = MODNAME2MODIFIER[modname.lower()] return modifiers&modifier
[ "def", "mod_is_held", "(", "self", ",", "modname", ",", "modifiers", ")", ":", "modifier", "=", "MODNAME2MODIFIER", "[", "modname", ".", "lower", "(", ")", "]", "return", "modifiers", "&", "modifier" ]
Helper method to simplify checking if a modifier is held. :param str modname: Name of the modifier, see :py:data:`MODNAME2MODIFIER` :param int modifiers: Bitmask to check in, same as the modifiers argument of the on_key_press etc. handlers
[ "Helper", "method", "to", "simplify", "checking", "if", "a", "modifier", "is", "held", ".", ":", "param", "str", "modname", ":", "Name", "of", "the", "modifier", "see", ":", "py", ":", "data", ":", "MODNAME2MODIFIER", ":", "param", "int", "modifiers", ":...
python
test
jrief/django-angular
djng/forms/fields.py
https://github.com/jrief/django-angular/blob/9f2f8247027173e3b3ad3b245ca299a9c9f31b40/djng/forms/fields.py#L355-L364
def implode_multi_values(self, name, data): """ Due to the way Angular organizes it model, when Form data is sent via a POST request, then for this kind of widget, the posted data must to be converted into a format suitable for Django's Form validation. """ mkeys = [k for k in data.keys() if k.startswith(name + '.')] mvls = [data.pop(k)[0] for k in mkeys] if mvls: data.setlist(name, mvls)
[ "def", "implode_multi_values", "(", "self", ",", "name", ",", "data", ")", ":", "mkeys", "=", "[", "k", "for", "k", "in", "data", ".", "keys", "(", ")", "if", "k", ".", "startswith", "(", "name", "+", "'.'", ")", "]", "mvls", "=", "[", "data", ...
Due to the way Angular organizes it model, when Form data is sent via a POST request, then for this kind of widget, the posted data must to be converted into a format suitable for Django's Form validation.
[ "Due", "to", "the", "way", "Angular", "organizes", "it", "model", "when", "Form", "data", "is", "sent", "via", "a", "POST", "request", "then", "for", "this", "kind", "of", "widget", "the", "posted", "data", "must", "to", "be", "converted", "into", "a", ...
python
train
ioam/lancet
lancet/core.py
https://github.com/ioam/lancet/blob/1fbbf88fa0e8974ff9ed462e3cb11722ddebdd6e/lancet/core.py#L35-L43
def to_table(args, vdims=[]): "Helper function to convet an Args object to a HoloViews Table" if not Table: return "HoloViews Table not available" kdims = [dim for dim in args.constant_keys + args.varying_keys if dim not in vdims] items = [tuple([spec[k] for k in kdims+vdims]) for spec in args.specs] return Table(items, kdims=kdims, vdims=vdims)
[ "def", "to_table", "(", "args", ",", "vdims", "=", "[", "]", ")", ":", "if", "not", "Table", ":", "return", "\"HoloViews Table not available\"", "kdims", "=", "[", "dim", "for", "dim", "in", "args", ".", "constant_keys", "+", "args", ".", "varying_keys", ...
Helper function to convet an Args object to a HoloViews Table
[ "Helper", "function", "to", "convet", "an", "Args", "object", "to", "a", "HoloViews", "Table" ]
python
valid
ml31415/numpy-groupies
numpy_groupies/benchmarks/generic.py
https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/benchmarks/generic.py#L13-L23
def aggregate_grouploop(*args, **kwargs): """wraps func in lambda which prevents aggregate_numpy from recognising and optimising it. Instead it groups and loops.""" extrafuncs = {'allnan': allnan, 'anynan': anynan, 'first': itemgetter(0), 'last': itemgetter(-1), 'nanfirst': nanfirst, 'nanlast': nanlast} func = kwargs.pop('func') func = extrafuncs.get(func, func) if isinstance(func, str): raise NotImplementedError("Grouploop needs to be called with a function") return aggregate_numpy.aggregate(*args, func=lambda x: func(x), **kwargs)
[ "def", "aggregate_grouploop", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "extrafuncs", "=", "{", "'allnan'", ":", "allnan", ",", "'anynan'", ":", "anynan", ",", "'first'", ":", "itemgetter", "(", "0", ")", ",", "'last'", ":", "itemgetter", "(...
wraps func in lambda which prevents aggregate_numpy from recognising and optimising it. Instead it groups and loops.
[ "wraps", "func", "in", "lambda", "which", "prevents", "aggregate_numpy", "from", "recognising", "and", "optimising", "it", ".", "Instead", "it", "groups", "and", "loops", "." ]
python
train
Dallinger/Dallinger
dallinger/registration.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/registration.py#L50-L64
def _upload_assets_to_OSF(dlgr_id, osf_id, provider="osfstorage"): """Upload experimental assets to the OSF.""" root = "https://files.osf.io/v1" snapshot_filename = "{}-code.zip".format(dlgr_id) snapshot_path = os.path.join("snapshots", snapshot_filename) r = requests.put( "{}/resources/{}/providers/{}/".format(root, osf_id, provider), params={"kind": "file", "name": snapshot_filename}, headers={ "Authorization": "Bearer {}".format(config.get("osf_access_token")), "Content-Type": "text/plain", }, data=open(snapshot_path, "rb"), ) r.raise_for_status()
[ "def", "_upload_assets_to_OSF", "(", "dlgr_id", ",", "osf_id", ",", "provider", "=", "\"osfstorage\"", ")", ":", "root", "=", "\"https://files.osf.io/v1\"", "snapshot_filename", "=", "\"{}-code.zip\"", ".", "format", "(", "dlgr_id", ")", "snapshot_path", "=", "os", ...
Upload experimental assets to the OSF.
[ "Upload", "experimental", "assets", "to", "the", "OSF", "." ]
python
train
AshleySetter/optoanalysis
optoanalysis/optoanalysis/optoanalysis.py
https://github.com/AshleySetter/optoanalysis/blob/9d390acc834d70024d47b574aea14189a5a5714e/optoanalysis/optoanalysis/optoanalysis.py#L3966-L3988
def calc_mass_from_fit_and_conv_factor(A, Damping, ConvFactor, T0=300.0):
    """
    Calculates mass from the A parameter from fitting, the damping from
    fitting in angular units and the Conversion factor calculated from
    comparing the ratio of the z signal and first harmonic of z.

    Evaluates m = 2*k_B*T0/(pi*A) * ConvFactor**2 * Damping.

    Parameters
    ----------
    A : float
        A factor calculated from fitting
    Damping : float
        damping in radians/second calculated from fitting
    ConvFactor : float
        conversion factor between volts and nms
    T0 : float, optional
        temperature of the environment in Kelvin (default 300, which was
        previously hard-coded)

    Returns
    -------
    mass : float
        mass in kgs
    """
    # T0 was a hard-coded 300 K; it is now a keyword parameter with the
    # same default, so existing callers are unaffected.
    mFromA = 2 * Boltzmann * T0 / (pi * A) * ConvFactor**2 * Damping
    return mFromA
[ "def", "calc_mass_from_fit_and_conv_factor", "(", "A", ",", "Damping", ",", "ConvFactor", ")", ":", "T0", "=", "300", "mFromA", "=", "2", "*", "Boltzmann", "*", "T0", "/", "(", "pi", "*", "A", ")", "*", "ConvFactor", "**", "2", "*", "Damping", "return"...
Calculates mass from the A parameter from fitting, the damping from fitting in angular units and the Conversion factor calculated from comparing the ratio of the z signal and first harmonic of z. Parameters ---------- A : float A factor calculated from fitting Damping : float damping in radians/second calculated from fitting ConvFactor : float conversion factor between volts and nms Returns ------- mass : float mass in kgs
[ "Calculates", "mass", "from", "the", "A", "parameter", "from", "fitting", "the", "damping", "from", "fitting", "in", "angular", "units", "and", "the", "Conversion", "factor", "calculated", "from", "comparing", "the", "ratio", "of", "the", "z", "signal", "and",...
python
train
abourget/gevent-socketio
socketio/virtsocket.py
https://github.com/abourget/gevent-socketio/blob/1cdb1594a315326987a17ce0924ea448a82fab01/socketio/virtsocket.py#L477-L480
def _spawn_heartbeat(self):
    """Spawn the heartbeat sender and the heartbeat-timeout watcher jobs.

    Note: despite the original docstring, this returns None; it only
    schedules the two jobs via ``self.spawn()``.
    """
    self.spawn(self._heartbeat)
    self.spawn(self._heartbeat_timeout)
[ "def", "_spawn_heartbeat", "(", "self", ")", ":", "self", ".", "spawn", "(", "self", ".", "_heartbeat", ")", "self", ".", "spawn", "(", "self", ".", "_heartbeat_timeout", ")" ]
This function returns a list of jobs
[ "This", "functions", "returns", "a", "list", "of", "jobs" ]
python
valid