repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
jimzhan/pyx
rex/core/system.py
https://github.com/jimzhan/pyx/blob/819e8251323a7923e196c0c438aa8524f5aaee6e/rex/core/system.py#L13-L27
def execute(command, cwd=os.path.curdir, **options): """ Run the system command with optional options. Args: * command: system command. * cwd: current working directory. * verbose: direct options for :func:`subprocess.Popen`. Returns: Opened process, standard output & error. """ process = subprocess.Popen(shlex.split(command), cwd=cwd, **options) stdout, stderr = process.communicate() return process, stdout, stderr
[ "def", "execute", "(", "command", ",", "cwd", "=", "os", ".", "path", ".", "curdir", ",", "*", "*", "options", ")", ":", "process", "=", "subprocess", ".", "Popen", "(", "shlex", ".", "split", "(", "command", ")", ",", "cwd", "=", "cwd", ",", "*"...
Run the system command with optional options. Args: * command: system command. * cwd: current working directory. * verbose: direct options for :func:`subprocess.Popen`. Returns: Opened process, standard output & error.
[ "Run", "the", "system", "command", "with", "optional", "options", "." ]
python
train
bwohlberg/sporco
docs/source/docntbk.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/docs/source/docntbk.py#L705-L738
def get_sphinx_ref(self, url, label=None): """ Get an internal sphinx cross reference corresponding to `url` into the online docs, associated with a link with label `label` (if not None). """ # Raise an exception if the initial part of url does not match # the base url for this object n = len(self.baseurl) if url[0:n] != self.baseurl: raise KeyError('base of url %s does not match base url %s' % (url, self.baseurl)) # The reverse lookup key is either the full url or the postfix # to the base url, depending on flag addbase if self.addbase: pstfx = url[n:] else: pstfx = url # Look up the cross-reference role and referenced object # name via the postfix to the base url role, name = self.revinv[pstfx] # If the label string is provided and is shorter than the name # string we have lookup up, assume it is a partial name for # the same object: append a '.' at the front and use it as the # object name in the cross-reference if label is not None and len(label) < len(name): name = '.' + label # Construct cross-reference ref = ':%s:`%s`' % (role, name) return ref
[ "def", "get_sphinx_ref", "(", "self", ",", "url", ",", "label", "=", "None", ")", ":", "# Raise an exception if the initial part of url does not match", "# the base url for this object", "n", "=", "len", "(", "self", ".", "baseurl", ")", "if", "url", "[", "0", ":"...
Get an internal sphinx cross reference corresponding to `url` into the online docs, associated with a link with label `label` (if not None).
[ "Get", "an", "internal", "sphinx", "cross", "reference", "corresponding", "to", "url", "into", "the", "online", "docs", "associated", "with", "a", "link", "with", "label", "label", "(", "if", "not", "None", ")", "." ]
python
train
GNS3/gns3-server
gns3server/compute/dynamips/nodes/bridge.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/dynamips/nodes/bridge.py#L100-L108
def remove_nio(self, nio): """ Removes the specified NIO as member of this bridge. :param nio: NIO instance to remove """ if self._hypervisor: yield from self._hypervisor.send('nio_bridge remove_nio "{name}" {nio}'.format(name=self._name, nio=nio)) self._nios.remove(nio)
[ "def", "remove_nio", "(", "self", ",", "nio", ")", ":", "if", "self", ".", "_hypervisor", ":", "yield", "from", "self", ".", "_hypervisor", ".", "send", "(", "'nio_bridge remove_nio \"{name}\" {nio}'", ".", "format", "(", "name", "=", "self", ".", "_name", ...
Removes the specified NIO as member of this bridge. :param nio: NIO instance to remove
[ "Removes", "the", "specified", "NIO", "as", "member", "of", "this", "bridge", "." ]
python
train
spyder-ide/spyder
spyder/widgets/comboboxes.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/comboboxes.py#L351-L355
def is_valid(self, qstr=None): """Return True if string is valid""" if qstr is None: qstr = self.currentText() return is_module_or_package(to_text_string(qstr))
[ "def", "is_valid", "(", "self", ",", "qstr", "=", "None", ")", ":", "if", "qstr", "is", "None", ":", "qstr", "=", "self", ".", "currentText", "(", ")", "return", "is_module_or_package", "(", "to_text_string", "(", "qstr", ")", ")" ]
Return True if string is valid
[ "Return", "True", "if", "string", "is", "valid" ]
python
train
Alignak-monitoring/alignak
alignak/downtime.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/downtime.py#L174-L227
def enter(self, timeperiods, hosts, services): """Set ref in scheduled downtime and raise downtime log entry (start) :param hosts: hosts objects to get item ref :type hosts: alignak.objects.host.Hosts :param services: services objects to get item ref :type services: alignak.objects.service.Services :return: broks :rtype: list of broks """ if self.ref in hosts: item = hosts[self.ref] else: item = services[self.ref] broks = [] self.is_in_effect = True if self.fixed is False: now = time.time() self.real_end_time = now + self.duration item.scheduled_downtime_depth += 1 item.in_scheduled_downtime = True if item.scheduled_downtime_depth == 1: item.raise_enter_downtime_log_entry() notification_period = None if getattr(item, 'notification_period', None) is not None: notification_period = timeperiods[item.notification_period] # Notification author data # todo: note that alias and name are not implemented yet author_data = { 'author': self.author, 'author_name': u'Not available', 'author_alias': u'Not available', 'author_comment': self.comment } item.create_notifications('DOWNTIMESTART', notification_period, hosts, services, author_data=author_data) if self.ref in hosts: broks.append(self.get_raise_brok(item.get_name())) # For an host, acknowledge the host problem (and its services problems) # Acknowledge the host with a sticky ack and notifications # The acknowledge will expire at the same time as the downtime end item.acknowledge_problem(notification_period, hosts, services, 2, 1, "Alignak", "Acknowledged because of an host downtime") else: broks.append(self.get_raise_brok(item.host_name, item.get_name())) for downtime_id in self.activate_me: for host in hosts: if downtime_id in host.downtimes: downtime = host.downtimes[downtime_id] broks.extend(downtime.enter(timeperiods, hosts, services)) for service in services: if downtime_id in service.downtimes: downtime = service.downtimes[downtime_id] broks.extend(downtime.enter(timeperiods, hosts, services)) return broks
[ "def", "enter", "(", "self", ",", "timeperiods", ",", "hosts", ",", "services", ")", ":", "if", "self", ".", "ref", "in", "hosts", ":", "item", "=", "hosts", "[", "self", ".", "ref", "]", "else", ":", "item", "=", "services", "[", "self", ".", "r...
Set ref in scheduled downtime and raise downtime log entry (start) :param hosts: hosts objects to get item ref :type hosts: alignak.objects.host.Hosts :param services: services objects to get item ref :type services: alignak.objects.service.Services :return: broks :rtype: list of broks
[ "Set", "ref", "in", "scheduled", "downtime", "and", "raise", "downtime", "log", "entry", "(", "start", ")" ]
python
train
Cog-Creators/Red-Lavalink
lavalink/lavalink.py
https://github.com/Cog-Creators/Red-Lavalink/blob/5b3fc6eb31ee5db8bd2b633a523cf69749957111/lavalink/lavalink.py#L211-L231
def register_update_listener(coro): """ Registers a coroutine to receive lavalink player update information. This coroutine will accept a two arguments: an instance of :py:class:`Player` and an instance of :py:class:`PlayerState`. Parameters ---------- coro Raises ------ TypeError If ``coro`` is not a coroutine. """ if not asyncio.iscoroutinefunction(coro): raise TypeError("Function is not a coroutine.") if coro not in _update_listeners: _update_listeners.append(coro)
[ "def", "register_update_listener", "(", "coro", ")", ":", "if", "not", "asyncio", ".", "iscoroutinefunction", "(", "coro", ")", ":", "raise", "TypeError", "(", "\"Function is not a coroutine.\"", ")", "if", "coro", "not", "in", "_update_listeners", ":", "_update_l...
Registers a coroutine to receive lavalink player update information. This coroutine will accept a two arguments: an instance of :py:class:`Player` and an instance of :py:class:`PlayerState`. Parameters ---------- coro Raises ------ TypeError If ``coro`` is not a coroutine.
[ "Registers", "a", "coroutine", "to", "receive", "lavalink", "player", "update", "information", "." ]
python
train
python-diamond/Diamond
src/diamond/metric.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/diamond/metric.py#L148-L164
def getMetricPath(self): """ Returns the metric path after the collector name servers.host.cpu.total.idle return "total.idle" """ # If we don't have a host name, assume it's just the fourth+ part of the # metric path if self.host is None: path = self.path.split('.')[3:] return '.'.join(path) prefix = '.'.join([self.getPathPrefix(), self.host, self.getCollectorPath()]) offset = len(prefix) + 1 return self.path[offset:]
[ "def", "getMetricPath", "(", "self", ")", ":", "# If we don't have a host name, assume it's just the fourth+ part of the", "# metric path", "if", "self", ".", "host", "is", "None", ":", "path", "=", "self", ".", "path", ".", "split", "(", "'.'", ")", "[", "3", "...
Returns the metric path after the collector name servers.host.cpu.total.idle return "total.idle"
[ "Returns", "the", "metric", "path", "after", "the", "collector", "name", "servers", ".", "host", ".", "cpu", ".", "total", ".", "idle", "return", "total", ".", "idle" ]
python
train
has2k1/plotnine
plotnine/geoms/geom.py
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/geoms/geom.py#L195-L221
def draw_layer(self, data, layout, coord, **params): """ Draw layer across all panels geoms should not override this method. Parameters ---------- data : DataFrame DataFrame specific for this layer layout : Lanel Layout object created when the plot is getting built coord : coord Type of coordinate axes params : dict Combined *geom* and *stat* parameters. Also includes the stacking order of the layer in the plot (*zorder*) """ for pid, pdata in data.groupby('PANEL'): if len(pdata) == 0: continue ploc = pid - 1 panel_params = layout.panel_params[ploc] ax = layout.axs[ploc] self.draw_panel(pdata, panel_params, coord, ax, **params)
[ "def", "draw_layer", "(", "self", ",", "data", ",", "layout", ",", "coord", ",", "*", "*", "params", ")", ":", "for", "pid", ",", "pdata", "in", "data", ".", "groupby", "(", "'PANEL'", ")", ":", "if", "len", "(", "pdata", ")", "==", "0", ":", "...
Draw layer across all panels geoms should not override this method. Parameters ---------- data : DataFrame DataFrame specific for this layer layout : Lanel Layout object created when the plot is getting built coord : coord Type of coordinate axes params : dict Combined *geom* and *stat* parameters. Also includes the stacking order of the layer in the plot (*zorder*)
[ "Draw", "layer", "across", "all", "panels" ]
python
train
jxtech/wechatpy
wechatpy/client/api/merchant/__init__.py
https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/client/api/merchant/__init__.py#L123-L139
def add_stock(self, product_id, sku_info, quantity): """ 增加库存 :param product_id: 商品ID :param sku_info: sku信息,格式"id1:vid1;id2:vid2",如商品为统一规格,则此处赋值为空字符串即可 :param quantity: 增加的库存数量 :return: 返回的 JSON 数据包 """ return self._post( 'merchant/stock/add', data={ "product_id": product_id, "sku_info": sku_info, "quantity": quantity } )
[ "def", "add_stock", "(", "self", ",", "product_id", ",", "sku_info", ",", "quantity", ")", ":", "return", "self", ".", "_post", "(", "'merchant/stock/add'", ",", "data", "=", "{", "\"product_id\"", ":", "product_id", ",", "\"sku_info\"", ":", "sku_info", ","...
增加库存 :param product_id: 商品ID :param sku_info: sku信息,格式"id1:vid1;id2:vid2",如商品为统一规格,则此处赋值为空字符串即可 :param quantity: 增加的库存数量 :return: 返回的 JSON 数据包
[ "增加库存" ]
python
train
apache/incubator-mxnet
python/mxnet/symbol/symbol.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/symbol/symbol.py#L1824-L1862
def eval(self, ctx=None, **kwargs): """Evaluates a symbol given arguments. The `eval` method combines a call to `bind` (which returns an executor) with a call to `forward` (executor method). For the common use case, where you might repeatedly evaluate with same arguments, eval is slow. In that case, you should call `bind` once and then repeatedly call forward. This function allows simpler syntax for less cumbersome introspection. Example ------- >>> a = mx.sym.Variable('a') >>> b = mx.sym.Variable('b') >>> c = a + b >>> ex = c.eval(ctx = mx.cpu(), a = mx.nd.ones([2,3]), b = mx.nd.ones([2,3])) >>> ex [<NDArray 2x3 @cpu(0)>] >>> ex[0].asnumpy() array([[ 2., 2., 2.], [ 2., 2., 2.]], dtype=float32) Parameters ---------- ctx : Context The device context the generated executor to run on. kwargs : Keyword arguments of type `NDArray` Input arguments to the symbol. All the arguments must be provided. Returns ---------- result : a list of NDArrays corresponding to the values taken by each symbol when evaluated on given args. When called on a single symbol (not a group), the result will be a list with one element. """ if ctx is None: ctx = current_context() return self.bind(ctx, kwargs).forward()
[ "def", "eval", "(", "self", ",", "ctx", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "ctx", "is", "None", ":", "ctx", "=", "current_context", "(", ")", "return", "self", ".", "bind", "(", "ctx", ",", "kwargs", ")", ".", "forward", "(", ...
Evaluates a symbol given arguments. The `eval` method combines a call to `bind` (which returns an executor) with a call to `forward` (executor method). For the common use case, where you might repeatedly evaluate with same arguments, eval is slow. In that case, you should call `bind` once and then repeatedly call forward. This function allows simpler syntax for less cumbersome introspection. Example ------- >>> a = mx.sym.Variable('a') >>> b = mx.sym.Variable('b') >>> c = a + b >>> ex = c.eval(ctx = mx.cpu(), a = mx.nd.ones([2,3]), b = mx.nd.ones([2,3])) >>> ex [<NDArray 2x3 @cpu(0)>] >>> ex[0].asnumpy() array([[ 2., 2., 2.], [ 2., 2., 2.]], dtype=float32) Parameters ---------- ctx : Context The device context the generated executor to run on. kwargs : Keyword arguments of type `NDArray` Input arguments to the symbol. All the arguments must be provided. Returns ---------- result : a list of NDArrays corresponding to the values taken by each symbol when evaluated on given args. When called on a single symbol (not a group), the result will be a list with one element.
[ "Evaluates", "a", "symbol", "given", "arguments", "." ]
python
train
saltstack/salt
salt/modules/gem.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gem.py#L313-L348
def list_upgrades(ruby=None, runas=None, gem_bin=None): ''' .. versionadded:: 2015.8.0 Check if an upgrade is available for installed gems gem_bin : None Full path to ``gem`` binary to use. ruby : None If RVM or rbenv are installed, the ruby version and gemset to use. Ignored if ``gem_bin`` is specified. runas : None The user to run gem as. CLI Example: .. code-block:: bash salt '*' gem.list_upgrades ''' result = _gem(['outdated'], ruby, gem_bin=gem_bin, runas=runas) ret = {} for line in salt.utils.itertools.split(result, '\n'): match = re.search(r'(\S+) \(\S+ < (\S+)\)', line) if match: name, version = match.groups() else: log.error('Can\'t parse line \'%s\'', line) continue ret[name] = version return ret
[ "def", "list_upgrades", "(", "ruby", "=", "None", ",", "runas", "=", "None", ",", "gem_bin", "=", "None", ")", ":", "result", "=", "_gem", "(", "[", "'outdated'", "]", ",", "ruby", ",", "gem_bin", "=", "gem_bin", ",", "runas", "=", "runas", ")", "r...
.. versionadded:: 2015.8.0 Check if an upgrade is available for installed gems gem_bin : None Full path to ``gem`` binary to use. ruby : None If RVM or rbenv are installed, the ruby version and gemset to use. Ignored if ``gem_bin`` is specified. runas : None The user to run gem as. CLI Example: .. code-block:: bash salt '*' gem.list_upgrades
[ "..", "versionadded", "::", "2015", ".", "8", ".", "0" ]
python
train
capless/warrant
warrant/__init__.py
https://github.com/capless/warrant/blob/ff2e4793d8479e770f2461ef7cbc0c15ee784395/warrant/__init__.py#L245-L263
def check_token(self, renew=True): """ Checks the exp attribute of the access_token and either refreshes the tokens by calling the renew_access_tokens method or does nothing :param renew: bool indicating whether to refresh on expiration :return: bool indicating whether access_token has expired """ if not self.access_token: raise AttributeError('Access Token Required to Check Token') now = datetime.datetime.now() dec_access_token = jwt.get_unverified_claims(self.access_token) if now > datetime.datetime.fromtimestamp(dec_access_token['exp']): expired = True if renew: self.renew_access_token() else: expired = False return expired
[ "def", "check_token", "(", "self", ",", "renew", "=", "True", ")", ":", "if", "not", "self", ".", "access_token", ":", "raise", "AttributeError", "(", "'Access Token Required to Check Token'", ")", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")...
Checks the exp attribute of the access_token and either refreshes the tokens by calling the renew_access_tokens method or does nothing :param renew: bool indicating whether to refresh on expiration :return: bool indicating whether access_token has expired
[ "Checks", "the", "exp", "attribute", "of", "the", "access_token", "and", "either", "refreshes", "the", "tokens", "by", "calling", "the", "renew_access_tokens", "method", "or", "does", "nothing", ":", "param", "renew", ":", "bool", "indicating", "whether", "to", ...
python
train
InQuest/python-sandboxapi
sandboxapi/vmray.py
https://github.com/InQuest/python-sandboxapi/blob/9bad73f453e25d7d23e7b4b1ae927f44a35a5bc3/sandboxapi/vmray.py#L107-L152
def report(self, item_id, report_format="json"): """Retrieves the specified report for the analyzed item, referenced by item_id. Available formats include: json. :type item_id: str :param item_id: File ID number :type report_format: str :param report_format: Return format :rtype: dict :return: Dictionary representing the JSON parsed data or raw, for other formats / JSON parsing failure. """ if report_format == "html": return "Report Unavailable" # grab an analysis id from the submission id. response = self._request("/analysis/sample/{sample_id}".format(sample_id=item_id), headers=self.headers) try: # the highest score is probably the most interesting. # vmray uses this internally with sample_highest_vti_score so this seems like a safe assumption. analysis_id = 0 top_score = -1 for analysis in response.json()['data']: if analysis['analysis_vti_score'] > top_score: top_score = analysis['analysis_vti_score'] analysis_id = analysis['analysis_id'] except (ValueError, KeyError) as e: raise sandboxapi.SandboxError(e) # assume report format json. response = self._request("/analysis/{analysis_id}/archive/logs/summary.json".format(analysis_id=analysis_id), headers=self.headers) # if response is JSON, return it as an object. try: return response.json() except ValueError: pass # otherwise, return the raw content. return response.content
[ "def", "report", "(", "self", ",", "item_id", ",", "report_format", "=", "\"json\"", ")", ":", "if", "report_format", "==", "\"html\"", ":", "return", "\"Report Unavailable\"", "# grab an analysis id from the submission id.", "response", "=", "self", ".", "_request", ...
Retrieves the specified report for the analyzed item, referenced by item_id. Available formats include: json. :type item_id: str :param item_id: File ID number :type report_format: str :param report_format: Return format :rtype: dict :return: Dictionary representing the JSON parsed data or raw, for other formats / JSON parsing failure.
[ "Retrieves", "the", "specified", "report", "for", "the", "analyzed", "item", "referenced", "by", "item_id", "." ]
python
train
bloomreach/s4cmd
s4cmd.py
https://github.com/bloomreach/s4cmd/blob/bb51075bf43703e7cd95aa39288cf7732ec13a6d/s4cmd.py#L1260-L1275
def get_file_splits(self, id, source, target, fsize, splitsize): '''Get file splits for upload/download/copy operation.''' pos = 0 part = 1 # S3 part id starts from 1 mpi = ThreadUtil.MultipartItem(id) splits = [] while pos < fsize: chunk = min(splitsize, fsize - pos) assert(chunk > 0) splits.append((source, target, mpi, pos, chunk, part)) part += 1 pos += chunk mpi.total = len(splits) return splits
[ "def", "get_file_splits", "(", "self", ",", "id", ",", "source", ",", "target", ",", "fsize", ",", "splitsize", ")", ":", "pos", "=", "0", "part", "=", "1", "# S3 part id starts from 1", "mpi", "=", "ThreadUtil", ".", "MultipartItem", "(", "id", ")", "sp...
Get file splits for upload/download/copy operation.
[ "Get", "file", "splits", "for", "upload", "/", "download", "/", "copy", "operation", "." ]
python
test
Rikanishu/static-bundle
static_bundle/builders.py
https://github.com/Rikanishu/static-bundle/blob/2f6458cb9d9d9049b4fd829f7d6951a45d547c68/static_bundle/builders.py#L44-L61
def add_bundle(self, *args): """ Add some bundle to build group :type bundle: static_bundle.bundles.AbstractBundle @rtype: BuildGroup """ for bundle in args: if not self.multitype and self.has_bundles(): first_bundle = self.get_first_bundle() if first_bundle.get_type() != bundle.get_type(): raise Exception( 'Different bundle types for one Asset: %s[%s -> %s]' 'check types or set multitype parameter to True' % (self.name, first_bundle.get_type(), bundle.get_type()) ) self.bundles.append(bundle) return self
[ "def", "add_bundle", "(", "self", ",", "*", "args", ")", ":", "for", "bundle", "in", "args", ":", "if", "not", "self", ".", "multitype", "and", "self", ".", "has_bundles", "(", ")", ":", "first_bundle", "=", "self", ".", "get_first_bundle", "(", ")", ...
Add some bundle to build group :type bundle: static_bundle.bundles.AbstractBundle @rtype: BuildGroup
[ "Add", "some", "bundle", "to", "build", "group" ]
python
valid
mpasternak/django-dsl
django_dsl/parser.py
https://github.com/mpasternak/django-dsl/blob/da1fe4da92503841a4ba5ae9db100248cf98437d/django_dsl/parser.py#L64-L84
def p_expression_ID(p): """expression : FIELD operation value """ lookup = compa2lookup[p[2]] try: field = get_shortcut(p[1]) except KeyError: field = p[1] if lookup: field = '%s__%s' % (field, lookup) # In some situations (which ones?), python # refuses unicode strings as dict keys for # Q(**d) field = str(field) d = {field: p[3]} p[0] = Q(**d)
[ "def", "p_expression_ID", "(", "p", ")", ":", "lookup", "=", "compa2lookup", "[", "p", "[", "2", "]", "]", "try", ":", "field", "=", "get_shortcut", "(", "p", "[", "1", "]", ")", "except", "KeyError", ":", "field", "=", "p", "[", "1", "]", "if", ...
expression : FIELD operation value
[ "expression", ":", "FIELD", "operation", "value" ]
python
train
PmagPy/PmagPy
pmagpy/pmag.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L9589-L9615
def linreg(x, y): """ does a linear regression """ if len(x) != len(y): print('x and y must be same length') return xx, yy, xsum, ysum, xy, n, sum = 0, 0, 0, 0, 0, len(x), 0 linpars = {} for i in range(n): xx += x[i] * x[i] yy += y[i] * y[i] xy += x[i] * y[i] xsum += x[i] ysum += y[i] xsig = np.sqrt(old_div((xx - old_div(xsum**2, n)), (n - 1.))) ysig = np.sqrt(old_div((yy - old_div(ysum**2, n)), (n - 1.))) linpars['slope'] = old_div( (xy - (xsum * ysum / n)), (xx - old_div((xsum**2), n))) linpars['b'] = old_div((ysum - linpars['slope'] * xsum), n) linpars['r'] = old_div((linpars['slope'] * xsig), ysig) for i in range(n): a = y[i] - linpars['b'] - linpars['slope'] * x[i] sum += a linpars['sigma'] = old_div(sum, (n - 2.)) linpars['n'] = n return linpars
[ "def", "linreg", "(", "x", ",", "y", ")", ":", "if", "len", "(", "x", ")", "!=", "len", "(", "y", ")", ":", "print", "(", "'x and y must be same length'", ")", "return", "xx", ",", "yy", ",", "xsum", ",", "ysum", ",", "xy", ",", "n", ",", "sum"...
does a linear regression
[ "does", "a", "linear", "regression" ]
python
train
Fantomas42/django-blog-zinnia
zinnia/sitemaps.py
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/sitemaps.py#L123-L129
def get_queryset(self): """ Return the published Tags with option counts. """ self.entries_qs = Entry.published.all() return Tag.objects.usage_for_queryset( self.entries_qs, counts=True)
[ "def", "get_queryset", "(", "self", ")", ":", "self", ".", "entries_qs", "=", "Entry", ".", "published", ".", "all", "(", ")", "return", "Tag", ".", "objects", ".", "usage_for_queryset", "(", "self", ".", "entries_qs", ",", "counts", "=", "True", ")" ]
Return the published Tags with option counts.
[ "Return", "the", "published", "Tags", "with", "option", "counts", "." ]
python
train
CxAalto/gtfspy
gtfspy/shapes.py
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/shapes.py#L324-L381
def get_shape_between_stops(cur, trip_I, seq_stop1=None, seq_stop2=None, shape_breaks=None): """ Given a trip_I (shortened id), return shape points between two stops (seq_stop1 and seq_stop2). Trip_I is used for matching obtaining the full shape of one trip (route). From the resulting shape we then obtain only shape points between stop_seq1 and stop_seq2 trip_I---(trips)--->shape_id trip_I, seq_stop1----(stop_times)---> shape_break1 trip_I, seq_stop2----(stop_times)---> shape_break2 shapes_id+shape_break1+shape_break2 --(shapes)--> result Parameters ---------- cur : sqlite3.Cursor cursor to sqlite3 DB containing GTFS trip_I : int transformed trip_id (i.e. a new column that is created when GTFS is imported to a DB) seq_stop1: int a positive inger describing the index of the point of the shape that corresponds to the first stop seq_stop2: int a positive inger describing the index of the point of the shape that corresponds to the second stop shape_breaks: ?? Returns ------- shapedict: dict Dictionary containing the latitudes and longitudes: lats=shapedict['lat'] lons=shapedict['lon'] """ assert (seq_stop1 and seq_stop2) or shape_breaks if not shape_breaks: shape_breaks = [] for seq_stop in [seq_stop1, seq_stop2]: query = """SELECT shape_break FROM stop_times WHERE trip_I=%d AND seq=%d """ % (trip_I, seq_stop) for row in cur.execute(query): shape_breaks.append(row[0]) assert len(shape_breaks) == 2 query = """SELECT seq, lat, lon FROM (SELECT shape_id FROM trips WHERE trip_I=%d) JOIN shapes USING (shape_id) WHERE seq>=%d AND seq <= %d; """ % (trip_I, shape_breaks[0], shape_breaks[1]) shapedict = {'lat': [], 'lon': [], 'seq': []} for row in cur.execute(query): shapedict['seq'].append(row[0]) shapedict['lat'].append(row[1]) shapedict['lon'].append(row[2]) return shapedict
[ "def", "get_shape_between_stops", "(", "cur", ",", "trip_I", ",", "seq_stop1", "=", "None", ",", "seq_stop2", "=", "None", ",", "shape_breaks", "=", "None", ")", ":", "assert", "(", "seq_stop1", "and", "seq_stop2", ")", "or", "shape_breaks", "if", "not", "...
Given a trip_I (shortened id), return shape points between two stops (seq_stop1 and seq_stop2). Trip_I is used for matching obtaining the full shape of one trip (route). From the resulting shape we then obtain only shape points between stop_seq1 and stop_seq2 trip_I---(trips)--->shape_id trip_I, seq_stop1----(stop_times)---> shape_break1 trip_I, seq_stop2----(stop_times)---> shape_break2 shapes_id+shape_break1+shape_break2 --(shapes)--> result Parameters ---------- cur : sqlite3.Cursor cursor to sqlite3 DB containing GTFS trip_I : int transformed trip_id (i.e. a new column that is created when GTFS is imported to a DB) seq_stop1: int a positive inger describing the index of the point of the shape that corresponds to the first stop seq_stop2: int a positive inger describing the index of the point of the shape that corresponds to the second stop shape_breaks: ?? Returns ------- shapedict: dict Dictionary containing the latitudes and longitudes: lats=shapedict['lat'] lons=shapedict['lon']
[ "Given", "a", "trip_I", "(", "shortened", "id", ")", "return", "shape", "points", "between", "two", "stops", "(", "seq_stop1", "and", "seq_stop2", ")", "." ]
python
valid
MolSSI-BSE/basis_set_exchange
basis_set_exchange/api.py
https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/api.py#L318-L330
def get_reference_data(data_dir=None): '''Obtain information for all stored references This is a nested dictionary with all the data for all the references The reference data is read from the REFERENCES.json file in the given `data_dir` directory. ''' data_dir = fix_data_dir(data_dir) reffile_path = os.path.join(data_dir, 'REFERENCES.json') return fileio.read_references(reffile_path)
[ "def", "get_reference_data", "(", "data_dir", "=", "None", ")", ":", "data_dir", "=", "fix_data_dir", "(", "data_dir", ")", "reffile_path", "=", "os", ".", "path", ".", "join", "(", "data_dir", ",", "'REFERENCES.json'", ")", "return", "fileio", ".", "read_re...
Obtain information for all stored references This is a nested dictionary with all the data for all the references The reference data is read from the REFERENCES.json file in the given `data_dir` directory.
[ "Obtain", "information", "for", "all", "stored", "references" ]
python
train
quantopian/zipline
zipline/assets/asset_writer.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_writer.py#L335-L368
def _split_symbol_mappings(df, exchanges): """Split out the symbol: sid mappings from the raw data. Parameters ---------- df : pd.DataFrame The dataframe with multiple rows for each symbol: sid pair. exchanges : pd.DataFrame The exchanges table. Returns ------- asset_info : pd.DataFrame The asset info with one row per asset. symbol_mappings : pd.DataFrame The dataframe of just symbol: sid mappings. The index will be the sid, then there will be three columns: symbol, start_date, and end_date. """ mappings = df[list(mapping_columns)] with pd.option_context('mode.chained_assignment', None): mappings['sid'] = mappings.index mappings.reset_index(drop=True, inplace=True) # take the most recent sid->exchange mapping based on end date asset_exchange = df[ ['exchange', 'end_date'] ].sort_values('end_date').groupby(level=0)['exchange'].nth(-1) _check_symbol_mappings(mappings, exchanges, asset_exchange) return ( df.groupby(level=0).apply(_check_asset_group), mappings, )
[ "def", "_split_symbol_mappings", "(", "df", ",", "exchanges", ")", ":", "mappings", "=", "df", "[", "list", "(", "mapping_columns", ")", "]", "with", "pd", ".", "option_context", "(", "'mode.chained_assignment'", ",", "None", ")", ":", "mappings", "[", "'sid...
Split out the symbol: sid mappings from the raw data. Parameters ---------- df : pd.DataFrame The dataframe with multiple rows for each symbol: sid pair. exchanges : pd.DataFrame The exchanges table. Returns ------- asset_info : pd.DataFrame The asset info with one row per asset. symbol_mappings : pd.DataFrame The dataframe of just symbol: sid mappings. The index will be the sid, then there will be three columns: symbol, start_date, and end_date.
[ "Split", "out", "the", "symbol", ":", "sid", "mappings", "from", "the", "raw", "data", "." ]
python
train
opendatateam/udata
udata/core/metrics/commands.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/metrics/commands.py#L70-L76
def list(): '''List all known metrics''' for cls, metrics in metric_catalog.items(): echo(white(cls.__name__)) for metric in metrics.keys(): echo('> {0}'.format(metric))
[ "def", "list", "(", ")", ":", "for", "cls", ",", "metrics", "in", "metric_catalog", ".", "items", "(", ")", ":", "echo", "(", "white", "(", "cls", ".", "__name__", ")", ")", "for", "metric", "in", "metrics", ".", "keys", "(", ")", ":", "echo", "(...
List all known metrics
[ "List", "all", "known", "metrics" ]
python
train
andreikop/qutepart
qutepart/margins.py
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/margins.py#L175-L185
def clear(self): """Convenience method to reset all the block values to 0 """ if self._bit_count == 0: return block = self._qpart.document().begin() while block.isValid(): if self.getBlockValue(block): self.setBlockValue(block, 0) block = block.next()
[ "def", "clear", "(", "self", ")", ":", "if", "self", ".", "_bit_count", "==", "0", ":", "return", "block", "=", "self", ".", "_qpart", ".", "document", "(", ")", ".", "begin", "(", ")", "while", "block", ".", "isValid", "(", ")", ":", "if", "self...
Convenience method to reset all the block values to 0
[ "Convenience", "method", "to", "reset", "all", "the", "block", "values", "to", "0" ]
python
train
wonambi-python/wonambi
wonambi/detect/spindle.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/detect/spindle.py#L1804-L1849
def peak_in_power(events, dat, s_freq, method, value=None): """Define peak in power of the signal. Parameters ---------- events : ndarray (dtype='int') N x 3 matrix with start, peak, end samples dat : ndarray (dtype='float') vector with the original data s_freq : float sampling frequency method : str or None 'peak' or 'interval'. If None, values will be all NaN value : float size of the window around peak, or nothing (for 'interval') Returns ------- ndarray (dtype='float') vector with peak frequency """ dat = diff(dat) # remove 1/f peak = empty(events.shape[0]) peak.fill(nan) if method is not None: for i, one_event in enumerate(events): if method == 'peak': x0 = one_event[1] - value / 2 * s_freq x1 = one_event[1] + value / 2 * s_freq elif method == 'interval': x0 = one_event[0] x1 = one_event[2] if x0 < 0 or x1 >= len(dat): peak[i] = nan else: f, Pxx = periodogram(dat[x0:x1], s_freq) idx_peak = Pxx[f < MAX_FREQUENCY_OF_INTEREST].argmax() peak[i] = f[idx_peak] return peak
[ "def", "peak_in_power", "(", "events", ",", "dat", ",", "s_freq", ",", "method", ",", "value", "=", "None", ")", ":", "dat", "=", "diff", "(", "dat", ")", "# remove 1/f", "peak", "=", "empty", "(", "events", ".", "shape", "[", "0", "]", ")", "peak"...
Define peak in power of the signal. Parameters ---------- events : ndarray (dtype='int') N x 3 matrix with start, peak, end samples dat : ndarray (dtype='float') vector with the original data s_freq : float sampling frequency method : str or None 'peak' or 'interval'. If None, values will be all NaN value : float size of the window around peak, or nothing (for 'interval') Returns ------- ndarray (dtype='float') vector with peak frequency
[ "Define", "peak", "in", "power", "of", "the", "signal", "." ]
python
train
fxsjy/jieba
jieba/__init__.py
https://github.com/fxsjy/jieba/blob/8212b6c5725d08311952a3a08e5509eeaee33eb7/jieba/__init__.py#L424-L453
def suggest_freq(self, segment, tune=False): """ Suggest word frequency to force the characters in a word to be joined or splitted. Parameter: - segment : The segments that the word is expected to be cut into, If the word should be treated as a whole, use a str. - tune : If True, tune the word frequency. Note that HMM may affect the final result. If the result doesn't change, set HMM=False. """ self.check_initialized() ftotal = float(self.total) freq = 1 if isinstance(segment, string_types): word = segment for seg in self.cut(word, HMM=False): freq *= self.FREQ.get(seg, 1) / ftotal freq = max(int(freq * self.total) + 1, self.FREQ.get(word, 1)) else: segment = tuple(map(strdecode, segment)) word = ''.join(segment) for seg in segment: freq *= self.FREQ.get(seg, 1) / ftotal freq = min(int(freq * self.total), self.FREQ.get(word, 0)) if tune: add_word(word, freq) return freq
[ "def", "suggest_freq", "(", "self", ",", "segment", ",", "tune", "=", "False", ")", ":", "self", ".", "check_initialized", "(", ")", "ftotal", "=", "float", "(", "self", ".", "total", ")", "freq", "=", "1", "if", "isinstance", "(", "segment", ",", "s...
Suggest word frequency to force the characters in a word to be joined or splitted. Parameter: - segment : The segments that the word is expected to be cut into, If the word should be treated as a whole, use a str. - tune : If True, tune the word frequency. Note that HMM may affect the final result. If the result doesn't change, set HMM=False.
[ "Suggest", "word", "frequency", "to", "force", "the", "characters", "in", "a", "word", "to", "be", "joined", "or", "splitted", "." ]
python
train
zwischenloesung/ardu-report-lib
libardurep/datastore.py
https://github.com/zwischenloesung/ardu-report-lib/blob/51bd4a07e036065aafcb1273b151bea3fdfa50fa/libardurep/datastore.py#L207-L219
def get_json_tuples(self, prettyprint=False, translate=True): """ Get the data as JSON tuples """ j = self.get_json(prettyprint, translate) if len(j) > 2: if prettyprint: j = j[1:-2] + ",\n" else: j = j[1:-1] + "," else: j = "" return j
[ "def", "get_json_tuples", "(", "self", ",", "prettyprint", "=", "False", ",", "translate", "=", "True", ")", ":", "j", "=", "self", ".", "get_json", "(", "prettyprint", ",", "translate", ")", "if", "len", "(", "j", ")", ">", "2", ":", "if", "prettypr...
Get the data as JSON tuples
[ "Get", "the", "data", "as", "JSON", "tuples" ]
python
valid
rgs1/zk_shell
zk_shell/shell.py
https://github.com/rgs1/zk_shell/blob/bbf34fdfcf1f81100e2a5816fad8af6afc782a54/zk_shell/shell.py#L1775-L1789
def do_rmr(self, params): """ \x1b[1mNAME\x1b[0m rmr - Delete a path and all its children \x1b[1mSYNOPSIS\x1b[0m rmr <path> [path] [path] ... [path] \x1b[1mEXAMPLES\x1b[0m > rmr /foo > rmr /foo /bar """ for path in params.paths: self._zk.delete(path, recursive=True)
[ "def", "do_rmr", "(", "self", ",", "params", ")", ":", "for", "path", "in", "params", ".", "paths", ":", "self", ".", "_zk", ".", "delete", "(", "path", ",", "recursive", "=", "True", ")" ]
\x1b[1mNAME\x1b[0m rmr - Delete a path and all its children \x1b[1mSYNOPSIS\x1b[0m rmr <path> [path] [path] ... [path] \x1b[1mEXAMPLES\x1b[0m > rmr /foo > rmr /foo /bar
[ "\\", "x1b", "[", "1mNAME", "\\", "x1b", "[", "0m", "rmr", "-", "Delete", "a", "path", "and", "all", "its", "children" ]
python
train
i3visio/osrframework
osrframework/alias_generator.py
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/alias_generator.py#L50-L2822
def generate(name=None, surname1=None, surname2=None, city=None, country=None, year=None, useNumbers=False, useCommonWords=False, useLeet=False, useLocales=False, extraWords=[]): """ The method that generates the given aliases. It receives several parameters as parsed by this module's `getParser()`. Previously referenced as `main`. Args: ----- name: String representing the known name of the investigated profile. surname1: String representing the first surname of the investigated profile. surname2: String representing the second surname of the investigated profile. city: String representing the city where the profile was born or works. country: String representing the country. year: String representing a year linked to the profile. useNumbers: Boolean representing whether to use random numbers. useCommonWords: Boolean representing whether to use known commond words to generate new nicknames. useNumbers: Boolean representing whether to use random numbers. useLeet: Boolean representing whether to modify certain letters by numbers using the leet (*133t*) codification. extraWords: A list of strings with extra words to be appended to the generatednicknames. Returns ------- list: An ordered list of the nicknames generated. """ # Lowering all the info received name = name.lower() surname1 = surname1.lower() surname2 = surname2.lower() year = year.lower() country = country.lower() city = city.lower() # Check if the value provided is a '' string if name == '': name = None if surname1 == '': surname1 = None if surname2 == '': surname2 = None if year == '': year = None if city == '': city = None if country == '': country = None print("\nGenerating new aliases...") lista = [] try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + name if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + surname2[0] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + surname2 if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + surname2[0] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2 if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2[0] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2 if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1[0] + surname2[0] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2[0] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2 if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2 if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + surname2 if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2[0] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + surname2[0] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2[0] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + surname2 if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2[0] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2[0] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + surname2[0] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + surname2 if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + "<SEPARATOR>" + surname2 if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + "<SEPARATOR>" + surname2[0] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + surname2[0] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2 if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2 if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + surname2 if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + surname2 + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + surname2 + name + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + surname2 + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1[0] + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + name + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + name + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + surname2 + "<SEPARATOR>" + name + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + name + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + surname2 + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1[0] + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + name + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + name + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + name + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + surname2 + "<SEPARATOR>" + name + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + surname2 + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + surname2 + name + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + surname2 + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1[0] + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + name + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + name + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + surname2 + "<SEPARATOR>" + name + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + name + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + surname2 + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + surname2 + name + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + surname2 + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1[0] + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + name + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + name + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1[0] + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + name + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + name + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + name + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + surname2 + "<SEPARATOR>" + name + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + city if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1[0] + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + name + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + name + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + surname2[0] + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + country if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1[0] + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + name + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + name + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + surname2 + "<SEPARATOR>" + name + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + name + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + surname2[0] + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + year if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1[0] + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + name + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + name + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + surname1 + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + surname2[0] + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1[0] + "<SEPARATOR>" + surname2 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0] + "<SEPARATOR>" + surname1 + "<SEPARATOR>" + year[-2:] if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0:1] + "<SEPARATOR>" + surname1 if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0:2] + "<SEPARATOR>" + surname1 if tmp not in lista: lista.append(tmp) except: pass # An element is missing try: tmp = name[0:3] + "<SEPARATOR>" + surname1 if tmp not in lista: lista.append(tmp) except: pass # An element is missing # AFTER THE CREATION, WE WILL PERFORM ADDITIONAL TRANSFORMATIONS # -------------------------------------------------------------- # Creating the output list listaAdditions = [] listaAdditions += lista # Adding common words if useCommonWords: for n in lista: for w in COMMON_WORDS: try: tmp = n + "<SEPARATOR>" + w if tmp not in listaAdditions: listaAdditions.append(tmp) except: pass # An element is missing # Adding extra words provided by the user for n in lista: for w in extraWords: try: tmp = n + "<SEPARATOR>" + w if tmp not in listaAdditions: listaAdditions.append(tmp) except: pass # An element is missing # Adding loales if useLocales: for n in lista: for l in LOCALES: try: tmp = n + "<SEPARATOR>" + l if tmp not in listaAdditions: listaAdditions.append(tmp) except: pass # An element is missing # Appending Numbers to the nicks created if useNumbers: for n in lista: for i in range(100): try: tmp = n + "<SEPARATOR>" + str(i).rjust(2, "0") if tmp not in listaAdditions: listaAdditions.append(tmp) except: pass # An element is missing # Appending Numbers to the nicks if useLeet: for n in lista: # This will store the variations of the nicks with all the possible combinations possibleChanges = [] possibleChanges += [n] for k in LEET_TRANSFORMS.keys(): try: # Iterating through the list of possible changes found in the array for change in LEET_TRANSFORMS[k]: # Replacing the option tmp = n.replace(k, change ) if tmp not in listaAdditions: listaAdditions.append(tmp) # Applying all the possible changes newAliases = [] for f in possibleChanges: newAliases.append( f.replace(k, change ) ) # Adding the new changes possibleChanges += newAliases except: pass # An element is missing # Appending the possible combinations which include ALL the possible leet options for changedAll in possibleChanges: if changedAll not in listaAdditions: listaAdditions.append(changedAll) listaFinal = [] # REMOVING THE "<SEPARATOR>" TAGS TO GET THE FINAL NICKNAMES for s in SEPARATORS: for n in listaAdditions: try: tmp = n.replace("<SEPARATOR>", s) lastChar = tmp[-1:] # Verifying if the last char is or not one of the separators to remove it if not lastChar in SEPARATORS: if tmp not in listaFinal: listaFinal.append(tmp) except: pass # An element is missing # Sorting list listaFinal.sort() # Showing the execution time... endTime= dt.datetime.now() print("\n{}\tGeneration finished...\n".format(endTime)) try: print("\nGenerated nicks:\n") print(general.success(json.dumps(listaFinal, indent=2, sort_keys=True))) except UnicodeDecodeError as _: for n in listaFinal: print(general.success(n)) print(general.warning("\nThe input provided includes a Unicode character. You may try it again without it.")) print("\nUp to " + general.emphasis(str(len(listaFinal))) + " nicks generated.\n") return listaFinal
[ "def", "generate", "(", "name", "=", "None", ",", "surname1", "=", "None", ",", "surname2", "=", "None", ",", "city", "=", "None", ",", "country", "=", "None", ",", "year", "=", "None", ",", "useNumbers", "=", "False", ",", "useCommonWords", "=", "Fa...
The method that generates the given aliases. It receives several parameters as parsed by this module's `getParser()`. Previously referenced as `main`. Args: ----- name: String representing the known name of the investigated profile. surname1: String representing the first surname of the investigated profile. surname2: String representing the second surname of the investigated profile. city: String representing the city where the profile was born or works. country: String representing the country. year: String representing a year linked to the profile. useNumbers: Boolean representing whether to use random numbers. useCommonWords: Boolean representing whether to use known commond words to generate new nicknames. useNumbers: Boolean representing whether to use random numbers. useLeet: Boolean representing whether to modify certain letters by numbers using the leet (*133t*) codification. extraWords: A list of strings with extra words to be appended to the generatednicknames. Returns ------- list: An ordered list of the nicknames generated.
[ "The", "method", "that", "generates", "the", "given", "aliases", "." ]
python
train
quadrismegistus/prosodic
prosodic/entity.py
https://github.com/quadrismegistus/prosodic/blob/8af66ed9be40c922d03a0b09bc11c87d2061b618/prosodic/entity.py#L132-L156
def feat(self,k,v): """ Store value 'v' as a feature name 'k' for this object. Features are stored in the dictionary self.feats. [IMPORTANT NOTE:] If the feature 'k' is not yet defined, then: self.feats[k]=v OTHERWISE: self.feats[k] becomes a list (if not already) 'v' is added to this list """ if (not hasattr(self,'feats')): self.feats = {} if (not hasattr(self,'featpaths')): self.featpaths={} if (not k in self.feats): self.feats[k] = v else: if type(self.feats[k])==type([]): self.feats[k].append(v) else: obj=self.feats[k] self.feats[k]=[obj,v]
[ "def", "feat", "(", "self", ",", "k", ",", "v", ")", ":", "if", "(", "not", "hasattr", "(", "self", ",", "'feats'", ")", ")", ":", "self", ".", "feats", "=", "{", "}", "if", "(", "not", "hasattr", "(", "self", ",", "'featpaths'", ")", ")", ":...
Store value 'v' as a feature name 'k' for this object. Features are stored in the dictionary self.feats. [IMPORTANT NOTE:] If the feature 'k' is not yet defined, then: self.feats[k]=v OTHERWISE: self.feats[k] becomes a list (if not already) 'v' is added to this list
[ "Store", "value", "v", "as", "a", "feature", "name", "k", "for", "this", "object", ".", "Features", "are", "stored", "in", "the", "dictionary", "self", ".", "feats", ".", "[", "IMPORTANT", "NOTE", ":", "]", "If", "the", "feature", "k", "is", "not", "...
python
train
ninuxorg/nodeshot
nodeshot/networking/links/views.py
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/networking/links/views.py#L73-L93
def initial(self, request, *args, **kwargs): """ Custom initial method: * ensure node exists and store it in an instance attribute * change queryset to return only links of current node """ super(NodeLinkList, self).initial(request, *args, **kwargs) # ensure node exists try: self.node = Node.objects.published()\ .accessible_to(request.user)\ .get(slug=self.kwargs.get('slug', None)) except Node.DoesNotExist: raise Http404(_('Node not found.')) # check permissions on node (for link creation) self.check_object_permissions(request, self.node) # return only links of current node self.queryset = Link.objects.select_related('node_a', 'node_b')\ .accessible_to(self.request.user)\ .filter(Q(node_a_id=self.node.id) | Q(node_b_id=self.node.id))
[ "def", "initial", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "super", "(", "NodeLinkList", ",", "self", ")", ".", "initial", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "# ensure node exists"...
Custom initial method: * ensure node exists and store it in an instance attribute * change queryset to return only links of current node
[ "Custom", "initial", "method", ":", "*", "ensure", "node", "exists", "and", "store", "it", "in", "an", "instance", "attribute", "*", "change", "queryset", "to", "return", "only", "links", "of", "current", "node" ]
python
train
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/gloo/wrappers.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/gloo/wrappers.py#L159-L172
def set_polygon_offset(self, factor=0., units=0.): """Set the scale and units used to calculate depth values Parameters ---------- factor : float Scale factor used to create a variable depth offset for each polygon. units : float Multiplied by an implementation-specific value to create a constant depth offset. """ self.glir.command('FUNC', 'glPolygonOffset', float(factor), float(units))
[ "def", "set_polygon_offset", "(", "self", ",", "factor", "=", "0.", ",", "units", "=", "0.", ")", ":", "self", ".", "glir", ".", "command", "(", "'FUNC'", ",", "'glPolygonOffset'", ",", "float", "(", "factor", ")", ",", "float", "(", "units", ")", ")...
Set the scale and units used to calculate depth values Parameters ---------- factor : float Scale factor used to create a variable depth offset for each polygon. units : float Multiplied by an implementation-specific value to create a constant depth offset.
[ "Set", "the", "scale", "and", "units", "used", "to", "calculate", "depth", "values", "Parameters", "----------", "factor", ":", "float", "Scale", "factor", "used", "to", "create", "a", "variable", "depth", "offset", "for", "each", "polygon", ".", "units", ":...
python
train
SBRG/ssbio
ssbio/pipeline/gempro.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/gempro.py#L1500-L1524
def get_freesasa_annotations_parallelize(self, sc, include_hetatms=False, representatives_only=True, force_rerun=False): """Run freesasa on structures and store calculations. Annotations are stored in the protein structure's chain sequence at: ``<chain_prop>.seq_record.letter_annotations['*-freesasa']`` Args: include_hetatms (bool): If HETATMs should be included in calculations. Defaults to ``False``. representative_only (bool): If analysis should only be run on the representative structure force_rerun (bool): If calculations should be rerun even if an output file exists """ genes_rdd = sc.parallelize(self.genes) def get_freesasa_annotation(g): g.protein.get_freesasa_annotations(include_hetatms=include_hetatms, representative_only=representatives_only, force_rerun=force_rerun) return g result = genes_rdd.map(get_freesasa_annotation).collect() for modified_g in result: original_gene = self.genes.get_by_id(modified_g.id) original_gene.copy_modified_gene(modified_g)
[ "def", "get_freesasa_annotations_parallelize", "(", "self", ",", "sc", ",", "include_hetatms", "=", "False", ",", "representatives_only", "=", "True", ",", "force_rerun", "=", "False", ")", ":", "genes_rdd", "=", "sc", ".", "parallelize", "(", "self", ".", "ge...
Run freesasa on structures and store calculations. Annotations are stored in the protein structure's chain sequence at: ``<chain_prop>.seq_record.letter_annotations['*-freesasa']`` Args: include_hetatms (bool): If HETATMs should be included in calculations. Defaults to ``False``. representative_only (bool): If analysis should only be run on the representative structure force_rerun (bool): If calculations should be rerun even if an output file exists
[ "Run", "freesasa", "on", "structures", "and", "store", "calculations", "." ]
python
train
google/importlab
importlab/import_finder.py
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/import_finder.py#L123-L137
def get_imports(filename): """Get all the imports in a file. Each import is a tuple of: (name, alias, is_from, is_star, source_file) """ with open(filename, "rb") as f: src = f.read() finder = ImportFinder() finder.visit(ast.parse(src, filename=filename)) imports = [] for i in finder.imports: name, _, is_from, is_star = i imports.append(i + (resolve_import(name, is_from, is_star),)) return imports
[ "def", "get_imports", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "\"rb\"", ")", "as", "f", ":", "src", "=", "f", ".", "read", "(", ")", "finder", "=", "ImportFinder", "(", ")", "finder", ".", "visit", "(", "ast", ".", "parse",...
Get all the imports in a file. Each import is a tuple of: (name, alias, is_from, is_star, source_file)
[ "Get", "all", "the", "imports", "in", "a", "file", "." ]
python
train
veltzer/pypitools
pypitools/common.py
https://github.com/veltzer/pypitools/blob/5f097be21e9bc65578eed5b6b7855c1945540701/pypitools/common.py#L22-L42
def check_call_no_output(args): """ Run a process and check that it returns an OK return code and has no output :param args: :return: """ logger = logging.getLogger(__name__) logger.debug("running %s", args) process = subprocess.Popen( args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) (res_stdout, res_stderr) = process.communicate() if process.returncode: res_stdout = res_stdout res_stderr = res_stderr print(res_stdout, end='') print(res_stderr, end='') raise ValueError('exit code from [{}] was [{}]'.format(" ".join(args), process.returncode))
[ "def", "check_call_no_output", "(", "args", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "logger", ".", "debug", "(", "\"running %s\"", ",", "args", ")", "process", "=", "subprocess", ".", "Popen", "(", "args", ",", "stdout",...
Run a process and check that it returns an OK return code and has no output :param args: :return:
[ "Run", "a", "process", "and", "check", "that", "it", "returns", "an", "OK", "return", "code", "and", "has", "no", "output", ":", "param", "args", ":", ":", "return", ":" ]
python
train
cdgriffith/Reusables
reusables/file_operations.py
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/file_operations.py#L684-L715
def directory_duplicates(directory, hash_type='md5', **kwargs): """ Find all duplicates in a directory. Will return a list, in that list are lists of duplicate files. .. code: python dups = reusables.directory_duplicates('C:\\Users\\Me\\Pictures') print(len(dups)) # 56 print(dups) # [['C:\\Users\\Me\\Pictures\\IMG_20161127.jpg', # 'C:\\Users\\Me\\Pictures\\Phone\\IMG_20161127.jpg'], ... :param directory: Directory to search :param hash_type: Type of hash to perform :param kwargs: Arguments to pass to find_files to narrow file types :return: list of lists of dups""" size_map, hash_map = defaultdict(list), defaultdict(list) for item in find_files(directory, **kwargs): file_size = os.path.getsize(item) size_map[file_size].append(item) for possible_dups in (v for v in size_map.values() if len(v) > 1): for each_item in possible_dups: item_hash = file_hash(each_item, hash_type=hash_type) hash_map[item_hash].append(each_item) return [v for v in hash_map.values() if len(v) > 1]
[ "def", "directory_duplicates", "(", "directory", ",", "hash_type", "=", "'md5'", ",", "*", "*", "kwargs", ")", ":", "size_map", ",", "hash_map", "=", "defaultdict", "(", "list", ")", ",", "defaultdict", "(", "list", ")", "for", "item", "in", "find_files", ...
Find all duplicates in a directory. Will return a list, in that list are lists of duplicate files. .. code: python dups = reusables.directory_duplicates('C:\\Users\\Me\\Pictures') print(len(dups)) # 56 print(dups) # [['C:\\Users\\Me\\Pictures\\IMG_20161127.jpg', # 'C:\\Users\\Me\\Pictures\\Phone\\IMG_20161127.jpg'], ... :param directory: Directory to search :param hash_type: Type of hash to perform :param kwargs: Arguments to pass to find_files to narrow file types :return: list of lists of dups
[ "Find", "all", "duplicates", "in", "a", "directory", ".", "Will", "return", "a", "list", "in", "that", "list", "are", "lists", "of", "duplicate", "files", "." ]
python
train
pierre-rouanet/hampy
hampy/hamming.py
https://github.com/pierre-rouanet/hampy/blob/bb633a3936f8a3b5f619fb0d92c7448f3dc3c92d/hampy/hamming.py#L52-L82
def decode(C): """ Decode data using Hamming(7, 4) code. E.g.: decode([1, 0, 0, 0, 0, 1, 1]) encode([[1, 1, 0, 1, 0, 0, 1], [0, 1, 0, 0, 1, 0, 1]]) :param array C: binary data to code (must be shaped as (7, ) or (-1, 7)). """ C = array(C) flatten = False if len(C.shape) == 1: flatten = True C = C.reshape(1, -1) if C.shape[1] != msg_size: raise ValueError('Data must be shaped as (7, ) or (-1, 7)') if 1 in dot(H, C.T).T % 2: raise ValueError('Incorrect code given as input.') B = C[:, data_bits] if flatten: B = B.flatten() return B
[ "def", "decode", "(", "C", ")", ":", "C", "=", "array", "(", "C", ")", "flatten", "=", "False", "if", "len", "(", "C", ".", "shape", ")", "==", "1", ":", "flatten", "=", "True", "C", "=", "C", ".", "reshape", "(", "1", ",", "-", "1", ")", ...
Decode data using Hamming(7, 4) code. E.g.: decode([1, 0, 0, 0, 0, 1, 1]) encode([[1, 1, 0, 1, 0, 0, 1], [0, 1, 0, 0, 1, 0, 1]]) :param array C: binary data to code (must be shaped as (7, ) or (-1, 7)).
[ "Decode", "data", "using", "Hamming", "(", "7", "4", ")", "code", "." ]
python
train
inveniosoftware/invenio-oauth2server
invenio_oauth2server/models.py
https://github.com/inveniosoftware/invenio-oauth2server/blob/7033d3495c1a2b830e101e43918e92a37bbb49f2/invenio_oauth2server/models.py#L219-L229
def redirect_uris(self, value): """Validate and store redirect URIs for client.""" if isinstance(value, six.text_type): value = value.split("\n") value = [v.strip() for v in value] for v in value: validate_redirect_uri(v) self._redirect_uris = "\n".join(value) or ""
[ "def", "redirect_uris", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "six", ".", "text_type", ")", ":", "value", "=", "value", ".", "split", "(", "\"\\n\"", ")", "value", "=", "[", "v", ".", "strip", "(", ")", "for", ...
Validate and store redirect URIs for client.
[ "Validate", "and", "store", "redirect", "URIs", "for", "client", "." ]
python
train
SuperCowPowers/chains
examples/tag_example.py
https://github.com/SuperCowPowers/chains/blob/b0227847b0c43083b456f0bae52daee0b62a3e03/examples/tag_example.py#L10-L29
def run(iface_name=None, bpf=None, summary=None, max_packets=50): """Run the Simple Packet Printer Example""" # Create the classes streamer = packet_streamer.PacketStreamer(iface_name=iface_name, bpf=bpf, max_packets=max_packets) meta = packet_meta.PacketMeta() rdns = reverse_dns.ReverseDNS() tags = packet_tags.PacketTags() tmeta = transport_meta.TransportMeta() printer = packet_summary.PacketSummary() # Set up the chain meta.link(streamer) rdns.link(meta) tags.link(rdns) tmeta.link(tags) printer.link(tmeta) # Pull the chain printer.pull()
[ "def", "run", "(", "iface_name", "=", "None", ",", "bpf", "=", "None", ",", "summary", "=", "None", ",", "max_packets", "=", "50", ")", ":", "# Create the classes", "streamer", "=", "packet_streamer", ".", "PacketStreamer", "(", "iface_name", "=", "iface_nam...
Run the Simple Packet Printer Example
[ "Run", "the", "Simple", "Packet", "Printer", "Example" ]
python
train
uchicago-cs/deepdish
deepdish/image.py
https://github.com/uchicago-cs/deepdish/blob/01af93621fe082a3972fe53ba7375388c02b0085/deepdish/image.py#L289-L305
def bounding_box_as_binary_map(alpha, threshold=0.1): """ Similar to `bounding_box`, except returns the bounding box as a binary map the same size as the input. Same parameters as `bounding_box`. Returns ------- binary_map : ndarray, ndim=2, dtype=np.bool_ Binary map with True if object and False if background. """ bb = bounding_box(alpha) x = np.zeros(alpha.shape, dtype=np.bool_) x[bb[0]:bb[2], bb[1]:bb[3]] = 1 return x
[ "def", "bounding_box_as_binary_map", "(", "alpha", ",", "threshold", "=", "0.1", ")", ":", "bb", "=", "bounding_box", "(", "alpha", ")", "x", "=", "np", ".", "zeros", "(", "alpha", ".", "shape", ",", "dtype", "=", "np", ".", "bool_", ")", "x", "[", ...
Similar to `bounding_box`, except returns the bounding box as a binary map the same size as the input. Same parameters as `bounding_box`. Returns ------- binary_map : ndarray, ndim=2, dtype=np.bool_ Binary map with True if object and False if background.
[ "Similar", "to", "bounding_box", "except", "returns", "the", "bounding", "box", "as", "a", "binary", "map", "the", "same", "size", "as", "the", "input", "." ]
python
train
vanheeringen-lab/gimmemotifs
gimmemotifs/rocmetrics.py
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/rocmetrics.py#L357-L444
def roc_auc_xlim(x_bla, y_bla, xlim=0.1): """ Computes the ROC Area Under Curve until a certain FPR value. Parameters ---------- fg_vals : array_like list of values for positive set bg_vals : array_like list of values for negative set xlim : float, optional FPR value Returns ------- score : float ROC AUC score """ x = x_bla[:] y = y_bla[:] x.sort() y.sort() u = {} for i in x + y: u[i] = 1 vals = sorted(u.keys()) len_x = float(len(x)) len_y = float(len(y)) new_x = [] new_y = [] x_p = 0 y_p = 0 for val in vals[::-1]: while len(x) > 0 and x[-1] >= val: x.pop() x_p += 1 while len(y) > 0 and y[-1] >= val: y.pop() y_p += 1 new_y.append((len_x - x_p) / len_x) new_x.append((len_y - y_p) / len_y) #print new_x #print new_y new_x = 1 - np.array(new_x) new_y = 1 - np.array(new_y) #plot(new_x, new_y) #show() x = new_x y = new_y if len(x) != len(y): raise ValueError("Unequal!") if not xlim: xlim = 1.0 auc = 0.0 bla = zip(stats.rankdata(x), range(len(x))) bla = sorted(bla, key=lambda x: x[1]) prev_x = x[bla[0][1]] prev_y = y[bla[0][1]] index = 1 while index < len(bla) and x[bla[index][1]] <= xlim: _, i = bla[index] auc += y[i] * (x[i] - prev_x) - ((x[i] - prev_x) * (y[i] - prev_y) / 2.0) prev_x = x[i] prev_y = y[i] index += 1 if index < len(bla): (rank, i) = bla[index] auc += prev_y * (xlim - prev_x) + ((y[i] - prev_y)/(x[i] - prev_x) * (xlim -prev_x) * (xlim - prev_x)/2) return auc
[ "def", "roc_auc_xlim", "(", "x_bla", ",", "y_bla", ",", "xlim", "=", "0.1", ")", ":", "x", "=", "x_bla", "[", ":", "]", "y", "=", "y_bla", "[", ":", "]", "x", ".", "sort", "(", ")", "y", ".", "sort", "(", ")", "u", "=", "{", "}", "for", "...
Computes the ROC Area Under Curve until a certain FPR value. Parameters ---------- fg_vals : array_like list of values for positive set bg_vals : array_like list of values for negative set xlim : float, optional FPR value Returns ------- score : float ROC AUC score
[ "Computes", "the", "ROC", "Area", "Under", "Curve", "until", "a", "certain", "FPR", "value", "." ]
python
train
shidenggui/easytrader
easytrader/webtrader.py
https://github.com/shidenggui/easytrader/blob/e5ae4daeda4ea125763a95b280dd694c7f68257d/easytrader/webtrader.py#L76-L81
def keepalive(self): """启动保持在线的进程 """ if self.heart_thread.is_alive(): self.heart_active = True else: self.heart_thread.start()
[ "def", "keepalive", "(", "self", ")", ":", "if", "self", ".", "heart_thread", ".", "is_alive", "(", ")", ":", "self", ".", "heart_active", "=", "True", "else", ":", "self", ".", "heart_thread", ".", "start", "(", ")" ]
启动保持在线的进程
[ "启动保持在线的进程" ]
python
train
theiviaxx/Frog
frog/views/gallery.py
https://github.com/theiviaxx/Frog/blob/a9475463a8eed1323fe3ef5d51f9751fb1dc9edd/frog/views/gallery.py#L219-L331
def _filter(request, object_, tags=None, more=False, orderby='created'): """Filters Piece objects from self based on filters, search, and range :param tags: List of tag IDs to filter :type tags: list :param more -- bool, Returns more of the same filtered set of images based on session range return list, Objects filtered """ res = Result() models = QUERY_MODELS idDict = {} objDict = {} data = {} modelmap = {} length = 75 # -- Get all IDs for each model for m in models: modelmap[m.model_class()] = m.model if object_: idDict[m.model] = m.model_class().objects.filter(gallery=object_) else: idDict[m.model] = m.model_class().objects.all() if idDict[m.model] is None: continue if tags: for bucket in tags: searchQuery = "" o = None for item in bucket: if item == 0: # -- filter by tagless idDict[m.model].annotate(num_tags=Count('tags')) if not o: o = Q() o |= Q(num_tags__lte=1) break elif isinstance(item, six.integer_types): # -- filter by tag if not o: o = Q() o |= Q(tags__id=item) else: # -- add to search string searchQuery += item + ' ' if not HAYSTACK: if not o: o = Q() # -- use a basic search o |= Q(title__icontains=item) if HAYSTACK and searchQuery != "": # -- once all tags have been filtered, filter by search searchIDs = search(searchQuery, m.model_class()) if searchIDs: if not o: o = Q() o |= Q(id__in=searchIDs) if o: # -- apply the filters idDict[m.model] = idDict[m.model].annotate(num_tags=Count('tags')).filter(o) else: idDict[m.model] = idDict[m.model].none() # -- Get all ids of filtered objects, this will be a very fast query idDict[m.model] = list(idDict[m.model].order_by('-{}'.format(orderby)).values_list('id', flat=True)) lastid = request.session.get('last_{}'.format(m.model), 0) if not idDict[m.model]: continue if not more: lastid = idDict[m.model][0] index = idDict[m.model].index(lastid) if more and lastid != 0: index += 1 idDict[m.model] = idDict[m.model][index:index + length] # -- perform the main query to retrieve the objects we want objDict[m.model] = m.model_class().objects.filter(id__in=idDict[m.model]) objDict[m.model] = objDict[m.model].select_related('author').prefetch_related('tags').order_by('-{}'.format(orderby)) objDict[m.model] = list(objDict[m.model]) # -- combine and sort all objects by date objects = _sortObjects(orderby, **objDict) if len(models) > 1 else objDict.values()[0] objects = objects[:length] # -- Find out last ids lastids = {} for obj in objects: lastids['last_{}'.format(modelmap[obj.__class__])] = obj.id for key, value in lastids.items(): request.session[key] = value # -- serialize objects for i in objects: res.append(i.json()) data['count'] = len(objects) if settings.DEBUG: data['queries'] = connection.queries res.value = data return JsonResponse(res.asDict())
[ "def", "_filter", "(", "request", ",", "object_", ",", "tags", "=", "None", ",", "more", "=", "False", ",", "orderby", "=", "'created'", ")", ":", "res", "=", "Result", "(", ")", "models", "=", "QUERY_MODELS", "idDict", "=", "{", "}", "objDict", "=",...
Filters Piece objects from self based on filters, search, and range :param tags: List of tag IDs to filter :type tags: list :param more -- bool, Returns more of the same filtered set of images based on session range return list, Objects filtered
[ "Filters", "Piece", "objects", "from", "self", "based", "on", "filters", "search", "and", "range" ]
python
train
pandas-dev/pandas
pandas/io/sql.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/sql.py#L454-L475
def has_table(table_name, con, schema=None): """ Check if DataBase has named table. Parameters ---------- table_name: string Name of SQL table. con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. schema : string, default None Name of SQL schema in database to write to (if database flavor supports this). If None, use default schema (default). Returns ------- boolean """ pandas_sql = pandasSQL_builder(con, schema=schema) return pandas_sql.has_table(table_name)
[ "def", "has_table", "(", "table_name", ",", "con", ",", "schema", "=", "None", ")", ":", "pandas_sql", "=", "pandasSQL_builder", "(", "con", ",", "schema", "=", "schema", ")", "return", "pandas_sql", ".", "has_table", "(", "table_name", ")" ]
Check if DataBase has named table. Parameters ---------- table_name: string Name of SQL table. con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. schema : string, default None Name of SQL schema in database to write to (if database flavor supports this). If None, use default schema (default). Returns ------- boolean
[ "Check", "if", "DataBase", "has", "named", "table", "." ]
python
train
spyder-ide/spyder
spyder/widgets/mixins.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/mixins.py#L440-L447
def is_cursor_before(self, position, char_offset=0): """Return True if cursor is before *position*""" position = self.get_position(position) + char_offset cursor = self.textCursor() cursor.movePosition(QTextCursor.End) if position < cursor.position(): cursor.setPosition(position) return self.textCursor() < cursor
[ "def", "is_cursor_before", "(", "self", ",", "position", ",", "char_offset", "=", "0", ")", ":", "position", "=", "self", ".", "get_position", "(", "position", ")", "+", "char_offset", "cursor", "=", "self", ".", "textCursor", "(", ")", "cursor", ".", "m...
Return True if cursor is before *position*
[ "Return", "True", "if", "cursor", "is", "before", "*", "position", "*" ]
python
train
Jaymon/pyt
pyt/__init__.py
https://github.com/Jaymon/pyt/blob/801581fd0ae238158134bde1c937fa199fa626b2/pyt/__init__.py#L60-L70
def is_single_module(): """Returns True if only a module is being run""" ret = False counts = get_counts() if counts["modules"] == 1: ret = True elif counts["modules"] < 1: ret = is_single_class() return ret
[ "def", "is_single_module", "(", ")", ":", "ret", "=", "False", "counts", "=", "get_counts", "(", ")", "if", "counts", "[", "\"modules\"", "]", "==", "1", ":", "ret", "=", "True", "elif", "counts", "[", "\"modules\"", "]", "<", "1", ":", "ret", "=", ...
Returns True if only a module is being run
[ "Returns", "True", "if", "only", "a", "module", "is", "being", "run" ]
python
test
Autodesk/pyccc
pyccc/utils.py
https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/utils.py#L34-L41
def gist_diff(): """Diff this file with the gist on github""" remote_file = wget(RAW_GIST) proc = subprocess.Popen(('diff - %s'%MY_PATH).split(), stdin=subprocess.PIPE, stdout=subprocess.PIPE) stdout, stderr = proc.communicate(remote_file) return stdout
[ "def", "gist_diff", "(", ")", ":", "remote_file", "=", "wget", "(", "RAW_GIST", ")", "proc", "=", "subprocess", ".", "Popen", "(", "(", "'diff - %s'", "%", "MY_PATH", ")", ".", "split", "(", ")", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "st...
Diff this file with the gist on github
[ "Diff", "this", "file", "with", "the", "gist", "on", "github" ]
python
train
DarkEnergySurvey/ugali
ugali/analysis/mcmc.py
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/mcmc.py#L280-L291
def createMCMC(config,srcfile,section='source',samples=None): """ Create an MCMC instance """ source = ugali.analysis.source.Source() source.load(srcfile,section=section) loglike = ugali.analysis.loglike.createLoglike(config,source) mcmc = MCMC(config,loglike) if samples is not None: mcmc.load_samples(samples) return mcmc
[ "def", "createMCMC", "(", "config", ",", "srcfile", ",", "section", "=", "'source'", ",", "samples", "=", "None", ")", ":", "source", "=", "ugali", ".", "analysis", ".", "source", ".", "Source", "(", ")", "source", ".", "load", "(", "srcfile", ",", "...
Create an MCMC instance
[ "Create", "an", "MCMC", "instance" ]
python
train
PythonCharmers/python-future
src/future/backports/email/header.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/email/header.py#L62-L154
def decode_header(header): """Decode a message header value without converting charset. Returns a list of (string, charset) pairs containing each of the decoded parts of the header. Charset is None for non-encoded parts of the header, otherwise a lower-case string containing the name of the character set specified in the encoded string. header may be a string that may or may not contain RFC2047 encoded words, or it may be a Header object. An email.errors.HeaderParseError may be raised when certain decoding error occurs (e.g. a base64 decoding exception). """ # If it is a Header object, we can just return the encoded chunks. if hasattr(header, '_chunks'): return [(_charset._encode(string, str(charset)), str(charset)) for string, charset in header._chunks] # If no encoding, just return the header with no charset. if not ecre.search(header): return [(header, None)] # First step is to parse all the encoded parts into triplets of the form # (encoded_string, encoding, charset). For unencoded strings, the last # two parts will be None. words = [] for line in header.splitlines(): parts = ecre.split(line) first = True while parts: unencoded = parts.pop(0) if first: unencoded = unencoded.lstrip() first = False if unencoded: words.append((unencoded, None, None)) if parts: charset = parts.pop(0).lower() encoding = parts.pop(0).lower() encoded = parts.pop(0) words.append((encoded, encoding, charset)) # Now loop over words and remove words that consist of whitespace # between two encoded strings. import sys droplist = [] for n, w in enumerate(words): if n>1 and w[1] and words[n-2][1] and words[n-1][0].isspace(): droplist.append(n-1) for d in reversed(droplist): del words[d] # The next step is to decode each encoded word by applying the reverse # base64 or quopri transformation. decoded_words is now a list of the # form (decoded_word, charset). decoded_words = [] for encoded_string, encoding, charset in words: if encoding is None: # This is an unencoded word. decoded_words.append((encoded_string, charset)) elif encoding == 'q': word = header_decode(encoded_string) decoded_words.append((word, charset)) elif encoding == 'b': paderr = len(encoded_string) % 4 # Postel's law: add missing padding if paderr: encoded_string += '==='[:4 - paderr] try: word = base64mime.decode(encoded_string) except binascii.Error: raise HeaderParseError('Base64 decoding error') else: decoded_words.append((word, charset)) else: raise AssertionError('Unexpected encoding: ' + encoding) # Now convert all words to bytes and collapse consecutive runs of # similarly encoded words. collapsed = [] last_word = last_charset = None for word, charset in decoded_words: if isinstance(word, str): word = bytes(word, 'raw-unicode-escape') if last_word is None: last_word = word last_charset = charset elif charset != last_charset: collapsed.append((last_word, last_charset)) last_word = word last_charset = charset elif last_charset is None: last_word += BSPACE + word else: last_word += word collapsed.append((last_word, last_charset)) return collapsed
[ "def", "decode_header", "(", "header", ")", ":", "# If it is a Header object, we can just return the encoded chunks.", "if", "hasattr", "(", "header", ",", "'_chunks'", ")", ":", "return", "[", "(", "_charset", ".", "_encode", "(", "string", ",", "str", "(", "char...
Decode a message header value without converting charset. Returns a list of (string, charset) pairs containing each of the decoded parts of the header. Charset is None for non-encoded parts of the header, otherwise a lower-case string containing the name of the character set specified in the encoded string. header may be a string that may or may not contain RFC2047 encoded words, or it may be a Header object. An email.errors.HeaderParseError may be raised when certain decoding error occurs (e.g. a base64 decoding exception).
[ "Decode", "a", "message", "header", "value", "without", "converting", "charset", "." ]
python
train
SuperCowPowers/workbench
workbench/workers/rekall_adapter/rekall_adapter.py
https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench/workers/rekall_adapter/rekall_adapter.py#L59-L76
def process_row(cls, data, column_map): """Process the row data from Rekall""" row = {} for key,value in data.iteritems(): if not value: value = '-' elif isinstance(value, list): value = value[1] elif isinstance(value, dict): if 'type_name' in value: if 'UnixTimeStamp' in value['type_name']: value = datetime.datetime.utcfromtimestamp(value['epoch']) if value == datetime.datetime(1970, 1, 1, 0, 0): value = '-' # Assume the value is somehow well formed when we get here row[column_map[key]] = value return row
[ "def", "process_row", "(", "cls", ",", "data", ",", "column_map", ")", ":", "row", "=", "{", "}", "for", "key", ",", "value", "in", "data", ".", "iteritems", "(", ")", ":", "if", "not", "value", ":", "value", "=", "'-'", "elif", "isinstance", "(", ...
Process the row data from Rekall
[ "Process", "the", "row", "data", "from", "Rekall" ]
python
train
astropy/regions
regions/shapes/rectangle.py
https://github.com/astropy/regions/blob/452d962c417e4ff20d1268f99535c6ff89c83437/regions/shapes/rectangle.py#L228-L244
def _lower_left_xy(self): """ Compute lower left `xy` position. This is used for the conversion to matplotlib in ``as_artist`` Taken from http://photutils.readthedocs.io/en/latest/_modules/photutils/aperture/rectangle.html#RectangularAperture.plot """ hw = self.width / 2. hh = self.height / 2. sint = np.sin(self.angle) cost = np.cos(self.angle) dx = (hh * sint) - (hw * cost) dy = -(hh * cost) - (hw * sint) x = self.center.x + dx y = self.center.y + dy return x, y
[ "def", "_lower_left_xy", "(", "self", ")", ":", "hw", "=", "self", ".", "width", "/", "2.", "hh", "=", "self", ".", "height", "/", "2.", "sint", "=", "np", ".", "sin", "(", "self", ".", "angle", ")", "cost", "=", "np", ".", "cos", "(", "self", ...
Compute lower left `xy` position. This is used for the conversion to matplotlib in ``as_artist`` Taken from http://photutils.readthedocs.io/en/latest/_modules/photutils/aperture/rectangle.html#RectangularAperture.plot
[ "Compute", "lower", "left", "xy", "position", "." ]
python
train
timothydmorton/VESPA
vespa/stars/populations.py
https://github.com/timothydmorton/VESPA/blob/0446b54d48009f3655cfd1a3957ceea21d3adcaa/vespa/stars/populations.py#L1269-L1281
def rsky_lhood(self,rsky,**kwargs): """ Evaluates Rsky likelihood at provided position(s) :param rsky: position :param **kwargs: Keyword arguments passed to :func:`BinaryPopulation.rsky_distribution` """ dist = self.rsky_distribution(**kwargs) return dist(rsky)
[ "def", "rsky_lhood", "(", "self", ",", "rsky", ",", "*", "*", "kwargs", ")", ":", "dist", "=", "self", ".", "rsky_distribution", "(", "*", "*", "kwargs", ")", "return", "dist", "(", "rsky", ")" ]
Evaluates Rsky likelihood at provided position(s) :param rsky: position :param **kwargs: Keyword arguments passed to :func:`BinaryPopulation.rsky_distribution`
[ "Evaluates", "Rsky", "likelihood", "at", "provided", "position", "(", "s", ")" ]
python
train
dmcc/PyStanfordDependencies
StanfordDependencies/JPypeBackend.py
https://github.com/dmcc/PyStanfordDependencies/blob/43d8f38a19e40087f273330087918c87df6d4d8f/StanfordDependencies/JPypeBackend.py#L193-L199
def _listify(collection): """This is a workaround where Collections are no longer iterable when using JPype.""" new_list = [] for index in range(len(collection)): new_list.append(collection[index]) return new_list
[ "def", "_listify", "(", "collection", ")", ":", "new_list", "=", "[", "]", "for", "index", "in", "range", "(", "len", "(", "collection", ")", ")", ":", "new_list", ".", "append", "(", "collection", "[", "index", "]", ")", "return", "new_list" ]
This is a workaround where Collections are no longer iterable when using JPype.
[ "This", "is", "a", "workaround", "where", "Collections", "are", "no", "longer", "iterable", "when", "using", "JPype", "." ]
python
train
jobovy/galpy
galpy/potential/TwoPowerSphericalPotential.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/TwoPowerSphericalPotential.py#L168-L185
def _dens(self,R,z,phi=0.,t=0.): """ NAME: _dens PURPOSE: evaluate the density for this potential INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: the density HISTORY: 2010-08-08 - Written - Bovy (NYU) """ r= numpy.sqrt(R**2.+z**2.) return (self.a/r)**self.alpha/(1.+r/self.a)**(self.beta-self.alpha)/4./m.pi/self.a**3.
[ "def", "_dens", "(", "self", ",", "R", ",", "z", ",", "phi", "=", "0.", ",", "t", "=", "0.", ")", ":", "r", "=", "numpy", ".", "sqrt", "(", "R", "**", "2.", "+", "z", "**", "2.", ")", "return", "(", "self", ".", "a", "/", "r", ")", "**"...
NAME: _dens PURPOSE: evaluate the density for this potential INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: the density HISTORY: 2010-08-08 - Written - Bovy (NYU)
[ "NAME", ":", "_dens", "PURPOSE", ":", "evaluate", "the", "density", "for", "this", "potential", "INPUT", ":", "R", "-", "Galactocentric", "cylindrical", "radius", "z", "-", "vertical", "height", "phi", "-", "azimuth", "t", "-", "time", "OUTPUT", ":", "the"...
python
train
samirelanduk/quickplots
quickplots/series.py
https://github.com/samirelanduk/quickplots/blob/59f5e6ff367b2c1c24ba7cf1805d03552034c6d8/quickplots/series.py#L353-L365
def write_to_canvas(self, canvas, name): """Writes the series to an OmniCanvas canvas. :param Canvas canvas: The canvas to write to. :param str name: The name to give the line graphic on the canvas.""" points = self.canvas_points() for point in points: canvas.add_oval( point[0] - (self.size() / 2), point[1] - (self.size() / 2), self.size(), self.size(), fill_color=self.color(), line_width=self.linewidth(), name=name )
[ "def", "write_to_canvas", "(", "self", ",", "canvas", ",", "name", ")", ":", "points", "=", "self", ".", "canvas_points", "(", ")", "for", "point", "in", "points", ":", "canvas", ".", "add_oval", "(", "point", "[", "0", "]", "-", "(", "self", ".", ...
Writes the series to an OmniCanvas canvas. :param Canvas canvas: The canvas to write to. :param str name: The name to give the line graphic on the canvas.
[ "Writes", "the", "series", "to", "an", "OmniCanvas", "canvas", "." ]
python
train
gawel/panoramisk
panoramisk/fast_agi.py
https://github.com/gawel/panoramisk/blob/2ccb5d18be28a8e8f444dc0cd3a3bfb59aa19a8e/panoramisk/fast_agi.py#L18-L50
def send_command(self, command): """Send a command for FastAGI request: :param command: Command to launch on FastAGI request. Ex: 'EXEC StartMusicOnHolds' :type command: String :Example: :: @asyncio.coroutine def call_waiting(request): print(['AGI variables:', request.headers]) yield from request.send_command('ANSWER') yield from request.send_command('EXEC StartMusicOnHold') yield from request.send_command('EXEC Wait 10') """ command += '\n' self.writer.write(command.encode(self.encoding)) yield from self.writer.drain() agi_result = yield from self._read_result() # If Asterisk returns `100 Trying...`, wait for next the response. while agi_result.get('status_code') == 100: agi_result = yield from self._read_result() # when we got AGIUsageError the following line contains some indication if 'error' in agi_result and agi_result['error'] == 'AGIUsageError': buff_usage_error = yield from self.reader.readline() agi_result['msg'] += buff_usage_error.decode(self.encoding) return agi_result
[ "def", "send_command", "(", "self", ",", "command", ")", ":", "command", "+=", "'\\n'", "self", ".", "writer", ".", "write", "(", "command", ".", "encode", "(", "self", ".", "encoding", ")", ")", "yield", "from", "self", ".", "writer", ".", "drain", ...
Send a command for FastAGI request: :param command: Command to launch on FastAGI request. Ex: 'EXEC StartMusicOnHolds' :type command: String :Example: :: @asyncio.coroutine def call_waiting(request): print(['AGI variables:', request.headers]) yield from request.send_command('ANSWER') yield from request.send_command('EXEC StartMusicOnHold') yield from request.send_command('EXEC Wait 10')
[ "Send", "a", "command", "for", "FastAGI", "request", ":" ]
python
test
simodalla/pygmount
pygmount/utils/mount.py
https://github.com/simodalla/pygmount/blob/8027cfa2ed5fa8e9207d72b6013ecec7fcf2e5f5/pygmount/utils/mount.py#L57-L81
def requirements(self): """ Verifica che tutti i pacchetti apt necessari al "funzionamento" della classe siano installati. Se cosi' non fosse li installa. """ cache = apt.cache.Cache() for pkg in self.pkgs_required: try: pkg = cache[pkg] if not pkg.is_installed: try: pkg.mark_install() cache.commit() except LockFailedException as lfe: logging.error( 'Errore "{}" probabilmente l\'utente {} non ha i ' 'diritti di amministratore'.format(lfe, self.username)) raise lfe except Exception as e: logging.error('Errore non classificato "{}"'.format(e)) raise e except KeyError: logging.error('Il pacchetto "{}" non e\' presente in questa' ' distribuzione'.format(pkg))
[ "def", "requirements", "(", "self", ")", ":", "cache", "=", "apt", ".", "cache", ".", "Cache", "(", ")", "for", "pkg", "in", "self", ".", "pkgs_required", ":", "try", ":", "pkg", "=", "cache", "[", "pkg", "]", "if", "not", "pkg", ".", "is_installed...
Verifica che tutti i pacchetti apt necessari al "funzionamento" della classe siano installati. Se cosi' non fosse li installa.
[ "Verifica", "che", "tutti", "i", "pacchetti", "apt", "necessari", "al", "funzionamento", "della", "classe", "siano", "installati", ".", "Se", "cosi", "non", "fosse", "li", "installa", "." ]
python
train
google/google-visualization-python
gviz_api.py
https://github.com/google/google-visualization-python/blob/cbfb4d69ad2f4ca30dc55791629280aa3214c8e3/gviz_api.py#L968-L1009
def ToJSon(self, columns_order=None, order_by=()): """Returns a string that can be used in a JS DataTable constructor. This method writes a JSON string that can be passed directly into a Google Visualization API DataTable constructor. Use this output if you are hosting the visualization HTML on your site, and want to code the data table in Python. Pass this string into the google.visualization.DataTable constructor, e.g,: ... on my page that hosts my visualization ... google.setOnLoadCallback(drawTable); function drawTable() { var data = new google.visualization.DataTable(_my_JSon_string, 0.6); myTable.draw(data); } Args: columns_order: Optional. Specifies the order of columns in the output table. Specify a list of all column IDs in the order in which you want the table created. Note that you must list all column IDs in this parameter, if you use it. order_by: Optional. Specifies the name of the column(s) to sort by. Passed as is to _PreparedData(). Returns: A JSon constructor string to generate a JS DataTable with the data stored in the DataTable object. Example result (the result is without the newlines): {cols: [{id:"a",label:"a",type:"number"}, {id:"b",label:"b",type:"string"}, {id:"c",label:"c",type:"number"}], rows: [{c:[{v:1},{v:"z"},{v:2}]}, c:{[{v:3,f:"3$"},{v:"w"},null]}], p: {'foo': 'bar'}} Raises: DataTableException: The data does not match the type. """ encoded_response_str = DataTableJSONEncoder().encode(self._ToJSonObj(columns_order, order_by)) if not isinstance(encoded_response_str, str): return encoded_response_str.encode("utf-8") return encoded_response_str
[ "def", "ToJSon", "(", "self", ",", "columns_order", "=", "None", ",", "order_by", "=", "(", ")", ")", ":", "encoded_response_str", "=", "DataTableJSONEncoder", "(", ")", ".", "encode", "(", "self", ".", "_ToJSonObj", "(", "columns_order", ",", "order_by", ...
Returns a string that can be used in a JS DataTable constructor. This method writes a JSON string that can be passed directly into a Google Visualization API DataTable constructor. Use this output if you are hosting the visualization HTML on your site, and want to code the data table in Python. Pass this string into the google.visualization.DataTable constructor, e.g,: ... on my page that hosts my visualization ... google.setOnLoadCallback(drawTable); function drawTable() { var data = new google.visualization.DataTable(_my_JSon_string, 0.6); myTable.draw(data); } Args: columns_order: Optional. Specifies the order of columns in the output table. Specify a list of all column IDs in the order in which you want the table created. Note that you must list all column IDs in this parameter, if you use it. order_by: Optional. Specifies the name of the column(s) to sort by. Passed as is to _PreparedData(). Returns: A JSon constructor string to generate a JS DataTable with the data stored in the DataTable object. Example result (the result is without the newlines): {cols: [{id:"a",label:"a",type:"number"}, {id:"b",label:"b",type:"string"}, {id:"c",label:"c",type:"number"}], rows: [{c:[{v:1},{v:"z"},{v:2}]}, c:{[{v:3,f:"3$"},{v:"w"},null]}], p: {'foo': 'bar'}} Raises: DataTableException: The data does not match the type.
[ "Returns", "a", "string", "that", "can", "be", "used", "in", "a", "JS", "DataTable", "constructor", "." ]
python
train
angr/angr
angr/analyses/bindiff.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/bindiff.py#L962-L973
def get_function_diff(self, function_addr_a, function_addr_b): """ :param function_addr_a: The address of the first function (in the first binary) :param function_addr_b: The address of the second function (in the second binary) :returns: the FunctionDiff of the two functions """ pair = (function_addr_a, function_addr_b) if pair not in self._function_diffs: function_a = self.cfg_a.kb.functions.function(function_addr_a) function_b = self.cfg_b.kb.functions.function(function_addr_b) self._function_diffs[pair] = FunctionDiff(function_a, function_b, self) return self._function_diffs[pair]
[ "def", "get_function_diff", "(", "self", ",", "function_addr_a", ",", "function_addr_b", ")", ":", "pair", "=", "(", "function_addr_a", ",", "function_addr_b", ")", "if", "pair", "not", "in", "self", ".", "_function_diffs", ":", "function_a", "=", "self", ".",...
:param function_addr_a: The address of the first function (in the first binary) :param function_addr_b: The address of the second function (in the second binary) :returns: the FunctionDiff of the two functions
[ ":", "param", "function_addr_a", ":", "The", "address", "of", "the", "first", "function", "(", "in", "the", "first", "binary", ")", ":", "param", "function_addr_b", ":", "The", "address", "of", "the", "second", "function", "(", "in", "the", "second", "bina...
python
train
AirtestProject/Poco
poco/sdk/Selector.py
https://github.com/AirtestProject/Poco/blob/2c559a586adf3fd11ee81cabc446d4d3f6f2d119/poco/sdk/Selector.py#L73-L78
def select(self, cond, multiple=False): """ See Also: :py:meth:`select <poco.sdk.Selector.ISelector.select>` method in ``ISelector``. """ return self.selectImpl(cond, multiple, self.getRoot(), 9999, True, True)
[ "def", "select", "(", "self", ",", "cond", ",", "multiple", "=", "False", ")", ":", "return", "self", ".", "selectImpl", "(", "cond", ",", "multiple", ",", "self", ".", "getRoot", "(", ")", ",", "9999", ",", "True", ",", "True", ")" ]
See Also: :py:meth:`select <poco.sdk.Selector.ISelector.select>` method in ``ISelector``.
[ "See", "Also", ":", ":", "py", ":", "meth", ":", "select", "<poco", ".", "sdk", ".", "Selector", ".", "ISelector", ".", "select", ">", "method", "in", "ISelector", "." ]
python
train
openid/JWTConnect-Python-OidcService
src/oidcservice/client_auth.py
https://github.com/openid/JWTConnect-Python-OidcService/blob/759ab7adef30a7e3b9d75475e2971433b9613788/src/oidcservice/client_auth.py#L264-L287
def construct(self, request, service=None, http_args=None, **kwargs): """ Will add a token to the request if not present :param request: The request :param service_context: A :py:class:`oidcservice.service.Service` instance :param http_args: HTTP arguments :param kwargs: extra keyword arguments :return: A possibly modified dictionary with HTTP arguments. """ _acc_token = '' for _token_type in ['access_token', 'refresh_token']: _acc_token = find_token(request, _token_type, service, **kwargs) if _acc_token: break if not _acc_token: raise KeyError('No access or refresh token available') else: request["access_token"] = _acc_token return http_args
[ "def", "construct", "(", "self", ",", "request", ",", "service", "=", "None", ",", "http_args", "=", "None", ",", "*", "*", "kwargs", ")", ":", "_acc_token", "=", "''", "for", "_token_type", "in", "[", "'access_token'", ",", "'refresh_token'", "]", ":", ...
Will add a token to the request if not present :param request: The request :param service_context: A :py:class:`oidcservice.service.Service` instance :param http_args: HTTP arguments :param kwargs: extra keyword arguments :return: A possibly modified dictionary with HTTP arguments.
[ "Will", "add", "a", "token", "to", "the", "request", "if", "not", "present" ]
python
train
twilio/twilio-python
twilio/rest/sync/v1/service/sync_list/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/sync/v1/service/sync_list/__init__.py#L125-L140
def get_page(self, target_url): """ Retrieve a specific page of SyncListInstance records from the API. Request is executed immediately :param str target_url: API-generated URL for the requested results page :returns: Page of SyncListInstance :rtype: twilio.rest.sync.v1.service.sync_list.SyncListPage """ response = self._version.domain.twilio.request( 'GET', target_url, ) return SyncListPage(self._version, response, self._solution)
[ "def", "get_page", "(", "self", ",", "target_url", ")", ":", "response", "=", "self", ".", "_version", ".", "domain", ".", "twilio", ".", "request", "(", "'GET'", ",", "target_url", ",", ")", "return", "SyncListPage", "(", "self", ".", "_version", ",", ...
Retrieve a specific page of SyncListInstance records from the API. Request is executed immediately :param str target_url: API-generated URL for the requested results page :returns: Page of SyncListInstance :rtype: twilio.rest.sync.v1.service.sync_list.SyncListPage
[ "Retrieve", "a", "specific", "page", "of", "SyncListInstance", "records", "from", "the", "API", ".", "Request", "is", "executed", "immediately" ]
python
train
googlefonts/fontbakery
Lib/fontbakery/profiles/cmap.py
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/cmap.py#L10-L30
def com_google_fonts_check_family_equal_unicode_encodings(ttFonts): """Fonts have equal unicode encodings?""" encoding = None failed = False for ttFont in ttFonts: cmap = None for table in ttFont['cmap'].tables: if table.format == 4: cmap = table break # Could a font lack a format 4 cmap table ? # If we ever find one of those, it would crash the check here. # Then we'd have to yield a FAIL regarding the missing table entry. if not encoding: encoding = cmap.platEncID if encoding != cmap.platEncID: failed = True if failed: yield FAIL, "Fonts have different unicode encodings." else: yield PASS, "Fonts have equal unicode encodings."
[ "def", "com_google_fonts_check_family_equal_unicode_encodings", "(", "ttFonts", ")", ":", "encoding", "=", "None", "failed", "=", "False", "for", "ttFont", "in", "ttFonts", ":", "cmap", "=", "None", "for", "table", "in", "ttFont", "[", "'cmap'", "]", ".", "tab...
Fonts have equal unicode encodings?
[ "Fonts", "have", "equal", "unicode", "encodings?" ]
python
train
spyder-ide/spyder
spyder/plugins/base.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/base.py#L112-L131
def create_dockwidget(self): """Add to parent QMainWindow as a dock widget""" # Creating dock widget dock = SpyderDockWidget(self.get_plugin_title(), self.main) # Set properties dock.setObjectName(self.__class__.__name__+"_dw") dock.setAllowedAreas(self.ALLOWED_AREAS) dock.setFeatures(self.FEATURES) dock.setWidget(self) self.update_margins() dock.visibilityChanged.connect(self.visibility_changed) dock.topLevelChanged.connect(self.on_top_level_changed) dock.sig_plugin_closed.connect(self.plugin_closed) self.dockwidget = dock if self.shortcut is not None: sc = QShortcut(QKeySequence(self.shortcut), self.main, self.switch_to_plugin) self.register_shortcut(sc, "_", "Switch to %s" % self.CONF_SECTION) return (dock, self.LOCATION)
[ "def", "create_dockwidget", "(", "self", ")", ":", "# Creating dock widget", "dock", "=", "SpyderDockWidget", "(", "self", ".", "get_plugin_title", "(", ")", ",", "self", ".", "main", ")", "# Set properties", "dock", ".", "setObjectName", "(", "self", ".", "__...
Add to parent QMainWindow as a dock widget
[ "Add", "to", "parent", "QMainWindow", "as", "a", "dock", "widget" ]
python
train
saltstack/salt
salt/states/rbenv.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/rbenv.py#L87-L106
def _check_and_install_ruby(ret, ruby, default=False, user=None): ''' Verify that ruby is installed, install if unavailable ''' ret = _ruby_installed(ret, ruby, user=user) if not ret['result']: if __salt__['rbenv.install_ruby'](ruby, runas=user): ret['result'] = True ret['changes'][ruby] = 'Installed' ret['comment'] = 'Successfully installed ruby' ret['default'] = default else: ret['result'] = False ret['comment'] = 'Failed to install ruby' return ret if default: __salt__['rbenv.default'](ruby, runas=user) return ret
[ "def", "_check_and_install_ruby", "(", "ret", ",", "ruby", ",", "default", "=", "False", ",", "user", "=", "None", ")", ":", "ret", "=", "_ruby_installed", "(", "ret", ",", "ruby", ",", "user", "=", "user", ")", "if", "not", "ret", "[", "'result'", "...
Verify that ruby is installed, install if unavailable
[ "Verify", "that", "ruby", "is", "installed", "install", "if", "unavailable" ]
python
train
abau171/highfive
highfive/jobs.py
https://github.com/abau171/highfive/blob/07b3829331072035ab100d1d66deca3e8f3f372a/highfive/jobs.py#L114-L124
async def wait_changed(self): """ Waits until the result set changes. Possible changes can be a result being added or the result set becoming complete. If the result set is already completed, this method returns immediately. """ if not self.is_complete(): waiter = self._loop.create_future() self._waiters.append(waiter) await waiter
[ "async", "def", "wait_changed", "(", "self", ")", ":", "if", "not", "self", ".", "is_complete", "(", ")", ":", "waiter", "=", "self", ".", "_loop", ".", "create_future", "(", ")", "self", ".", "_waiters", ".", "append", "(", "waiter", ")", "await", "...
Waits until the result set changes. Possible changes can be a result being added or the result set becoming complete. If the result set is already completed, this method returns immediately.
[ "Waits", "until", "the", "result", "set", "changes", ".", "Possible", "changes", "can", "be", "a", "result", "being", "added", "or", "the", "result", "set", "becoming", "complete", ".", "If", "the", "result", "set", "is", "already", "completed", "this", "m...
python
test
saltstack/salt
salt/modules/firewalld.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/firewalld.py#L437-L453
def add_service_port(service, port): ''' Add a new port to the specified service. .. versionadded:: 2016.11.0 CLI Example: .. code-block:: bash salt '*' firewalld.add_service_port zone 80 ''' if service not in get_services(permanent=True): raise CommandExecutionError('The service does not exist.') cmd = '--permanent --service={0} --add-port={1}'.format(service, port) return __firewall_cmd(cmd)
[ "def", "add_service_port", "(", "service", ",", "port", ")", ":", "if", "service", "not", "in", "get_services", "(", "permanent", "=", "True", ")", ":", "raise", "CommandExecutionError", "(", "'The service does not exist.'", ")", "cmd", "=", "'--permanent --servic...
Add a new port to the specified service. .. versionadded:: 2016.11.0 CLI Example: .. code-block:: bash salt '*' firewalld.add_service_port zone 80
[ "Add", "a", "new", "port", "to", "the", "specified", "service", "." ]
python
train
log2timeline/plaso
plaso/output/mysql_4n6time.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/output/mysql_4n6time.py#L254-L262
def SetServerInformation(self, server, port): """Sets the server information. Args: server (str): hostname or IP address of the database server. port (int): port number of the database server. """ self._host = server self._port = port
[ "def", "SetServerInformation", "(", "self", ",", "server", ",", "port", ")", ":", "self", ".", "_host", "=", "server", "self", ".", "_port", "=", "port" ]
Sets the server information. Args: server (str): hostname or IP address of the database server. port (int): port number of the database server.
[ "Sets", "the", "server", "information", "." ]
python
train
terrycain/aioboto3
aioboto3/resources.py
https://github.com/terrycain/aioboto3/blob/0fd192175461f7bb192f3ed9a872591caf8474ac/aioboto3/resources.py#L183-L240
def _create_action(factory_self, action_model, resource_name, service_context, is_load=False): """ Creates a new method which makes a request to the underlying AWS service. """ # Create the action in in this closure but before the ``do_action`` # method below is invoked, which allows instances of the resource # to share the ServiceAction instance. action = AIOServiceAction( action_model, factory=factory_self, service_context=service_context ) # A resource's ``load`` method is special because it sets # values on the resource instead of returning the response. if is_load: # We need a new method here because we want access to the # instance via ``self``. async def do_action(self, *args, **kwargs): # response = action(self, *args, **kwargs) response = await action.async_call(self, *args, **kwargs) self.meta.data = response # Create the docstring for the load/reload mehtods. lazy_docstring = docstring.LoadReloadDocstring( action_name=action_model.name, resource_name=resource_name, event_emitter=factory_self._emitter, load_model=action_model, service_model=service_context.service_model, include_signature=False ) else: # We need a new method here because we want access to the # instance via ``self``. async def do_action(self, *args, **kwargs): response = await action.async_call(self, *args, **kwargs) if hasattr(self, 'load'): # Clear cached data. It will be reloaded the next # time that an attribute is accessed. # TODO: Make this configurable in the future? self.meta.data = None return response lazy_docstring = docstring.ActionDocstring( resource_name=resource_name, event_emitter=factory_self._emitter, action_model=action_model, service_model=service_context.service_model, include_signature=False ) do_action.__name__ = str(action_model.name) do_action.__doc__ = lazy_docstring return do_action
[ "def", "_create_action", "(", "factory_self", ",", "action_model", ",", "resource_name", ",", "service_context", ",", "is_load", "=", "False", ")", ":", "# Create the action in in this closure but before the ``do_action``", "# method below is invoked, which allows instances of the ...
Creates a new method which makes a request to the underlying AWS service.
[ "Creates", "a", "new", "method", "which", "makes", "a", "request", "to", "the", "underlying", "AWS", "service", "." ]
python
train
Josef-Friedrich/phrydy
phrydy/mediafile.py
https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L1150-L1157
def delete(self, mutagen_file): """Remove all images from the file. """ for cover_tag in self.TAG_NAMES.values(): try: del mutagen_file[cover_tag] except KeyError: pass
[ "def", "delete", "(", "self", ",", "mutagen_file", ")", ":", "for", "cover_tag", "in", "self", ".", "TAG_NAMES", ".", "values", "(", ")", ":", "try", ":", "del", "mutagen_file", "[", "cover_tag", "]", "except", "KeyError", ":", "pass" ]
Remove all images from the file.
[ "Remove", "all", "images", "from", "the", "file", "." ]
python
train
DAI-Lab/Copulas
copulas/bivariate/gumbel.py
https://github.com/DAI-Lab/Copulas/blob/821df61c3d36a6b81ef2883935f935c2eaaa862c/copulas/bivariate/gumbel.py#L20-L35
def probability_density(self, X): """Compute density function for given copula family.""" self.check_fit() U, V = self.split_matrix(X) if self.theta == 1: return np.multiply(U, V) else: a = np.power(np.multiply(U, V), -1) tmp = np.power(-np.log(U), self.theta) + np.power(-np.log(V), self.theta) b = np.power(tmp, -2 + 2.0 / self.theta) c = np.power(np.multiply(np.log(U), np.log(V)), self.theta - 1) d = 1 + (self.theta - 1) * np.power(tmp, -1.0 / self.theta) return self.cumulative_distribution(X) * a * b * c * d
[ "def", "probability_density", "(", "self", ",", "X", ")", ":", "self", ".", "check_fit", "(", ")", "U", ",", "V", "=", "self", ".", "split_matrix", "(", "X", ")", "if", "self", ".", "theta", "==", "1", ":", "return", "np", ".", "multiply", "(", "...
Compute density function for given copula family.
[ "Compute", "density", "function", "for", "given", "copula", "family", "." ]
python
train
aio-libs/aioredis
aioredis/commands/string.py
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/commands/string.py#L45-L47
def bitop_xor(self, dest, key, *keys): """Perform bitwise XOR operations between strings.""" return self.execute(b'BITOP', b'XOR', dest, key, *keys)
[ "def", "bitop_xor", "(", "self", ",", "dest", ",", "key", ",", "*", "keys", ")", ":", "return", "self", ".", "execute", "(", "b'BITOP'", ",", "b'XOR'", ",", "dest", ",", "key", ",", "*", "keys", ")" ]
Perform bitwise XOR operations between strings.
[ "Perform", "bitwise", "XOR", "operations", "between", "strings", "." ]
python
train
Nic30/hwtGraph
hwtGraph/elk/fromHwt/utils.py
https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/fromHwt/utils.py#L84-L92
def getDefault(self, k): """ :return: tuple (value, True if key was there before else False) """ try: return self[k], True except KeyError: v = self[k] = NetCtx(self, k) return v, False
[ "def", "getDefault", "(", "self", ",", "k", ")", ":", "try", ":", "return", "self", "[", "k", "]", ",", "True", "except", "KeyError", ":", "v", "=", "self", "[", "k", "]", "=", "NetCtx", "(", "self", ",", "k", ")", "return", "v", ",", "False" ]
:return: tuple (value, True if key was there before else False)
[ ":", "return", ":", "tuple", "(", "value", "True", "if", "key", "was", "there", "before", "else", "False", ")" ]
python
train
Miserlou/Zappa
zappa/core.py
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L2284-L2326
def create_domain_name(self, domain_name, certificate_name, certificate_body=None, certificate_private_key=None, certificate_chain=None, certificate_arn=None, lambda_name=None, stage=None, base_path=None): """ Creates the API GW domain and returns the resulting DNS name. """ # This is a Let's Encrypt or custom certificate if not certificate_arn: agw_response = self.apigateway_client.create_domain_name( domainName=domain_name, certificateName=certificate_name, certificateBody=certificate_body, certificatePrivateKey=certificate_private_key, certificateChain=certificate_chain ) # This is an AWS ACM-hosted Certificate else: agw_response = self.apigateway_client.create_domain_name( domainName=domain_name, certificateName=certificate_name, certificateArn=certificate_arn ) api_id = self.get_api_id(lambda_name) if not api_id: raise LookupError("No API URL to certify found - did you deploy?") self.apigateway_client.create_base_path_mapping( domainName=domain_name, basePath='' if base_path is None else base_path, restApiId=api_id, stage=stage ) return agw_response['distributionDomainName']
[ "def", "create_domain_name", "(", "self", ",", "domain_name", ",", "certificate_name", ",", "certificate_body", "=", "None", ",", "certificate_private_key", "=", "None", ",", "certificate_chain", "=", "None", ",", "certificate_arn", "=", "None", ",", "lambda_name", ...
Creates the API GW domain and returns the resulting DNS name.
[ "Creates", "the", "API", "GW", "domain", "and", "returns", "the", "resulting", "DNS", "name", "." ]
python
train
uber/doubles
doubles/target.py
https://github.com/uber/doubles/blob/15e68dcf98f709b19a581915fa6af5ef49ebdd8a/doubles/target.py#L114-L121
def restore_attr(self, attr_name): """Restore an attribute back onto the target object. :param str attr_name: the name of the attribute to restore """ original_attr = self._original_attr(attr_name) if self._original_attr(attr_name): setattr(self.obj.__class__, attr_name, original_attr)
[ "def", "restore_attr", "(", "self", ",", "attr_name", ")", ":", "original_attr", "=", "self", ".", "_original_attr", "(", "attr_name", ")", "if", "self", ".", "_original_attr", "(", "attr_name", ")", ":", "setattr", "(", "self", ".", "obj", ".", "__class__...
Restore an attribute back onto the target object. :param str attr_name: the name of the attribute to restore
[ "Restore", "an", "attribute", "back", "onto", "the", "target", "object", "." ]
python
train
Rapptz/discord.py
discord/permissions.py
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/permissions.py#L171-L190
def update(self, **kwargs): r"""Bulk updates this permission object. Allows you to set multiple attributes by using keyword arguments. The names must be equivalent to the properties listed. Extraneous key/value pairs will be silently ignored. Parameters ------------ \*\*kwargs A list of key/value pairs to bulk update permissions with. """ for key, value in kwargs.items(): try: is_property = isinstance(getattr(self.__class__, key), property) except AttributeError: continue if is_property: setattr(self, key, value)
[ "def", "update", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "key", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "try", ":", "is_property", "=", "isinstance", "(", "getattr", "(", "self", ".", "__class__", ",", "key", ")", ...
r"""Bulk updates this permission object. Allows you to set multiple attributes by using keyword arguments. The names must be equivalent to the properties listed. Extraneous key/value pairs will be silently ignored. Parameters ------------ \*\*kwargs A list of key/value pairs to bulk update permissions with.
[ "r", "Bulk", "updates", "this", "permission", "object", "." ]
python
train
GoogleCloudPlatform/cloud-debug-python
src/googleclouddebugger/capture_collector.py
https://github.com/GoogleCloudPlatform/cloud-debug-python/blob/89ce3782c98b814838a3ecb5479ed3882368cbee/src/googleclouddebugger/capture_collector.py#L385-L406
def CaptureNamedVariable(self, name, value, depth, limits): """Appends name to the product of CaptureVariable. Args: name: name of the variable. value: data to capture depth: nested depth of dictionaries and vectors so far. limits: Per-object limits for capturing variable data. Returns: Formatted captured data as per Variable proto with name. """ if not hasattr(name, '__dict__'): name = str(name) else: # TODO(vlif): call str(name) with immutability verifier here. name = str(id(name)) self._total_size += len(name) v = (self.CheckDataVisiblity(value) or self.CaptureVariable(value, depth, limits)) v['name'] = name return v
[ "def", "CaptureNamedVariable", "(", "self", ",", "name", ",", "value", ",", "depth", ",", "limits", ")", ":", "if", "not", "hasattr", "(", "name", ",", "'__dict__'", ")", ":", "name", "=", "str", "(", "name", ")", "else", ":", "# TODO(vlif): call str(nam...
Appends name to the product of CaptureVariable. Args: name: name of the variable. value: data to capture depth: nested depth of dictionaries and vectors so far. limits: Per-object limits for capturing variable data. Returns: Formatted captured data as per Variable proto with name.
[ "Appends", "name", "to", "the", "product", "of", "CaptureVariable", "." ]
python
train
GoogleCloudPlatform/compute-image-packages
packages/python-google-compute-engine/google_compute_engine/distro_lib/sles_12/utils.py
https://github.com/GoogleCloudPlatform/compute-image-packages/blob/53ea8cd069fb4d9a1984d1c167e54c133033f8da/packages/python-google-compute-engine/google_compute_engine/distro_lib/sles_12/utils.py#L47-L71
def _WriteIfcfg(self, interfaces, logger): """Write ifcfg files for multi-NIC support. Overwrites the files. This allows us to update ifcfg-* in the future. Disable the network setup to override this behavior and customize the configurations. Args: interfaces: list of string, the output device names to enable. logger: logger object, used to write to SysLog and serial port. """ for interface in interfaces: interface_config = os.path.join( self.network_path, 'ifcfg-%s' % interface) interface_content = [ '# Added by Google.', 'STARTMODE=hotplug', 'BOOTPROTO=dhcp', 'DHCLIENT_SET_DEFAULT_ROUTE=yes', 'DHCLIENT_ROUTE_PRIORITY=10%s00' % interface, '', ] with open(interface_config, 'w') as interface_file: interface_file.write('\n'.join(interface_content)) logger.info('Created ifcfg file for interface %s.', interface)
[ "def", "_WriteIfcfg", "(", "self", ",", "interfaces", ",", "logger", ")", ":", "for", "interface", "in", "interfaces", ":", "interface_config", "=", "os", ".", "path", ".", "join", "(", "self", ".", "network_path", ",", "'ifcfg-%s'", "%", "interface", ")",...
Write ifcfg files for multi-NIC support. Overwrites the files. This allows us to update ifcfg-* in the future. Disable the network setup to override this behavior and customize the configurations. Args: interfaces: list of string, the output device names to enable. logger: logger object, used to write to SysLog and serial port.
[ "Write", "ifcfg", "files", "for", "multi", "-", "NIC", "support", "." ]
python
train
HumanCellAtlas/dcp-cli
hca/upload/upload_config.py
https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/upload/upload_config.py#L128-L143
def area_uuid_from_partial_uuid(self, partial_uuid): """ Given a partial UUID (a prefix), see if we have know about an Upload Area matching it. :param (str) partial_uuid: UUID prefix :return: a matching UUID :rtype: str :raises UploadException: if no or more than one UUIDs match. """ matching_areas = [uuid for uuid in self.areas if re.match(partial_uuid, uuid)] if len(matching_areas) == 0: raise UploadException("Sorry I don't recognize area \"%s\"" % (partial_uuid,)) elif len(matching_areas) == 1: return matching_areas[0] else: raise UploadException( "\"%s\" matches more than one area, please provide more characters." % (partial_uuid,))
[ "def", "area_uuid_from_partial_uuid", "(", "self", ",", "partial_uuid", ")", ":", "matching_areas", "=", "[", "uuid", "for", "uuid", "in", "self", ".", "areas", "if", "re", ".", "match", "(", "partial_uuid", ",", "uuid", ")", "]", "if", "len", "(", "matc...
Given a partial UUID (a prefix), see if we have know about an Upload Area matching it. :param (str) partial_uuid: UUID prefix :return: a matching UUID :rtype: str :raises UploadException: if no or more than one UUIDs match.
[ "Given", "a", "partial", "UUID", "(", "a", "prefix", ")", "see", "if", "we", "have", "know", "about", "an", "Upload", "Area", "matching", "it", ".", ":", "param", "(", "str", ")", "partial_uuid", ":", "UUID", "prefix", ":", "return", ":", "a", "match...
python
train
aws/aws-iot-device-sdk-python
AWSIoTPythonSDK/core/protocol/paho/client.py
https://github.com/aws/aws-iot-device-sdk-python/blob/f0aa2ce34b21dd2e44f4fb7e1d058656aaf2fc62/AWSIoTPythonSDK/core/protocol/paho/client.py#L1141-L1162
def loop_read(self, max_packets=1): """Process read network events. Use in place of calling loop() if you wish to handle your client reads as part of your own application. Use socket() to obtain the client socket to call select() or equivalent on. Do not use if you are using the threaded interface loop_start().""" if self._sock is None and self._ssl is None: return MQTT_ERR_NO_CONN max_packets = len(self._out_messages) + len(self._in_messages) if max_packets < 1: max_packets = 1 for i in range(0, max_packets): rc = self._packet_read() if rc > 0: return self._loop_rc_handle(rc) elif rc == MQTT_ERR_AGAIN: return MQTT_ERR_SUCCESS return MQTT_ERR_SUCCESS
[ "def", "loop_read", "(", "self", ",", "max_packets", "=", "1", ")", ":", "if", "self", ".", "_sock", "is", "None", "and", "self", ".", "_ssl", "is", "None", ":", "return", "MQTT_ERR_NO_CONN", "max_packets", "=", "len", "(", "self", ".", "_out_messages", ...
Process read network events. Use in place of calling loop() if you wish to handle your client reads as part of your own application. Use socket() to obtain the client socket to call select() or equivalent on. Do not use if you are using the threaded interface loop_start().
[ "Process", "read", "network", "events", ".", "Use", "in", "place", "of", "calling", "loop", "()", "if", "you", "wish", "to", "handle", "your", "client", "reads", "as", "part", "of", "your", "own", "application", "." ]
python
train
vertexproject/synapse
synapse/cortex.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/cortex.py#L225-L231
async def updateTrigger(self, iden, query): ''' Change an existing trigger's query ''' trig = self.cell.triggers.get(iden) self._trig_auth_check(trig.get('useriden')) self.cell.triggers.mod(iden, query)
[ "async", "def", "updateTrigger", "(", "self", ",", "iden", ",", "query", ")", ":", "trig", "=", "self", ".", "cell", ".", "triggers", ".", "get", "(", "iden", ")", "self", ".", "_trig_auth_check", "(", "trig", ".", "get", "(", "'useriden'", ")", ")",...
Change an existing trigger's query
[ "Change", "an", "existing", "trigger", "s", "query" ]
python
train
pytroll/posttroll
posttroll/listener.py
https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/listener.py#L65-L71
def restart_listener(self, topics): '''Restart listener after configuration update. ''' if self.listener is not None: if self.listener.running: self.stop() self.__init__(topics=topics)
[ "def", "restart_listener", "(", "self", ",", "topics", ")", ":", "if", "self", ".", "listener", "is", "not", "None", ":", "if", "self", ".", "listener", ".", "running", ":", "self", ".", "stop", "(", ")", "self", ".", "__init__", "(", "topics", "=", ...
Restart listener after configuration update.
[ "Restart", "listener", "after", "configuration", "update", "." ]
python
train
JdeRobot/base
src/libs/comm_py/comm/ice/irIceClient.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/libs/comm_py/comm/ice/irIceClient.py#L92-L105
def getIRData(self): ''' Returns last LaserData. @return last JdeRobotTypes LaserData saved ''' if self.hasproxy(): self.lock.acquire() ir = self.ir self.lock.release() return ir return None
[ "def", "getIRData", "(", "self", ")", ":", "if", "self", ".", "hasproxy", "(", ")", ":", "self", ".", "lock", ".", "acquire", "(", ")", "ir", "=", "self", ".", "ir", "self", ".", "lock", ".", "release", "(", ")", "return", "ir", "return", "None" ...
Returns last LaserData. @return last JdeRobotTypes LaserData saved
[ "Returns", "last", "LaserData", "." ]
python
train
ml4ai/delphi
delphi/AnalysisGraph.py
https://github.com/ml4ai/delphi/blob/6d03d8aafeab99610387c51b89c99738ff2abbe3/delphi/AnalysisGraph.py#L940-L1060
def to_sql(self, app=None, last_known_value_date: Optional[date] = None): """ Inserts the model into the SQLite3 database associated with Delphi, for use with the ICM REST API. """ from delphi.apps.rest_api import create_app, db self.assign_uuids_to_nodes_and_edges() icm_metadata = ICMMetadata( id=self.id, created=self.dateCreated, estimatedNumberOfPrimitives=len(self.nodes) + len(self.edges), createdByUser_id=1, lastAccessedByUser_id=1, lastUpdatedByUser_id=1, ) if last_known_value_date is None: today = date.today().isoformat() else: today = last_known_value_date.isoformat() default_latent_var_value = 1.0 causal_primitives = [] nodeset = {n.split("/")[-1] for n in self.nodes} simplified_labels = len(nodeset) == len(self) for n in self.nodes(data=True): n[1]["rv"] = LatentVar(n[0]) n[1]["update_function"] = self.default_update_function rv = n[1]["rv"] rv.dataset = [default_latent_var_value for _ in range(self.res)] causal_variable = CausalVariable( id=n[1]["id"], model_id=self.id, units="", namespaces={}, auxiliaryProperties=[], label=n[0].split("/")[-1].replace("_", " ").capitalize() if simplified_labels else n[0], description=n[0], lastUpdated=today, confidence=1.0, lastKnownValue={ "active": "ACTIVE", "trend": None, "time": today, "value": { "baseType": "FloatValue", "value": n[1]["rv"].dataset[0], }, }, range={ "baseType": "FloatRange", "range": {"min": 0, "max": 5, "step": 0.1}, }, ) causal_primitives.append(causal_variable) max_mean_betas = max( [abs(np.median(e[2]["βs"])) for e in self.edges(data=True)] ) for e in self.edges(data=True): causal_relationship = CausalRelationship( id=e[2]["id"], namespaces={}, source={ "id": self.nodes[e[0]]["id"], "baseType": "CausalVariable", }, target={ "id": self.nodes[e[1]]["id"], "baseType": "CausalVariable", }, model_id=self.id, auxiliaryProperties=[], lastUpdated=today, types=["causal"], description="", confidence=np.mean( [s.belief for s in e[2]["InfluenceStatements"]] ), label="", strength=abs(np.median(e[2]["βs"]) / max_mean_betas), reinforcement=( True if np.mean( [ stmt.subj_delta["polarity"] * stmt.obj_delta["polarity"] for stmt in e[2]["InfluenceStatements"] ] ) > 0 else False ), ) causal_primitives.append(causal_relationship) evidences = [] for edge in self.edges(data=True): for stmt in edge[2]["InfluenceStatements"]: for ev in stmt.evidence: evidence = Evidence( id=str(uuid4()), causalrelationship_id=edge[2]["id"], # TODO - make source and target appear in CauseEx HMI description=(ev.text), ) evidences.append(evidence) if app is None: app = create_app() with app.app_context(): db.drop_all() db.create_all() db.session.add(icm_metadata) db.session.add(DelphiModel(id=self.id, model=self)) for causal_primitive in causal_primitives: db.session.add(causal_primitive) for evidence in evidences: db.session.add(evidence) db.session.commit()
[ "def", "to_sql", "(", "self", ",", "app", "=", "None", ",", "last_known_value_date", ":", "Optional", "[", "date", "]", "=", "None", ")", ":", "from", "delphi", ".", "apps", ".", "rest_api", "import", "create_app", ",", "db", "self", ".", "assign_uuids_t...
Inserts the model into the SQLite3 database associated with Delphi, for use with the ICM REST API.
[ "Inserts", "the", "model", "into", "the", "SQLite3", "database", "associated", "with", "Delphi", "for", "use", "with", "the", "ICM", "REST", "API", "." ]
python
train
NASA-AMMOS/AIT-Core
ait/core/bsc.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/bsc.py#L240-L253
def add_handler(self, handler): ''' Add an additional handler Args: handler: A dictionary of handler configuration for the handler that should be added. See :func:`__init__` for details on valid parameters. ''' handler['logger'] = self._get_logger(handler) handler['reads'] = 0 handler['data_read'] = 0 self.capture_handlers.append(handler)
[ "def", "add_handler", "(", "self", ",", "handler", ")", ":", "handler", "[", "'logger'", "]", "=", "self", ".", "_get_logger", "(", "handler", ")", "handler", "[", "'reads'", "]", "=", "0", "handler", "[", "'data_read'", "]", "=", "0", "self", ".", "...
Add an additional handler Args: handler: A dictionary of handler configuration for the handler that should be added. See :func:`__init__` for details on valid parameters.
[ "Add", "an", "additional", "handler" ]
python
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioFeatureExtraction.py
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioFeatureExtraction.py#L19-L23
def stZCR(frame): """Computes zero crossing rate of frame""" count = len(frame) countZ = numpy.sum(numpy.abs(numpy.diff(numpy.sign(frame)))) / 2 return (numpy.float64(countZ) / numpy.float64(count-1.0))
[ "def", "stZCR", "(", "frame", ")", ":", "count", "=", "len", "(", "frame", ")", "countZ", "=", "numpy", ".", "sum", "(", "numpy", ".", "abs", "(", "numpy", ".", "diff", "(", "numpy", ".", "sign", "(", "frame", ")", ")", ")", ")", "/", "2", "r...
Computes zero crossing rate of frame
[ "Computes", "zero", "crossing", "rate", "of", "frame" ]
python
train
DEIB-GECO/PyGMQL
gmql/ml/algorithms/clustering.py
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L415-L422
def normalized_mutual_info_score(self, reference_clusters): """ Calculates the normalized mutual information w.r.t. the reference clusters (explicit evaluation) :param reference_clusters: Clusters that are to be used as reference :return: The resulting normalized mutual information score. """ return normalized_mutual_info_score(self.get_labels(self), self.get_labels(reference_clusters))
[ "def", "normalized_mutual_info_score", "(", "self", ",", "reference_clusters", ")", ":", "return", "normalized_mutual_info_score", "(", "self", ".", "get_labels", "(", "self", ")", ",", "self", ".", "get_labels", "(", "reference_clusters", ")", ")" ]
Calculates the normalized mutual information w.r.t. the reference clusters (explicit evaluation) :param reference_clusters: Clusters that are to be used as reference :return: The resulting normalized mutual information score.
[ "Calculates", "the", "normalized", "mutual", "information", "w", ".", "r", ".", "t", ".", "the", "reference", "clusters", "(", "explicit", "evaluation", ")" ]
python
train
bitlabstudio/django-subscribe
subscribe/forms.py
https://github.com/bitlabstudio/django-subscribe/blob/313de63fb4acda172e88b65c3327c793f98e8aa9/subscribe/forms.py#L29-L36
def save(self, *args, **kwargs): """Adds a subscription for the given user to the given object.""" method_kwargs = self._get_method_kwargs() try: subscription = Subscription.objects.get(**method_kwargs) except Subscription.DoesNotExist: subscription = Subscription.objects.create(**method_kwargs) return subscription
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "method_kwargs", "=", "self", ".", "_get_method_kwargs", "(", ")", "try", ":", "subscription", "=", "Subscription", ".", "objects", ".", "get", "(", "*", "*", "method_kwar...
Adds a subscription for the given user to the given object.
[ "Adds", "a", "subscription", "for", "the", "given", "user", "to", "the", "given", "object", "." ]
python
train
openstack/horizon
openstack_dashboard/dashboards/project/key_pairs/tables.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/dashboards/project/key_pairs/tables.py#L110-L114
def filter(self, table, keypairs, filter_string): """Naive case-insensitive search.""" query = filter_string.lower() return [keypair for keypair in keypairs if query in keypair.name.lower()]
[ "def", "filter", "(", "self", ",", "table", ",", "keypairs", ",", "filter_string", ")", ":", "query", "=", "filter_string", ".", "lower", "(", ")", "return", "[", "keypair", "for", "keypair", "in", "keypairs", "if", "query", "in", "keypair", ".", "name",...
Naive case-insensitive search.
[ "Naive", "case", "-", "insensitive", "search", "." ]
python
train
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L10428-L10448
def reordi(iorder, ndim, array): """ Re-order the elements of an integer array according to a given order vector. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/reordi_c.html :param iorder: Order vector to be used to re-order array. :type iorder: Array of ints :param ndim: Dimension of array. :type ndim: int :param array: Array to be re-ordered. :type array: Array of ints :return: Re-ordered Array. :rtype: Array of ints """ iorder = stypes.toIntVector(iorder) ndim = ctypes.c_int(ndim) array = stypes.toIntVector(array) libspice.reordi_c(iorder, ndim, array) return stypes.cVectorToPython(array)
[ "def", "reordi", "(", "iorder", ",", "ndim", ",", "array", ")", ":", "iorder", "=", "stypes", ".", "toIntVector", "(", "iorder", ")", "ndim", "=", "ctypes", ".", "c_int", "(", "ndim", ")", "array", "=", "stypes", ".", "toIntVector", "(", "array", ")"...
Re-order the elements of an integer array according to a given order vector. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/reordi_c.html :param iorder: Order vector to be used to re-order array. :type iorder: Array of ints :param ndim: Dimension of array. :type ndim: int :param array: Array to be re-ordered. :type array: Array of ints :return: Re-ordered Array. :rtype: Array of ints
[ "Re", "-", "order", "the", "elements", "of", "an", "integer", "array", "according", "to", "a", "given", "order", "vector", "." ]
python
train
revelc/pyaccumulo
pyaccumulo/proxy/AccumuloProxy.py
https://github.com/revelc/pyaccumulo/blob/8adcf535bb82ba69c749efce785c9efc487e85de/pyaccumulo/proxy/AccumuloProxy.py#L1577-L1586
def mergeTablets(self, login, tableName, startRow, endRow): """ Parameters: - login - tableName - startRow - endRow """ self.send_mergeTablets(login, tableName, startRow, endRow) self.recv_mergeTablets()
[ "def", "mergeTablets", "(", "self", ",", "login", ",", "tableName", ",", "startRow", ",", "endRow", ")", ":", "self", ".", "send_mergeTablets", "(", "login", ",", "tableName", ",", "startRow", ",", "endRow", ")", "self", ".", "recv_mergeTablets", "(", ")" ...
Parameters: - login - tableName - startRow - endRow
[ "Parameters", ":", "-", "login", "-", "tableName", "-", "startRow", "-", "endRow" ]
python
train
alpacahq/pylivetrader
pylivetrader/backend/alpaca.py
https://github.com/alpacahq/pylivetrader/blob/fd328b6595428c0789d9f218df34623f83a02b8b/pylivetrader/backend/alpaca.py#L474-L527
def get_bars(self, assets, data_frequency, bar_count=500): ''' Interface method. Return: pd.Dataframe() with columns MultiIndex [asset -> OHLCV] ''' assets_is_scalar = not isinstance(assets, (list, set, tuple)) is_daily = 'd' in data_frequency # 'daily' or '1d' if assets_is_scalar: symbols = [assets.symbol] else: symbols = [asset.symbol for asset in assets] symbol_bars = self._symbol_bars( symbols, 'day' if is_daily else 'minute', limit=bar_count) if is_daily: intra_bars = {} symbol_bars_minute = self._symbol_bars( symbols, 'minute', limit=1000) for symbol, df in symbol_bars_minute.items(): agged = df.resample('1D').agg(dict( open='first', high='max', low='min', close='last', volume='sum', )).dropna() intra_bars[symbol] = agged dfs = [] for asset in assets if not assets_is_scalar else [assets]: symbol = asset.symbol df = symbol_bars.get(symbol) if df is None: dfs.append(pd.DataFrame( [], columns=[ 'open', 'high', 'low', 'close', 'volume'] )) continue if is_daily: agged = intra_bars.get(symbol) if agged is not None and len( agged.index) > 0 and agged.index[-1] not in df.index: if not (agged.index[-1] > df.index[-1]): log.warn( ('agged.index[-1] = {}, df.index[-1] = {} ' 'for {}').format( agged.index[-1], df.index[-1], symbol)) df = df.append(agged.iloc[-1]) df.columns = pd.MultiIndex.from_product([[asset, ], df.columns]) dfs.append(df) return pd.concat(dfs, axis=1)
[ "def", "get_bars", "(", "self", ",", "assets", ",", "data_frequency", ",", "bar_count", "=", "500", ")", ":", "assets_is_scalar", "=", "not", "isinstance", "(", "assets", ",", "(", "list", ",", "set", ",", "tuple", ")", ")", "is_daily", "=", "'d'", "in...
Interface method. Return: pd.Dataframe() with columns MultiIndex [asset -> OHLCV]
[ "Interface", "method", "." ]
python
train
fuzeman/PyUPnP
pyupnp/lict.py
https://github.com/fuzeman/PyUPnP/blob/6dea64be299952346a14300ab6cc7dac42736433/pyupnp/lict.py#L249-L255
def remove(self, value): """ L.remove(value) -- remove first occurrence of value. Raises ValueError if the value is not present. """ self._col_list.remove(value) self._col_dict.pop(self._get_object_key(value))
[ "def", "remove", "(", "self", ",", "value", ")", ":", "self", ".", "_col_list", ".", "remove", "(", "value", ")", "self", ".", "_col_dict", ".", "pop", "(", "self", ".", "_get_object_key", "(", "value", ")", ")" ]
L.remove(value) -- remove first occurrence of value. Raises ValueError if the value is not present.
[ "L", ".", "remove", "(", "value", ")", "--", "remove", "first", "occurrence", "of", "value", ".", "Raises", "ValueError", "if", "the", "value", "is", "not", "present", "." ]
python
train
materialsproject/pymatgen
pymatgen/transformations/standard_transformations.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/transformations/standard_transformations.py#L768-L796
def apply_transformation(self, structure): """ Discretizes the site occupancies in the structure. Args: structure: disordered Structure to discretize occupancies Returns: A new disordered Structure with occupancies discretized """ if structure.is_ordered: return structure species = [dict(sp) for sp in structure.species_and_occu] for sp in species: for k, v in sp.items(): old_occ = sp[k] new_occ = float( Fraction(old_occ).limit_denominator(self.max_denominator)) if self.fix_denominator: new_occ = around(old_occ*self.max_denominator)\ / self.max_denominator if round(abs(old_occ - new_occ), 6) > self.tol: raise RuntimeError( "Cannot discretize structure within tolerance!") sp[k] = new_occ return Structure(structure.lattice, species, structure.frac_coords)
[ "def", "apply_transformation", "(", "self", ",", "structure", ")", ":", "if", "structure", ".", "is_ordered", ":", "return", "structure", "species", "=", "[", "dict", "(", "sp", ")", "for", "sp", "in", "structure", ".", "species_and_occu", "]", "for", "sp"...
Discretizes the site occupancies in the structure. Args: structure: disordered Structure to discretize occupancies Returns: A new disordered Structure with occupancies discretized
[ "Discretizes", "the", "site", "occupancies", "in", "the", "structure", "." ]
python
train
zhanglab/psamm
psamm/lpsolver/glpk.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/lpsolver/glpk.py#L444-L447
def _get_value(self, variable): """Return value of variable in solution.""" return swiglpk.glp_mip_col_val( self._problem._p, self._problem._variables[variable])
[ "def", "_get_value", "(", "self", ",", "variable", ")", ":", "return", "swiglpk", ".", "glp_mip_col_val", "(", "self", ".", "_problem", ".", "_p", ",", "self", ".", "_problem", ".", "_variables", "[", "variable", "]", ")" ]
Return value of variable in solution.
[ "Return", "value", "of", "variable", "in", "solution", "." ]
python
train
bionikspoon/cache_requests
cache_requests/memoize.py
https://github.com/bionikspoon/cache_requests/blob/d75f6f944bc5a72fef5d7811e4973e124d9921dd/cache_requests/memoize.py#L97-L107
def put_cache_results(self, key, func_akw, set_cache_cb): """Put function results into cache.""" args, kwargs = func_akw # get function results func_results = self.func(*args, **kwargs) # optionally add results to cache if set_cache_cb(func_results): self[key] = func_results return func_results
[ "def", "put_cache_results", "(", "self", ",", "key", ",", "func_akw", ",", "set_cache_cb", ")", ":", "args", ",", "kwargs", "=", "func_akw", "# get function results", "func_results", "=", "self", ".", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")",...
Put function results into cache.
[ "Put", "function", "results", "into", "cache", "." ]
python
train
nickmckay/LiPD-utilities
Python/lipd/noaa_lpd.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/noaa_lpd.py#L578-L603
def __name_unit_regex(word): """ Split a name and unit that are bunched together (i.e. '250m') :param str word: :return str str: """ value = "" unit = "" r = re.findall(re_name_unit, word) try: value = r[0][0] except IndexError as e: logger_noaa_lpd.warn("name_unit_regex: IndexError: value: {}, {}, {}".format(word, r, e)) try: unit = r[0][1] # Replace unit with correct synonym. if unit.lower() in UNITS: unit = UNITS[unit] except IndexError as e: logger_noaa_lpd.warn("name_unit_regex: IndexError: unit: {}, {}, {}".format(word, r, e)) if value: try: value = float(value) except ValueError as e: logger_noaa_lpd.warn("name_unit_regex: ValueError: val: {}, {}".format(value, e)) return value, unit
[ "def", "__name_unit_regex", "(", "word", ")", ":", "value", "=", "\"\"", "unit", "=", "\"\"", "r", "=", "re", ".", "findall", "(", "re_name_unit", ",", "word", ")", "try", ":", "value", "=", "r", "[", "0", "]", "[", "0", "]", "except", "IndexError"...
Split a name and unit that are bunched together (i.e. '250m') :param str word: :return str str:
[ "Split", "a", "name", "and", "unit", "that", "are", "bunched", "together", "(", "i", ".", "e", ".", "250m", ")", ":", "param", "str", "word", ":", ":", "return", "str", "str", ":" ]
python
train
numenta/htmresearch
htmresearch/algorithms/union_temporal_pooler.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/union_temporal_pooler.py#L180-L246
def compute(self, activeInput, predictedActiveInput, learn): """ Computes one cycle of the Union Temporal Pooler algorithm. @param activeInput (numpy array) A numpy array of 0's and 1's that comprises the input to the union pooler @param predictedActiveInput (numpy array) A numpy array of 0's and 1's that comprises the correctly predicted input to the union pooler @param learn (boolen) A boolen value indicating whether learning should be performed """ assert numpy.size(activeInput) == self.getNumInputs() assert numpy.size(predictedActiveInput) == self.getNumInputs() self._updateBookeepingVars(learn) # Compute proximal dendrite overlaps with active and active-predicted inputs overlapsActive = self._calculateOverlap(activeInput) overlapsPredictedActive = self._calculateOverlap(predictedActiveInput) totalOverlap = (overlapsActive * self._activeOverlapWeight + overlapsPredictedActive * self._predictedActiveOverlapWeight).astype(REAL_DTYPE) if learn: boostFactors = numpy.zeros(self.getNumColumns(), dtype=REAL_DTYPE) self.getBoostFactors(boostFactors) boostedOverlaps = boostFactors * totalOverlap else: boostedOverlaps = totalOverlap activeCells = self._inhibitColumns(boostedOverlaps) self._activeCells = activeCells # Decrement pooling activation of all cells self._decayPoolingActivation() # Update the poolingActivation of current active Union Temporal Pooler cells self._addToPoolingActivation(activeCells, overlapsPredictedActive) # update union SDR self._getMostActiveCells() if learn: # adapt permanence of connections from predicted active inputs to newly active cell # This step is the spatial pooler learning rule, applied only to the predictedActiveInput # Todo: should we also include unpredicted active input in this step? self._adaptSynapses(predictedActiveInput, activeCells, self.getSynPermActiveInc(), self.getSynPermInactiveDec()) # Increase permanence of connections from predicted active inputs to cells in the union SDR # This is Hebbian learning applied to the current time step self._adaptSynapses(predictedActiveInput, self._unionSDR, self._synPermPredActiveInc, 0.0) # adapt permenence of connections from previously predicted inputs to newly active cells # This is a reinforcement learning rule that considers previous input to the current cell for i in xrange(self._historyLength): self._adaptSynapses(self._prePredictedActiveInput[:,i], activeCells, self._synPermPreviousPredActiveInc, 0.0) # Homeostasis learning inherited from the spatial pooler self._updateDutyCycles(totalOverlap.astype(UINT_DTYPE), activeCells) self._bumpUpWeakColumns() self._updateBoostFactors() if self._isUpdateRound(): self._updateInhibitionRadius() self._updateMinDutyCycles() # save inputs from the previous time step self._preActiveInput = copy.copy(activeInput) self._prePredictedActiveInput = numpy.roll(self._prePredictedActiveInput,1,1) if self._historyLength > 0: self._prePredictedActiveInput[:, 0] = predictedActiveInput return self._unionSDR
[ "def", "compute", "(", "self", ",", "activeInput", ",", "predictedActiveInput", ",", "learn", ")", ":", "assert", "numpy", ".", "size", "(", "activeInput", ")", "==", "self", ".", "getNumInputs", "(", ")", "assert", "numpy", ".", "size", "(", "predictedAct...
Computes one cycle of the Union Temporal Pooler algorithm. @param activeInput (numpy array) A numpy array of 0's and 1's that comprises the input to the union pooler @param predictedActiveInput (numpy array) A numpy array of 0's and 1's that comprises the correctly predicted input to the union pooler @param learn (boolen) A boolen value indicating whether learning should be performed
[ "Computes", "one", "cycle", "of", "the", "Union", "Temporal", "Pooler", "algorithm", "." ]
python
train
ranaroussi/ezibpy
ezibpy/utils.py
https://github.com/ranaroussi/ezibpy/blob/1a9d4bf52018abd2a01af7c991d7cf00cda53e0c/ezibpy/utils.py#L204-L207
def order_to_dict(order): """Convert an IBPy Order object to a dict containing any non-default values.""" default = Order() return {field: val for field, val in vars(order).items() if val != getattr(default, field, None)}
[ "def", "order_to_dict", "(", "order", ")", ":", "default", "=", "Order", "(", ")", "return", "{", "field", ":", "val", "for", "field", ",", "val", "in", "vars", "(", "order", ")", ".", "items", "(", ")", "if", "val", "!=", "getattr", "(", "default"...
Convert an IBPy Order object to a dict containing any non-default values.
[ "Convert", "an", "IBPy", "Order", "object", "to", "a", "dict", "containing", "any", "non", "-", "default", "values", "." ]
python
train
eleme/ruskit
ruskit/utils.py
https://github.com/eleme/ruskit/blob/2e8c5a3f6a65b8aeb07012b4e2c8ba324d887c3b/ruskit/utils.py#L57-L76
def spread(nodes, n): """Distrubute master instances in different nodes { "192.168.0.1": [node1, node2], "192.168.0.2": [node3, node4], "192.168.0.3": [node5, node6] } => [node1, node3, node5] """ target = [] while len(target) < n and nodes: for ip, node_group in list(nodes.items()): if not node_group: nodes.pop(ip) continue target.append(node_group.pop(0)) if len(target) >= n: break return target
[ "def", "spread", "(", "nodes", ",", "n", ")", ":", "target", "=", "[", "]", "while", "len", "(", "target", ")", "<", "n", "and", "nodes", ":", "for", "ip", ",", "node_group", "in", "list", "(", "nodes", ".", "items", "(", ")", ")", ":", "if", ...
Distrubute master instances in different nodes { "192.168.0.1": [node1, node2], "192.168.0.2": [node3, node4], "192.168.0.3": [node5, node6] } => [node1, node3, node5]
[ "Distrubute", "master", "instances", "in", "different", "nodes" ]
python
train
wummel/linkchecker
linkcheck/cache/urlqueue.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/cache/urlqueue.py#L108-L114
def put (self, item): """Put an item into the queue. Block if necessary until a free slot is available. """ with self.mutex: self._put(item) self.not_empty.notify()
[ "def", "put", "(", "self", ",", "item", ")", ":", "with", "self", ".", "mutex", ":", "self", ".", "_put", "(", "item", ")", "self", ".", "not_empty", ".", "notify", "(", ")" ]
Put an item into the queue. Block if necessary until a free slot is available.
[ "Put", "an", "item", "into", "the", "queue", ".", "Block", "if", "necessary", "until", "a", "free", "slot", "is", "available", "." ]
python
train