nwo: stringlengths (5–106)
sha: stringlengths (40–40)
path: stringlengths (4–174)
language: stringclasses (1 value)
identifier: stringlengths (1–140)
parameters: stringlengths (0–87.7k)
argument_list: stringclasses (1 value)
return_statement: stringlengths (0–426k)
docstring: stringlengths (0–64.3k)
docstring_summary: stringlengths (0–26.3k)
docstring_tokens: list
function: stringlengths (18–4.83M)
function_tokens: list
url: stringlengths (83–304)
IronLanguages/main
a949455434b1fda8c783289e897e78a9a0caabb5
External.LCA_RESTRICTED/Languages/IronPython/27/Lib/site-packages/win32/lib/win32timezone.py
python
TimeZoneInfo.get_sorted_time_zone_names
()
return [get_standard_name(tz) for tz in tzs]
Return a list of time zone names that can be used to initialize TimeZoneInfo instances
Return a list of time zone names that can be used to initialize TimeZoneInfo instances
[ "Return", "a", "list", "of", "time", "zone", "names", "that", "can", "be", "used", "to", "initialize", "TimeZoneInfo", "instances" ]
def get_sorted_time_zone_names(): "Return a list of time zone names that can be used to initialize TimeZoneInfo instances" tzs = TimeZoneInfo.get_sorted_time_zones() get_standard_name = lambda tzi: tzi.standardName return [get_standard_name(tz) for tz in tzs]
[ "def", "get_sorted_time_zone_names", "(", ")", ":", "tzs", "=", "TimeZoneInfo", ".", "get_sorted_time_zones", "(", ")", "get_standard_name", "=", "lambda", "tzi", ":", "tzi", ".", "standardName", "return", "[", "get_standard_name", "(", "tz", ")", "for", "tz", ...
https://github.com/IronLanguages/main/blob/a949455434b1fda8c783289e897e78a9a0caabb5/External.LCA_RESTRICTED/Languages/IronPython/27/Lib/site-packages/win32/lib/win32timezone.py#L586-L590
robotlearn/pyrobolearn
9cd7c060723fda7d2779fa255ac998c2c82b8436
pyrobolearn/physics/link_physics_randomizer.py
python
LinkPhysicsRandomizer.get_properties
(self)
return {link: {'mass': info[0], 'local_inertia_diagonal': info[2], 'local_inertia_position': info[3], 'local_inertia_orientation': info[4], 'lateral_friction': info[1], 'spinning_friction': info[7], 'rolling_friction': info[6], 'restitution': info[5], 'contact_stiffness': info[9], 'contact_damping': info[8]} for link, info in zip(self.links, infos)}
Get the current physics properties. Returns: dict: current physical property values {physic property name: corresponding value}.
Get the current physics properties.
[ "Get", "the", "current", "physics", "properties", "." ]
def get_properties(self): """ Get the current physics properties. Returns: dict: current physical property values {physic property name: corresponding value}. """ infos = [self.simulator.get_dynamics_info(body_id=self.body.id, link_id=link) for link in self.links] return {link: {'mass': info[0], 'local_inertia_diagonal': info[2], 'local_inertia_position': info[3], 'local_inertia_orientation': info[4], 'lateral_friction': info[1], 'spinning_friction': info[7], 'rolling_friction': info[6], 'restitution': info[5], 'contact_stiffness': info[9], 'contact_damping': info[8]} for link, info in zip(self.links, infos)}
[ "def", "get_properties", "(", "self", ")", ":", "infos", "=", "[", "self", ".", "simulator", ".", "get_dynamics_info", "(", "body_id", "=", "self", ".", "body", ".", "id", ",", "link_id", "=", "link", ")", "for", "link", "in", "self", ".", "links", "...
https://github.com/robotlearn/pyrobolearn/blob/9cd7c060723fda7d2779fa255ac998c2c82b8436/pyrobolearn/physics/link_physics_randomizer.py#L381-L394
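The row above maps pybullet-style get_dynamics_info tuple indices to named physics properties. A hedged, stand-alone sketch of the same index-to-name mapping against a stock pybullet body (assumes the pybullet and pybullet_data packages are installed; this uses pybullet directly rather than pyrobolearn's simulator wrapper):

```python
# Stand-alone sketch of the getDynamicsInfo index-to-name mapping used above;
# assumes pybullet and pybullet_data are installed (not pyrobolearn itself).
import pybullet as p
import pybullet_data

client = p.connect(p.DIRECT)                        # headless physics server
p.setAdditionalSearchPath(pybullet_data.getDataPath())
body_id = p.loadURDF("r2d2.urdf")                   # example asset shipped with pybullet_data

info = p.getDynamicsInfo(body_id, -1)               # -1 selects the base link
properties = {
    'mass': info[0],
    'lateral_friction': info[1],
    'local_inertia_diagonal': info[2],
    'local_inertia_position': info[3],
    'local_inertia_orientation': info[4],
    'restitution': info[5],
    'rolling_friction': info[6],
    'spinning_friction': info[7],
    'contact_damping': info[8],
    'contact_stiffness': info[9],
}
print(properties)
p.disconnect(client)
```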
Tencent/bk-bcs-saas
2b437bf2f5fd5ce2078f7787c3a12df609f7679d
bcs-app/backend/uniapps/application/views.py
python
CreateInstance.get_instance_conf
(self, info)
return conf
Get the instance conf
Get the instance conf
[ "获取instance", "conf" ]
def get_instance_conf(self, info): """获取instance conf""" try: conf = json.loads(info.config) except Exception as error: logger.error(u"解析instance config异常,id为 %s, 详情: %s" % (info.id, error)) raise error_codes.JSONParseError(_("Instance config解析异常")) return conf
[ "def", "get_instance_conf", "(", "self", ",", "info", ")", ":", "try", ":", "conf", "=", "json", ".", "loads", "(", "info", ".", "config", ")", "except", "Exception", "as", "error", ":", "logger", ".", "error", "(", "u\"解析instance config异常,id为 %s, 详情: %s\" %...
https://github.com/Tencent/bk-bcs-saas/blob/2b437bf2f5fd5ce2078f7787c3a12df609f7679d/bcs-app/backend/uniapps/application/views.py#L1020-L1027
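get_instance_conf above follows a common "parse the stored config, log and re-raise on failure" pattern. A minimal stand-alone sketch of that pattern using only the standard library (the function and field names here are hypothetical, not bk-bcs-saas APIs):

```python
# Illustrative stand-alone version of the "parse config or log and re-raise"
# pattern above; names are hypothetical, only the standard library is used.
import json
import logging

logger = logging.getLogger(__name__)

def load_instance_conf(raw_config: str, instance_id: int) -> dict:
    try:
        return json.loads(raw_config)
    except ValueError as error:  # json.JSONDecodeError subclasses ValueError
        logger.error("Failed to parse instance config, id %s, detail: %s", instance_id, error)
        raise

print(load_instance_conf('{"replicas": 3}', instance_id=42))
```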
easezyc/deep-transfer-learning
9af0921f4f21bc2ccea61be53cf8e8a49873d613
MUDA/MFSAN/MFSAN_3src/mmd.py
python
guassian_kernel
(source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None)
return sum(kernel_val)
[]
def guassian_kernel(source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None): n_samples = int(source.size()[0])+int(target.size()[0]) total = torch.cat([source, target], dim=0) total0 = total.unsqueeze(0).expand(int(total.size(0)), int(total.size(0)), int(total.size(1))) total1 = total.unsqueeze(1).expand(int(total.size(0)), int(total.size(0)), int(total.size(1))) L2_distance = ((total0-total1)**2).sum(2) if fix_sigma: bandwidth = fix_sigma else: bandwidth = torch.sum(L2_distance.data) / (n_samples**2-n_samples) bandwidth /= kernel_mul ** (kernel_num // 2) bandwidth_list = [bandwidth * (kernel_mul**i) for i in range(kernel_num)] kernel_val = [torch.exp(-L2_distance / bandwidth_temp) for bandwidth_temp in bandwidth_list] return sum(kernel_val)
[ "def", "guassian_kernel", "(", "source", ",", "target", ",", "kernel_mul", "=", "2.0", ",", "kernel_num", "=", "5", ",", "fix_sigma", "=", "None", ")", ":", "n_samples", "=", "int", "(", "source", ".", "size", "(", ")", "[", "0", "]", ")", "+", "in...
https://github.com/easezyc/deep-transfer-learning/blob/9af0921f4f21bc2ccea61be53cf8e8a49873d613/MUDA/MFSAN/MFSAN_3src/mmd.py#L7-L20
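guassian_kernel above returns the sum of kernel_num RBF kernels evaluated over every pair of rows in the concatenated source/target batch. A hedged usage sketch, assuming torch is installed and the function is importable from the mmd module shown; the MMD reduction at the end is the standard biased estimate, not code from this repository:

```python
# Usage sketch for the multi-bandwidth Gaussian kernel above; assumes torch is
# installed and guassian_kernel is importable from the module shown.
import torch

source = torch.randn(32, 128)   # 32 source-domain feature vectors
target = torch.randn(32, 128)   # 32 target-domain feature vectors

kernels = guassian_kernel(source, target, kernel_mul=2.0, kernel_num=5)
# kernels is a (64, 64) matrix: the sum of 5 RBF kernels over all pairs of
# concatenated source/target rows. A standard (biased) MMD estimate splits it
# into source-source (XX), target-target (YY) and cross (XY) blocks:
n = source.size(0)
XX, YY, XY = kernels[:n, :n], kernels[n:, n:], kernels[:n, n:]
mmd = XX.mean() + YY.mean() - 2 * XY.mean()
print(mmd.item())
```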
pyinstaller/pyinstaller
872312500a8a324d25fb405f85117f7966a0ebd5
PyInstaller/utils/win32/versioninfo.py
python
getRaw
(text)
return text.encode('UTF-16LE')
Encodes text as UTF-16LE (Microsoft 'Unicode') for use in structs.
Encodes text as UTF-16LE (Microsoft 'Unicode') for use in structs.
[ "Encodes", "text", "as", "UTF", "-", "16LE", "(", "Microsoft", "Unicode", ")", "for", "use", "in", "structs", "." ]
def getRaw(text): """ Encodes text as UTF-16LE (Microsoft 'Unicode') for use in structs. """ return text.encode('UTF-16LE')
[ "def", "getRaw", "(", "text", ")", ":", "return", "text", ".", "encode", "(", "'UTF-16LE'", ")" ]
https://github.com/pyinstaller/pyinstaller/blob/872312500a8a324d25fb405f85117f7966a0ebd5/PyInstaller/utils/win32/versioninfo.py#L49-L53
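getRaw above simply encodes text as UTF-16LE so it can be embedded in binary version-info structures. A small stand-alone illustration; the length-prefixed record layout below is made up for the example and is not PyInstaller's actual structure:

```python
# Stand-alone illustration of encoding text as UTF-16LE before packing it into
# a binary structure, as getRaw does above; the record layout is hypothetical.
import struct

def get_raw(text: str) -> bytes:
    return text.encode('UTF-16LE')

payload = get_raw("FileVersion")
# Each character becomes two little-endian bytes, e.g. 'F' -> b'F\x00'.
record = struct.pack("<H", len(payload)) + payload   # hypothetical length-prefixed record
print(len(payload), record[:6])
```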
mozillazg/pypy
2ff5cd960c075c991389f842c6d59e71cf0cb7d0
lib-python/2.7/rfc822.py
python
Message.readheaders
(self)
Read header lines. Read header lines up to the entirely blank line that terminates them. The (normally blank) line that ends the headers is skipped, but not included in the returned list. If a non-header line ends the headers, (which is an error), an attempt is made to backspace over it; it is never included in the returned list. The variable self.status is set to the empty string if all went well, otherwise it is an error message. The variable self.headers is a completely uninterpreted list of lines contained in the header (so printing them will reproduce the header exactly as it appears in the file).
Read header lines.
[ "Read", "header", "lines", "." ]
def readheaders(self): """Read header lines. Read header lines up to the entirely blank line that terminates them. The (normally blank) line that ends the headers is skipped, but not included in the returned list. If a non-header line ends the headers, (which is an error), an attempt is made to backspace over it; it is never included in the returned list. The variable self.status is set to the empty string if all went well, otherwise it is an error message. The variable self.headers is a completely uninterpreted list of lines contained in the header (so printing them will reproduce the header exactly as it appears in the file). """ self.dict = {} self.unixfrom = '' self.headers = lst = [] self.status = '' headerseen = "" firstline = 1 startofline = unread = tell = None if hasattr(self.fp, 'unread'): unread = self.fp.unread elif self.seekable: tell = self.fp.tell while 1: if tell: try: startofline = tell() except IOError: startofline = tell = None self.seekable = 0 line = self.fp.readline() if not line: self.status = 'EOF in headers' break # Skip unix From name time lines if firstline and line.startswith('From '): self.unixfrom = self.unixfrom + line continue firstline = 0 if headerseen and line[0] in ' \t': # It's a continuation line. lst.append(line) x = (self.dict[headerseen] + "\n " + line.strip()) self.dict[headerseen] = x.strip() continue elif self.iscomment(line): # It's a comment. Ignore it. continue elif self.islast(line): # Note! No pushback here! The delimiter line gets eaten. break headerseen = self.isheader(line) if headerseen: # It's a legal header line, save it. lst.append(line) self.dict[headerseen] = line[len(headerseen)+1:].strip() continue elif headerseen is not None: # An empty header name. These aren't allowed in HTTP, but it's # probably a benign mistake. Don't add the header, just keep # going. continue else: # It's not a header line; throw it back and stop here. if not self.dict: self.status = 'No headers' else: self.status = 'Non-header line where header expected' # Try to undo the read. if unread: unread(line) elif tell: self.fp.seek(startofline) else: self.status = self.status + '; bad seek' break
[ "def", "readheaders", "(", "self", ")", ":", "self", ".", "dict", "=", "{", "}", "self", ".", "unixfrom", "=", "''", "self", ".", "headers", "=", "lst", "=", "[", "]", "self", ".", "status", "=", "''", "headerseen", "=", "\"\"", "firstline", "=", ...
https://github.com/mozillazg/pypy/blob/2ff5cd960c075c991389f842c6d59e71cf0cb7d0/lib-python/2.7/rfc822.py#L122-L200
rucio/rucio
6d0d358e04f5431f0b9a98ae40f31af0ddff4833
lib/rucio/db/sqla/migrate_repo/versions/5f139f77382a_added_child_rule_id_column.py
python
upgrade
()
Upgrade the database to this revision
Upgrade the database to this revision
[ "Upgrade", "the", "database", "to", "this", "revision" ]
def upgrade(): ''' Upgrade the database to this revision ''' if context.get_context().dialect.name in ['oracle', 'mysql', 'postgresql']: schema = context.get_context().version_table_schema if context.get_context().version_table_schema else '' add_column('rules', sa.Column('child_rule_id', GUID()), schema=schema) add_column('rules_hist_recent', sa.Column('child_rule_id', GUID()), schema=schema) add_column('rules_history', sa.Column('child_rule_id', GUID()), schema=schema) create_foreign_key('RULES_CHILD_RULE_ID_FK', 'rules', 'rules', ['child_rule_id'], ['id']) create_index('RULES_CHILD_RULE_ID_IDX', 'rules', ['child_rule_id'])
[ "def", "upgrade", "(", ")", ":", "if", "context", ".", "get_context", "(", ")", ".", "dialect", ".", "name", "in", "[", "'oracle'", ",", "'mysql'", ",", "'postgresql'", "]", ":", "schema", "=", "context", ".", "get_context", "(", ")", ".", "version_tab...
https://github.com/rucio/rucio/blob/6d0d358e04f5431f0b9a98ae40f31af0ddff4833/lib/rucio/db/sqla/migrate_repo/versions/5f139f77382a_added_child_rule_id_column.py#L36-L48
arizvisa/ida-minsc
8627a60f047b5e55d3efeecde332039cd1a16eea
base/database.py
python
address.prevstack
(cls, delta)
return cls.prevstack(ui.current.address(), delta)
Return the previous instruction from the current one that is past the specified sp `delta`.
Return the previous instruction from the current one that is past the specified sp `delta`.
[ "Return", "the", "previous", "instruction", "from", "the", "current", "one", "that", "is", "past", "the", "specified", "sp", "delta", "." ]
def prevstack(cls, delta): '''Return the previous instruction from the current one that is past the specified sp `delta`.''' return cls.prevstack(ui.current.address(), delta)
[ "def", "prevstack", "(", "cls", ",", "delta", ")", ":", "return", "cls", ".", "prevstack", "(", "ui", ".", "current", ".", "address", "(", ")", ",", "delta", ")" ]
https://github.com/arizvisa/ida-minsc/blob/8627a60f047b5e55d3efeecde332039cd1a16eea/base/database.py#L2927-L2929
CvvT/dumpDex
92ab3b7e996194a06bf1dd5538a4954e8a5ee9c1
python/idaapi.py
python
set_segm_class
(*args)
return _idaapi.set_segm_class(*args)
set_segm_class(s, sclass) -> int
set_segm_class(s, sclass) -> int
[ "set_segm_class", "(", "s", "sclass", ")", "-", ">", "int" ]
def set_segm_class(*args): """ set_segm_class(s, sclass) -> int """ return _idaapi.set_segm_class(*args)
[ "def", "set_segm_class", "(", "*", "args", ")", ":", "return", "_idaapi", ".", "set_segm_class", "(", "*", "args", ")" ]
https://github.com/CvvT/dumpDex/blob/92ab3b7e996194a06bf1dd5538a4954e8a5ee9c1/python/idaapi.py#L48240-L48244
lisa-lab/pylearn2
af81e5c362f0df4df85c3e54e23b2adeec026055
pylearn2/models/rbm.py
python
RBM.mean_v_given_h
(self, h)
Compute the mean activation of the visibles given hidden unit configurations for a set of training examples. Parameters ---------- h : tensor_like or list of tensor_likes Theano symbolic (or list thereof) representing the hidden unit states for a batch (or several) of training examples, with the first dimension indexing training examples and the second indexing hidden units. Returns ------- vprime : tensor_like or list of tensor_likes Theano symbolic (or list thereof) representing the mean (deterministic) reconstruction of the visible units given the hidden units.
Compute the mean activation of the visibles given hidden unit configurations for a set of training examples.
[ "Compute", "the", "mean", "activation", "of", "the", "visibles", "given", "hidden", "unit", "configurations", "for", "a", "set", "of", "training", "examples", "." ]
def mean_v_given_h(self, h): """ Compute the mean activation of the visibles given hidden unit configurations for a set of training examples. Parameters ---------- h : tensor_like or list of tensor_likes Theano symbolic (or list thereof) representing the hidden unit states for a batch (or several) of training examples, with the first dimension indexing training examples and the second indexing hidden units. Returns ------- vprime : tensor_like or list of tensor_likes Theano symbolic (or list thereof) representing the mean (deterministic) reconstruction of the visible units given the hidden units. """ if isinstance(h, tensor.Variable): return nnet.sigmoid(self.input_to_v_from_h(h)) else: return [self.mean_v_given_h(hid) for hid in h]
[ "def", "mean_v_given_h", "(", "self", ",", "h", ")", ":", "if", "isinstance", "(", "h", ",", "tensor", ".", "Variable", ")", ":", "return", "nnet", ".", "sigmoid", "(", "self", ".", "input_to_v_from_h", "(", "h", ")", ")", "else", ":", "return", "[",...
https://github.com/lisa-lab/pylearn2/blob/af81e5c362f0df4df85c3e54e23b2adeec026055/pylearn2/models/rbm.py#L746-L769
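mean_v_given_h above applies a sigmoid to the hidden-to-visible projection. A minimal numpy sketch of that formula, v_mean = sigmoid(h W^T + b_vis), with illustrative shapes; this mirrors the standard binary-RBM mean-field equation rather than pylearn2's Theano internals:

```python
# Minimal numpy sketch of the formula behind mean_v_given_h for a binary RBM:
# v_mean = sigmoid(h @ W.T + b_vis). Shapes and names are illustrative only.
import numpy as np

rng = np.random.default_rng(0)
n_visible, n_hidden, batch = 6, 4, 3

W = rng.normal(scale=0.1, size=(n_visible, n_hidden))  # visible-to-hidden weights
b_vis = np.zeros(n_visible)                            # visible biases
h = rng.integers(0, 2, size=(batch, n_hidden))         # sampled hidden states

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

v_mean = sigmoid(h @ W.T + b_vis)   # deterministic reconstruction of the visibles
print(v_mean.shape)                 # (3, 6)
```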
forrestbao/pyeeg
a6c18bb093e4748f9d9c208535a6ae024a0802b8
pyeeg/hjorth_mobility_complexity.py
python
hjorth
(X, D=None)
return numpy.sqrt(M2 / TP), numpy.sqrt( float(M4) * TP / M2 / M2 )
Compute Hjorth mobility and complexity of a time series from either two cases below: 1. X, the time series of type list (default) 2. D, a first order differential sequence of X (if D is provided, recommended to speed up) In case 1, D is computed using Numpy's Difference function. Notes ----- To speed up, it is recommended to compute D before calling this function because D may also be used by other functions whereas computing it here again will slow down. Parameters ---------- X list a time series D list first order differential sequence of a time series Returns ------- As indicated in return line Hjorth mobility and complexity
Compute Hjorth mobility and complexity of a time series from either two cases below: 1. X, the time series of type list (default) 2. D, a first order differential sequence of X (if D is provided, recommended to speed up)
[ "Compute", "Hjorth", "mobility", "and", "complexity", "of", "a", "time", "series", "from", "either", "two", "cases", "below", ":", "1", ".", "X", "the", "time", "series", "of", "type", "list", "(", "default", ")", "2", ".", "D", "a", "first", "order", ...
def hjorth(X, D=None): """ Compute Hjorth mobility and complexity of a time series from either two cases below: 1. X, the time series of type list (default) 2. D, a first order differential sequence of X (if D is provided, recommended to speed up) In case 1, D is computed using Numpy's Difference function. Notes ----- To speed up, it is recommended to compute D before calling this function because D may also be used by other functions whereas computing it here again will slow down. Parameters ---------- X list a time series D list first order differential sequence of a time series Returns ------- As indicated in return line Hjorth mobility and complexity """ if D is None: D = numpy.diff(X) D = D.tolist() D.insert(0, X[0]) # pad the first difference D = numpy.array(D) n = len(X) M2 = float(sum(D ** 2)) / n TP = sum(numpy.array(X) ** 2) M4 = 0 for i in range(1, len(D)): M4 += (D[i] - D[i - 1]) ** 2 M4 = M4 / n return numpy.sqrt(M2 / TP), numpy.sqrt( float(M4) * TP / M2 / M2 )
[ "def", "hjorth", "(", "X", ",", "D", "=", "None", ")", ":", "if", "D", "is", "None", ":", "D", "=", "numpy", ".", "diff", "(", "X", ")", "D", "=", "D", ".", "tolist", "(", ")", "D", ".", "insert", "(", "0", ",", "X", "[", "0", "]", ")",...
https://github.com/forrestbao/pyeeg/blob/a6c18bb093e4748f9d9c208535a6ae024a0802b8/pyeeg/hjorth_mobility_complexity.py#L4-L59
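A hedged usage sketch for hjorth above, assuming numpy is installed and the function is importable (for example from the pyeeg module shown). Passing the precomputed first-order difference D, as a plain list, is the speed-up the docstring recommends:

```python
# Usage sketch for hjorth above; assumes numpy is installed and hjorth is
# importable from the pyeeg module shown. Precomputing D is optional.
import numpy

t = numpy.linspace(0, 1, 500)
X = numpy.sin(2 * numpy.pi * 10 * t) + 0.1 * numpy.random.randn(500)

mobility, complexity = hjorth(X)                  # let hjorth compute D itself
D = numpy.diff(X)                                 # or pass the precomputed difference
mobility2, complexity2 = hjorth(X, D.tolist())    # must be a list (the function pads it)
print(mobility, complexity)
```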
nucypher/nucypher
f420caeb1c974f511f689fd1e5a9c6bbdf97f2d7
nucypher/config/util.py
python
cast_paths_from
(cls, payload)
return payload
A serialization helper. Iterates over constructor arguments of `cls` and `cls` parents. Finds arguments of type `pathlib.Path` or `Optional[pathlib.Path]`. Based on this, it casts corresponding values in `payload` from `str` to `pathlib.Path` or None.
A serialization helper. Iterates over constructor arguments of `cls` and `cls` parents. Finds arguments of type `pathlib.Path` or `Optional[pathlib.Path]`. Based on this, it casts corresponding values in `payload` from `str` to `pathlib.Path` or None.
[ "A", "serialization", "helper", ".", "Iterates", "over", "constructor", "arguments", "of", "cls", "and", "cls", "parents", ".", "Finds", "arguments", "of", "type", "pathlib", ".", "Path", "or", "Optional", "[", "pathlib", ".", "Path", "]", ".", "Based", "o...
def cast_paths_from(cls, payload): """ A serialization helper. Iterates over constructor arguments of `cls` and `cls` parents. Finds arguments of type `pathlib.Path` or `Optional[pathlib.Path]`. Based on this, it casts corresponding values in `payload` from `str` to `pathlib.Path` or None. """ constructor_args = get_type_hints(cls.__init__) for ancestor in cls.__mro__: constructor_args.update(get_type_hints(ancestor.__init__)) paths_only = [ arg for (arg, type_) in constructor_args.items() if type_ == Path or type_ == Optional[Path] ] for key in paths_only: if key in payload: payload[key] = Path(payload[key]) if payload[key] else None return payload
[ "def", "cast_paths_from", "(", "cls", ",", "payload", ")", ":", "constructor_args", "=", "get_type_hints", "(", "cls", ".", "__init__", ")", "for", "ancestor", "in", "cls", ".", "__mro__", ":", "constructor_args", ".", "update", "(", "get_type_hints", "(", "...
https://github.com/nucypher/nucypher/blob/f420caeb1c974f511f689fd1e5a9c6bbdf97f2d7/nucypher/config/util.py#L22-L39
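A usage sketch for cast_paths_from above; the Config class and payload are hypothetical, and the helper is assumed to be imported from nucypher/config/util.py (or defined as in the row):

```python
# Usage sketch for cast_paths_from above; the Config class here is hypothetical.
from pathlib import Path
from typing import Optional

class Config:
    def __init__(self, keystore_dir: Path, log_file: Optional[Path] = None, name: str = ""):
        self.keystore_dir = keystore_dir
        self.log_file = log_file
        self.name = name

payload = {"keystore_dir": "/tmp/keys", "log_file": "", "name": "demo"}
payload = cast_paths_from(Config, payload)
# On POSIX: {'keystore_dir': PosixPath('/tmp/keys'), 'log_file': None, 'name': 'demo'}
print(payload)
```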
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/jinja2/compiler.py
python
CodeGenerator.visit_ScopedEvalContextModifier
(self, node, frame)
[]
def visit_ScopedEvalContextModifier(self, node, frame): old_ctx_name = self.temporary_identifier() saved_ctx = frame.eval_ctx.save() self.writeline('%s = context.eval_ctx.save()' % old_ctx_name) self.visit_EvalContextModifier(node, frame) for child in node.body: self.visit(child, frame) frame.eval_ctx.revert(saved_ctx) self.writeline('context.eval_ctx.revert(%s)' % old_ctx_name)
[ "def", "visit_ScopedEvalContextModifier", "(", "self", ",", "node", ",", "frame", ")", ":", "old_ctx_name", "=", "self", ".", "temporary_identifier", "(", ")", "saved_ctx", "=", "frame", ".", "eval_ctx", ".", "save", "(", ")", "self", ".", "writeline", "(", ...
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/jinja2/compiler.py#L1713-L1721
cobbler/cobbler
eed8cdca3e970c8aa1d199e80b8c8f19b3f940cc
cobbler/items/image.py
python
Image.virt_auto_boot
(self)
return self._virt_auto_boot
r""" Whether the VM should be booted when booting the host or not. :getter: ``True`` means autoboot is enabled, otherwise VM is not booted automatically. :setter: The new state for the property.
r""" Whether the VM should be booted when booting the host or not.
[ "r", "Whether", "the", "VM", "should", "be", "booted", "when", "booting", "the", "host", "or", "not", "." ]
def virt_auto_boot(self) -> bool: r""" Whether the VM should be booted when booting the host or not. :getter: ``True`` means autoboot is enabled, otherwise VM is not booted automatically. :setter: The new state for the property. """ return self._virt_auto_boot
[ "def", "virt_auto_boot", "(", "self", ")", "->", "bool", ":", "return", "self", ".", "_virt_auto_boot" ]
https://github.com/cobbler/cobbler/blob/eed8cdca3e970c8aa1d199e80b8c8f19b3f940cc/cobbler/items/image.py#L335-L342
pantsbuild/pex
473c6ac732ed4bc338b4b20a9ec930d1d722c9b4
pex/vendor/_vendored/pip/pip/_vendor/idna/compat.py
python
ToASCII
(label)
return encode(label)
[]
def ToASCII(label): return encode(label)
[ "def", "ToASCII", "(", "label", ")", ":", "return", "encode", "(", "label", ")" ]
https://github.com/pantsbuild/pex/blob/473c6ac732ed4bc338b4b20a9ec930d1d722c9b4/pex/vendor/_vendored/pip/pip/_vendor/idna/compat.py#L4-L5
hyperledger/indy-plenum
7566f960f86d8ce74a26fa6ccbd534b942fc4223
plenum/server/node.py
python
Node.service_observer
(self, limit)
return await self._observer.serviceQueues(limit)
Service the observer's inBox and outBox :return: the number of messages successfully serviced
Service the observer's inBox and outBox
[ "Service", "the", "observer", "s", "inBox", "and", "outBox" ]
async def service_observer(self, limit) -> int: """ Service the observer's inBox and outBox :return: the number of messages successfully serviced """ if not self.isReady(): return 0 return await self._observer.serviceQueues(limit)
[ "async", "def", "service_observer", "(", "self", ",", "limit", ")", "->", "int", ":", "if", "not", "self", ".", "isReady", "(", ")", ":", "return", "0", "return", "await", "self", ".", "_observer", ".", "serviceQueues", "(", "limit", ")" ]
https://github.com/hyperledger/indy-plenum/blob/7566f960f86d8ce74a26fa6ccbd534b942fc4223/plenum/server/node.py#L1159-L1167
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/sqlalchemy/orm/collections.py
python
MappedCollection._convert
(self, dictlike)
Validate and convert a dict-like object into values for set()ing. This is called behind the scenes when a MappedCollection is replaced entirely by another collection, as in:: myobj.mappedcollection = {'a':obj1, 'b': obj2} # ... Raises a TypeError if the key in any (key, value) pair in the dictlike object does not match the key that this collection's keyfunc would have assigned for that value.
Validate and convert a dict-like object into values for set()ing.
[ "Validate", "and", "convert", "a", "dict", "-", "like", "object", "into", "values", "for", "set", "()", "ing", "." ]
def _convert(self, dictlike): """Validate and convert a dict-like object into values for set()ing. This is called behind the scenes when a MappedCollection is replaced entirely by another collection, as in:: myobj.mappedcollection = {'a':obj1, 'b': obj2} # ... Raises a TypeError if the key in any (key, value) pair in the dictlike object does not match the key that this collection's keyfunc would have assigned for that value. """ for incoming_key, value in util.dictlike_iteritems(dictlike): new_key = self.keyfunc(value) if incoming_key != new_key: raise TypeError( "Found incompatible key %r for value %r; this " "collection's " "keying function requires a key of %r for this value." % ( incoming_key, value, new_key)) yield value
[ "def", "_convert", "(", "self", ",", "dictlike", ")", ":", "for", "incoming_key", ",", "value", "in", "util", ".", "dictlike_iteritems", "(", "dictlike", ")", ":", "new_key", "=", "self", ".", "keyfunc", "(", "value", ")", "if", "incoming_key", "!=", "ne...
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/sqlalchemy/orm/collections.py#L1521-L1542
stevearc/pypicloud
046126f0b2a692b7bd382ae5cd3bf7af2f58103c
pypicloud/util.py
python
normalize_name
(name: str)
return re.sub(r"[-_.]+", "-", name).lower()
Normalize a python package name
Normalize a python package name
[ "Normalize", "a", "python", "package", "name" ]
def normalize_name(name: str) -> str: """Normalize a python package name""" # Lifted directly from PEP503: # https://www.python.org/dev/peps/pep-0503/#id4 return re.sub(r"[-_.]+", "-", name).lower()
[ "def", "normalize_name", "(", "name", ":", "str", ")", "->", "str", ":", "# Lifted directly from PEP503:", "# https://www.python.org/dev/peps/pep-0503/#id4", "return", "re", ".", "sub", "(", "r\"[-_.]+\"", ",", "\"-\"", ",", "name", ")", ".", "lower", "(", ")" ]
https://github.com/stevearc/pypicloud/blob/046126f0b2a692b7bd382ae5cd3bf7af2f58103c/pypicloud/util.py#L67-L71
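A quick self-contained check of the PEP 503 normalization implemented above; the function is restated so the snippet runs on its own, and the package names are only examples:

```python
# Quick check of the PEP 503 name normalisation implemented above.
import re

def normalize_name(name: str) -> str:
    return re.sub(r"[-_.]+", "-", name).lower()

assert normalize_name("Flask_SQLAlchemy") == "flask-sqlalchemy"
assert normalize_name("zope.interface") == "zope-interface"
assert normalize_name("ruamel.yaml--clib") == "ruamel-yaml-clib"
```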
Robot-Will/Stino
a94831cd1bf40a59587a7b6cc2e9b5c4306b1bf2
libs/base_utils/task_queue.py
python
ActionQueue._start
(self)
.
.
[ "." ]
def _start(self): """.""" if not self._is_alive: self._is_alive = True thread = threading.Thread(target=self._run) thread.start()
[ "def", "_start", "(", "self", ")", ":", "if", "not", "self", ".", "_is_alive", ":", "self", ".", "_is_alive", "=", "True", "thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_run", ")", "thread", ".", "start", "(", ")" ]
https://github.com/Robot-Will/Stino/blob/a94831cd1bf40a59587a7b6cc2e9b5c4306b1bf2/libs/base_utils/task_queue.py#L31-L36
aleju/imgaug
0101108d4fed06bc5056c4a03e2bcb0216dac326
imgaug/augmenters/pillike.py
python
Affine._augment_batch_
(self, batch, random_state, parents, hooks)
return super(Affine, self)._augment_batch_( batch, random_state, parents, hooks)
[]
def _augment_batch_(self, batch, random_state, parents, hooks): cols = batch.get_column_names() assert len(cols) == 0 or (len(cols) == 1 and "images" in cols), ( "pillike.Affine can currently only process image data. Got a " "batch containing: %s. Use imgaug.augmenters.geometric.Affine for " "batches containing non-image data." % (", ".join(cols),)) return super(Affine, self)._augment_batch_( batch, random_state, parents, hooks)
[ "def", "_augment_batch_", "(", "self", ",", "batch", ",", "random_state", ",", "parents", ",", "hooks", ")", ":", "cols", "=", "batch", ".", "get_column_names", "(", ")", "assert", "len", "(", "cols", ")", "==", "0", "or", "(", "len", "(", "cols", ")...
https://github.com/aleju/imgaug/blob/0101108d4fed06bc5056c4a03e2bcb0216dac326/imgaug/augmenters/pillike.py#L2461-L2469
openhatch/oh-mainline
ce29352a034e1223141dcc2f317030bbc3359a51
vendor/packages/celery/celery/worker/buckets.py
python
TaskBucket.get_bucket_for_type
(self, task_name)
return self.buckets[task_name]
Get the bucket for a particular task type.
Get the bucket for a particular task type.
[ "Get", "the", "bucket", "for", "a", "particular", "task", "type", "." ]
def get_bucket_for_type(self, task_name): """Get the bucket for a particular task type.""" if task_name not in self.buckets: return self.add_bucket_for_type(task_name) return self.buckets[task_name]
[ "def", "get_bucket_for_type", "(", "self", ",", "task_name", ")", ":", "if", "task_name", "not", "in", "self", ".", "buckets", ":", "return", "self", ".", "add_bucket_for_type", "(", "task_name", ")", "return", "self", ".", "buckets", "[", "task_name", "]" ]
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/celery/celery/worker/buckets.py#L162-L166
wechatpy/wechatpy
5f693a7e90156786c2540ad3c941d12cdf6d88ef
wechatpy/crypto/__init__.py
python
RefundCrypto.encrypt
(self, text)
return self._encrypt(text)
[]
def encrypt(self, text): return self._encrypt(text)
[ "def", "encrypt", "(", "self", ",", "text", ")", ":", "return", "self", ".", "_encrypt", "(", "text", ")" ]
https://github.com/wechatpy/wechatpy/blob/5f693a7e90156786c2540ad3c941d12cdf6d88ef/wechatpy/crypto/__init__.py#L115-L116
omz/PythonistaAppTemplate
f560f93f8876d82a21d108977f90583df08d55af
PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/mpl_toolkits/mplot3d/art3d.py
python
text_2d_to_3d
(obj, z=0, zdir='z')
Convert a Text to a Text3D object.
Convert a Text to a Text3D object.
[ "Convert", "a", "Text", "to", "a", "Text3D", "object", "." ]
def text_2d_to_3d(obj, z=0, zdir='z'): """Convert a Text to a Text3D object.""" obj.__class__ = Text3D obj.set_3d_properties(z, zdir)
[ "def", "text_2d_to_3d", "(", "obj", ",", "z", "=", "0", ",", "zdir", "=", "'z'", ")", ":", "obj", ".", "__class__", "=", "Text3D", "obj", ".", "set_3d_properties", "(", "z", ",", "zdir", ")" ]
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/mpl_toolkits/mplot3d/art3d.py#L86-L89
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/schemes/curves/constructor.py
python
_is_irreducible_and_reduced
(F)
return len(factors) == 1 and factors[0][1] == 1
Check if the polynomial F is irreducible and reduced. TESTS:: sage: R.<x,y> = QQ[] sage: F = x^2 + y^2 sage: from sage.schemes.curves.constructor import _is_irreducible_and_reduced sage: _is_irreducible_and_reduced(F) True
Check if the polynomial F is irreducible and reduced.
[ "Check", "if", "the", "polynomial", "F", "is", "irreducible", "and", "reduced", "." ]
def _is_irreducible_and_reduced(F): """ Check if the polynomial F is irreducible and reduced. TESTS:: sage: R.<x,y> = QQ[] sage: F = x^2 + y^2 sage: from sage.schemes.curves.constructor import _is_irreducible_and_reduced sage: _is_irreducible_and_reduced(F) True """ factors = F.factor() return len(factors) == 1 and factors[0][1] == 1
[ "def", "_is_irreducible_and_reduced", "(", "F", ")", ":", "factors", "=", "F", ".", "factor", "(", ")", "return", "len", "(", "factors", ")", "==", "1", "and", "factors", "[", "0", "]", "[", "1", "]", "==", "1" ]
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/schemes/curves/constructor.py#L80-L93
bendmorris/static-python
2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473
Lib/logging/__init__.py
python
PlaceHolder.__init__
(self, alogger)
Initialize with the specified logger being a child of this placeholder.
Initialize with the specified logger being a child of this placeholder.
[ "Initialize", "with", "the", "specified", "logger", "being", "a", "child", "of", "this", "placeholder", "." ]
def __init__(self, alogger): """ Initialize with the specified logger being a child of this placeholder. """ self.loggerMap = { alogger : None }
[ "def", "__init__", "(", "self", ",", "alogger", ")", ":", "self", ".", "loggerMap", "=", "{", "alogger", ":", "None", "}" ]
https://github.com/bendmorris/static-python/blob/2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473/Lib/logging/__init__.py#L1046-L1050
jython/jython3
def4f8ec47cb7a9c799ea4c745f12badf92c5769
lib-python/3.5.1/locale.py
python
_strcoll
(a,b)
return (a > b) - (a < b)
strcoll(string,string) -> int. Compares two strings according to the locale.
strcoll(string,string) -> int. Compares two strings according to the locale.
[ "strcoll", "(", "string", "string", ")", "-", ">", "int", ".", "Compares", "two", "strings", "according", "to", "the", "locale", "." ]
def _strcoll(a,b): """ strcoll(string,string) -> int. Compares two strings according to the locale. """ return (a > b) - (a < b)
[ "def", "_strcoll", "(", "a", ",", "b", ")", ":", "return", "(", "a", ">", "b", ")", "-", "(", "a", "<", "b", ")" ]
https://github.com/jython/jython3/blob/def4f8ec47cb7a9c799ea4c745f12badf92c5769/lib-python/3.5.1/locale.py#L34-L38
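The pure-Python fallback above reproduces strcoll's sign convention (negative, zero, or positive) using plain code-point comparison rather than the active locale. A tiny self-contained check:

```python
# The fallback _strcoll above: negative if a < b, zero if equal, positive if
# a > b, by code-point order (the locale is ignored in this fallback).
def _strcoll(a, b):
    return (a > b) - (a < b)

print(_strcoll("apple", "banana"))  # -1
print(_strcoll("pear", "pear"))     #  0
print(_strcoll("zoo", "ant"))       #  1
```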
mypaint/mypaint
90b36dbc7b8bd2f323383f7edf608a5e0a3a1a33
lib/palette.py
python
Palette.sequence_changed
(self)
Event: the color ordering or palette length was changed.
Event: the color ordering or palette length was changed.
[ "Event", ":", "the", "color", "ordering", "or", "palette", "length", "was", "changed", "." ]
def sequence_changed(self): """Event: the color ordering or palette length was changed."""
[ "def", "sequence_changed", "(", "self", ")", ":" ]
https://github.com/mypaint/mypaint/blob/90b36dbc7b8bd2f323383f7edf608a5e0a3a1a33/lib/palette.py#L769-L770
Lausannen/NAS-FCOS
d586b28c0fae72a9f30119797732e614a2517de5
maskrcnn_benchmark/modeling/detector/generalized_rcnn.py
python
GeneralizedRCNN.forward
(self, images, targets=None)
return result
Arguments: images (list[Tensor] or ImageList): images to be processed targets (list[BoxList]): ground-truth boxes present in the image (optional) Returns: result (list[BoxList] or dict[Tensor]): the output from the model. During training, it returns a dict[Tensor] which contains the losses. During testing, it returns list[BoxList] contains additional fields like `scores`, `labels` and `mask` (for Mask R-CNN models).
Arguments: images (list[Tensor] or ImageList): images to be processed targets (list[BoxList]): ground-truth boxes present in the image (optional)
[ "Arguments", ":", "images", "(", "list", "[", "Tensor", "]", "or", "ImageList", ")", ":", "images", "to", "be", "processed", "targets", "(", "list", "[", "BoxList", "]", ")", ":", "ground", "-", "truth", "boxes", "present", "in", "the", "image", "(", ...
def forward(self, images, targets=None): """ Arguments: images (list[Tensor] or ImageList): images to be processed targets (list[BoxList]): ground-truth boxes present in the image (optional) Returns: result (list[BoxList] or dict[Tensor]): the output from the model. During training, it returns a dict[Tensor] which contains the losses. During testing, it returns list[BoxList] contains additional fields like `scores`, `labels` and `mask` (for Mask R-CNN models). """ if self.training and targets is None: raise ValueError("In training mode, targets should be passed") images = to_image_list(images) features = self.backbone(images.tensors) proposals, proposal_losses = self.rpn(images, features, targets) if self.roi_heads: x, result, detector_losses = self.roi_heads(features, proposals, targets) else: # RPN-only models don't have roi_heads x = features result = proposals detector_losses = {} if self.training: losses = {} losses.update(detector_losses) losses.update(proposal_losses) return losses return result
[ "def", "forward", "(", "self", ",", "images", ",", "targets", "=", "None", ")", ":", "if", "self", ".", "training", "and", "targets", "is", "None", ":", "raise", "ValueError", "(", "\"In training mode, targets should be passed\"", ")", "images", "=", "to_image...
https://github.com/Lausannen/NAS-FCOS/blob/d586b28c0fae72a9f30119797732e614a2517de5/maskrcnn_benchmark/modeling/detector/generalized_rcnn.py#L49-L81
DataBiosphere/toil
2e148eee2114ece8dcc3ec8a83f36333266ece0d
src/toil/utils/toilStats.py
python
buildElement
(element: Expando, items: List[Job], itemName: str)
return element[itemName]
Create an element for output.
Create an element for output.
[ "Create", "an", "element", "for", "output", "." ]
def buildElement(element: Expando, items: List[Job], itemName: str) -> Expando: """ Create an element for output. """ def assertNonnegative(i: float, name: str) -> float: if i < 0: raise RuntimeError("Negative value %s reported for %s" %(i,name) ) else: return float(i) itemTimes = [] itemClocks = [] itemMemory = [] for item in items: # If something lacks an entry, assume it used none of that thing. # This avoids crashing when jobs e.g. aren't done. itemTimes.append(assertNonnegative(float(item.get("time", 0)), "time")) itemClocks.append(assertNonnegative(float(item.get("clock", 0)), "clock")) itemMemory.append(assertNonnegative(float(item.get("memory", 0)), "memory")) assert len(itemClocks) == len(itemTimes) == len(itemMemory) itemWaits=[] for index in range(0,len(itemTimes)): itemWaits.append(itemTimes[index] - itemClocks[index]) itemWaits.sort() itemTimes.sort() itemClocks.sort() itemMemory.sort() if len(itemTimes) == 0: itemTimes.append(0) itemClocks.append(0) itemWaits.append(0) itemMemory.append(0) element[itemName]=Expando( total_number=float(len(items)), total_time=float(sum(itemTimes)), median_time=float(itemTimes[len(itemTimes) // 2]), average_time=float(sum(itemTimes) / len(itemTimes)), min_time=float(min(itemTimes)), max_time=float(max(itemTimes)), total_clock=float(sum(itemClocks)), median_clock=float(itemClocks[len(itemClocks) // 2]), average_clock=float(sum(itemClocks) / len(itemClocks)), min_clock=float(min(itemClocks)), max_clock=float(max(itemClocks)), total_wait=float(sum(itemWaits)), median_wait=float(itemWaits[len(itemWaits) // 2]), average_wait=float(sum(itemWaits) / len(itemWaits)), min_wait=float(min(itemWaits)), max_wait=float(max(itemWaits)), total_memory=float(sum(itemMemory)), median_memory=float(itemMemory[len(itemMemory) // 2]), average_memory=float(sum(itemMemory) / len(itemMemory)), min_memory=float(min(itemMemory)), max_memory=float(max(itemMemory)), name=itemName ) return element[itemName]
[ "def", "buildElement", "(", "element", ":", "Expando", ",", "items", ":", "List", "[", "Job", "]", ",", "itemName", ":", "str", ")", "->", "Expando", ":", "def", "assertNonnegative", "(", "i", ":", "float", ",", "name", ":", "str", ")", "->", "float"...
https://github.com/DataBiosphere/toil/blob/2e148eee2114ece8dcc3ec8a83f36333266ece0d/src/toil/utils/toilStats.py#L363-L422
cloudera/impyla
0c736af4cad2bade9b8e313badc08ec50e81c948
impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py
python
exchange_partitions_result.__init__
(self, success=None, o1=None, o2=None, o3=None, o4=None,)
[]
def __init__(self, success=None, o1=None, o2=None, o3=None, o4=None,): self.success = success self.o1 = o1 self.o2 = o2 self.o3 = o3 self.o4 = o4
[ "def", "__init__", "(", "self", ",", "success", "=", "None", ",", "o1", "=", "None", ",", "o2", "=", "None", ",", "o3", "=", "None", ",", "o4", "=", "None", ",", ")", ":", "self", ".", "success", "=", "success", "self", ".", "o1", "=", "o1", ...
https://github.com/cloudera/impyla/blob/0c736af4cad2bade9b8e313badc08ec50e81c948/impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py#L19541-L19546
rhinstaller/anaconda
63edc8680f1b05cbfe11bef28703acba808c5174
pyanaconda/ui/gui/xkl_wrapper.py
python
XklWrapper.replace_layouts
(self, layouts_list)
Method that replaces the layouts defined in the current X configuration with the new ones given. :param layouts_list: list of layouts defined as either 'layout' or 'layout (variant)' :raise XklWrapperError: if layouts cannot be replaced with the new ones
Method that replaces the layouts defined in the current X configuration with the new ones given.
[ "Method", "that", "replaces", "the", "layouts", "defined", "in", "the", "current", "X", "configuration", "with", "the", "new", "ones", "given", "." ]
def replace_layouts(self, layouts_list): """ Method that replaces the layouts defined in the current X configuration with the new ones given. :param layouts_list: list of layouts defined as either 'layout' or 'layout (variant)' :raise XklWrapperError: if layouts cannot be replaced with the new ones """ new_layouts = list() new_variants = list() for layout_variant in layouts_list: (layout, variant) = parse_layout_variant(layout_variant) new_layouts.append(layout) new_variants.append(variant) self._rec.set_layouts(new_layouts) self._rec.set_variants(new_variants) if not self._rec.activate(self._engine): msg = "Failed to replace layouts with: %s" % ",".join(layouts_list) raise XklWrapperError(msg)
[ "def", "replace_layouts", "(", "self", ",", "layouts_list", ")", ":", "new_layouts", "=", "list", "(", ")", "new_variants", "=", "list", "(", ")", "for", "layout_variant", "in", "layouts_list", ":", "(", "layout", ",", "variant", ")", "=", "parse_layout_vari...
https://github.com/rhinstaller/anaconda/blob/63edc8680f1b05cbfe11bef28703acba808c5174/pyanaconda/ui/gui/xkl_wrapper.py#L357-L381
pwnieexpress/pwn_plug_sources
1a23324f5dc2c3de20f9c810269b6a29b2758cad
src/metagoofil/hachoir_metadata/audio.py
python
computeComprRate
(meta, size)
[]
def computeComprRate(meta, size): if not meta.has("duration") \ or not meta.has("sample_rate") \ or not meta.has("bits_per_sample") \ or not meta.has("nb_channel") \ or not size: return orig_size = timedelta2seconds(meta.get("duration")) * meta.get('sample_rate') * meta.get('bits_per_sample') * meta.get('nb_channel') meta.compr_rate = float(orig_size) / size
[ "def", "computeComprRate", "(", "meta", ",", "size", ")", ":", "if", "not", "meta", ".", "has", "(", "\"duration\"", ")", "or", "not", "meta", ".", "has", "(", "\"sample_rate\"", ")", "or", "not", "meta", ".", "has", "(", "\"bits_per_sample\"", ")", "o...
https://github.com/pwnieexpress/pwn_plug_sources/blob/1a23324f5dc2c3de20f9c810269b6a29b2758cad/src/metagoofil/hachoir_metadata/audio.py#L11-L19
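computeComprRate above derives the uncompressed size as duration × sample_rate × bits_per_sample × nb_channel and divides it by the stored size. A worked example with made-up numbers; hachoir tracks stream sizes in bits internally, so the file size is converted to bits here, and that unit choice should be treated as an assumption:

```python
# Worked example of the compression-rate formula above (numbers are made up):
# a 3-minute, 44.1 kHz, 16-bit stereo recording stored in a 5 MiB file.
duration_s = 180
sample_rate = 44100
bits_per_sample = 16
nb_channel = 2
file_size_bits = 5 * 1024 * 1024 * 8       # assumed: compare like-for-like in bits

orig_size = duration_s * sample_rate * bits_per_sample * nb_channel
compr_rate = orig_size / file_size_bits
print(orig_size, round(compr_rate, 2))      # 254016000 bits, ~6.06x
```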
pypa/pipenv
b21baade71a86ab3ee1429f71fbc14d4f95fb75d
pipenv/vendor/urllib3/contrib/ntlmpool.py
python
NTLMConnectionPool.__init__
(self, user, pw, authurl, *args, **kwargs)
authurl is a random URL on the server that is protected by NTLM. user is the Windows user, probably in the DOMAIN\\username format. pw is the password for the user.
authurl is a random URL on the server that is protected by NTLM. user is the Windows user, probably in the DOMAIN\\username format. pw is the password for the user.
[ "authurl", "is", "a", "random", "URL", "on", "the", "server", "that", "is", "protected", "by", "NTLM", ".", "user", "is", "the", "Windows", "user", "probably", "in", "the", "DOMAIN", "\\\\", "username", "format", ".", "pw", "is", "the", "password", "for"...
def __init__(self, user, pw, authurl, *args, **kwargs): """ authurl is a random URL on the server that is protected by NTLM. user is the Windows user, probably in the DOMAIN\\username format. pw is the password for the user. """ super(NTLMConnectionPool, self).__init__(*args, **kwargs) self.authurl = authurl self.rawuser = user user_parts = user.split("\\", 1) self.domain = user_parts[0].upper() self.user = user_parts[1] self.pw = pw
[ "def", "__init__", "(", "self", ",", "user", ",", "pw", ",", "authurl", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "super", "(", "NTLMConnectionPool", ",", "self", ")", ".", "__init__", "(", "*", "args", ",", "*", "*", "kwargs", ")", "s...
https://github.com/pypa/pipenv/blob/b21baade71a86ab3ee1429f71fbc14d4f95fb75d/pipenv/vendor/urllib3/contrib/ntlmpool.py#L34-L46
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/pip/_vendor/requests/sessions.py
python
SessionRedirectMixin.get_redirect_target
(self, resp)
return None
Receives a Response. Returns a redirect URI or ``None``
Receives a Response. Returns a redirect URI or ``None``
[ "Receives", "a", "Response", ".", "Returns", "a", "redirect", "URI", "or", "None" ]
def get_redirect_target(self, resp): """Receives a Response. Returns a redirect URI or ``None``""" # Due to the nature of how requests processes redirects this method will # be called at least once upon the original response and at least twice # on each subsequent redirect response (if any). # If a custom mixin is used to handle this logic, it may be advantageous # to cache the redirect location onto the response object as a private # attribute. if resp.is_redirect: location = resp.headers['location'] # Currently the underlying http module on py3 decode headers # in latin1, but empirical evidence suggests that latin1 is very # rarely used with non-ASCII characters in HTTP headers. # It is more likely to get UTF8 header rather than latin1. # This causes incorrect handling of UTF8 encoded location headers. # To solve this, we re-encode the location in latin1. if is_py3: location = location.encode('latin1') return to_native_string(location, 'utf8') return None
[ "def", "get_redirect_target", "(", "self", ",", "resp", ")", ":", "# Due to the nature of how requests processes redirects this method will", "# be called at least once upon the original response and at least twice", "# on each subsequent redirect response (if any).", "# If a custom mixin is u...
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/pip/_vendor/requests/sessions.py#L97-L116
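get_redirect_target above just checks Response.is_redirect and returns the Location header (plus a py2/py3 re-encoding detail). A hedged usage sketch with the public requests API, omitting that encoding dance; it requires network access and the URL is only an example:

```python
# Simplified look at the redirect-target check above using the public requests
# API; the latin1/utf8 re-encoding is a py2/py3 compatibility detail omitted here.
import requests

resp = requests.get("http://github.com", allow_redirects=False)
if resp.is_redirect:
    print("redirects to:", resp.headers["location"])
else:
    print("no redirect")
```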
quentinhardy/odat
364b94cc662dcbb95a0b28880c6a71ddfc66dd6b
ServiceNameGuesser.py
python
ServiceNameGuesser.__testIfAGoodServiceName__
(self)
Test if it is a good Service Name
Test if it is a good Service Name
[ "Test", "if", "it", "is", "a", "good", "Service", "Name" ]
def __testIfAGoodServiceName__(self): ''' Test if it is a good Service Name ''' no_good_service_name_found = False self.__generateConnectionString__(username=self.__generateRandomString__(nb=15), password=self.__generateRandomString__(nb=5)) logging.debug("Try to connect with the {0} Service Name ({1})".format(self.args['serviceName'], self.args['connectionStr'])) status = self.connection() if self.__needRetryConnection__(status) == True: status = self.__retryConnect__(nbTry=4) if status != None : for aNoGoodString in self.NO_GOOD_SERVICE_NAME_STRING_LIST: if aNoGoodString in str(status): no_good_service_name_found = True break if no_good_service_name_found == False: self.appendValidServiceName(self.args['serviceName']) logging.info("'{0}' is a valid Service Name (Server message: {1})".format(self.args['serviceName'], str(status))) self.args['print'].goodNews(stringToLinePadded("'{0}' is a valid Service Name. Continue... ".format(self.args['serviceName']))) self.close()
[ "def", "__testIfAGoodServiceName__", "(", "self", ")", ":", "no_good_service_name_found", "=", "False", "self", ".", "__generateConnectionString__", "(", "username", "=", "self", ".", "__generateRandomString__", "(", "nb", "=", "15", ")", ",", "password", "=", "se...
https://github.com/quentinhardy/odat/blob/364b94cc662dcbb95a0b28880c6a71ddfc66dd6b/ServiceNameGuesser.py#L57-L76
svenkreiss/pysparkling
f0e8e8d039f3313c2693b7c7576cb1b7ba5a6d78
pysparkling/sql/functions.py
python
dayofmonth
(e)
return col(DayOfMonth(ensure_column(e)))
:rtype: Column
:rtype: Column
[ ":", "rtype", ":", "Column" ]
def dayofmonth(e): """ :rtype: Column """ return col(DayOfMonth(ensure_column(e)))
[ "def", "dayofmonth", "(", "e", ")", ":", "return", "col", "(", "DayOfMonth", "(", "ensure_column", "(", "e", ")", ")", ")" ]
https://github.com/svenkreiss/pysparkling/blob/f0e8e8d039f3313c2693b7c7576cb1b7ba5a6d78/pysparkling/sql/functions.py#L1756-L1760
tooxie/shiva-server
4d169aae8d4cb01133f62701b14610695e48c297
shiva/utils.py
python
MetadataManager.title
(self)
return self._getter('title')
[]
def title(self): return self._getter('title')
[ "def", "title", "(", "self", ")", ":", "return", "self", ".", "_getter", "(", "'title'", ")" ]
https://github.com/tooxie/shiva-server/blob/4d169aae8d4cb01133f62701b14610695e48c297/shiva/utils.py#L118-L119
snakeztc/NeuralDialog-ZSDG
1d1548457a16a2e07567dc8532ea8b2fba178540
zsdg/models/model_bases.py
python
BaseModel.ptr_loss
(self, dec_ctx, labels)
return avg_attn_loss
[]
def ptr_loss(self, dec_ctx, labels): # find attention loss g = dec_ctx[DecoderPointerGen.KEY_G] ptr_softmax = dec_ctx[DecoderPointerGen.KEY_PTR_SOFTMAX] flat_ptr = ptr_softmax.view(-1, self.vocab_size) label_mask = labels.view(-1, 1) == self.rev_vocab[PAD] label_ptr = flat_ptr.gather(1, labels.view(-1, 1)) not_in_ctx = label_ptr == 0 mix_ptr = torch.cat([label_ptr, g.view(-1, 1)], dim=1).gather(1, not_in_ctx.long()) # mix_ptr = g.view(-1, 1) + label_ptr attention_loss = -1.0 * torch.log(mix_ptr.clamp(min=1e-8)) attention_loss.masked_fill_(label_mask, 0) valid_cnt = (label_mask.size(0) - torch.sum(label_mask).float()) + 1e-8 avg_attn_loss = torch.sum(attention_loss) / valid_cnt return avg_attn_loss
[ "def", "ptr_loss", "(", "self", ",", "dec_ctx", ",", "labels", ")", ":", "# find attention loss", "g", "=", "dec_ctx", "[", "DecoderPointerGen", ".", "KEY_G", "]", "ptr_softmax", "=", "dec_ctx", "[", "DecoderPointerGen", ".", "KEY_PTR_SOFTMAX", "]", "flat_ptr", ...
https://github.com/snakeztc/NeuralDialog-ZSDG/blob/1d1548457a16a2e07567dc8532ea8b2fba178540/zsdg/models/model_bases.py#L139-L154
krintoxi/NoobSec-Toolkit
38738541cbc03cedb9a3b3ed13b629f781ad64f6
NoobSecToolkit /scripts/sshbackdoors/rpyc/utils/classic.py
python
upload
(conn, localpath, remotepath, filter = None, ignore_invalid = False, chunk_size = 16000)
uploads a file or a directory to the given remote path :param localpath: the local file or directory :param remotepath: the remote path :param filter: a predicate that accepts the filename and determines whether it should be uploaded; None means any file :param chunk_size: the IO chunk size
uploads a file or a directory to the given remote path :param localpath: the local file or directory :param remotepath: the remote path :param filter: a predicate that accepts the filename and determines whether it should be uploaded; None means any file :param chunk_size: the IO chunk size
[ "uploads", "a", "file", "or", "a", "directory", "to", "the", "given", "remote", "path", ":", "param", "localpath", ":", "the", "local", "file", "or", "directory", ":", "param", "remotepath", ":", "the", "remote", "path", ":", "param", "filter", ":", "a",...
def upload(conn, localpath, remotepath, filter = None, ignore_invalid = False, chunk_size = 16000): """uploads a file or a directory to the given remote path :param localpath: the local file or directory :param remotepath: the remote path :param filter: a predicate that accepts the filename and determines whether it should be uploaded; None means any file :param chunk_size: the IO chunk size """ if os.path.isdir(localpath): upload_dir(conn, localpath, remotepath, filter, chunk_size) elif os.path.isfile(localpath): upload_file(conn, localpath, remotepath, chunk_size) else: if not ignore_invalid: raise ValueError("cannot upload %r" % (localpath,))
[ "def", "upload", "(", "conn", ",", "localpath", ",", "remotepath", ",", "filter", "=", "None", ",", "ignore_invalid", "=", "False", ",", "chunk_size", "=", "16000", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "localpath", ")", ":", "upload_di...
https://github.com/krintoxi/NoobSec-Toolkit/blob/38738541cbc03cedb9a3b3ed13b629f781ad64f6/NoobSecToolkit /scripts/sshbackdoors/rpyc/utils/classic.py#L151-L166
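A hedged usage sketch for upload above, assuming an rpyc classic server is reachable; the host name and paths are hypothetical:

```python
# Usage sketch for upload above; assumes an rpyc classic server is reachable
# at the (hypothetical) host below, e.g. one started with rpyc_classic.py.
import rpyc
from rpyc.utils.classic import upload

conn = rpyc.classic.connect("backdoor-host.example")   # hypothetical host name
# Copy a single file, then a whole directory tree, to the remote side.
upload(conn, "/tmp/payload.bin", "/tmp/payload.bin")
upload(conn, "/tmp/tools", "/opt/tools", chunk_size=64000)
conn.close()
```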
oilshell/oil
94388e7d44a9ad879b12615f6203b38596b5a2d3
Python-2.7.13/Tools/bgen/bgen/bgenBuffer.py
python
StructInputOutputBufferType.getOutputBufferDeclarations
(self, name, constmode=False, outmode=False)
return ["%s%s %s__out__" % (self.type, out, name)]
[]
def getOutputBufferDeclarations(self, name, constmode=False, outmode=False): if constmode: raise RuntimeError, "Cannot use const output buffer" if outmode: out = "*" else: out = "" return ["%s%s %s__out__" % (self.type, out, name)]
[ "def", "getOutputBufferDeclarations", "(", "self", ",", "name", ",", "constmode", "=", "False", ",", "outmode", "=", "False", ")", ":", "if", "constmode", ":", "raise", "RuntimeError", ",", "\"Cannot use const output buffer\"", "if", "outmode", ":", "out", "=", ...
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Tools/bgen/bgen/bgenBuffer.py#L217-L224
pyansys/pymapdl
c07291fc062b359abf0e92b95a92d753a95ef3d7
ansys/mapdl/core/_commands/preproc/coupled_dof.py
python
CoupledDOF.cpngen
(self, nset="", lab="", node1="", node2="", ninc="", **kwargs)
return self.run(command, **kwargs)
Defines, modifies, or adds to a set of coupled degrees of freedom. APDL Command: CPNGEN Parameters ---------- nset Set reference number [CP]. lab Degree of freedom label [CP]. node1, node2, ninc Include in coupled set nodes NODE1 to NODE2 in steps of NINC (defaults to 1). If NODE1 = P, graphical picking is enabled and all remaining command fields are ignored (valid only in the GUI). If -NODE1, delete range of nodes from set instead of including. A component name may also be substituted for NODE1 (NODE2 and NINC are ignored). Notes ----- Defines, modifies, or adds to a set of coupled degrees of freedom. May be used in combination with (or in place of) the CP command. Repeat CPNGEN command for additional nodes.
Defines, modifies, or adds to a set of coupled degrees of freedom.
[ "Defines", "modifies", "or", "adds", "to", "a", "set", "of", "coupled", "degrees", "of", "freedom", "." ]
def cpngen(self, nset="", lab="", node1="", node2="", ninc="", **kwargs): """Defines, modifies, or adds to a set of coupled degrees of freedom. APDL Command: CPNGEN Parameters ---------- nset Set reference number [CP]. lab Degree of freedom label [CP]. node1, node2, ninc Include in coupled set nodes NODE1 to NODE2 in steps of NINC (defaults to 1). If NODE1 = P, graphical picking is enabled and all remaining command fields are ignored (valid only in the GUI). If -NODE1, delete range of nodes from set instead of including. A component name may also be substituted for NODE1 (NODE2 and NINC are ignored). Notes ----- Defines, modifies, or adds to a set of coupled degrees of freedom. May be used in combination with (or in place of) the CP command. Repeat CPNGEN command for additional nodes. """ command = f"CPNGEN,{nset},{lab},{node1},{node2},{ninc}" return self.run(command, **kwargs)
[ "def", "cpngen", "(", "self", ",", "nset", "=", "\"\"", ",", "lab", "=", "\"\"", ",", "node1", "=", "\"\"", ",", "node2", "=", "\"\"", ",", "ninc", "=", "\"\"", ",", "*", "*", "kwargs", ")", ":", "command", "=", "f\"CPNGEN,{nset},{lab},{node1},{node2},...
https://github.com/pyansys/pymapdl/blob/c07291fc062b359abf0e92b95a92d753a95ef3d7/ansys/mapdl/core/_commands/preproc/coupled_dof.py#L290-L318
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
ansible/roles/lib_zabbix/build/lib/base.py
python
Utils.get_priority
(priority)
return prior
determine priority
determine priority
[ "determine", "priority" ]
def get_priority(priority): ''' determine priority ''' prior = 0 if 'info' in priority: prior = 1 elif 'warn' in priority: prior = 2 elif 'avg' == priority or 'ave' in priority: prior = 3 elif 'high' in priority: prior = 4 elif 'dis' in priority: prior = 5 return prior
[ "def", "get_priority", "(", "priority", ")", ":", "prior", "=", "0", "if", "'info'", "in", "priority", ":", "prior", "=", "1", "elif", "'warn'", "in", "priority", ":", "prior", "=", "2", "elif", "'avg'", "==", "priority", "or", "'ave'", "in", "priority...
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/ansible/roles/lib_zabbix/build/lib/base.py#L15-L29
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/whoosh/query/positional.py
python
Sequence.__eq__
(self, other)
return (other and type(self) is type(other) and self.subqueries == other.subqueries and self.boost == other.boost)
[]
def __eq__(self, other): return (other and type(self) is type(other) and self.subqueries == other.subqueries and self.boost == other.boost)
[ "def", "__eq__", "(", "self", ",", "other", ")", ":", "return", "(", "other", "and", "type", "(", "self", ")", "is", "type", "(", "other", ")", "and", "self", ".", "subqueries", "==", "other", ".", "subqueries", "and", "self", ".", "boost", "==", "...
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/whoosh/query/positional.py#L65-L68
Azure/azure-storage-python
4306898850dd21617644fc537a57d025e833db74
azure-storage-blob/azure/storage/blob/baseblobservice.py
python
BaseBlobService.get_blob_to_path
( self, container_name, blob_name, file_path, open_mode='wb', snapshot=None, start_range=None, end_range=None, validate_content=False, progress_callback=None, max_connections=2, lease_id=None, if_modified_since=None, if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, cpk=None)
return blob
Downloads a blob to a file path, with automatic chunking and progress notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with properties and metadata. :param str container_name: Name of existing container. :param str blob_name: Name of existing blob. :param str file_path: Path of file to write out to. :param str open_mode: Mode to use when opening the file. Note that specifying append only open_mode prevents parallel download. So, max_connections must be set to 1 if this open_mode is used. :param str snapshot: The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. :param int start_range: Start of byte range to use for downloading a section of the blob. If no end_range is given, all bytes after the start_range will be downloaded. The start_range and end_range params are inclusive. Ex: start_range=0, end_range=511 will download first 512 bytes of blob. :param int end_range: End of byte range to use for downloading a section of the blob. If end_range is given, start_range must be provided. The start_range and end_range params are inclusive. Ex: start_range=0, end_range=511 will download first 512 bytes of blob. :param bool validate_content: If set to true, validates an MD5 hash for each retrieved portion of the blob. This is primarily valuable for detecting bitflips on the wire if using http instead of https as https (the default) will already validate. Note that the service will only return transactional MD5s for chunks 4MB or less so the first get request will be of size self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be thrown. As computing the MD5 takes processing time and more requests will need to be done due to the reduced chunk size there may be some increase in latency. :param progress_callback: Callback for progress with signature function(current, total) where current is the number of bytes transfered so far, and total is the size of the blob if known. :type progress_callback: func(current, total) :param int max_connections: If set to 2 or greater, an initial get will be done for the first self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, the method returns at this point. If it is not, it will download the remaining data parallel using the number of threads equal to max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. If set to 1, a single large get request will be done. This is not generally recommended but available if very few threads should be used, network requests are very expensive, or a non-seekable stream prevents parallel download. This may also be useful if many blobs are expected to be empty as an extra request is required for empty blobs if max_connections is greater than 1. :param str lease_id: Required if the blob has an active lease. :param datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has been modified since the specified time. :param datetime if_unmodified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. 
Specify this header to perform the operation only if the resource has not been modified since the specified date/time. :param str if_match: An ETag value, or the wildcard character (*). Specify this header to perform the operation only if the resource's ETag matches the value specified. :param str if_none_match: An ETag value, or the wildcard character (*). Specify this header to perform the operation only if the resource's ETag does not match the value specified. Specify the wildcard character (*) to perform the operation only if the resource does not exist, and fail the operation if it does exist. :param ~azure.storage.blob.models.CustomerProvidedEncryptionKey cpk: Decrypts the data on the service-side with the given key. Use of customer-provided keys must be done over HTTPS. As the encryption key itself is provided in the request, a secure connection must be established to transfer the key. :param int timeout: The timeout parameter is expressed in seconds. This method may make multiple calls to the Azure service and the timeout will apply to each call individually. :return: A Blob with properties and metadata. If max_connections is greater than 1, the content_md5 (if set on the blob) will not be returned. If you require this value, either use get_blob_properties or set max_connections to 1. :rtype: :class:`~azure.storage.blob.models.Blob`
Downloads a blob to a file path, with automatic chunking and progress notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with properties and metadata.
[ "Downloads", "a", "blob", "to", "a", "file", "path", "with", "automatic", "chunking", "and", "progress", "notifications", ".", "Returns", "an", "instance", "of", ":", "class", ":", "~azure", ".", "storage", ".", "blob", ".", "models", ".", "Blob", "with", ...
def get_blob_to_path( self, container_name, blob_name, file_path, open_mode='wb', snapshot=None, start_range=None, end_range=None, validate_content=False, progress_callback=None, max_connections=2, lease_id=None, if_modified_since=None, if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, cpk=None): ''' Downloads a blob to a file path, with automatic chunking and progress notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with properties and metadata. :param str container_name: Name of existing container. :param str blob_name: Name of existing blob. :param str file_path: Path of file to write out to. :param str open_mode: Mode to use when opening the file. Note that specifying append only open_mode prevents parallel download. So, max_connections must be set to 1 if this open_mode is used. :param str snapshot: The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. :param int start_range: Start of byte range to use for downloading a section of the blob. If no end_range is given, all bytes after the start_range will be downloaded. The start_range and end_range params are inclusive. Ex: start_range=0, end_range=511 will download first 512 bytes of blob. :param int end_range: End of byte range to use for downloading a section of the blob. If end_range is given, start_range must be provided. The start_range and end_range params are inclusive. Ex: start_range=0, end_range=511 will download first 512 bytes of blob. :param bool validate_content: If set to true, validates an MD5 hash for each retrieved portion of the blob. This is primarily valuable for detecting bitflips on the wire if using http instead of https as https (the default) will already validate. Note that the service will only return transactional MD5s for chunks 4MB or less so the first get request will be of size self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be thrown. As computing the MD5 takes processing time and more requests will need to be done due to the reduced chunk size there may be some increase in latency. :param progress_callback: Callback for progress with signature function(current, total) where current is the number of bytes transfered so far, and total is the size of the blob if known. :type progress_callback: func(current, total) :param int max_connections: If set to 2 or greater, an initial get will be done for the first self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, the method returns at this point. If it is not, it will download the remaining data parallel using the number of threads equal to max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. If set to 1, a single large get request will be done. This is not generally recommended but available if very few threads should be used, network requests are very expensive, or a non-seekable stream prevents parallel download. This may also be useful if many blobs are expected to be empty as an extra request is required for empty blobs if max_connections is greater than 1. :param str lease_id: Required if the blob has an active lease. :param datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. 
Specify this header to perform the operation only if the resource has been modified since the specified time. :param datetime if_unmodified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has not been modified since the specified date/time. :param str if_match: An ETag value, or the wildcard character (*). Specify this header to perform the operation only if the resource's ETag matches the value specified. :param str if_none_match: An ETag value, or the wildcard character (*). Specify this header to perform the operation only if the resource's ETag does not match the value specified. Specify the wildcard character (*) to perform the operation only if the resource does not exist, and fail the operation if it does exist. :param ~azure.storage.blob.models.CustomerProvidedEncryptionKey cpk: Decrypts the data on the service-side with the given key. Use of customer-provided keys must be done over HTTPS. As the encryption key itself is provided in the request, a secure connection must be established to transfer the key. :param int timeout: The timeout parameter is expressed in seconds. This method may make multiple calls to the Azure service and the timeout will apply to each call individually. :return: A Blob with properties and metadata. If max_connections is greater than 1, the content_md5 (if set on the blob) will not be returned. If you require this value, either use get_blob_properties or set max_connections to 1. :rtype: :class:`~azure.storage.blob.models.Blob` ''' _validate_not_none('container_name', container_name) _validate_not_none('blob_name', blob_name) _validate_not_none('file_path', file_path) _validate_not_none('open_mode', open_mode) if max_connections > 1 and 'a' in open_mode: raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE) with open(file_path, open_mode) as stream: blob = self.get_blob_to_stream( container_name, blob_name, stream, snapshot, start_range, end_range, validate_content, progress_callback, max_connections, lease_id, if_modified_since, if_unmodified_since, if_match, if_none_match, timeout=timeout, cpk=cpk) return blob
[ "def", "get_blob_to_path", "(", "self", ",", "container_name", ",", "blob_name", ",", "file_path", ",", "open_mode", "=", "'wb'", ",", "snapshot", "=", "None", ",", "start_range", "=", "None", ",", "end_range", "=", "None", ",", "validate_content", "=", "Fal...
https://github.com/Azure/azure-storage-python/blob/4306898850dd21617644fc537a57d025e833db74/azure-storage-blob/azure/storage/blob/baseblobservice.py#L1889-L2016
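A minimal usage sketch for the get_blob_to_path record above, assuming the legacy azure-storage-blob (2.x) SDK where BlockBlobService inherits this method from BaseBlobService; the account credentials, container, blob and local path are placeholders.

from azure.storage.blob import BlockBlobService  # legacy (<= 2.x) SDK

def report_progress(current, total):
    # progress_callback signature from the docstring: bytes downloaded so far, total size if known
    print("downloaded {} of {} bytes".format(current, total))

# Placeholder credentials and names; replace with real values.
service = BlockBlobService(account_name="myaccount", account_key="<key>")
blob = service.get_blob_to_path(
    container_name="mycontainer",
    blob_name="large-dataset.bin",
    file_path="/tmp/large-dataset.bin",
    max_connections=4,                # chunked, parallel download
    progress_callback=report_progress,
)
print(blob.properties.content_length)  # content_md5 may be unset when max_connections > 1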
meduza-corp/interstellar
40a801ccd7856491726f5a126621d9318cabe2e1
gsutil/third_party/boto/boto/ec2/cloudwatch/__init__.py
python
CloudWatchConnection.put_metric_alarm
(self, alarm)
return self.get_status('PutMetricAlarm', params)
Creates or updates an alarm and associates it with the specified Amazon CloudWatch metric. Optionally, this operation can associate one or more Amazon Simple Notification Service resources with the alarm. When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is set appropriately. Any actions associated with the StateValue are then executed. When updating an existing alarm, its StateValue is left unchanged. :type alarm: boto.ec2.cloudwatch.alarm.MetricAlarm :param alarm: MetricAlarm object.
Creates or updates an alarm and associates it with the specified Amazon CloudWatch metric. Optionally, this operation can associate one or more Amazon Simple Notification Service resources with the alarm.
[ "Creates", "or", "updates", "an", "alarm", "and", "associates", "it", "with", "the", "specified", "Amazon", "CloudWatch", "metric", ".", "Optionally", "this", "operation", "can", "associate", "one", "or", "more", "Amazon", "Simple", "Notification", "Service", "r...
def put_metric_alarm(self, alarm): """ Creates or updates an alarm and associates it with the specified Amazon CloudWatch metric. Optionally, this operation can associate one or more Amazon Simple Notification Service resources with the alarm. When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is set appropriately. Any actions associated with the StateValue is then executed. When updating an existing alarm, its StateValue is left unchanged. :type alarm: boto.ec2.cloudwatch.alarm.MetricAlarm :param alarm: MetricAlarm object. """ params = { 'AlarmName': alarm.name, 'MetricName': alarm.metric, 'Namespace': alarm.namespace, 'Statistic': alarm.statistic, 'ComparisonOperator': alarm.comparison, 'Threshold': alarm.threshold, 'EvaluationPeriods': alarm.evaluation_periods, 'Period': alarm.period, } if alarm.actions_enabled is not None: params['ActionsEnabled'] = alarm.actions_enabled if alarm.alarm_actions: self.build_list_params(params, alarm.alarm_actions, 'AlarmActions.member.%s') if alarm.description: params['AlarmDescription'] = alarm.description if alarm.dimensions: self.build_dimension_param(alarm.dimensions, params) if alarm.insufficient_data_actions: self.build_list_params(params, alarm.insufficient_data_actions, 'InsufficientDataActions.member.%s') if alarm.ok_actions: self.build_list_params(params, alarm.ok_actions, 'OKActions.member.%s') if alarm.unit: params['Unit'] = alarm.unit alarm.connection = self return self.get_status('PutMetricAlarm', params)
[ "def", "put_metric_alarm", "(", "self", ",", "alarm", ")", ":", "params", "=", "{", "'AlarmName'", ":", "alarm", ".", "name", ",", "'MetricName'", ":", "alarm", ".", "metric", ",", "'Namespace'", ":", "alarm", ".", "namespace", ",", "'Statistic'", ":", "...
https://github.com/meduza-corp/interstellar/blob/40a801ccd7856491726f5a126621d9318cabe2e1/gsutil/third_party/boto/boto/ec2/cloudwatch/__init__.py#L484-L528
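A hedged usage sketch for the put_metric_alarm record above, assuming the legacy boto library (not boto3); the MetricAlarm keyword arguments, region, SNS topic ARN and instance id are illustrative placeholders.

import boto.ec2.cloudwatch
from boto.ec2.cloudwatch.alarm import MetricAlarm

conn = boto.ec2.cloudwatch.connect_to_region("us-east-1")

# Alarm on average CPU >= 80% over two 5-minute periods (illustrative values).
alarm = MetricAlarm(
    name="high-cpu",
    metric="CPUUtilization",
    namespace="AWS/EC2",
    statistic="Average",
    comparison=">=",
    threshold=80.0,
    period=300,
    evaluation_periods=2,
    alarm_actions=["arn:aws:sns:us-east-1:123456789012:ops-alerts"],
    dimensions={"InstanceId": "i-0123456789abcdef0"},
)
conn.put_metric_alarm(alarm)  # newly created alarms start in state INSUFFICIENT_DATA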
nettitude/scrounger
dd393666aa1ba1117d1c472cfdef4d0b18216904
scrounger/utils/general.py
python
pretty_multiline_grep
(needle, haystack, no_lines, after=True)
return findings
Returns a well formatted dict with the results of grepping the needle in the haystack :param str needle: the needle to look for - needs to be a regex :param str haystack: the haystack to look in :param int no_lines: number of lines to be displayed :param Bool after: True if lines to display are after or False if lines to display are before :return: a dict ordered by filename with a list of dict containing the finding and the line number
Returns a well formatted dict with the results of grepping the needle in the haystack
[ "Returns", "a", "well", "formatted", "dict", "with", "the", "results", "of", "grepping", "the", "needle", "in", "the", "haystack" ]
def pretty_multiline_grep(needle, haystack, no_lines, after=True): """ Returns a well formatted dict with the results of grepping the needle in the haystack :param str needle: the needle to look for - needs to be a regex :param str haystack: the haystack to look in :param int no_lines: number of lines to be displayed :param Bool after: True if lines to display are after or False if lines to display are before :return: a dict ordered by filename with a list of dict containing the finding and the line number """ additional_modifiers = "-{} {}".format("A" if after else "B", int(no_lines)) grep_result = grep(needle, haystack, "{} -arEin".format( additional_modifiers)) findings = {} for line in grep_result.split("\n"): # if line is blank or if it does not have the right format if not line or (line.count(":") < 2 and ( line.count("-") < 2 or len(line) < 5)): continue if line.count(":") < 2: filename, line_number, details = line.split("-", 2) else: filename, line_number, details = line.split(":", 2) # create a new list if filename not in findings if filename not in findings: findings[filename] = [] findings[filename].append({ "line": line_number.strip(), "details": details.strip() }) return findings
[ "def", "pretty_multiline_grep", "(", "needle", ",", "haystack", ",", "no_lines", ",", "after", "=", "True", ")", ":", "additional_modifiers", "=", "\"-{} {}\"", ".", "format", "(", "\"A\"", "if", "after", "else", "\"B\"", ",", "int", "(", "no_lines", ")", ...
https://github.com/nettitude/scrounger/blob/dd393666aa1ba1117d1c472cfdef4d0b18216904/scrounger/utils/general.py#L106-L146
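A short call sketch for the pretty_multiline_grep record above; the regex and directory are placeholders, and the loop simply walks the documented return shape ({filename: [{"line": ..., "details": ...}, ...]}).

from scrounger.utils.general import pretty_multiline_grep

# Placeholder needle and haystack; no_lines controls the grep -A/-B context size.
findings = pretty_multiline_grep(r"http://", "/path/to/decompiled/app", no_lines=2)
for filename, matches in findings.items():
    for match in matches:
        print("{}:{}: {}".format(filename, match["line"], match["details"]))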
JacquesLucke/animation_nodes
b1e3ace8dcb0a771fd882fc3ac4e490b009fa0d1
animation_nodes/tree_info/network.py
python
NodeNetwork.getNodes
(self, nodeByID = None)
[]
def getNodes(self, nodeByID = None): if nodeByID is None: return [idToNode(nodeID) for nodeID in self.nodeIDs] else: return [nodeByID[nodeID] for nodeID in self.nodeIDs]
[ "def", "getNodes", "(", "self", ",", "nodeByID", "=", "None", ")", ":", "if", "nodeByID", "is", "None", ":", "return", "[", "idToNode", "(", "nodeID", ")", "for", "nodeID", "in", "self", ".", "nodeIDs", "]", "else", ":", "return", "[", "nodeByID", "[...
https://github.com/JacquesLucke/animation_nodes/blob/b1e3ace8dcb0a771fd882fc3ac4e490b009fa0d1/animation_nodes/tree_info/network.py#L110-L114
pm4py/pm4py-core
7807b09a088b02199cd0149d724d0e28793971bf
pm4py/objects/petri/embed_stochastic_map.py
python
apply
(smap, parameters=None)
Embed the stochastic map into the Petri net Parameters --------------- smap Stochastic map parameters Possible parameters of the algorithm Returns --------------- void
Embed the stochastic map into the Petri net
[ "Embed", "the", "stochastic", "map", "into", "the", "Petri", "net" ]
def apply(smap, parameters=None): """ Embed the stochastic map into the Petri net Parameters --------------- smap Stochastic map parameters Possible parameters of the algorithm Returns --------------- void """ if parameters is None: parameters = {} for t in smap: t.properties[STOCHASTIC_DISTRIBUTION] = smap[t]
[ "def", "apply", "(", "smap", ",", "parameters", "=", "None", ")", ":", "if", "parameters", "is", "None", ":", "parameters", "=", "{", "}", "for", "t", "in", "smap", ":", "t", ".", "properties", "[", "STOCHASTIC_DISTRIBUTION", "]", "=", "smap", "[", "...
https://github.com/pm4py/pm4py-core/blob/7807b09a088b02199cd0149d724d0e28793971bf/pm4py/objects/petri/embed_stochastic_map.py#L20-L39
makerbot/ReplicatorG
d6f2b07785a5a5f1e172fb87cb4303b17c575d5d
skein_engines/skeinforge-35/fabmetheus_utilities/geometry/solids/trianglemesh.py
python
TriangleMesh.setCarveIsCorrectMesh
( self, isCorrectMesh )
Set the is correct mesh flag.
Set the is correct mesh flag.
[ "Set", "the", "is", "correct", "mesh", "flag", "." ]
def setCarveIsCorrectMesh( self, isCorrectMesh ): "Set the is correct mesh flag." self.isCorrectMesh = isCorrectMesh
[ "def", "setCarveIsCorrectMesh", "(", "self", ",", "isCorrectMesh", ")", ":", "self", ".", "isCorrectMesh", "=", "isCorrectMesh" ]
https://github.com/makerbot/ReplicatorG/blob/d6f2b07785a5a5f1e172fb87cb4303b17c575d5d/skein_engines/skeinforge-35/fabmetheus_utilities/geometry/solids/trianglemesh.py#L793-L795
marinho/geraldo
868ebdce67176d9b6205cddc92476f642c783fff
site/newsite/django_1_0/django/views/decorators/http.py
python
require_http_methods
(request_method_list)
return decorator
Decorator to make a view only accept particular request methods. Usage:: @require_http_methods(["GET", "POST"]) def my_view(request): # I can assume now that only GET or POST requests make it this far # ... Note that request methods should be in uppercase.
Decorator to make a view only accept particular request methods. Usage::
[ "Decorator", "to", "make", "a", "view", "only", "accept", "particular", "request", "methods", ".", "Usage", "::" ]
def require_http_methods(request_method_list): """ Decorator to make a view only accept particular request methods. Usage:: @require_http_methods(["GET", "POST"]) def my_view(request): # I can assume now that only GET or POST requests make it this far # ... Note that request methods should be in uppercase. """ def decorator(func): def inner(request, *args, **kwargs): if request.method not in request_method_list: return HttpResponseNotAllowed(request_method_list) return func(request, *args, **kwargs) return wraps(func)(inner) return decorator
[ "def", "require_http_methods", "(", "request_method_list", ")", ":", "def", "decorator", "(", "func", ")", ":", "def", "inner", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "request", ".", "method", "not", "in", "request_met...
https://github.com/marinho/geraldo/blob/868ebdce67176d9b6205cddc92476f642c783fff/site/newsite/django_1_0/django/views/decorators/http.py#L16-L33
chribsen/simple-machine-learning-examples
dc94e52a4cebdc8bb959ff88b81ff8cfeca25022
venv/lib/python2.7/site-packages/requests/packages/urllib3/connectionpool.py
python
HTTPConnectionPool._validate_conn
(self, conn)
Called right before a request is made, after the socket is created.
Called right before a request is made, after the socket is created.
[ "Called", "right", "before", "a", "request", "is", "made", "after", "the", "socket", "is", "created", "." ]
def _validate_conn(self, conn): """ Called right before a request is made, after the socket is created. """ pass
[ "def", "_validate_conn", "(", "self", ",", "conn", ")", ":", "pass" ]
https://github.com/chribsen/simple-machine-learning-examples/blob/dc94e52a4cebdc8bb959ff88b81ff8cfeca25022/venv/lib/python2.7/site-packages/requests/packages/urllib3/connectionpool.py#L287-L291
rotki/rotki
aafa446815cdd5e9477436d1b02bee7d01b398c8
rotkehlchen/exchanges/coinbase.py
python
trade_from_conversion
(trade_a: Dict[str, Any], trade_b: Dict[str, Any])
return Trade( timestamp=timestamp, location=Location.COINBASE, # in coinbase you are buying/selling tx_asset for native_asset base_asset=tx_asset, quote_asset=native_asset, trade_type=TradeType.SELL, amount=amount, rate=rate, fee=fee_amount, fee_currency=fee_asset, link=str(trade_a['trade']['id']), )
Turn information from a conversion into a trade May raise: - UnknownAsset due to Asset instantiation - DeserializationError due to unexpected format of dict entries - KeyError due to dict entries missing an expected entry
Turn information from a conversion into a trade
[ "Turn", "information", "from", "a", "conversion", "into", "a", "trade" ]
def trade_from_conversion(trade_a: Dict[str, Any], trade_b: Dict[str, Any]) -> Optional[Trade]: """Turn information from a conversion into a trade Mary raise: - UnknownAsset due to Asset instantiation - DeserializationError due to unexpected format of dict entries - KeyError due to dict entries missing an expected entry """ # Check that the status is complete if trade_a['status'] != 'completed': return None # Trade b will represent the asset we are converting to if trade_b['amount']['amount'].startswith('-'): trade_a, trade_b = trade_b, trade_a timestamp = deserialize_timestamp_from_date(trade_a['updated_at'], 'iso8601', 'coinbase') tx_amount = AssetAmount(abs(deserialize_asset_amount(trade_a['amount']['amount']))) tx_asset = asset_from_coinbase(trade_a['amount']['currency'], time=timestamp) native_amount = deserialize_asset_amount(trade_b['amount']['amount']) native_asset = asset_from_coinbase(trade_b['amount']['currency'], time=timestamp) amount = tx_amount # The rate is how much you get/give in quotecurrency if you buy/sell 1 unit of base currency rate = Price(native_amount / tx_amount) # Obtain fee amount in the native currency using data from both trades amount_after_fee = deserialize_asset_amount(trade_b['native_amount']['amount']) amount_before_fee = deserialize_asset_amount(trade_a['native_amount']['amount']) # amount_after_fee + amount_before_fee is a negative amount and the fee needs to be positive conversion_native_fee_amount = abs(amount_after_fee + amount_before_fee) if ZERO not in (tx_amount, conversion_native_fee_amount, amount_before_fee): # We have the fee amount in the native currency. To get it in the # converted asset we have to get the rate asset_native_rate = tx_amount / abs(amount_before_fee) fee_amount = Fee(conversion_native_fee_amount * asset_native_rate) else: fee_amount = Fee(ZERO) fee_asset = asset_from_coinbase(trade_a['amount']['currency'], time=timestamp) return Trade( timestamp=timestamp, location=Location.COINBASE, # in coinbase you are buying/selling tx_asset for native_asset base_asset=tx_asset, quote_asset=native_asset, trade_type=TradeType.SELL, amount=amount, rate=rate, fee=fee_amount, fee_currency=fee_asset, link=str(trade_a['trade']['id']), )
[ "def", "trade_from_conversion", "(", "trade_a", ":", "Dict", "[", "str", ",", "Any", "]", ",", "trade_b", ":", "Dict", "[", "str", ",", "Any", "]", ")", "->", "Optional", "[", "Trade", "]", ":", "# Check that the status is complete", "if", "trade_a", "[", ...
https://github.com/rotki/rotki/blob/aafa446815cdd5e9477436d1b02bee7d01b398c8/rotkehlchen/exchanges/coinbase.py#L105-L156
SPFlow/SPFlow
68ce7dac0f41bd3cf86ccb56555a29ef1368fe69
src/spn/structure/Base.py
python
eval_spn_bottom_up
(node, eval_functions, all_results=None, debug=False, **args)
return all_results[node]
Evaluates the spn bottom up :param node: spn root :param eval_functions: is a dictionary that contains k:Class of the node, v:lambda function that receives as parameters (node, args**) for leaf nodes and (node, [children results], args**) :param all_results: is a dictionary that contains k:Class of the node, v:result of the evaluation of the lambda function for that node. It is used to store intermediate results so that non-tree graphs can be computed in O(n) size of the network :param debug: whether to present progress information on the evaluation :param args: free parameters that will be fed to the lambda functions. :return: the result of computing and propagating all the values through the network
Evaluates the spn bottom up
[ "Evaluates", "the", "spn", "bottom", "up" ]
def eval_spn_bottom_up(node, eval_functions, all_results=None, debug=False, **args): """ Evaluates the spn bottom up :param node: spn root :param eval_functions: is a dictionary that contains k:Class of the node, v:lambda function that receives as parameters (node, args**) for leave nodes and (node, [children results], args**) :param all_results: is a dictionary that contains k:Class of the node, v:result of the evaluation of the lambda function for that node. It is used to store intermediate results so that non-tree graphs can be computed in O(n) size of the network :param debug: whether to present progress information on the evaluation :param args: free parameters that will be fed to the lambda functions. :return: the result of computing and propagating all the values throught the network """ nodes = get_topological_order(node) if debug: from tqdm import tqdm nodes = tqdm(list(nodes)) if all_results is None: all_results = {} else: all_results.clear() for node_type, func in eval_functions.items(): if "_eval_func" not in node_type.__dict__: node_type._eval_func = [] node_type._eval_func.append(func) node_type._is_leaf = issubclass(node_type, Leaf) leaf_func = eval_functions.get(Leaf, None) tmp_children_list = [] len_tmp_children_list = 0 for n in nodes: try: func = n.__class__._eval_func[-1] n_is_leaf = n.__class__._is_leaf except: if isinstance(n, Leaf) and leaf_func is not None: func = leaf_func n_is_leaf = True else: raise AssertionError("No lambda function associated with type: %s" % (n.__class__.__name__)) if n_is_leaf: result = func(n, **args) else: len_children = len(n.children) if len_tmp_children_list < len_children: tmp_children_list.extend([None] * len_children) len_tmp_children_list = len(tmp_children_list) for i in range(len_children): ci = n.children[i] tmp_children_list[i] = all_results[ci] result = func(n, tmp_children_list[0:len_children], **args) all_results[n] = result for node_type, func in eval_functions.items(): del node_type._eval_func[-1] if len(node_type._eval_func) == 0: delattr(node_type, "_eval_func") return all_results[node]
[ "def", "eval_spn_bottom_up", "(", "node", ",", "eval_functions", ",", "all_results", "=", "None", ",", "debug", "=", "False", ",", "*", "*", "args", ")", ":", "nodes", "=", "get_topological_order", "(", "node", ")", "if", "debug", ":", "from", "tqdm", "i...
https://github.com/SPFlow/SPFlow/blob/68ce7dac0f41bd3cf86ccb56555a29ef1368fe69/src/spn/structure/Base.py#L348-L413
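A hedged sketch of the dispatch pattern described in the eval_spn_bottom_up record above: one evaluation function per node class, children results passed bottom-up. The tiny structure built here and the helper names assign_ids / rebuild_scopes_bottom_up are assumptions about SPFlow's Base module, and the leaf-counting semantics are purely illustrative.

from spn.structure.Base import (Leaf, Product, Sum, assign_ids,
                                eval_spn_bottom_up, rebuild_scopes_bottom_up)

def count_leaf(node, **kwargs):
    # leaf functions receive (node, **args)
    return 1

def count_inner(node, children, **kwargs):
    # inner-node functions receive (node, [children results], **args)
    return sum(children)

# Tiny two-level structure purely for illustration.
spn = Sum(weights=[0.5, 0.5],
          children=[Leaf(scope=0),
                    Product(children=[Leaf(scope=0), Leaf(scope=1)])])
assign_ids(spn)
rebuild_scopes_bottom_up(spn)

eval_functions = {Sum: count_inner, Product: count_inner, Leaf: count_leaf}
print(eval_spn_bottom_up(spn, eval_functions))  # -> 3 leaves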
google-research/uda
960684e363251772a5938451d4d2bc0f1da9e24b
text/utils/tokenization.py
python
whitespace_tokenize
(text)
return tokens
Runs basic whitespace cleaning and splitting on a piece of text.
Runs basic whitespace cleaning and splitting on a piece of text.
[ "Runs", "basic", "whitespace", "cleaning", "and", "splitting", "on", "a", "peice", "of", "text", "." ]
def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a peice of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens
[ "def", "whitespace_tokenize", "(", "text", ")", ":", "text", "=", "text", ".", "strip", "(", ")", "if", "not", "text", ":", "return", "[", "]", "tokens", "=", "text", ".", "split", "(", ")", "return", "tokens" ]
https://github.com/google-research/uda/blob/960684e363251772a5938451d4d2bc0f1da9e24b/text/utils/tokenization.py#L56-L62
allenai/longformer
caefee668e39cacdece7dd603a0bebf24df6d8ca
scripts/triviaqa.py
python
TriviaQADataset._get_qid
(self, qid)
all input qids are formatted uniqueID__evidenceFile, but for wikipedia, qid = uniqueID, and for web, qid = uniqueID__evidenceFile. This function takes care of this conversion.
all input qids are formatted uniqueID__evidenceFile, but for wikipedia, qid = uniqueID, and for web, qid = uniqueID__evidenceFile. This function takes care of this conversion.
[ "all", "input", "qids", "are", "formatted", "uniqueID__evidenceFile", "but", "for", "wikipedia", "qid", "=", "uniqueID", "and", "for", "web", "qid", "=", "uniqueID__evidenceFile", ".", "This", "function", "takes", "care", "of", "this", "conversion", "." ]
def _get_qid(self, qid): """all input qids are formatted uniqueID__evidenceFile, but for wikipedia, qid = uniqueID, and for web, qid = uniqueID__evidenceFile. This function takes care of this conversion. """ if 'wikipedia' in self.file_path: # for evaluation on wikipedia, every question has one answer even if multiple evidence documents are given return qid.split('--')[0] elif 'web' in self.file_path: # for evaluation on web, every question/document pair have an answer return qid elif 'sample' in self.file_path: return qid else: raise RuntimeError('Unexpected filename')
[ "def", "_get_qid", "(", "self", ",", "qid", ")", ":", "if", "'wikipedia'", "in", "self", ".", "file_path", ":", "# for evaluation on wikipedia, every question has one answer even if multiple evidence documents are given", "return", "qid", ".", "split", "(", "'--'", ")", ...
https://github.com/allenai/longformer/blob/caefee668e39cacdece7dd603a0bebf24df6d8ca/scripts/triviaqa.py#L253-L266
hyperspy/hyperspy
1ffb3fab33e607045a37f30c1463350b72617e10
hyperspy/drawing/widget.py
python
ResizersMixin._get_resizer_pos
(self)
return positions
Get the positions of the resizer handles.
Get the positions of the resizer handles.
[ "Get", "the", "positions", "of", "the", "resizer", "handles", "." ]
def _get_resizer_pos(self): """Get the positions of the resizer handles. """ invtrans = self.ax.transData.inverted() border = self.border_thickness # Transform the border thickness into data values dl = np.abs(invtrans.transform((border, border)) - invtrans.transform((0, 0))) / 2 rsize = self._get_resizer_size() xs, ys = self._size positions = [] rp = np.array(self._get_patch_xy()) p = rp - rsize + dl # Top left positions.append(p) p = rp + (xs - dl[0], -rsize[1] + dl[1]) # Top right positions.append(p) p = rp + (-rsize[0] + dl[0], ys - dl[1]) # Bottom left positions.append(p) p = rp + (xs - dl[0], ys - dl[1]) # Bottom right positions.append(p) return positions
[ "def", "_get_resizer_pos", "(", "self", ")", ":", "invtrans", "=", "self", ".", "ax", ".", "transData", ".", "inverted", "(", ")", "border", "=", "self", ".", "border_thickness", "# Transform the border thickness into data values", "dl", "=", "np", ".", "abs", ...
https://github.com/hyperspy/hyperspy/blob/1ffb3fab33e607045a37f30c1463350b72617e10/hyperspy/drawing/widget.py#L928-L949
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/zmq/log/handlers.py
python
PUBHandler.format
(self,record)
return self.formatters[record.levelno].format(record)
Format a record.
Format a record.
[ "Format", "a", "record", "." ]
def format(self,record): """Format a record.""" return self.formatters[record.levelno].format(record)
[ "def", "format", "(", "self", ",", "record", ")", ":", "return", "self", ".", "formatters", "[", "record", ".", "levelno", "]", ".", "format", "(", "record", ")" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/zmq/log/handlers.py#L83-L85
enthought/traitsui
b7c38c7a47bf6ae7971f9ddab70c8a358647dd25
traitsui/qt4/range_editor.py
python
LargeRangeSliderEditor.reduce_range
(self)
Reduces the extent of the displayed range.
Reduces the extent of the displayed range.
[ "Reduces", "the", "extent", "of", "the", "displayed", "range", "." ]
def reduce_range(self): """Reduces the extent of the displayed range.""" low, high = self.low, self.high if abs(self.cur_low) < 10: self.cur_low = max(-10, low) self.cur_high = min(10, high) elif self.cur_low > 0: self.cur_high = self.cur_low self.cur_low = max(low, self.cur_low / 10) else: self.cur_high = self.cur_low self.cur_low = max(low, self.cur_low * 10) self.ui_changing = True self.value = min(max(self.value, self.cur_low), self.cur_high) self.ui_changing = False self.update_range_ui()
[ "def", "reduce_range", "(", "self", ")", ":", "low", ",", "high", "=", "self", ".", "low", ",", "self", ".", "high", "if", "abs", "(", "self", ".", "cur_low", ")", "<", "10", ":", "self", ".", "cur_low", "=", "max", "(", "-", "10", ",", "low", ...
https://github.com/enthought/traitsui/blob/b7c38c7a47bf6ae7971f9ddab70c8a358647dd25/traitsui/qt4/range_editor.py#L515-L531
pgq/skytools-legacy
8b7e6c118572a605d28b7a3403c96aeecfd0d272
python/londiste/handlers/dispatch.py
python
BaseBulkCollectingLoader.flush
(self, curs)
[]
def flush(self, curs): op_map = self.collect_data() self.bulk_flush(curs, op_map)
[ "def", "flush", "(", "self", ",", "curs", ")", ":", "op_map", "=", "self", ".", "collect_data", "(", ")", "self", ".", "bulk_flush", "(", "curs", ",", "op_map", ")" ]
https://github.com/pgq/skytools-legacy/blob/8b7e6c118572a605d28b7a3403c96aeecfd0d272/python/londiste/handlers/dispatch.py#L296-L298
oracle/oci-python-sdk
3c1604e4e212008fb6718e2f68cdb5ef71fd5793
src/oci/_vendor/idna/uts46data.py
python
_seg_24
()
return [ (0x24BA, 'M', u'e'), (0x24BB, 'M', u'f'), (0x24BC, 'M', u'g'), (0x24BD, 'M', u'h'), (0x24BE, 'M', u'i'), (0x24BF, 'M', u'j'), (0x24C0, 'M', u'k'), (0x24C1, 'M', u'l'), (0x24C2, 'M', u'm'), (0x24C3, 'M', u'n'), (0x24C4, 'M', u'o'), (0x24C5, 'M', u'p'), (0x24C6, 'M', u'q'), (0x24C7, 'M', u'r'), (0x24C8, 'M', u's'), (0x24C9, 'M', u't'), (0x24CA, 'M', u'u'), (0x24CB, 'M', u'v'), (0x24CC, 'M', u'w'), (0x24CD, 'M', u'x'), (0x24CE, 'M', u'y'), (0x24CF, 'M', u'z'), (0x24D0, 'M', u'a'), (0x24D1, 'M', u'b'), (0x24D2, 'M', u'c'), (0x24D3, 'M', u'd'), (0x24D4, 'M', u'e'), (0x24D5, 'M', u'f'), (0x24D6, 'M', u'g'), (0x24D7, 'M', u'h'), (0x24D8, 'M', u'i'), (0x24D9, 'M', u'j'), (0x24DA, 'M', u'k'), (0x24DB, 'M', u'l'), (0x24DC, 'M', u'm'), (0x24DD, 'M', u'n'), (0x24DE, 'M', u'o'), (0x24DF, 'M', u'p'), (0x24E0, 'M', u'q'), (0x24E1, 'M', u'r'), (0x24E2, 'M', u's'), (0x24E3, 'M', u't'), (0x24E4, 'M', u'u'), (0x24E5, 'M', u'v'), (0x24E6, 'M', u'w'), (0x24E7, 'M', u'x'), (0x24E8, 'M', u'y'), (0x24E9, 'M', u'z'), (0x24EA, 'M', u'0'), (0x24EB, 'V'), (0x2A0C, 'M', u'∫∫∫∫'), (0x2A0D, 'V'), (0x2A74, '3', u'::='), (0x2A75, '3', u'=='), (0x2A76, '3', u'==='), (0x2A77, 'V'), (0x2ADC, 'M', u'⫝̸'), (0x2ADD, 'V'), (0x2B74, 'X'), (0x2B76, 'V'), (0x2B96, 'X'), (0x2B97, 'V'), (0x2C00, 'M', u'ⰰ'), (0x2C01, 'M', u'ⰱ'), (0x2C02, 'M', u'ⰲ'), (0x2C03, 'M', u'ⰳ'), (0x2C04, 'M', u'ⰴ'), (0x2C05, 'M', u'ⰵ'), (0x2C06, 'M', u'ⰶ'), (0x2C07, 'M', u'ⰷ'), (0x2C08, 'M', u'ⰸ'), (0x2C09, 'M', u'ⰹ'), (0x2C0A, 'M', u'ⰺ'), (0x2C0B, 'M', u'ⰻ'), (0x2C0C, 'M', u'ⰼ'), (0x2C0D, 'M', u'ⰽ'), (0x2C0E, 'M', u'ⰾ'), (0x2C0F, 'M', u'ⰿ'), (0x2C10, 'M', u'ⱀ'), (0x2C11, 'M', u'ⱁ'), (0x2C12, 'M', u'ⱂ'), (0x2C13, 'M', u'ⱃ'), (0x2C14, 'M', u'ⱄ'), (0x2C15, 'M', u'ⱅ'), (0x2C16, 'M', u'ⱆ'), (0x2C17, 'M', u'ⱇ'), (0x2C18, 'M', u'ⱈ'), (0x2C19, 'M', u'ⱉ'), (0x2C1A, 'M', u'ⱊ'), (0x2C1B, 'M', u'ⱋ'), (0x2C1C, 'M', u'ⱌ'), (0x2C1D, 'M', u'ⱍ'), (0x2C1E, 'M', u'ⱎ'), (0x2C1F, 'M', u'ⱏ'), (0x2C20, 'M', u'ⱐ'), (0x2C21, 'M', u'ⱑ'), (0x2C22, 'M', u'ⱒ'), (0x2C23, 'M', u'ⱓ'), (0x2C24, 'M', u'ⱔ'), (0x2C25, 'M', u'ⱕ'), ]
[]
def _seg_24(): return [ (0x24BA, 'M', u'e'), (0x24BB, 'M', u'f'), (0x24BC, 'M', u'g'), (0x24BD, 'M', u'h'), (0x24BE, 'M', u'i'), (0x24BF, 'M', u'j'), (0x24C0, 'M', u'k'), (0x24C1, 'M', u'l'), (0x24C2, 'M', u'm'), (0x24C3, 'M', u'n'), (0x24C4, 'M', u'o'), (0x24C5, 'M', u'p'), (0x24C6, 'M', u'q'), (0x24C7, 'M', u'r'), (0x24C8, 'M', u's'), (0x24C9, 'M', u't'), (0x24CA, 'M', u'u'), (0x24CB, 'M', u'v'), (0x24CC, 'M', u'w'), (0x24CD, 'M', u'x'), (0x24CE, 'M', u'y'), (0x24CF, 'M', u'z'), (0x24D0, 'M', u'a'), (0x24D1, 'M', u'b'), (0x24D2, 'M', u'c'), (0x24D3, 'M', u'd'), (0x24D4, 'M', u'e'), (0x24D5, 'M', u'f'), (0x24D6, 'M', u'g'), (0x24D7, 'M', u'h'), (0x24D8, 'M', u'i'), (0x24D9, 'M', u'j'), (0x24DA, 'M', u'k'), (0x24DB, 'M', u'l'), (0x24DC, 'M', u'm'), (0x24DD, 'M', u'n'), (0x24DE, 'M', u'o'), (0x24DF, 'M', u'p'), (0x24E0, 'M', u'q'), (0x24E1, 'M', u'r'), (0x24E2, 'M', u's'), (0x24E3, 'M', u't'), (0x24E4, 'M', u'u'), (0x24E5, 'M', u'v'), (0x24E6, 'M', u'w'), (0x24E7, 'M', u'x'), (0x24E8, 'M', u'y'), (0x24E9, 'M', u'z'), (0x24EA, 'M', u'0'), (0x24EB, 'V'), (0x2A0C, 'M', u'∫∫∫∫'), (0x2A0D, 'V'), (0x2A74, '3', u'::='), (0x2A75, '3', u'=='), (0x2A76, '3', u'==='), (0x2A77, 'V'), (0x2ADC, 'M', u'⫝̸'), (0x2ADD, 'V'), (0x2B74, 'X'), (0x2B76, 'V'), (0x2B96, 'X'), (0x2B97, 'V'), (0x2C00, 'M', u'ⰰ'), (0x2C01, 'M', u'ⰱ'), (0x2C02, 'M', u'ⰲ'), (0x2C03, 'M', u'ⰳ'), (0x2C04, 'M', u'ⰴ'), (0x2C05, 'M', u'ⰵ'), (0x2C06, 'M', u'ⰶ'), (0x2C07, 'M', u'ⰷ'), (0x2C08, 'M', u'ⰸ'), (0x2C09, 'M', u'ⰹ'), (0x2C0A, 'M', u'ⰺ'), (0x2C0B, 'M', u'ⰻ'), (0x2C0C, 'M', u'ⰼ'), (0x2C0D, 'M', u'ⰽ'), (0x2C0E, 'M', u'ⰾ'), (0x2C0F, 'M', u'ⰿ'), (0x2C10, 'M', u'ⱀ'), (0x2C11, 'M', u'ⱁ'), (0x2C12, 'M', u'ⱂ'), (0x2C13, 'M', u'ⱃ'), (0x2C14, 'M', u'ⱄ'), (0x2C15, 'M', u'ⱅ'), (0x2C16, 'M', u'ⱆ'), (0x2C17, 'M', u'ⱇ'), (0x2C18, 'M', u'ⱈ'), (0x2C19, 'M', u'ⱉ'), (0x2C1A, 'M', u'ⱊ'), (0x2C1B, 'M', u'ⱋ'), (0x2C1C, 'M', u'ⱌ'), (0x2C1D, 'M', u'ⱍ'), (0x2C1E, 'M', u'ⱎ'), (0x2C1F, 'M', u'ⱏ'), (0x2C20, 'M', u'ⱐ'), (0x2C21, 'M', u'ⱑ'), (0x2C22, 'M', u'ⱒ'), (0x2C23, 'M', u'ⱓ'), (0x2C24, 'M', u'ⱔ'), (0x2C25, 'M', u'ⱕ'), ]
[ "def", "_seg_24", "(", ")", ":", "return", "[", "(", "0x24BA", ",", "'M'", ",", "u'e'", ")", ",", "(", "0x24BB", ",", "'M'", ",", "u'f'", ")", ",", "(", "0x24BC", ",", "'M'", ",", "u'g'", ")", ",", "(", "0x24BD", ",", "'M'", ",", "u'h'", ")",...
https://github.com/oracle/oci-python-sdk/blob/3c1604e4e212008fb6718e2f68cdb5ef71fd5793/src/oci/_vendor/idna/uts46data.py#L2509-L2611
sqlmapproject/sqlmap
3b07b70864624dff4c29dcaa8a61c78e7f9189f7
lib/core/agent.py
python
Agent.suffixQuery
(self, expression, comment=None, suffix=None, where=None, trimEmpty=True)
return re.sub(r";\W*;", ";", expression) if trimEmpty else expression
This method appends the DBMS comment to the SQL injection request
This method appends the DBMS comment to the SQL injection request
[ "This", "method", "appends", "the", "DBMS", "comment", "to", "the", "SQL", "injection", "request" ]
def suffixQuery(self, expression, comment=None, suffix=None, where=None, trimEmpty=True): """ This method appends the DBMS comment to the SQL injection request """ if conf.direct: return self.payloadDirect(expression) if expression is None: return None expression = self.cleanupPayload(expression) # Take default values if None suffix = kb.injection.suffix if kb.injection and suffix is None else suffix if getTechnique() is not None and getTechnique() in kb.injection.data: where = getTechniqueData().where if where is None else where comment = getTechniqueData().comment if comment is None else comment if any((comment or "").startswith(_) for _ in ("--", GENERIC_SQL_COMMENT_MARKER)): if Backend.getIdentifiedDbms() and not GENERIC_SQL_COMMENT.startswith(queries[Backend.getIdentifiedDbms()].comment.query): comment = queries[Backend.getIdentifiedDbms()].comment.query if comment is not None: expression += comment # If we are replacing (<where>) the parameter original value with # our payload do not append the suffix if where == PAYLOAD.WHERE.REPLACE and not conf.suffix: pass elif suffix and not comment: if re.search(r"\w\Z", expression) and re.search(r"\A\w", suffix): expression += " " expression += suffix.replace('\\', BOUNDARY_BACKSLASH_MARKER) return re.sub(r";\W*;", ";", expression) if trimEmpty else expression
[ "def", "suffixQuery", "(", "self", ",", "expression", ",", "comment", "=", "None", ",", "suffix", "=", "None", ",", "where", "=", "None", ",", "trimEmpty", "=", "True", ")", ":", "if", "conf", ".", "direct", ":", "return", "self", ".", "payloadDirect",...
https://github.com/sqlmapproject/sqlmap/blob/3b07b70864624dff4c29dcaa8a61c78e7f9189f7/lib/core/agent.py#L301-L340
tuckerbalch/QSTK
4981506c37227a72404229d5e1e0887f797a5d57
qstkutil/DataAccess.py
python
DataAccess.get_info
(self)
return retstr
@summary: Returns and prints a string that describes the datastore. @return: A string.
[]
def get_info (self): ''' @summary: Returns and prints a string that describes the datastore. @return: A string. ''' if (self.source == DataSource.NORGATE): retstr = "Norgate:\n" retstr = retstr + "Daily price and volume data from Norgate (premiumdata.net)\n" retstr = retstr + "that is valid at the time of NYSE close each trading day.\n" retstr = retstr + "\n" retstr = retstr + "Valid data items include: \n" retstr = retstr + "\topen, high, low, close, volume, actual_close\n" retstr = retstr + "\n" retstr = retstr + "Valid subdirs include: \n" for i in self.folderSubList: retstr = retstr + "\t" + i + "\n" elif (self.source == DataSource.YAHOO): retstr = "Yahoo:\n" retstr = retstr + "To be completed by Shreyas\n" retstr = retstr + "Valid data items include: \n" retstr = retstr + "\topen, high, low, close, volume, actual_close\n" retstr = retstr + "\n" retstr = retstr + "Valid subdirs include: \n" for i in self.folderSubList: retstr = retstr + "\t" + i + "\n" elif (self.source == DataSource.COMPUSTAT): retstr = "Compustat:\n" retstr = retstr + "Compilation of (almost) all data items provided by Compustat\n" retstr = retstr + "Valid data items can be retrieved by calling get_data_labels(): \n" retstr = retstr + "\n" retstr = retstr + "Valid subdirs include: \n" for i in self.folderSubList: retstr = retstr + "\t" + i + "\n" elif (self.source == DataSource.CUSTOM): retstr = "Custom:\n" retstr = retstr + "Attempts to load a custom data set, assuming each stock has\n" retstr = retstr + "a csv file with the name and first column as the stock ticker, date in second column, and data in following columns.\n" retstr = retstr + "everything should be located in QSDATA/Processed/Custom\n" elif (self.source == DataSource.MLT): retstr = "ML4Trading:\n" retstr = retstr + "Attempts to load a custom data set, assuming each stock has\n" retstr = retstr + "a csv file with the name and first column as the stock ticker, date in second column, and data in following columns.\n" retstr = retstr + "everything should be located in QSDATA/Processed/ML4Trading\n" else: retstr = "DataAccess internal error\n" print retstr return retstr
[ "def", "get_info", "(", "self", ")", ":", "if", "(", "self", ".", "source", "==", "DataSource", ".", "NORGATE", ")", ":", "retstr", "=", "\"Norgate:\\n\"", "retstr", "=", "retstr", "+", "\"Daily price and volume data from Norgate (premiumdata.net)\\n\"", "retstr", ...
https://github.com/tuckerbalch/QSTK/blob/4981506c37227a72404229d5e1e0887f797a5d57/qstkutil/DataAccess.py#L652-L702
PyThaiNLP/pythainlp
de38b8507bf0934540aa5094e5f7f57d7f67e2dc
pythainlp/tokenize/core.py
python
sent_tokenize
( text: str, engine: str = DEFAULT_SENT_TOKENIZE_ENGINE, keep_whitespace: bool = True, )
return segments
Sentence tokenizer. Tokenizes running text into "sentences" :param str text: the text to be tokenized :param str engine: choose among *'crfcut'*, *'whitespace'*, \ *'whitespace+newline'* :return: list of splited sentences :rtype: list[str] **Options for engine** * *crfcut* - (default) split by CRF trained on TED dataset * *whitespace+newline* - split by whitespaces and newline. * *whitespace* - split by whitespaces. Specifiaclly, with \ :class:`regex` pattern ``r" +"`` * *tltk* - split by `TLTK <https://pypi.org/project/tltk/>`_., :Example: Split the text based on *whitespace*:: from pythainlp.tokenize import sent_tokenize sentence_1 = "ฉันไปประชุมเมื่อวันที่ 11 มีนาคม" sentence_2 = "ข้าราชการได้รับการหมุนเวียนเป็นระยะ \\ และได้รับมอบหมายให้ประจำในระดับภูมิภาค" sent_tokenize(sentence_1, engine="whitespace") # output: ['ฉันไปประชุมเมื่อวันที่', '11', 'มีนาคม'] sent_tokenize(sentence_2, engine="whitespace") # output: ['ข้าราชการได้รับการหมุนเวียนเป็นระยะ', # '\\nและได้รับมอบหมายให้ประจำในระดับภูมิภาค'] Split the text based on *whitespace* and *newline*:: sentence_1 = "ฉันไปประชุมเมื่อวันที่ 11 มีนาคม" sentence_2 = "ข้าราชการได้รับการหมุนเวียนเป็นระยะ \\ และได้รับมอบหมายให้ประจำในระดับภูมิภาค" sent_tokenize(sentence_1, engine="whitespace+newline") # output: ['ฉันไปประชุมเมื่อวันที่', '11', 'มีนาคม'] sent_tokenize(sentence_2, engine="whitespace+newline") # output: ['ข้าราชการได้รับการหมุนเวียนเป็นระยะ', '\\nและได้รับมอบหมายให้ประจำในระดับภูมิภาค'] Split the text using CRF trained on TED dataset:: sentence_1 = "ฉันไปประชุมเมื่อวันที่ 11 มีนาคม" sentence_2 = "ข้าราชการได้รับการหมุนเวียนเป็นระยะ \\ และเขาได้รับมอบหมายให้ประจำในระดับภูมิภาค" sent_tokenize(sentence_1, engine="crfcut") # output: ['ฉันไปประชุมเมื่อวันที่ 11 มีนาคม'] sent_tokenize(sentence_2, engine="crfcut") # output: ['ข้าราชการได้รับการหมุนเวียนเป็นระยะ ', 'และเขาได้รับมอบหมายให้ประจำในระดับภูมิภาค']
Sentence tokenizer.
[ "Sentence", "tokenizer", "." ]
def sent_tokenize( text: str, engine: str = DEFAULT_SENT_TOKENIZE_ENGINE, keep_whitespace: bool = True, ) -> List[str]: """ Sentence tokenizer. Tokenizes running text into "sentences" :param str text: the text to be tokenized :param str engine: choose among *'crfcut'*, *'whitespace'*, \ *'whitespace+newline'* :return: list of splited sentences :rtype: list[str] **Options for engine** * *crfcut* - (default) split by CRF trained on TED dataset * *whitespace+newline* - split by whitespaces and newline. * *whitespace* - split by whitespaces. Specifiaclly, with \ :class:`regex` pattern ``r" +"`` * *tltk* - split by `TLTK <https://pypi.org/project/tltk/>`_., :Example: Split the text based on *whitespace*:: from pythainlp.tokenize import sent_tokenize sentence_1 = "ฉันไปประชุมเมื่อวันที่ 11 มีนาคม" sentence_2 = "ข้าราชการได้รับการหมุนเวียนเป็นระยะ \\ และได้รับมอบหมายให้ประจำในระดับภูมิภาค" sent_tokenize(sentence_1, engine="whitespace") # output: ['ฉันไปประชุมเมื่อวันที่', '11', 'มีนาคม'] sent_tokenize(sentence_2, engine="whitespace") # output: ['ข้าราชการได้รับการหมุนเวียนเป็นระยะ', # '\\nและได้รับมอบหมายให้ประจำในระดับภูมิภาค'] Split the text based on *whitespace* and *newline*:: sentence_1 = "ฉันไปประชุมเมื่อวันที่ 11 มีนาคม" sentence_2 = "ข้าราชการได้รับการหมุนเวียนเป็นระยะ \\ และได้รับมอบหมายให้ประจำในระดับภูมิภาค" sent_tokenize(sentence_1, engine="whitespace+newline") # output: ['ฉันไปประชุมเมื่อวันที่', '11', 'มีนาคม'] sent_tokenize(sentence_2, engine="whitespace+newline") # output: ['ข้าราชการได้รับการหมุนเวียนเป็นระยะ', '\\nและได้รับมอบหมายให้ประจำในระดับภูมิภาค'] Split the text using CRF trained on TED dataset:: sentence_1 = "ฉันไปประชุมเมื่อวันที่ 11 มีนาคม" sentence_2 = "ข้าราชการได้รับการหมุนเวียนเป็นระยะ \\ และเขาได้รับมอบหมายให้ประจำในระดับภูมิภาค" sent_tokenize(sentence_1, engine="crfcut") # output: ['ฉันไปประชุมเมื่อวันที่ 11 มีนาคม'] sent_tokenize(sentence_2, engine="crfcut") # output: ['ข้าราชการได้รับการหมุนเวียนเป็นระยะ ', 'และเขาได้รับมอบหมายให้ประจำในระดับภูมิภาค'] """ if not text or not isinstance(text, str): return [] segments = [] if engine == "crfcut": from pythainlp.tokenize.crfcut import segment segments = segment(text) elif engine == "whitespace": segments = re.split(r" +", text, re.U) elif engine == "whitespace+newline": segments = text.split() elif engine == "tltk": from pythainlp.tokenize.tltk import sent_tokenize as segment segments = segment(text) else: raise ValueError( f"""Tokenizer \"{engine}\" not found. It might be a typo; if not, please consult our document.""" ) if not keep_whitespace: segments = [token.strip(" ") for token in segments if token.strip(" ")] return segments
[ "def", "sent_tokenize", "(", "text", ":", "str", ",", "engine", ":", "str", "=", "DEFAULT_SENT_TOKENIZE_ENGINE", ",", "keep_whitespace", ":", "bool", "=", "True", ",", ")", "->", "List", "[", "str", "]", ":", "if", "not", "text", "or", "not", "isinstance...
https://github.com/PyThaiNLP/pythainlp/blob/de38b8507bf0934540aa5094e5f7f57d7f67e2dc/pythainlp/tokenize/core.py#L221-L311
iniqua/plecost
ef0d89bfdf1ef870bd11b1d8bdf93a8ce9ec6ca0
plecost_lib/libs/helpers.py
python
is_remote_a_wordpress
(base_url, error_page, downloader)
This function checks if the remote host contains a WordPress installation. :param base_url: Base url :type base_url: basestring :param error_page: error page content :type error_page: basestring :param downloader: download function. This function must accept only one parameter: the URL :type downloader: function :return: True if target contains WordPress installation. False otherwise. :rtype: bool
This function checks if the remote host contains a WordPress installation.
[ "This", "functions", "checks", "if", "remote", "host", "contains", "a", "WordPress", "installation", "." ]
def is_remote_a_wordpress(base_url, error_page, downloader): """ This functions checks if remote host contains a WordPress installation. :param base_url: Base url :type base_url: basestring :param error_page: error page content :type error_page: basestring :param downloader: download function. This function must accept only one parameter: the URL :type downloader: function :return: True if target contains WordPress installation. False otherwise. :rtype: bool """ total_urls = 0 urls_found = 0 for url in update_progress(get_wordlist("wordpress_detection.txt"), prefix_text=" "): total_urls += 1 # Fix the url for urljoin path = url[1:] if url.startswith("/") else url headers, status, content = yield from downloader(urljoin(base_url, path)) if status == 200: # Try to detect non-default error pages ratio = get_diff_ratio(content, error_page) if ratio < 0.35: urls_found += 1 # Check that al least the 85% of typical Wordpress links exits if (urls_found / float(total_urls)) < 0.85: # # If the Wordpress Admin site is found --> Is a Wordpress Site # _headers, _status, _content = yield from downloader(urljoin(base_url, "/wp-admin/")) if _status == 302 and "wp-login.php?redirect_to=" in _headers.get("location", ""): return True elif _status == 301 and "/wp-admin/" in _headers.get("location", ""): return True elif _status == 200: return True else: # # Try to find wordpress Links # _, _, _content = yield from downloader(base_url) if _content: _web_links = re.findall(REGEX_FIND_CSS_SCRIPT_LINKS, _content) is_wp_content = any("/wp-content/" in x[3] for x in _web_links) is_wp_includes = any("/wp-includes/" in x[3] for x in _web_links) if is_wp_content or is_wp_includes: return True else: return False # No content else: return False else: return True
[ "def", "is_remote_a_wordpress", "(", "base_url", ",", "error_page", ",", "downloader", ")", ":", "total_urls", "=", "0", "urls_found", "=", "0", "for", "url", "in", "update_progress", "(", "get_wordlist", "(", "\"wordpress_detection.txt\"", ")", ",", "prefix_text"...
https://github.com/iniqua/plecost/blob/ef0d89bfdf1ef870bd11b1d8bdf93a8ce9ec6ca0/plecost_lib/libs/helpers.py#L61-L132
CellProfiler/CellProfiler
a90e17e4d258c6f3900238be0f828e0b4bd1b293
cellprofiler/modules/exporttodatabase.py
python
ExportToDatabase.make_full_filename
(self, file_name, workspace=None, image_set_index=None)
return os.path.join(path, file)
Convert a file name into an absolute path We do a few things here: * apply metadata from an image set to the file name if an image set is specified * change the relative path into an absolute one using the "." and "&" convention * Create any directories along the path
Convert a file name into an absolute path
[ "Convert", "a", "file", "name", "into", "an", "absolute", "path" ]
def make_full_filename(self, file_name, workspace=None, image_set_index=None): """Convert a file name into an absolute path We do a few things here: * apply metadata from an image set to the file name if an image set is specified * change the relative path into an absolute one using the "." and "&" convention * Create any directories along the path """ if image_set_index is not None and workspace is not None: file_name = workspace.measurements.apply_metadata( file_name, image_set_index ) measurements = None if workspace is None else workspace.measurements path_name = self.directory.get_absolute_path(measurements, image_set_index) file_name = os.path.join(path_name, file_name) path, file = os.path.split(file_name) if not os.path.isdir(path): os.makedirs(path) return os.path.join(path, file)
[ "def", "make_full_filename", "(", "self", ",", "file_name", ",", "workspace", "=", "None", ",", "image_set_index", "=", "None", ")", ":", "if", "image_set_index", "is", "not", "None", "and", "workspace", "is", "not", "None", ":", "file_name", "=", "workspace...
https://github.com/CellProfiler/CellProfiler/blob/a90e17e4d258c6f3900238be0f828e0b4bd1b293/cellprofiler/modules/exporttodatabase.py#L2311-L2331
WyAtu/CVE-2018-20250
5a2df20fe3d28137ab4553153294abfb3c059be9
acefile.py
python
AceArchive.getmember
(self, member)
Return an :class:`AceMember` object corresponding to archive member *member*. Raise :class:`KeyError` or :class:`IndexError` if *member* is not found in archive. *Member* can refer to an :class:`AceMember` object, a member name or an index into the archive member list. If *member* is a name and it occurs multiple times in the archive, then the last member with matching filename is returned.
Return an :class:`AceMember` object corresponding to archive member *member*. Raise :class:`KeyError` or :class:`IndexError` if *member* is not found in archive. *Member* can refer to an :class:`AceMember` object, a member name or an index into the archive member list. If *member* is a name and it occurs multiple times in the archive, then the last member with matching filename is returned.
[ "Return", "an", ":", "class", ":", "AceMember", "object", "corresponding", "to", "archive", "member", "*", "member", "*", ".", "Raise", ":", "class", ":", "KeyError", "or", ":", "class", ":", "IndexError", "if", "*", "member", "*", "is", "not", "found", ...
def getmember(self, member): """ Return an :class:`AceMember` object corresponding to archive member *member*. Raise :class:`KeyError` or :class:`IndexError` if *member* is not found in archive. *Member* can refer to an :class:`AceMember` object, a member name or an index into the archive member list. If *member* is a name and it occurs multiple times in the archive, then the last member with matching filename is returned. """ if isinstance(member, int): return self._getmember_byidx(member) elif isinstance(member, AceMember): return member elif isinstance(member, str): return self._getmember_byname(member) else: raise TypeError("member argument has unsupported type")
[ "def", "getmember", "(", "self", ",", "member", ")", ":", "if", "isinstance", "(", "member", ",", "int", ")", ":", "return", "self", ".", "_getmember_byidx", "(", "member", ")", "elif", "isinstance", "(", "member", ",", "AceMember", ")", ":", "return", ...
https://github.com/WyAtu/CVE-2018-20250/blob/5a2df20fe3d28137ab4553153294abfb3c059be9/acefile.py#L3399-L3417
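A hedged usage sketch for the getmember record above, assuming a module-level acefile.open() helper and tarfile/zipfile-style member attributes (filename, size); the archive path and member name are placeholders.

import acefile

# Placeholder archive path and member name; attribute names are assumptions.
with acefile.open("samples/archive.ace") as archive:
    by_name = archive.getmember("docs/readme.txt")   # by name: last matching member wins
    by_index = archive.getmember(0)                  # by index into the member list
    same = archive.getmember(by_index)               # AceMember objects pass through unchanged
    print(by_name.filename, by_name.size)
    print(same is by_index)                          # -> True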
fake-name/ReadableWebProxy
ed5c7abe38706acc2684a1e6cd80242a03c5f010
WebMirror/management/rss_parser_funcs/feed_parse_extractBoredtranslationsWordpressCom.py
python
extractBoredtranslationsWordpressCom
(item)
return False
Parser for 'boredtranslations.wordpress.com'
Parser for 'boredtranslations.wordpress.com'
[ "Parser", "for", "boredtranslations", ".", "wordpress", ".", "com" ]
def extractBoredtranslationsWordpressCom(item): ''' Parser for 'boredtranslations.wordpress.com' ''' vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title']) if not (chp or vol) or "preview" in item['title'].lower(): return None tagmap = [ ('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel'), ] for tagname, name, tl_type in tagmap: if tagname in item['tags']: return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type) return False
[ "def", "extractBoredtranslationsWordpressCom", "(", "item", ")", ":", "vol", ",", "chp", ",", "frag", ",", "postfix", "=", "extractVolChapterFragmentPostfix", "(", "item", "[", "'title'", "]", ")", "if", "not", "(", "chp", "or", "vol", ")", "or", "\"preview\...
https://github.com/fake-name/ReadableWebProxy/blob/ed5c7abe38706acc2684a1e6cd80242a03c5f010/WebMirror/management/rss_parser_funcs/feed_parse_extractBoredtranslationsWordpressCom.py#L2-L21
edisonlz/fastor
342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3
base/site-packages/redis_model/models/attributes.py
python
Attribute.__set__
(self,instance,val)
set the object's name value param: instance:the name type is string val: the value type is string
set the object's name value param: instance:the name type is string val: the value type is string
[ "set", "the", "object", "s", "name", "value", "param", ":", "instance", ":", "the", "name", "type", "is", "string", "val", ":", "the", "value", "type", "is", "string" ]
def __set__(self,instance,val): """ set the object's name value param: instance:the name type is string val: the value type is string """ val = self.typecast_for_read(val) setattr(instance,"_"+self.name,val)
[ "def", "__set__", "(", "self", ",", "instance", ",", "val", ")", ":", "val", "=", "self", ".", "typecast_for_read", "(", "val", ")", "setattr", "(", "instance", ",", "\"_\"", "+", "self", ".", "name", ",", "val", ")" ]
https://github.com/edisonlz/fastor/blob/342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3/base/site-packages/redis_model/models/attributes.py#L99-L107
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
cb692f527e4e819b6c228187c5702d990a180043
external/Scripting Engine/Xenotix Python Scripting Engine/Lib/ssl.py
python
SSLSocket.read
(self, len=1024)
Read up to LEN bytes and return them. Return zero-length string on EOF.
Read up to LEN bytes and return them. Return zero-length string on EOF.
[ "Read", "up", "to", "LEN", "bytes", "and", "return", "them", ".", "Return", "zero", "-", "length", "string", "on", "EOF", "." ]
def read(self, len=1024): """Read up to LEN bytes and return them. Return zero-length string on EOF.""" try: return self._sslobj.read(len) except SSLError, x: if x.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs: return '' else: raise
[ "def", "read", "(", "self", ",", "len", "=", "1024", ")", ":", "try", ":", "return", "self", ".", "_sslobj", ".", "read", "(", "len", ")", "except", "SSLError", ",", "x", ":", "if", "x", ".", "args", "[", "0", "]", "==", "SSL_ERROR_EOF", "and", ...
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/external/Scripting Engine/Xenotix Python Scripting Engine/Lib/ssl.py#L145-L156
holoviz/panel
5e25cb09447d8edf0b316f130ee1318a2aeb880f
panel/pane/vtk/vtk.py
python
VTKVolume.register_serializer
(cls, class_type, serializer)
Register a seriliazer for a given type of class. A serializer is a function which take an instance of `class_type` (like a vtk.vtkImageData) as input and return a numpy array of the data
Register a seriliazer for a given type of class. A serializer is a function which take an instance of `class_type` (like a vtk.vtkImageData) as input and return a numpy array of the data
[ "Register", "a", "seriliazer", "for", "a", "given", "type", "of", "class", ".", "A", "serializer", "is", "a", "function", "which", "take", "an", "instance", "of", "class_type", "(", "like", "a", "vtk", ".", "vtkImageData", ")", "as", "input", "and", "ret...
def register_serializer(cls, class_type, serializer): """ Register a seriliazer for a given type of class. A serializer is a function which take an instance of `class_type` (like a vtk.vtkImageData) as input and return a numpy array of the data """ cls._serializers.update({class_type:serializer})
[ "def", "register_serializer", "(", "cls", ",", "class_type", ",", "serializer", ")", ":", "cls", ".", "_serializers", ".", "update", "(", "{", "class_type", ":", "serializer", "}", ")" ]
https://github.com/holoviz/panel/blob/5e25cb09447d8edf0b316f130ee1318a2aeb880f/panel/pane/vtk/vtk.py#L682-L688
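A minimal usage sketch for the VTKVolume.register_serializer record above, assuming the method is exposed as a classmethod on panel's VTKVolume pane and that vtk with its numpy_support helpers is installed; the converter name image_data_to_array is illustrative, not part of the library.

import vtk
from vtk.util import numpy_support
from panel.pane.vtk.vtk import VTKVolume

def image_data_to_array(image_data):
    # Turn the point scalars of a vtkImageData into a numpy volume of the same dimensions.
    dims = image_data.GetDimensions()
    scalars = numpy_support.vtk_to_numpy(image_data.GetPointData().GetScalars())
    return scalars.reshape(dims, order='F')

# Register the converter so instances of vtk.vtkImageData can be serialized.
VTKVolume.register_serializer(vtk.vtkImageData, image_data_to_array)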
psychopy/psychopy
01b674094f38d0e0bd51c45a6f66f671d7041696
psychopy/projects/pavlovia.py
python
refreshSession
()
return _existingSession
Restarts the session with the same user logged in
Restarts the session with the same user logged in
[ "Restarts", "the", "session", "with", "the", "same", "user", "logged", "in" ]
def refreshSession(): """Restarts the session with the same user logged in""" global _existingSession if _existingSession and _existingSession.getToken(): _existingSession = PavloviaSession( token=_existingSession.getToken() ) else: _existingSession = PavloviaSession() return _existingSession
[ "def", "refreshSession", "(", ")", ":", "global", "_existingSession", "if", "_existingSession", "and", "_existingSession", ".", "getToken", "(", ")", ":", "_existingSession", "=", "PavloviaSession", "(", "token", "=", "_existingSession", ".", "getToken", "(", ")",...
https://github.com/psychopy/psychopy/blob/01b674094f38d0e0bd51c45a6f66f671d7041696/psychopy/projects/pavlovia.py#L1155-L1164
inducer/loopy
55143b21711a534c07bbb14aaa63ff3879a93433
loopy/transform/diff.py
python
DifferentiationContext.get_diff_var
(self, var_name)
return new_var_name
:return: a string containing the name of a new variable holding the derivative of *var_name* by the desired *diff_context.by_name*, or *None* if no dependency exists.
:return: a string containing the name of a new variable holding the derivative of *var_name* by the desired *diff_context.by_name*, or *None* if no dependency exists.
[ ":", "return", ":", "a", "string", "containing", "the", "name", "of", "a", "new", "variable", "holding", "the", "derivative", "of", "*", "var_name", "*", "by", "the", "desired", "*", "diff_context", ".", "by_name", "*", "or", "*", "None", "*", "if", "n...
def get_diff_var(self, var_name): """ :return: a string containing the name of a new variable holding the derivative of *var_name* by the desired *diff_context.by_name*, or *None* if no dependency exists. """ new_var_name = self.rule_mapping_context.make_unique_var_name( var_name + "_d" + self.by_name) writers = self.kernel.writer_map().get(var_name, []) if not writers: # FIXME: There should be hooks to supply earlier dvar_dby # This would be the spot to think about them. return None if len(writers) > 1: raise LoopyError("%s is written in more than one place" % var_name) orig_writer_id, = writers orig_writer_insn = self.kernel.id_to_insn[orig_writer_id] diff_inames = self.add_diff_inames() diff_iname_exprs = tuple(var(diname) for diname in diff_inames) # {{{ write code diff_mapper = LoopyDiffMapper(self.rule_mapping_context, self, diff_inames) diff_expr = diff_mapper(orig_writer_insn.expression, self.kernel, orig_writer_insn) if not diff_expr: return None assert isinstance(orig_writer_insn, lp.Assignment) if isinstance(orig_writer_insn.assignee, p.Subscript): lhs_ind = orig_writer_insn.assignee.index_tuple elif isinstance(orig_writer_insn.assignee, p.Variable): lhs_ind = () else: raise LoopyError( "Unrecognized LHS type in differentiation: %s" % type(orig_writer_insn.assignee).__name__) new_insn_id = self.generate_instruction_id() insn = lp.Assignment( id=new_insn_id, assignee=var(new_var_name)[ lhs_ind + diff_iname_exprs], expression=diff_expr, within_inames=( orig_writer_insn.within_inames | frozenset(diff_inames))) self.new_instructions.append(insn) # }}} # {{{ manage variable declaration if var_name in self.kernel.arg_dict: arg = self.kernel.arg_dict[var_name] orig_shape = arg.shape elif var_name in self.kernel.temporary_variables: tv = self.kernel.temporary_variables[var_name] orig_shape = tv.shape else: raise ValueError("%s: variable not found" % var_name) shape = orig_shape + self.additional_shape dim_tags = ("c",) * len(shape) if var_name in self.kernel.arg_dict: self.new_args.append( lp.GlobalArg( new_var_name, arg.dtype, shape=shape, dim_tags=dim_tags, is_input=arg.is_input, is_output=arg.is_output )) elif var_name in self.kernel.temporary_variables: self.new_temporary_variables[new_var_name] = lp.TemporaryVariable( new_var_name, tv.dtype, shape=shape, dim_tags=dim_tags) # }}} return new_var_name
[ "def", "get_diff_var", "(", "self", ",", "var_name", ")", ":", "new_var_name", "=", "self", ".", "rule_mapping_context", ".", "make_unique_var_name", "(", "var_name", "+", "\"_d\"", "+", "self", ".", "by_name", ")", "writers", "=", "self", ".", "kernel", "."...
https://github.com/inducer/loopy/blob/55143b21711a534c07bbb14aaa63ff3879a93433/loopy/transform/diff.py#L269-L365
elfi-dev/elfi
07ac0ed5e81d5d5fb42de63db3cf9ccc9135b88c
elfi/model/extensions.py
python
ModelPrior.gradient_logpdf
(self, x, stepsize=None)
return grads
Return the gradient of log density of the joint prior at x. Parameters ---------- x : float or np.ndarray stepsize : float or list Stepsize or stepsizes for the dimensions
Return the gradient of log density of the joint prior at x.
[ "Return", "the", "gradient", "of", "log", "density", "of", "the", "joint", "prior", "at", "x", "." ]
def gradient_logpdf(self, x, stepsize=None): """Return the gradient of log density of the joint prior at x. Parameters ---------- x : float or np.ndarray stepsize : float or list Stepsize or stepsizes for the dimensions """ x = np.asanyarray(x) ndim = x.ndim x = x.reshape((-1, self.dim)) grads = np.zeros_like(x) for i in range(len(grads)): xi = x[i] grads[i] = numgrad(self.logpdf, xi, h=stepsize) grads[np.isinf(grads)] = 0 grads[np.isnan(grads)] = 0 if ndim == 0 or (ndim == 1 and self.dim > 1): grads = grads[0] return grads
[ "def", "gradient_logpdf", "(", "self", ",", "x", ",", "stepsize", "=", "None", ")", ":", "x", "=", "np", ".", "asanyarray", "(", "x", ")", "ndim", "=", "x", ".", "ndim", "x", "=", "x", ".", "reshape", "(", "(", "-", "1", ",", "self", ".", "di...
https://github.com/elfi-dev/elfi/blob/07ac0ed5e81d5d5fb42de63db3cf9ccc9135b88c/elfi/model/extensions.py#L203-L228
titusjan/argos
5a9c31a8a9a2ca825bbf821aa1e685740e3682d7
argos/utils/logs.py
python
initLogging
(configFileName=None, streamLogLevel=None)
Configures logging given a (JSON) config file name. If configFileName is None, the default logging (from iriscc/lib/default_logging.yaml) is used. :param configFileName: JSON file with log config. :param streamLogLevel: If given it overrides the log level of StreamHandlers in the config. All messages below this level will be suppressed.
Configures logging given a (JSON) config file name.
[ "Configures", "logging", "given", "a", "(", "JSON", ")", "config", "file", "name", "." ]
def initLogging(configFileName=None, streamLogLevel=None): """ Configures logging given a (JSON) config file name. If configFileName is None, the default logging (from iriscc/lib/default_logging.yaml) is used. :param configFileName: JSON file with log config. :param streamLogLevel: If given it overrides the log level of StreamHandlers in the config. All messages below this level will be suppressed. """ if configFileName is None: configFileName = os.path.join(THIS_MODULE_DIR, "default_logging.json") with open(configFileName, 'r') as stream: lines = stream.readlines() cfgLines = ''.join(lines) # Ensure the directory exists if @logDir@ is in the JSON file. logDir = argosLogDirectory() if '@logDir@' in cfgLines: ensureDirectoryExists(logDir) configDict = json.loads(cfgLines) configDict = replaceStringsInDict(configDict, "@logDir@", logDir) logging.config.dictConfig(configDict) if streamLogLevel: # Using getLevelName to get the level number. This undocumented behavior has been upgraded # to documented behavior in Python 3.4.2. # See https://docs.python.org/3.4/library/logging.html#logging.getLevelName levelNr = logging.getLevelName(streamLogLevel.upper()) - 1 #logging.disable(levelNr) for streamHandler in findStreamHandlersInConfig(): logger.debug("Setting log level to {} in handler: {} ".format(levelNr, streamHandler)) streamHandler.setLevel(levelNr) logging.info("Initialized logging from: '{}'".format(normRealPath(configFileName))) logging.info("Default location of log files: '{}'".format(logDir))
[ "def", "initLogging", "(", "configFileName", "=", "None", ",", "streamLogLevel", "=", "None", ")", ":", "if", "configFileName", "is", "None", ":", "configFileName", "=", "os", ".", "path", ".", "join", "(", "THIS_MODULE_DIR", ",", "\"default_logging.json\"", "...
https://github.com/titusjan/argos/blob/5a9c31a8a9a2ca825bbf821aa1e685740e3682d7/argos/utils/logs.py#L110-L149
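A short call sketch for the initLogging record above, assuming the module is importable as argos.utils.logs; it relies only on the defaults described in the docstring (the bundled default_logging.json) and overrides the console handlers' threshold.

from argos.utils.logs import initLogging

# Load the bundled default_logging.json and suppress console messages below WARNING.
initLogging(streamLogLevel='warning')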
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
cb692f527e4e819b6c228187c5702d990a180043
external/Scripting Engine/Xenotix Python Scripting Engine/bin/x86/Debug/Lib/inspect.py
python
isbuiltin
(object)
return isinstance(object, types.BuiltinFunctionType)
Return true if the object is a built-in function or method. Built-in functions and methods provide these attributes: __doc__ documentation string __name__ original name of this function or method __self__ instance to which a method is bound, or None
Return true if the object is a built-in function or method.
[ "Return", "true", "if", "the", "object", "is", "a", "built", "-", "in", "function", "or", "method", "." ]
def isbuiltin(object): """Return true if the object is a built-in function or method. Built-in functions and methods provide these attributes: __doc__ documentation string __name__ original name of this function or method __self__ instance to which a method is bound, or None""" return isinstance(object, types.BuiltinFunctionType)
[ "def", "isbuiltin", "(", "object", ")", ":", "return", "isinstance", "(", "object", ",", "types", ".", "BuiltinFunctionType", ")" ]
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/external/Scripting Engine/Xenotix Python Scripting Engine/bin/x86/Debug/Lib/inspect.py#L227-L234
oracle/graalpython
577e02da9755d916056184ec441c26e00b70145c
graalpython/lib-python/3/tkinter/__init__.py
python
Entry.index
(self, index)
return self.tk.getint(self.tk.call( self._w, 'index', index))
Return position of cursor.
Return position of cursor.
[ "Return", "position", "of", "cursor", "." ]
def index(self, index): """Return position of cursor.""" return self.tk.getint(self.tk.call( self._w, 'index', index))
[ "def", "index", "(", "self", ",", "index", ")", ":", "return", "self", ".", "tk", ".", "getint", "(", "self", ".", "tk", ".", "call", "(", "self", ".", "_w", ",", "'index'", ",", "index", ")", ")" ]
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/tkinter/__init__.py#L3044-L3047
securesystemslab/zippy
ff0e84ac99442c2c55fe1d285332cfd4e185e089
zippy/lib-python/3/plat-freebsd8/IN.py
python
__STRING
(x)
return
[]
def __STRING(x): return
[ "def", "__STRING", "(", "x", ")", ":", "return" ]
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/lib-python/3/plat-freebsd8/IN.py#L28-L28
mozilla/kitsune
7c7cf9baed57aa776547aea744243ccad6ca91fb
kitsune/dashboards/views.py
python
aggregated_metrics
(request)
return render( request, "dashboards/aggregated_metrics.html", { "locales_json": json.dumps(settings.SUMO_LANGUAGES), "locales": locales, "product": product, "products": Product.objects.filter(visible=True), }, )
The aggregated (all locales) kb metrics dashboard.
The aggregated (all locales) kb metrics dashboard.
[ "The", "aggregated", "(", "all", "locales", ")", "kb", "metrics", "dashboard", "." ]
def aggregated_metrics(request): """The aggregated (all locales) kb metrics dashboard.""" today = date.today() locales = get_locales_by_visit(today - timedelta(days=30), today) product = _get_product(request) return render( request, "dashboards/aggregated_metrics.html", { "locales_json": json.dumps(settings.SUMO_LANGUAGES), "locales": locales, "product": product, "products": Product.objects.filter(visible=True), }, )
[ "def", "aggregated_metrics", "(", "request", ")", ":", "today", "=", "date", ".", "today", "(", ")", "locales", "=", "get_locales_by_visit", "(", "today", "-", "timedelta", "(", "days", "=", "30", ")", ",", "today", ")", "product", "=", "_get_product", "...
https://github.com/mozilla/kitsune/blob/7c7cf9baed57aa776547aea744243ccad6ca91fb/kitsune/dashboards/views.py#L235-L250
dimagi/commcare-hq
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
corehq/apps/sms/models.py
python
MessagingEvent.get_recipient_doc_type
(self)
return MessagingEvent._get_recipient_doc_type(self.recipient_type)
[]
def get_recipient_doc_type(self): return MessagingEvent._get_recipient_doc_type(self.recipient_type)
[ "def", "get_recipient_doc_type", "(", "self", ")", ":", "return", "MessagingEvent", ".", "_get_recipient_doc_type", "(", "self", ".", "recipient_type", ")" ]
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/apps/sms/models.py#L1174-L1175
dvlab-research/3DSSD
8bc7605d4d3a6ec9051e7689e96a23bdac4c4cd9
lib/utils/kitti_object.py
python
kitti_object.get_lidar
(self, idx)
return utils.load_velo_scan(lidar_filename)
[]
def get_lidar(self, idx): assert(idx<self.num_samples) lidar_filename = os.path.join(self.lidar_dir, '%06d.bin'%(idx)) return utils.load_velo_scan(lidar_filename)
[ "def", "get_lidar", "(", "self", ",", "idx", ")", ":", "assert", "(", "idx", "<", "self", ".", "num_samples", ")", "lidar_filename", "=", "os", ".", "path", ".", "join", "(", "self", ".", "lidar_dir", ",", "'%06d.bin'", "%", "(", "idx", ")", ")", "...
https://github.com/dvlab-research/3DSSD/blob/8bc7605d4d3a6ec9051e7689e96a23bdac4c4cd9/lib/utils/kitti_object.py#L54-L57
OpenTransitTools/gtfsdb
8638a9d83747f1ac6c8953119239b42dd3628b29
gtfsdb/model/stop.py
python
CurrentStops.post_process
(cls, db, **kwargs)
will update the current 'view' of this data
will update the current 'view' of this data
[ "will", "update", "the", "current", "view", "of", "this", "data" ]
def post_process(cls, db, **kwargs): """ will update the current 'view' of this data """ session = db.session() try: session.query(CurrentStops).delete() # import pdb; pdb.set_trace() for s in Stop.query_active_stops(session): c = CurrentStops(s, session) session.add(c) session.commit() session.flush() except Exception as e: log.warning(e) session.rollback() finally: session.flush() session.close()
[ "def", "post_process", "(", "cls", ",", "db", ",", "*", "*", "kwargs", ")", ":", "session", "=", "db", ".", "session", "(", ")", "try", ":", "session", ".", "query", "(", "CurrentStops", ")", ".", "delete", "(", ")", "# import pdb; pdb.set_trace()", "f...
https://github.com/OpenTransitTools/gtfsdb/blob/8638a9d83747f1ac6c8953119239b42dd3628b29/gtfsdb/model/stop.py#L245-L265
GoogleCloudPlatform/PerfKitBenchmarker
6e3412d7d5e414b8ca30ed5eaf970cef1d919a67
perfkitbenchmarker/linux_packages/oldisim_dependencies.py
python
YumInstall
(vm)
Installs oldisim dependencies on the VM.
Installs oldisim dependencies on the VM.
[ "Installs", "oldisim", "dependencies", "on", "the", "VM", "." ]
def YumInstall(vm): """Installs oldisim dependencies on the VM.""" vm.InstallEpelRepo() _Install(vm, YUM_PACKAGES)
[ "def", "YumInstall", "(", "vm", ")", ":", "vm", ".", "InstallEpelRepo", "(", ")", "_Install", "(", "vm", ",", "YUM_PACKAGES", ")" ]
https://github.com/GoogleCloudPlatform/PerfKitBenchmarker/blob/6e3412d7d5e414b8ca30ed5eaf970cef1d919a67/perfkitbenchmarker/linux_packages/oldisim_dependencies.py#L39-L42
eirannejad/pyRevit
49c0b7eb54eb343458ce1365425e6552d0c47d44
site-packages/werkzeug/datastructures.py
python
ETags.is_weak
(self, etag)
return etag in self._weak
Check if an etag is weak.
Check if an etag is weak.
[ "Check", "if", "an", "etag", "is", "weak", "." ]
def is_weak(self, etag): """Check if an etag is weak.""" return etag in self._weak
[ "def", "is_weak", "(", "self", ",", "etag", ")", ":", "return", "etag", "in", "self", ".", "_weak" ]
https://github.com/eirannejad/pyRevit/blob/49c0b7eb54eb343458ce1365425e6552d0c47d44/site-packages/werkzeug/datastructures.py#L2170-L2172
moskytw/mosql
730b2bd859306cd333cc4dc97708bcec6005973c
mosql/mysql.py
python
format_param
(s='')
return '%s'
This function always returns ``'%s'``, so it makes you can use the prepare statement with MySQLdb.
This function always returns ``'%s'``, so it makes you can use the prepare statement with MySQLdb.
[ "This", "function", "always", "returns", "%s", "so", "it", "makes", "you", "can", "use", "the", "prepare", "statement", "with", "MySQLdb", "." ]
def format_param(s=''): '''This function always returns ``'%s'``, so it makes you can use the prepare statement with MySQLdb.''' return '%s'
[ "def", "format_param", "(", "s", "=", "''", ")", ":", "return", "'%s'" ]
https://github.com/moskytw/mosql/blob/730b2bd859306cd333cc4dc97708bcec6005973c/mosql/mysql.py#L70-L73
jython/frozen-mirror
b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99
lib-python/2.7/rlcompleter.py
python
Completer.__init__
(self, namespace = None)
Create a new completer for the command line. Completer([namespace]) -> completer instance. If unspecified, the default namespace where completions are performed is __main__ (technically, __main__.__dict__). Namespaces should be given as dictionaries. Completer instances should be used as the completion mechanism of readline via the set_completer() call: readline.set_completer(Completer(my_namespace).complete)
Create a new completer for the command line.
[ "Create", "a", "new", "completer", "for", "the", "command", "line", "." ]
def __init__(self, namespace = None): """Create a new completer for the command line. Completer([namespace]) -> completer instance. If unspecified, the default namespace where completions are performed is __main__ (technically, __main__.__dict__). Namespaces should be given as dictionaries. Completer instances should be used as the completion mechanism of readline via the set_completer() call: readline.set_completer(Completer(my_namespace).complete) """ if namespace and not isinstance(namespace, dict): raise TypeError,'namespace must be a dictionary' # Don't bind to namespace quite yet, but flag whether the user wants a # specific namespace or to use __main__.__dict__. This will allow us # to bind to __main__.__dict__ at completion time, not now. if namespace is None: self.use_main_ns = 1 else: self.use_main_ns = 0 self.namespace = namespace
[ "def", "__init__", "(", "self", ",", "namespace", "=", "None", ")", ":", "if", "namespace", "and", "not", "isinstance", "(", "namespace", ",", "dict", ")", ":", "raise", "TypeError", ",", "'namespace must be a dictionary'", "# Don't bind to namespace quite yet, but ...
https://github.com/jython/frozen-mirror/blob/b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99/lib-python/2.7/rlcompleter.py#L44-L69
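A small interactive-use sketch for the rlcompleter Completer record above; it only exercises the readline.set_completer() pattern that the docstring itself describes, with a throwaway namespace.

import readline
import rlcompleter

# Complete against an explicit namespace instead of __main__.__dict__.
namespace = {'answer': 42, 'greet': lambda name: 'hi ' + name}
readline.set_completer(rlcompleter.Completer(namespace).complete)
readline.parse_and_bind('tab: complete')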
mitogen-hq/mitogen
5b505f524a7ae170fe68613841ab92b299613d3f
mitogen/service.py
python
FileService._prefix_is_authorized
(self, path)
return False
Return the set of all possible directory prefixes for `path`. :func:`os.path.abspath` is used to ensure the path is absolute. :param str path: The path. :returns: Set of prefixes.
Return the set of all possible directory prefixes for `path`. :func:`os.path.abspath` is used to ensure the path is absolute.
[ "Return", "the", "set", "of", "all", "possible", "directory", "prefixes", "for", "path", ".", ":", "func", ":", "os", ".", "path", ".", "abspath", "is", "used", "to", "ensure", "the", "path", "is", "absolute", "." ]
def _prefix_is_authorized(self, path): """ Return the set of all possible directory prefixes for `path`. :func:`os.path.abspath` is used to ensure the path is absolute. :param str path: The path. :returns: Set of prefixes. """ path = os.path.abspath(path) while True: if path in self._prefixes: return True if path == '/': break path = os.path.dirname(path) return False
[ "def", "_prefix_is_authorized", "(", "self", ",", "path", ")", ":", "path", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", "while", "True", ":", "if", "path", "in", "self", ".", "_prefixes", ":", "return", "True", "if", "path", "==", "'/'"...
https://github.com/mitogen-hq/mitogen/blob/5b505f524a7ae170fe68613841ab92b299613d3f/mitogen/service.py#L1014-L1030
nameko/nameko
17ecee2bcfa90cb0f3a2f3328c5004f48e4e02a3
nameko/messaging.py
python
Publisher.amqp_uri
(self)
return self.container.config[AMQP_URI_CONFIG_KEY]
[]
def amqp_uri(self): return self.container.config[AMQP_URI_CONFIG_KEY]
[ "def", "amqp_uri", "(", "self", ")", ":", "return", "self", ".", "container", ".", "config", "[", "AMQP_URI_CONFIG_KEY", "]" ]
https://github.com/nameko/nameko/blob/17ecee2bcfa90cb0f3a2f3328c5004f48e4e02a3/nameko/messaging.py#L146-L147
realpython/book2-exercises
cde325eac8e6d8cff2316601c2e5b36bb46af7d0
web2py/gluon/tools.py
python
Auth.archive
(form, archive_table=None, current_record='current_record', archive_current=False, fields=None)
return id
If you have a table (db.mytable) that needs full revision history you can just do:: form = crud.update(db.mytable, myrecord, onaccept=auth.archive) or:: form = SQLFORM(db.mytable, myrecord).process(onaccept=auth.archive) crud.archive will define a new table "mytable_archive" and store a copy of the current record (if archive_current=True) or a copy of the previous record (if archive_current=False) in the newly created table including a reference to the current record. fields allows to specify extra fields that need to be archived. If you want to access such table you need to define it yourself in a model:: db.define_table('mytable_archive', Field('current_record', db.mytable), db.mytable) Notice such table includes all fields of db.mytable plus one: current_record. crud.archive does not timestamp the stored record unless your original table has a fields like:: db.define_table(..., Field('saved_on', 'datetime', default=request.now, update=request.now, writable=False), Field('saved_by', auth.user, default=auth.user_id, update=auth.user_id, writable=False), there is nothing special about these fields since they are filled before the record is archived. If you want to change the archive table name and the name of the reference field you can do, for example:: db.define_table('myhistory', Field('parent_record', db.mytable), db.mytable) and use it as:: form = crud.update(db.mytable, myrecord, onaccept=lambda form:crud.archive(form, archive_table=db.myhistory, current_record='parent_record'))
If you have a table (db.mytable) that needs full revision history you can just do::
[ "If", "you", "have", "a", "table", "(", "db", ".", "mytable", ")", "that", "needs", "full", "revision", "history", "you", "can", "just", "do", "::" ]
def archive(form, archive_table=None, current_record='current_record', archive_current=False, fields=None): """ If you have a table (db.mytable) that needs full revision history you can just do:: form = crud.update(db.mytable, myrecord, onaccept=auth.archive) or:: form = SQLFORM(db.mytable, myrecord).process(onaccept=auth.archive) crud.archive will define a new table "mytable_archive" and store a copy of the current record (if archive_current=True) or a copy of the previous record (if archive_current=False) in the newly created table including a reference to the current record. fields allows to specify extra fields that need to be archived. If you want to access such table you need to define it yourself in a model:: db.define_table('mytable_archive', Field('current_record', db.mytable), db.mytable) Notice such table includes all fields of db.mytable plus one: current_record. crud.archive does not timestamp the stored record unless your original table has a fields like:: db.define_table(..., Field('saved_on', 'datetime', default=request.now, update=request.now, writable=False), Field('saved_by', auth.user, default=auth.user_id, update=auth.user_id, writable=False), there is nothing special about these fields since they are filled before the record is archived. If you want to change the archive table name and the name of the reference field you can do, for example:: db.define_table('myhistory', Field('parent_record', db.mytable), db.mytable) and use it as:: form = crud.update(db.mytable, myrecord, onaccept=lambda form:crud.archive(form, archive_table=db.myhistory, current_record='parent_record')) """ if not archive_current and not form.record: return None table = form.table if not archive_table: archive_table_name = '%s_archive' % table if archive_table_name not in table._db: table._db.define_table( archive_table_name, Field(current_record, table), *[field.clone(unique=False) for field in table]) archive_table = table._db[archive_table_name] new_record = {current_record: form.vars.id} for fieldname in archive_table.fields: if not fieldname in ['id', current_record]: if archive_current and fieldname in form.vars: new_record[fieldname] = form.vars[fieldname] elif form.record and fieldname in form.record: new_record[fieldname] = form.record[fieldname] if fields: new_record.update(fields) id = archive_table.insert(**new_record) return id
[ "def", "archive", "(", "form", ",", "archive_table", "=", "None", ",", "current_record", "=", "'current_record'", ",", "archive_current", "=", "False", ",", "fields", "=", "None", ")", ":", "if", "not", "archive_current", "and", "not", "form", ".", "record",...
https://github.com/realpython/book2-exercises/blob/cde325eac8e6d8cff2316601c2e5b36bb46af7d0/web2py/gluon/tools.py#L4638-L4716
andresriancho/enumerate-iam
4529114002d1571035c31980162e401112f356da
enumerate-iam.py
python
main
()
[]
def main(): parser = argparse.ArgumentParser(description='Enumerate IAM permissions') parser.add_argument('--access-key', help='AWS access key', required=True) parser.add_argument('--secret-key', help='AWS secret key', required=True) parser.add_argument('--session-token', help='STS session token') parser.add_argument('--region', help='AWS region to send API requests to', default='us-east-1') args = parser.parse_args() enumerate_iam(args.access_key, args.secret_key, args.session_token, args.region)
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Enumerate IAM permissions'", ")", "parser", ".", "add_argument", "(", "'--access-key'", ",", "help", "=", "'AWS access key'", ",", "required", "=", "True",...
https://github.com/andresriancho/enumerate-iam/blob/4529114002d1571035c31980162e401112f356da/enumerate-iam.py#L7-L20
dimagi/commcare-hq
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
corehq/apps/app_manager/models.py
python
Application.ensure_module_unique_ids
(self, should_save=False)
Creates unique_ids for modules that don't have unique_id attributes should_save: the doc will be saved only if should_save is set to True WARNING: If called on the same doc in different requests without saving, this function will set different uuid each time, likely causing unexpected behavior
Creates unique_ids for modules that don't have unique_id attributes should_save: the doc will be saved only if should_save is set to True
[ "Creates", "unique_ids", "for", "modules", "that", "don", "t", "have", "unique_id", "attributes", "should_save", ":", "the", "doc", "will", "be", "saved", "only", "if", "should_save", "is", "set", "to", "True" ]
def ensure_module_unique_ids(self, should_save=False): """ Creates unique_ids for modules that don't have unique_id attributes should_save: the doc will be saved only if should_save is set to True WARNING: If called on the same doc in different requests without saving, this function will set different uuid each time, likely causing unexpected behavior """ if any(not mod.unique_id for mod in self.modules): for mod in self.modules: mod.get_or_create_unique_id() if should_save: self.save()
[ "def", "ensure_module_unique_ids", "(", "self", ",", "should_save", "=", "False", ")", ":", "if", "any", "(", "not", "mod", ".", "unique_id", "for", "mod", "in", "self", ".", "modules", ")", ":", "for", "mod", "in", "self", ".", "modules", ":", "mod", ...
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/apps/app_manager/models.py#L4938-L4951
plotly/plotly.py
cfad7862594b35965c0e000813bd7805e8494a5b
packages/python/plotly/plotly/graph_objs/scatterternary/marker/_colorbar.py
python
ColorBar.outlinewidth
(self)
return self["outlinewidth"]
Sets the width (in px) of the axis line. The 'outlinewidth' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float
Sets the width (in px) of the axis line. The 'outlinewidth' property is a number and may be specified as: - An int or float in the interval [0, inf]
[ "Sets", "the", "width", "(", "in", "px", ")", "of", "the", "axis", "line", ".", "The", "outlinewidth", "property", "is", "a", "number", "and", "may", "be", "specified", "as", ":", "-", "An", "int", "or", "float", "in", "the", "interval", "[", "0", ...
def outlinewidth(self): """ Sets the width (in px) of the axis line. The 'outlinewidth' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["outlinewidth"]
[ "def", "outlinewidth", "(", "self", ")", ":", "return", "self", "[", "\"outlinewidth\"", "]" ]
https://github.com/plotly/plotly.py/blob/cfad7862594b35965c0e000813bd7805e8494a5b/packages/python/plotly/plotly/graph_objs/scatterternary/marker/_colorbar.py#L435-L446
lazylibrarian/LazyLibrarian
ae3c14e9db9328ce81765e094ab2a14ed7155624
mako/util.py
python
restore__ast
(_ast)
Attempt to restore the required classes to the _ast module if it appears to be missing them
Attempt to restore the required classes to the _ast module if it appears to be missing them
[ "Attempt", "to", "restore", "the", "required", "classes", "to", "the", "_ast", "module", "if", "it", "appears", "to", "be", "missing", "them" ]
def restore__ast(_ast): """Attempt to restore the required classes to the _ast module if it appears to be missing them """ if hasattr(_ast, 'AST'): return _ast.PyCF_ONLY_AST = 2 << 9 m = compile("""\ def foo(): pass class Bar(object): pass if False: pass baz = 'mako' 1 + 2 - 3 * 4 / 5 6 // 7 % 8 << 9 >> 10 11 & 12 ^ 13 | 14 15 and 16 or 17 -baz + (not +18) - ~17 baz and 'foo' or 'bar' (mako is baz == baz) is not baz != mako mako > baz < mako >= baz <= mako mako in baz not in mako""", '<unknown>', 'exec', _ast.PyCF_ONLY_AST) _ast.Module = type(m) for cls in _ast.Module.__mro__: if cls.__name__ == 'mod': _ast.mod = cls elif cls.__name__ == 'AST': _ast.AST = cls _ast.FunctionDef = type(m.body[0]) _ast.ClassDef = type(m.body[1]) _ast.If = type(m.body[2]) _ast.Name = type(m.body[3].targets[0]) _ast.Store = type(m.body[3].targets[0].ctx) _ast.Str = type(m.body[3].value) _ast.Sub = type(m.body[4].value.op) _ast.Add = type(m.body[4].value.left.op) _ast.Div = type(m.body[4].value.right.op) _ast.Mult = type(m.body[4].value.right.left.op) _ast.RShift = type(m.body[5].value.op) _ast.LShift = type(m.body[5].value.left.op) _ast.Mod = type(m.body[5].value.left.left.op) _ast.FloorDiv = type(m.body[5].value.left.left.left.op) _ast.BitOr = type(m.body[6].value.op) _ast.BitXor = type(m.body[6].value.left.op) _ast.BitAnd = type(m.body[6].value.left.left.op) _ast.Or = type(m.body[7].value.op) _ast.And = type(m.body[7].value.values[0].op) _ast.Invert = type(m.body[8].value.right.op) _ast.Not = type(m.body[8].value.left.right.op) _ast.UAdd = type(m.body[8].value.left.right.operand.op) _ast.USub = type(m.body[8].value.left.left.op) _ast.Or = type(m.body[9].value.op) _ast.And = type(m.body[9].value.values[0].op) _ast.IsNot = type(m.body[10].value.ops[0]) _ast.NotEq = type(m.body[10].value.ops[1]) _ast.Is = type(m.body[10].value.left.ops[0]) _ast.Eq = type(m.body[10].value.left.ops[1]) _ast.Gt = type(m.body[11].value.ops[0]) _ast.Lt = type(m.body[11].value.ops[1]) _ast.GtE = type(m.body[11].value.ops[2]) _ast.LtE = type(m.body[11].value.ops[3]) _ast.In = type(m.body[12].value.ops[0]) _ast.NotIn = type(m.body[12].value.ops[1])
[ "def", "restore__ast", "(", "_ast", ")", ":", "if", "hasattr", "(", "_ast", ",", "'AST'", ")", ":", "return", "_ast", ".", "PyCF_ONLY_AST", "=", "2", "<<", "9", "m", "=", "compile", "(", "\"\"\"\\\ndef foo(): pass\nclass Bar(object): pass\nif False: pass\nbaz = 'm...
https://github.com/lazylibrarian/LazyLibrarian/blob/ae3c14e9db9328ce81765e094ab2a14ed7155624/mako/util.py#L288-L361
openembedded/bitbake
98407efc8c670abd71d3fa88ec3776ee9b5c38f3
lib/toaster/toastergui/tablefilter.py
python
TableFilterActionDateRange.set_filter_params
(self, params)
This filter depends on the user selecting some input, so it needs to have its parameters set before its queryset is filtered params: (str) a string of extra parameters for the filtering in the format "2015-12-09,2015-12-11" (from,to); this is passed in the querystring and used to set the criteria on the QuerysetFilter associated with this action
This filter depends on the user selecting some input, so it needs to have its parameters set before its queryset is filtered
[ "This", "filter", "depends", "on", "the", "user", "selecting", "some", "input", "so", "it", "needs", "to", "have", "its", "parameters", "set", "before", "its", "queryset", "is", "filtered" ]
def set_filter_params(self, params): """ This filter depends on the user selecting some input, so it needs to have its parameters set before its queryset is filtered params: (str) a string of extra parameters for the filtering in the format "2015-12-09,2015-12-11" (from,to); this is passed in the querystring and used to set the criteria on the QuerysetFilter associated with this action """ # if params are invalid, return immediately, resetting criteria # on the QuerysetFilter try: date_from_str, date_to_str = params.split(',') except ValueError: self.criteria = None return # one of the values required for the filter is missing, so set # it to the one which was supplied self.criteria = self.query_helper.dateStringsToQ( self.field, date_from_str, date_to_str )
[ "def", "set_filter_params", "(", "self", ",", "params", ")", ":", "# if params are invalid, return immediately, resetting criteria", "# on the QuerysetFilter", "try", ":", "date_from_str", ",", "date_to_str", "=", "params", ".", "split", "(", "','", ")", "except", "Valu...
https://github.com/openembedded/bitbake/blob/98407efc8c670abd71d3fa88ec3776ee9b5c38f3/lib/toaster/toastergui/tablefilter.py#L215-L240
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/bokeh-1.4.0-py3.7.egg/bokeh/protocol/message.py
python
Message.send
(self, conn)
Send the message on the given connection. Args: conn (WebSocketHandler) : a WebSocketHandler to send messages Returns: int : number of bytes sent
Send the message on the given connection.
[ "Send", "the", "message", "on", "the", "given", "connection", "." ]
def send(self, conn): ''' Send the message on the given connection. Args: conn (WebSocketHandler) : a WebSocketHandler to send messages Returns: int : number of bytes sent ''' if conn is None: raise ValueError("Cannot send to connection None") with (yield conn.write_lock.acquire()): sent = 0 yield conn.write_message(self.header_json, locked=False) sent += len(self.header_json) # uncomment this to make it a lot easier to reproduce lock-related bugs #yield gen.sleep(0.1) yield conn.write_message(self.metadata_json, locked=False) sent += len(self.metadata_json) # uncomment this to make it a lot easier to reproduce lock-related bugs #yield gen.sleep(0.1) yield conn.write_message(self.content_json, locked=False) sent += len(self.content_json) sent += yield self.write_buffers(conn, locked=False) raise gen.Return(sent)
[ "def", "send", "(", "self", ",", "conn", ")", ":", "if", "conn", "is", "None", ":", "raise", "ValueError", "(", "\"Cannot send to connection None\"", ")", "with", "(", "yield", "conn", ".", "write_lock", ".", "acquire", "(", ")", ")", ":", "sent", "=", ...
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/bokeh-1.4.0-py3.7.egg/bokeh/protocol/message.py#L248-L281
Cadene/tensorflow-model-zoo.torch
990b10ffc22d4c8eacb2a502f20415b4f70c74c2
models/research/syntaxnet/dragnn/python/spec_builder.py
python
ComponentSpecBuilder.__init__
(self, name, builder='DynamicComponentBuilder', backend='SyntaxNetComponent')
Initializes the ComponentSpec with some defaults for SyntaxNet. Args: name: The name of this Component in the pipeline. builder: The component builder type. backend: The component backend type.
Initializes the ComponentSpec with some defaults for SyntaxNet.
[ "Initializes", "the", "ComponentSpec", "with", "some", "defaults", "for", "SyntaxNet", "." ]
def __init__(self, name, builder='DynamicComponentBuilder', backend='SyntaxNetComponent'): """Initializes the ComponentSpec with some defaults for SyntaxNet. Args: name: The name of this Component in the pipeline. builder: The component builder type. backend: The component backend type. """ self.spec = spec_pb2.ComponentSpec( name=name, backend=self.make_module(backend), component_builder=self.make_module(builder))
[ "def", "__init__", "(", "self", ",", "name", ",", "builder", "=", "'DynamicComponentBuilder'", ",", "backend", "=", "'SyntaxNetComponent'", ")", ":", "self", ".", "spec", "=", "spec_pb2", ".", "ComponentSpec", "(", "name", "=", "name", ",", "backend", "=", ...
https://github.com/Cadene/tensorflow-model-zoo.torch/blob/990b10ffc22d4c8eacb2a502f20415b4f70c74c2/models/research/syntaxnet/dragnn/python/spec_builder.py#L39-L53
ytisf/PyExfil
0297b46dcbb135b2e2ade07ee040557a31cef04c
pyexfil/includes/image_manipulation.py
python
_openImage
(image_path)
return imgObj, ImageWidth, ImageHeight, TotalPixels
Opens image path as PIL Image Object :param image_path: String, image path :return: imgObj, ImageWidth, ImageHeight, TotalPixels
Opens image path as PIL Image Object :param image_path: String, image path :return: imgObj, ImageWidth, ImageHeight, TotalPixels
[ "Opens", "image", "path", "as", "PIL", "Image", "Object", ":", "param", "image_path", ":", "String", "image", "path", ":", "return", ":", "imgObj", "ImageWidth", "ImageHeight", "TotalPixels" ]
def _openImage(image_path): """ Opens image path as PIL Image Object :param image_path: String, image path :return: imgObj, ImageWidth, ImageHeight, TotalPixels """ imgObj = Image.open(image_path) ImageWidth, ImageHeight = imgObj.size TotalPixels = ImageWidth * ImageHeight return imgObj, ImageWidth, ImageHeight, TotalPixels
[ "def", "_openImage", "(", "image_path", ")", ":", "imgObj", "=", "Image", ".", "open", "(", "image_path", ")", "ImageWidth", ",", "ImageHeight", "=", "imgObj", ".", "size", "TotalPixels", "=", "ImageWidth", "*", "ImageHeight", "return", "imgObj", ",", "Image...
https://github.com/ytisf/PyExfil/blob/0297b46dcbb135b2e2ade07ee040557a31cef04c/pyexfil/includes/image_manipulation.py#L4-L13
mdiazcl/fuzzbunch-debian
2b76c2249ade83a389ae3badb12a1bd09901fd2c
windows/Resources/Python/Core/Lib/logging/__init__.py
python
BufferingFormatter.formatHeader
(self, records)
return ''
Return the header string for the specified records.
Return the header string for the specified records.
[ "Return", "the", "header", "string", "for", "the", "specified", "records", "." ]
def formatHeader(self, records): """ Return the header string for the specified records. """ return ''
[ "def", "formatHeader", "(", "self", ",", "records", ")", ":", "return", "''" ]
https://github.com/mdiazcl/fuzzbunch-debian/blob/2b76c2249ade83a389ae3badb12a1bd09901fd2c/windows/Resources/Python/Core/Lib/logging/__init__.py#L415-L419
galaxyproject/galaxy
4c03520f05062e0f4a1b3655dc0b7452fda69943
lib/galaxy/tool_util/deps/docker_util.py
python
_docker_prefix
( docker_cmd=DEFAULT_DOCKER_COMMAND, sudo=DEFAULT_SUDO, sudo_cmd=DEFAULT_SUDO_COMMAND, host=DEFAULT_HOST, **kwds )
return command_parts
Prefix to issue a docker command.
Prefix to issue a docker command.
[ "Prefix", "to", "issue", "a", "docker", "command", "." ]
def _docker_prefix( docker_cmd=DEFAULT_DOCKER_COMMAND, sudo=DEFAULT_SUDO, sudo_cmd=DEFAULT_SUDO_COMMAND, host=DEFAULT_HOST, **kwds ): """Prefix to issue a docker command.""" command_parts = [] if sudo: command_parts.append(sudo_cmd) command_parts.append(docker_cmd) if host: command_parts.extend(["-H", host]) return command_parts
[ "def", "_docker_prefix", "(", "docker_cmd", "=", "DEFAULT_DOCKER_COMMAND", ",", "sudo", "=", "DEFAULT_SUDO", ",", "sudo_cmd", "=", "DEFAULT_SUDO_COMMAND", ",", "host", "=", "DEFAULT_HOST", ",", "*", "*", "kwds", ")", ":", "command_parts", "=", "[", "]", "if", ...
https://github.com/galaxyproject/galaxy/blob/4c03520f05062e0f4a1b3655dc0b7452fda69943/lib/galaxy/tool_util/deps/docker_util.py#L207-L221
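A hedged illustration of what the _docker_prefix record above builds, assuming the module defaults DEFAULT_SUDO_COMMAND and DEFAULT_DOCKER_COMMAND are the usual 'sudo' and 'docker' strings (those defaults are not shown in the record).

from galaxy.tool_util.deps.docker_util import _docker_prefix

# The prefix is returned as a list of argv parts, ready to prepend to a docker subcommand.
prefix = _docker_prefix(sudo=True, host="tcp://127.0.0.1:2375")
# Under the assumed defaults this yields: ['sudo', 'docker', '-H', 'tcp://127.0.0.1:2375']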
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/coding/information_set_decoder.py
python
InformationSetAlgorithm._repr_
(self)
return "ISD Algorithm ({}) for {} decoding {} errors ".format(self._algorithm_name, self.code(), _format_decoding_interval(self.decoding_interval()))
r""" Returns a string representation of this ISD algorithm. EXAMPLES:: sage: C = codes.GolayCode(GF(2)) sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: A = LeeBrickellISDAlgorithm(C, (0,4)) sage: A ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors
r""" Returns a string representation of this ISD algorithm.
[ "r", "Returns", "a", "string", "representation", "of", "this", "ISD", "algorithm", "." ]
def _repr_(self): r""" Returns a string representation of this ISD algorithm. EXAMPLES:: sage: C = codes.GolayCode(GF(2)) sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: A = LeeBrickellISDAlgorithm(C, (0,4)) sage: A ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors """ return "ISD Algorithm ({}) for {} decoding {} errors ".format(self._algorithm_name, self.code(), _format_decoding_interval(self.decoding_interval()))
[ "def", "_repr_", "(", "self", ")", ":", "return", "\"ISD Algorithm ({}) for {} decoding {} errors \"", ".", "format", "(", "self", ".", "_algorithm_name", ",", "self", ".", "code", "(", ")", ",", "_format_decoding_interval", "(", "self", ".", "decoding_interval", ...
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/coding/information_set_decoder.py#L315-L327
apache/libcloud
90971e17bfd7b6bb97b2489986472c531cc8e140
libcloud/loadbalancer/base.py
python
Driver.update_balancer
(self, balancer, **kwargs)
Sets the name, algorithm, protocol, or port on a load balancer. :param balancer: LoadBalancer which should be used :type balancer: :class:`LoadBalancer` :param name: New load balancer name :type name: ``str`` :param algorithm: New load balancer algorithm :type algorithm: :class:`.Algorithm` :param protocol: New load balancer protocol :type protocol: ``str`` :param port: New load balancer port :type port: ``int`` :rtype: :class:`LoadBalancer`
Sets the name, algorithm, protocol, or port on a load balancer.
[ "Sets", "the", "name", "algorithm", "protocol", "or", "port", "on", "a", "load", "balancer", "." ]
def update_balancer(self, balancer, **kwargs): """ Sets the name, algorithm, protocol, or port on a load balancer. :param balancer: LoadBalancer which should be used :type balancer: :class:`LoadBalancer` :param name: New load balancer name :type name: ``str`` :param algorithm: New load balancer algorithm :type algorithm: :class:`.Algorithm` :param protocol: New load balancer protocol :type protocol: ``str`` :param port: New load balancer port :type port: ``int`` :rtype: :class:`LoadBalancer` """ raise NotImplementedError("update_balancer not implemented for this driver")
[ "def", "update_balancer", "(", "self", ",", "balancer", ",", "*", "*", "kwargs", ")", ":", "raise", "NotImplementedError", "(", "\"update_balancer not implemented for this driver\"", ")" ]
https://github.com/apache/libcloud/blob/90971e17bfd7b6bb97b2489986472c531cc8e140/libcloud/loadbalancer/base.py#L221-L242