nwo
stringlengths
5
106
sha
stringlengths
40
40
path
stringlengths
4
174
language
stringclasses
1 value
identifier
stringlengths
1
140
parameters
stringlengths
0
87.7k
argument_list
stringclasses
1 value
return_statement
stringlengths
0
426k
docstring
stringlengths
0
64.3k
docstring_summary
stringlengths
0
26.3k
docstring_tokens
list
function
stringlengths
18
4.83M
function_tokens
list
url
stringlengths
83
304
wrobstory/vincent
c5a06e50179015fbb788a7a42e4570ff4467a9e9
vincent/scales.py
python
Scale.domain_max
(value)
float, int, or DataRef : Maximum domain value Only used for quantitative/time scales. This takes precedence over the maximum of the ``domain`` property.
float, int, or DataRef : Maximum domain value
[ "float", "int", "or", "DataRef", ":", "Maximum", "domain", "value" ]
def domain_max(value): """float, int, or DataRef : Maximum domain value Only used for quantitative/time scales. This takes precedence over the maximum of the ``domain`` property. """
[ "def", "domain_max", "(", "value", ")", ":" ]
https://github.com/wrobstory/vincent/blob/c5a06e50179015fbb788a7a42e4570ff4467a9e9/vincent/scales.py#L77-L82
mrJean1/PyGeodesy
7da5ca71aa3edb7bc49e219e0b8190686e1a7965
pygeodesy/points.py
python
_Array2LatLon.__repr__
(self)
return self._repr()
Return a string representation.
Return a string representation.
[ "Return", "a", "string", "representation", "." ]
def __repr__(self): '''Return a string representation. ''' return self._repr()
[ "def", "__repr__", "(", "self", ")", ":", "return", "self", ".", "_repr", "(", ")" ]
https://github.com/mrJean1/PyGeodesy/blob/7da5ca71aa3edb7bc49e219e0b8190686e1a7965/pygeodesy/points.py#L629-L632
tchellomello/python-ring-doorbell
4382c6e54ad7ac342c116fa608d3b45c1e43ed35
ring_doorbell/doorbot.py
python
RingDoorBell.has_subscription
(self)
return self._attrs.get("features").get("show_recordings")
Return boolean if the account has subscription.
Return boolean if the account has subscription.
[ "Return", "boolean", "if", "the", "account", "has", "subscription", "." ]
def has_subscription(self): """Return boolean if the account has subscription.""" return self._attrs.get("features").get("show_recordings")
[ "def", "has_subscription", "(", "self", ")", ":", "return", "self", ".", "_attrs", ".", "get", "(", "\"features\"", ")", ".", "get", "(", "\"show_recordings\"", ")" ]
https://github.com/tchellomello/python-ring-doorbell/blob/4382c6e54ad7ac342c116fa608d3b45c1e43ed35/ring_doorbell/doorbot.py#L399-L401
rwth-i6/returnn
f2d718a197a280b0d5f0fd91a7fcb8658560dddb
returnn/util/fsa.py
python
Ngram._create_ngram_list
(self)
creates a ngram list from list of lemmas permute over the created list
creates a ngram list from list of lemmas permute over the created list
[ "creates", "a", "ngram", "list", "from", "list", "of", "lemmas", "permute", "over", "the", "created", "list" ]
def _create_ngram_list(self): """ creates a ngram list from list of lemmas permute over the created list """ for perm in itertools.permutations(self.lemma_list, self.n): self.ngram_list.append(perm)
[ "def", "_create_ngram_list", "(", "self", ")", ":", "for", "perm", "in", "itertools", ".", "permutations", "(", "self", ".", "lemma_list", ",", "self", ".", "n", ")", ":", "self", ".", "ngram_list", ".", "append", "(", "perm", ")" ]
https://github.com/rwth-i6/returnn/blob/f2d718a197a280b0d5f0fd91a7fcb8658560dddb/returnn/util/fsa.py#L795-L801
nerdvegas/rez
d392c65bf63b4bca8106f938cec49144ba54e770
src/rezplugins/release_vcs/svn.py
python
get_last_changed_revision
(client, url)
util func, get last revision of url
util func, get last revision of url
[ "util", "func", "get", "last", "revision", "of", "url" ]
def get_last_changed_revision(client, url): """ util func, get last revision of url """ try: svn_entries = client.info2(url, pysvn.Revision(pysvn.opt_revision_kind.head), recurse=False) if not svn_entries: raise ReleaseVCSError("svn.info2() returned no results on url %s" % url) return svn_entries[0][1].last_changed_rev except pysvn.ClientError as ce: raise ReleaseVCSError("svn.info2() raised ClientError: %s" % ce)
[ "def", "get_last_changed_revision", "(", "client", ",", "url", ")", ":", "try", ":", "svn_entries", "=", "client", ".", "info2", "(", "url", ",", "pysvn", ".", "Revision", "(", "pysvn", ".", "opt_revision_kind", ".", "head", ")", ",", "recurse", "=", "Fa...
https://github.com/nerdvegas/rez/blob/d392c65bf63b4bca8106f938cec49144ba54e770/src/rezplugins/release_vcs/svn.py#L43-L55
CouchPotato/CouchPotatoServer
7260c12f72447ddb6f062367c6dfbda03ecd4e9c
libs/xmpp/simplexml.py
python
Node.getTagAttr
(self,tag,attr)
Returns attribute value of the child with specified name (or None if no such attribute).
Returns attribute value of the child with specified name (or None if no such attribute).
[ "Returns", "attribute", "value", "of", "the", "child", "with", "specified", "name", "(", "or", "None", "if", "no", "such", "attribute", ")", "." ]
def getTagAttr(self,tag,attr): """ Returns attribute value of the child with specified name (or None if no such attribute).""" try: return self.getTag(tag).attrs[attr] except: return None
[ "def", "getTagAttr", "(", "self", ",", "tag", ",", "attr", ")", ":", "try", ":", "return", "self", ".", "getTag", "(", "tag", ")", ".", "attrs", "[", "attr", "]", "except", ":", "return", "None" ]
https://github.com/CouchPotato/CouchPotatoServer/blob/7260c12f72447ddb6f062367c6dfbda03ecd4e9c/libs/xmpp/simplexml.py#L221-L224
skylander86/lambda-text-extractor
6da52d077a2fc571e38bfe29c33ae68f6443cd5a
lib-linux_x64/odf/odf2moinmoin.py
python
ODF2MoinMoin.processStyles
(self, styleElements)
Runs through "style" elements extracting necessary information.
Runs through "style" elements extracting necessary information.
[ "Runs", "through", "style", "elements", "extracting", "necessary", "information", "." ]
def processStyles(self, styleElements): """ Runs through "style" elements extracting necessary information. """ for style in styleElements: name = style.getAttribute("style:name") if name == "Standard": continue family = style.getAttribute("style:family") parent = style.getAttribute("style:parent-style-name") if family == "text": self.textStyles[name] = self.extractTextProperties(style, parent) elif family == "paragraph": self.paragraphStyles[name] = \ self.extractParagraphProperties(style, parent) self.textStyles[name] = self.extractTextProperties(style, parent)
[ "def", "processStyles", "(", "self", ",", "styleElements", ")", ":", "for", "style", "in", "styleElements", ":", "name", "=", "style", ".", "getAttribute", "(", "\"style:name\"", ")", "if", "name", "==", "\"Standard\"", ":", "continue", "family", "=", "style...
https://github.com/skylander86/lambda-text-extractor/blob/6da52d077a2fc571e38bfe29c33ae68f6443cd5a/lib-linux_x64/odf/odf2moinmoin.py#L251-L270
mozillazg/pypy
2ff5cd960c075c991389f842c6d59e71cf0cb7d0
lib-python/2.7/idlelib/run.py
python
MyRPCServer.handle_error
(self, request, client_address)
Override RPCServer method for IDLE Interrupt the MainThread and exit server if link is dropped.
Override RPCServer method for IDLE
[ "Override", "RPCServer", "method", "for", "IDLE" ]
def handle_error(self, request, client_address): """Override RPCServer method for IDLE Interrupt the MainThread and exit server if link is dropped. """ global quitting try: raise except SystemExit: raise except EOFError: global exit_now exit_now = True thread.interrupt_main() except: erf = sys.__stderr__ print>>erf, '\n' + '-'*40 print>>erf, 'Unhandled server exception!' print>>erf, 'Thread: %s' % threading.currentThread().getName() print>>erf, 'Client Address: ', client_address print>>erf, 'Request: ', repr(request) traceback.print_exc(file=erf) print>>erf, '\n*** Unrecoverable, server exiting!' print>>erf, '-'*40 quitting = True thread.interrupt_main()
[ "def", "handle_error", "(", "self", ",", "request", ",", "client_address", ")", ":", "global", "quitting", "try", ":", "raise", "except", "SystemExit", ":", "raise", "except", "EOFError", ":", "global", "exit_now", "exit_now", "=", "True", "thread", ".", "in...
https://github.com/mozillazg/pypy/blob/2ff5cd960c075c991389f842c6d59e71cf0cb7d0/lib-python/2.7/idlelib/run.py#L259-L285
edfungus/Crouton
ada98b3930192938a48909072b45cb84b945f875
clients/python_clients/venv/lib/python2.7/site-packages/setuptools/package_index.py
python
find_external_links
(url, page)
Find rel="homepage" and rel="download" links in `page`, yielding URLs
Find rel="homepage" and rel="download" links in `page`, yielding URLs
[ "Find", "rel", "=", "homepage", "and", "rel", "=", "download", "links", "in", "page", "yielding", "URLs" ]
def find_external_links(url, page): """Find rel="homepage" and rel="download" links in `page`, yielding URLs""" for match in REL.finditer(page): tag, rel = match.groups() rels = set(map(str.strip, rel.lower().split(','))) if 'homepage' in rels or 'download' in rels: for match in HREF.finditer(tag): yield urljoin(url, htmldecode(match.group(1))) for tag in ("<th>Home Page", "<th>Download URL"): pos = page.find(tag) if pos!=-1: match = HREF.search(page,pos) if match: yield urljoin(url, htmldecode(match.group(1)))
[ "def", "find_external_links", "(", "url", ",", "page", ")", ":", "for", "match", "in", "REL", ".", "finditer", "(", "page", ")", ":", "tag", ",", "rel", "=", "match", ".", "groups", "(", ")", "rels", "=", "set", "(", "map", "(", "str", ".", "stri...
https://github.com/edfungus/Crouton/blob/ada98b3930192938a48909072b45cb84b945f875/clients/python_clients/venv/lib/python2.7/site-packages/setuptools/package_index.py#L184-L199
vulscanteam/vulscan
787397e267c4e6469522ee0abe55b3e98f968d4a
pocsuite/thirdparty/requests/cookies.py
python
morsel_to_cookie
(morsel)
return create_cookie( comment=morsel['comment'], comment_url=bool(morsel['comment']), discard=False, domain=morsel['domain'], expires=expires, name=morsel.key, path=morsel['path'], port=None, rest={'HttpOnly': morsel['httponly']}, rfc2109=False, secure=bool(morsel['secure']), value=morsel.value, version=morsel['version'] or 0, )
Convert a Morsel object into a Cookie containing the one k/v pair.
Convert a Morsel object into a Cookie containing the one k/v pair.
[ "Convert", "a", "Morsel", "object", "into", "a", "Cookie", "containing", "the", "one", "k", "/", "v", "pair", "." ]
def morsel_to_cookie(morsel): """Convert a Morsel object into a Cookie containing the one k/v pair.""" expires = None if morsel['max-age']: expires = time.time() + morsel['max-age'] elif morsel['expires']: time_template = '%a, %d-%b-%Y %H:%M:%S GMT' expires = time.mktime( time.strptime(morsel['expires'], time_template)) - time.timezone return create_cookie( comment=morsel['comment'], comment_url=bool(morsel['comment']), discard=False, domain=morsel['domain'], expires=expires, name=morsel.key, path=morsel['path'], port=None, rest={'HttpOnly': morsel['httponly']}, rfc2109=False, secure=bool(morsel['secure']), value=morsel.value, version=morsel['version'] or 0, )
[ "def", "morsel_to_cookie", "(", "morsel", ")", ":", "expires", "=", "None", "if", "morsel", "[", "'max-age'", "]", ":", "expires", "=", "time", ".", "time", "(", ")", "+", "morsel", "[", "'max-age'", "]", "elif", "morsel", "[", "'expires'", "]", ":", ...
https://github.com/vulscanteam/vulscan/blob/787397e267c4e6469522ee0abe55b3e98f968d4a/pocsuite/thirdparty/requests/cookies.py#L413-L437
LinkedInAttic/indextank-service
880c6295ce8e7a3a55bf9b3777cc35c7680e0d7e
api/boto/mturk/connection.py
python
MTurkConnection._process_request
(self, request_type, params, marker_elems=None)
return self._process_response(response, marker_elems)
Helper to process the xml response from AWS
Helper to process the xml response from AWS
[ "Helper", "to", "process", "the", "xml", "response", "from", "AWS" ]
def _process_request(self, request_type, params, marker_elems=None): """ Helper to process the xml response from AWS """ response = self.make_request(request_type, params) return self._process_response(response, marker_elems)
[ "def", "_process_request", "(", "self", ",", "request_type", ",", "params", ",", "marker_elems", "=", "None", ")", ":", "response", "=", "self", ".", "make_request", "(", "request_type", ",", "params", ")", "return", "self", ".", "_process_response", "(", "r...
https://github.com/LinkedInAttic/indextank-service/blob/880c6295ce8e7a3a55bf9b3777cc35c7680e0d7e/api/boto/mturk/connection.py#L379-L384
longld/peda
84d38bda505941ba823db7f6c1bcca1e485a2d43
peda.py
python
PEDACmd.show
(self, *arg)
return
Show various PEDA options and other settings Usage: MYNAME option [optname] MYNAME (show all options) MYNAME args MYNAME env [envname]
Show various PEDA options and other settings Usage: MYNAME option [optname] MYNAME (show all options) MYNAME args MYNAME env [envname]
[ "Show", "various", "PEDA", "options", "and", "other", "settings", "Usage", ":", "MYNAME", "option", "[", "optname", "]", "MYNAME", "(", "show", "all", "options", ")", "MYNAME", "args", "MYNAME", "env", "[", "envname", "]" ]
def show(self, *arg): """ Show various PEDA options and other settings Usage: MYNAME option [optname] MYNAME (show all options) MYNAME args MYNAME env [envname] """ # show options def _show_option(name=None): if name is None: name = "" filename = peda.getfile() if filename: filename = os.path.basename(filename) else: filename = None for (k, v) in sorted(config.Option.show(name).items()): if filename and isinstance(v, str) and "#FILENAME#" in v: v = v.replace("#FILENAME#", filename) msg("%s = %s" % (k, repr(v))) return # show args def _show_arg(): arg = peda.execute_redirect("show args") arg = arg.split("started is ")[1][1:-3] arg = (peda.string_to_argv(arg)) if not arg: msg("No argument") for (i, a) in enumerate(arg): text = "arg[%d]: %s" % ((i+1), a if is_printable(a) else to_hexstr(a)) msg(text) return # show envs def _show_env(name=None): if name is None: name = "" env = peda.execute_redirect("show env") for line in env.splitlines(): (k, v) = line.split("=", 1) if k.startswith(name): msg("%s = %s" % (k, v if is_printable(v) else to_hexstr(v))) return (opt, name) = normalize_argv(arg, 2) if opt is None or opt.startswith("opt"): _show_option(name) elif opt.startswith("arg"): _show_arg() elif opt.startswith("env"): _show_env(name) else: msg("Unknown show option: %s" % opt) return
[ "def", "show", "(", "self", ",", "*", "arg", ")", ":", "# show options", "def", "_show_option", "(", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "name", "=", "\"\"", "filename", "=", "peda", ".", "getfile", "(", ")", "if", "file...
https://github.com/longld/peda/blob/84d38bda505941ba823db7f6c1bcca1e485a2d43/peda.py#L3181-L3238
replit-archive/empythoned
977ec10ced29a3541a4973dc2b59910805695752
cpython/Lib/logging/__init__.py
python
Manager._fixupParents
(self, alogger)
Ensure that there are either loggers or placeholders all the way from the specified logger to the root of the logger hierarchy.
Ensure that there are either loggers or placeholders all the way from the specified logger to the root of the logger hierarchy.
[ "Ensure", "that", "there", "are", "either", "loggers", "or", "placeholders", "all", "the", "way", "from", "the", "specified", "logger", "to", "the", "root", "of", "the", "logger", "hierarchy", "." ]
def _fixupParents(self, alogger): """ Ensure that there are either loggers or placeholders all the way from the specified logger to the root of the logger hierarchy. """ name = alogger.name i = name.rfind(".") rv = None while (i > 0) and not rv: substr = name[:i] if substr not in self.loggerDict: self.loggerDict[substr] = PlaceHolder(alogger) else: obj = self.loggerDict[substr] if isinstance(obj, Logger): rv = obj else: assert isinstance(obj, PlaceHolder) obj.append(alogger) i = name.rfind(".", 0, i - 1) if not rv: rv = self.root alogger.parent = rv
[ "def", "_fixupParents", "(", "self", ",", "alogger", ")", ":", "name", "=", "alogger", ".", "name", "i", "=", "name", ".", "rfind", "(", "\".\"", ")", "rv", "=", "None", "while", "(", "i", ">", "0", ")", "and", "not", "rv", ":", "substr", "=", ...
https://github.com/replit-archive/empythoned/blob/977ec10ced29a3541a4973dc2b59910805695752/cpython/Lib/logging/__init__.py#L1036-L1058
DragonComputer/Dragonfire
dd21f8e88d9b6390bd229ff73f89a8c3c137b89c
dragonfire/deepconv/__init__.py
python
DeepConversation.getDevice
(self)
Parse the argument to decide on which device run the model Return: str: The name of the device on which run the program
Parse the argument to decide on which device run the model Return: str: The name of the device on which run the program
[ "Parse", "the", "argument", "to", "decide", "on", "which", "device", "run", "the", "model", "Return", ":", "str", ":", "The", "name", "of", "the", "device", "on", "which", "run", "the", "program" ]
def getDevice(self): """ Parse the argument to decide on which device run the model Return: str: The name of the device on which run the program """ if self.device == 'cpu': return '/cpu:0' elif self.device == 'gpu': return '/gpu:0' elif self.device is None: # No specified device (default) return None else: print('Warning: Error in the device name: {}, use the default device'.format(self.device)) return None
[ "def", "getDevice", "(", "self", ")", ":", "if", "self", ".", "device", "==", "'cpu'", ":", "return", "'/cpu:0'", "elif", "self", ".", "device", "==", "'gpu'", ":", "return", "'/gpu:0'", "elif", "self", ".", "device", "is", "None", ":", "# No specified d...
https://github.com/DragonComputer/Dragonfire/blob/dd21f8e88d9b6390bd229ff73f89a8c3c137b89c/dragonfire/deepconv/__init__.py#L593-L606
huawei-noah/vega
d9f13deede7f2b584e4b1d32ffdb833856129989
vega/datasets/common/utils/auto_lane_utils.py
python
load_json
(file_path)
return target_dict
Load annot json. :param file_path:file path :type: str :return:json content :rtype: dict
Load annot json.
[ "Load", "annot", "json", "." ]
def load_json(file_path): """Load annot json. :param file_path:file path :type: str :return:json content :rtype: dict """ with open(file_path) as f: target_dict = json.load(f) return target_dict
[ "def", "load_json", "(", "file_path", ")", ":", "with", "open", "(", "file_path", ")", "as", "f", ":", "target_dict", "=", "json", ".", "load", "(", "f", ")", "return", "target_dict" ]
https://github.com/huawei-noah/vega/blob/d9f13deede7f2b584e4b1d32ffdb833856129989/vega/datasets/common/utils/auto_lane_utils.py#L206-L216
brendano/tweetmotif
1b0b1e3a941745cd5a26eba01f554688b7c4b27e
everything_else/djfrontend/django-1.0.2/utils/tree.py
python
Node.__len__
(self)
return len(self.children)
The size of a node if the number of children it has.
The size of a node if the number of children it has.
[ "The", "size", "of", "a", "node", "if", "the", "number", "of", "children", "it", "has", "." ]
def __len__(self): """ The size of a node if the number of children it has. """ return len(self.children)
[ "def", "__len__", "(", "self", ")", ":", "return", "len", "(", "self", ".", "children", ")" ]
https://github.com/brendano/tweetmotif/blob/1b0b1e3a941745cd5a26eba01f554688b7c4b27e/everything_else/djfrontend/django-1.0.2/utils/tree.py#L65-L69
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/modern_forms/fan.py
python
ModernFormsFanEntity.async_turn_on
( self, speed: int | None = None, percentage: int | None = None, preset_mode: int | None = None, **kwargs: Any, )
Turn on the fan.
Turn on the fan.
[ "Turn", "on", "the", "fan", "." ]
async def async_turn_on( self, speed: int | None = None, percentage: int | None = None, preset_mode: int | None = None, **kwargs: Any, ) -> None: """Turn on the fan.""" data = {OPT_ON: FAN_POWER_ON} if percentage: data[OPT_SPEED] = round( percentage_to_ranged_value(self.SPEED_RANGE, percentage) ) await self.coordinator.modern_forms.fan(**data)
[ "async", "def", "async_turn_on", "(", "self", ",", "speed", ":", "int", "|", "None", "=", "None", ",", "percentage", ":", "int", "|", "None", "=", "None", ",", "preset_mode", ":", "int", "|", "None", "=", "None", ",", "*", "*", "kwargs", ":", "Any"...
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/modern_forms/fan.py#L130-L144
MPI-IS/mesh
49e70425cf373ec5269917012bda2944215c5ccd
mesh/meshviewer.py
python
test_for_opengl
()
return test_for_opengl_cached
Tests if opengl is supported. .. note:: the result of the test is cached
Tests if opengl is supported.
[ "Tests", "if", "opengl", "is", "supported", "." ]
def test_for_opengl(): """Tests if opengl is supported. .. note:: the result of the test is cached """ global test_for_opengl_cached if test_for_opengl_cached is None: with open(os.devnull) as dev_null, \ tempfile.TemporaryFile() as out, \ tempfile.TemporaryFile() as err: p = _run_self(["TEST_FOR_OPENGL"], stdin=dev_null, stdout=out, stderr=err) p.wait() out.seek(0) err.seek(0) line = ''.join(out.read().decode()) test_for_opengl_cached = 'success' in line if not test_for_opengl_cached: print('OpenGL test failed: ') print('\tstdout:', line) print('\tstderr:', '\n'.join(err.read().decode())) return test_for_opengl_cached
[ "def", "test_for_opengl", "(", ")", ":", "global", "test_for_opengl_cached", "if", "test_for_opengl_cached", "is", "None", ":", "with", "open", "(", "os", ".", "devnull", ")", "as", "dev_null", ",", "tempfile", ".", "TemporaryFile", "(", ")", "as", "out", ",...
https://github.com/MPI-IS/mesh/blob/49e70425cf373ec5269917012bda2944215c5ccd/mesh/meshviewer.py#L111-L141
GoogleCloudPlatform/PerfKitBenchmarker
6e3412d7d5e414b8ca30ed5eaf970cef1d919a67
perfkitbenchmarker/linux_packages/speccpu2017.py
python
Install
(vm)
Installs SPECCPU 2017.
Installs SPECCPU 2017.
[ "Installs", "SPECCPU", "2017", "." ]
def Install(vm): """Installs SPECCPU 2017.""" speccpu.InstallSPECCPU(vm, GetSpecInstallConfig(vm.GetScratchDir())) # spec17 tarball comes pre-packages with runner scripts for x86 architecture. # But because we may have x86 or arm architecture machines, just rerun the # install script to regenerate the runner scripts based on what spec detects # to be the vm architecture. vm.RemoteCommand('echo yes | {0}/cpu2017/install.sh'.format( vm.GetScratchDir()))
[ "def", "Install", "(", "vm", ")", ":", "speccpu", ".", "InstallSPECCPU", "(", "vm", ",", "GetSpecInstallConfig", "(", "vm", ".", "GetScratchDir", "(", ")", ")", ")", "# spec17 tarball comes pre-packages with runner scripts for x86 architecture.", "# But because we may hav...
https://github.com/GoogleCloudPlatform/PerfKitBenchmarker/blob/6e3412d7d5e414b8ca30ed5eaf970cef1d919a67/perfkitbenchmarker/linux_packages/speccpu2017.py#L78-L86
zzzeek/sqlalchemy
fc5c54fcd4d868c2a4c7ac19668d72f506fe821e
lib/sqlalchemy/sql/default_comparator.py
python
_between_impl
(expr, op, cleft, cright, **kw)
return BinaryExpression( expr, ClauseList( coercions.expect( roles.BinaryElementRole, cleft, expr=expr, operator=operators.and_, ), coercions.expect( roles.BinaryElementRole, cright, expr=expr, operator=operators.and_, ), operator=operators.and_, group=False, group_contents=False, ), op, negate=operators.not_between_op if op is operators.between_op else operators.between_op, modifiers=kw, )
See :meth:`.ColumnOperators.between`.
See :meth:`.ColumnOperators.between`.
[ "See", ":", "meth", ":", ".", "ColumnOperators", ".", "between", "." ]
def _between_impl(expr, op, cleft, cright, **kw): """See :meth:`.ColumnOperators.between`.""" return BinaryExpression( expr, ClauseList( coercions.expect( roles.BinaryElementRole, cleft, expr=expr, operator=operators.and_, ), coercions.expect( roles.BinaryElementRole, cright, expr=expr, operator=operators.and_, ), operator=operators.and_, group=False, group_contents=False, ), op, negate=operators.not_between_op if op is operators.between_op else operators.between_op, modifiers=kw, )
[ "def", "_between_impl", "(", "expr", ",", "op", ",", "cleft", ",", "cright", ",", "*", "*", "kw", ")", ":", "return", "BinaryExpression", "(", "expr", ",", "ClauseList", "(", "coercions", ".", "expect", "(", "roles", ".", "BinaryElementRole", ",", "cleft...
https://github.com/zzzeek/sqlalchemy/blob/fc5c54fcd4d868c2a4c7ac19668d72f506fe821e/lib/sqlalchemy/sql/default_comparator.py#L229-L255
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
cb692f527e4e819b6c228187c5702d990a180043
external/Scripting Engine/packages/IronPython.StdLib.2.7.4/content/Lib/functools.py
python
update_wrapper
(wrapper, wrapped, assigned = WRAPPER_ASSIGNMENTS, updated = WRAPPER_UPDATES)
return wrapper
Update a wrapper function to look like the wrapped function wrapper is the function to be updated wrapped is the original function assigned is a tuple naming the attributes assigned directly from the wrapped function to the wrapper function (defaults to functools.WRAPPER_ASSIGNMENTS) updated is a tuple naming the attributes of the wrapper that are updated with the corresponding attribute from the wrapped function (defaults to functools.WRAPPER_UPDATES)
Update a wrapper function to look like the wrapped function
[ "Update", "a", "wrapper", "function", "to", "look", "like", "the", "wrapped", "function" ]
def update_wrapper(wrapper, wrapped, assigned = WRAPPER_ASSIGNMENTS, updated = WRAPPER_UPDATES): """Update a wrapper function to look like the wrapped function wrapper is the function to be updated wrapped is the original function assigned is a tuple naming the attributes assigned directly from the wrapped function to the wrapper function (defaults to functools.WRAPPER_ASSIGNMENTS) updated is a tuple naming the attributes of the wrapper that are updated with the corresponding attribute from the wrapped function (defaults to functools.WRAPPER_UPDATES) """ for attr in assigned: setattr(wrapper, attr, getattr(wrapped, attr)) for attr in updated: getattr(wrapper, attr).update(getattr(wrapped, attr, {})) # Return the wrapper so this can be used as a decorator via partial() return wrapper
[ "def", "update_wrapper", "(", "wrapper", ",", "wrapped", ",", "assigned", "=", "WRAPPER_ASSIGNMENTS", ",", "updated", "=", "WRAPPER_UPDATES", ")", ":", "for", "attr", "in", "assigned", ":", "setattr", "(", "wrapper", ",", "attr", ",", "getattr", "(", "wrappe...
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/external/Scripting Engine/packages/IronPython.StdLib.2.7.4/content/Lib/functools.py#L17-L37
tp4a/teleport
1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad
server/www/packages/packages-darwin/x64/tornado/template.py
python
_parse
(reader, template, in_block=None, in_loop=None)
[]
def _parse(reader, template, in_block=None, in_loop=None): body = _ChunkList([]) while True: # Find next template directive curly = 0 while True: curly = reader.find("{", curly) if curly == -1 or curly + 1 == reader.remaining(): # EOF if in_block: reader.raise_parse_error( "Missing {%% end %%} block for %s" % in_block) body.chunks.append(_Text(reader.consume(), reader.line, reader.whitespace)) return body # If the first curly brace is not the start of a special token, # start searching from the character after it if reader[curly + 1] not in ("{", "%", "#"): curly += 1 continue # When there are more than 2 curlies in a row, use the # innermost ones. This is useful when generating languages # like latex where curlies are also meaningful if (curly + 2 < reader.remaining() and reader[curly + 1] == '{' and reader[curly + 2] == '{'): curly += 1 continue break # Append any text before the special token if curly > 0: cons = reader.consume(curly) body.chunks.append(_Text(cons, reader.line, reader.whitespace)) start_brace = reader.consume(2) line = reader.line # Template directives may be escaped as "{{!" or "{%!". # In this case output the braces and consume the "!". # This is especially useful in conjunction with jquery templates, # which also use double braces. 
if reader.remaining() and reader[0] == "!": reader.consume(1) body.chunks.append(_Text(start_brace, line, reader.whitespace)) continue # Comment if start_brace == "{#": end = reader.find("#}") if end == -1: reader.raise_parse_error("Missing end comment #}") contents = reader.consume(end).strip() reader.consume(2) continue # Expression if start_brace == "{{": end = reader.find("}}") if end == -1: reader.raise_parse_error("Missing end expression }}") contents = reader.consume(end).strip() reader.consume(2) if not contents: reader.raise_parse_error("Empty expression") body.chunks.append(_Expression(contents, line)) continue # Block assert start_brace == "{%", start_brace end = reader.find("%}") if end == -1: reader.raise_parse_error("Missing end block %}") contents = reader.consume(end).strip() reader.consume(2) if not contents: reader.raise_parse_error("Empty block tag ({% %})") operator, space, suffix = contents.partition(" ") suffix = suffix.strip() # Intermediate ("else", "elif", etc) blocks intermediate_blocks = { "else": set(["if", "for", "while", "try"]), "elif": set(["if"]), "except": set(["try"]), "finally": set(["try"]), } allowed_parents = intermediate_blocks.get(operator) if allowed_parents is not None: if not in_block: reader.raise_parse_error("%s outside %s block" % (operator, allowed_parents)) if in_block not in allowed_parents: reader.raise_parse_error( "%s block cannot be attached to %s block" % (operator, in_block)) body.chunks.append(_IntermediateControlBlock(contents, line)) continue # End tag elif operator == "end": if not in_block: reader.raise_parse_error("Extra {% end %} block") return body elif operator in ("extends", "include", "set", "import", "from", "comment", "autoescape", "whitespace", "raw", "module"): if operator == "comment": continue if operator == "extends": suffix = suffix.strip('"').strip("'") if not suffix: reader.raise_parse_error("extends missing file path") block = _ExtendsBlock(suffix) elif operator in ("import", "from"): if 
not suffix: reader.raise_parse_error("import missing statement") block = _Statement(contents, line) elif operator == "include": suffix = suffix.strip('"').strip("'") if not suffix: reader.raise_parse_error("include missing file path") block = _IncludeBlock(suffix, reader, line) elif operator == "set": if not suffix: reader.raise_parse_error("set missing statement") block = _Statement(suffix, line) elif operator == "autoescape": fn = suffix.strip() if fn == "None": fn = None template.autoescape = fn continue elif operator == "whitespace": mode = suffix.strip() # Validate the selected mode filter_whitespace(mode, '') reader.whitespace = mode continue elif operator == "raw": block = _Expression(suffix, line, raw=True) elif operator == "module": block = _Module(suffix, line) body.chunks.append(block) continue elif operator in ("apply", "block", "try", "if", "for", "while"): # parse inner body recursively if operator in ("for", "while"): block_body = _parse(reader, template, operator, operator) elif operator == "apply": # apply creates a nested function so syntactically it's not # in the loop. block_body = _parse(reader, template, operator, None) else: block_body = _parse(reader, template, operator, in_loop) if operator == "apply": if not suffix: reader.raise_parse_error("apply missing method name") block = _ApplyBlock(suffix, line, block_body) elif operator == "block": if not suffix: reader.raise_parse_error("block missing name") block = _NamedBlock(suffix, block_body, template, line) else: block = _ControlBlock(contents, line, block_body) body.chunks.append(block) continue elif operator in ("break", "continue"): if not in_loop: reader.raise_parse_error("%s outside %s block" % (operator, set(["for", "while"]))) body.chunks.append(_Statement(contents, line)) continue else: reader.raise_parse_error("unknown operator: %r" % operator)
[ "def", "_parse", "(", "reader", ",", "template", ",", "in_block", "=", "None", ",", "in_loop", "=", "None", ")", ":", "body", "=", "_ChunkList", "(", "[", "]", ")", "while", "True", ":", "# Find next template directive", "curly", "=", "0", "while", "True...
https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-darwin/x64/tornado/template.py#L795-L976
hakril/PythonForWindows
61e027a678d5b87aa64fcf8a37a6661a86236589
windows/winproxy/apis/kernel32.py
python
GetCurrentThread
()
return GetCurrentThread.ctypes_function()
[]
def GetCurrentThread(): return GetCurrentThread.ctypes_function()
[ "def", "GetCurrentThread", "(", ")", ":", "return", "GetCurrentThread", ".", "ctypes_function", "(", ")" ]
https://github.com/hakril/PythonForWindows/blob/61e027a678d5b87aa64fcf8a37a6661a86236589/windows/winproxy/apis/kernel32.py#L166-L167
lightkurve/lightkurve
70d1c4cd1ab30f24c83e54bdcea4dd16624bfd9c
src/lightkurve/interact.py
python
_correct_with_proper_motion
(ra, dec, pm_ra, pm_dec, equinox, new_time)
return new_c.ra, new_c.dec, True
Return proper-motion corrected RA / Dec. It also return whether proper motion correction is applied or not.
Return proper-motion corrected RA / Dec. It also return whether proper motion correction is applied or not.
[ "Return", "proper", "-", "motion", "corrected", "RA", "/", "Dec", ".", "It", "also", "return", "whether", "proper", "motion", "correction", "is", "applied", "or", "not", "." ]
def _correct_with_proper_motion(ra, dec, pm_ra, pm_dec, equinox, new_time): """Return proper-motion corrected RA / Dec. It also return whether proper motion correction is applied or not.""" # all parameters have units if ra is None or dec is None or \ pm_ra is None or pm_dec is None or (np.all(pm_ra == 0) and np.all(pm_dec == 0)) or \ equinox is None: return ra, dec, False # To be more accurate, we should have supplied distance to SkyCoord # in theory, for Gaia DR2 data, we can infer the distance from the parallax provided. # It is not done for 2 reasons: # 1. Gaia DR2 data has negative parallax values occasionally. Correctly handling them could be tricky. See: # https://www.cosmos.esa.int/documents/29201/1773953/Gaia+DR2+primer+version+1.3.pdf/a4459741-6732-7a98-1406-a1bea243df79 # 2. For our purpose (ploting in various interact usage) here, the added distance does not making # noticeable significant difference. E.g., applying it to Proxima Cen, a target with large parallax # and huge proper motion, does not change the result in any noticeable way. # c = SkyCoord(ra, dec, pm_ra_cosdec=pm_ra, pm_dec=pm_dec, frame='icrs', obstime=equinox) # Suppress ErfaWarning temporarily as a workaround for: # https://github.com/astropy/astropy/issues/11747 with warnings.catch_warnings(): # the same warning appears both as an ErfaWarning and a astropy warning # so we filter by the message instead warnings.filterwarnings("ignore", message="ERFA function") new_c = c.apply_space_motion(new_obstime=new_time) return new_c.ra, new_c.dec, True
[ "def", "_correct_with_proper_motion", "(", "ra", ",", "dec", ",", "pm_ra", ",", "pm_dec", ",", "equinox", ",", "new_time", ")", ":", "# all parameters have units", "if", "ra", "is", "None", "or", "dec", "is", "None", "or", "pm_ra", "is", "None", "or", "pm_...
https://github.com/lightkurve/lightkurve/blob/70d1c4cd1ab30f24c83e54bdcea4dd16624bfd9c/src/lightkurve/interact.py#L80-L109
ganeti/ganeti
d340a9ddd12f501bef57da421b5f9b969a4ba905
lib/cmdlib/cluster/__init__.py
python
LUClusterDestroy.BuildHooksNodes
(self)
return ([], [])
Build hooks nodes.
Build hooks nodes.
[ "Build", "hooks", "nodes", "." ]
def BuildHooksNodes(self): """Build hooks nodes. """ return ([], [])
[ "def", "BuildHooksNodes", "(", "self", ")", ":", "return", "(", "[", "]", ",", "[", "]", ")" ]
https://github.com/ganeti/ganeti/blob/d340a9ddd12f501bef57da421b5f9b969a4ba905/lib/cmdlib/cluster/__init__.py#L290-L294
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/sonos/config_flow.py
python
_async_has_devices
(hass: HomeAssistant)
return bool(await ssdp.async_get_discovery_info_by_st(hass, UPNP_ST))
Return if Sonos devices have been seen recently with SSDP.
Return if Sonos devices have been seen recently with SSDP.
[ "Return", "if", "Sonos", "devices", "have", "been", "seen", "recently", "with", "SSDP", "." ]
async def _async_has_devices(hass: HomeAssistant) -> bool: """Return if Sonos devices have been seen recently with SSDP.""" return bool(await ssdp.async_get_discovery_info_by_st(hass, UPNP_ST))
[ "async", "def", "_async_has_devices", "(", "hass", ":", "HomeAssistant", ")", "->", "bool", ":", "return", "bool", "(", "await", "ssdp", ".", "async_get_discovery_info_by_st", "(", "hass", ",", "UPNP_ST", ")", ")" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/sonos/config_flow.py#L14-L16
dashingsoft/pyarmor
4bbe0ad4223c04ddc29b2bf3b07ad7158d7f1d87
src/examples/pybench/pybench.py
python
Benchmark.stat
(self)
return (min_time, avg_time, max_time)
Return benchmark run statistics as tuple: (minimum round time, average round time, maximum round time) XXX Currently not used, since the benchmark does test statistics across all rounds.
Return benchmark run statistics as tuple:
[ "Return", "benchmark", "run", "statistics", "as", "tuple", ":" ]
def stat(self): """ Return benchmark run statistics as tuple: (minimum round time, average round time, maximum round time) XXX Currently not used, since the benchmark does test statistics across all rounds. """ runs = len(self.roundtimes) if runs == 0: return 0.0, 0.0 min_time = min(self.roundtimes) total_time = sum(self.roundtimes) avg_time = total_time / float(runs) max_time = max(self.roundtimes) return (min_time, avg_time, max_time)
[ "def", "stat", "(", "self", ")", ":", "runs", "=", "len", "(", "self", ".", "roundtimes", ")", "if", "runs", "==", "0", ":", "return", "0.0", ",", "0.0", "min_time", "=", "min", "(", "self", ".", "roundtimes", ")", "total_time", "=", "sum", "(", ...
https://github.com/dashingsoft/pyarmor/blob/4bbe0ad4223c04ddc29b2bf3b07ad7158d7f1d87/src/examples/pybench/pybench.py#L560-L579
datastax/python-driver
5fdb0061f56f53b9d8d8ad67b99110899653ad77
cassandra/policies.py
python
EC2MultiRegionTranslator.translate
(self, addr)
return addr
Reverse DNS the public broadcast_address, then lookup that hostname to get the AWS-resolved IP, which will point to the private IP address within the same datacenter.
Reverse DNS the public broadcast_address, then lookup that hostname to get the AWS-resolved IP, which will point to the private IP address within the same datacenter.
[ "Reverse", "DNS", "the", "public", "broadcast_address", "then", "lookup", "that", "hostname", "to", "get", "the", "AWS", "-", "resolved", "IP", "which", "will", "point", "to", "the", "private", "IP", "address", "within", "the", "same", "datacenter", "." ]
def translate(self, addr): """ Reverse DNS the public broadcast_address, then lookup that hostname to get the AWS-resolved IP, which will point to the private IP address within the same datacenter. """ # get family of this address so we translate to the same family = socket.getaddrinfo(addr, 0, socket.AF_UNSPEC, socket.SOCK_STREAM)[0][0] host = socket.getfqdn(addr) for a in socket.getaddrinfo(host, 0, family, socket.SOCK_STREAM): try: return a[4][0] except Exception: pass return addr
[ "def", "translate", "(", "self", ",", "addr", ")", ":", "# get family of this address so we translate to the same", "family", "=", "socket", ".", "getaddrinfo", "(", "addr", ",", "0", ",", "socket", ".", "AF_UNSPEC", ",", "socket", ".", "SOCK_STREAM", ")", "[", ...
https://github.com/datastax/python-driver/blob/5fdb0061f56f53b9d8d8ad67b99110899653ad77/cassandra/policies.py#L1034-L1047
nlloyd/SubliminalCollaborator
5c619e17ddbe8acb9eea8996ec038169ddcd50a1
libs/twisted/internet/udp.py
python
Port.connectionLost
(self, reason=None)
Cleans up my socket.
Cleans up my socket.
[ "Cleans", "up", "my", "socket", "." ]
def connectionLost(self, reason=None): """ Cleans up my socket. """ log.msg('(UDP Port %s Closed)' % self._realPortNumber) self._realPortNumber = None base.BasePort.connectionLost(self, reason) self.protocol.doStop() self.socket.close() del self.socket del self.fileno if hasattr(self, "d"): self.d.callback(None) del self.d
[ "def", "connectionLost", "(", "self", ",", "reason", "=", "None", ")", ":", "log", ".", "msg", "(", "'(UDP Port %s Closed)'", "%", "self", ".", "_realPortNumber", ")", "self", ".", "_realPortNumber", "=", "None", "base", ".", "BasePort", ".", "connectionLost...
https://github.com/nlloyd/SubliminalCollaborator/blob/5c619e17ddbe8acb9eea8996ec038169ddcd50a1/libs/twisted/internet/udp.py#L230-L243
santhoshkolloju/Abstractive-Summarization-With-Transfer-Learning
97ff2ae3ba9f2d478e174444c4e0f5349f28c319
texar_repo/texar/modules/decoders/rnn_decoder_base.py
python
RNNDecoderBase.cell
(self)
return self._cell
The RNN cell.
The RNN cell.
[ "The", "RNN", "cell", "." ]
def cell(self): """The RNN cell. """ return self._cell
[ "def", "cell", "(", "self", ")", ":", "return", "self", ".", "_cell" ]
https://github.com/santhoshkolloju/Abstractive-Summarization-With-Transfer-Learning/blob/97ff2ae3ba9f2d478e174444c4e0f5349f28c319/texar_repo/texar/modules/decoders/rnn_decoder_base.py#L486-L489
quantumlib/Cirq
89f88b01d69222d3f1ec14d649b7b3a85ed9211f
cirq-core/cirq/sim/density_matrix_simulator.py
python
DensityMatrixSimulator.__init__
( self, *, dtype: 'DTypeLike' = np.complex64, noise: 'cirq.NOISE_MODEL_LIKE' = None, seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None, ignore_measurement_results: bool = False, split_untangled_states: bool = True, )
Density matrix simulator. Args: dtype: The `numpy.dtype` used by the simulation. One of `numpy.complex64` or `numpy.complex128` noise: A noise model to apply while simulating. seed: The random seed to use for this simulator. ignore_measurement_results: if True, then the simulation will treat measurement as dephasing instead of collapsing process. split_untangled_states: If True, optimizes simulation by running unentangled qubit sets independently and merging those states at the end. Raises: ValueError: If the supplied dtype is not `np.complex64` or `np.complex128`. Example: >>> (q0,) = cirq.LineQubit.range(1) >>> circuit = cirq.Circuit(cirq.H(q0), cirq.measure(q0)) Default case (ignore_measurement_results = False): >>> simulator = cirq.DensityMatrixSimulator() >>> result = simulator.run(circuit) The measurement result will be strictly one of 0 or 1. In the other case: >>> simulator = cirq.DensityMatrixSimulator( ... ignore_measurement_results = True) Will raise a `ValueError` exception if you call `simulator.run` when `ignore_measurement_results` has been set to True (for more see https://github.com/quantumlib/Cirq/issues/2777).
Density matrix simulator.
[ "Density", "matrix", "simulator", "." ]
def __init__( self, *, dtype: 'DTypeLike' = np.complex64, noise: 'cirq.NOISE_MODEL_LIKE' = None, seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None, ignore_measurement_results: bool = False, split_untangled_states: bool = True, ): """Density matrix simulator. Args: dtype: The `numpy.dtype` used by the simulation. One of `numpy.complex64` or `numpy.complex128` noise: A noise model to apply while simulating. seed: The random seed to use for this simulator. ignore_measurement_results: if True, then the simulation will treat measurement as dephasing instead of collapsing process. split_untangled_states: If True, optimizes simulation by running unentangled qubit sets independently and merging those states at the end. Raises: ValueError: If the supplied dtype is not `np.complex64` or `np.complex128`. Example: >>> (q0,) = cirq.LineQubit.range(1) >>> circuit = cirq.Circuit(cirq.H(q0), cirq.measure(q0)) Default case (ignore_measurement_results = False): >>> simulator = cirq.DensityMatrixSimulator() >>> result = simulator.run(circuit) The measurement result will be strictly one of 0 or 1. In the other case: >>> simulator = cirq.DensityMatrixSimulator( ... ignore_measurement_results = True) Will raise a `ValueError` exception if you call `simulator.run` when `ignore_measurement_results` has been set to True (for more see https://github.com/quantumlib/Cirq/issues/2777). """ super().__init__( dtype=dtype, noise=noise, seed=seed, ignore_measurement_results=ignore_measurement_results, split_untangled_states=split_untangled_states, ) if dtype not in {np.complex64, np.complex128}: raise ValueError(f'dtype must be complex64 or complex128, was {dtype}')
[ "def", "__init__", "(", "self", ",", "*", ",", "dtype", ":", "'DTypeLike'", "=", "np", ".", "complex64", ",", "noise", ":", "'cirq.NOISE_MODEL_LIKE'", "=", "None", ",", "seed", ":", "'cirq.RANDOM_STATE_OR_SEED_LIKE'", "=", "None", ",", "ignore_measurement_result...
https://github.com/quantumlib/Cirq/blob/89f88b01d69222d3f1ec14d649b7b3a85ed9211f/cirq-core/cirq/sim/density_matrix_simulator.py#L120-L173
wandb/client
3963364d8112b7dedb928fa423b6878ea1b467d9
wandb/vendor/watchdog/utils/dirsnapshot.py
python
DirectorySnapshot.path
(self, id)
return self._inode_to_path.get(id)
Returns path for id. None if id is unknown to this snapshot.
Returns path for id. None if id is unknown to this snapshot.
[ "Returns", "path", "for", "id", ".", "None", "if", "id", "is", "unknown", "to", "this", "snapshot", "." ]
def path(self, id): """ Returns path for id. None if id is unknown to this snapshot. """ return self._inode_to_path.get(id)
[ "def", "path", "(", "self", ",", "id", ")", ":", "return", "self", ".", "_inode_to_path", ".", "get", "(", "id", ")" ]
https://github.com/wandb/client/blob/3963364d8112b7dedb928fa423b6878ea1b467d9/wandb/vendor/watchdog/utils/dirsnapshot.py#L248-L252
ipython/ipyparallel
d35d4fb9501da5b3280b11e83ed633a95f17be1d
ipyparallel/client/view.py
python
BroadcastView.execute
(self, code, silent=True, targets=None, block=None)
return ar
Executes `code` on `targets` in blocking or nonblocking manner. ``execute`` is always `bound` (affects engine namespace) Parameters ---------- code : str the code string to be executed block : bool whether or not to wait until done to return default: self.block
Executes `code` on `targets` in blocking or nonblocking manner.
[ "Executes", "code", "on", "targets", "in", "blocking", "or", "nonblocking", "manner", "." ]
def execute(self, code, silent=True, targets=None, block=None): """Executes `code` on `targets` in blocking or nonblocking manner. ``execute`` is always `bound` (affects engine namespace) Parameters ---------- code : str the code string to be executed block : bool whether or not to wait until done to return default: self.block """ block = self.block if block is None else block targets = self.targets if targets is None else targets _idents, _targets = self.client._build_targets(targets) s_idents = [ident.decode("utf8") for ident in _idents] target_tuples = list(zip(s_idents, _targets)) metadata = self._init_metadata(target_tuples) ar = None def make_asyncresult(message_future): nonlocal ar ar = self._make_async_result( message_future, s_idents, fname='execute', targets=_targets ) message_future = self.client.send_execute_request( self._socket, code, silent=silent, metadata=metadata, message_future_hook=make_asyncresult, ) if block: try: ar.get() ar.wait_for_output() except KeyboardInterrupt: pass return ar
[ "def", "execute", "(", "self", ",", "code", ",", "silent", "=", "True", ",", "targets", "=", "None", ",", "block", "=", "None", ")", ":", "block", "=", "self", ".", "block", "if", "block", "is", "None", "else", "block", "targets", "=", "self", ".",...
https://github.com/ipython/ipyparallel/blob/d35d4fb9501da5b3280b11e83ed633a95f17be1d/ipyparallel/client/view.py#L983-L1026
fake-name/ReadableWebProxy
ed5c7abe38706acc2684a1e6cd80242a03c5f010
WebMirror/management/rss_parser_funcs/feed_parse_extractYanyantlsWordpressCom.py
python
extractYanyantlsWordpressCom
(item)
return False
Parser for 'yanyantls.wordpress.com'
Parser for 'yanyantls.wordpress.com'
[ "Parser", "for", "yanyantls", ".", "wordpress", ".", "com" ]
def extractYanyantlsWordpressCom(item): ''' Parser for 'yanyantls.wordpress.com' ''' vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title']) if not (chp or vol) or "preview" in item['title'].lower(): return None tagmap = [ ('the star light shines when he came', 'the star light shines when he came', 'translated'), ('when two alphas meet ones an omega', 'When Two Alphas Meet, One’s an Omega', 'translated'), ('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel'), ] for tagname, name, tl_type in tagmap: if tagname in item['tags']: return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type) return False
[ "def", "extractYanyantlsWordpressCom", "(", "item", ")", ":", "vol", ",", "chp", ",", "frag", ",", "postfix", "=", "extractVolChapterFragmentPostfix", "(", "item", "[", "'title'", "]", ")", "if", "not", "(", "chp", "or", "vol", ")", "or", "\"preview\"", "i...
https://github.com/fake-name/ReadableWebProxy/blob/ed5c7abe38706acc2684a1e6cd80242a03c5f010/WebMirror/management/rss_parser_funcs/feed_parse_extractYanyantlsWordpressCom.py#L1-L22
robotlearn/pyrobolearn
9cd7c060723fda7d2779fa255ac998c2c82b8436
pyrobolearn/distributions/modules.py
python
init_orthogonal_weights_and_constant_biases
(module, gain=1., val=0.)
return module
Initialize the weights of the module to be orthogonal and with a bias of 0s. This is inspired by [1]. Args: gain (float): optional scaling factor for the orthogonal weights. val (float): val: the value to fill the bias tensor with. Returns: torch.nn.Module: the initialized module. References: [1] https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail/blob/master/a2c_ppo_acktr/distributions.py
Initialize the weights of the module to be orthogonal and with a bias of 0s. This is inspired by [1].
[ "Initialize", "the", "weights", "of", "the", "module", "to", "be", "orthogonal", "and", "with", "a", "bias", "of", "0s", ".", "This", "is", "inspired", "by", "[", "1", "]", "." ]
def init_orthogonal_weights_and_constant_biases(module, gain=1., val=0.): """Initialize the weights of the module to be orthogonal and with a bias of 0s. This is inspired by [1]. Args: gain (float): optional scaling factor for the orthogonal weights. val (float): val: the value to fill the bias tensor with. Returns: torch.nn.Module: the initialized module. References: [1] https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail/blob/master/a2c_ppo_acktr/distributions.py """ weight_init = wrap_init_tensor(torch.nn.init.orthogonal_, gain=gain) weight_bias = wrap_init_tensor(torch.nn.init.constant_, val=val) module = init_module(module, weight_init, weight_bias) return module
[ "def", "init_orthogonal_weights_and_constant_biases", "(", "module", ",", "gain", "=", "1.", ",", "val", "=", "0.", ")", ":", "weight_init", "=", "wrap_init_tensor", "(", "torch", ".", "nn", ".", "init", ".", "orthogonal_", ",", "gain", "=", "gain", ")", "...
https://github.com/robotlearn/pyrobolearn/blob/9cd7c060723fda7d2779fa255ac998c2c82b8436/pyrobolearn/distributions/modules.py#L83-L99
biopython/biopython
2dd97e71762af7b046d7f7f8a4f1e38db6b06c86
Bio/motifs/transfac.py
python
Record.__init__
(self)
Initialize the class.
Initialize the class.
[ "Initialize", "the", "class", "." ]
def __init__(self): """Initialize the class.""" self.version = None
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "version", "=", "None" ]
https://github.com/biopython/biopython/blob/2dd97e71762af7b046d7f7f8a4f1e38db6b06c86/Bio/motifs/transfac.py#L83-L85
mikew/ss-plex.bundle
031566c06205e08a8cb15c57a0c143fba5270493
Contents/Libraries/Shared/nose/plugins/base.py
python
IPluginInterface.addOptions
(self, parser, env)
Called to allow plugin to register command-line options with the parser. DO NOT return a value from this method unless you want to stop all other plugins from setting their options. .. warning :: DEPRECATED -- implement :meth:`options <nose.plugins.base.IPluginInterface.options>` instead.
Called to allow plugin to register command-line options with the parser. DO NOT return a value from this method unless you want to stop all other plugins from setting their options.
[ "Called", "to", "allow", "plugin", "to", "register", "command", "-", "line", "options", "with", "the", "parser", ".", "DO", "NOT", "return", "a", "value", "from", "this", "method", "unless", "you", "want", "to", "stop", "all", "other", "plugins", "from", ...
def addOptions(self, parser, env): """Called to allow plugin to register command-line options with the parser. DO NOT return a value from this method unless you want to stop all other plugins from setting their options. .. warning :: DEPRECATED -- implement :meth:`options <nose.plugins.base.IPluginInterface.options>` instead. """ pass
[ "def", "addOptions", "(", "self", ",", "parser", ",", "env", ")", ":", "pass" ]
https://github.com/mikew/ss-plex.bundle/blob/031566c06205e08a8cb15c57a0c143fba5270493/Contents/Libraries/Shared/nose/plugins/base.py#L128-L138
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_openshift/library/oc_secret.py
python
Secret.secrets
(self)
return self._secrets
secret property setter
secret property setter
[ "secret", "property", "setter" ]
def secrets(self): '''secret property setter''' if self._secrets is None: self._secrets = self.get_secrets() return self._secrets
[ "def", "secrets", "(", "self", ")", ":", "if", "self", ".", "_secrets", "is", "None", ":", "self", ".", "_secrets", "=", "self", ".", "get_secrets", "(", ")", "return", "self", ".", "_secrets" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_openshift/library/oc_secret.py#L1575-L1579
sympy/sympy
d822fcba181155b85ff2b29fe525adbafb22b448
sympy/geometry/point.py
python
Point.equals
(self, other)
return all(a.equals(b) for a, b in zip(self, other))
Returns whether the coordinates of self and other agree.
Returns whether the coordinates of self and other agree.
[ "Returns", "whether", "the", "coordinates", "of", "self", "and", "other", "agree", "." ]
def equals(self, other): """Returns whether the coordinates of self and other agree.""" # a point is equal to another point if all its components are equal if not isinstance(other, Point) or len(self) != len(other): return False return all(a.equals(b) for a, b in zip(self, other))
[ "def", "equals", "(", "self", ",", "other", ")", ":", "# a point is equal to another point if all its components are equal", "if", "not", "isinstance", "(", "other", ",", "Point", ")", "or", "len", "(", "self", ")", "!=", "len", "(", "other", ")", ":", "return...
https://github.com/sympy/sympy/blob/d822fcba181155b85ff2b29fe525adbafb22b448/sympy/geometry/point.py#L445-L450
wistbean/fxxkpython
88e16d79d8dd37236ba6ecd0d0ff11d63143968c
vip/qyxuan/projects/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_internal/vcs/__init__.py
python
VcsSupport.get_backend_type
(self, location)
return None
Return the type of the version control backend if found at given location, e.g. vcs.get_backend_type('/path/to/vcs/checkout')
Return the type of the version control backend if found at given location, e.g. vcs.get_backend_type('/path/to/vcs/checkout')
[ "Return", "the", "type", "of", "the", "version", "control", "backend", "if", "found", "at", "given", "location", "e", ".", "g", ".", "vcs", ".", "get_backend_type", "(", "/", "path", "/", "to", "/", "vcs", "/", "checkout", ")" ]
def get_backend_type(self, location): # type: (str) -> Optional[Type[VersionControl]] """ Return the type of the version control backend if found at given location, e.g. vcs.get_backend_type('/path/to/vcs/checkout') """ for vc_type in self._registry.values(): if vc_type.controls_location(location): logger.debug('Determine that %s uses VCS: %s', location, vc_type.name) return vc_type return None
[ "def", "get_backend_type", "(", "self", ",", "location", ")", ":", "# type: (str) -> Optional[Type[VersionControl]]", "for", "vc_type", "in", "self", ".", "_registry", ".", "values", "(", ")", ":", "if", "vc_type", ".", "controls_location", "(", "location", ")", ...
https://github.com/wistbean/fxxkpython/blob/88e16d79d8dd37236ba6ecd0d0ff11d63143968c/vip/qyxuan/projects/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_internal/vcs/__init__.py#L155-L166
krintoxi/NoobSec-Toolkit
38738541cbc03cedb9a3b3ed13b629f781ad64f6
NoobSecToolkit - MAC OSX/tools/sqli/thirdparty/bottle/bottle.py
python
JSONPlugin.apply
(self, callback, route)
return wrapper
[]
def apply(self, callback, route): dumps = self.json_dumps if not dumps: return callback def wrapper(*a, **ka): rv = callback(*a, **ka) if isinstance(rv, dict): #Attempt to serialize, raises exception on failure json_response = dumps(rv) #Set content type only if serialization succesful response.content_type = 'application/json' return json_response return rv return wrapper
[ "def", "apply", "(", "self", ",", "callback", ",", "route", ")", ":", "dumps", "=", "self", ".", "json_dumps", "if", "not", "dumps", ":", "return", "callback", "def", "wrapper", "(", "*", "a", ",", "*", "*", "ka", ")", ":", "rv", "=", "callback", ...
https://github.com/krintoxi/NoobSec-Toolkit/blob/38738541cbc03cedb9a3b3ed13b629f781ad64f6/NoobSecToolkit - MAC OSX/tools/sqli/thirdparty/bottle/bottle.py#L1573-L1585
beeware/ouroboros
a29123c6fab6a807caffbb7587cf548e0c370296
ouroboros/datetime.py
python
_days_before_month
(year, month)
return _DAYS_BEFORE_MONTH[month] + (month > 2 and _is_leap(year))
year, month -> number of days in year preceding first day of month.
year, month -> number of days in year preceding first day of month.
[ "year", "month", "-", ">", "number", "of", "days", "in", "year", "preceding", "first", "day", "of", "month", "." ]
def _days_before_month(year, month): "year, month -> number of days in year preceding first day of month." assert 1 <= month <= 12, 'month must be in 1..12' return _DAYS_BEFORE_MONTH[month] + (month > 2 and _is_leap(year))
[ "def", "_days_before_month", "(", "year", ",", "month", ")", ":", "assert", "1", "<=", "month", "<=", "12", ",", "'month must be in 1..12'", "return", "_DAYS_BEFORE_MONTH", "[", "month", "]", "+", "(", "month", ">", "2", "and", "_is_leap", "(", "year", ")"...
https://github.com/beeware/ouroboros/blob/a29123c6fab6a807caffbb7587cf548e0c370296/ouroboros/datetime.py#L52-L55
IBM/watson-online-store
4c8b60883b319f07c3187d9cb433ef9c3ae29aea
watsononlinestore/watson_online_store.py
python
SlackSender.send_message
(self, message)
Sends message via Slack API. :param str message: The message to be sent to slack
Sends message via Slack API.
[ "Sends", "message", "via", "Slack", "API", "." ]
def send_message(self, message): """Sends message via Slack API. :param str message: The message to be sent to slack """ self.slack_client.api_call("chat.postMessage", channel=self.channel, text=message, as_user=True)
[ "def", "send_message", "(", "self", ",", "message", ")", ":", "self", ".", "slack_client", ".", "api_call", "(", "\"chat.postMessage\"", ",", "channel", "=", "self", ".", "channel", ",", "text", "=", "message", ",", "as_user", "=", "True", ")" ]
https://github.com/IBM/watson-online-store/blob/4c8b60883b319f07c3187d9cb433ef9c3ae29aea/watsononlinestore/watson_online_store.py#L44-L52
oracle/graalpython
577e02da9755d916056184ec441c26e00b70145c
graalpython/lib-python/3/tkinter/__init__.py
python
Text.index
(self, index)
return str(self.tk.call(self._w, 'index', index))
Return the index in the form line.char for INDEX.
Return the index in the form line.char for INDEX.
[ "Return", "the", "index", "in", "the", "form", "line", ".", "char", "for", "INDEX", "." ]
def index(self, index): """Return the index in the form line.char for INDEX.""" return str(self.tk.call(self._w, 'index', index))
[ "def", "index", "(", "self", ",", "index", ")", ":", "return", "str", "(", "self", ".", "tk", ".", "call", "(", "self", ".", "_w", ",", "'index'", ",", "index", ")", ")" ]
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/tkinter/__init__.py#L3731-L3733
googleads/google-ads-python
2a1d6062221f6aad1992a6bcca0e7e4a93d2db86
google/ads/googleads/v9/services/services/feed_item_target_service/client.py
python
FeedItemTargetServiceClient.campaign_path
(customer_id: str, campaign_id: str,)
return "customers/{customer_id}/campaigns/{campaign_id}".format( customer_id=customer_id, campaign_id=campaign_id, )
Return a fully-qualified campaign string.
Return a fully-qualified campaign string.
[ "Return", "a", "fully", "-", "qualified", "campaign", "string", "." ]
def campaign_path(customer_id: str, campaign_id: str,) -> str: """Return a fully-qualified campaign string.""" return "customers/{customer_id}/campaigns/{campaign_id}".format( customer_id=customer_id, campaign_id=campaign_id, )
[ "def", "campaign_path", "(", "customer_id", ":", "str", ",", "campaign_id", ":", "str", ",", ")", "->", "str", ":", "return", "\"customers/{customer_id}/campaigns/{campaign_id}\"", ".", "format", "(", "customer_id", "=", "customer_id", ",", "campaign_id", "=", "ca...
https://github.com/googleads/google-ads-python/blob/2a1d6062221f6aad1992a6bcca0e7e4a93d2db86/google/ads/googleads/v9/services/services/feed_item_target_service/client.py#L192-L196
python-cmd2/cmd2
c1f6114d52161a3b8a32d3cee1c495d79052e1fb
cmd2/cmd2.py
python
Cmd.flag_based_complete
( self, text: str, line: str, begidx: int, endidx: int, flag_dict: Dict[str, Union[Iterable[str], CompleterFunc]], *, all_else: Union[None, Iterable[str], CompleterFunc] = None, )
return completions_matches
Tab completes based on a particular flag preceding the token being completed. :param text: the string prefix we are attempting to match (all matches must begin with it) :param line: the current input line with leading whitespace removed :param begidx: the beginning index of the prefix text :param endidx: the ending index of the prefix text :param flag_dict: dictionary whose structure is the following: `keys` - flags (ex: -c, --create) that result in tab completion for the next argument in the command line `values` - there are two types of values: 1. iterable list of strings to match against (dictionaries, lists, etc.) 2. function that performs tab completion (ex: path_complete) :param all_else: an optional parameter for tab completing any token that isn't preceded by a flag in flag_dict :return: a list of possible tab completions
Tab completes based on a particular flag preceding the token being completed.
[ "Tab", "completes", "based", "on", "a", "particular", "flag", "preceding", "the", "token", "being", "completed", "." ]
def flag_based_complete( self, text: str, line: str, begidx: int, endidx: int, flag_dict: Dict[str, Union[Iterable[str], CompleterFunc]], *, all_else: Union[None, Iterable[str], CompleterFunc] = None, ) -> List[str]: """Tab completes based on a particular flag preceding the token being completed. :param text: the string prefix we are attempting to match (all matches must begin with it) :param line: the current input line with leading whitespace removed :param begidx: the beginning index of the prefix text :param endidx: the ending index of the prefix text :param flag_dict: dictionary whose structure is the following: `keys` - flags (ex: -c, --create) that result in tab completion for the next argument in the command line `values` - there are two types of values: 1. iterable list of strings to match against (dictionaries, lists, etc.) 2. function that performs tab completion (ex: path_complete) :param all_else: an optional parameter for tab completing any token that isn't preceded by a flag in flag_dict :return: a list of possible tab completions """ # Get all tokens through the one being completed tokens, _ = self.tokens_for_completion(line, begidx, endidx) if not tokens: # pragma: no cover return [] completions_matches = [] match_against = all_else # Must have at least 2 args for a flag to precede the token being completed if len(tokens) > 1: flag = tokens[-2] if flag in flag_dict: match_against = flag_dict[flag] # Perform tab completion using an Iterable if isinstance(match_against, Iterable): completions_matches = self.basic_complete(text, line, begidx, endidx, match_against) # Perform tab completion using a function elif callable(match_against): completions_matches = match_against(text, line, begidx, endidx) return completions_matches
[ "def", "flag_based_complete", "(", "self", ",", "text", ":", "str", ",", "line", ":", "str", ",", "begidx", ":", "int", ",", "endidx", ":", "int", ",", "flag_dict", ":", "Dict", "[", "str", ",", "Union", "[", "Iterable", "[", "str", "]", ",", "Comp...
https://github.com/python-cmd2/cmd2/blob/c1f6114d52161a3b8a32d3cee1c495d79052e1fb/cmd2/cmd2.py#L1381-L1428
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_hxb2/lib/python3.5/site-packages/pip/_vendor/html5lib/_inputstream.py
python
HTMLBinaryInputStream.openStream
(self, source)
return stream
Produces a file object from source. source can be either a file object, local filename or a string.
Produces a file object from source.
[ "Produces", "a", "file", "object", "from", "source", "." ]
def openStream(self, source): """Produces a file object from source. source can be either a file object, local filename or a string. """ # Already a file object if hasattr(source, 'read'): stream = source else: stream = BytesIO(source) try: stream.seek(stream.tell()) except: # pylint:disable=bare-except stream = BufferedStream(stream) return stream
[ "def", "openStream", "(", "self", ",", "source", ")", ":", "# Already a file object", "if", "hasattr", "(", "source", ",", "'read'", ")", ":", "stream", "=", "source", "else", ":", "stream", "=", "BytesIO", "(", "source", ")", "try", ":", "stream", ".", ...
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/pip/_vendor/html5lib/_inputstream.py#L438-L455
WikidPad/WikidPad
558109638807bc76b4672922686e416ab2d5f79c
WikidPad/lib/aui/framemanager.py
python
AuiPaneInfo.SetDockPos
(self, source)
return self
Copies the `source` pane members that pertain to docking position to `self`. :param `source`: the source pane from where to copy the attributes, an instance of :class:`AuiPaneInfo`.
Copies the `source` pane members that pertain to docking position to `self`.
[ "Copies", "the", "source", "pane", "members", "that", "pertain", "to", "docking", "position", "to", "self", "." ]
def SetDockPos(self, source): """ Copies the `source` pane members that pertain to docking position to `self`. :param `source`: the source pane from where to copy the attributes, an instance of :class:`AuiPaneInfo`. """ self.dock_direction = source.dock_direction self.dock_layer = source.dock_layer self.dock_row = source.dock_row self.dock_pos = source.dock_pos self.dock_proportion = source.dock_proportion self.floating_pos = wx.Point(*source.floating_pos) self.floating_size = wx.Size(*source.floating_size) self.rect = wx.Rect(*source.rect) return self
[ "def", "SetDockPos", "(", "self", ",", "source", ")", ":", "self", ".", "dock_direction", "=", "source", ".", "dock_direction", "self", ".", "dock_layer", "=", "source", ".", "dock_layer", "self", ".", "dock_row", "=", "source", ".", "dock_row", "self", "....
https://github.com/WikidPad/WikidPad/blob/558109638807bc76b4672922686e416ab2d5f79c/WikidPad/lib/aui/framemanager.py#L1685-L1702
Qiskit/qiskit-terra
b66030e3b9192efdd3eb95cf25c6545fe0a13da4
qiskit/algorithms/factorizers/shor.py
python
ShorResult.total_counts
(self)
return self._total_counts
returns total counts
returns total counts
[ "returns", "total", "counts" ]
def total_counts(self) -> int: """returns total counts""" return self._total_counts
[ "def", "total_counts", "(", "self", ")", "->", "int", ":", "return", "self", ".", "_total_counts" ]
https://github.com/Qiskit/qiskit-terra/blob/b66030e3b9192efdd3eb95cf25c6545fe0a13da4/qiskit/algorithms/factorizers/shor.py#L507-L509
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/wled/coordinator.py
python
WLEDDataUpdateCoordinator._use_websocket
(self)
Use WebSocket for updates, instead of polling.
Use WebSocket for updates, instead of polling.
[ "Use", "WebSocket", "for", "updates", "instead", "of", "polling", "." ]
def _use_websocket(self) -> None: """Use WebSocket for updates, instead of polling.""" async def listen() -> None: """Listen for state changes via WebSocket.""" try: await self.wled.connect() except WLEDError as err: self.logger.info(err) if self.unsub: self.unsub() self.unsub = None return try: await self.wled.listen(callback=self.async_set_updated_data) except WLEDConnectionClosed as err: self.last_update_success = False self.logger.info(err) except WLEDError as err: self.last_update_success = False self.update_listeners() self.logger.error(err) # Ensure we are disconnected await self.wled.disconnect() if self.unsub: self.unsub() self.unsub = None async def close_websocket(_) -> None: """Close WebSocket connection.""" self.unsub = None await self.wled.disconnect() # Clean disconnect WebSocket on Home Assistant shutdown self.unsub = self.hass.bus.async_listen_once( EVENT_HOMEASSISTANT_STOP, close_websocket ) # Start listening asyncio.create_task(listen())
[ "def", "_use_websocket", "(", "self", ")", "->", "None", ":", "async", "def", "listen", "(", ")", "->", "None", ":", "\"\"\"Listen for state changes via WebSocket.\"\"\"", "try", ":", "await", "self", ".", "wled", ".", "connect", "(", ")", "except", "WLEDError...
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/wled/coordinator.py#L63-L104
VirtueSecurity/aws-extender
d123b7e1a845847709ba3a481f11996bddc68a1c
BappModules/docutils/writers/_html_base.py
python
HTMLTranslator.check_simple_list
(self, node)
Check for a simple list that can be rendered compactly.
Check for a simple list that can be rendered compactly.
[ "Check", "for", "a", "simple", "list", "that", "can", "be", "rendered", "compactly", "." ]
def check_simple_list(self, node): """Check for a simple list that can be rendered compactly.""" visitor = SimpleListChecker(self.document) try: node.walk(visitor) except nodes.NodeFound: return False else: return True
[ "def", "check_simple_list", "(", "self", ",", "node", ")", ":", "visitor", "=", "SimpleListChecker", "(", "self", ".", "document", ")", "try", ":", "node", ".", "walk", "(", "visitor", ")", "except", "nodes", ".", "NodeFound", ":", "return", "False", "el...
https://github.com/VirtueSecurity/aws-extender/blob/d123b7e1a845847709ba3a481f11996bddc68a1c/BappModules/docutils/writers/_html_base.py#L479-L487
Runbook/runbook
7b68622f75ef09f654046f0394540025f3ee7445
src/monitors/checks/cloudflare-traffic-decrease/__init__.py
python
check
(**kwargs)
Grab Zone Analytics from CloudFlare API and determine status
Grab Zone Analytics from CloudFlare API and determine status
[ "Grab", "Zone", "Analytics", "from", "CloudFlare", "API", "and", "determine", "status" ]
def check(**kwargs): ''' Grab Zone Analytics from CloudFlare API and determine status ''' jdata = kwargs['jdata'] logger = kwargs['logger'] # Grab and process metrics metrics = cloudflare.get_zone_analytics(jdata['data']['email'], jdata['data']['apikey'], jdata['data']['domain'], logger, jdata['data']['start_time'], "0") if not metrics: return None time_pattern = "%Y-%m-%dT%H:%M:%SZ" time_delta = metrics['query']['time_delta'] * 60 previous_interval = metrics['result']['timeseries'][-2] current_interval = metrics['result']['timeseries'][-1] # Calculate whether traffic increased or not percent = ((float(current_interval['requests']['all']) + 1) / (float(previous_interval['requests']['all']) + 1)) * 100 msg = "cloudflare-traffic-decrease:" msg = msg + " current_requests=%s" % str(current_interval['requests']['all']) msg = msg + " previous_requests=%s" % str(previous_interval['requests']['all']) msg = msg + " percent=%s" % str(percent) logger.debug(msg) if percent > 100: if "true" in jdata['data']['return_value']: return False else: return True delta = 100 - percent if delta > float(jdata['data']['threshold']): if "true" in jdata['data']['return_value']: return True else: return False else: if "true" in jdata['data']['return_value']: return False else: return True
[ "def", "check", "(", "*", "*", "kwargs", ")", ":", "jdata", "=", "kwargs", "[", "'jdata'", "]", "logger", "=", "kwargs", "[", "'logger'", "]", "# Grab and process metrics", "metrics", "=", "cloudflare", ".", "get_zone_analytics", "(", "jdata", "[", "'data'",...
https://github.com/Runbook/runbook/blob/7b68622f75ef09f654046f0394540025f3ee7445/src/monitors/checks/cloudflare-traffic-decrease/__init__.py#L6-L50
seppius-xbmc-repo/ru
d0879d56ec8243b2c7af44fda5cf3d1ff77fd2e2
script.module.simplejson/lib/simplejson/scanner.py
python
py_make_scanner
(context)
return scan_once
[]
def py_make_scanner(context): parse_object = context.parse_object parse_array = context.parse_array parse_string = context.parse_string match_number = NUMBER_RE.match encoding = context.encoding strict = context.strict parse_float = context.parse_float parse_int = context.parse_int parse_constant = context.parse_constant object_hook = context.object_hook object_pairs_hook = context.object_pairs_hook memo = context.memo def _scan_once(string, idx): errmsg = 'Expecting value' try: nextchar = string[idx] except IndexError: raise JSONDecodeError(errmsg, string, idx) if nextchar == '"': return parse_string(string, idx + 1, encoding, strict) elif nextchar == '{': return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook, object_pairs_hook, memo) elif nextchar == '[': return parse_array((string, idx + 1), _scan_once) elif nextchar == 'n' and string[idx:idx + 4] == 'null': return None, idx + 4 elif nextchar == 't' and string[idx:idx + 4] == 'true': return True, idx + 4 elif nextchar == 'f' and string[idx:idx + 5] == 'false': return False, idx + 5 m = match_number(string, idx) if m is not None: integer, frac, exp = m.groups() if frac or exp: res = parse_float(integer + (frac or '') + (exp or '')) else: res = parse_int(integer) return res, m.end() elif nextchar == 'N' and string[idx:idx + 3] == 'NaN': return parse_constant('NaN'), idx + 3 elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity': return parse_constant('Infinity'), idx + 8 elif nextchar == '-' and string[idx:idx + 9] == '-Infinity': return parse_constant('-Infinity'), idx + 9 else: raise JSONDecodeError(errmsg, string, idx) def scan_once(string, idx): try: return _scan_once(string, idx) finally: memo.clear() return scan_once
[ "def", "py_make_scanner", "(", "context", ")", ":", "parse_object", "=", "context", ".", "parse_object", "parse_array", "=", "context", ".", "parse_array", "parse_string", "=", "context", ".", "parse_string", "match_number", "=", "NUMBER_RE", ".", "match", "encodi...
https://github.com/seppius-xbmc-repo/ru/blob/d0879d56ec8243b2c7af44fda5cf3d1ff77fd2e2/script.module.simplejson/lib/simplejson/scanner.py#L65-L123
makerbot/ReplicatorG
d6f2b07785a5a5f1e172fb87cb4303b17c575d5d
skein_engines/skeinforge-50/fabmetheus_utilities/miscellaneous/nophead/vector3.py
python
Vector3.__abs__
(self)
return math.sqrt( self.x * self.x + self.y * self.y + self.z * self.z )
Get the magnitude of the Vector3.
Get the magnitude of the Vector3.
[ "Get", "the", "magnitude", "of", "the", "Vector3", "." ]
def __abs__(self): "Get the magnitude of the Vector3." return math.sqrt( self.x * self.x + self.y * self.y + self.z * self.z )
[ "def", "__abs__", "(", "self", ")", ":", "return", "math", ".", "sqrt", "(", "self", ".", "x", "*", "self", ".", "x", "+", "self", ".", "y", "*", "self", ".", "y", "+", "self", ".", "z", "*", "self", ".", "z", ")" ]
https://github.com/makerbot/ReplicatorG/blob/d6f2b07785a5a5f1e172fb87cb4303b17c575d5d/skein_engines/skeinforge-50/fabmetheus_utilities/miscellaneous/nophead/vector3.py#L53-L55
wucng/TensorExpand
4ea58f64f5c5082b278229b799c9f679536510b7
TensorExpand/Object detection/YOLO/darkflow/darkflow/dark/layer.py
python
Layer.__ne__
(self, other)
return not self.__eq__(other)
[]
def __ne__(self, other): return not self.__eq__(other)
[ "def", "__ne__", "(", "self", ",", "other", ")", ":", "return", "not", "self", ".", "__eq__", "(", "other", ")" ]
https://github.com/wucng/TensorExpand/blob/4ea58f64f5c5082b278229b799c9f679536510b7/TensorExpand/Object detection/YOLO/darkflow/darkflow/dark/layer.py#L57-L58
oilshell/oil
94388e7d44a9ad879b12615f6203b38596b5a2d3
Python-2.7.13/Lib/compiler/pyassem.py
python
StackDepthTracker.DUP_TOPX
(self, argc)
return argc
[]
def DUP_TOPX(self, argc): return argc
[ "def", "DUP_TOPX", "(", "self", ",", "argc", ")", ":", "return", "argc" ]
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Lib/compiler/pyassem.py#L760-L761
CedricGuillemet/Imogen
ee417b42747ed5b46cb11b02ef0c3630000085b3
bin/Lib/logging/__init__.py
python
Formatter.formatTime
(self, record, datefmt=None)
return s
Return the creation time of the specified LogRecord as formatted text. This method should be called from format() by a formatter which wants to make use of a formatted time. This method can be overridden in formatters to provide for any specific requirement, but the basic behaviour is as follows: if datefmt (a string) is specified, it is used with time.strftime() to format the creation time of the record. Otherwise, an ISO8601-like (or RFC 3339-like) format is used. The resulting string is returned. This function uses a user-configurable function to convert the creation time to a tuple. By default, time.localtime() is used; to change this for a particular formatter instance, set the 'converter' attribute to a function with the same signature as time.localtime() or time.gmtime(). To change it for all formatters, for example if you want all logging times to be shown in GMT, set the 'converter' attribute in the Formatter class.
Return the creation time of the specified LogRecord as formatted text.
[ "Return", "the", "creation", "time", "of", "the", "specified", "LogRecord", "as", "formatted", "text", "." ]
def formatTime(self, record, datefmt=None): """ Return the creation time of the specified LogRecord as formatted text. This method should be called from format() by a formatter which wants to make use of a formatted time. This method can be overridden in formatters to provide for any specific requirement, but the basic behaviour is as follows: if datefmt (a string) is specified, it is used with time.strftime() to format the creation time of the record. Otherwise, an ISO8601-like (or RFC 3339-like) format is used. The resulting string is returned. This function uses a user-configurable function to convert the creation time to a tuple. By default, time.localtime() is used; to change this for a particular formatter instance, set the 'converter' attribute to a function with the same signature as time.localtime() or time.gmtime(). To change it for all formatters, for example if you want all logging times to be shown in GMT, set the 'converter' attribute in the Formatter class. """ ct = self.converter(record.created) if datefmt: s = time.strftime(datefmt, ct) else: t = time.strftime(self.default_time_format, ct) s = self.default_msec_format % (t, record.msecs) return s
[ "def", "formatTime", "(", "self", ",", "record", ",", "datefmt", "=", "None", ")", ":", "ct", "=", "self", ".", "converter", "(", "record", ".", "created", ")", "if", "datefmt", ":", "s", "=", "time", ".", "strftime", "(", "datefmt", ",", "ct", ")"...
https://github.com/CedricGuillemet/Imogen/blob/ee417b42747ed5b46cb11b02ef0c3630000085b3/bin/Lib/logging/__init__.py#L539-L563
quantumblacklabs/causalnex
127d9324a3d68c1795299c7522f22cdea880f344
causalnex/network/network.py
python
BayesianNetwork.nodes
(self)
return list(self._model.nodes)
List of all nodes contained within the Bayesian Network. Returns: A list of node names.
List of all nodes contained within the Bayesian Network.
[ "List", "of", "all", "nodes", "contained", "within", "the", "Bayesian", "Network", "." ]
def nodes(self) -> List[str]: """ List of all nodes contained within the Bayesian Network. Returns: A list of node names. """ return list(self._model.nodes)
[ "def", "nodes", "(", "self", ")", "->", "List", "[", "str", "]", ":", "return", "list", "(", "self", ".", "_model", ".", "nodes", ")" ]
https://github.com/quantumblacklabs/causalnex/blob/127d9324a3d68c1795299c7522f22cdea880f344/causalnex/network/network.py#L166-L173
jython/frozen-mirror
b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99
lib-python/2.7/smtplib.py
python
quotedata
(data)
return re.sub(r'(?m)^\.', '..', re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data))
Quote data for email. Double leading '.', and change Unix newline '\\n', or Mac '\\r' into Internet CRLF end-of-line.
Quote data for email.
[ "Quote", "data", "for", "email", "." ]
def quotedata(data): """Quote data for email. Double leading '.', and change Unix newline '\\n', or Mac '\\r' into Internet CRLF end-of-line. """ return re.sub(r'(?m)^\.', '..', re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data))
[ "def", "quotedata", "(", "data", ")", ":", "return", "re", ".", "sub", "(", "r'(?m)^\\.'", ",", "'..'", ",", "re", ".", "sub", "(", "r'(?:\\r\\n|\\n|\\r(?!\\n))'", ",", "CRLF", ",", "data", ")", ")" ]
https://github.com/jython/frozen-mirror/blob/b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99/lib-python/2.7/smtplib.py#L160-L167
TadaSoftware/PyNFe
a54de70b92553596f09a57cbe3a207880eafcd93
pynfe/utils/bar_code_128.py
python
test
()
Execute all tests
Execute all tests
[ "Execute", "all", "tests" ]
def test(): """ Execute all tests """ testWithChecksum() testImage()
[ "def", "test", "(", ")", ":", "testWithChecksum", "(", ")", "testImage", "(", ")" ]
https://github.com/TadaSoftware/PyNFe/blob/a54de70b92553596f09a57cbe3a207880eafcd93/pynfe/utils/bar_code_128.py#L294-L297
asteroid-team/asteroid
fae2f7d1d4eb83da741818a5c375267fe8d98847
asteroid/engine/system.py
python
System.common_step
(self, batch, batch_nb, train=True)
return loss
Common forward step between training and validation. The function of this method is to unpack the data given by the loader, forward the batch through the model and compute the loss. Pytorch-lightning handles all the rest. Args: batch: the object returned by the loader (a list of torch.Tensor in most cases) but can be something else. batch_nb (int): The number of the batch in the epoch. train (bool): Whether in training mode. Needed only if the training and validation steps are fundamentally different, otherwise, pytorch-lightning handles the usual differences. Returns: :class:`torch.Tensor` : The loss value on this batch. .. note:: This is typically the method to overwrite when subclassing ``System``. If the training and validation steps are somehow different (except for ``loss.backward()`` and ``optimzer.step()``), the argument ``train`` can be used to switch behavior. Otherwise, ``training_step`` and ``validation_step`` can be overwriten.
Common forward step between training and validation.
[ "Common", "forward", "step", "between", "training", "and", "validation", "." ]
def common_step(self, batch, batch_nb, train=True): """Common forward step between training and validation. The function of this method is to unpack the data given by the loader, forward the batch through the model and compute the loss. Pytorch-lightning handles all the rest. Args: batch: the object returned by the loader (a list of torch.Tensor in most cases) but can be something else. batch_nb (int): The number of the batch in the epoch. train (bool): Whether in training mode. Needed only if the training and validation steps are fundamentally different, otherwise, pytorch-lightning handles the usual differences. Returns: :class:`torch.Tensor` : The loss value on this batch. .. note:: This is typically the method to overwrite when subclassing ``System``. If the training and validation steps are somehow different (except for ``loss.backward()`` and ``optimzer.step()``), the argument ``train`` can be used to switch behavior. Otherwise, ``training_step`` and ``validation_step`` can be overwriten. """ inputs, targets = batch est_targets = self(inputs) loss = self.loss_func(est_targets, targets) return loss
[ "def", "common_step", "(", "self", ",", "batch", ",", "batch_nb", ",", "train", "=", "True", ")", ":", "inputs", ",", "targets", "=", "batch", "est_targets", "=", "self", "(", "inputs", ")", "loss", "=", "self", ".", "loss_func", "(", "est_targets", ",...
https://github.com/asteroid-team/asteroid/blob/fae2f7d1d4eb83da741818a5c375267fe8d98847/asteroid/engine/system.py#L75-L103
thinkle/gourmet
8af29c8ded24528030e5ae2ea3461f61c1e5a575
gourmet/plugins/import_export/website_import_plugins/ica_se_plugin.py
python
IcaSePlugin.test_url
(self, url, data)
return WebsiteTestState.FAILED
Is this url from ica.se
Is this url from ica.se
[ "Is", "this", "url", "from", "ica", ".", "se" ]
def test_url (self, url, data): "Is this url from ica.se" if 'ica.se' in url: return WebsiteTestState.SUCCESS return WebsiteTestState.FAILED
[ "def", "test_url", "(", "self", ",", "url", ",", "data", ")", ":", "if", "'ica.se'", "in", "url", ":", "return", "WebsiteTestState", ".", "SUCCESS", "return", "WebsiteTestState", ".", "FAILED" ]
https://github.com/thinkle/gourmet/blob/8af29c8ded24528030e5ae2ea3461f61c1e5a575/gourmet/plugins/import_export/website_import_plugins/ica_se_plugin.py#L13-L17
splintered-reality/py_trees
4c150e8304279faed27deb13b0436833bc8534b7
py_trees/behaviours.py
python
WaitForBlackboardVariable.update
(self)
Check for existence, wait otherwise. Returns: :data:`~py_trees.common.Status.SUCCESS` if key found, :data:`~py_trees.common.Status.RUNNING` otherwise.
Check for existence, wait otherwise.
[ "Check", "for", "existence", "wait", "otherwise", "." ]
def update(self) -> common.Status: """ Check for existence, wait otherwise. Returns: :data:`~py_trees.common.Status.SUCCESS` if key found, :data:`~py_trees.common.Status.RUNNING` otherwise. """ self.logger.debug("%s.update()" % self.__class__.__name__) new_status = super().update() # CheckBlackboardExists only returns SUCCESS || FAILURE if new_status == common.Status.SUCCESS: self.feedback_message = "'{}' found".format(self.key) return common.Status.SUCCESS else: # new_status == common.Status.FAILURE self.feedback_message = "waiting for key '{}'...".format(self.key) return common.Status.RUNNING
[ "def", "update", "(", "self", ")", "->", "common", ".", "Status", ":", "self", ".", "logger", ".", "debug", "(", "\"%s.update()\"", "%", "self", ".", "__class__", ".", "__name__", ")", "new_status", "=", "super", "(", ")", ".", "update", "(", ")", "#...
https://github.com/splintered-reality/py_trees/blob/4c150e8304279faed27deb13b0436833bc8534b7/py_trees/behaviours.py#L424-L439
jdf/processing.py
76e48ac855fd34169a7576a5cbc396bda698e781
mode/formatter/autopep8.py
python
_parse_container
(tokens, index, for_or_if=None)
return (None, None)
Parse a high-level container, such as a list, tuple, etc.
Parse a high-level container, such as a list, tuple, etc.
[ "Parse", "a", "high", "-", "level", "container", "such", "as", "a", "list", "tuple", "etc", "." ]
def _parse_container(tokens, index, for_or_if=None): """Parse a high-level container, such as a list, tuple, etc.""" # Store the opening bracket. items = [Atom(Token(*tokens[index]))] index += 1 num_tokens = len(tokens) while index < num_tokens: tok = Token(*tokens[index]) if tok.token_string in ',)]}': # First check if we're at the end of a list comprehension or # if-expression. Don't add the ending token as part of the list # comprehension or if-expression, because they aren't part of those # constructs. if for_or_if == 'for': return (ListComprehension(items), index - 1) elif for_or_if == 'if': return (IfExpression(items), index - 1) # We've reached the end of a container. items.append(Atom(tok)) # If not, then we are at the end of a container. if tok.token_string == ')': # The end of a tuple. return (Tuple(items), index) elif tok.token_string == ']': # The end of a list. return (List(items), index) elif tok.token_string == '}': # The end of a dictionary or set. return (DictOrSet(items), index) elif tok.token_string in '([{': # A sub-container is being defined. (container, index) = _parse_container(tokens, index) items.append(container) elif tok.token_string == 'for': (container, index) = _parse_container(tokens, index, 'for') items.append(container) elif tok.token_string == 'if': (container, index) = _parse_container(tokens, index, 'if') items.append(container) else: items.append(Atom(tok)) index += 1 return (None, None)
[ "def", "_parse_container", "(", "tokens", ",", "index", ",", "for_or_if", "=", "None", ")", ":", "# Store the opening bracket.", "items", "=", "[", "Atom", "(", "Token", "(", "*", "tokens", "[", "index", "]", ")", ")", "]", "index", "+=", "1", "num_token...
https://github.com/jdf/processing.py/blob/76e48ac855fd34169a7576a5cbc396bda698e781/mode/formatter/autopep8.py#L2024-L2080
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/sympy/tensor/tensor.py
python
tensorhead
(name, typ, sym=None, comm=0)
return TensorHead(name, typ, sym, comm)
Function generating tensorhead(s). This method is deprecated, use TensorHead constructor or tensor_heads() instead. Parameters ========== name : name or sequence of names (as in ``symbols``) typ : index types sym : same as ``*args`` in ``tensorsymmetry`` comm : commutation group number see ``_TensorManager.set_comm``
Function generating tensorhead(s). This method is deprecated, use TensorHead constructor or tensor_heads() instead.
[ "Function", "generating", "tensorhead", "(", "s", ")", ".", "This", "method", "is", "deprecated", "use", "TensorHead", "constructor", "or", "tensor_heads", "()", "instead", "." ]
def tensorhead(name, typ, sym=None, comm=0):
    """
    Function generating tensorhead(s).

    This method is deprecated, use TensorHead constructor or tensor_heads()
    instead.

    Parameters
    ==========

    name : name or sequence of names (as in ``symbols``)

    typ : index types

    sym : same as ``*args`` in ``tensorsymmetry``

    comm : commutation group number
    see ``_TensorManager.set_comm``
    """
    if sym is None:
        # Default symmetry: one independent (trivially symmetric) slot
        # per index type.
        sym = [[1] for _ in typ]
    return TensorHead(name, typ, tensorsymmetry(*sym), comm)
[ "def", "tensorhead", "(", "name", ",", "typ", ",", "sym", "=", "None", ",", "comm", "=", "0", ")", ":", "if", "sym", "is", "None", ":", "sym", "=", "[", "[", "1", "]", "for", "i", "in", "range", "(", "len", "(", "typ", ")", ")", "]", "sym",...
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/sympy/tensor/tensor.py#L1532-L1552
tensorflow/tfx
b4a6b83269815ed12ba9df9e9154c7376fef2ea0
tfx/orchestration/metadata.py
python
Metadata._get_outputs_of_execution
( self, execution_id: int, events: List[metadata_store_pb2.Event] )
return result
Fetches outputs produced by a historical execution. Args: execution_id: the id of the execution that produced the outputs. events: events related to the execution id. Returns: A dict of key -> List[Artifact] as the result
Fetches outputs produced by a historical execution.
[ "Fetches", "outputs", "produced", "by", "a", "historical", "execution", "." ]
def _get_outputs_of_execution(
    self, execution_id: int, events: List[metadata_store_pb2.Event]
) -> Optional[Dict[str, List[Artifact]]]:
  """Fetches outputs produced by a historical execution.

  Args:
    execution_id: the id of the execution that produced the outputs.
    events: events related to the execution id.

  Returns:
    A dict of key -> List[Artifact] as the result
  """
  absl.logging.debug('Execution %s matches all inputs' % execution_id)

  # Keep only OUTPUT events, ordered by their positional index within the
  # execution's output list so multi-artifact outputs stay in order.
  output_events = sorted(
      (e for e in events if e.type in [metadata_store_pb2.Event.OUTPUT]),
      key=lambda e: e.path.steps[1].index)

  # Resolve the cached artifacts and their types in bulk from MLMD.
  cached_artifacts = self.store.get_artifacts_by_id(
      [e.artifact_id for e in output_events])
  artifact_types = self.store.get_artifact_types_by_id(
      [a.type_id for a in cached_artifacts])

  result = collections.defaultdict(list)
  for event, mlmd_artifact, artifact_type in zip(output_events,
                                                 cached_artifacts,
                                                 artifact_types):
    key = event.path.steps[0].key
    result[key].append(
        artifact_utils.deserialize_artifact(artifact_type, mlmd_artifact))
  return result
[ "def", "_get_outputs_of_execution", "(", "self", ",", "execution_id", ":", "int", ",", "events", ":", "List", "[", "metadata_store_pb2", ".", "Event", "]", ")", "->", "Optional", "[", "Dict", "[", "str", ",", "List", "[", "Artifact", "]", "]", "]", ":", ...
https://github.com/tensorflow/tfx/blob/b4a6b83269815ed12ba9df9e9154c7376fef2ea0/tfx/orchestration/metadata.py#L880-L914
hyperledger/aries-cloudagent-python
2f36776e99f6053ae92eed8123b5b1b2e891c02a
aries_cloudagent/ledger/merkel_validation/merkel_verifier.py
python
MerkleVerifier.lsb
(self, x)
return x & 1
Return Least Significant Bits.
Return Least Significant Bits.
[ "Return", "Least", "Significant", "Bits", "." ]
def lsb(self, x):
    """Return the least significant bit of ``x``."""
    return x & 0b1
[ "def", "lsb", "(", "self", ",", "x", ")", ":", "return", "x", "&", "1" ]
https://github.com/hyperledger/aries-cloudagent-python/blob/2f36776e99f6053ae92eed8123b5b1b2e891c02a/aries_cloudagent/ledger/merkel_validation/merkel_verifier.py#L48-L50
CouchPotato/CouchPotatoServer
7260c12f72447ddb6f062367c6dfbda03ecd4e9c
libs/suds/resolver.py
python
TreeResolver.getchild
(self, name, parent)
get a child by name
get a child by name
[ "get", "a", "child", "by", "name" ]
def getchild(self, name, parent):
    """ get a child by name """
    log.debug('searching parent (%s) for (%s)', Repr(parent), name)
    # Names prefixed with '@' address attributes rather than child nodes.
    if name.startswith('@'):
        return parent.get_attribute(name[1:])
    return parent.get_child(name)
[ "def", "getchild", "(", "self", ",", "name", ",", "parent", ")", ":", "log", ".", "debug", "(", "'searching parent (%s) for (%s)'", ",", "Repr", "(", "parent", ")", ",", "name", ")", "if", "name", ".", "startswith", "(", "'@'", ")", ":", "return", "par...
https://github.com/CouchPotato/CouchPotatoServer/blob/7260c12f72447ddb6f062367c6dfbda03ecd4e9c/libs/suds/resolver.py#L294-L300
modelop/hadrian
7c63e539d79e6e3cad959792d313dfc8b0c523ea
titus/titus/pfaast.py
python
Subs.jsonNode
(self, lineNumbers, memo)
return self
Inserts ``self`` into the Pythonized JSON, which would make it unserializable.
Inserts ``self`` into the Pythonized JSON, which would make it unserializable.
[ "Inserts", "self", "into", "the", "Pythonized", "JSON", "which", "would", "make", "it", "unserializable", "." ]
def jsonNode(self, lineNumbers, memo):
    """Inserts ``self`` into the Pythonized JSON, which would make it
    unserializable; both arguments are accepted for interface parity and
    ignored."""
    return self
[ "def", "jsonNode", "(", "self", ",", "lineNumbers", ",", "memo", ")", ":", "return", "self" ]
https://github.com/modelop/hadrian/blob/7c63e539d79e6e3cad959792d313dfc8b0c523ea/titus/titus/pfaast.py#L649-L651
nitishsrivastava/deepnet
f4e4ff207923e01552c96038a1e2c29eb5d16160
deepnet/fastdropoutnet.py
python
FastDropoutNet.SetUpData
(self, skip_outputs=False, skip_layernames=[])
Setup the data.
Setup the data.
[ "Setup", "the", "data", "." ]
def SetUpData(self, skip_outputs=False, skip_layernames=[]):
  """Setup the data.

  Walks the network's layers and registers every input/output layer with a
  data handler, skipping outputs when ``skip_outputs`` is set and any layer
  named in ``skip_layernames``.

  NOTE(review): ``skip_layernames=[]`` is a mutable default argument; it is
  only read here, so it is harmless, but callers should not rely on it.
  """
  hyp_list = []
  # name_list collects [train, validation, test] data names in parallel.
  name_list = [[], [], []]
  for node in self.layer:
    if not (node.is_input or node.is_output):
      continue
    if skip_outputs and node.is_output:
      continue
    if node.name in skip_layernames:
      continue
    data_field = node.proto.data_field
    if node.hyperparams.fast_dropout:
      # Fast-dropout layers are intentionally excluded from data handling.
      pass
      #self.fast_dropout_layers.append(node)
    elif data_field.tied:
      # A tied layer shares its data source with an earlier data layer.
      self.tied_datalayer.append(node)
      node.tied_to = next(l for l in self.datalayer\
                          if l.name == data_field.tied_to)
    else:
      self.datalayer.append(node)
      hyp_list.append(node.hyperparams)
      if data_field.train:
        name_list[0].append(data_field.train)
      if data_field.validation:
        name_list[1].append(data_field.validation)
      if data_field.test:
        name_list[2].append(data_field.test)
  # Prefer the training op's settings when available; fall back to eval op.
  if self.t_op:
    op = self.t_op
  else:
    op = self.e_op
  handles = GetDataHandles(op, name_list, hyp_list,
                           verbose=self.verbose)
  self.train_data_handler = handles[0]
  self.validation_data_handler = handles[1]
  self.test_data_handler = handles[2]
[ "def", "SetUpData", "(", "self", ",", "skip_outputs", "=", "False", ",", "skip_layernames", "=", "[", "]", ")", ":", "hyp_list", "=", "[", "]", "name_list", "=", "[", "[", "]", ",", "[", "]", ",", "[", "]", "]", "for", "node", "in", "self", ".", ...
https://github.com/nitishsrivastava/deepnet/blob/f4e4ff207923e01552c96038a1e2c29eb5d16160/deepnet/fastdropoutnet.py#L179-L215
oilshell/oil
94388e7d44a9ad879b12615f6203b38596b5a2d3
asdl/typed_arith_abbrev.py
python
_arith_expr__Unary
(obj)
return p_node
[]
def _arith_expr__Unary(obj):
  # type: (arith_expr__Unary) -> hnode__Record
  # Render a unary arithmetic node as the abbreviated record 'U'.
  record = runtime.NewRecord('U')
  record.abbrev = True
  op_leaf = runtime.NewLeaf(str(obj.op), color_e.StringConst)
  record.unnamed_fields.append(op_leaf)
  record.unnamed_fields.append(obj.a.AbbreviatedTree())  # type: ignore
  return record
[ "def", "_arith_expr__Unary", "(", "obj", ")", ":", "# type: (arith_expr__Unary) -> hnode__Record", "p_node", "=", "runtime", ".", "NewRecord", "(", "'U'", ")", "p_node", ".", "abbrev", "=", "True", "n", "=", "runtime", ".", "NewLeaf", "(", "str", "(", "obj", ...
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/asdl/typed_arith_abbrev.py#L8-L16
wikimedia/pywikibot
81a01ffaec7271bf5b4b170f85a80388420a4e78
scripts/interwiki.py
python
Subject.whereReport
(self, page, indent=4)
Report found interlanguage links with conflicts.
Report found interlanguage links with conflicts.
[ "Report", "found", "interlanguage", "links", "with", "conflicts", "." ]
def whereReport(self, page, indent=4):
    """Report found interlanguage links with conflicts."""
    prefix = ' ' * indent
    for origin in sorted(self.found_in[page]):
        # None marks a link that came from a hint rather than a page.
        text = 'Given as a hint.' if origin is None else str(origin)
        pywikibot.output(prefix + text)
[ "def", "whereReport", "(", "self", ",", "page", ",", "indent", "=", "4", ")", ":", "for", "page2", "in", "sorted", "(", "self", ".", "found_in", "[", "page", "]", ")", ":", "if", "page2", "is", "None", ":", "pywikibot", ".", "output", "(", "' '", ...
https://github.com/wikimedia/pywikibot/blob/81a01ffaec7271bf5b4b170f85a80388420a4e78/scripts/interwiki.py#L1339-L1345
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/lib_openshift/library/oc_label.py
python
Yedit.parse_value
(inc_value, vtype='')
return inc_value
determine value type passed
determine value type passed
[ "determine", "value", "type", "passed" ]
def parse_value(inc_value, vtype=''):
    '''determine value type passed'''
    true_bools = ('y', 'Y', 'yes', 'Yes', 'YES',
                  'true', 'True', 'TRUE',
                  'on', 'On', 'ON')
    false_bools = ('n', 'N', 'no', 'No', 'NO',
                   'false', 'False', 'FALSE',
                   'off', 'Off', 'OFF')

    # It came in as a string but the caller asked for a bool: the string
    # must spell out a recognizable boolean.
    if isinstance(inc_value, str) and 'bool' in vtype:
        if inc_value not in true_bools and inc_value not in false_bools:
            raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
    elif isinstance(inc_value, bool) and 'str' in vtype:
        # Bool came in but a string was requested: stringify it.
        inc_value = str(inc_value)

    # '' would become None after yaml loading, so leave empty strings alone.
    # Otherwise, unless a string was explicitly requested, let yaml infer
    # the real type.
    if isinstance(inc_value, str) and inc_value != '' and 'str' not in vtype:
        try:
            inc_value = yaml.safe_load(inc_value)
        except Exception:
            raise YeditException('Could not determine type of incoming value. '
                                 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))

    return inc_value
[ "def", "parse_value", "(", "inc_value", ",", "vtype", "=", "''", ")", ":", "true_bools", "=", "[", "'y'", ",", "'Y'", ",", "'yes'", ",", "'Yes'", ",", "'YES'", ",", "'true'", ",", "'True'", ",", "'TRUE'", ",", "'on'", ",", "'On'", ",", "'ON'", ",",...
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/lib_openshift/library/oc_label.py#L693-L719
google/grr
8ad8a4d2c5a93c92729206b7771af19d92d4f915
grr/server/grr_response_server/flows/general/filesystem.py
python
GlobLogic._ConvertGlobIntoPathComponents
(self, pattern)
return components
r"""Converts a glob pattern into a list of pathspec components. Wildcards are also converted to regular expressions. The pathspec components do not span directories, and are marked as a regex or a literal component. We also support recursion into directories using the ** notation. For example, /home/**2/foo.txt will find all files named foo.txt recursed 2 directories deep. If the directory depth is omitted, it defaults to 3. Example: /home/test/* -> ['home', 'test', '.*\\Z(?ms)'] Args: pattern: A glob expression with wildcards. Returns: A list of PathSpec instances for each component. Raises: ValueError: If the glob is invalid.
r"""Converts a glob pattern into a list of pathspec components.
[ "r", "Converts", "a", "glob", "pattern", "into", "a", "list", "of", "pathspec", "components", "." ]
def _ConvertGlobIntoPathComponents(self, pattern):
  r"""Converts a glob pattern into a list of pathspec components.

  Wildcards are also converted to regular expressions. The pathspec
  components do not span directories, and are marked as a regex or a
  literal component.

  We also support recursion into directories using the ** notation.  For
  example, /home/**2/foo.txt will find all files named foo.txt recursed 2
  directories deep. If the directory depth is omitted, it defaults to 3.

  Example:
   /home/test/* -> ['home', 'test', '.*\\Z(?ms)']

  Args:
    pattern: A glob expression with wildcards.

  Returns:
    A list of PathSpec instances for each component.

  Raises:
    ValueError: If the glob is invalid.
  """
  components = []
  for path_component in pattern.split("/"):
    # A ** in the path component means recurse into directories that
    # match the pattern.
    m = rdf_paths.GlobExpression.RECURSION_REGEX.search(path_component)
    if m:
      # Replace the ** (with optional depth digits) by a plain * so the
      # remaining text can be compiled as a normal fnmatch regex.
      path_component = path_component.replace(m.group(0), "*")

      component = rdf_paths.PathSpec(
          path=fnmatch.translate(path_component),
          pathtype=self.state.pathtype,
          path_options=rdf_paths.PathSpec.Options.RECURSIVE)

      # Allow the user to override the recursion depth.
      if m.group(1):
        component.recursion_depth = int(m.group(1))

    elif self.GLOB_MAGIC_CHECK.search(path_component):
      # Plain glob characters (*, ?, [...]) -> regex-matched component.
      component = rdf_paths.PathSpec(
          path=fnmatch.translate(path_component),
          pathtype=self.state.pathtype,
          path_options=rdf_paths.PathSpec.Options.REGEX)
    else:
      # No wildcards at all -> literal, case-insensitive component.
      component = rdf_paths.PathSpec(
          path=path_component,
          pathtype=self.state.pathtype,
          path_options=rdf_paths.PathSpec.Options.CASE_INSENSITIVE)

    components.append(component)

  return components
[ "def", "_ConvertGlobIntoPathComponents", "(", "self", ",", "pattern", ")", ":", "components", "=", "[", "]", "for", "path_component", "in", "pattern", ".", "split", "(", "\"/\"", ")", ":", "# A ** in the path component means recurse into directories that match the", "# ...
https://github.com/google/grr/blob/8ad8a4d2c5a93c92729206b7771af19d92d4f915/grr/server/grr_response_server/flows/general/filesystem.py#L413-L466
tomplus/kubernetes_asyncio
f028cc793e3a2c519be6a52a49fb77ff0b014c9b
kubernetes_asyncio/client/api/policy_v1beta1_api.py
python
PolicyV1beta1Api.delete_collection_pod_security_policy_with_http_info
(self, **kwargs)
return self.api_client.call_api( '/apis/policy/v1beta1/podsecuritypolicies', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
delete_collection_pod_security_policy # noqa: E501 delete collection of PodSecurityPolicy # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_pod_security_policy_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param V1DeleteOptions body: :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. 
If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread.
delete_collection_pod_security_policy # noqa: E501
[ "delete_collection_pod_security_policy", "#", "noqa", ":", "E501" ]
def delete_collection_pod_security_policy_with_http_info(self, **kwargs):  # noqa: E501
    """delete_collection_pod_security_policy  # noqa: E501

    delete collection of PodSecurityPolicy  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.delete_collection_pod_security_policy_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: Continuation token from a previous (chunked)
        list call; server-defined and opaque to clients.
    :param str dry_run: When present, modifications are not persisted.
        Valid values are: - All: all dry run stages will be processed
    :param str field_selector: A selector to restrict the list of returned
        objects by their fields. Defaults to everything.
    :param int grace_period_seconds: Seconds before the object should be
        deleted (non-negative; 0 means delete immediately).
    :param str label_selector: A selector to restrict the list of returned
        objects by their labels. Defaults to everything.
    :param int limit: Maximum number of responses to return for a list
        call; use with `continue` for chunked listing.
    :param bool orphan_dependents: Deprecated: please use the
        PropagationPolicy; orphan the dependents if true.
    :param str propagation_policy: Whether and how garbage collection will
        be performed. One of: 'Orphan', 'Background', 'Foreground'.
    :param str resource_version: resourceVersion constraint on what
        resource versions a request may be served from. Defaults to unset.
    :param str resource_version_match: How resourceVersion is applied to
        list calls. Defaults to unset.
    :param int timeout_seconds: Timeout for the list/watch call,
        regardless of any activity or inactivity.
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object
        will be returned without reading/decoding response data. Default
        is True.
    :param _request_timeout: timeout setting for this request. If one
        number provided, it will be total request timeout. It can also be
        a pair (tuple) of (connection, read) timeouts.
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """

    local_var_params = locals()

    all_params = [
        'pretty',
        '_continue',
        'dry_run',
        'field_selector',
        'grace_period_seconds',
        'label_selector',
        'limit',
        'orphan_dependents',
        'propagation_policy',
        'resource_version',
        'resource_version_match',
        'timeout_seconds',
        'body'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments so that typos fail loudly.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_pod_security_policy" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    path_params = {}

    # Map each supplied python kwarg onto its camelCase query parameter.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
        query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
        query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
    if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
        query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
    if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
        query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
    if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
        query_params.append(('limit', local_var_params['limit']))  # noqa: E501
    if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
        query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
    if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
        query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
        query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
    if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
        query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
    if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
        query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/policy/v1beta1/podsecuritypolicies', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Status',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
[ "def", "delete_collection_pod_security_policy_with_http_info", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "local_var_params", "=", "locals", "(", ")", "all_params", "=", "[", "'pretty'", ",", "'_continue'", ",", "'dry_run'", ",", "'field_selecto...
https://github.com/tomplus/kubernetes_asyncio/blob/f028cc793e3a2c519be6a52a49fb77ff0b014c9b/kubernetes_asyncio/client/api/policy_v1beta1_api.py#L522-L653
zenodo/zenodo
3c45e52a742ad5a0a7788a67b02fbbc15ab4d8d5
zenodo/modules/records/models.py
python
ObjectType.get_openaire_subtype
(cls, value)
Get the OpenAIRE community-specific subtype. OpenAIRE community-specific subtype requires that the record is accepted to the relevant community. :param value: Full 'metadata' dictionary. Higher level metadata is required since we are fetching both 'resource_type' and 'communities'. :type value: dict :returns: Subtype in the form "openaire:<OA-comm-ID>:<OA-subtype-ID>" or None. :rtype: str
Get the OpenAIRE community-specific subtype.
[ "Get", "the", "OpenAIRE", "community", "-", "specific", "subtype", "." ]
def get_openaire_subtype(cls, value): """Get the OpenAIRE community-specific subtype. OpenAIRE community-specific subtype requires that the record is accepted to the relevant community. :param value: Full 'metadata' dictionary. Higher level metadata is required since we are fetching both 'resource_type' and 'communities'. :type value: dict :returns: Subtype in the form "openaire:<OA-comm-ID>:<OA-subtype-ID>" or None. :rtype: str """ comms = value.get('communities', []) oa_type = value['resource_type'].get('openaire_subtype') if oa_type and is_valid_openaire_type(value['resource_type'], comms): return 'openaire:' + oa_type
[ "def", "get_openaire_subtype", "(", "cls", ",", "value", ")", ":", "comms", "=", "value", ".", "get", "(", "'communities'", ",", "[", "]", ")", "oa_type", "=", "value", "[", "'resource_type'", "]", ".", "get", "(", "'openaire_subtype'", ")", "if", "oa_ty...
https://github.com/zenodo/zenodo/blob/3c45e52a742ad5a0a7788a67b02fbbc15ab4d8d5/zenodo/modules/records/models.py#L242-L259
bwohlberg/sporco
df67462abcf83af6ab1961bcb0d51b87a66483fa
sporco/admm/cbpdn.py
python
ConvBPDN.obfn_reg
(self)
return (self.lmbda*rl1, rl1)
Compute regularisation term and contribution to objective function.
Compute regularisation term and contribution to objective function.
[ "Compute", "regularisation", "term", "and", "contribution", "to", "objective", "function", "." ]
def obfn_reg(self): """Compute regularisation term and contribution to objective function. """ rl1 = np.linalg.norm((self.wl1 * self.obfn_gvar()).ravel(), 1) return (self.lmbda*rl1, rl1)
[ "def", "obfn_reg", "(", "self", ")", ":", "rl1", "=", "np", ".", "linalg", ".", "norm", "(", "(", "self", ".", "wl1", "*", "self", ".", "obfn_gvar", "(", ")", ")", ".", "ravel", "(", ")", ",", "1", ")", "return", "(", "self", ".", "lmbda", "*...
https://github.com/bwohlberg/sporco/blob/df67462abcf83af6ab1961bcb0d51b87a66483fa/sporco/admm/cbpdn.py#L624-L630
dsmrreader/dsmr-reader
c037848e0f96028fb500415b9289df40f81bc14f
dsmr_dropbox/services.py
python
should_sync_file
(abs_file_path: str)
return True
Checks whether we should include this file for sync.
Checks whether we should include this file for sync.
[ "Checks", "whether", "we", "should", "include", "this", "file", "for", "sync", "." ]
def should_sync_file(abs_file_path: str) -> bool: """ Checks whether we should include this file for sync. """ file_stat = os.stat(abs_file_path) # Ignore empty files. if file_stat.st_size == 0: logger.debug('Dropbox: Ignoring file with zero Bytes: %s', abs_file_path) return False # Ignore file that haven't been updated in a while. seconds_since_last_modification = int(time.time() - file_stat.st_mtime) if seconds_since_last_modification > settings.DSMRREADER_DROPBOX_MAX_FILE_MODIFICATION_TIME: logger.debug( 'Dropbox: Ignoring file: Time since last modification too high (%s secs): %s', seconds_since_last_modification, abs_file_path ) return False return True
[ "def", "should_sync_file", "(", "abs_file_path", ":", "str", ")", "->", "bool", ":", "file_stat", "=", "os", ".", "stat", "(", "abs_file_path", ")", "# Ignore empty files.", "if", "file_stat", ".", "st_size", "==", "0", ":", "logger", ".", "debug", "(", "'...
https://github.com/dsmrreader/dsmr-reader/blob/c037848e0f96028fb500415b9289df40f81bc14f/dsmr_dropbox/services.py#L76-L96
prompt-toolkit/python-prompt-toolkit
e9eac2eb59ec385e81742fa2ac623d4b8de00925
prompt_toolkit/contrib/regular_languages/compiler.py
python
Match.variables
(self)
return Variables( [(k, self._unescape(k, v), sl) for k, v, sl in self._nodes_to_values()] )
Returns :class:`Variables` instance.
Returns :class:`Variables` instance.
[ "Returns", ":", "class", ":", "Variables", "instance", "." ]
def variables(self) -> "Variables": """ Returns :class:`Variables` instance. """ return Variables( [(k, self._unescape(k, v), sl) for k, v, sl in self._nodes_to_values()] )
[ "def", "variables", "(", "self", ")", "->", "\"Variables\"", ":", "return", "Variables", "(", "[", "(", "k", ",", "self", ".", "_unescape", "(", "k", ",", "v", ")", ",", "sl", ")", "for", "k", ",", "v", ",", "sl", "in", "self", ".", "_nodes_to_va...
https://github.com/prompt-toolkit/python-prompt-toolkit/blob/e9eac2eb59ec385e81742fa2ac623d4b8de00925/prompt_toolkit/contrib/regular_languages/compiler.py#L453-L459
pandas-dev/pandas
5ba7d714014ae8feaccc0dd4a98890828cf2832d
pandas/util/version/__init__.py
python
_parse_local_version
(local: str)
return None
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
[ "Takes", "a", "string", "like", "abc", ".", "1", ".", "twelve", "and", "turns", "it", "into", "(", "abc", "1", "twelve", ")", "." ]
def _parse_local_version(local: str) -> LocalType | None: """ Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve"). """ if local is not None: return tuple( part.lower() if not part.isdigit() else int(part) for part in _local_version_separators.split(local) ) return None
[ "def", "_parse_local_version", "(", "local", ":", "str", ")", "->", "LocalType", "|", "None", ":", "if", "local", "is", "not", "None", ":", "return", "tuple", "(", "part", ".", "lower", "(", ")", "if", "not", "part", ".", "isdigit", "(", ")", "else",...
https://github.com/pandas-dev/pandas/blob/5ba7d714014ae8feaccc0dd4a98890828cf2832d/pandas/util/version/__init__.py#L507-L516
zigpy/bellows
acf47f6939c28b20b22d5715646e6f1a0bd70064
bellows/ezsp/protocol.py
python
ProtocolHandler.set_source_routing
(self)
Enable source routing on NCP.
Enable source routing on NCP.
[ "Enable", "source", "routing", "on", "NCP", "." ]
async def set_source_routing(self) -> None: """Enable source routing on NCP."""
[ "async", "def", "set_source_routing", "(", "self", ")", "->", "None", ":" ]
https://github.com/zigpy/bellows/blob/acf47f6939c28b20b22d5715646e6f1a0bd70064/bellows/ezsp/protocol.py#L122-L123
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/smhi/weather.py
python
SmhiWeather.retry_update
(self, _: datetime)
Retry refresh weather forecast.
Retry refresh weather forecast.
[ "Retry", "refresh", "weather", "forecast", "." ]
async def retry_update(self, _: datetime) -> None: """Retry refresh weather forecast.""" await self.async_update( # pylint: disable=unexpected-keyword-arg no_throttle=True )
[ "async", "def", "retry_update", "(", "self", ",", "_", ":", "datetime", ")", "->", "None", ":", "await", "self", ".", "async_update", "(", "# pylint: disable=unexpected-keyword-arg", "no_throttle", "=", "True", ")" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/smhi/weather.py#L149-L153
LinkedInAttic/indextank-service
880c6295ce8e7a3a55bf9b3777cc35c7680e0d7e
storefront/boto/ec2/connection.py
python
EC2Connection.create_key_pair
(self, key_name)
return self.get_object('CreateKeyPair', params, KeyPair)
Create a new key pair for your account. This will create the key pair within the region you are currently connected to. :type key_name: string :param key_name: The name of the new keypair :rtype: :class:`boto.ec2.keypair.KeyPair` :return: The newly created :class:`boto.ec2.keypair.KeyPair`. The material attribute of the new KeyPair object will contain the the unencrypted PEM encoded RSA private key.
Create a new key pair for your account. This will create the key pair within the region you are currently connected to.
[ "Create", "a", "new", "key", "pair", "for", "your", "account", ".", "This", "will", "create", "the", "key", "pair", "within", "the", "region", "you", "are", "currently", "connected", "to", "." ]
def create_key_pair(self, key_name): """ Create a new key pair for your account. This will create the key pair within the region you are currently connected to. :type key_name: string :param key_name: The name of the new keypair :rtype: :class:`boto.ec2.keypair.KeyPair` :return: The newly created :class:`boto.ec2.keypair.KeyPair`. The material attribute of the new KeyPair object will contain the the unencrypted PEM encoded RSA private key. """ params = {'KeyName':key_name} return self.get_object('CreateKeyPair', params, KeyPair)
[ "def", "create_key_pair", "(", "self", ",", "key_name", ")", ":", "params", "=", "{", "'KeyName'", ":", "key_name", "}", "return", "self", ".", "get_object", "(", "'CreateKeyPair'", ",", "params", ",", "KeyPair", ")" ]
https://github.com/LinkedInAttic/indextank-service/blob/880c6295ce8e7a3a55bf9b3777cc35c7680e0d7e/storefront/boto/ec2/connection.py#L1215-L1230
kivy/kivy-designer
20343184a28c2851faf0c1ab451d0286d147a441
tools/pep8checker/pep8.py
python
Checker.check_physical
(self, line)
Run all physical checks on a raw input line.
Run all physical checks on a raw input line.
[ "Run", "all", "physical", "checks", "on", "a", "raw", "input", "line", "." ]
def check_physical(self, line): """ Run all physical checks on a raw input line. """ self.physical_line = line if self.indent_char is None and line[:1] in WHITESPACE: self.indent_char = line[0] for name, check, argument_names in self._physical_checks: result = self.run_check(check, argument_names) if result is not None: offset, text = result self.report_error(self.line_number, offset, text, check)
[ "def", "check_physical", "(", "self", ",", "line", ")", ":", "self", ".", "physical_line", "=", "line", "if", "self", ".", "indent_char", "is", "None", "and", "line", "[", ":", "1", "]", "in", "WHITESPACE", ":", "self", ".", "indent_char", "=", "line",...
https://github.com/kivy/kivy-designer/blob/20343184a28c2851faf0c1ab451d0286d147a441/tools/pep8checker/pep8.py#L1218-L1229
AppScale/gts
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
AdminServer/appscale/admin/base_handler.py
python
BaseHandler.authenticate_access_token
(self, headers, project_id, ua_client)
Method to go through Access Token authentication. Args: headers: The headers associated with the request. project_id: The project that the user wants to access. ua_client: A UA Client, used to see if the user can access the project. Raises: CustomHTTPError specified in called function.
Method to go through Access Token authentication. Args: headers: The headers associated with the request. project_id: The project that the user wants to access. ua_client: A UA Client, used to see if the user can access the project. Raises: CustomHTTPError specified in called function.
[ "Method", "to", "go", "through", "Access", "Token", "authentication", ".", "Args", ":", "headers", ":", "The", "headers", "associated", "with", "the", "request", ".", "project_id", ":", "The", "project", "that", "the", "user", "wants", "to", "access", ".", ...
def authenticate_access_token(self, headers, project_id, ua_client): """ Method to go through Access Token authentication. Args: headers: The headers associated with the request. project_id: The project that the user wants to access. ua_client: A UA Client, used to see if the user can access the project. Raises: CustomHTTPError specified in called function. """ try: token = headers['Authorization'].split()[1] except IndexError: raise CustomHTTPError(HTTPCodes.BAD_REQUEST, message='Malformed ' 'authorization.') method_base64, metadata_base64, signature = token.split('.') self.check_token_hash(method_base64, metadata_base64, signature) metadata = json.loads(base64.urlsafe_b64decode(metadata_base64)) self.check_token_expiration(metadata) self.check_token_scope(metadata) if 'project' in metadata: if metadata['project'] == project_id: return else: raise CustomHTTPError(HTTPCodes.FORBIDDEN, message='Token is not authorized for project') elif 'user' in metadata: return self.check_user_access(metadata, project_id, ua_client) else: raise CustomHTTPError(HTTPCodes.UNAUTHORIZED, message='Invalid token')
[ "def", "authenticate_access_token", "(", "self", ",", "headers", ",", "project_id", ",", "ua_client", ")", ":", "try", ":", "token", "=", "headers", "[", "'Authorization'", "]", ".", "split", "(", ")", "[", "1", "]", "except", "IndexError", ":", "raise", ...
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AdminServer/appscale/admin/base_handler.py#L48-L78
karmab/kcli
fff2a2632841f54d9346b437821585df0ec659d7
kvirt/providers/sampleprovider.py
python
Kbase.update_iso
(self, name, iso)
return
:param name: :param iso: :return:
[]
def update_iso(self, name, iso): """ :param name: :param iso: :return: """ print("not implemented") return
[ "def", "update_iso", "(", "self", ",", "name", ",", "iso", ")", ":", "print", "(", "\"not implemented\"", ")", "return" ]
https://github.com/karmab/kcli/blob/fff2a2632841f54d9346b437821585df0ec659d7/kvirt/providers/sampleprovider.py#L344-L352
faif/python-patterns
2a469f48cbeeaaecab7c5c76d86266cd26b7a30b
patterns/structural/adapter.py
python
Adapter.__init__
(self, obj: T, **adapted_methods: Callable)
We set the adapted methods in the object's dict.
We set the adapted methods in the object's dict.
[ "We", "set", "the", "adapted", "methods", "in", "the", "object", "s", "dict", "." ]
def __init__(self, obj: T, **adapted_methods: Callable): """We set the adapted methods in the object's dict.""" self.obj = obj self.__dict__.update(adapted_methods)
[ "def", "__init__", "(", "self", ",", "obj", ":", "T", ",", "*", "*", "adapted_methods", ":", "Callable", ")", ":", "self", ".", "obj", "=", "obj", "self", ".", "__dict__", ".", "update", "(", "adapted_methods", ")" ]
https://github.com/faif/python-patterns/blob/2a469f48cbeeaaecab7c5c76d86266cd26b7a30b/patterns/structural/adapter.py#L77-L80
TryCatchHCF/DumpsterFire
43b46a274663ff694763db6e627975e160bc3597
FireModules/Websurfing/tor_project_org.py
python
tor_project_org.GetParameters
( self )
return ""
[]
def GetParameters( self ): return ""
[ "def", "GetParameters", "(", "self", ")", ":", "return", "\"\"" ]
https://github.com/TryCatchHCF/DumpsterFire/blob/43b46a274663ff694763db6e627975e160bc3597/FireModules/Websurfing/tor_project_org.py#L42-L43
TensorMSA/tensormsa
c36b565159cd934533636429add3c7d7263d622b
master/workflow/data/workflow_data_text.py
python
WorkFlowDataText.get_src_type
(self)
return self.conf.get('source_type')
:param nnid: :param wfver: :param node: :return:
[]
def get_src_type(self): """ :param nnid: :param wfver: :param node: :return: """ if ('conf' in self.__dict__): self.conf = self.get_step_source() return self.conf.get('source_type')
[ "def", "get_src_type", "(", "self", ")", ":", "if", "(", "'conf'", "in", "self", ".", "__dict__", ")", ":", "self", ".", "conf", "=", "self", ".", "get_step_source", "(", ")", "return", "self", ".", "conf", ".", "get", "(", "'source_type'", ")" ]
https://github.com/TensorMSA/tensormsa/blob/c36b565159cd934533636429add3c7d7263d622b/master/workflow/data/workflow_data_text.py#L66-L76
CouchPotato/CouchPotatoServer
7260c12f72447ddb6f062367c6dfbda03ecd4e9c
libs/bs4/element.py
python
Tag._should_pretty_print
(self, indent_level)
return ( indent_level is not None and (self.name not in HTMLAwareEntitySubstitution.preformatted_tags or self._is_xml))
Should this tag be pretty-printed?
Should this tag be pretty-printed?
[ "Should", "this", "tag", "be", "pretty", "-", "printed?" ]
def _should_pretty_print(self, indent_level): """Should this tag be pretty-printed?""" return ( indent_level is not None and (self.name not in HTMLAwareEntitySubstitution.preformatted_tags or self._is_xml))
[ "def", "_should_pretty_print", "(", "self", ",", "indent_level", ")", ":", "return", "(", "indent_level", "is", "not", "None", "and", "(", "self", ".", "name", "not", "in", "HTMLAwareEntitySubstitution", ".", "preformatted_tags", "or", "self", ".", "_is_xml", ...
https://github.com/CouchPotato/CouchPotatoServer/blob/7260c12f72447ddb6f062367c6dfbda03ecd4e9c/libs/bs4/element.py#L995-L1000
ClusterLabs/pcs
1f225199e02c8d20456bb386f4c913c3ff21ac78
pcs/lib/sbd.py
python
atb_has_to_be_enabled
( service_manager: ServiceManagerInterface, corosync_conf_facade: CorosyncConfFacade, node_number_modifier: int = 0, )
return ( not corosync_conf_facade.is_enabled_auto_tie_breaker() and is_auto_tie_breaker_needed( service_manager, corosync_conf_facade, node_number_modifier ) )
Return True whenever quorum option auto tie breaker has to be enabled for proper working of SBD fencing. False if it's not needed or it is already enabled. service_manager -- corosync_conf_facade -- node_number_modifier -- this value vill be added to current number of nodes. This can be useful to test whenever is ATB needed when adding/removeing node.
Return True whenever quorum option auto tie breaker has to be enabled for proper working of SBD fencing. False if it's not needed or it is already enabled.
[ "Return", "True", "whenever", "quorum", "option", "auto", "tie", "breaker", "has", "to", "be", "enabled", "for", "proper", "working", "of", "SBD", "fencing", ".", "False", "if", "it", "s", "not", "needed", "or", "it", "is", "already", "enabled", "." ]
def atb_has_to_be_enabled( service_manager: ServiceManagerInterface, corosync_conf_facade: CorosyncConfFacade, node_number_modifier: int = 0, ) -> bool: """ Return True whenever quorum option auto tie breaker has to be enabled for proper working of SBD fencing. False if it's not needed or it is already enabled. service_manager -- corosync_conf_facade -- node_number_modifier -- this value vill be added to current number of nodes. This can be useful to test whenever is ATB needed when adding/removeing node. """ return ( not corosync_conf_facade.is_enabled_auto_tie_breaker() and is_auto_tie_breaker_needed( service_manager, corosync_conf_facade, node_number_modifier ) )
[ "def", "atb_has_to_be_enabled", "(", "service_manager", ":", "ServiceManagerInterface", ",", "corosync_conf_facade", ":", "CorosyncConfFacade", ",", "node_number_modifier", ":", "int", "=", "0", ",", ")", "->", "bool", ":", "return", "(", "not", "corosync_conf_facade"...
https://github.com/ClusterLabs/pcs/blob/1f225199e02c8d20456bb386f4c913c3ff21ac78/pcs/lib/sbd.py#L82-L103
bert-nmt/bert-nmt
fcb616d28091ac23c9c16f30e6870fe90b8576d6
fairseq/models/transformer.py
python
TransformerS2Encoder.forward
(self, src_tokens, src_lengths, bert_encoder_out)
return { 'encoder_out': x, # T x B x C 'encoder_padding_mask': encoder_padding_mask, # B x T }
Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)`
Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)`
[ "Args", ":", "src_tokens", "(", "LongTensor", ")", ":", "tokens", "in", "the", "source", "language", "of", "shape", "(", "batch", "src_len", ")", "src_lengths", "(", "torch", ".", "LongTensor", ")", ":", "lengths", "of", "each", "source", "sentence", "of",...
def forward(self, src_tokens, src_lengths, bert_encoder_out): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask']) if self.layer_norm: x = self.layer_norm(x) return { 'encoder_out': x, # T x B x C 'encoder_padding_mask': encoder_padding_mask, # B x T }
[ "def", "forward", "(", "self", ",", "src_tokens", ",", "src_lengths", ",", "bert_encoder_out", ")", ":", "# embed tokens and positions", "x", "=", "self", ".", "embed_scale", "*", "self", ".", "embed_tokens", "(", "src_tokens", ")", "if", "self", ".", "embed_p...
https://github.com/bert-nmt/bert-nmt/blob/fcb616d28091ac23c9c16f30e6870fe90b8576d6/fairseq/models/transformer.py#L668-L707
google/containerregistry
8a11dc8c53003ecf5b72ffaf035ba280109356ac
client/v2_2/docker_session_.py
python
Push.upload
(self, image, use_digest = False)
Upload the layers of the given image. Args: image: the image to upload. use_digest: use the manifest digest (i.e. not tag) as the image reference.
Upload the layers of the given image.
[ "Upload", "the", "layers", "of", "the", "given", "image", "." ]
def upload(self, image, use_digest = False): """Upload the layers of the given image. Args: image: the image to upload. use_digest: use the manifest digest (i.e. not tag) as the image reference. """ # If the manifest (by digest) exists, then avoid N layer existence # checks (they must exist). if self._manifest_exists(image): if isinstance(self._name, docker_name.Tag): if self._remote_tag_digest(image) == image.digest(): logging.info('Tag points to the right manifest, skipping push.') return logging.info('Manifest exists, skipping blob uploads and pushing tag.') else: logging.info('Manifest exists, skipping upload.') elif isinstance(image, image_list.DockerImageList): for _, child in image: # TODO(user): Refactor so that the threadpool is shared. with child: self.upload(child, use_digest=True) elif self._threads == 1: for digest in image.distributable_blob_set(): self._upload_one(image, digest) else: with concurrent.futures.ThreadPoolExecutor( max_workers=self._threads) as executor: future_to_params = { executor.submit(self._upload_one, image, digest): (image, digest) for digest in image.distributable_blob_set() } for future in concurrent.futures.as_completed(future_to_params): future.result() # This should complete the upload by uploading the manifest. self._put_manifest(image, use_digest=use_digest)
[ "def", "upload", "(", "self", ",", "image", ",", "use_digest", "=", "False", ")", ":", "# If the manifest (by digest) exists, then avoid N layer existence", "# checks (they must exist).", "if", "self", ".", "_manifest_exists", "(", "image", ")", ":", "if", "isinstance",...
https://github.com/google/containerregistry/blob/8a11dc8c53003ecf5b72ffaf035ba280109356ac/client/v2_2/docker_session_.py#L286-L324
ahkab/ahkab
1e8939194b689909b8184ce7eba478b485ff9e3a
ahkab/diode.py
python
diode.get_op_info
(self, ports_v_v)
return op_keys, op_info
Information regarding the Operating Point (OP) **Parameters:** ports_v : list of lists The parameter is to be set to ``[[v]]``, where ``v`` is the voltage applied to the diode terminals. **Returns:** op_keys : list of strings The labels corresponding to the numeric values in ``op_info``. op_info : list of floats The values corresponding to ``op_keys``.
Information regarding the Operating Point (OP)
[ "Information", "regarding", "the", "Operating", "Point", "(", "OP", ")" ]
def get_op_info(self, ports_v_v): """Information regarding the Operating Point (OP) **Parameters:** ports_v : list of lists The parameter is to be set to ``[[v]]``, where ``v`` is the voltage applied to the diode terminals. **Returns:** op_keys : list of strings The labels corresponding to the numeric values in ``op_info``. op_info : list of floats The values corresponding to ``op_keys``. """ vn1n2 = float(ports_v_v[0][0]) idiode = self.i(0, (vn1n2,)) gmdiode = self.g(0, (vn1n2,), 0) op_keys = ["Part ID", "V(n1-n2) [V]", "I(n1-n2) [A]", "P [W]", "gm [A/V]", u"T [\u00b0K]"] op_info = [self.part_id.upper(), vn1n2, idiode, vn1n2*idiode, gmdiode, self._get_T()] return op_keys, op_info
[ "def", "get_op_info", "(", "self", ",", "ports_v_v", ")", ":", "vn1n2", "=", "float", "(", "ports_v_v", "[", "0", "]", "[", "0", "]", ")", "idiode", "=", "self", ".", "i", "(", "0", ",", "(", "vn1n2", ",", ")", ")", "gmdiode", "=", "self", ".",...
https://github.com/ahkab/ahkab/blob/1e8939194b689909b8184ce7eba478b485ff9e3a/ahkab/diode.py#L207-L230
triaquae/triaquae
bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9
TriAquae/models/django/views/debug.py
python
get_exception_reporter_filter
(request)
[]
def get_exception_reporter_filter(request): global default_exception_reporter_filter if default_exception_reporter_filter is None: # Load the default filter for the first time and cache it. modpath = settings.DEFAULT_EXCEPTION_REPORTER_FILTER modname, classname = modpath.rsplit('.', 1) try: mod = import_module(modname) except ImportError as e: raise ImproperlyConfigured( 'Error importing default exception reporter filter %s: "%s"' % (modpath, e)) try: default_exception_reporter_filter = getattr(mod, classname)() except AttributeError: raise ImproperlyConfigured('Default exception reporter filter module "%s" does not define a "%s" class' % (modname, classname)) if request: return getattr(request, 'exception_reporter_filter', default_exception_reporter_filter) else: return default_exception_reporter_filter
[ "def", "get_exception_reporter_filter", "(", "request", ")", ":", "global", "default_exception_reporter_filter", "if", "default_exception_reporter_filter", "is", "None", ":", "# Load the default filter for the first time and cache it.", "modpath", "=", "settings", ".", "DEFAULT_E...
https://github.com/triaquae/triaquae/blob/bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9/TriAquae/models/django/views/debug.py#L75-L93
mne-tools/mne-python
f90b303ce66a8415e64edd4605b09ac0179c1ebf
mne/source_space.py
python
_write_source_spaces_to_fid
(fid, src, verbose=None)
Write the source spaces to a FIF file. Parameters ---------- fid : file descriptor An open file descriptor. src : list The list of source spaces. %(verbose)s
Write the source spaces to a FIF file.
[ "Write", "the", "source", "spaces", "to", "a", "FIF", "file", "." ]
def _write_source_spaces_to_fid(fid, src, verbose=None): """Write the source spaces to a FIF file. Parameters ---------- fid : file descriptor An open file descriptor. src : list The list of source spaces. %(verbose)s """ for s in src: logger.info(' Write a source space...') start_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE) _write_one_source_space(fid, s, verbose) end_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE) logger.info(' [done]') logger.info(' %d source spaces written' % len(src))
[ "def", "_write_source_spaces_to_fid", "(", "fid", ",", "src", ",", "verbose", "=", "None", ")", ":", "for", "s", "in", "src", ":", "logger", ".", "info", "(", "' Write a source space...'", ")", "start_block", "(", "fid", ",", "FIFF", ".", "FIFFB_MNE_SOURC...
https://github.com/mne-tools/mne-python/blob/f90b303ce66a8415e64edd4605b09ac0179c1ebf/mne/source_space.py#L981-L998
awslabs/aws-servicebroker
c301912e7df3a2f09a9c34d3ae7ffe67c55aa3a0
sample-apps/rds/sample-app/src/bottle.py
python
Router.add
(self, rule, method, target, name=None)
Add a new rule or replace the target for an existing rule.
Add a new rule or replace the target for an existing rule.
[ "Add", "a", "new", "rule", "or", "replace", "the", "target", "for", "an", "existing", "rule", "." ]
def add(self, rule, method, target, name=None): ''' Add a new rule or replace the target for an existing rule. ''' anons = 0 # Number of anonymous wildcards found keys = [] # Names of keys pattern = '' # Regular expression pattern with named groups filters = [] # Lists of wildcard input filters builder = [] # Data structure for the URL builder is_static = True for key, mode, conf in self._itertokens(rule): if mode: is_static = False if mode == 'default': mode = self.default_filter mask, in_filter, out_filter = self.filters[mode](conf) if not key: pattern += '(?:%s)' % mask key = 'anon%d' % anons anons += 1 else: pattern += '(?P<%s>%s)' % (key, mask) keys.append(key) if in_filter: filters.append((key, in_filter)) builder.append((key, out_filter or str)) elif key: pattern += re.escape(key) builder.append((None, key)) self.builder[rule] = builder if name: self.builder[name] = builder if is_static and not self.strict_order: self.static.setdefault(method, {}) self.static[method][self.build(rule)] = (target, None) return try: re_pattern = re.compile('^(%s)$' % pattern) re_match = re_pattern.match except re.error: raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e())) if filters: def getargs(path): url_args = re_match(path).groupdict() for name, wildcard_filter in filters: try: url_args[name] = wildcard_filter(url_args[name]) except ValueError: raise HTTPError(400, 'Path has wrong format.') return url_args elif re_pattern.groupindex: def getargs(path): return re_match(path).groupdict() else: getargs = None flatpat = _re_flatten(pattern) whole_rule = (rule, flatpat, target, getargs) if (flatpat, method) in self._groups: if DEBUG: msg = 'Route <%s %s> overwrites a previously defined route' warnings.warn(msg % (method, rule), RuntimeWarning) self.dyna_routes[method][self._groups[flatpat, method]] = whole_rule else: self.dyna_routes.setdefault(method, []).append(whole_rule) self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1 self._compile(method)
[ "def", "add", "(", "self", ",", "rule", ",", "method", ",", "target", ",", "name", "=", "None", ")", ":", "anons", "=", "0", "# Number of anonymous wildcards found", "keys", "=", "[", "]", "# Names of keys", "pattern", "=", "''", "# Regular expression pattern ...
https://github.com/awslabs/aws-servicebroker/blob/c301912e7df3a2f09a9c34d3ae7ffe67c55aa3a0/sample-apps/rds/sample-app/src/bottle.py#L318-L386
demisto/content
5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07
Packs/Flashpoint/Integrations/Flashpoint/Flashpoint.py
python
get_post_context
(resp)
return post_ec
Prepare context data for forum post :param resp: forum post api response :return: dict object
Prepare context data for forum post
[ "Prepare", "context", "data", "for", "forum", "post" ]
def get_post_context(resp): """ Prepare context data for forum post :param resp: forum post api response :return: dict object """ post_ec = { 'PostId': resp['id'], 'PublishedAt': resp.get('published_at', ''), 'Url': resp.get('url', ''), 'PlatformUrl': resp.get('platform_url', ''), 'Forum': resp['embed']['forum'], 'Room': resp['embed']['room'], 'User': resp['embed']['author'] } return post_ec
[ "def", "get_post_context", "(", "resp", ")", ":", "post_ec", "=", "{", "'PostId'", ":", "resp", "[", "'id'", "]", ",", "'PublishedAt'", ":", "resp", ".", "get", "(", "'published_at'", ",", "''", ")", ",", "'Url'", ":", "resp", ".", "get", "(", "'url'...
https://github.com/demisto/content/blob/5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07/Packs/Flashpoint/Integrations/Flashpoint/Flashpoint.py#L393-L410
Chaffelson/nipyapi
d3b186fd701ce308c2812746d98af9120955e810
nipyapi/registry/models/versioned_remote_process_group.py
python
VersionedRemoteProcessGroup.name
(self)
return self._name
Gets the name of this VersionedRemoteProcessGroup. The component's name :return: The name of this VersionedRemoteProcessGroup. :rtype: str
Gets the name of this VersionedRemoteProcessGroup. The component's name
[ "Gets", "the", "name", "of", "this", "VersionedRemoteProcessGroup", ".", "The", "component", "s", "name" ]
def name(self): """ Gets the name of this VersionedRemoteProcessGroup. The component's name :return: The name of this VersionedRemoteProcessGroup. :rtype: str """ return self._name
[ "def", "name", "(", "self", ")", ":", "return", "self", ".", "_name" ]
https://github.com/Chaffelson/nipyapi/blob/d3b186fd701ce308c2812746d98af9120955e810/nipyapi/registry/models/versioned_remote_process_group.py#L155-L163
python/mypy
17850b3bd77ae9efb5d21f656c4e4e05ac48d894
mypy/stubdoc.py
python
DocStringParser.add_token
(self, token: tokenize.TokenInfo)
Process next token from the token stream.
Process next token from the token stream.
[ "Process", "next", "token", "from", "the", "token", "stream", "." ]
def add_token(self, token: tokenize.TokenInfo) -> None: """Process next token from the token stream.""" if (token.type == tokenize.NAME and token.string == self.function_name and self.state[-1] == STATE_INIT): self.state.append(STATE_FUNCTION_NAME) elif (token.type == tokenize.OP and token.string == '(' and self.state[-1] == STATE_FUNCTION_NAME): self.state.pop() self.accumulator = "" self.found = True self.state.append(STATE_ARGUMENT_LIST) elif self.state[-1] == STATE_FUNCTION_NAME: # Reset state, function name not followed by '('. self.state.pop() elif (token.type == tokenize.OP and token.string in ('[', '(', '{') and self.state[-1] != STATE_INIT): self.accumulator += token.string self.state.append(STATE_OPEN_BRACKET) elif (token.type == tokenize.OP and token.string in (']', ')', '}') and self.state[-1] == STATE_OPEN_BRACKET): self.accumulator += token.string self.state.pop() elif (token.type == tokenize.OP and token.string == ':' and self.state[-1] == STATE_ARGUMENT_LIST): self.arg_name = self.accumulator self.accumulator = "" self.state.append(STATE_ARGUMENT_TYPE) elif (token.type == tokenize.OP and token.string == '=' and self.state[-1] in (STATE_ARGUMENT_LIST, STATE_ARGUMENT_TYPE)): if self.state[-1] == STATE_ARGUMENT_TYPE: self.arg_type = self.accumulator self.state.pop() else: self.arg_name = self.accumulator self.accumulator = "" self.state.append(STATE_ARGUMENT_DEFAULT) elif (token.type == tokenize.OP and token.string in (',', ')') and self.state[-1] in (STATE_ARGUMENT_LIST, STATE_ARGUMENT_DEFAULT, STATE_ARGUMENT_TYPE)): if self.state[-1] == STATE_ARGUMENT_DEFAULT: self.arg_default = self.accumulator self.state.pop() elif self.state[-1] == STATE_ARGUMENT_TYPE: self.arg_type = self.accumulator self.state.pop() elif self.state[-1] == STATE_ARGUMENT_LIST: self.arg_name = self.accumulator if not (token.string == ')' and self.accumulator.strip() == '') \ and not _ARG_NAME_RE.match(self.arg_name): # Invalid argument name. 
self.reset() return if token.string == ')': self.state.pop() # arg_name is empty when there are no args. e.g. func() if self.arg_name: try: self.args.append(ArgSig(name=self.arg_name, type=self.arg_type, default=bool(self.arg_default))) except ValueError: # wrong type, use Any self.args.append(ArgSig(name=self.arg_name, type=None, default=bool(self.arg_default))) self.arg_name = "" self.arg_type = None self.arg_default = None self.accumulator = "" elif token.type == tokenize.OP and token.string == '->' and self.state[-1] == STATE_INIT: self.accumulator = "" self.state.append(STATE_RETURN_VALUE) # ENDMAKER is necessary for python 3.4 and 3.5. elif (token.type in (tokenize.NEWLINE, tokenize.ENDMARKER) and self.state[-1] in (STATE_INIT, STATE_RETURN_VALUE)): if self.state[-1] == STATE_RETURN_VALUE: if not is_valid_type(self.accumulator): self.reset() return self.ret_type = self.accumulator self.accumulator = "" self.state.pop() if self.found: self.signatures.append(FunctionSig(name=self.function_name, args=self.args, ret_type=self.ret_type)) self.found = False self.args = [] self.ret_type = 'Any' # Leave state as INIT. else: self.accumulator += token.string
[ "def", "add_token", "(", "self", ",", "token", ":", "tokenize", ".", "TokenInfo", ")", "->", "None", ":", "if", "(", "token", ".", "type", "==", "tokenize", ".", "NAME", "and", "token", ".", "string", "==", "self", ".", "function_name", "and", "self", ...
https://github.com/python/mypy/blob/17850b3bd77ae9efb5d21f656c4e4e05ac48d894/mypy/stubdoc.py#L89-L189