Dataset columns (type and length range):

  repo              stringlengths   7 .. 55
  path              stringlengths   4 .. 223
  func_name         stringlengths   1 .. 134
  original_string   stringlengths   75 .. 104k
  language          stringclasses   1 value
  code              stringlengths   75 .. 104k
  code_tokens       listlengths     19 .. 28.4k
  docstring         stringlengths   1 .. 46.9k
  docstring_tokens  listlengths     1 .. 1.97k
  sha               stringlengths   40 .. 40
  url               stringlengths   87 .. 315
  partition         stringclasses   1 value
google/dotty
efilter/parsers/dottysql/grammar.py
application
def application(tokens):
    """Matches function call (application)."""
    tokens = iter(tokens)
    func = next(tokens)
    paren = next(tokens)

    if func and func.name == "symbol" and paren.name == "lparen":
        # We would be able to unambiguously parse function application with
        # whitespace between the function name and the lparen, but let's not
        # do that because it's unexpected in most languages.
        if func.end != paren.start:
            raise errors.EfilterParseError(
                start=func.start, end=paren.end,
                message="No whitespace allowed between function and paren.")

        return common.TokenMatch(None, func.value, (func, paren))
python
def application(tokens):
    """Matches function call (application)."""
    tokens = iter(tokens)
    func = next(tokens)
    paren = next(tokens)

    if func and func.name == "symbol" and paren.name == "lparen":
        # We would be able to unambiguously parse function application with
        # whitespace between the function name and the lparen, but let's not
        # do that because it's unexpected in most languages.
        if func.end != paren.start:
            raise errors.EfilterParseError(
                start=func.start, end=paren.end,
                message="No whitespace allowed between function and paren.")

        return common.TokenMatch(None, func.value, (func, paren))
[ "def", "application", "(", "tokens", ")", ":", "tokens", "=", "iter", "(", "tokens", ")", "func", "=", "next", "(", "tokens", ")", "paren", "=", "next", "(", "tokens", ")", "if", "func", "and", "func", ".", "name", "==", "\"symbol\"", "and", "paren", ".", "name", "==", "\"lparen\"", ":", "# We would be able to unambiguously parse function application with", "# whitespace between the function name and the lparen, but let's not", "# do that because it's unexpected in most languages.", "if", "func", ".", "end", "!=", "paren", ".", "start", ":", "raise", "errors", ".", "EfilterParseError", "(", "start", "=", "func", ".", "start", ",", "end", "=", "paren", ".", "end", ",", "message", "=", "\"No whitespace allowed between function and paren.\"", ")", "return", "common", ".", "TokenMatch", "(", "None", ",", "func", ".", "value", ",", "(", "func", ",", "paren", ")", ")" ]
Matches function call (application).
[ "Matches", "function", "call", "(", "application", ")", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/dottysql/grammar.py#L208-L223
train
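The matcher above only needs tokens exposing name, value, start and end. A minimal sketch of driving it, using a hypothetical Token namedtuple and a plain ValueError in place of efilter's real token class and EfilterParseError (neither is reproduced here):

from collections import namedtuple

# Hypothetical stand-ins for efilter's token and match types (illustration only).
Token = namedtuple("Token", ["name", "value", "start", "end"])
TokenMatch = namedtuple("TokenMatch", ["operator", "value", "tokens"])

def application(tokens):
    """Simplified copy of the matcher above, with the error type swapped out."""
    tokens = iter(tokens)
    func = next(tokens)
    paren = next(tokens)
    if func and func.name == "symbol" and paren.name == "lparen":
        # Reject whitespace between the function name and the lparen.
        if func.end != paren.start:
            raise ValueError("No whitespace allowed between function and paren.")
        return TokenMatch(None, func.value, (func, paren))

# 'count(' matches because the symbol ends exactly where the lparen starts.
print(application([Token("symbol", "count", 0, 5), Token("lparen", "(", 5, 6)]))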
google/dotty
setup.py
BdistRPMCommand._make_spec_file
def _make_spec_file(self):
    """Generates the text of an RPM spec file.

    Returns:
      A list of strings containing the lines of text.
    """
    # Note that bdist_rpm can be an old style class.
    if issubclass(BdistRPMCommand, object):
        spec_file = super(BdistRPMCommand, self)._make_spec_file()
    else:
        spec_file = bdist_rpm._make_spec_file(self)

    if sys.version_info[0] < 3:
        python_package = "python"
    else:
        python_package = "python3"

    description = []
    summary = ""
    in_description = False

    python_spec_file = []
    for line in spec_file:
        if line.startswith("Summary: "):
            summary = line

        elif line.startswith("BuildRequires: "):
            line = "BuildRequires: {0:s}-setuptools".format(python_package)

        elif line.startswith("Requires: "):
            if python_package == "python3":
                line = line.replace("python", "python3")

        elif line.startswith("%description"):
            in_description = True

        elif line.startswith("%files"):
            line = "%files -f INSTALLED_FILES -n {0:s}-%{{name}}".format(
                python_package)

        elif line.startswith("%prep"):
            in_description = False

            python_spec_file.append(
                "%package -n {0:s}-%{{name}}".format(python_package))
            python_spec_file.append("{0:s}".format(summary))
            python_spec_file.append("")
            python_spec_file.append(
                "%description -n {0:s}-%{{name}}".format(python_package))
            python_spec_file.extend(description)

        elif in_description:
            # Ignore leading white lines in the description.
            if not description and not line:
                continue

            description.append(line)

        python_spec_file.append(line)

    return python_spec_file
python
def _make_spec_file(self):
    """Generates the text of an RPM spec file.

    Returns:
      A list of strings containing the lines of text.
    """
    # Note that bdist_rpm can be an old style class.
    if issubclass(BdistRPMCommand, object):
        spec_file = super(BdistRPMCommand, self)._make_spec_file()
    else:
        spec_file = bdist_rpm._make_spec_file(self)

    if sys.version_info[0] < 3:
        python_package = "python"
    else:
        python_package = "python3"

    description = []
    summary = ""
    in_description = False

    python_spec_file = []
    for line in spec_file:
        if line.startswith("Summary: "):
            summary = line

        elif line.startswith("BuildRequires: "):
            line = "BuildRequires: {0:s}-setuptools".format(python_package)

        elif line.startswith("Requires: "):
            if python_package == "python3":
                line = line.replace("python", "python3")

        elif line.startswith("%description"):
            in_description = True

        elif line.startswith("%files"):
            line = "%files -f INSTALLED_FILES -n {0:s}-%{{name}}".format(
                python_package)

        elif line.startswith("%prep"):
            in_description = False

            python_spec_file.append(
                "%package -n {0:s}-%{{name}}".format(python_package))
            python_spec_file.append("{0:s}".format(summary))
            python_spec_file.append("")
            python_spec_file.append(
                "%description -n {0:s}-%{{name}}".format(python_package))
            python_spec_file.extend(description)

        elif in_description:
            # Ignore leading white lines in the description.
            if not description and not line:
                continue

            description.append(line)

        python_spec_file.append(line)

    return python_spec_file
[ "def", "_make_spec_file", "(", "self", ")", ":", "# Note that bdist_rpm can be an old style class.", "if", "issubclass", "(", "BdistRPMCommand", ",", "object", ")", ":", "spec_file", "=", "super", "(", "BdistRPMCommand", ",", "self", ")", ".", "_make_spec_file", "(", ")", "else", ":", "spec_file", "=", "bdist_rpm", ".", "_make_spec_file", "(", "self", ")", "if", "sys", ".", "version_info", "[", "0", "]", "<", "3", ":", "python_package", "=", "\"python\"", "else", ":", "python_package", "=", "\"python3\"", "description", "=", "[", "]", "summary", "=", "\"\"", "in_description", "=", "False", "python_spec_file", "=", "[", "]", "for", "line", "in", "spec_file", ":", "if", "line", ".", "startswith", "(", "\"Summary: \"", ")", ":", "summary", "=", "line", "elif", "line", ".", "startswith", "(", "\"BuildRequires: \"", ")", ":", "line", "=", "\"BuildRequires: {0:s}-setuptools\"", ".", "format", "(", "python_package", ")", "elif", "line", ".", "startswith", "(", "\"Requires: \"", ")", ":", "if", "python_package", "==", "\"python3\"", ":", "line", "=", "line", ".", "replace", "(", "\"python\"", ",", "\"python3\"", ")", "elif", "line", ".", "startswith", "(", "\"%description\"", ")", ":", "in_description", "=", "True", "elif", "line", ".", "startswith", "(", "\"%files\"", ")", ":", "line", "=", "\"%files -f INSTALLED_FILES -n {0:s}-%{{name}}\"", ".", "format", "(", "python_package", ")", "elif", "line", ".", "startswith", "(", "\"%prep\"", ")", ":", "in_description", "=", "False", "python_spec_file", ".", "append", "(", "\"%package -n {0:s}-%{{name}}\"", ".", "format", "(", "python_package", ")", ")", "python_spec_file", ".", "append", "(", "\"{0:s}\"", ".", "format", "(", "summary", ")", ")", "python_spec_file", ".", "append", "(", "\"\"", ")", "python_spec_file", ".", "append", "(", "\"%description -n {0:s}-%{{name}}\"", ".", "format", "(", "python_package", ")", ")", "python_spec_file", ".", "extend", "(", "description", ")", "elif", "in_description", ":", "# Ignore leading white lines in the description.", "if", "not", "description", "and", "not", "line", ":", "continue", "description", ".", "append", "(", "line", ")", "python_spec_file", ".", "append", "(", "line", ")", "return", "python_spec_file" ]
Generates the text of an RPM spec file. Returns: A list of strings containing the lines of text.
[ "Generates", "the", "text", "of", "an", "RPM", "spec", "file", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/setup.py#L45-L105
train
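The transformation this command applies is easiest to see on a tiny spec fragment. A standalone sketch of the same rewrite rules run over a made-up spec; the helper name and sample lines are illustrative, not part of the project:

def rewrite_spec(spec_lines, python_package="python3"):
    """Apply the BuildRequires/Requires/%files rewrites from above."""
    out = []
    for line in spec_lines:
        if line.startswith("BuildRequires: "):
            line = "BuildRequires: {0:s}-setuptools".format(python_package)
        elif line.startswith("Requires: ") and python_package == "python3":
            line = line.replace("python", "python3")
        elif line.startswith("%files"):
            line = "%files -f INSTALLED_FILES -n {0:s}-%{{name}}".format(python_package)
        out.append(line)
    return out

demo = ["Summary: example package",
        "BuildRequires: python-setuptools",
        "Requires: python-six",
        "%files"]
print("\n".join(rewrite_spec(demo)))
# -> BuildRequires: python3-setuptools
#    Requires: python3-six
#    %files -f INSTALLED_FILES -n python3-%{name}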
google/dotty
efilter/scope.py
ScopeStack.resolve
def resolve(self, name):
    """Call IStructured.resolve across all scopes and return first hit."""
    for scope in reversed(self.scopes):
        try:
            return structured.resolve(scope, name)
        except (KeyError, AttributeError):
            continue

    raise AttributeError(name)
python
def resolve(self, name):
    """Call IStructured.resolve across all scopes and return first hit."""
    for scope in reversed(self.scopes):
        try:
            return structured.resolve(scope, name)
        except (KeyError, AttributeError):
            continue

    raise AttributeError(name)
[ "def", "resolve", "(", "self", ",", "name", ")", ":", "for", "scope", "in", "reversed", "(", "self", ".", "scopes", ")", ":", "try", ":", "return", "structured", ".", "resolve", "(", "scope", ",", "name", ")", "except", "(", "KeyError", ",", "AttributeError", ")", ":", "continue", "raise", "AttributeError", "(", "name", ")" ]
Call IStructured.resolve across all scopes and return first hit.
[ "Call", "IStructured", ".", "resolve", "across", "all", "scopes", "and", "return", "first", "hit", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/scope.py#L73-L81
train
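The lookup order is the interesting part: the innermost scope wins, and only a miss in every scope raises. A self-contained sketch of the same pattern, with plain dicts standing in for IStructured scopes (the class name here is hypothetical):

class DictScopeStack(object):
    # Scopes are ordered outermost (global) first, innermost (local) last.
    def __init__(self, *scopes):
        self.scopes = list(scopes)

    def resolve(self, name):
        for scope in reversed(self.scopes):  # innermost scope is tried first
            try:
                return scope[name]
            except KeyError:
                continue
        raise AttributeError(name)

stack = DictScopeStack({"x": 1, "y": 2}, {"x": 10})
print(stack.resolve("x"))  # 10 - the local scope shadows the global one
print(stack.resolve("y"))  # 2  - falls through to the global scope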
google/dotty
efilter/scope.py
ScopeStack.getmembers
def getmembers(self):
    """Gets members (vars) from all scopes, using both runtime and static.

    This method will attempt both static and runtime getmembers. This is
    the recommended way of getting available members.

    Returns:
        Set of available vars.

    Raises:
        NotImplementedError if any scope fails to implement 'getmembers'.
    """
    names = set()
    for scope in self.scopes:
        if isinstance(scope, type):
            names.update(structured.getmembers_static(scope))
        else:
            names.update(structured.getmembers_runtime(scope))

    return names
python
def getmembers(self):
    """Gets members (vars) from all scopes, using both runtime and static.

    This method will attempt both static and runtime getmembers. This is
    the recommended way of getting available members.

    Returns:
        Set of available vars.

    Raises:
        NotImplementedError if any scope fails to implement 'getmembers'.
    """
    names = set()
    for scope in self.scopes:
        if isinstance(scope, type):
            names.update(structured.getmembers_static(scope))
        else:
            names.update(structured.getmembers_runtime(scope))

    return names
[ "def", "getmembers", "(", "self", ")", ":", "names", "=", "set", "(", ")", "for", "scope", "in", "self", ".", "scopes", ":", "if", "isinstance", "(", "scope", ",", "type", ")", ":", "names", ".", "update", "(", "structured", ".", "getmembers_static", "(", "scope", ")", ")", "else", ":", "names", ".", "update", "(", "structured", ".", "getmembers_runtime", "(", "scope", ")", ")", "return", "names" ]
Gets members (vars) from all scopes, using both runtime and static. This method will attempt both static and runtime getmembers. This is the recommended way of getting available members. Returns: Set of available vars. Raises: NotImplementedError if any scope fails to implement 'getmembers'.
[ "Gets", "members", "(", "vars", ")", "from", "all", "scopes", "using", "both", "runtime", "and", "static", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/scope.py#L83-L102
train
google/dotty
efilter/scope.py
ScopeStack.getmembers_runtime
def getmembers_runtime(self):
    """Gets members (vars) from all scopes using ONLY runtime information.

    You most likely want to use ScopeStack.getmembers instead.

    Returns:
        Set of available vars.

    Raises:
        NotImplementedError if any scope fails to implement 'getmembers'.
    """
    names = set()
    for scope in self.scopes:
        names.update(structured.getmembers_runtime(scope))

    return names
python
def getmembers_runtime(self):
    """Gets members (vars) from all scopes using ONLY runtime information.

    You most likely want to use ScopeStack.getmembers instead.

    Returns:
        Set of available vars.

    Raises:
        NotImplementedError if any scope fails to implement 'getmembers'.
    """
    names = set()
    for scope in self.scopes:
        names.update(structured.getmembers_runtime(scope))

    return names
[ "def", "getmembers_runtime", "(", "self", ")", ":", "names", "=", "set", "(", ")", "for", "scope", "in", "self", ".", "scopes", ":", "names", ".", "update", "(", "structured", ".", "getmembers_runtime", "(", "scope", ")", ")", "return", "names" ]
Gets members (vars) from all scopes using ONLY runtime information. You most likely want to use ScopeStack.getmembers instead. Returns: Set of available vars. Raises: NotImplementedError if any scope fails to implement 'getmembers'.
[ "Gets", "members", "(", "vars", ")", "from", "all", "scopes", "using", "ONLY", "runtime", "information", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/scope.py#L104-L119
train
google/dotty
efilter/scope.py
ScopeStack.getmembers_static
def getmembers_static(cls):
    """Gets members (vars) from all scopes using ONLY static information.

    You most likely want to use ScopeStack.getmembers instead.

    Returns:
        Set of available vars.

    Raises:
        NotImplementedError if any scope fails to implement 'getmembers'.
    """
    names = set()
    for scope in cls.scopes:
        names.update(structured.getmembers_static(scope))

    return names
python
def getmembers_static(cls):
    """Gets members (vars) from all scopes using ONLY static information.

    You most likely want to use ScopeStack.getmembers instead.

    Returns:
        Set of available vars.

    Raises:
        NotImplementedError if any scope fails to implement 'getmembers'.
    """
    names = set()
    for scope in cls.scopes:
        names.update(structured.getmembers_static(scope))

    return names
[ "def", "getmembers_static", "(", "cls", ")", ":", "names", "=", "set", "(", ")", "for", "scope", "in", "cls", ".", "scopes", ":", "names", ".", "update", "(", "structured", ".", "getmembers_static", "(", "scope", ")", ")", "return", "names" ]
Gets members (vars) from all scopes using ONLY static information. You most likely want to use ScopeStack.getmembers instead. Returns: Set of available vars. Raises: NotImplementedError if any scope fails to implement 'getmembers'.
[ "Gets", "members", "(", "vars", ")", "from", "all", "scopes", "using", "ONLY", "static", "information", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/scope.py#L122-L137
train
google/dotty
efilter/scope.py
ScopeStack.reflect
def reflect(self, name):
    """Reflect 'name' starting with local scope all the way up to global.

    This method will attempt both static and runtime reflection. This is
    the recommended way of using reflection.

    Returns:
        Type of 'name', or protocol.AnyType.

    Caveat:
        The type of 'name' does not necessarily have to be an instance of
        Python's type - it depends on what the host application returns
        through the reflection API. For example, Rekall uses objects
        generated at runtime to simulate a native (C/C++) type system.
    """
    # Return whatever the most local scope defines this as, or bubble all
    # the way to the top.
    result = None
    for scope in reversed(self.scopes):
        try:
            if isinstance(scope, type):
                result = structured.reflect_static_member(scope, name)
            else:
                result = structured.reflect_runtime_member(scope, name)

            if result is not None:
                return result
        except (NotImplementedError, KeyError, AttributeError):
            continue

    return protocol.AnyType
python
def reflect(self, name):
    """Reflect 'name' starting with local scope all the way up to global.

    This method will attempt both static and runtime reflection. This is
    the recommended way of using reflection.

    Returns:
        Type of 'name', or protocol.AnyType.

    Caveat:
        The type of 'name' does not necessarily have to be an instance of
        Python's type - it depends on what the host application returns
        through the reflection API. For example, Rekall uses objects
        generated at runtime to simulate a native (C/C++) type system.
    """
    # Return whatever the most local scope defines this as, or bubble all
    # the way to the top.
    result = None
    for scope in reversed(self.scopes):
        try:
            if isinstance(scope, type):
                result = structured.reflect_static_member(scope, name)
            else:
                result = structured.reflect_runtime_member(scope, name)

            if result is not None:
                return result
        except (NotImplementedError, KeyError, AttributeError):
            continue

    return protocol.AnyType
[ "def", "reflect", "(", "self", ",", "name", ")", ":", "# Return whatever the most local scope defines this as, or bubble all", "# the way to the top.", "result", "=", "None", "for", "scope", "in", "reversed", "(", "self", ".", "scopes", ")", ":", "try", ":", "if", "isinstance", "(", "scope", ",", "type", ")", ":", "result", "=", "structured", ".", "reflect_static_member", "(", "scope", ",", "name", ")", "else", ":", "result", "=", "structured", ".", "reflect_runtime_member", "(", "scope", ",", "name", ")", "if", "result", "is", "not", "None", ":", "return", "result", "except", "(", "NotImplementedError", ",", "KeyError", ",", "AttributeError", ")", ":", "continue", "return", "protocol", ".", "AnyType" ]
Reflect 'name' starting with local scope all the way up to global. This method will attempt both static and runtime reflection. This is the recommended way of using reflection. Returns: Type of 'name', or protocol.AnyType. Caveat: The type of 'name' does not necessarily have to be an instance of Python's type - it depends on what the host application returns through the reflection API. For example, Rekall uses objects generated at runtime to simulate a native (C/C++) type system.
[ "Reflect", "name", "starting", "with", "local", "scope", "all", "the", "way", "up", "to", "global", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/scope.py#L139-L170
train
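The isinstance(scope, type) test is what routes class objects to static reflection and live instances to runtime reflection. A sketch of that dispatch, using class annotations as a stand-in for efilter's static reflection API; the helper names below are hypothetical:

class AnyType(object):
    """Stand-in for protocol.AnyType, the 'no information' answer."""

def _reflect_member(scope, name):
    if isinstance(scope, type):
        # Static path: consult only the class declaration.
        return getattr(scope, "__annotations__", {}).get(name)
    # Runtime path: consult the live object.
    value = getattr(scope, name, None)
    return type(value) if value is not None else None

def reflect(scopes, name):
    for scope in reversed(scopes):  # most local scope wins
        result = _reflect_member(scope, name)
        if result is not None:
            return result
    return AnyType

class Process(object):
    pid: int

print(reflect([Process, Process()], "pid"))      # <class 'int'>, via the static path
print(reflect([Process, Process()], "missing"))  # AnyType - nothing matched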
google/dotty
efilter/scope.py
ScopeStack.reflect_runtime_member
def reflect_runtime_member(self, name):
    """Reflect 'name' using ONLY runtime reflection.

    You most likely want to use ScopeStack.reflect instead.

    Returns:
        Type of 'name', or protocol.AnyType.
    """
    for scope in reversed(self.scopes):
        try:
            return structured.reflect_runtime_member(scope, name)
        except (NotImplementedError, KeyError, AttributeError):
            continue

    return protocol.AnyType
python
def reflect_runtime_member(self, name):
    """Reflect 'name' using ONLY runtime reflection.

    You most likely want to use ScopeStack.reflect instead.

    Returns:
        Type of 'name', or protocol.AnyType.
    """
    for scope in reversed(self.scopes):
        try:
            return structured.reflect_runtime_member(scope, name)
        except (NotImplementedError, KeyError, AttributeError):
            continue

    return protocol.AnyType
[ "def", "reflect_runtime_member", "(", "self", ",", "name", ")", ":", "for", "scope", "in", "reversed", "(", "self", ".", "scopes", ")", ":", "try", ":", "return", "structured", ".", "reflect_runtime_member", "(", "scope", ",", "name", ")", "except", "(", "NotImplementedError", ",", "KeyError", ",", "AttributeError", ")", ":", "continue", "return", "protocol", ".", "AnyType" ]
Reflect 'name' using ONLY runtime reflection. You most likely want to use ScopeStack.reflect instead. Returns: Type of 'name', or protocol.AnyType.
[ "Reflect", "name", "using", "ONLY", "runtime", "reflection", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/scope.py#L172-L186
train
google/dotty
efilter/scope.py
ScopeStack.reflect_static_member
def reflect_static_member(cls, name):
    """Reflect 'name' using ONLY static reflection.

    You most likely want to use ScopeStack.reflect instead.

    Returns:
        Type of 'name', or protocol.AnyType.
    """
    for scope in reversed(cls.scopes):
        try:
            return structured.reflect_static_member(scope, name)
        except (NotImplementedError, KeyError, AttributeError):
            continue

    return protocol.AnyType
python
def reflect_static_member(cls, name):
    """Reflect 'name' using ONLY static reflection.

    You most likely want to use ScopeStack.reflect instead.

    Returns:
        Type of 'name', or protocol.AnyType.
    """
    for scope in reversed(cls.scopes):
        try:
            return structured.reflect_static_member(scope, name)
        except (NotImplementedError, KeyError, AttributeError):
            continue

    return protocol.AnyType
[ "def", "reflect_static_member", "(", "cls", ",", "name", ")", ":", "for", "scope", "in", "reversed", "(", "cls", ".", "scopes", ")", ":", "try", ":", "return", "structured", ".", "reflect_static_member", "(", "scope", ",", "name", ")", "except", "(", "NotImplementedError", ",", "KeyError", ",", "AttributeError", ")", ":", "continue", "return", "protocol", ".", "AnyType" ]
Reflect 'name' using ONLY static reflection. You most likely want to use ScopeStack.reflect instead. Returns: Type of 'name', or protocol.AnyType.
[ "Reflect", "name", "using", "ONLY", "static", "reflection", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/scope.py#L189-L203
train
radical-cybertools/radical.entk
src/radical/entk/utils/prof_utils.py
get_hostmap
def get_hostmap(profile):
    '''
    We abuse the profile combination to also derive a pilot-host map, which
    will tell us on what exact host each pilot has been running.  To do so,
    we check for the PMGR_ACTIVE advance event in agent_0.prof, and use the
    NTP sync info to associate a hostname.
    '''
    # FIXME: This should be replaced by proper hostname logging
    #        in `pilot.resource_details`.

    hostmap = dict()  # map pilot IDs to host names
    for entry in profile:
        if entry[ru.EVENT] == 'hostname':
            hostmap[entry[ru.UID]] = entry[ru.MSG]

    return hostmap
python
def get_hostmap(profile):
    '''
    We abuse the profile combination to also derive a pilot-host map, which
    will tell us on what exact host each pilot has been running.  To do so,
    we check for the PMGR_ACTIVE advance event in agent_0.prof, and use the
    NTP sync info to associate a hostname.
    '''
    # FIXME: This should be replaced by proper hostname logging
    #        in `pilot.resource_details`.

    hostmap = dict()  # map pilot IDs to host names
    for entry in profile:
        if entry[ru.EVENT] == 'hostname':
            hostmap[entry[ru.UID]] = entry[ru.MSG]

    return hostmap
[ "def", "get_hostmap", "(", "profile", ")", ":", "# FIXME: This should be replaced by proper hostname logging", "# in `pilot.resource_details`.", "hostmap", "=", "dict", "(", ")", "# map pilot IDs to host names", "for", "entry", "in", "profile", ":", "if", "entry", "[", "ru", ".", "EVENT", "]", "==", "'hostname'", ":", "hostmap", "[", "entry", "[", "ru", ".", "UID", "]", "]", "=", "entry", "[", "ru", ".", "MSG", "]", "return", "hostmap" ]
We abuse the profile combination to also derive a pilot-host map, which will tell us on what exact host each pilot has been running. To do so, we check for the PMGR_ACTIVE advance event in agent_0.prof, and use the NTP sync info to associate a hostname.
[ "We", "abuse", "the", "profile", "combination", "to", "also", "derive", "a", "pilot", "-", "host", "map", "which", "will", "tell", "us", "on", "what", "exact", "host", "each", "pilot", "has", "been", "running", ".", "To", "do", "so", "we", "check", "for", "the", "PMGR_ACTIVE", "advance", "event", "in", "agent_0", ".", "prof", "and", "use", "the", "NTP", "sync", "info", "to", "associate", "a", "hostname", "." ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/utils/prof_utils.py#L16-L31
train
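Each profile entry is a flat row indexed by column constants from radical.utils. A standalone sketch of the same extraction with made-up indices in place of ru.EVENT / ru.UID / ru.MSG; the real column layout may differ:

# Hypothetical column layout; radical.utils defines the real constants.
EVENT, UID, MSG = 1, 4, 6

def get_hostmap(profile):
    hostmap = dict()  # map pilot IDs to host names
    for entry in profile:
        if entry[EVENT] == 'hostname':
            hostmap[entry[UID]] = entry[MSG]
    return hostmap

profile = [
    (0.0, 'hostname', '', '', 'pilot.0000', '', 'node042'),
    (0.1, 'advance',  '', '', 'pilot.0000', '', 'PMGR_ACTIVE'),
]
print(get_hostmap(profile))  # {'pilot.0000': 'node042'}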
radical-cybertools/radical.entk
src/radical/entk/utils/prof_utils.py
get_hostmap_deprecated
def get_hostmap_deprecated(profiles):
    '''
    This method mangles combine_profiles and get_hostmap, and is deprecated.
    At this point it only returns the hostmap
    '''
    hostmap = dict()  # map pilot IDs to host names
    for pname, prof in profiles.iteritems():

        if not len(prof):
            continue

        if not prof[0][ru.MSG]:
            continue

        host, ip, _, _, _ = prof[0][ru.MSG].split(':')
        host_id = '%s:%s' % (host, ip)

        for row in prof:
            if 'agent_0.prof' in pname      and \
                row[ru.EVENT] == 'advance'  and \
                row[ru.STATE] == rps.PMGR_ACTIVE:
                hostmap[row[ru.UID]] = host_id
                break

    return hostmap
python
def get_hostmap_deprecated(profiles):
    '''
    This method mangles combine_profiles and get_hostmap, and is deprecated.
    At this point it only returns the hostmap
    '''
    hostmap = dict()  # map pilot IDs to host names
    for pname, prof in profiles.iteritems():

        if not len(prof):
            continue

        if not prof[0][ru.MSG]:
            continue

        host, ip, _, _, _ = prof[0][ru.MSG].split(':')
        host_id = '%s:%s' % (host, ip)

        for row in prof:
            if 'agent_0.prof' in pname      and \
                row[ru.EVENT] == 'advance'  and \
                row[ru.STATE] == rps.PMGR_ACTIVE:
                hostmap[row[ru.UID]] = host_id
                break

    return hostmap
[ "def", "get_hostmap_deprecated", "(", "profiles", ")", ":", "hostmap", "=", "dict", "(", ")", "# map pilot IDs to host names", "for", "pname", ",", "prof", "in", "profiles", ".", "iteritems", "(", ")", ":", "if", "not", "len", "(", "prof", ")", ":", "continue", "if", "not", "prof", "[", "0", "]", "[", "ru", ".", "MSG", "]", ":", "continue", "host", ",", "ip", ",", "_", ",", "_", ",", "_", "=", "prof", "[", "0", "]", "[", "ru", ".", "MSG", "]", ".", "split", "(", "':'", ")", "host_id", "=", "'%s:%s'", "%", "(", "host", ",", "ip", ")", "for", "row", "in", "prof", ":", "if", "'agent_0.prof'", "in", "pname", "and", "row", "[", "ru", ".", "EVENT", "]", "==", "'advance'", "and", "row", "[", "ru", ".", "STATE", "]", "==", "rps", ".", "PMGR_ACTIVE", ":", "hostmap", "[", "row", "[", "ru", ".", "UID", "]", "]", "=", "host_id", "break", "return", "hostmap" ]
This method mangles combine_profiles and get_hostmap, and is deprecated. At this point it only returns the hostmap
[ "This", "method", "mangles", "combine_profiles", "and", "get_hostmap", "and", "is", "deprecated", ".", "At", "this", "point", "it", "only", "returns", "the", "hostmap" ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/utils/prof_utils.py#L34-L60
train
radical-cybertools/radical.entk
src/radical/entk/appman/appmanager.py
AppManager.run
def run(self):
    """
    **Purpose**: Run the application manager. Once the workflow and resource
    manager have been assigned. Invoking this method will start the setting
    up the communication infrastructure, submitting a resource request and
    then submission of all the tasks.
    """

    try:

        # Set None objects local to each run
        self._wfp = None
        self._sync_thread = None
        self._terminate_sync = Event()
        self._resubmit_failed = False
        self._cur_attempt = 1

        if not self._workflow:
            self._logger.error('No workflow assigned currently, please check your script')
            raise MissingError(obj=self._uid, missing_attribute='workflow')

        if not self._resource_manager:
            self._logger.error('No resource manager assigned currently, please create and add a valid resource manager')
            raise MissingError(obj=self._uid, missing_attribute='resource_manager')

        self._prof.prof('amgr run started', uid=self._uid)

        # Setup rabbitmq stuff
        if not self._mqs_setup:
            self._report.info('Setting up RabbitMQ system')
            setup = self._setup_mqs()

            if not setup:
                self._logger.error('RabbitMQ system not available')
                raise EnTKError("RabbitMQ setup failed")

            self._mqs_setup = True
            self._report.ok('>>ok\n')

        # Create WFProcessor object
        self._prof.prof('creating wfp obj', uid=self._uid)
        self._wfp = WFprocessor(sid=self._sid,
                                workflow=self._workflow,
                                pending_queue=self._pending_queue,
                                completed_queue=self._completed_queue,
                                mq_hostname=self._mq_hostname,
                                port=self._port,
                                resubmit_failed=self._resubmit_failed)
        self._wfp._initialize_workflow()
        self._workflow = self._wfp.workflow

        # Submit resource request if not resource allocation done till now or
        # resubmit a new one if the old one has completed
        if self._resource_manager:
            res_alloc_state = self._resource_manager.get_resource_allocation_state()
            if (not res_alloc_state) or (res_alloc_state in self._resource_manager.get_completed_states()):

                self._logger.info('Starting resource request submission')
                self._prof.prof('init rreq submission', uid=self._uid)
                self._resource_manager._submit_resource_request()

                res_alloc_state = self._resource_manager.get_resource_allocation_state()
                if res_alloc_state in self._resource_manager.get_completed_states():
                    raise EnTKError(msg="Cannot proceed. Resource allocation ended up in %s" % res_alloc_state)

        else:
            self._logger.exception('Cannot run without resource manager, please create and assign a resource manager')
            raise EnTKError(text='Missing resource manager')

        # Start synchronizer thread
        if not self._sync_thread:
            self._logger.info('Starting synchronizer thread')
            self._sync_thread = Thread(target=self._synchronizer,
                                       name='synchronizer-thread')
            self._prof.prof('starting synchronizer thread', uid=self._uid)
            self._sync_thread.start()

        # Start WFprocessor
        self._logger.info('Starting WFProcessor process from AppManager')
        self._wfp.start_processor()
        self._report.ok('All components created\n')

        # Create tmgr object only if it does not already exist
        if self._rts == 'radical.pilot':
            from radical.entk.execman.rp import TaskManager
        elif self._rts == 'mock':
            from radical.entk.execman.mock import TaskManager

        if not self._task_manager:
            self._prof.prof('creating tmgr obj', uid=self._uid)
            self._task_manager = TaskManager(sid=self._sid,
                                             pending_queue=self._pending_queue,
                                             completed_queue=self._completed_queue,
                                             mq_hostname=self._mq_hostname,
                                             rmgr=self._resource_manager,
                                             port=self._port)
            self._logger.info('Starting task manager process from AppManager')
            self._task_manager.start_manager()
            self._task_manager.start_heartbeat()

        active_pipe_count = len(self._workflow)
        finished_pipe_uids = []

        # We wait till all pipelines of the workflow are marked complete
        while ((active_pipe_count > 0) and
               (self._wfp.workflow_incomplete()) and
               (self._resource_manager.get_resource_allocation_state() not in
                self._resource_manager.get_completed_states())):

            if active_pipe_count > 0:
                for pipe in self._workflow:
                    with pipe.lock:
                        if (pipe.completed) and (pipe.uid not in finished_pipe_uids):
                            self._logger.info('Pipe %s completed' % pipe.uid)
                            finished_pipe_uids.append(pipe.uid)
                            active_pipe_count -= 1
                            self._logger.info('Active pipes: %s' % active_pipe_count)

            if (not self._sync_thread.is_alive()) and (self._cur_attempt <= self._reattempts):
                self._sync_thread = Thread(target=self._synchronizer,
                                           name='synchronizer-thread')
                self._logger.info('Restarting synchronizer thread')
                self._prof.prof('restarting synchronizer', uid=self._uid)
                self._sync_thread.start()
                self._cur_attempt += 1

            if (not self._wfp.check_processor()) and (self._cur_attempt <= self._reattempts):
                """
                If WFP dies, both child threads are also cleaned out.
                We simply recreate the wfp object with a copy of the workflow
                in the appmanager and start the processor.
                """
                self._prof.prof('recreating wfp obj', uid=self._uid)
                self._wfp = WFProcessor(sid=self._sid,
                                        workflow=self._workflow,
                                        pending_queue=self._pending_queue,
                                        completed_queue=self._completed_queue,
                                        mq_hostname=self._mq_hostname,
                                        port=self._port,
                                        resubmit_failed=self._resubmit_failed)
                self._logger.info('Restarting WFProcessor process from AppManager')
                self._wfp.start_processor()
                self._cur_attempt += 1

            if (not self._task_manager.check_heartbeat()) and (self._cur_attempt <= self._reattempts):
                """
                If the tmgr process or heartbeat dies, we simply start a
                new process using the start_manager method. We do not
                need to create a new instance of the TaskManager object
                itself. We stop and start a new instance of the
                heartbeat thread as well.
                """
                self._prof.prof('restarting tmgr process and heartbeat', uid=self._uid)
                self._logger.info('Terminating heartbeat thread')
                self._task_manager.terminate_heartbeat()
                self._logger.info('Terminating tmgr process')
                self._task_manager.terminate_manager()
                self._logger.info('Restarting task manager process')
                self._task_manager.start_manager()
                self._logger.info('Restarting heartbeat thread')
                self._task_manager.start_heartbeat()
                self._cur_attempt += 1

        self._prof.prof('start termination', uid=self._uid)

        # Terminate threads in following order: wfp, helper, synchronizer
        self._logger.info('Terminating WFprocessor')
        self._wfp.terminate_processor()

        self._logger.info('Terminating synchronizer thread')
        self._terminate_sync.set()
        self._sync_thread.join()
        self._logger.info('Synchronizer thread terminated')

        if self._autoterminate:
            self.resource_terminate()

        if self._write_workflow:
            write_workflow(self._workflow, self._sid)

        self._prof.prof('termination done', uid=self._uid)

    except KeyboardInterrupt:

        self._prof.prof('start termination', uid=self._uid)
        self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' +
                               'trying to cancel enqueuer thread gracefully...')

        # Terminate threads in following order: wfp, helper, synchronizer
        if self._wfp:
            self._logger.info('Terminating WFprocessor')
            self._wfp.terminate_processor()

        if self._task_manager:
            self._logger.info('Terminating task manager process')
            self._task_manager.terminate_manager()
            self._task_manager.terminate_heartbeat()

        if self._sync_thread:
            self._logger.info('Terminating synchronizer thread')
            self._terminate_sync.set()
            self._sync_thread.join()
            self._logger.info('Synchronizer thread terminated')

        if self._resource_manager:
            self._resource_manager._terminate_resource_request()

        self._prof.prof('termination done', uid=self._uid)
        raise KeyboardInterrupt

    except Exception, ex:

        self._prof.prof('start termination', uid=self._uid)
        self._logger.exception('Error in AppManager: %s' % ex)

        # Terminate threads in following order: wfp, helper, synchronizer
        if self._wfp:
            self._logger.info('Terminating WFprocessor')
            self._wfp.terminate_processor()

        if self._task_manager:
            self._logger.info('Terminating task manager process')
            self._task_manager.terminate_manager()
            self._task_manager.terminate_heartbeat()

        if self._sync_thread:
            self._logger.info('Terminating synchronizer thread')
            self._terminate_sync.set()
            self._sync_thread.join()
            self._logger.info('Synchronizer thread terminated')

        if self._resource_manager:
            self._resource_manager._terminate_resource_request()

        self._prof.prof('termination done', uid=self._uid)
        raise
python
def run(self):
    """
    **Purpose**: Run the application manager. Once the workflow and resource
    manager have been assigned. Invoking this method will start the setting
    up the communication infrastructure, submitting a resource request and
    then submission of all the tasks.
    """

    try:

        # Set None objects local to each run
        self._wfp = None
        self._sync_thread = None
        self._terminate_sync = Event()
        self._resubmit_failed = False
        self._cur_attempt = 1

        if not self._workflow:
            self._logger.error('No workflow assigned currently, please check your script')
            raise MissingError(obj=self._uid, missing_attribute='workflow')

        if not self._resource_manager:
            self._logger.error('No resource manager assigned currently, please create and add a valid resource manager')
            raise MissingError(obj=self._uid, missing_attribute='resource_manager')

        self._prof.prof('amgr run started', uid=self._uid)

        # Setup rabbitmq stuff
        if not self._mqs_setup:
            self._report.info('Setting up RabbitMQ system')
            setup = self._setup_mqs()

            if not setup:
                self._logger.error('RabbitMQ system not available')
                raise EnTKError("RabbitMQ setup failed")

            self._mqs_setup = True
            self._report.ok('>>ok\n')

        # Create WFProcessor object
        self._prof.prof('creating wfp obj', uid=self._uid)
        self._wfp = WFprocessor(sid=self._sid,
                                workflow=self._workflow,
                                pending_queue=self._pending_queue,
                                completed_queue=self._completed_queue,
                                mq_hostname=self._mq_hostname,
                                port=self._port,
                                resubmit_failed=self._resubmit_failed)
        self._wfp._initialize_workflow()
        self._workflow = self._wfp.workflow

        # Submit resource request if not resource allocation done till now or
        # resubmit a new one if the old one has completed
        if self._resource_manager:
            res_alloc_state = self._resource_manager.get_resource_allocation_state()
            if (not res_alloc_state) or (res_alloc_state in self._resource_manager.get_completed_states()):

                self._logger.info('Starting resource request submission')
                self._prof.prof('init rreq submission', uid=self._uid)
                self._resource_manager._submit_resource_request()

                res_alloc_state = self._resource_manager.get_resource_allocation_state()
                if res_alloc_state in self._resource_manager.get_completed_states():
                    raise EnTKError(msg="Cannot proceed. Resource allocation ended up in %s" % res_alloc_state)

        else:
            self._logger.exception('Cannot run without resource manager, please create and assign a resource manager')
            raise EnTKError(text='Missing resource manager')

        # Start synchronizer thread
        if not self._sync_thread:
            self._logger.info('Starting synchronizer thread')
            self._sync_thread = Thread(target=self._synchronizer,
                                       name='synchronizer-thread')
            self._prof.prof('starting synchronizer thread', uid=self._uid)
            self._sync_thread.start()

        # Start WFprocessor
        self._logger.info('Starting WFProcessor process from AppManager')
        self._wfp.start_processor()
        self._report.ok('All components created\n')

        # Create tmgr object only if it does not already exist
        if self._rts == 'radical.pilot':
            from radical.entk.execman.rp import TaskManager
        elif self._rts == 'mock':
            from radical.entk.execman.mock import TaskManager

        if not self._task_manager:
            self._prof.prof('creating tmgr obj', uid=self._uid)
            self._task_manager = TaskManager(sid=self._sid,
                                             pending_queue=self._pending_queue,
                                             completed_queue=self._completed_queue,
                                             mq_hostname=self._mq_hostname,
                                             rmgr=self._resource_manager,
                                             port=self._port)
            self._logger.info('Starting task manager process from AppManager')
            self._task_manager.start_manager()
            self._task_manager.start_heartbeat()

        active_pipe_count = len(self._workflow)
        finished_pipe_uids = []

        # We wait till all pipelines of the workflow are marked complete
        while ((active_pipe_count > 0) and
               (self._wfp.workflow_incomplete()) and
               (self._resource_manager.get_resource_allocation_state() not in
                self._resource_manager.get_completed_states())):

            if active_pipe_count > 0:
                for pipe in self._workflow:
                    with pipe.lock:
                        if (pipe.completed) and (pipe.uid not in finished_pipe_uids):
                            self._logger.info('Pipe %s completed' % pipe.uid)
                            finished_pipe_uids.append(pipe.uid)
                            active_pipe_count -= 1
                            self._logger.info('Active pipes: %s' % active_pipe_count)

            if (not self._sync_thread.is_alive()) and (self._cur_attempt <= self._reattempts):
                self._sync_thread = Thread(target=self._synchronizer,
                                           name='synchronizer-thread')
                self._logger.info('Restarting synchronizer thread')
                self._prof.prof('restarting synchronizer', uid=self._uid)
                self._sync_thread.start()
                self._cur_attempt += 1

            if (not self._wfp.check_processor()) and (self._cur_attempt <= self._reattempts):
                """
                If WFP dies, both child threads are also cleaned out.
                We simply recreate the wfp object with a copy of the workflow
                in the appmanager and start the processor.
                """
                self._prof.prof('recreating wfp obj', uid=self._uid)
                self._wfp = WFProcessor(sid=self._sid,
                                        workflow=self._workflow,
                                        pending_queue=self._pending_queue,
                                        completed_queue=self._completed_queue,
                                        mq_hostname=self._mq_hostname,
                                        port=self._port,
                                        resubmit_failed=self._resubmit_failed)
                self._logger.info('Restarting WFProcessor process from AppManager')
                self._wfp.start_processor()
                self._cur_attempt += 1

            if (not self._task_manager.check_heartbeat()) and (self._cur_attempt <= self._reattempts):
                """
                If the tmgr process or heartbeat dies, we simply start a
                new process using the start_manager method. We do not
                need to create a new instance of the TaskManager object
                itself. We stop and start a new instance of the
                heartbeat thread as well.
                """
                self._prof.prof('restarting tmgr process and heartbeat', uid=self._uid)
                self._logger.info('Terminating heartbeat thread')
                self._task_manager.terminate_heartbeat()
                self._logger.info('Terminating tmgr process')
                self._task_manager.terminate_manager()
                self._logger.info('Restarting task manager process')
                self._task_manager.start_manager()
                self._logger.info('Restarting heartbeat thread')
                self._task_manager.start_heartbeat()
                self._cur_attempt += 1

        self._prof.prof('start termination', uid=self._uid)

        # Terminate threads in following order: wfp, helper, synchronizer
        self._logger.info('Terminating WFprocessor')
        self._wfp.terminate_processor()

        self._logger.info('Terminating synchronizer thread')
        self._terminate_sync.set()
        self._sync_thread.join()
        self._logger.info('Synchronizer thread terminated')

        if self._autoterminate:
            self.resource_terminate()

        if self._write_workflow:
            write_workflow(self._workflow, self._sid)

        self._prof.prof('termination done', uid=self._uid)

    except KeyboardInterrupt:

        self._prof.prof('start termination', uid=self._uid)
        self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' +
                               'trying to cancel enqueuer thread gracefully...')

        # Terminate threads in following order: wfp, helper, synchronizer
        if self._wfp:
            self._logger.info('Terminating WFprocessor')
            self._wfp.terminate_processor()

        if self._task_manager:
            self._logger.info('Terminating task manager process')
            self._task_manager.terminate_manager()
            self._task_manager.terminate_heartbeat()

        if self._sync_thread:
            self._logger.info('Terminating synchronizer thread')
            self._terminate_sync.set()
            self._sync_thread.join()
            self._logger.info('Synchronizer thread terminated')

        if self._resource_manager:
            self._resource_manager._terminate_resource_request()

        self._prof.prof('termination done', uid=self._uid)
        raise KeyboardInterrupt

    except Exception, ex:

        self._prof.prof('start termination', uid=self._uid)
        self._logger.exception('Error in AppManager: %s' % ex)

        # Terminate threads in following order: wfp, helper, synchronizer
        if self._wfp:
            self._logger.info('Terminating WFprocessor')
            self._wfp.terminate_processor()

        if self._task_manager:
            self._logger.info('Terminating task manager process')
            self._task_manager.terminate_manager()
            self._task_manager.terminate_heartbeat()

        if self._sync_thread:
            self._logger.info('Terminating synchronizer thread')
            self._terminate_sync.set()
            self._sync_thread.join()
            self._logger.info('Synchronizer thread terminated')

        if self._resource_manager:
            self._resource_manager._terminate_resource_request()

        self._prof.prof('termination done', uid=self._uid)
        raise
[ "def", "run", "(", "self", ")", ":", "try", ":", "# Set None objects local to each run", "self", ".", "_wfp", "=", "None", "self", ".", "_sync_thread", "=", "None", "self", ".", "_terminate_sync", "=", "Event", "(", ")", "self", ".", "_resubmit_failed", "=", "False", "self", ".", "_cur_attempt", "=", "1", "if", "not", "self", ".", "_workflow", ":", "self", ".", "_logger", ".", "error", "(", "'No workflow assigned currently, please check your script'", ")", "raise", "MissingError", "(", "obj", "=", "self", ".", "_uid", ",", "missing_attribute", "=", "'workflow'", ")", "if", "not", "self", ".", "_resource_manager", ":", "self", ".", "_logger", ".", "error", "(", "'No resource manager assigned currently, please create and add a valid resource manager'", ")", "raise", "MissingError", "(", "obj", "=", "self", ".", "_uid", ",", "missing_attribute", "=", "'resource_manager'", ")", "self", ".", "_prof", ".", "prof", "(", "'amgr run started'", ",", "uid", "=", "self", ".", "_uid", ")", "# Setup rabbitmq stuff", "if", "not", "self", ".", "_mqs_setup", ":", "self", ".", "_report", ".", "info", "(", "'Setting up RabbitMQ system'", ")", "setup", "=", "self", ".", "_setup_mqs", "(", ")", "if", "not", "setup", ":", "self", ".", "_logger", ".", "error", "(", "'RabbitMQ system not available'", ")", "raise", "EnTKError", "(", "\"RabbitMQ setup failed\"", ")", "self", ".", "_mqs_setup", "=", "True", "self", ".", "_report", ".", "ok", "(", "'>>ok\\n'", ")", "# Create WFProcessor object", "self", ".", "_prof", ".", "prof", "(", "'creating wfp obj'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_wfp", "=", "WFprocessor", "(", "sid", "=", "self", ".", "_sid", ",", "workflow", "=", "self", ".", "_workflow", ",", "pending_queue", "=", "self", ".", "_pending_queue", ",", "completed_queue", "=", "self", ".", "_completed_queue", ",", "mq_hostname", "=", "self", ".", "_mq_hostname", ",", "port", "=", "self", ".", "_port", ",", "resubmit_failed", "=", "self", ".", "_resubmit_failed", ")", "self", ".", "_wfp", ".", "_initialize_workflow", "(", ")", "self", ".", "_workflow", "=", "self", ".", "_wfp", ".", "workflow", "# Submit resource request if not resource allocation done till now or", "# resubmit a new one if the old one has completed", "if", "self", ".", "_resource_manager", ":", "res_alloc_state", "=", "self", ".", "_resource_manager", ".", "get_resource_allocation_state", "(", ")", "if", "(", "not", "res_alloc_state", ")", "or", "(", "res_alloc_state", "in", "self", ".", "_resource_manager", ".", "get_completed_states", "(", ")", ")", ":", "self", ".", "_logger", ".", "info", "(", "'Starting resource request submission'", ")", "self", ".", "_prof", ".", "prof", "(", "'init rreq submission'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_resource_manager", ".", "_submit_resource_request", "(", ")", "res_alloc_state", "=", "self", ".", "_resource_manager", ".", "get_resource_allocation_state", "(", ")", "if", "res_alloc_state", "in", "self", ".", "_resource_manager", ".", "get_completed_states", "(", ")", ":", "raise", "EnTKError", "(", "msg", "=", "\"Cannot proceed. 
Resource allocation ended up in %s\"", "%", "res_alloc_state", ")", "else", ":", "self", ".", "_logger", ".", "exception", "(", "'Cannot run without resource manager, please create and assign a resource manager'", ")", "raise", "EnTKError", "(", "text", "=", "'Missing resource manager'", ")", "# Start synchronizer thread", "if", "not", "self", ".", "_sync_thread", ":", "self", ".", "_logger", ".", "info", "(", "'Starting synchronizer thread'", ")", "self", ".", "_sync_thread", "=", "Thread", "(", "target", "=", "self", ".", "_synchronizer", ",", "name", "=", "'synchronizer-thread'", ")", "self", ".", "_prof", ".", "prof", "(", "'starting synchronizer thread'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_sync_thread", ".", "start", "(", ")", "# Start WFprocessor", "self", ".", "_logger", ".", "info", "(", "'Starting WFProcessor process from AppManager'", ")", "self", ".", "_wfp", ".", "start_processor", "(", ")", "self", ".", "_report", ".", "ok", "(", "'All components created\\n'", ")", "# Create tmgr object only if it does not already exist", "if", "self", ".", "_rts", "==", "'radical.pilot'", ":", "from", "radical", ".", "entk", ".", "execman", ".", "rp", "import", "TaskManager", "elif", "self", ".", "_rts", "==", "'mock'", ":", "from", "radical", ".", "entk", ".", "execman", ".", "mock", "import", "TaskManager", "if", "not", "self", ".", "_task_manager", ":", "self", ".", "_prof", ".", "prof", "(", "'creating tmgr obj'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_task_manager", "=", "TaskManager", "(", "sid", "=", "self", ".", "_sid", ",", "pending_queue", "=", "self", ".", "_pending_queue", ",", "completed_queue", "=", "self", ".", "_completed_queue", ",", "mq_hostname", "=", "self", ".", "_mq_hostname", ",", "rmgr", "=", "self", ".", "_resource_manager", ",", "port", "=", "self", ".", "_port", ")", "self", ".", "_logger", ".", "info", "(", "'Starting task manager process from AppManager'", ")", "self", ".", "_task_manager", ".", "start_manager", "(", ")", "self", ".", "_task_manager", ".", "start_heartbeat", "(", ")", "active_pipe_count", "=", "len", "(", "self", ".", "_workflow", ")", "finished_pipe_uids", "=", "[", "]", "# We wait till all pipelines of the workflow are marked", "# complete", "while", "(", "(", "active_pipe_count", ">", "0", ")", "and", "(", "self", ".", "_wfp", ".", "workflow_incomplete", "(", ")", ")", "and", "(", "self", ".", "_resource_manager", ".", "get_resource_allocation_state", "(", ")", "not", "in", "self", ".", "_resource_manager", ".", "get_completed_states", "(", ")", ")", ")", ":", "if", "active_pipe_count", ">", "0", ":", "for", "pipe", "in", "self", ".", "_workflow", ":", "with", "pipe", ".", "lock", ":", "if", "(", "pipe", ".", "completed", ")", "and", "(", "pipe", ".", "uid", "not", "in", "finished_pipe_uids", ")", ":", "self", ".", "_logger", ".", "info", "(", "'Pipe %s completed'", "%", "pipe", ".", "uid", ")", "finished_pipe_uids", ".", "append", "(", "pipe", ".", "uid", ")", "active_pipe_count", "-=", "1", "self", ".", "_logger", ".", "info", "(", "'Active pipes: %s'", "%", "active_pipe_count", ")", "if", "(", "not", "self", ".", "_sync_thread", ".", "is_alive", "(", ")", ")", "and", "(", "self", ".", "_cur_attempt", "<=", "self", ".", "_reattempts", ")", ":", "self", ".", "_sync_thread", "=", "Thread", "(", "target", "=", "self", ".", "_synchronizer", ",", "name", "=", "'synchronizer-thread'", ")", "self", ".", "_logger", ".", "info", "(", "'Restarting synchronizer thread'", ")", "self", ".", "_prof", ".", "prof", 
"(", "'restarting synchronizer'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_sync_thread", ".", "start", "(", ")", "self", ".", "_cur_attempt", "+=", "1", "if", "(", "not", "self", ".", "_wfp", ".", "check_processor", "(", ")", ")", "and", "(", "self", ".", "_cur_attempt", "<=", "self", ".", "_reattempts", ")", ":", "\"\"\"\n If WFP dies, both child threads are also cleaned out.\n We simply recreate the wfp object with a copy of the workflow\n in the appmanager and start the processor.\n \"\"\"", "self", ".", "_prof", ".", "prof", "(", "'recreating wfp obj'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_wfp", "=", "WFProcessor", "(", "sid", "=", "self", ".", "_sid", ",", "workflow", "=", "self", ".", "_workflow", ",", "pending_queue", "=", "self", ".", "_pending_queue", ",", "completed_queue", "=", "self", ".", "_completed_queue", ",", "mq_hostname", "=", "self", ".", "_mq_hostname", ",", "port", "=", "self", ".", "_port", ",", "resubmit_failed", "=", "self", ".", "_resubmit_failed", ")", "self", ".", "_logger", ".", "info", "(", "'Restarting WFProcessor process from AppManager'", ")", "self", ".", "_wfp", ".", "start_processor", "(", ")", "self", ".", "_cur_attempt", "+=", "1", "if", "(", "not", "self", ".", "_task_manager", ".", "check_heartbeat", "(", ")", ")", "and", "(", "self", ".", "_cur_attempt", "<=", "self", ".", "_reattempts", ")", ":", "\"\"\"\n If the tmgr process or heartbeat dies, we simply start a\n new process using the start_manager method. We do not\n need to create a new instance of the TaskManager object\n itself. We stop and start a new instance of the\n heartbeat thread as well.\n \"\"\"", "self", ".", "_prof", ".", "prof", "(", "'restarting tmgr process and heartbeat'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_logger", ".", "info", "(", "'Terminating heartbeat thread'", ")", "self", ".", "_task_manager", ".", "terminate_heartbeat", "(", ")", "self", ".", "_logger", ".", "info", "(", "'Terminating tmgr process'", ")", "self", ".", "_task_manager", ".", "terminate_manager", "(", ")", "self", ".", "_logger", ".", "info", "(", "'Restarting task manager process'", ")", "self", ".", "_task_manager", ".", "start_manager", "(", ")", "self", ".", "_logger", ".", "info", "(", "'Restarting heartbeat thread'", ")", "self", ".", "_task_manager", ".", "start_heartbeat", "(", ")", "self", ".", "_cur_attempt", "+=", "1", "self", ".", "_prof", ".", "prof", "(", "'start termination'", ",", "uid", "=", "self", ".", "_uid", ")", "# Terminate threads in following order: wfp, helper, synchronizer", "self", ".", "_logger", ".", "info", "(", "'Terminating WFprocessor'", ")", "self", ".", "_wfp", ".", "terminate_processor", "(", ")", "self", ".", "_logger", ".", "info", "(", "'Terminating synchronizer thread'", ")", "self", ".", "_terminate_sync", ".", "set", "(", ")", "self", ".", "_sync_thread", ".", "join", "(", ")", "self", ".", "_logger", ".", "info", "(", "'Synchronizer thread terminated'", ")", "if", "self", ".", "_autoterminate", ":", "self", ".", "resource_terminate", "(", ")", "if", "self", ".", "_write_workflow", ":", "write_workflow", "(", "self", ".", "_workflow", ",", "self", ".", "_sid", ")", "self", ".", "_prof", ".", "prof", "(", "'termination done'", ",", "uid", "=", "self", ".", "_uid", ")", "except", "KeyboardInterrupt", ":", "self", ".", "_prof", ".", "prof", "(", "'start termination'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_logger", ".", "exception", "(", "'Execution interrupted by user (you probably hit 
Ctrl+C), '", "+", "'trying to cancel enqueuer thread gracefully...'", ")", "# Terminate threads in following order: wfp, helper, synchronizer", "if", "self", ".", "_wfp", ":", "self", ".", "_logger", ".", "info", "(", "'Terminating WFprocessor'", ")", "self", ".", "_wfp", ".", "terminate_processor", "(", ")", "if", "self", ".", "_task_manager", ":", "self", ".", "_logger", ".", "info", "(", "'Terminating task manager process'", ")", "self", ".", "_task_manager", ".", "terminate_manager", "(", ")", "self", ".", "_task_manager", ".", "terminate_heartbeat", "(", ")", "if", "self", ".", "_sync_thread", ":", "self", ".", "_logger", ".", "info", "(", "'Terminating synchronizer thread'", ")", "self", ".", "_terminate_sync", ".", "set", "(", ")", "self", ".", "_sync_thread", ".", "join", "(", ")", "self", ".", "_logger", ".", "info", "(", "'Synchronizer thread terminated'", ")", "if", "self", ".", "_resource_manager", ":", "self", ".", "_resource_manager", ".", "_terminate_resource_request", "(", ")", "self", ".", "_prof", ".", "prof", "(", "'termination done'", ",", "uid", "=", "self", ".", "_uid", ")", "raise", "KeyboardInterrupt", "except", "Exception", ",", "ex", ":", "self", ".", "_prof", ".", "prof", "(", "'start termination'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_logger", ".", "exception", "(", "'Error in AppManager: %s'", "%", "ex", ")", "# Terminate threads in following order: wfp, helper, synchronizer", "if", "self", ".", "_wfp", ":", "self", ".", "_logger", ".", "info", "(", "'Terminating WFprocessor'", ")", "self", ".", "_wfp", ".", "terminate_processor", "(", ")", "if", "self", ".", "_task_manager", ":", "self", ".", "_logger", ".", "info", "(", "'Terminating task manager process'", ")", "self", ".", "_task_manager", ".", "terminate_manager", "(", ")", "self", ".", "_task_manager", ".", "terminate_heartbeat", "(", ")", "if", "self", ".", "_sync_thread", ":", "self", ".", "_logger", ".", "info", "(", "'Terminating synchronizer thread'", ")", "self", ".", "_terminate_sync", ".", "set", "(", ")", "self", ".", "_sync_thread", ".", "join", "(", ")", "self", ".", "_logger", ".", "info", "(", "'Synchronizer thread terminated'", ")", "if", "self", ".", "_resource_manager", ":", "self", ".", "_resource_manager", ".", "_terminate_resource_request", "(", ")", "self", ".", "_prof", ".", "prof", "(", "'termination done'", ",", "uid", "=", "self", ".", "_uid", ")", "raise" ]
**Purpose**: Run the application manager. Once the workflow and resource manager have been assigned. Invoking this method will start the setting up the communication infrastructure, submitting a resource request and then submission of all the tasks.
[ "**", "Purpose", "**", ":", "Run", "the", "application", "manager", ".", "Once", "the", "workflow", "and", "resource", "manager", "have", "been", "assigned", ".", "Invoking", "this", "method", "will", "start", "the", "setting", "up", "the", "communication", "infrastructure", "submitting", "a", "resource", "request", "and", "then", "submission", "of", "all", "the", "tasks", "." ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/appman/appmanager.py#L247-L503
train
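Stripped of EnTK's components, the recovery logic in run() is a small supervision loop: poll a worker's liveness, recreate it when it dies, and stop retrying after a fixed number of reattempts. A Python 3 sketch of just that pattern; the names are illustrative, not EnTK's API:

import time
from threading import Event, Thread

terminate = Event()

def synchronizer():
    # Placeholder body; EnTK's synchronizer drains sync queues here.
    while not terminate.is_set():
        time.sleep(0.05)

sync_thread = Thread(target=synchronizer, name='synchronizer-thread')
sync_thread.start()

reattempts, cur_attempt = 3, 1
for _ in range(10):                  # placeholder for "while workflow incomplete"
    if not sync_thread.is_alive() and cur_attempt <= reattempts:
        # Same recovery as run(): a Thread cannot be restarted, so build a new one.
        sync_thread = Thread(target=synchronizer, name='synchronizer-thread')
        sync_thread.start()
        cur_attempt += 1
    time.sleep(0.05)

terminate.set()
sync_thread.join()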
radical-cybertools/radical.entk
src/radical/entk/appman/appmanager.py
AppManager._setup_mqs
def _setup_mqs(self):
    """
    **Purpose**: Setup RabbitMQ system on the client side. We instantiate
    queue(s) 'pendingq-*' for communication between the enqueuer thread and
    the task manager process. We instantiate queue(s) 'completedq-*' for
    communication between the task manager and dequeuer thread. We
    instantiate queue 'sync-to-master' for communication from
    enqueuer/dequeuer/task_manager to the synchronizer thread. We
    instantiate queue 'sync-ack' for communication from synchronizer thread
    to enqueuer/dequeuer/task_manager.

    Details: All queues are durable: Even if the RabbitMQ server goes down,
    the queues are saved to disk and can be retrieved. This also means that
    after an erroneous run the queues might still have unacknowledged
    messages and will contain messages from that run. Hence, in every new
    run, we first delete the queue and create a new one.
    """
    try:
        self._prof.prof('init mqs setup', uid=self._uid)

        self._logger.debug('Setting up mq connection and channel')

        mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=self._mq_hostname, port=self._port))
        mq_channel = mq_connection.channel()

        self._logger.debug('Connection and channel setup successful')
        self._logger.debug('Setting up all exchanges and queues')

        qs = [
            '%s-tmgr-to-sync' % self._sid,
            '%s-cb-to-sync' % self._sid,
            '%s-enq-to-sync' % self._sid,
            '%s-deq-to-sync' % self._sid,
            '%s-sync-to-tmgr' % self._sid,
            '%s-sync-to-cb' % self._sid,
            '%s-sync-to-enq' % self._sid,
            '%s-sync-to-deq' % self._sid
        ]

        for i in range(1, self._num_pending_qs + 1):
            queue_name = '%s-pendingq-%s' % (self._sid, i)
            self._pending_queue.append(queue_name)
            qs.append(queue_name)

        for i in range(1, self._num_completed_qs + 1):
            queue_name = '%s-completedq-%s' % (self._sid, i)
            self._completed_queue.append(queue_name)
            qs.append(queue_name)

        f = open('.%s.txt' % self._sid, 'w')
        for q in qs:
            # Durable Qs will not be lost if rabbitmq server crashes
            mq_channel.queue_declare(queue=q)
            f.write(q + '\n')
        f.close()

        self._logger.debug('All exchanges and queues are setup')
        self._prof.prof('mqs setup done', uid=self._uid)

        return True

    except Exception, ex:
        self._logger.exception('Error setting RabbitMQ system: %s' % ex)
        raise
python
def _setup_mqs(self): """ **Purpose**: Setup RabbitMQ system on the client side. We instantiate queue(s) 'pendingq-*' for communication between the enqueuer thread and the task manager process. We instantiate queue(s) 'completedq-*' for communication between the task manager and dequeuer thread. We instantiate queue 'sync-to-master' for communication from enqueuer/dequeuer/task_manager to the synchronizer thread. We instantiate queue 'sync-ack' for communication from synchronizer thread to enqueuer/dequeuer/task_manager. Details: All queues are durable: Even if the RabbitMQ server goes down, the queues are saved to disk and can be retrieved. This also means that after an erroneous run the queues might still have unacknowledged messages and will contain messages from that run. Hence, in every new run, we first delete the queue and create a new one. """ try: self._prof.prof('init mqs setup', uid=self._uid) self._logger.debug('Setting up mq connection and channel') mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=self._mq_hostname, port=self._port)) mq_channel = mq_connection.channel() self._logger.debug('Connection and channel setup successful') self._logger.debug('Setting up all exchanges and queues') qs = [ '%s-tmgr-to-sync' % self._sid, '%s-cb-to-sync' % self._sid, '%s-enq-to-sync' % self._sid, '%s-deq-to-sync' % self._sid, '%s-sync-to-tmgr' % self._sid, '%s-sync-to-cb' % self._sid, '%s-sync-to-enq' % self._sid, '%s-sync-to-deq' % self._sid ] for i in range(1, self._num_pending_qs + 1): queue_name = '%s-pendingq-%s' % (self._sid, i) self._pending_queue.append(queue_name) qs.append(queue_name) for i in range(1, self._num_completed_qs + 1): queue_name = '%s-completedq-%s' % (self._sid, i) self._completed_queue.append(queue_name) qs.append(queue_name) f = open('.%s.txt' % self._sid, 'w') for q in qs: # Durable Qs will not be lost if rabbitmq server crashes mq_channel.queue_declare(queue=q) f.write(q + '\n') f.close() self._logger.debug('All exchanges and queues are setup') self._prof.prof('mqs setup done', uid=self._uid) return True except Exception, ex: self._logger.exception('Error setting RabbitMQ system: %s' % ex) raise
[ "def", "_setup_mqs", "(", "self", ")", ":", "try", ":", "self", ".", "_prof", ".", "prof", "(", "'init mqs setup'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_logger", ".", "debug", "(", "'Setting up mq connection and channel'", ")", "mq_connection", "=", "pika", ".", "BlockingConnection", "(", "pika", ".", "ConnectionParameters", "(", "host", "=", "self", ".", "_mq_hostname", ",", "port", "=", "self", ".", "_port", ")", ")", "mq_channel", "=", "mq_connection", ".", "channel", "(", ")", "self", ".", "_logger", ".", "debug", "(", "'Connection and channel setup successful'", ")", "self", ".", "_logger", ".", "debug", "(", "'Setting up all exchanges and queues'", ")", "qs", "=", "[", "'%s-tmgr-to-sync'", "%", "self", ".", "_sid", ",", "'%s-cb-to-sync'", "%", "self", ".", "_sid", ",", "'%s-enq-to-sync'", "%", "self", ".", "_sid", ",", "'%s-deq-to-sync'", "%", "self", ".", "_sid", ",", "'%s-sync-to-tmgr'", "%", "self", ".", "_sid", ",", "'%s-sync-to-cb'", "%", "self", ".", "_sid", ",", "'%s-sync-to-enq'", "%", "self", ".", "_sid", ",", "'%s-sync-to-deq'", "%", "self", ".", "_sid", "]", "for", "i", "in", "range", "(", "1", ",", "self", ".", "_num_pending_qs", "+", "1", ")", ":", "queue_name", "=", "'%s-pendingq-%s'", "%", "(", "self", ".", "_sid", ",", "i", ")", "self", ".", "_pending_queue", ".", "append", "(", "queue_name", ")", "qs", ".", "append", "(", "queue_name", ")", "for", "i", "in", "range", "(", "1", ",", "self", ".", "_num_completed_qs", "+", "1", ")", ":", "queue_name", "=", "'%s-completedq-%s'", "%", "(", "self", ".", "_sid", ",", "i", ")", "self", ".", "_completed_queue", ".", "append", "(", "queue_name", ")", "qs", ".", "append", "(", "queue_name", ")", "f", "=", "open", "(", "'.%s.txt'", "%", "self", ".", "_sid", ",", "'w'", ")", "for", "q", "in", "qs", ":", "# Durable Qs will not be lost if rabbitmq server crashes", "mq_channel", ".", "queue_declare", "(", "queue", "=", "q", ")", "f", ".", "write", "(", "q", "+", "'\\n'", ")", "f", ".", "close", "(", ")", "self", ".", "_logger", ".", "debug", "(", "'All exchanges and queues are setup'", ")", "self", ".", "_prof", ".", "prof", "(", "'mqs setup done'", ",", "uid", "=", "self", ".", "_uid", ")", "return", "True", "except", "Exception", ",", "ex", ":", "self", ".", "_logger", ".", "exception", "(", "'Error setting RabbitMQ system: %s'", "%", "ex", ")", "raise" ]
**Purpose**: Setup RabbitMQ system on the client side. We instantiate queue(s) 'pendingq-*' for communication between the enqueuer thread and the task manager process. We instantiate queue(s) 'completedq-*' for communication between the task manager and dequeuer thread. We instantiate queue 'sync-to-master' for communication from enqueuer/dequeuer/task_manager to the synchronizer thread. We instantiate queue 'sync-ack' for communication from synchronizer thread to enqueuer/dequeuer/task_manager. Details: All queues are durable: Even if the RabbitMQ server goes down, the queues are saved to disk and can be retrieved. This also means that after an erroneous run the queues might still have unacknowledged messages and will contain messages from that run. Hence, in every new run, we first delete the queue and create a new one.
[ "**", "Purpose", "**", ":", "Setup", "RabbitMQ", "system", "on", "the", "client", "side", ".", "We", "instantiate", "queue", "(", "s", ")", "pendingq", "-", "*", "for", "communication", "between", "the", "enqueuer", "thread", "and", "the", "task", "manager", "process", ".", "We", "instantiate", "queue", "(", "s", ")", "completedq", "-", "*", "for", "communication", "between", "the", "task", "manager", "and", "dequeuer", "thread", ".", "We", "instantiate", "queue", "sync", "-", "to", "-", "master", "for", "communication", "from", "enqueuer", "/", "dequeuer", "/", "task_manager", "to", "the", "synchronizer", "thread", ".", "We", "instantiate", "queue", "sync", "-", "ack", "for", "communication", "from", "synchronizer", "thread", "to", "enqueuer", "/", "dequeuer", "/", "task_manager", "." ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/appman/appmanager.py#L527-L589
train
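A condensed, runnable sketch of the pattern _setup_mqs implements: open one blocking pika connection, then queue_declare every session-scoped queue name so concurrent runs do not collide. The pika calls match the recorded code; host, port and queue counts are placeholders.

import pika

def declare_session_queues(sid, host='localhost', port=5672,
                           n_pending=1, n_completed=1):
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=host, port=port))
    channel = connection.channel()

    # Sync queues plus the numbered pending/completed queues, all
    # namespaced by the session id.
    queues = ['%s-tmgr-to-sync' % sid, '%s-sync-to-tmgr' % sid]
    queues += ['%s-pendingq-%s' % (sid, i) for i in range(1, n_pending + 1)]
    queues += ['%s-completedq-%s' % (sid, i) for i in range(1, n_completed + 1)]

    for q in queues:
        channel.queue_declare(queue=q)

    connection.close()
    return queues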
radical-cybertools/radical.entk
src/radical/entk/appman/appmanager.py
AppManager._synchronizer
def _synchronizer(self): """ **Purpose**: Thread in the master process to keep the workflow data structure in appmanager up to date. We receive pipelines, stages and tasks objects directly. The respective object is updated in this master process. Details: Important to note that acknowledgements of the type channel.basic_ack() is an acknowledgement to the server that the msg was received. This is not to be confused with the Ack sent to the enqueuer/dequeuer/task_manager through the sync-ack queue. """ try: self._prof.prof('synchronizer started', uid=self._uid) self._logger.info('synchronizer thread started') def task_update(msg, reply_to, corr_id, mq_channel): completed_task = Task() completed_task.from_dict(msg['object']) self._logger.info('Received %s with state %s' % (completed_task.uid, completed_task.state)) found_task = False # Traverse the entire workflow to find the correct task for pipe in self._workflow: if not pipe.completed: if completed_task.parent_pipeline['uid'] == pipe.uid: for stage in pipe.stages: if completed_task.parent_stage['uid'] == stage.uid: for task in stage.tasks: if (completed_task.uid == task.uid)and(completed_task.state != task.state): task.state = str(completed_task.state) self._logger.debug('Found task %s with state %s' % (task.uid, task.state)) if completed_task.path: task.path = str(completed_task.path) mq_channel.basic_publish(exchange='', routing_key=reply_to, properties=pika.BasicProperties( correlation_id=corr_id), body='%s-ack' % task.uid) self._prof.prof('publishing sync ack for obj with state %s' % msg['object']['state'], uid=msg['object']['uid'] ) mq_channel.basic_ack(delivery_tag=method_frame.delivery_tag) self._report.ok('Update: ') self._report.info('%s state: %s\n' % (task.luid, task.state)) found_task = True if not found_task: # If there was a Stage update, but the Stage was not found in any of the Pipelines. This # means that this was a Stage that was added during runtime and the AppManager does not # know about it. The current solution is going to be: add it to the workflow object in the # AppManager via the synchronizer. 
self._prof.prof('Adap: adding new task') self._logger.info('Adding new task %s to parent stage: %s' % (completed_task.uid, stage.uid)) stage.add_tasks(completed_task) mq_channel.basic_publish(exchange='', routing_key=reply_to, properties=pika.BasicProperties( correlation_id=corr_id), body='%s-ack' % completed_task.uid) self._prof.prof('Adap: added new task') self._prof.prof('publishing sync ack for obj with state %s' % msg['object']['state'], uid=msg['object']['uid'] ) mq_channel.basic_ack(delivery_tag=method_frame.delivery_tag) self._report.ok('Update: ') self._report.info('%s state: %s\n' % (completed_task.luid, completed_task.state)) def stage_update(msg, reply_to, corr_id, mq_channel): completed_stage = Stage() completed_stage.from_dict(msg['object']) self._logger.info('Received %s with state %s' % (completed_stage.uid, completed_stage.state)) found_stage = False # Traverse the entire workflow to find the correct stage for pipe in self._workflow: if not pipe.completed: if completed_stage.parent_pipeline['uid'] == pipe.uid: self._logger.info('Found parent pipeline: %s' % pipe.uid) for stage in pipe.stages: if (completed_stage.uid == stage.uid)and(completed_stage.state != stage.state): self._logger.debug('Found stage %s' % stage.uid) stage.state = str(completed_stage.state) mq_channel.basic_publish(exchange='', routing_key=reply_to, properties=pika.BasicProperties( correlation_id=corr_id), body='%s-ack' % stage.uid) self._prof.prof('publishing sync ack for obj with state %s' % msg['object']['state'], uid=msg['object']['uid'] ) mq_channel.basic_ack(delivery_tag=method_frame.delivery_tag) self._report.ok('Update: ') self._report.info('%s state: %s\n' % (stage.luid, stage.state)) found_stage = True if not found_stage: # If there was a Stage update, but the Stage was not found in any of the Pipelines. This # means that this was a Stage that was added during runtime and the AppManager does not # know about it. The current solution is going to be: add it to the workflow object in the # AppManager via the synchronizer. 
self._prof.prof('Adap: adding new stage', uid=self._uid) self._logger.info('Adding new stage %s to parent pipeline: %s' % (completed_stage.uid, pipe.uid)) pipe.add_stages(completed_stage) mq_channel.basic_publish(exchange='', routing_key=reply_to, properties=pika.BasicProperties( correlation_id=corr_id), body='%s-ack' % completed_stage.uid) self._prof.prof('Adap: adding new stage', uid=self._uid) self._prof.prof('publishing sync ack for obj with state %s' % msg['object']['state'], uid=msg['object']['uid'] ) mq_channel.basic_ack(delivery_tag=method_frame.delivery_tag) def pipeline_update(msg, reply_to, corr_id, mq_channel): completed_pipeline = Pipeline() completed_pipeline.from_dict(msg['object']) self._logger.info('Received %s with state %s' % (completed_pipeline.uid, completed_pipeline.state)) # Traverse the entire workflow to find the correct pipeline for pipe in self._workflow: if not pipe.completed: if (completed_pipeline.uid == pipe.uid)and(completed_pipeline.state != pipe.state): pipe.state = str(completed_pipeline.state) self._logger.info('Found pipeline %s, state %s, completed %s' % (pipe.uid, pipe.state, pipe.completed) ) # Reply with ack msg to the sender mq_channel.basic_publish(exchange='', routing_key=reply_to, properties=pika.BasicProperties( correlation_id=corr_id), body='%s-ack' % pipe.uid) self._prof.prof('publishing sync ack for obj with state %s' % msg['object']['state'], uid=msg['object']['uid'] ) mq_channel.basic_ack(delivery_tag=method_frame.delivery_tag) # Keep the assignment of the completed flag after sending the acknowledgment # back. Otherwise the MainThread takes lock over the pipeline because of logging # and profiling if completed_pipeline.completed: pipe._completed_flag.set() self._report.ok('Update: ') self._report.info('%s state: %s\n' % (pipe.luid, pipe.state)) mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=self._mq_hostname, port=self._port)) mq_channel = mq_connection.channel() last = time.time() while not self._terminate_sync.is_set(): #------------------------------------------------------------------------------------------------------- # Messages between tmgr Main thread and synchronizer -- only Task objects method_frame, props, body = mq_channel.basic_get(queue='%s-tmgr-to-sync' % self._sid) """ The message received is a JSON object with the following structure: msg = { 'type': 'Pipeline'/'Stage'/'Task', 'object': json/dict } """ if body: msg = json.loads(body) self._prof.prof('received obj with state %s for sync' % msg['object']['state'], uid=msg['object']['uid']) self._logger.debug('received %s with state %s for sync' % (msg['object']['uid'], msg['object']['state'])) if msg['type'] == 'Task': task_update(msg, '%s-sync-to-tmgr' % self._sid, props.correlation_id, mq_channel) #------------------------------------------------------------------------------------------------------- #------------------------------------------------------------------------------------------------------- # Messages between callback thread and synchronizer -- only Task objects method_frame, props, body = mq_channel.basic_get(queue='%s-cb-to-sync' % self._sid) """ The message received is a JSON object with the following structure: msg = { 'type': 'Pipeline'/'Stage'/'Task', 'object': json/dict } """ if body: msg = json.loads(body) self._prof.prof('received obj with state %s for sync' % msg['object']['state'], uid=msg['object']['uid']) self._logger.debug('received %s with state %s for sync' % (msg['object']['uid'], msg['object']['state'])) if 
msg['type'] == 'Task': task_update(msg, '%s-sync-to-cb' % self._sid, props.correlation_id, mq_channel) #------------------------------------------------------------------------------------------------------- #------------------------------------------------------------------------------------------------------- # Messages between enqueue thread and synchronizer -- Task, Stage or Pipeline method_frame, props, body = mq_channel.basic_get(queue='%s-enq-to-sync' % self._sid) if body: msg = json.loads(body) self._prof.prof('received obj with state %s for sync' % msg['object']['state'], uid=msg['object']['uid']) self._logger.debug('received %s with state %s for sync' % (msg['object']['uid'], msg['object']['state'])) if msg['type'] == 'Task': task_update(msg, '%s-sync-to-enq' % self._sid, props.correlation_id, mq_channel) elif msg['type'] == 'Stage': stage_update(msg, '%s-sync-to-enq' % self._sid, props.correlation_id, mq_channel) elif msg['type'] == 'Pipeline': pipeline_update(msg, '%s-sync-to-enq' % self._sid, props.correlation_id, mq_channel) #------------------------------------------------------------------------------------------------------- #------------------------------------------------------------------------------------------------------- # Messages between dequeue thread and synchronizer -- Task, Stage or Pipeline method_frame, props, body = mq_channel.basic_get(queue='%s-deq-to-sync' % self._sid) if body: msg = json.loads(body) self._prof.prof('received obj with state %s for sync' % msg['object']['state'], uid=msg['object']['uid']) self._logger.debug('received %s with state %s for sync' % (msg['object']['uid'], msg['object']['state'])) if msg['type'] == 'Task': task_update(msg, '%s-sync-to-deq' % self._sid, props.correlation_id, mq_channel) elif msg['type'] == 'Stage': stage_update(msg, '%s-sync-to-deq' % self._sid, props.correlation_id, mq_channel) elif msg['type'] == 'Pipeline': pipeline_update(msg, '%s-sync-to-deq' % self._sid, props.correlation_id, mq_channel) #------------------------------------------------------------------------------------------------------- # Appease pika cos it thinks the connection is dead now = time.time() if now - last >= self._rmq_ping_interval: mq_connection.process_data_events() last = now self._prof.prof('terminating synchronizer', uid=self._uid) except KeyboardInterrupt: self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' + 'trying to terminate synchronizer thread gracefully...') raise KeyboardInterrupt except Exception, ex: self._logger.exception('Unknown error in synchronizer: %s. \n Terminating thread' % ex) raise
python
def _synchronizer(self): """ **Purpose**: Thread in the master process to keep the workflow data structure in appmanager up to date. We receive pipelines, stages and tasks objects directly. The respective object is updated in this master process. Details: Important to note that acknowledgements of the type channel.basic_ack() is an acknowledgement to the server that the msg was received. This is not to be confused with the Ack sent to the enqueuer/dequeuer/task_manager through the sync-ack queue. """ try: self._prof.prof('synchronizer started', uid=self._uid) self._logger.info('synchronizer thread started') def task_update(msg, reply_to, corr_id, mq_channel): completed_task = Task() completed_task.from_dict(msg['object']) self._logger.info('Received %s with state %s' % (completed_task.uid, completed_task.state)) found_task = False # Traverse the entire workflow to find the correct task for pipe in self._workflow: if not pipe.completed: if completed_task.parent_pipeline['uid'] == pipe.uid: for stage in pipe.stages: if completed_task.parent_stage['uid'] == stage.uid: for task in stage.tasks: if (completed_task.uid == task.uid)and(completed_task.state != task.state): task.state = str(completed_task.state) self._logger.debug('Found task %s with state %s' % (task.uid, task.state)) if completed_task.path: task.path = str(completed_task.path) mq_channel.basic_publish(exchange='', routing_key=reply_to, properties=pika.BasicProperties( correlation_id=corr_id), body='%s-ack' % task.uid) self._prof.prof('publishing sync ack for obj with state %s' % msg['object']['state'], uid=msg['object']['uid'] ) mq_channel.basic_ack(delivery_tag=method_frame.delivery_tag) self._report.ok('Update: ') self._report.info('%s state: %s\n' % (task.luid, task.state)) found_task = True if not found_task: # If there was a Stage update, but the Stage was not found in any of the Pipelines. This # means that this was a Stage that was added during runtime and the AppManager does not # know about it. The current solution is going to be: add it to the workflow object in the # AppManager via the synchronizer. 
self._prof.prof('Adap: adding new task') self._logger.info('Adding new task %s to parent stage: %s' % (completed_task.uid, stage.uid)) stage.add_tasks(completed_task) mq_channel.basic_publish(exchange='', routing_key=reply_to, properties=pika.BasicProperties( correlation_id=corr_id), body='%s-ack' % completed_task.uid) self._prof.prof('Adap: added new task') self._prof.prof('publishing sync ack for obj with state %s' % msg['object']['state'], uid=msg['object']['uid'] ) mq_channel.basic_ack(delivery_tag=method_frame.delivery_tag) self._report.ok('Update: ') self._report.info('%s state: %s\n' % (completed_task.luid, completed_task.state)) def stage_update(msg, reply_to, corr_id, mq_channel): completed_stage = Stage() completed_stage.from_dict(msg['object']) self._logger.info('Received %s with state %s' % (completed_stage.uid, completed_stage.state)) found_stage = False # Traverse the entire workflow to find the correct stage for pipe in self._workflow: if not pipe.completed: if completed_stage.parent_pipeline['uid'] == pipe.uid: self._logger.info('Found parent pipeline: %s' % pipe.uid) for stage in pipe.stages: if (completed_stage.uid == stage.uid)and(completed_stage.state != stage.state): self._logger.debug('Found stage %s' % stage.uid) stage.state = str(completed_stage.state) mq_channel.basic_publish(exchange='', routing_key=reply_to, properties=pika.BasicProperties( correlation_id=corr_id), body='%s-ack' % stage.uid) self._prof.prof('publishing sync ack for obj with state %s' % msg['object']['state'], uid=msg['object']['uid'] ) mq_channel.basic_ack(delivery_tag=method_frame.delivery_tag) self._report.ok('Update: ') self._report.info('%s state: %s\n' % (stage.luid, stage.state)) found_stage = True if not found_stage: # If there was a Stage update, but the Stage was not found in any of the Pipelines. This # means that this was a Stage that was added during runtime and the AppManager does not # know about it. The current solution is going to be: add it to the workflow object in the # AppManager via the synchronizer. 
self._prof.prof('Adap: adding new stage', uid=self._uid) self._logger.info('Adding new stage %s to parent pipeline: %s' % (completed_stage.uid, pipe.uid)) pipe.add_stages(completed_stage) mq_channel.basic_publish(exchange='', routing_key=reply_to, properties=pika.BasicProperties( correlation_id=corr_id), body='%s-ack' % completed_stage.uid) self._prof.prof('Adap: adding new stage', uid=self._uid) self._prof.prof('publishing sync ack for obj with state %s' % msg['object']['state'], uid=msg['object']['uid'] ) mq_channel.basic_ack(delivery_tag=method_frame.delivery_tag) def pipeline_update(msg, reply_to, corr_id, mq_channel): completed_pipeline = Pipeline() completed_pipeline.from_dict(msg['object']) self._logger.info('Received %s with state %s' % (completed_pipeline.uid, completed_pipeline.state)) # Traverse the entire workflow to find the correct pipeline for pipe in self._workflow: if not pipe.completed: if (completed_pipeline.uid == pipe.uid)and(completed_pipeline.state != pipe.state): pipe.state = str(completed_pipeline.state) self._logger.info('Found pipeline %s, state %s, completed %s' % (pipe.uid, pipe.state, pipe.completed) ) # Reply with ack msg to the sender mq_channel.basic_publish(exchange='', routing_key=reply_to, properties=pika.BasicProperties( correlation_id=corr_id), body='%s-ack' % pipe.uid) self._prof.prof('publishing sync ack for obj with state %s' % msg['object']['state'], uid=msg['object']['uid'] ) mq_channel.basic_ack(delivery_tag=method_frame.delivery_tag) # Keep the assignment of the completed flag after sending the acknowledgment # back. Otherwise the MainThread takes lock over the pipeline because of logging # and profiling if completed_pipeline.completed: pipe._completed_flag.set() self._report.ok('Update: ') self._report.info('%s state: %s\n' % (pipe.luid, pipe.state)) mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=self._mq_hostname, port=self._port)) mq_channel = mq_connection.channel() last = time.time() while not self._terminate_sync.is_set(): #------------------------------------------------------------------------------------------------------- # Messages between tmgr Main thread and synchronizer -- only Task objects method_frame, props, body = mq_channel.basic_get(queue='%s-tmgr-to-sync' % self._sid) """ The message received is a JSON object with the following structure: msg = { 'type': 'Pipeline'/'Stage'/'Task', 'object': json/dict } """ if body: msg = json.loads(body) self._prof.prof('received obj with state %s for sync' % msg['object']['state'], uid=msg['object']['uid']) self._logger.debug('received %s with state %s for sync' % (msg['object']['uid'], msg['object']['state'])) if msg['type'] == 'Task': task_update(msg, '%s-sync-to-tmgr' % self._sid, props.correlation_id, mq_channel) #------------------------------------------------------------------------------------------------------- #------------------------------------------------------------------------------------------------------- # Messages between callback thread and synchronizer -- only Task objects method_frame, props, body = mq_channel.basic_get(queue='%s-cb-to-sync' % self._sid) """ The message received is a JSON object with the following structure: msg = { 'type': 'Pipeline'/'Stage'/'Task', 'object': json/dict } """ if body: msg = json.loads(body) self._prof.prof('received obj with state %s for sync' % msg['object']['state'], uid=msg['object']['uid']) self._logger.debug('received %s with state %s for sync' % (msg['object']['uid'], msg['object']['state'])) if 
msg['type'] == 'Task': task_update(msg, '%s-sync-to-cb' % self._sid, props.correlation_id, mq_channel) #------------------------------------------------------------------------------------------------------- #------------------------------------------------------------------------------------------------------- # Messages between enqueue thread and synchronizer -- Task, Stage or Pipeline method_frame, props, body = mq_channel.basic_get(queue='%s-enq-to-sync' % self._sid) if body: msg = json.loads(body) self._prof.prof('received obj with state %s for sync' % msg['object']['state'], uid=msg['object']['uid']) self._logger.debug('received %s with state %s for sync' % (msg['object']['uid'], msg['object']['state'])) if msg['type'] == 'Task': task_update(msg, '%s-sync-to-enq' % self._sid, props.correlation_id, mq_channel) elif msg['type'] == 'Stage': stage_update(msg, '%s-sync-to-enq' % self._sid, props.correlation_id, mq_channel) elif msg['type'] == 'Pipeline': pipeline_update(msg, '%s-sync-to-enq' % self._sid, props.correlation_id, mq_channel) #------------------------------------------------------------------------------------------------------- #------------------------------------------------------------------------------------------------------- # Messages between dequeue thread and synchronizer -- Task, Stage or Pipeline method_frame, props, body = mq_channel.basic_get(queue='%s-deq-to-sync' % self._sid) if body: msg = json.loads(body) self._prof.prof('received obj with state %s for sync' % msg['object']['state'], uid=msg['object']['uid']) self._logger.debug('received %s with state %s for sync' % (msg['object']['uid'], msg['object']['state'])) if msg['type'] == 'Task': task_update(msg, '%s-sync-to-deq' % self._sid, props.correlation_id, mq_channel) elif msg['type'] == 'Stage': stage_update(msg, '%s-sync-to-deq' % self._sid, props.correlation_id, mq_channel) elif msg['type'] == 'Pipeline': pipeline_update(msg, '%s-sync-to-deq' % self._sid, props.correlation_id, mq_channel) #------------------------------------------------------------------------------------------------------- # Appease pika cos it thinks the connection is dead now = time.time() if now - last >= self._rmq_ping_interval: mq_connection.process_data_events() last = now self._prof.prof('terminating synchronizer', uid=self._uid) except KeyboardInterrupt: self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' + 'trying to terminate synchronizer thread gracefully...') raise KeyboardInterrupt except Exception, ex: self._logger.exception('Unknown error in synchronizer: %s. \n Terminating thread' % ex) raise
[ "def", "_synchronizer", "(", "self", ")", ":", "try", ":", "self", ".", "_prof", ".", "prof", "(", "'synchronizer started'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_logger", ".", "info", "(", "'synchronizer thread started'", ")", "def", "task_update", "(", "msg", ",", "reply_to", ",", "corr_id", ",", "mq_channel", ")", ":", "completed_task", "=", "Task", "(", ")", "completed_task", ".", "from_dict", "(", "msg", "[", "'object'", "]", ")", "self", ".", "_logger", ".", "info", "(", "'Received %s with state %s'", "%", "(", "completed_task", ".", "uid", ",", "completed_task", ".", "state", ")", ")", "found_task", "=", "False", "# Traverse the entire workflow to find the correct task", "for", "pipe", "in", "self", ".", "_workflow", ":", "if", "not", "pipe", ".", "completed", ":", "if", "completed_task", ".", "parent_pipeline", "[", "'uid'", "]", "==", "pipe", ".", "uid", ":", "for", "stage", "in", "pipe", ".", "stages", ":", "if", "completed_task", ".", "parent_stage", "[", "'uid'", "]", "==", "stage", ".", "uid", ":", "for", "task", "in", "stage", ".", "tasks", ":", "if", "(", "completed_task", ".", "uid", "==", "task", ".", "uid", ")", "and", "(", "completed_task", ".", "state", "!=", "task", ".", "state", ")", ":", "task", ".", "state", "=", "str", "(", "completed_task", ".", "state", ")", "self", ".", "_logger", ".", "debug", "(", "'Found task %s with state %s'", "%", "(", "task", ".", "uid", ",", "task", ".", "state", ")", ")", "if", "completed_task", ".", "path", ":", "task", ".", "path", "=", "str", "(", "completed_task", ".", "path", ")", "mq_channel", ".", "basic_publish", "(", "exchange", "=", "''", ",", "routing_key", "=", "reply_to", ",", "properties", "=", "pika", ".", "BasicProperties", "(", "correlation_id", "=", "corr_id", ")", ",", "body", "=", "'%s-ack'", "%", "task", ".", "uid", ")", "self", ".", "_prof", ".", "prof", "(", "'publishing sync ack for obj with state %s'", "%", "msg", "[", "'object'", "]", "[", "'state'", "]", ",", "uid", "=", "msg", "[", "'object'", "]", "[", "'uid'", "]", ")", "mq_channel", ".", "basic_ack", "(", "delivery_tag", "=", "method_frame", ".", "delivery_tag", ")", "self", ".", "_report", ".", "ok", "(", "'Update: '", ")", "self", ".", "_report", ".", "info", "(", "'%s state: %s\\n'", "%", "(", "task", ".", "luid", ",", "task", ".", "state", ")", ")", "found_task", "=", "True", "if", "not", "found_task", ":", "# If there was a Stage update, but the Stage was not found in any of the Pipelines. This", "# means that this was a Stage that was added during runtime and the AppManager does not", "# know about it. 
The current solution is going to be: add it to the workflow object in the", "# AppManager via the synchronizer.", "self", ".", "_prof", ".", "prof", "(", "'Adap: adding new task'", ")", "self", ".", "_logger", ".", "info", "(", "'Adding new task %s to parent stage: %s'", "%", "(", "completed_task", ".", "uid", ",", "stage", ".", "uid", ")", ")", "stage", ".", "add_tasks", "(", "completed_task", ")", "mq_channel", ".", "basic_publish", "(", "exchange", "=", "''", ",", "routing_key", "=", "reply_to", ",", "properties", "=", "pika", ".", "BasicProperties", "(", "correlation_id", "=", "corr_id", ")", ",", "body", "=", "'%s-ack'", "%", "completed_task", ".", "uid", ")", "self", ".", "_prof", ".", "prof", "(", "'Adap: added new task'", ")", "self", ".", "_prof", ".", "prof", "(", "'publishing sync ack for obj with state %s'", "%", "msg", "[", "'object'", "]", "[", "'state'", "]", ",", "uid", "=", "msg", "[", "'object'", "]", "[", "'uid'", "]", ")", "mq_channel", ".", "basic_ack", "(", "delivery_tag", "=", "method_frame", ".", "delivery_tag", ")", "self", ".", "_report", ".", "ok", "(", "'Update: '", ")", "self", ".", "_report", ".", "info", "(", "'%s state: %s\\n'", "%", "(", "completed_task", ".", "luid", ",", "completed_task", ".", "state", ")", ")", "def", "stage_update", "(", "msg", ",", "reply_to", ",", "corr_id", ",", "mq_channel", ")", ":", "completed_stage", "=", "Stage", "(", ")", "completed_stage", ".", "from_dict", "(", "msg", "[", "'object'", "]", ")", "self", ".", "_logger", ".", "info", "(", "'Received %s with state %s'", "%", "(", "completed_stage", ".", "uid", ",", "completed_stage", ".", "state", ")", ")", "found_stage", "=", "False", "# Traverse the entire workflow to find the correct stage", "for", "pipe", "in", "self", ".", "_workflow", ":", "if", "not", "pipe", ".", "completed", ":", "if", "completed_stage", ".", "parent_pipeline", "[", "'uid'", "]", "==", "pipe", ".", "uid", ":", "self", ".", "_logger", ".", "info", "(", "'Found parent pipeline: %s'", "%", "pipe", ".", "uid", ")", "for", "stage", "in", "pipe", ".", "stages", ":", "if", "(", "completed_stage", ".", "uid", "==", "stage", ".", "uid", ")", "and", "(", "completed_stage", ".", "state", "!=", "stage", ".", "state", ")", ":", "self", ".", "_logger", ".", "debug", "(", "'Found stage %s'", "%", "stage", ".", "uid", ")", "stage", ".", "state", "=", "str", "(", "completed_stage", ".", "state", ")", "mq_channel", ".", "basic_publish", "(", "exchange", "=", "''", ",", "routing_key", "=", "reply_to", ",", "properties", "=", "pika", ".", "BasicProperties", "(", "correlation_id", "=", "corr_id", ")", ",", "body", "=", "'%s-ack'", "%", "stage", ".", "uid", ")", "self", ".", "_prof", ".", "prof", "(", "'publishing sync ack for obj with state %s'", "%", "msg", "[", "'object'", "]", "[", "'state'", "]", ",", "uid", "=", "msg", "[", "'object'", "]", "[", "'uid'", "]", ")", "mq_channel", ".", "basic_ack", "(", "delivery_tag", "=", "method_frame", ".", "delivery_tag", ")", "self", ".", "_report", ".", "ok", "(", "'Update: '", ")", "self", ".", "_report", ".", "info", "(", "'%s state: %s\\n'", "%", "(", "stage", ".", "luid", ",", "stage", ".", "state", ")", ")", "found_stage", "=", "True", "if", "not", "found_stage", ":", "# If there was a Stage update, but the Stage was not found in any of the Pipelines. This", "# means that this was a Stage that was added during runtime and the AppManager does not", "# know about it. 
The current solution is going to be: add it to the workflow object in the", "# AppManager via the synchronizer.", "self", ".", "_prof", ".", "prof", "(", "'Adap: adding new stage'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_logger", ".", "info", "(", "'Adding new stage %s to parent pipeline: %s'", "%", "(", "completed_stage", ".", "uid", ",", "pipe", ".", "uid", ")", ")", "pipe", ".", "add_stages", "(", "completed_stage", ")", "mq_channel", ".", "basic_publish", "(", "exchange", "=", "''", ",", "routing_key", "=", "reply_to", ",", "properties", "=", "pika", ".", "BasicProperties", "(", "correlation_id", "=", "corr_id", ")", ",", "body", "=", "'%s-ack'", "%", "completed_stage", ".", "uid", ")", "self", ".", "_prof", ".", "prof", "(", "'Adap: adding new stage'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_prof", ".", "prof", "(", "'publishing sync ack for obj with state %s'", "%", "msg", "[", "'object'", "]", "[", "'state'", "]", ",", "uid", "=", "msg", "[", "'object'", "]", "[", "'uid'", "]", ")", "mq_channel", ".", "basic_ack", "(", "delivery_tag", "=", "method_frame", ".", "delivery_tag", ")", "def", "pipeline_update", "(", "msg", ",", "reply_to", ",", "corr_id", ",", "mq_channel", ")", ":", "completed_pipeline", "=", "Pipeline", "(", ")", "completed_pipeline", ".", "from_dict", "(", "msg", "[", "'object'", "]", ")", "self", ".", "_logger", ".", "info", "(", "'Received %s with state %s'", "%", "(", "completed_pipeline", ".", "uid", ",", "completed_pipeline", ".", "state", ")", ")", "# Traverse the entire workflow to find the correct pipeline", "for", "pipe", "in", "self", ".", "_workflow", ":", "if", "not", "pipe", ".", "completed", ":", "if", "(", "completed_pipeline", ".", "uid", "==", "pipe", ".", "uid", ")", "and", "(", "completed_pipeline", ".", "state", "!=", "pipe", ".", "state", ")", ":", "pipe", ".", "state", "=", "str", "(", "completed_pipeline", ".", "state", ")", "self", ".", "_logger", ".", "info", "(", "'Found pipeline %s, state %s, completed %s'", "%", "(", "pipe", ".", "uid", ",", "pipe", ".", "state", ",", "pipe", ".", "completed", ")", ")", "# Reply with ack msg to the sender", "mq_channel", ".", "basic_publish", "(", "exchange", "=", "''", ",", "routing_key", "=", "reply_to", ",", "properties", "=", "pika", ".", "BasicProperties", "(", "correlation_id", "=", "corr_id", ")", ",", "body", "=", "'%s-ack'", "%", "pipe", ".", "uid", ")", "self", ".", "_prof", ".", "prof", "(", "'publishing sync ack for obj with state %s'", "%", "msg", "[", "'object'", "]", "[", "'state'", "]", ",", "uid", "=", "msg", "[", "'object'", "]", "[", "'uid'", "]", ")", "mq_channel", ".", "basic_ack", "(", "delivery_tag", "=", "method_frame", ".", "delivery_tag", ")", "# Keep the assignment of the completed flag after sending the acknowledgment", "# back. 
Otherwise the MainThread takes lock over the pipeline because of logging", "# and profiling", "if", "completed_pipeline", ".", "completed", ":", "pipe", ".", "_completed_flag", ".", "set", "(", ")", "self", ".", "_report", ".", "ok", "(", "'Update: '", ")", "self", ".", "_report", ".", "info", "(", "'%s state: %s\\n'", "%", "(", "pipe", ".", "luid", ",", "pipe", ".", "state", ")", ")", "mq_connection", "=", "pika", ".", "BlockingConnection", "(", "pika", ".", "ConnectionParameters", "(", "host", "=", "self", ".", "_mq_hostname", ",", "port", "=", "self", ".", "_port", ")", ")", "mq_channel", "=", "mq_connection", ".", "channel", "(", ")", "last", "=", "time", ".", "time", "(", ")", "while", "not", "self", ".", "_terminate_sync", ".", "is_set", "(", ")", ":", "#-------------------------------------------------------------------------------------------------------", "# Messages between tmgr Main thread and synchronizer -- only Task objects", "method_frame", ",", "props", ",", "body", "=", "mq_channel", ".", "basic_get", "(", "queue", "=", "'%s-tmgr-to-sync'", "%", "self", ".", "_sid", ")", "\"\"\"\n The message received is a JSON object with the following structure:\n\n msg = {\n 'type': 'Pipeline'/'Stage'/'Task',\n 'object': json/dict\n }\n \"\"\"", "if", "body", ":", "msg", "=", "json", ".", "loads", "(", "body", ")", "self", ".", "_prof", ".", "prof", "(", "'received obj with state %s for sync'", "%", "msg", "[", "'object'", "]", "[", "'state'", "]", ",", "uid", "=", "msg", "[", "'object'", "]", "[", "'uid'", "]", ")", "self", ".", "_logger", ".", "debug", "(", "'received %s with state %s for sync'", "%", "(", "msg", "[", "'object'", "]", "[", "'uid'", "]", ",", "msg", "[", "'object'", "]", "[", "'state'", "]", ")", ")", "if", "msg", "[", "'type'", "]", "==", "'Task'", ":", "task_update", "(", "msg", ",", "'%s-sync-to-tmgr'", "%", "self", ".", "_sid", ",", "props", ".", "correlation_id", ",", "mq_channel", ")", "#-------------------------------------------------------------------------------------------------------", "#-------------------------------------------------------------------------------------------------------", "# Messages between callback thread and synchronizer -- only Task objects", "method_frame", ",", "props", ",", "body", "=", "mq_channel", ".", "basic_get", "(", "queue", "=", "'%s-cb-to-sync'", "%", "self", ".", "_sid", ")", "\"\"\"\n The message received is a JSON object with the following structure:\n\n msg = {\n 'type': 'Pipeline'/'Stage'/'Task',\n 'object': json/dict\n }\n \"\"\"", "if", "body", ":", "msg", "=", "json", ".", "loads", "(", "body", ")", "self", ".", "_prof", ".", "prof", "(", "'received obj with state %s for sync'", "%", "msg", "[", "'object'", "]", "[", "'state'", "]", ",", "uid", "=", "msg", "[", "'object'", "]", "[", "'uid'", "]", ")", "self", ".", "_logger", ".", "debug", "(", "'received %s with state %s for sync'", "%", "(", "msg", "[", "'object'", "]", "[", "'uid'", "]", ",", "msg", "[", "'object'", "]", "[", "'state'", "]", ")", ")", "if", "msg", "[", "'type'", "]", "==", "'Task'", ":", "task_update", "(", "msg", ",", "'%s-sync-to-cb'", "%", "self", ".", "_sid", ",", "props", ".", "correlation_id", ",", "mq_channel", ")", "#-------------------------------------------------------------------------------------------------------", "#-------------------------------------------------------------------------------------------------------", "# Messages between enqueue thread and synchronizer -- Task, Stage or Pipeline", "method_frame", ",", "props", 
",", "body", "=", "mq_channel", ".", "basic_get", "(", "queue", "=", "'%s-enq-to-sync'", "%", "self", ".", "_sid", ")", "if", "body", ":", "msg", "=", "json", ".", "loads", "(", "body", ")", "self", ".", "_prof", ".", "prof", "(", "'received obj with state %s for sync'", "%", "msg", "[", "'object'", "]", "[", "'state'", "]", ",", "uid", "=", "msg", "[", "'object'", "]", "[", "'uid'", "]", ")", "self", ".", "_logger", ".", "debug", "(", "'received %s with state %s for sync'", "%", "(", "msg", "[", "'object'", "]", "[", "'uid'", "]", ",", "msg", "[", "'object'", "]", "[", "'state'", "]", ")", ")", "if", "msg", "[", "'type'", "]", "==", "'Task'", ":", "task_update", "(", "msg", ",", "'%s-sync-to-enq'", "%", "self", ".", "_sid", ",", "props", ".", "correlation_id", ",", "mq_channel", ")", "elif", "msg", "[", "'type'", "]", "==", "'Stage'", ":", "stage_update", "(", "msg", ",", "'%s-sync-to-enq'", "%", "self", ".", "_sid", ",", "props", ".", "correlation_id", ",", "mq_channel", ")", "elif", "msg", "[", "'type'", "]", "==", "'Pipeline'", ":", "pipeline_update", "(", "msg", ",", "'%s-sync-to-enq'", "%", "self", ".", "_sid", ",", "props", ".", "correlation_id", ",", "mq_channel", ")", "#-------------------------------------------------------------------------------------------------------", "#-------------------------------------------------------------------------------------------------------", "# Messages between dequeue thread and synchronizer -- Task, Stage or Pipeline", "method_frame", ",", "props", ",", "body", "=", "mq_channel", ".", "basic_get", "(", "queue", "=", "'%s-deq-to-sync'", "%", "self", ".", "_sid", ")", "if", "body", ":", "msg", "=", "json", ".", "loads", "(", "body", ")", "self", ".", "_prof", ".", "prof", "(", "'received obj with state %s for sync'", "%", "msg", "[", "'object'", "]", "[", "'state'", "]", ",", "uid", "=", "msg", "[", "'object'", "]", "[", "'uid'", "]", ")", "self", ".", "_logger", ".", "debug", "(", "'received %s with state %s for sync'", "%", "(", "msg", "[", "'object'", "]", "[", "'uid'", "]", ",", "msg", "[", "'object'", "]", "[", "'state'", "]", ")", ")", "if", "msg", "[", "'type'", "]", "==", "'Task'", ":", "task_update", "(", "msg", ",", "'%s-sync-to-deq'", "%", "self", ".", "_sid", ",", "props", ".", "correlation_id", ",", "mq_channel", ")", "elif", "msg", "[", "'type'", "]", "==", "'Stage'", ":", "stage_update", "(", "msg", ",", "'%s-sync-to-deq'", "%", "self", ".", "_sid", ",", "props", ".", "correlation_id", ",", "mq_channel", ")", "elif", "msg", "[", "'type'", "]", "==", "'Pipeline'", ":", "pipeline_update", "(", "msg", ",", "'%s-sync-to-deq'", "%", "self", ".", "_sid", ",", "props", ".", "correlation_id", ",", "mq_channel", ")", "#-------------------------------------------------------------------------------------------------------", "# Appease pika cos it thinks the connection is dead", "now", "=", "time", ".", "time", "(", ")", "if", "now", "-", "last", ">=", "self", ".", "_rmq_ping_interval", ":", "mq_connection", ".", "process_data_events", "(", ")", "last", "=", "now", "self", ".", "_prof", ".", "prof", "(", "'terminating synchronizer'", ",", "uid", "=", "self", ".", "_uid", ")", "except", "KeyboardInterrupt", ":", "self", ".", "_logger", ".", "exception", "(", "'Execution interrupted by user (you probably hit Ctrl+C), '", "+", "'trying to terminate synchronizer thread gracefully...'", ")", "raise", "KeyboardInterrupt", "except", "Exception", ",", "ex", ":", "self", ".", "_logger", ".", "exception", "(", "'Unknown error in synchronizer: %s. 
\\n Terminating thread'", "%", "ex", ")", "raise" ]
**Purpose**: Thread in the master process to keep the workflow data structure in appmanager up to date. We receive pipelines, stages and tasks objects directly. The respective object is updated in this master process. Details: Note that acknowledgements of the type channel.basic_ack() are acknowledgements to the server that the msg was received. They are not to be confused with the Ack sent to the enqueuer/dequeuer/task_manager through the sync-ack queue.
[ "**", "Purpose", "**", ":", "Thread", "in", "the", "master", "process", "to", "keep", "the", "workflow", "data", "structure", "in", "appmanager", "up", "to", "date", ".", "We", "receive", "pipelines", "stages", "and", "tasks", "objects", "directly", ".", "The", "respective", "object", "is", "updated", "in", "this", "master", "process", "." ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/appman/appmanager.py#L619-L958
train
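The synchronizer's core is a non-blocking poll loop: basic_get on each feeder queue, dispatch by message type, publish an application-level ack back to the sender's reply queue (tagged with the correlation id), then basic_ack the broker, with periodic process_data_events calls as a keep-alive. A single-queue sketch of that loop follows; the queue names and the handle callback are placeholders, and the real method polls four queues and dispatches to task/stage/pipeline handlers.

import json
import time
import pika

def sync_loop(sid, terminate_event, handle, host='localhost', port=5672,
              ping_interval=60):
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=host, port=port))
    channel = connection.channel()
    last = time.time()

    while not terminate_event.is_set():
        method_frame, props, body = channel.basic_get(
            queue='%s-tmgr-to-sync' % sid)
        if body:
            msg = json.loads(body)       # {'type': ..., 'object': ...}
            handle(msg)                  # update the in-memory workflow
            channel.basic_publish(       # app-level ack back to the sender
                exchange='',
                routing_key='%s-sync-to-tmgr' % sid,
                properties=pika.BasicProperties(
                    correlation_id=props.correlation_id),
                body='%s-ack' % msg['object']['uid'])
            channel.basic_ack(delivery_tag=method_frame.delivery_tag)

        now = time.time()                # keep-alive so the broker does
        if now - last >= ping_interval:  # not drop an idle connection
            connection.process_data_events()
            last = now

    connection.close()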
bradmontgomery/django-redis-metrics
redis_metrics/forms.py
MetricCategoryForm.categorize_metrics
def categorize_metrics(self): """Called only on a valid form, this method will place the chosen metrics in the given category.""" category = self.cleaned_data['category_name'] metrics = self.cleaned_data['metrics'] self.r.reset_category(category, metrics)
python
def categorize_metrics(self): """Called only on a valid form, this method will place the chosen metrics in the given category.""" category = self.cleaned_data['category_name'] metrics = self.cleaned_data['metrics'] self.r.reset_category(category, metrics)
[ "def", "categorize_metrics", "(", "self", ")", ":", "category", "=", "self", ".", "cleaned_data", "[", "'category_name'", "]", "metrics", "=", "self", ".", "cleaned_data", "[", "'metrics'", "]", "self", ".", "r", ".", "reset_category", "(", "category", ",", "metrics", ")" ]
Called only on a valid form, this method will place the chosen metrics in the given category.
[ "Called", "only", "on", "a", "valid", "form", "this", "method", "will", "place", "the", "chosen", "metrics", "in", "the", "given", "catgory", "." ]
2c92332920113d28c39234b949aa496b39a091d1
https://github.com/bradmontgomery/django-redis-metrics/blob/2c92332920113d28c39234b949aa496b39a091d1/redis_metrics/forms.py#L50-L55
train
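Hypothetical usage sketch for categorize_metrics: cleaned_data only exists after a successful is_valid() call, so the method must be gated behind validation. The view, URL name and template path below are illustrative, and the form is assumed to be constructible from POST data alone.

from django.shortcuts import redirect, render

from redis_metrics.forms import MetricCategoryForm

def categorize_view(request):
    form = MetricCategoryForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        form.categorize_metrics()               # safe: form has been validated
        return redirect('redis_metrics_list')   # hypothetical URL name
    return render(request, 'redis_metrics/categorize.html', {'form': form})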
radical-cybertools/radical.entk
src/radical/entk/execman/rp/resource_manager.py
ResourceManager._submit_resource_request
def _submit_resource_request(self): """ **Purpose**: Create and submits a RADICAL Pilot Job as per the user provided resource description """ try: self._prof.prof('creating rreq', uid=self._uid) def _pilot_state_cb(pilot, state): self._logger.info('Pilot %s state: %s' % (pilot.uid, state)) if state == rp.FAILED: self._logger.error('Pilot has failed') elif state == rp.DONE: self._logger.error('Pilot has completed') self._session = rp.Session(dburl=self._mlab_url, uid=self._sid) self._pmgr = rp.PilotManager(session=self._session) self._pmgr.register_callback(_pilot_state_cb) pd_init = { 'resource': self._resource, 'runtime': self._walltime, 'cores': self._cpus, 'project': self._project, } if self._gpus: pd_init['gpus'] = self._gpus if self._access_schema: pd_init['access_schema'] = self._access_schema if self._queue: pd_init['queue'] = self._queue if self._rts_config.get('sandbox_cleanup', None): pd_init['cleanup'] = True # Create Compute Pilot with validated resource description pdesc = rp.ComputePilotDescription(pd_init) self._prof.prof('rreq created', uid=self._uid) # Launch the pilot self._pilot = self._pmgr.submit_pilots(pdesc) self._prof.prof('rreq submitted', uid=self._uid) shared_staging_directives = list() for data in self._shared_data: temp = { 'source': data, 'target': 'pilot:///' + os.path.basename(data) } shared_staging_directives.append(temp) self._pilot.stage_in(shared_staging_directives) self._prof.prof('shared data staging initiated', uid=self._uid) self._logger.info('Resource request submission successful.. waiting for pilot to go Active') # Wait for pilot to go active self._pilot.wait([rp.PMGR_ACTIVE, rp.FAILED, rp.CANCELED]) self._prof.prof('resource active', uid=self._uid) self._logger.info('Pilot is now active') except KeyboardInterrupt: if self._session: self._session.close() self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' + 'trying to exit callback thread gracefully...') raise KeyboardInterrupt except Exception, ex: self._logger.exception('Resource request submission failed') raise
python
def _submit_resource_request(self): """ **Purpose**: Create and submits a RADICAL Pilot Job as per the user provided resource description """ try: self._prof.prof('creating rreq', uid=self._uid) def _pilot_state_cb(pilot, state): self._logger.info('Pilot %s state: %s' % (pilot.uid, state)) if state == rp.FAILED: self._logger.error('Pilot has failed') elif state == rp.DONE: self._logger.error('Pilot has completed') self._session = rp.Session(dburl=self._mlab_url, uid=self._sid) self._pmgr = rp.PilotManager(session=self._session) self._pmgr.register_callback(_pilot_state_cb) pd_init = { 'resource': self._resource, 'runtime': self._walltime, 'cores': self._cpus, 'project': self._project, } if self._gpus: pd_init['gpus'] = self._gpus if self._access_schema: pd_init['access_schema'] = self._access_schema if self._queue: pd_init['queue'] = self._queue if self._rts_config.get('sandbox_cleanup', None): pd_init['cleanup'] = True # Create Compute Pilot with validated resource description pdesc = rp.ComputePilotDescription(pd_init) self._prof.prof('rreq created', uid=self._uid) # Launch the pilot self._pilot = self._pmgr.submit_pilots(pdesc) self._prof.prof('rreq submitted', uid=self._uid) shared_staging_directives = list() for data in self._shared_data: temp = { 'source': data, 'target': 'pilot:///' + os.path.basename(data) } shared_staging_directives.append(temp) self._pilot.stage_in(shared_staging_directives) self._prof.prof('shared data staging initiated', uid=self._uid) self._logger.info('Resource request submission successful.. waiting for pilot to go Active') # Wait for pilot to go active self._pilot.wait([rp.PMGR_ACTIVE, rp.FAILED, rp.CANCELED]) self._prof.prof('resource active', uid=self._uid) self._logger.info('Pilot is now active') except KeyboardInterrupt: if self._session: self._session.close() self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' + 'trying to exit callback thread gracefully...') raise KeyboardInterrupt except Exception, ex: self._logger.exception('Resource request submission failed') raise
[ "def", "_submit_resource_request", "(", "self", ")", ":", "try", ":", "self", ".", "_prof", ".", "prof", "(", "'creating rreq'", ",", "uid", "=", "self", ".", "_uid", ")", "def", "_pilot_state_cb", "(", "pilot", ",", "state", ")", ":", "self", ".", "_logger", ".", "info", "(", "'Pilot %s state: %s'", "%", "(", "pilot", ".", "uid", ",", "state", ")", ")", "if", "state", "==", "rp", ".", "FAILED", ":", "self", ".", "_logger", ".", "error", "(", "'Pilot has failed'", ")", "elif", "state", "==", "rp", ".", "DONE", ":", "self", ".", "_logger", ".", "error", "(", "'Pilot has completed'", ")", "self", ".", "_session", "=", "rp", ".", "Session", "(", "dburl", "=", "self", ".", "_mlab_url", ",", "uid", "=", "self", ".", "_sid", ")", "self", ".", "_pmgr", "=", "rp", ".", "PilotManager", "(", "session", "=", "self", ".", "_session", ")", "self", ".", "_pmgr", ".", "register_callback", "(", "_pilot_state_cb", ")", "pd_init", "=", "{", "'resource'", ":", "self", ".", "_resource", ",", "'runtime'", ":", "self", ".", "_walltime", ",", "'cores'", ":", "self", ".", "_cpus", ",", "'project'", ":", "self", ".", "_project", ",", "}", "if", "self", ".", "_gpus", ":", "pd_init", "[", "'gpus'", "]", "=", "self", ".", "_gpus", "if", "self", ".", "_access_schema", ":", "pd_init", "[", "'access_schema'", "]", "=", "self", ".", "_access_schema", "if", "self", ".", "_queue", ":", "pd_init", "[", "'queue'", "]", "=", "self", ".", "_queue", "if", "self", ".", "_rts_config", ".", "get", "(", "'sandbox_cleanup'", ",", "None", ")", ":", "pd_init", "[", "'cleanup'", "]", "=", "True", "# Create Compute Pilot with validated resource description", "pdesc", "=", "rp", ".", "ComputePilotDescription", "(", "pd_init", ")", "self", ".", "_prof", ".", "prof", "(", "'rreq created'", ",", "uid", "=", "self", ".", "_uid", ")", "# Launch the pilot", "self", ".", "_pilot", "=", "self", ".", "_pmgr", ".", "submit_pilots", "(", "pdesc", ")", "self", ".", "_prof", ".", "prof", "(", "'rreq submitted'", ",", "uid", "=", "self", ".", "_uid", ")", "shared_staging_directives", "=", "list", "(", ")", "for", "data", "in", "self", ".", "_shared_data", ":", "temp", "=", "{", "'source'", ":", "data", ",", "'target'", ":", "'pilot:///'", "+", "os", ".", "path", ".", "basename", "(", "data", ")", "}", "shared_staging_directives", ".", "append", "(", "temp", ")", "self", ".", "_pilot", ".", "stage_in", "(", "shared_staging_directives", ")", "self", ".", "_prof", ".", "prof", "(", "'shared data staging initiated'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_logger", ".", "info", "(", "'Resource request submission successful.. waiting for pilot to go Active'", ")", "# Wait for pilot to go active", "self", ".", "_pilot", ".", "wait", "(", "[", "rp", ".", "PMGR_ACTIVE", ",", "rp", ".", "FAILED", ",", "rp", ".", "CANCELED", "]", ")", "self", ".", "_prof", ".", "prof", "(", "'resource active'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_logger", ".", "info", "(", "'Pilot is now active'", ")", "except", "KeyboardInterrupt", ":", "if", "self", ".", "_session", ":", "self", ".", "_session", ".", "close", "(", ")", "self", ".", "_logger", ".", "exception", "(", "'Execution interrupted by user (you probably hit Ctrl+C), '", "+", "'trying to exit callback thread gracefully...'", ")", "raise", "KeyboardInterrupt", "except", "Exception", ",", "ex", ":", "self", ".", "_logger", ".", "exception", "(", "'Resource request submission failed'", ")", "raise" ]
**Purpose**: Creates and submits a RADICAL Pilot Job as per the user provided resource description
[ "**", "Purpose", "**", ":", "Create", "and", "submits", "a", "RADICAL", "Pilot", "Job", "as", "per", "the", "user", "provided", "resource", "description" ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/execman/rp/resource_manager.py#L106-L188
train
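A condensed sketch of the pilot-submission flow in _submit_resource_request, using the same radical.pilot calls as the recorded method (Session, PilotManager, ComputePilotDescription, submit_pilots, wait); the resource values are placeholders.

import radical.pilot as rp

def submit_pilot(resource='local.localhost', walltime=30, cores=4):
    session = rp.Session()
    pmgr = rp.PilotManager(session=session)

    pdesc = rp.ComputePilotDescription({
        'resource': resource,   # target machine label
        'runtime':  walltime,   # minutes
        'cores':    cores,
    })

    pilot = pmgr.submit_pilots(pdesc)
    # Block until the pilot is scheduled on the resource (or fails).
    pilot.wait([rp.PMGR_ACTIVE, rp.FAILED, rp.CANCELED])
    return session, pilot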
radical-cybertools/radical.entk
src/radical/entk/execman/rp/resource_manager.py
ResourceManager._terminate_resource_request
def _terminate_resource_request(self): """ **Purpose**: Cancel the RADICAL Pilot Job """ try: if self._pilot: self._prof.prof('canceling resource allocation', uid=self._uid) self._pilot.cancel() download_rp_profile = os.environ.get('RADICAL_PILOT_PROFILE', False) self._session.close(cleanup=self._rts_config.get('db_cleanup', False), download=download_rp_profile) self._prof.prof('resource allocation canceled', uid=self._uid) except KeyboardInterrupt: self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' + 'trying to exit callback thread gracefully...') raise KeyboardInterrupt except Exception, ex: self._logger.exception('Could not cancel resource request, error: %s' % ex) raise
python
def _terminate_resource_request(self): """ **Purpose**: Cancel the RADICAL Pilot Job """ try: if self._pilot: self._prof.prof('canceling resource allocation', uid=self._uid) self._pilot.cancel() download_rp_profile = os.environ.get('RADICAL_PILOT_PROFILE', False) self._session.close(cleanup=self._rts_config.get('db_cleanup', False), download=download_rp_profile) self._prof.prof('resource allocation canceled', uid=self._uid) except KeyboardInterrupt: self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' + 'trying to exit callback thread gracefully...') raise KeyboardInterrupt except Exception, ex: self._logger.exception('Could not cancel resource request, error: %s' % ex) raise
[ "def", "_terminate_resource_request", "(", "self", ")", ":", "try", ":", "if", "self", ".", "_pilot", ":", "self", ".", "_prof", ".", "prof", "(", "'canceling resource allocation'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_pilot", ".", "cancel", "(", ")", "download_rp_profile", "=", "os", ".", "environ", ".", "get", "(", "'RADICAL_PILOT_PROFILE'", ",", "False", ")", "self", ".", "_session", ".", "close", "(", "cleanup", "=", "self", ".", "_rts_config", ".", "get", "(", "'db_cleanup'", ",", "False", ")", ",", "download", "=", "download_rp_profile", ")", "self", ".", "_prof", ".", "prof", "(", "'resource allocation canceled'", ",", "uid", "=", "self", ".", "_uid", ")", "except", "KeyboardInterrupt", ":", "self", ".", "_logger", ".", "exception", "(", "'Execution interrupted by user (you probably hit Ctrl+C), '", "+", "'trying to exit callback thread gracefully...'", ")", "raise", "KeyboardInterrupt", "except", "Exception", ",", "ex", ":", "self", ".", "_logger", ".", "exception", "(", "'Could not cancel resource request, error: %s'", "%", "ex", ")", "raise" ]
**Purpose**: Cancel the RADICAL Pilot Job
[ "**", "Purpose", "**", ":", "Cancel", "the", "RADICAL", "Pilot", "Job" ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/execman/rp/resource_manager.py#L190-L214
train
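Companion sketch to _terminate_resource_request: cancel the pilot, then close the session. As in the recorded method, profiles are downloaded only when the RADICAL_PILOT_PROFILE environment variable is set; the db_cleanup flag is a placeholder for the RTS config lookup.

import os

def teardown_pilot(session, pilot, db_cleanup=False):
    if pilot:
        pilot.cancel()
    download = bool(os.environ.get('RADICAL_PILOT_PROFILE', False))
    session.close(cleanup=db_cleanup, download=download)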
bertouttier/solaredge
solaredge/solaredge.py
Solaredge.get_list
def get_list(self, size=100, startIndex=0, searchText="", sortProperty="",
             sortOrder='ASC', status='Active,Pending'):
    """
    Request service locations

    Returns
    -------
    dict
    """
    url = urljoin(BASEURL, "sites", "list")

    params = {
        'api_key': self.token,
        'size': size,
        'startIndex': startIndex,
        'sortOrder': sortOrder,
        'status': status
    }

    if searchText:
        params['searchText'] = searchText

    if sortProperty:
        params['sortProperty'] = sortProperty

    r = requests.get(url, params)
    r.raise_for_status()
    return r.json()
python
def get_list(self, size=100, startIndex=0, searchText="", sortProperty="",
             sortOrder='ASC', status='Active,Pending'):
    """
    Request service locations

    Returns
    -------
    dict
    """
    url = urljoin(BASEURL, "sites", "list")

    params = {
        'api_key': self.token,
        'size': size,
        'startIndex': startIndex,
        'sortOrder': sortOrder,
        'status': status
    }

    if searchText:
        params['searchText'] = searchText

    if sortProperty:
        params['sortProperty'] = sortProperty

    r = requests.get(url, params)
    r.raise_for_status()
    return r.json()
[ "def", "get_list", "(", "self", ",", "size", "=", "100", ",", "startIndex", "=", "0", ",", "searchText", "=", "\"\"", ",", "sortProperty", "=", "\"\"", ",", "sortOrder", "=", "'ASC'", ",", "status", "=", "'Active,Pending'", ")", ":", "url", "=", "urljoin", "(", "BASEURL", ",", "\"sites\"", ",", "\"list\"", ")", "params", "=", "{", "'api_key'", ":", "self", ".", "token", ",", "'size'", ":", "size", ",", "'startIndex'", ":", "startIndex", ",", "'sortOrder'", ":", "sortOrder", ",", "'status'", ":", "status", "}", "if", "searchText", ":", "params", "[", "'searchText'", "]", "=", "searchText", "if", "sortProperty", ":", "params", "[", "'sortProperty'", "]", "=", "sortProperty", "r", "=", "requests", ".", "get", "(", "url", ",", "params", ")", "r", ".", "raise_for_status", "(", ")", "return", "r", ".", "json", "(", ")" ]
Request service locations

Returns
-------
dict
[ "Request", "service", "locations" ]
2c94b38527374b0abd088e2819455b332bc5153a
https://github.com/bertouttier/solaredge/blob/2c94b38527374b0abd088e2819455b332bc5153a/solaredge/solaredge.py#L30-L57
train
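For context, a minimal usage sketch of get_list follows. The import path, the assumption that the Solaredge client is constructed with just an API token, and the shape of the returned JSON all go beyond what the record shows; treat them as assumptions.

# Hedged usage sketch; constructor signature and response shape are
# assumptions, not facts from the record.
from solaredge.solaredge import Solaredge

client = Solaredge("YOUR_API_KEY")  # hypothetical API token
response = client.get_list(size=10, sortProperty="name", sortOrder="DESC")
print(response)  # a dict describing the matching sites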
google/dotty
efilter/parsers/common/token_stream.py
TokenStream.match
def match(self, f, *args):
    """Match grammar function 'f' against next token and set 'self.matched'.

    Arguments:
        f: A grammar function - see efilter.parsers.common.grammar. Must
            return TokenMatch or None.
        args: Passed to 'f', if any.

    Returns:
        Instance of efilter.parsers.common.grammar.TokenMatch or None.

    Comment:
        If a match is returned, it will also be stored in self.matched.
    """
    try:
        match = f(self.tokenizer, *args)
    except StopIteration:
        # The grammar function might have tried to access more tokens than
        # are available. That's not really an error, it just means it didn't
        # match.
        return

    if match is None:
        return

    if not isinstance(match, grammar.TokenMatch):
        raise TypeError("Invalid grammar function %r returned %r."
                        % (f, match))

    self.matched = match
    return match
python
def match(self, f, *args):
    """Match grammar function 'f' against next token and set 'self.matched'.

    Arguments:
        f: A grammar function - see efilter.parsers.common.grammar. Must
            return TokenMatch or None.
        args: Passed to 'f', if any.

    Returns:
        Instance of efilter.parsers.common.grammar.TokenMatch or None.

    Comment:
        If a match is returned, it will also be stored in self.matched.
    """
    try:
        match = f(self.tokenizer, *args)
    except StopIteration:
        # The grammar function might have tried to access more tokens than
        # are available. That's not really an error, it just means it didn't
        # match.
        return

    if match is None:
        return

    if not isinstance(match, grammar.TokenMatch):
        raise TypeError("Invalid grammar function %r returned %r."
                        % (f, match))

    self.matched = match
    return match
[ "def", "match", "(", "self", ",", "f", ",", "*", "args", ")", ":", "try", ":", "match", "=", "f", "(", "self", ".", "tokenizer", ",", "*", "args", ")", "except", "StopIteration", ":", "# The grammar function might have tried to access more tokens than", "# are available. That's not really an error, it just means it didn't", "# match.", "return", "if", "match", "is", "None", ":", "return", "if", "not", "isinstance", "(", "match", ",", "grammar", ".", "TokenMatch", ")", ":", "raise", "TypeError", "(", "\"Invalid grammar function %r returned %r.\"", "%", "(", "f", ",", "match", ")", ")", "self", ".", "matched", "=", "match", "return", "match" ]
Match grammar function 'f' against next token and set 'self.matched'.

Arguments:
    f: A grammar function - see efilter.parsers.common.grammar. Must
        return TokenMatch or None.
    args: Passed to 'f', if any.

Returns:
    Instance of efilter.parsers.common.grammar.TokenMatch or None.

Comment:
    If a match is returned, it will also be stored in self.matched.
[ "Match", "grammar", "function", "f", "against", "next", "token", "and", "set", "self", ".", "matched", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/common/token_stream.py#L46-L76
train
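The 'f' argument that match expects is any callable following the grammar-function contract described in the docstring: it receives the tokenizer and returns a grammar.TokenMatch or None. A minimal sketch of such a function, modeled on the token names used elsewhere in this parser, is below; the specific rule it implements is invented for illustration.

from efilter.parsers.common import grammar

def select_keyword(tokenizer):
    """Hypothetical grammar function: match a literal SELECT symbol."""
    token = tokenizer.peek(0)
    if token and token.name == "symbol" and token.value == "SELECT":
        return grammar.TokenMatch(None, token.value, (token,))
    return None  # returning None means "didn't match", never an error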
google/dotty
efilter/parsers/common/token_stream.py
TokenStream.accept
def accept(self, f, *args):
    """Like 'match', but consume the token (tokenizer advances.)"""
    match = self.match(f, *args)
    if match is None:
        return

    self.tokenizer.skip(len(match.tokens))
    return match
python
def accept(self, f, *args):
    """Like 'match', but consume the token (tokenizer advances.)"""
    match = self.match(f, *args)
    if match is None:
        return

    self.tokenizer.skip(len(match.tokens))
    return match
[ "def", "accept", "(", "self", ",", "f", ",", "*", "args", ")", ":", "match", "=", "self", ".", "match", "(", "f", ",", "*", "args", ")", "if", "match", "is", "None", ":", "return", "self", ".", "tokenizer", ".", "skip", "(", "len", "(", "match", ".", "tokens", ")", ")", "return", "match" ]
Like 'match', but consume the token (tokenizer advances.)
[ "Like", "match", "but", "consume", "the", "token", "(", "tokenizer", "advances", ".", ")" ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/common/token_stream.py#L78-L85
train
google/dotty
efilter/parsers/common/token_stream.py
TokenStream.reject
def reject(self, f, *args):
    """Like 'match', but throw a parse error if 'f' matches.

    This is useful when a parser wants to be strict about specific things
    being prohibited. For example, DottySQL bans the use of SQL keywords as
    variable names.
    """
    match = self.match(f, *args)
    if match:
        token = self.peek(0)
        raise errors.EfilterParseError(
            query=self.tokenizer.source, token=token,
            message="Was not expecting a %s here." % token.name)
python
def reject(self, f, *args):
    """Like 'match', but throw a parse error if 'f' matches.

    This is useful when a parser wants to be strict about specific things
    being prohibited. For example, DottySQL bans the use of SQL keywords as
    variable names.
    """
    match = self.match(f, *args)
    if match:
        token = self.peek(0)
        raise errors.EfilterParseError(
            query=self.tokenizer.source, token=token,
            message="Was not expecting a %s here." % token.name)
[ "def", "reject", "(", "self", ",", "f", ",", "*", "args", ")", ":", "match", "=", "self", ".", "match", "(", "f", ",", "*", "args", ")", "if", "match", ":", "token", "=", "self", ".", "peek", "(", "0", ")", "raise", "errors", ".", "EfilterParseError", "(", "query", "=", "self", ".", "tokenizer", ".", "source", ",", "token", "=", "token", ",", "message", "=", "\"Was not expecting a %s here.\"", "%", "token", ".", "name", ")" ]
Like 'match', but throw a parse error if 'f' matches.

This is useful when a parser wants to be strict about specific things
being prohibited. For example, DottySQL bans the use of SQL keywords as
variable names.
[ "Like", "match", "but", "throw", "a", "parse", "error", "if", "f", "matches", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/common/token_stream.py#L87-L99
train
google/dotty
efilter/parsers/common/token_stream.py
TokenStream.expect
def expect(self, f, *args):
    """Like 'accept' but throws a parse error if 'f' doesn't match."""
    match = self.accept(f, *args)
    if match:
        return match

    try:
        func_name = f.func_name
    except AttributeError:
        func_name = "<unnamed grammar function>"

    start, end = self.current_position()
    raise errors.EfilterParseError(
        query=self.tokenizer.source, start=start, end=end,
        message="Was expecting %s here." % (func_name))
python
def expect(self, f, *args):
    """Like 'accept' but throws a parse error if 'f' doesn't match."""
    match = self.accept(f, *args)
    if match:
        return match

    try:
        func_name = f.func_name
    except AttributeError:
        func_name = "<unnamed grammar function>"

    start, end = self.current_position()
    raise errors.EfilterParseError(
        query=self.tokenizer.source, start=start, end=end,
        message="Was expecting %s here." % (func_name))
[ "def", "expect", "(", "self", ",", "f", ",", "*", "args", ")", ":", "match", "=", "self", ".", "accept", "(", "f", ",", "*", "args", ")", "if", "match", ":", "return", "match", "try", ":", "func_name", "=", "f", ".", "func_name", "except", "AttributeError", ":", "func_name", "=", "\"<unnamed grammar function>\"", "start", ",", "end", "=", "self", ".", "current_position", "(", ")", "raise", "errors", ".", "EfilterParseError", "(", "query", "=", "self", ".", "tokenizer", ".", "source", ",", "start", "=", "start", ",", "end", "=", "end", ",", "message", "=", "\"Was expecting %s here.\"", "%", "(", "func_name", ")", ")" ]
Like 'accept' but throws a parse error if 'f' doesn't match.
[ "Like", "accept", "but", "throws", "a", "parse", "error", "if", "f", "doesn", "t", "match", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/common/token_stream.py#L101-L115
train
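Taken together, the four methods above form a small ladder: match peeks without consuming, accept consumes on success, reject raises when something forbidden matches, and expect raises when something required is missing. A sketch of how a parser might compose them follows; select_keyword and operand are hypothetical grammar functions supplied by the caller, not EFILTER names.

def parse_select_clause(stream, select_keyword, operand):
    """Sketch: composing match/accept/reject/expect on a TokenStream."""
    if stream.accept(select_keyword):   # consume the keyword if present
        stream.reject(select_keyword)   # a second keyword -> parse error
        return stream.expect(operand)   # missing operand -> parse error
    return None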
google/dotty
efilter/parsers/common/token_stream.py
TokenStream.current_position
def current_position(self):
    """Return a tuple of (start, end)."""
    token = self.tokenizer.peek(0)
    if token:
        return token.start, token.end

    return self.tokenizer.position, self.tokenizer.position + 1
python
def current_position(self):
    """Return a tuple of (start, end)."""
    token = self.tokenizer.peek(0)
    if token:
        return token.start, token.end

    return self.tokenizer.position, self.tokenizer.position + 1
[ "def", "current_position", "(", "self", ")", ":", "token", "=", "self", ".", "tokenizer", ".", "peek", "(", "0", ")", "if", "token", ":", "return", "token", ".", "start", ",", "token", ".", "end", "return", "self", ".", "tokenizer", ".", "position", ",", "self", ".", "tokenizer", ".", "position", "+", "1" ]
Return a tuple of (start, end).
[ "Return", "a", "tuple", "of", "(", "start", "end", ")", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/common/token_stream.py#L117-L123
train
google/dotty
efilter/parsers/common/ast_transforms.py
ComplementEquivalence
def ComplementEquivalence(*args, **kwargs):
    """Change x != y to not(x == y)."""
    return ast.Complement(
        ast.Equivalence(*args, **kwargs), **kwargs)
python
def ComplementEquivalence(*args, **kwargs):
    """Change x != y to not(x == y)."""
    return ast.Complement(
        ast.Equivalence(*args, **kwargs), **kwargs)
[ "def", "ComplementEquivalence", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "ast", ".", "Complement", "(", "ast", ".", "Equivalence", "(", "*", "args", ",", "*", "*", "kwargs", ")", ",", "*", "*", "kwargs", ")" ]
Change x != y to not(x == y).
[ "Change", "x", "!", "=", "y", "to", "not", "(", "x", "==", "y", ")", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/common/ast_transforms.py#L43-L46
train
google/dotty
efilter/parsers/common/ast_transforms.py
ComplementMembership
def ComplementMembership(*args, **kwargs):
    """Change (x not in y) to not(x in y)."""
    return ast.Complement(
        ast.Membership(*args, **kwargs), **kwargs)
python
def ComplementMembership(*args, **kwargs):
    """Change (x not in y) to not(x in y)."""
    return ast.Complement(
        ast.Membership(*args, **kwargs), **kwargs)
[ "def", "ComplementMembership", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "ast", ".", "Complement", "(", "ast", ".", "Membership", "(", "*", "args", ",", "*", "*", "kwargs", ")", ",", "*", "*", "kwargs", ")" ]
Change (x not in y) to not(x in y).
[ "Change", "(", "x", "not", "in", "y", ")", "to", "not", "(", "x", "in", "y", ")", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/common/ast_transforms.py#L49-L52
train
google/dotty
efilter/parsers/common/ast_transforms.py
ReverseComplementMembership
def ReverseComplementMembership(x, y, **kwargs):
    """Change (x doesn't contain y) to not(y in x)."""
    return ast.Complement(
        ast.Membership(y, x, **kwargs), **kwargs)
python
def ReverseComplementMembership(x, y, **kwargs):
    """Change (x doesn't contain y) to not(y in x)."""
    return ast.Complement(
        ast.Membership(y, x, **kwargs), **kwargs)
[ "def", "ReverseComplementMembership", "(", "x", ",", "y", ",", "*", "*", "kwargs", ")", ":", "return", "ast", ".", "Complement", "(", "ast", ".", "Membership", "(", "y", ",", "x", ",", "*", "*", "kwargs", ")", ",", "*", "*", "kwargs", ")" ]
Change (x doesn't contain y) to not(y in x).
[ "Change", "(", "x", "doesn", "t", "contain", "y", ")", "to", "not", "(", "y", "in", "x", ")", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/common/ast_transforms.py#L60-L63
train
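The three transforms above are pure desugaring helpers: each wraps the positive form of the operator in a Complement node. A short sketch using efilter.ast directly follows; the constructors' positional arguments mirror the records, but treat the exact signatures as assumptions.

from efilter import ast
from efilter.parsers.common import ast_transforms as transforms

x, y = ast.Var("x"), ast.Var("y")

neq = transforms.ComplementEquivalence(x, y)    # x != y  ->  not (x == y)
notin = transforms.ComplementMembership(x, y)   # x not in y  ->  not (x in y)
# x doesn't contain y  ->  not (y in x); note the swapped operands.
nocontain = transforms.ReverseComplementMembership(x, y)

assert isinstance(neq, ast.Complement)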
google/dotty
efilter/transforms/solve.py
__solve_for_repeated
def __solve_for_repeated(expr, vars):
    """Helper: solve 'expr' always returning an IRepeated.

    If the result of solving 'expr' is a list or a tuple of IStructured
    objects then treat it as a repeated value of IStructured objects because
    that's what the caller meant to do. This is a convenience helper so users
    of the API don't have to create IRepeated objects.

    If the result of solving 'expr' is a scalar then return it as a repeated
    value of one element.

    Arguments:
        expr: Expression to solve.
        vars: The scope.

    Returns:
        IRepeated result of solving 'expr'.
        A boolean to indicate whether the original was repeating.
    """
    var = solve(expr, vars).value
    if (var and isinstance(var, (tuple, list))
            and protocol.implements(var[0], structured.IStructured)):
        return repeated.meld(*var), False

    return var, repeated.isrepeating(var)
python
def __solve_for_repeated(expr, vars):
    """Helper: solve 'expr' always returning an IRepeated.

    If the result of solving 'expr' is a list or a tuple of IStructured
    objects then treat it as a repeated value of IStructured objects because
    that's what the caller meant to do. This is a convenience helper so users
    of the API don't have to create IRepeated objects.

    If the result of solving 'expr' is a scalar then return it as a repeated
    value of one element.

    Arguments:
        expr: Expression to solve.
        vars: The scope.

    Returns:
        IRepeated result of solving 'expr'.
        A boolean to indicate whether the original was repeating.
    """
    var = solve(expr, vars).value
    if (var and isinstance(var, (tuple, list))
            and protocol.implements(var[0], structured.IStructured)):
        return repeated.meld(*var), False

    return var, repeated.isrepeating(var)
[ "def", "__solve_for_repeated", "(", "expr", ",", "vars", ")", ":", "var", "=", "solve", "(", "expr", ",", "vars", ")", ".", "value", "if", "(", "var", "and", "isinstance", "(", "var", ",", "(", "tuple", ",", "list", ")", ")", "and", "protocol", ".", "implements", "(", "var", "[", "0", "]", ",", "structured", ".", "IStructured", ")", ")", ":", "return", "repeated", ".", "meld", "(", "*", "var", ")", ",", "False", "return", "var", ",", "repeated", ".", "isrepeating", "(", "var", ")" ]
Helper: solve 'expr' always returning an IRepeated.

If the result of solving 'expr' is a list or a tuple of IStructured
objects then treat it as a repeated value of IStructured objects because
that's what the caller meant to do. This is a convenience helper so users
of the API don't have to create IRepeated objects.

If the result of solving 'expr' is a scalar then return it as a repeated
value of one element.

Arguments:
    expr: Expression to solve.
    vars: The scope.

Returns:
    IRepeated result of solving 'expr'.
    A boolean to indicate whether the original was repeating.
[ "Helper", ":", "solve", "expr", "always", "returning", "an", "IRepeated", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/solve.py#L84-L108
train
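The helper above leans on the repeated protocol for everything except the list-of-IStructured special case. A minimal sketch of the protocol behavior it relies on follows; the exact semantics of meld and getvalues are assumed from their use in these records rather than from the protocol's own documentation.

from efilter.protocols import repeated

r = repeated.meld(1, 2, 3)          # build a repeated value from scalars
print(repeated.isrepeating(r))      # -> True
print(list(repeated.getvalues(r)))  # -> [1, 2, 3] (assumed ordering)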
google/dotty
efilter/transforms/solve.py
__solve_for_scalar
def __solve_for_scalar(expr, vars):
    """Helper: solve 'expr' always returning a scalar (not IRepeated).

    If the output of 'expr' is a single value or a single RowTuple with a
    single column then return the value in that column. Otherwise raise.

    Arguments:
        expr: Expression to solve.
        vars: The scope.

    Returns:
        A scalar value (not an IRepeated).

    Raises:
        EfilterTypeError if it cannot get a scalar.
    """
    var = solve(expr, vars).value
    try:
        scalar = repeated.getvalue(var)
    except TypeError:
        raise errors.EfilterTypeError(
            root=expr, query=expr.source,
            message="Wasn't expecting more than one value here. Got %r."
                    % (var,))

    if isinstance(scalar, row_tuple.RowTuple):
        try:
            return scalar.get_singleton()
        except ValueError:
            raise errors.EfilterTypeError(
                root=expr, query=expr.source,
                message="Was expecting a scalar value here. Got %r."
                        % (scalar,))
    else:
        return scalar
python
def __solve_for_scalar(expr, vars):
    """Helper: solve 'expr' always returning a scalar (not IRepeated).

    If the output of 'expr' is a single value or a single RowTuple with a
    single column then return the value in that column. Otherwise raise.

    Arguments:
        expr: Expression to solve.
        vars: The scope.

    Returns:
        A scalar value (not an IRepeated).

    Raises:
        EfilterTypeError if it cannot get a scalar.
    """
    var = solve(expr, vars).value
    try:
        scalar = repeated.getvalue(var)
    except TypeError:
        raise errors.EfilterTypeError(
            root=expr, query=expr.source,
            message="Wasn't expecting more than one value here. Got %r."
                    % (var,))

    if isinstance(scalar, row_tuple.RowTuple):
        try:
            return scalar.get_singleton()
        except ValueError:
            raise errors.EfilterTypeError(
                root=expr, query=expr.source,
                message="Was expecting a scalar value here. Got %r."
                        % (scalar,))
    else:
        return scalar
[ "def", "__solve_for_scalar", "(", "expr", ",", "vars", ")", ":", "var", "=", "solve", "(", "expr", ",", "vars", ")", ".", "value", "try", ":", "scalar", "=", "repeated", ".", "getvalue", "(", "var", ")", "except", "TypeError", ":", "raise", "errors", ".", "EfilterTypeError", "(", "root", "=", "expr", ",", "query", "=", "expr", ".", "source", ",", "message", "=", "\"Wasn't expecting more than one value here. Got %r.\"", "%", "(", "var", ",", ")", ")", "if", "isinstance", "(", "scalar", ",", "row_tuple", ".", "RowTuple", ")", ":", "try", ":", "return", "scalar", ".", "get_singleton", "(", ")", "except", "ValueError", ":", "raise", "errors", ".", "EfilterTypeError", "(", "root", "=", "expr", ",", "query", "=", "expr", ".", "source", ",", "message", "=", "\"Was expecting a scalar value here. Got %r.\"", "%", "(", "scalar", ",", ")", ")", "else", ":", "return", "scalar" ]
Helper: solve 'expr' always returning a scalar (not IRepeated).

If the output of 'expr' is a single value or a single RowTuple with a
single column then return the value in that column. Otherwise raise.

Arguments:
    expr: Expression to solve.
    vars: The scope.

Returns:
    A scalar value (not an IRepeated).

Raises:
    EfilterTypeError if it cannot get a scalar.
[ "Helper", ":", "solve", "expr", "always", "returning", "a", "scalar", "(", "not", "IRepeated", ")", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/solve.py#L111-L145
train
google/dotty
efilter/transforms/solve.py
__solve_and_destructure_repeated
def __solve_and_destructure_repeated(expr, vars):
    """Helper: solve 'expr' always returning a list of scalars.

    If the output of 'expr' is one or more row tuples with only a single
    column then return a repeated value of the values in that column. If
    there is more than one column per row then raise.

    This returns a list because there's no point in wrapping the scalars in
    a repeated value for use internal to the implementing solver.

    Returns:
        Two values:
        - An iterator (not an IRepeated!) of scalars.
        - A boolean to indicate whether the original value was repeating.

    Raises:
        EfilterTypeError if the values don't conform.
    """
    iterable, isrepeating = __solve_for_repeated(expr, vars)
    if iterable is None:
        return (), isrepeating

    if not isrepeating:
        return [iterable], False

    values = iter(iterable)
    try:
        value = next(values)
    except StopIteration:
        return (), True

    if not isinstance(value, row_tuple.RowTuple):
        result = [value]
        # We skip type checking the remaining values because it'd be slow.
        result.extend(values)
        return result, True

    try:
        result = [value.get_singleton()]
        for value in values:
            result.append(value.get_singleton())

        return result, True
    except ValueError:
        raise errors.EfilterTypeError(
            root=expr, query=expr.source,
            message="Was expecting exactly one column in %r." % (value,))
python
def __solve_and_destructure_repeated(expr, vars):
    """Helper: solve 'expr' always returning a list of scalars.

    If the output of 'expr' is one or more row tuples with only a single
    column then return a repeated value of the values in that column. If
    there is more than one column per row then raise.

    This returns a list because there's no point in wrapping the scalars in
    a repeated value for use internal to the implementing solver.

    Returns:
        Two values:
        - An iterator (not an IRepeated!) of scalars.
        - A boolean to indicate whether the original value was repeating.

    Raises:
        EfilterTypeError if the values don't conform.
    """
    iterable, isrepeating = __solve_for_repeated(expr, vars)
    if iterable is None:
        return (), isrepeating

    if not isrepeating:
        return [iterable], False

    values = iter(iterable)
    try:
        value = next(values)
    except StopIteration:
        return (), True

    if not isinstance(value, row_tuple.RowTuple):
        result = [value]
        # We skip type checking the remaining values because it'd be slow.
        result.extend(values)
        return result, True

    try:
        result = [value.get_singleton()]
        for value in values:
            result.append(value.get_singleton())

        return result, True
    except ValueError:
        raise errors.EfilterTypeError(
            root=expr, query=expr.source,
            message="Was expecting exactly one column in %r." % (value,))
[ "def", "__solve_and_destructure_repeated", "(", "expr", ",", "vars", ")", ":", "iterable", ",", "isrepeating", "=", "__solve_for_repeated", "(", "expr", ",", "vars", ")", "if", "iterable", "is", "None", ":", "return", "(", ")", ",", "isrepeating", "if", "not", "isrepeating", ":", "return", "[", "iterable", "]", ",", "False", "values", "=", "iter", "(", "iterable", ")", "try", ":", "value", "=", "next", "(", "values", ")", "except", "StopIteration", ":", "return", "(", ")", ",", "True", "if", "not", "isinstance", "(", "value", ",", "row_tuple", ".", "RowTuple", ")", ":", "result", "=", "[", "value", "]", "# We skip type checking the remaining values because it'd be slow.", "result", ".", "extend", "(", "values", ")", "return", "result", ",", "True", "try", ":", "result", "=", "[", "value", ".", "get_singleton", "(", ")", "]", "for", "value", "in", "values", ":", "result", ".", "append", "(", "value", ".", "get_singleton", "(", ")", ")", "return", "result", ",", "True", "except", "ValueError", ":", "raise", "errors", ".", "EfilterTypeError", "(", "root", "=", "expr", ",", "query", "=", "expr", ".", "source", ",", "message", "=", "\"Was expecting exactly one column in %r.\"", "%", "(", "value", ",", ")", ")" ]
Helper: solve 'expr' always returning a list of scalars.

If the output of 'expr' is one or more row tuples with only a single
column then return a repeated value of the values in that column. If
there is more than one column per row then raise.

This returns a list because there's no point in wrapping the scalars in a
repeated value for use internal to the implementing solver.

Returns:
    Two values:
    - An iterator (not an IRepeated!) of scalars.
    - A boolean to indicate whether the original value was repeating.

Raises:
    EfilterTypeError if the values don't conform.
[ "Helper", ":", "solve", "expr", "always", "returning", "a", "list", "of", "scalars", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/solve.py#L148-L195
train
google/dotty
efilter/transforms/solve.py
solve_var
def solve_var(expr, vars):
    """Returns the value of the var named in the expression."""
    try:
        return Result(structured.resolve(vars, expr.value), ())
    except (KeyError, AttributeError) as e:
        # Raise a better exception for accessing a non-existent member.
        raise errors.EfilterKeyError(root=expr, key=expr.value, message=e,
                                     query=expr.source)
    except (TypeError, ValueError) as e:
        # Raise a better exception for what is probably a null pointer error.
        if vars.locals is None:
            raise errors.EfilterNoneError(
                root=expr, query=expr.source,
                message="Trying to access member %r of a null." % expr.value)
        else:
            raise errors.EfilterTypeError(
                root=expr, query=expr.source,
                message="%r (vars: %r)" % (e, vars))
    except NotImplementedError as e:
        raise errors.EfilterError(
            root=expr, query=expr.source,
            message="Trying to access member %r of an instance of %r."
                    % (expr.value, type(vars)))
python
def solve_var(expr, vars):
    """Returns the value of the var named in the expression."""
    try:
        return Result(structured.resolve(vars, expr.value), ())
    except (KeyError, AttributeError) as e:
        # Raise a better exception for accessing a non-existent member.
        raise errors.EfilterKeyError(root=expr, key=expr.value, message=e,
                                     query=expr.source)
    except (TypeError, ValueError) as e:
        # Raise a better exception for what is probably a null pointer error.
        if vars.locals is None:
            raise errors.EfilterNoneError(
                root=expr, query=expr.source,
                message="Trying to access member %r of a null." % expr.value)
        else:
            raise errors.EfilterTypeError(
                root=expr, query=expr.source,
                message="%r (vars: %r)" % (e, vars))
    except NotImplementedError as e:
        raise errors.EfilterError(
            root=expr, query=expr.source,
            message="Trying to access member %r of an instance of %r."
                    % (expr.value, type(vars)))
[ "def", "solve_var", "(", "expr", ",", "vars", ")", ":", "try", ":", "return", "Result", "(", "structured", ".", "resolve", "(", "vars", ",", "expr", ".", "value", ")", ",", "(", ")", ")", "except", "(", "KeyError", ",", "AttributeError", ")", "as", "e", ":", "# Raise a better exception for accessing a non-existent member.", "raise", "errors", ".", "EfilterKeyError", "(", "root", "=", "expr", ",", "key", "=", "expr", ".", "value", ",", "message", "=", "e", ",", "query", "=", "expr", ".", "source", ")", "except", "(", "TypeError", ",", "ValueError", ")", "as", "e", ":", "# Raise a better exception for what is probably a null pointer error.", "if", "vars", ".", "locals", "is", "None", ":", "raise", "errors", ".", "EfilterNoneError", "(", "root", "=", "expr", ",", "query", "=", "expr", ".", "source", ",", "message", "=", "\"Trying to access member %r of a null.\"", "%", "expr", ".", "value", ")", "else", ":", "raise", "errors", ".", "EfilterTypeError", "(", "root", "=", "expr", ",", "query", "=", "expr", ".", "source", ",", "message", "=", "\"%r (vars: %r)\"", "%", "(", "e", ",", "vars", ")", ")", "except", "NotImplementedError", "as", "e", ":", "raise", "errors", ".", "EfilterError", "(", "root", "=", "expr", ",", "query", "=", "expr", ".", "source", ",", "message", "=", "\"Trying to access member %r of an instance of %r.\"", "%", "(", "expr", ".", "value", ",", "type", "(", "vars", ")", ")", ")" ]
Returns the value of the var named in the expression.
[ "Returns", "the", "value", "of", "the", "var", "named", "in", "the", "expression", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/solve.py#L234-L256
train
google/dotty
efilter/transforms/solve.py
solve_select
def solve_select(expr, vars):
    """Use IAssociative.select to get key (rhs) from the data (lhs).

    This operation supports both scalars and repeated values on the LHS -
    selecting from a repeated value implies a map-like operation and returns
    a new repeated value.
    """
    data, _ = __solve_for_repeated(expr.lhs, vars)
    key = solve(expr.rhs, vars).value

    try:
        results = [associative.select(d, key)
                   for d in repeated.getvalues(data)]
    except (KeyError, AttributeError):
        # Raise a better exception for accessing a non-existent key.
        raise errors.EfilterKeyError(root=expr, key=key, query=expr.source)
    except (TypeError, ValueError):
        # Raise a better exception for what is probably a null pointer error.
        if vars.locals is None:
            raise errors.EfilterNoneError(
                root=expr, query=expr.source,
                message="Cannot select key %r from a null." % key)
        else:
            raise
    except NotImplementedError:
        raise errors.EfilterError(
            root=expr, query=expr.source,
            message="Cannot select keys from a non-associative value.")

    return Result(repeated.meld(*results), ())
python
def solve_select(expr, vars):
    """Use IAssociative.select to get key (rhs) from the data (lhs).

    This operation supports both scalars and repeated values on the LHS -
    selecting from a repeated value implies a map-like operation and returns
    a new repeated value.
    """
    data, _ = __solve_for_repeated(expr.lhs, vars)
    key = solve(expr.rhs, vars).value

    try:
        results = [associative.select(d, key)
                   for d in repeated.getvalues(data)]
    except (KeyError, AttributeError):
        # Raise a better exception for accessing a non-existent key.
        raise errors.EfilterKeyError(root=expr, key=key, query=expr.source)
    except (TypeError, ValueError):
        # Raise a better exception for what is probably a null pointer error.
        if vars.locals is None:
            raise errors.EfilterNoneError(
                root=expr, query=expr.source,
                message="Cannot select key %r from a null." % key)
        else:
            raise
    except NotImplementedError:
        raise errors.EfilterError(
            root=expr, query=expr.source,
            message="Cannot select keys from a non-associative value.")

    return Result(repeated.meld(*results), ())
[ "def", "solve_select", "(", "expr", ",", "vars", ")", ":", "data", ",", "_", "=", "__solve_for_repeated", "(", "expr", ".", "lhs", ",", "vars", ")", "key", "=", "solve", "(", "expr", ".", "rhs", ",", "vars", ")", ".", "value", "try", ":", "results", "=", "[", "associative", ".", "select", "(", "d", ",", "key", ")", "for", "d", "in", "repeated", ".", "getvalues", "(", "data", ")", "]", "except", "(", "KeyError", ",", "AttributeError", ")", ":", "# Raise a better exception for accessing a non-existent key.", "raise", "errors", ".", "EfilterKeyError", "(", "root", "=", "expr", ",", "key", "=", "key", ",", "query", "=", "expr", ".", "source", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "# Raise a better exception for what is probably a null pointer error.", "if", "vars", ".", "locals", "is", "None", ":", "raise", "errors", ".", "EfilterNoneError", "(", "root", "=", "expr", ",", "query", "=", "expr", ".", "source", ",", "message", "=", "\"Cannot select key %r from a null.\"", "%", "key", ")", "else", ":", "raise", "except", "NotImplementedError", ":", "raise", "errors", ".", "EfilterError", "(", "root", "=", "expr", ",", "query", "=", "expr", ".", "source", ",", "message", "=", "\"Cannot select keys from a non-associative value.\"", ")", "return", "Result", "(", "repeated", ".", "meld", "(", "*", "results", ")", ",", "(", ")", ")" ]
Use IAssociative.select to get key (rhs) from the data (lhs).

This operation supports both scalars and repeated values on the LHS -
selecting from a repeated value implies a map-like operation and returns
a new repeated value.
[ "Use", "IAssociative", ".", "select", "to", "get", "key", "(", "rhs", ")", "from", "the", "data", "(", "lhs", ")", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/solve.py#L260-L288
train
google/dotty
efilter/transforms/solve.py
solve_resolve
def solve_resolve(expr, vars):
    """Use IStructured.resolve to get member (rhs) from the object (lhs).

    This operation supports both scalars and repeated values on the LHS -
    resolving from a repeated value implies a map-like operation and returns
    a new repeated value.
    """
    objs, _ = __solve_for_repeated(expr.lhs, vars)
    member = solve(expr.rhs, vars).value

    try:
        results = [structured.resolve(o, member)
                   for o in repeated.getvalues(objs)]
    except (KeyError, AttributeError):
        # Raise a better exception for the non-existent member.
        raise errors.EfilterKeyError(root=expr.rhs, key=member,
                                     query=expr.source)
    except (TypeError, ValueError):
        # Is this a null object error?
        if vars.locals is None:
            raise errors.EfilterNoneError(
                root=expr, query=expr.source,
                message="Cannot resolve member %r from a null." % member)
        else:
            raise
    except NotImplementedError:
        raise errors.EfilterError(
            root=expr, query=expr.source,
            message="Cannot resolve members from a non-structured value.")

    return Result(repeated.meld(*results), ())
python
def solve_resolve(expr, vars):
    """Use IStructured.resolve to get member (rhs) from the object (lhs).

    This operation supports both scalars and repeated values on the LHS -
    resolving from a repeated value implies a map-like operation and returns
    a new repeated value.
    """
    objs, _ = __solve_for_repeated(expr.lhs, vars)
    member = solve(expr.rhs, vars).value

    try:
        results = [structured.resolve(o, member)
                   for o in repeated.getvalues(objs)]
    except (KeyError, AttributeError):
        # Raise a better exception for the non-existent member.
        raise errors.EfilterKeyError(root=expr.rhs, key=member,
                                     query=expr.source)
    except (TypeError, ValueError):
        # Is this a null object error?
        if vars.locals is None:
            raise errors.EfilterNoneError(
                root=expr, query=expr.source,
                message="Cannot resolve member %r from a null." % member)
        else:
            raise
    except NotImplementedError:
        raise errors.EfilterError(
            root=expr, query=expr.source,
            message="Cannot resolve members from a non-structured value.")

    return Result(repeated.meld(*results), ())
[ "def", "solve_resolve", "(", "expr", ",", "vars", ")", ":", "objs", ",", "_", "=", "__solve_for_repeated", "(", "expr", ".", "lhs", ",", "vars", ")", "member", "=", "solve", "(", "expr", ".", "rhs", ",", "vars", ")", ".", "value", "try", ":", "results", "=", "[", "structured", ".", "resolve", "(", "o", ",", "member", ")", "for", "o", "in", "repeated", ".", "getvalues", "(", "objs", ")", "]", "except", "(", "KeyError", ",", "AttributeError", ")", ":", "# Raise a better exception for the non-existent member.", "raise", "errors", ".", "EfilterKeyError", "(", "root", "=", "expr", ".", "rhs", ",", "key", "=", "member", ",", "query", "=", "expr", ".", "source", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "# Is this a null object error?", "if", "vars", ".", "locals", "is", "None", ":", "raise", "errors", ".", "EfilterNoneError", "(", "root", "=", "expr", ",", "query", "=", "expr", ".", "source", ",", "message", "=", "\"Cannot resolve member %r from a null.\"", "%", "member", ")", "else", ":", "raise", "except", "NotImplementedError", ":", "raise", "errors", ".", "EfilterError", "(", "root", "=", "expr", ",", "query", "=", "expr", ".", "source", ",", "message", "=", "\"Cannot resolve members from a non-structured value.\"", ")", "return", "Result", "(", "repeated", ".", "meld", "(", "*", "results", ")", ",", "(", ")", ")" ]
Use IStructured.resolve to get member (rhs) from the object (lhs).

This operation supports both scalars and repeated values on the LHS -
resolving from a repeated value implies a map-like operation and returns
a new repeated value.
[ "Use", "IStructured", ".", "resolve", "to", "get", "member", "(", "rhs", ")", "from", "the", "object", "(", "lhs", ")", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/solve.py#L292-L322
train
google/dotty
efilter/transforms/solve.py
solve_apply
def solve_apply(expr, vars):
    """Returns the result of applying function (lhs) to its arguments (rest).

    We use IApplicative to apply the function, because that gives the host
    application an opportunity to compare the function being called against
    a whitelist. EFILTER will never directly call a function that wasn't
    provided through a protocol implementation.
    """
    func = __solve_for_scalar(expr.func, vars)

    args = []
    kwargs = {}
    for arg in expr.args:
        if isinstance(arg, ast.Pair):
            if not isinstance(arg.lhs, ast.Var):
                raise errors.EfilterError(
                    root=arg.lhs,
                    message="Invalid argument name.")

            kwargs[arg.key.value] = solve(arg.value, vars).value
        else:
            args.append(solve(arg, vars).value)

    result = applicative.apply(func, args, kwargs)

    return Result(result, ())
python
def solve_apply(expr, vars):
    """Returns the result of applying function (lhs) to its arguments (rest).

    We use IApplicative to apply the function, because that gives the host
    application an opportunity to compare the function being called against
    a whitelist. EFILTER will never directly call a function that wasn't
    provided through a protocol implementation.
    """
    func = __solve_for_scalar(expr.func, vars)

    args = []
    kwargs = {}
    for arg in expr.args:
        if isinstance(arg, ast.Pair):
            if not isinstance(arg.lhs, ast.Var):
                raise errors.EfilterError(
                    root=arg.lhs,
                    message="Invalid argument name.")

            kwargs[arg.key.value] = solve(arg.value, vars).value
        else:
            args.append(solve(arg, vars).value)

    result = applicative.apply(func, args, kwargs)

    return Result(result, ())
[ "def", "solve_apply", "(", "expr", ",", "vars", ")", ":", "func", "=", "__solve_for_scalar", "(", "expr", ".", "func", ",", "vars", ")", "args", "=", "[", "]", "kwargs", "=", "{", "}", "for", "arg", "in", "expr", ".", "args", ":", "if", "isinstance", "(", "arg", ",", "ast", ".", "Pair", ")", ":", "if", "not", "isinstance", "(", "arg", ".", "lhs", ",", "ast", ".", "Var", ")", ":", "raise", "errors", ".", "EfilterError", "(", "root", "=", "arg", ".", "lhs", ",", "message", "=", "\"Invalid argument name.\"", ")", "kwargs", "[", "arg", ".", "key", ".", "value", "]", "=", "solve", "(", "arg", ".", "value", ",", "vars", ")", ".", "value", "else", ":", "args", ".", "append", "(", "solve", "(", "arg", ",", "vars", ")", ".", "value", ")", "result", "=", "applicative", ".", "apply", "(", "func", ",", "args", ",", "kwargs", ")", "return", "Result", "(", "result", ",", "(", ")", ")" ]
Returns the result of applying function (lhs) to its arguments (rest).

We use IApplicative to apply the function, because that gives the host
application an opportunity to compare the function being called against a
whitelist. EFILTER will never directly call a function that wasn't
provided through a protocol implementation.
[ "Returns", "the", "result", "of", "applying", "function", "(", "lhs", ")", "to", "its", "arguments", "(", "rest", ")", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/solve.py#L326-L350
train
google/dotty
efilter/transforms/solve.py
solve_bind
def solve_bind(expr, vars):
    """Build a RowTuple from key/value pairs under the bind.

    The Bind subtree is arranged as follows:

    Bind
    | First KV Pair
    | | First Key Expression
    | | First Value Expression
    | Second KV Pair
    | | Second Key Expression
    | | Second Value Expression
    Etc...

    As we evaluate the subtree, each subsequent KV pair is evaluated with
    all previous bindings already in scope. For example:

    bind(x: 5, y: x + 5) # Will bind y = 10 because x is already available.
    """
    value_expressions = []
    keys = []
    for pair in expr.children:
        keys.append(solve(pair.key, vars).value)
        value_expressions.append(pair.value)

    result = row_tuple.RowTuple(ordered_columns=keys)
    intermediate_scope = scope.ScopeStack(vars, result)

    for idx, value_expression in enumerate(value_expressions):
        value = solve(value_expression, intermediate_scope).value
        # Update the intermediate bindings so as to make earlier bindings
        # already available to the next child-expression.
        result[keys[idx]] = value

    return Result(result, ())
python
def solve_bind(expr, vars):
    """Build a RowTuple from key/value pairs under the bind.

    The Bind subtree is arranged as follows:

    Bind
    | First KV Pair
    | | First Key Expression
    | | First Value Expression
    | Second KV Pair
    | | Second Key Expression
    | | Second Value Expression
    Etc...

    As we evaluate the subtree, each subsequent KV pair is evaluated with
    all previous bindings already in scope. For example:

    bind(x: 5, y: x + 5) # Will bind y = 10 because x is already available.
    """
    value_expressions = []
    keys = []
    for pair in expr.children:
        keys.append(solve(pair.key, vars).value)
        value_expressions.append(pair.value)

    result = row_tuple.RowTuple(ordered_columns=keys)
    intermediate_scope = scope.ScopeStack(vars, result)

    for idx, value_expression in enumerate(value_expressions):
        value = solve(value_expression, intermediate_scope).value
        # Update the intermediate bindings so as to make earlier bindings
        # already available to the next child-expression.
        result[keys[idx]] = value

    return Result(result, ())
[ "def", "solve_bind", "(", "expr", ",", "vars", ")", ":", "value_expressions", "=", "[", "]", "keys", "=", "[", "]", "for", "pair", "in", "expr", ".", "children", ":", "keys", ".", "append", "(", "solve", "(", "pair", ".", "key", ",", "vars", ")", ".", "value", ")", "value_expressions", ".", "append", "(", "pair", ".", "value", ")", "result", "=", "row_tuple", ".", "RowTuple", "(", "ordered_columns", "=", "keys", ")", "intermediate_scope", "=", "scope", ".", "ScopeStack", "(", "vars", ",", "result", ")", "for", "idx", ",", "value_expression", "in", "enumerate", "(", "value_expressions", ")", ":", "value", "=", "solve", "(", "value_expression", ",", "intermediate_scope", ")", ".", "value", "# Update the intermediate bindings so as to make earlier bindings", "# already available to the next child-expression.", "result", "[", "keys", "[", "idx", "]", "]", "=", "value", "return", "Result", "(", "result", ",", "(", ")", ")" ]
Build a RowTuple from key/value pairs under the bind.

The Bind subtree is arranged as follows:

Bind
| First KV Pair
| | First Key Expression
| | First Value Expression
| Second KV Pair
| | Second Key Expression
| | Second Value Expression
Etc...

As we evaluate the subtree, each subsequent KV pair is evaluated with all
previous bindings already in scope. For example:

bind(x: 5, y: x + 5) # Will bind y = 10 because x is already available.
[ "Build", "a", "RowTuple", "from", "key", "/", "value", "pairs", "under", "the", "bind", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/solve.py#L354-L388
train
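A plain-Python analogue of the left-to-right binding order described in the docstring follows: each value expression is evaluated with every earlier binding already visible, which is exactly why bind(x: 5, y: x + 5) yields y = 10. The helper and its lambda-based pairs are illustrative only and not part of EFILTER.

def bind_pairs(pairs, outer_scope):
    """Evaluate (key, value_fn) pairs left to right, like solve_bind."""
    result = {}
    view = dict(outer_scope)        # outer scope plus bindings made so far
    for key, value_fn in pairs:
        value = value_fn(view)      # earlier keys are already visible here
        result[key] = value
        view[key] = value
    return result

row = bind_pairs([("x", lambda s: 5),
                  ("y", lambda s: s["x"] + 5)], {})
assert row == {"x": 5, "y": 10}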
google/dotty
efilter/transforms/solve.py
solve_repeat
def solve_repeat(expr, vars):
    """Build a repeated value from subexpressions."""
    try:
        result = repeated.meld(*[solve(x, vars).value for x in expr.children])
        return Result(result, ())
    except TypeError:
        raise errors.EfilterTypeError(
            root=expr, query=expr.source,
            message="All values in a repeated value must be of the same type.")
python
def solve_repeat(expr, vars):
    """Build a repeated value from subexpressions."""
    try:
        result = repeated.meld(*[solve(x, vars).value for x in expr.children])
        return Result(result, ())
    except TypeError:
        raise errors.EfilterTypeError(
            root=expr, query=expr.source,
            message="All values in a repeated value must be of the same type.")
[ "def", "solve_repeat", "(", "expr", ",", "vars", ")", ":", "try", ":", "result", "=", "repeated", ".", "meld", "(", "*", "[", "solve", "(", "x", ",", "vars", ")", ".", "value", "for", "x", "in", "expr", ".", "children", "]", ")", "return", "Result", "(", "result", ",", "(", ")", ")", "except", "TypeError", ":", "raise", "errors", ".", "EfilterTypeError", "(", "root", "=", "expr", ",", "query", "=", "expr", ".", "source", ",", "message", "=", "\"All values in a repeated value must be of the same type.\"", ")" ]
Build a repeated value from subexpressions.
[ "Build", "a", "repeated", "value", "from", "subexpressions", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/solve.py#L392-L400
train
google/dotty
efilter/transforms/solve.py
solve_tuple
def solve_tuple(expr, vars):
    """Build a tuple from subexpressions."""
    result = tuple(solve(x, vars).value for x in expr.children)
    return Result(result, ())
python
def solve_tuple(expr, vars):
    """Build a tuple from subexpressions."""
    result = tuple(solve(x, vars).value for x in expr.children)
    return Result(result, ())
[ "def", "solve_tuple", "(", "expr", ",", "vars", ")", ":", "result", "=", "tuple", "(", "solve", "(", "x", ",", "vars", ")", ".", "value", "for", "x", "in", "expr", ".", "children", ")", "return", "Result", "(", "result", ",", "(", ")", ")" ]
Build a tuple from subexpressions.
[ "Build", "a", "tuple", "from", "subexpressions", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/solve.py#L404-L407
train
google/dotty
efilter/transforms/solve.py
solve_ifelse
def solve_ifelse(expr, vars):
    """Evaluate conditions and return the one that matches."""
    for condition, result in expr.conditions():
        if boolean.asbool(solve(condition, vars).value):
            return solve(result, vars)

    return solve(expr.default(), vars)
python
def solve_ifelse(expr, vars):
    """Evaluate conditions and return the one that matches."""
    for condition, result in expr.conditions():
        if boolean.asbool(solve(condition, vars).value):
            return solve(result, vars)

    return solve(expr.default(), vars)
[ "def", "solve_ifelse", "(", "expr", ",", "vars", ")", ":", "for", "condition", ",", "result", "in", "expr", ".", "conditions", "(", ")", ":", "if", "boolean", ".", "asbool", "(", "solve", "(", "condition", ",", "vars", ")", ".", "value", ")", ":", "return", "solve", "(", "result", ",", "vars", ")", "return", "solve", "(", "expr", ".", "default", "(", ")", ",", "vars", ")" ]
Evaluate conditions and return the one that matches.
[ "Evaluate", "conditions", "and", "return", "the", "one", "that", "matches", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/solve.py#L411-L417
train
google/dotty
efilter/transforms/solve.py
solve_map
def solve_map(expr, vars):
    """Solves the map-form, by recursively calling its RHS with new vars.

    Map-forms are binary expressions. The LHS should evaluate to an
    IAssociative that can be used as new vars with which to solve a new
    query, of which the RHS is the root. In most cases, the LHS will be a
    Var (var).

    Typically, map-forms result from the dotty "dot" (.) operator. For
    example, the query "User.name" will translate to a map-form with the
    var "User" on the LHS and a var "name" on the RHS. With top-level vars
    being something like {"User": {"name": "Bob"}}, the Var on the LHS will
    evaluate to {"name": "Bob"}, which subdict will then be used on the RHS
    as new vars, and that whole form will evaluate to "Bob".
    """
    lhs_values, _ = __solve_for_repeated(expr.lhs, vars)

    def lazy_map():
        try:
            for lhs_value in repeated.getvalues(lhs_values):
                yield solve(expr.rhs,
                            __nest_scope(expr.lhs, vars, lhs_value)).value
        except errors.EfilterNoneError as error:
            error.root = expr
            raise

    return Result(repeated.lazy(lazy_map), ())
python
def solve_map(expr, vars):
    """Solves the map-form, by recursively calling its RHS with new vars.

    Map-forms are binary expressions. The LHS should evaluate to an
    IAssociative that can be used as new vars with which to solve a new
    query, of which the RHS is the root. In most cases, the LHS will be a
    Var (var).

    Typically, map-forms result from the dotty "dot" (.) operator. For
    example, the query "User.name" will translate to a map-form with the
    var "User" on the LHS and a var "name" on the RHS. With top-level vars
    being something like {"User": {"name": "Bob"}}, the Var on the LHS will
    evaluate to {"name": "Bob"}, which subdict will then be used on the RHS
    as new vars, and that whole form will evaluate to "Bob".
    """
    lhs_values, _ = __solve_for_repeated(expr.lhs, vars)

    def lazy_map():
        try:
            for lhs_value in repeated.getvalues(lhs_values):
                yield solve(expr.rhs,
                            __nest_scope(expr.lhs, vars, lhs_value)).value
        except errors.EfilterNoneError as error:
            error.root = expr
            raise

    return Result(repeated.lazy(lazy_map), ())
[ "def", "solve_map", "(", "expr", ",", "vars", ")", ":", "lhs_values", ",", "_", "=", "__solve_for_repeated", "(", "expr", ".", "lhs", ",", "vars", ")", "def", "lazy_map", "(", ")", ":", "try", ":", "for", "lhs_value", "in", "repeated", ".", "getvalues", "(", "lhs_values", ")", ":", "yield", "solve", "(", "expr", ".", "rhs", ",", "__nest_scope", "(", "expr", ".", "lhs", ",", "vars", ",", "lhs_value", ")", ")", ".", "value", "except", "errors", ".", "EfilterNoneError", "as", "error", ":", "error", ".", "root", "=", "expr", "raise", "return", "Result", "(", "repeated", ".", "lazy", "(", "lazy_map", ")", ",", "(", ")", ")" ]
Solves the map-form, by recursively calling its RHS with new vars.

Map-forms are binary expressions. The LHS should evaluate to an
IAssociative that can be used as new vars with which to solve a new
query, of which the RHS is the root. In most cases, the LHS will be a
Var (var).

Typically, map-forms result from the dotty "dot" (.) operator. For
example, the query "User.name" will translate to a map-form with the var
"User" on the LHS and a var "name" on the RHS. With top-level vars being
something like {"User": {"name": "Bob"}}, the Var on the LHS will
evaluate to {"name": "Bob"}, which subdict will then be used on the RHS
as new vars, and that whole form will evaluate to "Bob".
[ "Solves", "the", "map", "-", "form", "by", "recursively", "calling", "its", "RHS", "with", "new", "vars", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/solve.py#L421-L446
train
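The docstring's "User.name" example can be run end to end through the public API. The sketch below assumes efilter.api.apply exists with a 'vars' keyword and that it unwraps the lazily repeated result to a scalar; none of that is shown in the record itself.

from efilter import api

print(api.apply("User.name", vars={"User": {"name": "Bob"}}))  # -> Bob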
google/dotty
efilter/transforms/solve.py
solve_let
def solve_let(expr, vars):
    """Solves a let-form by calling RHS with nested scope."""
    lhs_value = solve(expr.lhs, vars).value
    if not isinstance(lhs_value, structured.IStructured):
        raise errors.EfilterTypeError(
            root=expr.lhs, query=expr.original,
            message="The LHS of 'let' must evaluate to an IStructured. Got %r."
                    % (lhs_value,))

    return solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value))
python
def solve_let(expr, vars):
    """Solves a let-form by calling RHS with nested scope."""
    lhs_value = solve(expr.lhs, vars).value
    if not isinstance(lhs_value, structured.IStructured):
        raise errors.EfilterTypeError(
            root=expr.lhs, query=expr.original,
            message="The LHS of 'let' must evaluate to an IStructured. Got %r."
                    % (lhs_value,))

    return solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value))
[ "def", "solve_let", "(", "expr", ",", "vars", ")", ":", "lhs_value", "=", "solve", "(", "expr", ".", "lhs", ",", "vars", ")", ".", "value", "if", "not", "isinstance", "(", "lhs_value", ",", "structured", ".", "IStructured", ")", ":", "raise", "errors", ".", "EfilterTypeError", "(", "root", "=", "expr", ".", "lhs", ",", "query", "=", "expr", ".", "original", ",", "message", "=", "\"The LHS of 'let' must evaluate to an IStructured. Got %r.\"", "%", "(", "lhs_value", ",", ")", ")", "return", "solve", "(", "expr", ".", "rhs", ",", "__nest_scope", "(", "expr", ".", "lhs", ",", "vars", ",", "lhs_value", ")", ")" ]
Solves a let-form by calling RHS with nested scope.
[ "Solves", "a", "let", "-", "form", "by", "calling", "RHS", "with", "nested", "scope", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/solve.py#L450-L459
train
google/dotty
efilter/transforms/solve.py
solve_filter
def solve_filter(expr, vars):
    """Filter values on the LHS by evaluating RHS with each value.

    Returns any LHS values for which RHS evaluates to a true value.
    """
    lhs_values, _ = __solve_for_repeated(expr.lhs, vars)

    def lazy_filter():
        for lhs_value in repeated.getvalues(lhs_values):
            if solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value)).value:
                yield lhs_value

    return Result(repeated.lazy(lazy_filter), ())
python
def solve_filter(expr, vars):
    """Filter values on the LHS by evaluating RHS with each value.

    Returns any LHS values for which RHS evaluates to a true value.
    """
    lhs_values, _ = __solve_for_repeated(expr.lhs, vars)

    def lazy_filter():
        for lhs_value in repeated.getvalues(lhs_values):
            if solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value)).value:
                yield lhs_value

    return Result(repeated.lazy(lazy_filter), ())
[ "def", "solve_filter", "(", "expr", ",", "vars", ")", ":", "lhs_values", ",", "_", "=", "__solve_for_repeated", "(", "expr", ".", "lhs", ",", "vars", ")", "def", "lazy_filter", "(", ")", ":", "for", "lhs_value", "in", "repeated", ".", "getvalues", "(", "lhs_values", ")", ":", "if", "solve", "(", "expr", ".", "rhs", ",", "__nest_scope", "(", "expr", ".", "lhs", ",", "vars", ",", "lhs_value", ")", ")", ".", "value", ":", "yield", "lhs_value", "return", "Result", "(", "repeated", ".", "lazy", "(", "lazy_filter", ")", ",", "(", ")", ")" ]
Filter values on the LHS by evaluating RHS with each value.

Returns any LHS values for which RHS evaluates to a true value.
[ "Filter", "values", "on", "the", "LHS", "by", "evaluating", "RHS", "with", "each", "value", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/solve.py#L463-L475
train
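Because solve_filter wraps a generator in repeated.lazy, LHS values are produced and the RHS evaluated only as the result is consumed. A plain-Python analogue of that laziness, not EFILTER code, is sketched below.

def lazy_filter(lhs_values, rhs_predicate):
    """Yield only the LHS values for which the predicate is true."""
    for value in lhs_values:
        if rhs_predicate(value):    # stands in for solving RHS in new scope
            yield value

evens = lazy_filter(range(10), lambda x: x % 2 == 0)
assert list(evens) == [0, 2, 4, 6, 8]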
google/dotty
efilter/transforms/solve.py
solve_sort
def solve_sort(expr, vars):
    """Sort values on the LHS by the value they yield when passed to RHS."""
    lhs_values = repeated.getvalues(__solve_for_repeated(expr.lhs, vars)[0])

    sort_expression = expr.rhs

    def _key_func(x):
        return solve(sort_expression, __nest_scope(expr.lhs, vars, x)).value

    results = ordered.ordered(lhs_values, key_func=_key_func)

    return Result(repeated.meld(*results), ())
python
def solve_sort(expr, vars):
    """Sort values on the LHS by the value they yield when passed to RHS."""
    lhs_values = repeated.getvalues(__solve_for_repeated(expr.lhs, vars)[0])

    sort_expression = expr.rhs

    def _key_func(x):
        return solve(sort_expression, __nest_scope(expr.lhs, vars, x)).value

    results = ordered.ordered(lhs_values, key_func=_key_func)

    return Result(repeated.meld(*results), ())
[ "def", "solve_sort", "(", "expr", ",", "vars", ")", ":", "lhs_values", "=", "repeated", ".", "getvalues", "(", "__solve_for_repeated", "(", "expr", ".", "lhs", ",", "vars", ")", "[", "0", "]", ")", "sort_expression", "=", "expr", ".", "rhs", "def", "_key_func", "(", "x", ")", ":", "return", "solve", "(", "sort_expression", ",", "__nest_scope", "(", "expr", ".", "lhs", ",", "vars", ",", "x", ")", ")", ".", "value", "results", "=", "ordered", ".", "ordered", "(", "lhs_values", ",", "key_func", "=", "_key_func", ")", "return", "Result", "(", "repeated", ".", "meld", "(", "*", "results", ")", ",", "(", ")", ")" ]
Sort values on the LHS by the value they yield when passed to RHS.
[ "Sort", "values", "on", "the", "LHS", "by", "the", "value", "they", "yield", "when", "passed", "to", "RHS", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/solve.py#L528-L539
train
google/dotty
efilter/transforms/solve.py
solve_each
def solve_each(expr, vars):
    """Return True if RHS evaluates to a true value with each state of LHS.

    If LHS evaluates to a normal IAssociative object then this is the same
    as a regular let-form, except the return value is always a boolean.

    If LHS evaluates to a repeated var (see efilter.protocols.repeated) of
    IAssociative objects then RHS will be evaluated with each state and
    True will be returned only if each result is true.
    """
    lhs_values, _ = __solve_for_repeated(expr.lhs, vars)

    for lhs_value in repeated.getvalues(lhs_values):
        result = solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value))
        if not result.value:
            # Each is required to return an actual boolean.
            return result._replace(value=False)

    return Result(True, ())
python
def solve_each(expr, vars):
    """Return True if RHS evaluates to a true value with each state of LHS.

    If LHS evaluates to a normal IAssociative object then this is the same
    as a regular let-form, except the return value is always a boolean.

    If LHS evaluates to a repeated var (see efilter.protocols.repeated) of
    IAssociative objects then RHS will be evaluated with each state and
    True will be returned only if each result is true.
    """
    lhs_values, _ = __solve_for_repeated(expr.lhs, vars)

    for lhs_value in repeated.getvalues(lhs_values):
        result = solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value))
        if not result.value:
            # Each is required to return an actual boolean.
            return result._replace(value=False)

    return Result(True, ())
[ "def", "solve_each", "(", "expr", ",", "vars", ")", ":", "lhs_values", ",", "_", "=", "__solve_for_repeated", "(", "expr", ".", "lhs", ",", "vars", ")", "for", "lhs_value", "in", "repeated", ".", "getvalues", "(", "lhs_values", ")", ":", "result", "=", "solve", "(", "expr", ".", "rhs", ",", "__nest_scope", "(", "expr", ".", "lhs", ",", "vars", ",", "lhs_value", ")", ")", "if", "not", "result", ".", "value", ":", "# Each is required to return an actual boolean.", "return", "result", ".", "_replace", "(", "value", "=", "False", ")", "return", "Result", "(", "True", ",", "(", ")", ")" ]
Return True if RHS evaluates to a true value with each state of LHS. If LHS evaluates to a normal IAssociative object then this is the same as a regular let-form, except the return value is always a boolean. If LHS evaluates to a repeated var (see efilter.protocols.repeated) of IAssociative objects then RHS will be evaluated with each state and True will be returned only if each result is true.
[ "Return", "True", "if", "RHS", "evaluates", "to", "a", "true", "value", "with", "each", "state", "of", "LHS", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/solve.py#L543-L560
train
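The "each" semantics above reduce to all-quantification with early exit; a small sketch under that reading (plain iterables stand in for EFILTER's repeated values, so this is not the library API):

def each(values, predicate):
    for value in values:
        if not predicate(value):
            return False  # an actual boolean, as solve_each requires
    return True

print(each([2, 4, 6], lambda x: x % 2 == 0))  # True
print(each([2, 3, 6], lambda x: x % 2 == 0))  # False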
google/dotty
efilter/transforms/solve.py
solve_cast
def solve_cast(expr, vars): """Get cast LHS to RHS.""" lhs = solve(expr.lhs, vars).value t = solve(expr.rhs, vars).value if t is None: raise errors.EfilterTypeError( root=expr, query=expr.source, message="Cannot find type named %r." % expr.rhs.value) if not isinstance(t, type): raise errors.EfilterTypeError( root=expr.rhs, query=expr.source, message="%r is not a type and cannot be used with 'cast'." % (t,)) try: cast_value = t(lhs) except TypeError: raise errors.EfilterTypeError( root=expr, query=expr.source, message="Invalid cast %s -> %s." % (type(lhs), t)) return Result(cast_value, ())
python
def solve_cast(expr, vars): """Get cast LHS to RHS.""" lhs = solve(expr.lhs, vars).value t = solve(expr.rhs, vars).value if t is None: raise errors.EfilterTypeError( root=expr, query=expr.source, message="Cannot find type named %r." % expr.rhs.value) if not isinstance(t, type): raise errors.EfilterTypeError( root=expr.rhs, query=expr.source, message="%r is not a type and cannot be used with 'cast'." % (t,)) try: cast_value = t(lhs) except TypeError: raise errors.EfilterTypeError( root=expr, query=expr.source, message="Invalid cast %s -> %s." % (type(lhs), t)) return Result(cast_value, ())
[ "def", "solve_cast", "(", "expr", ",", "vars", ")", ":", "lhs", "=", "solve", "(", "expr", ".", "lhs", ",", "vars", ")", ".", "value", "t", "=", "solve", "(", "expr", ".", "rhs", ",", "vars", ")", ".", "value", "if", "t", "is", "None", ":", "raise", "errors", ".", "EfilterTypeError", "(", "root", "=", "expr", ",", "query", "=", "expr", ".", "source", ",", "message", "=", "\"Cannot find type named %r.\"", "%", "expr", ".", "rhs", ".", "value", ")", "if", "not", "isinstance", "(", "t", ",", "type", ")", ":", "raise", "errors", ".", "EfilterTypeError", "(", "root", "=", "expr", ".", "rhs", ",", "query", "=", "expr", ".", "source", ",", "message", "=", "\"%r is not a type and cannot be used with 'cast'.\"", "%", "(", "t", ",", ")", ")", "try", ":", "cast_value", "=", "t", "(", "lhs", ")", "except", "TypeError", ":", "raise", "errors", ".", "EfilterTypeError", "(", "root", "=", "expr", ",", "query", "=", "expr", ".", "source", ",", "message", "=", "\"Invalid cast %s -> %s.\"", "%", "(", "type", "(", "lhs", ")", ",", "t", ")", ")", "return", "Result", "(", "cast_value", ",", "(", ")", ")" ]
Cast LHS to the type on the RHS.
[ "Cast", "LHS", "to", "the", "type", "on", "the", "RHS", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/solve.py#L586-L608
train
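The cast logic above is: resolve the target, verify it is a type, attempt the conversion, and wrap failures in a typed error. A hedged standalone sketch with plain TypeError standing in for errors.EfilterTypeError:

def cast(value, target_type):
    if not isinstance(target_type, type):
        raise TypeError("%r is not a type and cannot be used with 'cast'."
                        % (target_type,))
    try:
        return target_type(value)
    except TypeError:
        raise TypeError("Invalid cast %s -> %s." % (type(value), target_type))

print(cast("42", int))  # 42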
google/dotty
efilter/transforms/solve.py
solve_isinstance
def solve_isinstance(expr, vars): """Typecheck whether LHS is type on the RHS.""" lhs = solve(expr.lhs, vars) try: t = solve(expr.rhs, vars).value except errors.EfilterKeyError: t = None if t is None: raise errors.EfilterTypeError( root=expr.rhs, query=expr.source, message="Cannot find type named %r." % expr.rhs.value) if not isinstance(t, type): raise errors.EfilterTypeError( root=expr.rhs, query=expr.source, message="%r is not a type and cannot be used with 'isa'." % (t,)) return Result(protocol.implements(lhs.value, t), ())
python
def solve_isinstance(expr, vars): """Typecheck whether LHS is type on the RHS.""" lhs = solve(expr.lhs, vars) try: t = solve(expr.rhs, vars).value except errors.EfilterKeyError: t = None if t is None: raise errors.EfilterTypeError( root=expr.rhs, query=expr.source, message="Cannot find type named %r." % expr.rhs.value) if not isinstance(t, type): raise errors.EfilterTypeError( root=expr.rhs, query=expr.source, message="%r is not a type and cannot be used with 'isa'." % (t,)) return Result(protocol.implements(lhs.value, t), ())
[ "def", "solve_isinstance", "(", "expr", ",", "vars", ")", ":", "lhs", "=", "solve", "(", "expr", ".", "lhs", ",", "vars", ")", "try", ":", "t", "=", "solve", "(", "expr", ".", "rhs", ",", "vars", ")", ".", "value", "except", "errors", ".", "EfilterKeyError", ":", "t", "=", "None", "if", "t", "is", "None", ":", "raise", "errors", ".", "EfilterTypeError", "(", "root", "=", "expr", ".", "rhs", ",", "query", "=", "expr", ".", "source", ",", "message", "=", "\"Cannot find type named %r.\"", "%", "expr", ".", "rhs", ".", "value", ")", "if", "not", "isinstance", "(", "t", ",", "type", ")", ":", "raise", "errors", ".", "EfilterTypeError", "(", "root", "=", "expr", ".", "rhs", ",", "query", "=", "expr", ".", "source", ",", "message", "=", "\"%r is not a type and cannot be used with 'isa'.\"", "%", "(", "t", ",", ")", ")", "return", "Result", "(", "protocol", ".", "implements", "(", "lhs", ".", "value", ",", "t", ")", ",", "(", ")", ")" ]
Typecheck whether LHS is an instance of the type on the RHS.
[ "Typecheck", "whether", "LHS", "is", "an", "instance", "of", "the", "type", "on", "the", "RHS", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/solve.py#L612-L631
train
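A companion sketch of the 'isa' check: plain isinstance stands in for protocol.implements, which in EFILTER also understands protocol classes, so this is an approximation rather than the library's behavior:

def isa(value, t):
    if not isinstance(t, type):
        raise TypeError("%r is not a type and cannot be used with 'isa'." % (t,))
    return isinstance(value, t)

print(isa(5, int))  # True
print(isa(5, str))  # False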
radical-cybertools/radical.entk
setup.py
set_version
def set_version(mod_root): """ mod_root a VERSION file containes the version strings is created in mod_root, during installation. That file is used at runtime to get the version information. """ try: version_base = None version_detail = None # get version from './VERSION' src_root = os.path.dirname(__file__) if not src_root: src_root = '.' with open(src_root + '/VERSION', 'r') as f: version_base = f.readline().strip() # attempt to get version detail information from git # We only do that though if we are in a repo root dir, # ie. if 'git rev-parse --show-prefix' returns an empty string -- # otherwise we get confused if the ve lives beneath another repository, # and the pip version used uses an install tmp dir in the ve space # instead of /tmp (which seems to happen with some pip/setuptools # versions). p = sp.Popen('cd %s ; ' 'test -z `git rev-parse --show-prefix` || exit -1; ' 'tag=`git describe --tags --always` 2>/dev/null ; ' 'branch=`git branch | grep -e "^*" | cut -f 2- -d " "` 2>/dev/null ; ' 'echo $tag@$branch' % src_root, stdout=sp.PIPE, stderr=sp.STDOUT, shell=True) version_detail = str(p.communicate()[0].strip()) version_detail = version_detail.replace('detached from ', 'detached-') # remove all non-alphanumeric (and then some) chars version_detail = re.sub('[/ ]+', '-', version_detail) version_detail = re.sub('[^a-zA-Z0-9_+@.-]+', '', version_detail) if p.returncode != 0 or \ version_detail == '@' or \ 'git-error' in version_detail or \ 'not-a-git-repo' in version_detail or \ 'not-found' in version_detail or \ 'fatal' in version_detail : version = version_base elif '@' not in version_base: version = '%s-%s' % (version_base, version_detail) else: version = version_base # make sure the version files exist for the runtime version inspection path = '%s/%s' % (src_root, mod_root) with open(path + "/VERSION", "w") as f: f.write(version + "\n") sdist_name = "%s-%s.tar.gz" % (name, version) sdist_name = sdist_name.replace('/', '-') sdist_name = sdist_name.replace('@', '-') sdist_name = sdist_name.replace('#', '-') sdist_name = sdist_name.replace('_', '-') if '--record' in sys.argv or \ 'bdist_egg' in sys.argv or \ 'bdist_wheel' in sys.argv : # pip install stage 2 or easy_install stage 1 # # pip install will untar the sdist in a tmp tree. In that tmp # tree, we won't be able to derive git version tags -- so we pack the # formerly derived version as ./VERSION shutil.move("VERSION", "VERSION.bak") # backup version shutil.copy("%s/VERSION" % path, "VERSION") # use full version instead os.system ("python setup.py sdist") # build sdist shutil.copy('dist/%s' % sdist_name, '%s/%s' % (mod_root, sdist_name)) # copy into tree shutil.move("VERSION.bak", "VERSION") # restore version with open(path + "/SDIST", "w") as f: f.write(sdist_name + "\n") return version_base, version_detail, sdist_name except Exception as e : raise RuntimeError('Could not extract/set version: %s' % e)
python
def set_version(mod_root): """ mod_root a VERSION file containes the version strings is created in mod_root, during installation. That file is used at runtime to get the version information. """ try: version_base = None version_detail = None # get version from './VERSION' src_root = os.path.dirname(__file__) if not src_root: src_root = '.' with open(src_root + '/VERSION', 'r') as f: version_base = f.readline().strip() # attempt to get version detail information from git # We only do that though if we are in a repo root dir, # ie. if 'git rev-parse --show-prefix' returns an empty string -- # otherwise we get confused if the ve lives beneath another repository, # and the pip version used uses an install tmp dir in the ve space # instead of /tmp (which seems to happen with some pip/setuptools # versions). p = sp.Popen('cd %s ; ' 'test -z `git rev-parse --show-prefix` || exit -1; ' 'tag=`git describe --tags --always` 2>/dev/null ; ' 'branch=`git branch | grep -e "^*" | cut -f 2- -d " "` 2>/dev/null ; ' 'echo $tag@$branch' % src_root, stdout=sp.PIPE, stderr=sp.STDOUT, shell=True) version_detail = str(p.communicate()[0].strip()) version_detail = version_detail.replace('detached from ', 'detached-') # remove all non-alphanumeric (and then some) chars version_detail = re.sub('[/ ]+', '-', version_detail) version_detail = re.sub('[^a-zA-Z0-9_+@.-]+', '', version_detail) if p.returncode != 0 or \ version_detail == '@' or \ 'git-error' in version_detail or \ 'not-a-git-repo' in version_detail or \ 'not-found' in version_detail or \ 'fatal' in version_detail : version = version_base elif '@' not in version_base: version = '%s-%s' % (version_base, version_detail) else: version = version_base # make sure the version files exist for the runtime version inspection path = '%s/%s' % (src_root, mod_root) with open(path + "/VERSION", "w") as f: f.write(version + "\n") sdist_name = "%s-%s.tar.gz" % (name, version) sdist_name = sdist_name.replace('/', '-') sdist_name = sdist_name.replace('@', '-') sdist_name = sdist_name.replace('#', '-') sdist_name = sdist_name.replace('_', '-') if '--record' in sys.argv or \ 'bdist_egg' in sys.argv or \ 'bdist_wheel' in sys.argv : # pip install stage 2 or easy_install stage 1 # # pip install will untar the sdist in a tmp tree. In that tmp # tree, we won't be able to derive git version tags -- so we pack the # formerly derived version as ./VERSION shutil.move("VERSION", "VERSION.bak") # backup version shutil.copy("%s/VERSION" % path, "VERSION") # use full version instead os.system ("python setup.py sdist") # build sdist shutil.copy('dist/%s' % sdist_name, '%s/%s' % (mod_root, sdist_name)) # copy into tree shutil.move("VERSION.bak", "VERSION") # restore version with open(path + "/SDIST", "w") as f: f.write(sdist_name + "\n") return version_base, version_detail, sdist_name except Exception as e : raise RuntimeError('Could not extract/set version: %s' % e)
[ "def", "set_version", "(", "mod_root", ")", ":", "try", ":", "version_base", "=", "None", "version_detail", "=", "None", "# get version from './VERSION'", "src_root", "=", "os", ".", "path", ".", "dirname", "(", "__file__", ")", "if", "not", "src_root", ":", "src_root", "=", "'.'", "with", "open", "(", "src_root", "+", "'/VERSION'", ",", "'r'", ")", "as", "f", ":", "version_base", "=", "f", ".", "readline", "(", ")", ".", "strip", "(", ")", "# attempt to get version detail information from git", "# We only do that though if we are in a repo root dir,", "# ie. if 'git rev-parse --show-prefix' returns an empty string --", "# otherwise we get confused if the ve lives beneath another repository,", "# and the pip version used uses an install tmp dir in the ve space", "# instead of /tmp (which seems to happen with some pip/setuptools", "# versions).", "p", "=", "sp", ".", "Popen", "(", "'cd %s ; '", "'test -z `git rev-parse --show-prefix` || exit -1; '", "'tag=`git describe --tags --always` 2>/dev/null ; '", "'branch=`git branch | grep -e \"^*\" | cut -f 2- -d \" \"` 2>/dev/null ; '", "'echo $tag@$branch'", "%", "src_root", ",", "stdout", "=", "sp", ".", "PIPE", ",", "stderr", "=", "sp", ".", "STDOUT", ",", "shell", "=", "True", ")", "version_detail", "=", "str", "(", "p", ".", "communicate", "(", ")", "[", "0", "]", ".", "strip", "(", ")", ")", "version_detail", "=", "version_detail", ".", "replace", "(", "'detached from '", ",", "'detached-'", ")", "# remove all non-alphanumeric (and then some) chars", "version_detail", "=", "re", ".", "sub", "(", "'[/ ]+'", ",", "'-'", ",", "version_detail", ")", "version_detail", "=", "re", ".", "sub", "(", "'[^a-zA-Z0-9_+@.-]+'", ",", "''", ",", "version_detail", ")", "if", "p", ".", "returncode", "!=", "0", "or", "version_detail", "==", "'@'", "or", "'git-error'", "in", "version_detail", "or", "'not-a-git-repo'", "in", "version_detail", "or", "'not-found'", "in", "version_detail", "or", "'fatal'", "in", "version_detail", ":", "version", "=", "version_base", "elif", "'@'", "not", "in", "version_base", ":", "version", "=", "'%s-%s'", "%", "(", "version_base", ",", "version_detail", ")", "else", ":", "version", "=", "version_base", "# make sure the version files exist for the runtime version inspection", "path", "=", "'%s/%s'", "%", "(", "src_root", ",", "mod_root", ")", "with", "open", "(", "path", "+", "\"/VERSION\"", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "version", "+", "\"\\n\"", ")", "sdist_name", "=", "\"%s-%s.tar.gz\"", "%", "(", "name", ",", "version", ")", "sdist_name", "=", "sdist_name", ".", "replace", "(", "'/'", ",", "'-'", ")", "sdist_name", "=", "sdist_name", ".", "replace", "(", "'@'", ",", "'-'", ")", "sdist_name", "=", "sdist_name", ".", "replace", "(", "'#'", ",", "'-'", ")", "sdist_name", "=", "sdist_name", ".", "replace", "(", "'_'", ",", "'-'", ")", "if", "'--record'", "in", "sys", ".", "argv", "or", "'bdist_egg'", "in", "sys", ".", "argv", "or", "'bdist_wheel'", "in", "sys", ".", "argv", ":", "# pip install stage 2 or easy_install stage 1", "#", "# pip install will untar the sdist in a tmp tree. In that tmp", "# tree, we won't be able to derive git version tags -- so we pack the", "# formerly derived version as ./VERSION", "shutil", ".", "move", "(", "\"VERSION\"", ",", "\"VERSION.bak\"", ")", "# backup version", "shutil", ".", "copy", "(", "\"%s/VERSION\"", "%", "path", ",", "\"VERSION\"", ")", "# use full version instead", "os", ".", "system", "(", "\"python setup.py sdist\"", ")", "# build sdist", "shutil", ".", "copy", "(", "'dist/%s'", "%", "sdist_name", ",", "'%s/%s'", "%", "(", "mod_root", ",", "sdist_name", ")", ")", "# copy into tree", "shutil", ".", "move", "(", "\"VERSION.bak\"", ",", "\"VERSION\"", ")", "# restore version", "with", "open", "(", "path", "+", "\"/SDIST\"", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "sdist_name", "+", "\"\\n\"", ")", "return", "version_base", ",", "version_detail", ",", "sdist_name", "except", "Exception", "as", "e", ":", "raise", "RuntimeError", "(", "'Could not extract/set version: %s'", "%", "e", ")" ]
mod_root: a VERSION file containing the version string is created in mod_root during installation. That file is used at runtime to get the version information.
[ "mod_root", ":", "a", "VERSION", "file", "containing", "the", "version", "string", "is", "created", "in", "mod_root", "during", "installation", ".", "That", "file", "is", "used", "at", "runtime", "to", "get", "the", "version", "information", "." ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/setup.py#L26-L111
train
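The version-detail step above shells out to git and sanitizes the result; a condensed, Python 3-friendly sketch of just that step (the helper name is ours, the command line mirrors the record, and git must be on PATH):

import re
import subprocess as sp

def git_version_detail(src_root='.'):
    p = sp.Popen('cd %s; tag=`git describe --tags --always`; '
                 'branch=`git branch | grep -e "^*" | cut -f 2- -d " "`; '
                 'echo $tag@$branch' % src_root,
                 stdout=sp.PIPE, stderr=sp.STDOUT, shell=True)
    detail = p.communicate()[0].decode().strip()
    detail = re.sub('[/ ]+', '-', detail)            # same sanitization
    return re.sub('[^a-zA-Z0-9_+@.-]+', '', detail)  # as the record above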
radical-cybertools/radical.entk
setup.py
makeDataFiles
def makeDataFiles(prefix, dir): """ Create distutils data_files structure from dir distutil will copy all file rooted under dir into prefix, excluding dir itself, just like 'ditto src dst' works, and unlike 'cp -r src dst, which copy src into dst'. Typical usage: # install the contents of 'wiki' under sys.prefix+'share/moin' data_files = makeDataFiles('share/moin', 'wiki') For this directory structure: root file1 file2 dir file subdir file makeDataFiles('prefix', 'root') will create this distutil data_files structure: [('prefix', ['file1', 'file2']), ('prefix/dir', ['file']), ('prefix/dir/subdir', ['file'])] """ # Strip 'dir/' from of path before joining with prefix dir = dir.rstrip('/') strip = len(dir) + 1 found = [] os.path.walk(dir, visit, (prefix, strip, found)) #print found[0] return found[0]
python
def makeDataFiles(prefix, dir): """ Create distutils data_files structure from dir distutil will copy all file rooted under dir into prefix, excluding dir itself, just like 'ditto src dst' works, and unlike 'cp -r src dst, which copy src into dst'. Typical usage: # install the contents of 'wiki' under sys.prefix+'share/moin' data_files = makeDataFiles('share/moin', 'wiki') For this directory structure: root file1 file2 dir file subdir file makeDataFiles('prefix', 'root') will create this distutil data_files structure: [('prefix', ['file1', 'file2']), ('prefix/dir', ['file']), ('prefix/dir/subdir', ['file'])] """ # Strip 'dir/' from of path before joining with prefix dir = dir.rstrip('/') strip = len(dir) + 1 found = [] os.path.walk(dir, visit, (prefix, strip, found)) #print found[0] return found[0]
[ "def", "makeDataFiles", "(", "prefix", ",", "dir", ")", ":", "# Strip 'dir/' from of path before joining with prefix", "dir", "=", "dir", ".", "rstrip", "(", "'/'", ")", "strip", "=", "len", "(", "dir", ")", "+", "1", "found", "=", "[", "]", "os", ".", "path", ".", "walk", "(", "dir", ",", "visit", ",", "(", "prefix", ",", "strip", ",", "found", ")", ")", "#print found[0]", "return", "found", "[", "0", "]" ]
Create distutils data_files structure from dir distutil will copy all files rooted under dir into prefix, excluding dir itself, just like 'ditto src dst' works, and unlike 'cp -r src dst', which copies src into dst. Typical usage: # install the contents of 'wiki' under sys.prefix+'share/moin' data_files = makeDataFiles('share/moin', 'wiki') For this directory structure: root file1 file2 dir file subdir file makeDataFiles('prefix', 'root') will create this distutil data_files structure: [('prefix', ['file1', 'file2']), ('prefix/dir', ['file']), ('prefix/dir/subdir', ['file'])]
[ "Create", "distutils", "data_files", "structure", "from", "dir", "distutil", "will", "copy", "all", "files", "rooted", "under", "dir", "into", "prefix", "excluding", "dir", "itself", "just", "like", "ditto", "src", "dst", "works", "and", "unlike", "cp", "-", "r", "src", "dst", "which", "copies", "src", "into", "dst", ".", "Typical", "usage", ":", "#", "install", "the", "contents", "of", "wiki", "under", "sys", ".", "prefix", "+", "share", "/", "moin", "data_files", "=", "makeDataFiles", "(", "share", "/", "moin", "wiki", ")", "For", "this", "directory", "structure", ":", "root", "file1", "file2", "dir", "file", "subdir", "file", "makeDataFiles", "(", "prefix", "root", ")", "will", "create", "this", "distutil", "data_files", "structure", ":", "[", "(", "prefix", "[", "file1", "file2", "]", ")", "(", "prefix", "/", "dir", "[", "file", "]", ")", "(", "prefix", "/", "dir", "/", "subdir", "[", "file", "]", ")", "]" ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/setup.py#L126-L153
train
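makeDataFiles above relies on os.path.walk, which exists only in Python 2; a Python 3 equivalent of the same data_files construction using os.walk (note this sketch returns the full list, where the record returns only found[0]):

import os

def make_data_files(prefix, root):
    root = root.rstrip('/')
    strip = len(root) + 1
    found = []
    for dirname, _dirs, names in os.walk(root):
        files = [os.path.join(dirname, n) for n in sorted(names)]
        found.append((os.path.join(prefix, dirname[strip:]), files))
    return found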
radical-cybertools/radical.entk
setup.py
visit
def visit((prefix, strip, found), dirname, names): """ Visit directory, create distutil tuple Add distutil tuple for each directory using this format: (destination, [dirname/file1, dirname/file2, ...]) distutil will copy later file1, file2, ... info destination. """ files = [] # Iterate over a copy of names, modify names for name in names[:]: path = os.path.join(dirname, name) # Ignore directories - we will visit later if os.path.isdir(path): # Remove directories we don't want to visit later if isbad(name): names.remove(name) continue elif isgood(name): files.append(path) destination = os.path.join(prefix, dirname[strip:]) found.append((destination, files))
python
def visit((prefix, strip, found), dirname, names): """ Visit directory, create distutil tuple Add distutil tuple for each directory using this format: (destination, [dirname/file1, dirname/file2, ...]) distutil will copy later file1, file2, ... info destination. """ files = [] # Iterate over a copy of names, modify names for name in names[:]: path = os.path.join(dirname, name) # Ignore directories - we will visit later if os.path.isdir(path): # Remove directories we don't want to visit later if isbad(name): names.remove(name) continue elif isgood(name): files.append(path) destination = os.path.join(prefix, dirname[strip:]) found.append((destination, files))
[ "def", "visit", "(", "(", "prefix", ",", "strip", ",", "found", ")", ",", "dirname", ",", "names", ")", ":", "files", "=", "[", "]", "# Iterate over a copy of names, modify names", "for", "name", "in", "names", "[", ":", "]", ":", "path", "=", "os", ".", "path", ".", "join", "(", "dirname", ",", "name", ")", "# Ignore directories - we will visit later", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "# Remove directories we don't want to visit later", "if", "isbad", "(", "name", ")", ":", "names", ".", "remove", "(", "name", ")", "continue", "elif", "isgood", "(", "name", ")", ":", "files", ".", "append", "(", "path", ")", "destination", "=", "os", ".", "path", ".", "join", "(", "prefix", ",", "dirname", "[", "strip", ":", "]", ")", "found", ".", "append", "(", "(", "destination", ",", "files", ")", ")" ]
Visit directory, create distutil tuple Add distutil tuple for each directory using this format: (destination, [dirname/file1, dirname/file2, ...]) distutil will later copy file1, file2, ... into destination.
[ "Visit", "directory", "create", "distutil", "tuple", "Add", "distutil", "tuple", "for", "each", "directory", "using", "this", "format", ":", "(", "destination", "[", "dirname", "/", "file1", "dirname", "/", "file2", "...", "]", ")", "distutil", "will", "later", "copy", "file1", "file2", "...", "into", "destination", "." ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/setup.py#L155-L174
train
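The signature def visit((prefix, strip, found), dirname, names) uses Python 2 tuple-parameter unpacking, which Python 3 removed; there the callback unpacks explicitly. A sketch of the same pruning callback, with stand-in filters since isbad is defined elsewhere in the repo:

import os

def isbad(name):   # stand-in filter, not the original
    return name.startswith('.')

def isgood(name):  # stand-in filter, not the original
    return not isbad(name) and name.endswith('.py')

def visit(args, dirname, names):
    prefix, strip, found = args      # explicit unpack replaces the 2.x form
    files = []
    for name in names[:]:            # iterate over a copy while pruning
        path = os.path.join(dirname, name)
        if os.path.isdir(path):
            if isbad(name):
                names.remove(name)   # under os.path.walk this pruned the descent
        elif isgood(name):
            files.append(path)
    found.append((os.path.join(prefix, dirname[strip:]), files))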
radical-cybertools/radical.entk
setup.py
isgood
def isgood(name): """ Whether name should be installed """ if not isbad(name): if name.endswith('.py') or name.endswith('.json') or name.endswith('.tar'): return True return False
python
def isgood(name): """ Whether name should be installed """ if not isbad(name): if name.endswith('.py') or name.endswith('.json') or name.endswith('.tar'): return True return False
[ "def", "isgood", "(", "name", ")", ":", "if", "not", "isbad", "(", "name", ")", ":", "if", "name", ".", "endswith", "(", "'.py'", ")", "or", "name", ".", "endswith", "(", "'.json'", ")", "or", "name", ".", "endswith", "(", "'.tar'", ")", ":", "return", "True", "return", "False" ]
Whether name should be installed
[ "Whether", "name", "should", "be", "installed" ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/setup.py#L183-L188
train
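Since str.endswith accepts a tuple of suffixes, the chained checks in isgood collapse to a single call; a sketch with isbad stubbed, as its definition is not part of this record:

def isbad(name):   # stand-in for the repo's isbad
    return name.startswith('.')

def isgood(name):
    return not isbad(name) and name.endswith(('.py', '.json', '.tar'))

print(isgood('setup.py'))   # True
print(isgood('notes.txt'))  # False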
radical-cybertools/radical.entk
src/radical/entk/appman/wfprocessor.py
WFprocessor._initialize_workflow
def _initialize_workflow(self): """ **Purpose**: Initialize the PST of the workflow with a uid and type checks """ try: self._prof.prof('initializing workflow', uid=self._uid) for p in self._workflow: p._assign_uid(self._sid) self._prof.prof('workflow initialized', uid=self._uid) except Exception, ex: self._logger.exception( 'Fatal error while initializing workflow: %s' % ex) raise
python
def _initialize_workflow(self): """ **Purpose**: Initialize the PST of the workflow with a uid and type checks """ try: self._prof.prof('initializing workflow', uid=self._uid) for p in self._workflow: p._assign_uid(self._sid) self._prof.prof('workflow initialized', uid=self._uid) except Exception, ex: self._logger.exception( 'Fatal error while initializing workflow: %s' % ex) raise
[ "def", "_initialize_workflow", "(", "self", ")", ":", "try", ":", "self", ".", "_prof", ".", "prof", "(", "'initializing workflow'", ",", "uid", "=", "self", ".", "_uid", ")", "for", "p", "in", "self", ".", "_workflow", ":", "p", ".", "_assign_uid", "(", "self", ".", "_sid", ")", "self", ".", "_prof", ".", "prof", "(", "'workflow initialized'", ",", "uid", "=", "self", ".", "_uid", ")", "except", "Exception", ",", "ex", ":", "self", ".", "_logger", ".", "exception", "(", "'Fatal error while initializing workflow: %s'", "%", "ex", ")", "raise" ]
**Purpose**: Initialize the PST of the workflow with a uid and type checks
[ "**", "Purpose", "**", ":", "Initialize", "the", "PST", "of", "the", "workflow", "with", "a", "uid", "and", "type", "checks" ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/appman/wfprocessor.py#L87-L104
train
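The except Exception, ex: spelling in the record above is Python 2-only; the portable form is except Exception as ex:. A toy sketch of the same uid-assignment pass with portable syntax (the Pipeline class and uid format here are illustrative, not EnTK's):

class Pipeline(object):
    def __init__(self, idx):
        self.uid = None
        self._idx = idx
    def _assign_uid(self, sid):
        self.uid = '%s.pipeline.%04d' % (sid, self._idx)

workflow = [Pipeline(i) for i in range(2)]
try:
    for p in workflow:
        p._assign_uid('re.session.0000')
except Exception as ex:  # portable spelling of the record's except clause
    raise
print([p.uid for p in workflow])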
radical-cybertools/radical.entk
src/radical/entk/appman/wfprocessor.py
WFprocessor._enqueue
def _enqueue(self, local_prof): """ **Purpose**: This is the function that is run in the enqueue thread. This function extracts Tasks from the copy of workflow that exists in the WFprocessor object and pushes them to the queues in the pending_q list. Since this thread works on the copy of the workflow, every state update to the Task, Stage and Pipeline is communicated back to the AppManager (master process) via the 'sync_with_master' function that has dedicated queues to communicate with the master. Details: Termination condition of this thread is set by the wfp process. """ try: local_prof.prof('enqueue-thread started', uid=self._uid) self._logger.info('enqueue-thread started') # Acquire a connection+channel to the rmq server mq_connection = pika.BlockingConnection( pika.ConnectionParameters(host=self._mq_hostname, port=self._port)) mq_channel = mq_connection.channel() last = time.time() while not self._enqueue_thread_terminate.is_set(): ''' We iterate through all pipelines to collect tasks from stages that are pending scheduling. Once collected, these tasks will be communicated to the tmgr in bulk. ''' workload = [] scheduled_stages = [] for pipe in self._workflow: with pipe.lock: if ((not pipe.completed) and (not pipe.state == states.SUSPENDED)): # Test if the pipeline is already in the final state if pipe.state in states.FINAL: continue elif pipe.state == states.INITIAL: # Set state of pipeline to SCHEDULING if it is in INITIAL transition(obj=pipe, obj_type='Pipeline', new_state=states.SCHEDULING, channel=mq_channel, queue='%s-enq-to-sync' % self._sid, profiler=local_prof, logger=self._logger) executable_stage = pipe.stages[pipe.current_stage - 1] if not executable_stage.uid: executable_stage.parent_pipeline['uid'] = pipe.uid executable_stage.parent_pipeline['name'] = pipe.name executable_stage._assign_uid(self._sid) if executable_stage.state in [states.INITIAL, states.SCHEDULED]: if executable_stage.state == states.INITIAL: transition(obj=executable_stage, obj_type='Stage', new_state=states.SCHEDULING, channel=mq_channel, queue='%s-enq-to-sync' % self._sid, profiler=local_prof, logger=self._logger) executable_tasks = executable_stage.tasks for executable_task in executable_tasks: if (executable_task.state == states.INITIAL)or \ ((executable_task.state == states.FAILED)and(self._resubmit_failed)): # Set state of Tasks in current Stage to SCHEDULING transition(obj=executable_task, obj_type='Task', new_state=states.SCHEDULING, channel=mq_channel, queue='%s-enq-to-sync' % self._sid, profiler=local_prof, logger=self._logger) # task_as_dict = json.dumps(executable_task.to_dict()) workload.append(executable_task) if executable_stage not in scheduled_stages: scheduled_stages.append( executable_stage) if workload: # Put the task on one of the pending_queues workload_as_dict = list() for task in workload: workload_as_dict.append(task.to_dict()) workload_as_dict = json.dumps(workload_as_dict) mq_channel.basic_publish(exchange='', routing_key=self._pending_queue[0], body=workload_as_dict # properties=pika.BasicProperties( # make message persistent # delivery_mode = 2) ) for task in workload: # Set state of Tasks in current Stage to SCHEDULED transition(obj=task, obj_type='Task', new_state=states.SCHEDULED, channel=mq_channel, queue='%s-enq-to-sync' % self._sid, profiler=local_prof, logger=self._logger) self._logger.debug( 'Task %s published to pending queue' % task.uid) if scheduled_stages: for executable_stage in scheduled_stages: transition(obj=executable_stage, obj_type='Stage', new_state=states.SCHEDULED, channel=mq_channel, queue='%s-enq-to-sync' % self._sid, profiler=local_prof, logger=self._logger) # Appease pika cos it thinks the connection is dead now = time.time() if now - last >= self._rmq_ping_interval: mq_connection.process_data_events() last = now self._logger.info('Enqueue thread terminated') mq_connection.close() local_prof.prof('terminating enqueue-thread', uid=self._uid) except KeyboardInterrupt: self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' + 'trying to cancel enqueuer thread gracefully...') mq_connection.close() raise KeyboardInterrupt except Exception, ex: self._logger.exception('Error in enqueue-thread: %s' % ex) try: mq_connection.close() except Exception as ex: self._logger.warning('mq_connection not created, %s' % ex) raise
python
def _enqueue(self, local_prof): """ **Purpose**: This is the function that is run in the enqueue thread. This function extracts Tasks from the copy of workflow that exists in the WFprocessor object and pushes them to the queues in the pending_q list. Since this thread works on the copy of the workflow, every state update to the Task, Stage and Pipeline is communicated back to the AppManager (master process) via the 'sync_with_master' function that has dedicated queues to communicate with the master. Details: Termination condition of this thread is set by the wfp process. """ try: local_prof.prof('enqueue-thread started', uid=self._uid) self._logger.info('enqueue-thread started') # Acquire a connection+channel to the rmq server mq_connection = pika.BlockingConnection( pika.ConnectionParameters(host=self._mq_hostname, port=self._port)) mq_channel = mq_connection.channel() last = time.time() while not self._enqueue_thread_terminate.is_set(): ''' We iterate through all pipelines to collect tasks from stages that are pending scheduling. Once collected, these tasks will be communicated to the tmgr in bulk. ''' workload = [] scheduled_stages = [] for pipe in self._workflow: with pipe.lock: if ((not pipe.completed) and (not pipe.state == states.SUSPENDED)): # Test if the pipeline is already in the final state if pipe.state in states.FINAL: continue elif pipe.state == states.INITIAL: # Set state of pipeline to SCHEDULING if it is in INITIAL transition(obj=pipe, obj_type='Pipeline', new_state=states.SCHEDULING, channel=mq_channel, queue='%s-enq-to-sync' % self._sid, profiler=local_prof, logger=self._logger) executable_stage = pipe.stages[pipe.current_stage - 1] if not executable_stage.uid: executable_stage.parent_pipeline['uid'] = pipe.uid executable_stage.parent_pipeline['name'] = pipe.name executable_stage._assign_uid(self._sid) if executable_stage.state in [states.INITIAL, states.SCHEDULED]: if executable_stage.state == states.INITIAL: transition(obj=executable_stage, obj_type='Stage', new_state=states.SCHEDULING, channel=mq_channel, queue='%s-enq-to-sync' % self._sid, profiler=local_prof, logger=self._logger) executable_tasks = executable_stage.tasks for executable_task in executable_tasks: if (executable_task.state == states.INITIAL)or \ ((executable_task.state == states.FAILED)and(self._resubmit_failed)): # Set state of Tasks in current Stage to SCHEDULING transition(obj=executable_task, obj_type='Task', new_state=states.SCHEDULING, channel=mq_channel, queue='%s-enq-to-sync' % self._sid, profiler=local_prof, logger=self._logger) # task_as_dict = json.dumps(executable_task.to_dict()) workload.append(executable_task) if executable_stage not in scheduled_stages: scheduled_stages.append( executable_stage) if workload: # Put the task on one of the pending_queues workload_as_dict = list() for task in workload: workload_as_dict.append(task.to_dict()) workload_as_dict = json.dumps(workload_as_dict) mq_channel.basic_publish(exchange='', routing_key=self._pending_queue[0], body=workload_as_dict # properties=pika.BasicProperties( # make message persistent # delivery_mode = 2) ) for task in workload: # Set state of Tasks in current Stage to SCHEDULED transition(obj=task, obj_type='Task', new_state=states.SCHEDULED, channel=mq_channel, queue='%s-enq-to-sync' % self._sid, profiler=local_prof, logger=self._logger) self._logger.debug( 'Task %s published to pending queue' % task.uid) if scheduled_stages: for executable_stage in scheduled_stages: transition(obj=executable_stage, obj_type='Stage', new_state=states.SCHEDULED, channel=mq_channel, queue='%s-enq-to-sync' % self._sid, profiler=local_prof, logger=self._logger) # Appease pika cos it thinks the connection is dead now = time.time() if now - last >= self._rmq_ping_interval: mq_connection.process_data_events() last = now self._logger.info('Enqueue thread terminated') mq_connection.close() local_prof.prof('terminating enqueue-thread', uid=self._uid) except KeyboardInterrupt: self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' + 'trying to cancel enqueuer thread gracefully...') mq_connection.close() raise KeyboardInterrupt except Exception, ex: self._logger.exception('Error in enqueue-thread: %s' % ex) try: mq_connection.close() except Exception as ex: self._logger.warning('mq_connection not created, %s' % ex) raise
[ "def", "_enqueue", "(", "self", ",", "local_prof", ")", ":", "try", ":", "local_prof", ".", "prof", "(", "'enqueue-thread started'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_logger", ".", "info", "(", "'enqueue-thread started'", ")", "# Acquire a connection+channel to the rmq server", "mq_connection", "=", "pika", ".", "BlockingConnection", "(", "pika", ".", "ConnectionParameters", "(", "host", "=", "self", ".", "_mq_hostname", ",", "port", "=", "self", ".", "_port", ")", ")", "mq_channel", "=", "mq_connection", ".", "channel", "(", ")", "last", "=", "time", ".", "time", "(", ")", "while", "not", "self", ".", "_enqueue_thread_terminate", ".", "is_set", "(", ")", ":", "'''\n We iterate through all pipelines to collect tasks from\n stages that are pending scheduling. Once collected, these tasks\n will be communicated to the tmgr in bulk.\n '''", "workload", "=", "[", "]", "scheduled_stages", "=", "[", "]", "for", "pipe", "in", "self", ".", "_workflow", ":", "with", "pipe", ".", "lock", ":", "if", "(", "(", "not", "pipe", ".", "completed", ")", "and", "(", "not", "pipe", ".", "state", "==", "states", ".", "SUSPENDED", ")", ")", ":", "# Test if the pipeline is already in the final state", "if", "pipe", ".", "state", "in", "states", ".", "FINAL", ":", "continue", "elif", "pipe", ".", "state", "==", "states", ".", "INITIAL", ":", "# Set state of pipeline to SCHEDULING if it is in INITIAL", "transition", "(", "obj", "=", "pipe", ",", "obj_type", "=", "'Pipeline'", ",", "new_state", "=", "states", ".", "SCHEDULING", ",", "channel", "=", "mq_channel", ",", "queue", "=", "'%s-enq-to-sync'", "%", "self", ".", "_sid", ",", "profiler", "=", "local_prof", ",", "logger", "=", "self", ".", "_logger", ")", "executable_stage", "=", "pipe", ".", "stages", "[", "pipe", ".", "current_stage", "-", "1", "]", "if", "not", "executable_stage", ".", "uid", ":", "executable_stage", ".", "parent_pipeline", "[", "'uid'", "]", "=", "pipe", ".", "uid", "executable_stage", ".", "parent_pipeline", "[", "'name'", "]", "=", "pipe", ".", "name", "executable_stage", ".", "_assign_uid", "(", "self", ".", "_sid", ")", "if", "executable_stage", ".", "state", "in", "[", "states", ".", "INITIAL", ",", "states", ".", "SCHEDULED", "]", ":", "if", "executable_stage", ".", "state", "==", "states", ".", "INITIAL", ":", "transition", "(", "obj", "=", "executable_stage", ",", "obj_type", "=", "'Stage'", ",", "new_state", "=", "states", ".", "SCHEDULING", ",", "channel", "=", "mq_channel", ",", "queue", "=", "'%s-enq-to-sync'", "%", "self", ".", "_sid", ",", "profiler", "=", "local_prof", ",", "logger", "=", "self", ".", "_logger", ")", "executable_tasks", "=", "executable_stage", ".", "tasks", "for", "executable_task", "in", "executable_tasks", ":", "if", "(", "executable_task", ".", "state", "==", "states", ".", "INITIAL", ")", "or", "(", "(", "executable_task", ".", "state", "==", "states", ".", "FAILED", ")", "and", "(", "self", ".", "_resubmit_failed", ")", ")", ":", "# Set state of Tasks in current Stage to SCHEDULING", "transition", "(", "obj", "=", "executable_task", ",", "obj_type", "=", "'Task'", ",", "new_state", "=", "states", ".", "SCHEDULING", ",", "channel", "=", "mq_channel", ",", "queue", "=", "'%s-enq-to-sync'", "%", "self", ".", "_sid", ",", "profiler", "=", "local_prof", ",", "logger", "=", "self", ".", "_logger", ")", "# task_as_dict = json.dumps(executable_task.to_dict())", "workload", ".", "append", "(", "executable_task", ")", "if", "executable_stage", "not", "in", "scheduled_stages", ":", "scheduled_stages", ".", "append", "(", "executable_stage", ")", "if", "workload", ":", "# Put the task on one of the pending_queues", "workload_as_dict", "=", "list", "(", ")", "for", "task", "in", "workload", ":", "workload_as_dict", ".", "append", "(", "task", ".", "to_dict", "(", ")", ")", "workload_as_dict", "=", "json", ".", "dumps", "(", "workload_as_dict", ")", "mq_channel", ".", "basic_publish", "(", "exchange", "=", "''", ",", "routing_key", "=", "self", ".", "_pending_queue", "[", "0", "]", ",", "body", "=", "workload_as_dict", "# properties=pika.BasicProperties(", "# make message persistent", "# delivery_mode = 2)", ")", "for", "task", "in", "workload", ":", "# Set state of Tasks in current Stage to SCHEDULED", "transition", "(", "obj", "=", "task", ",", "obj_type", "=", "'Task'", ",", "new_state", "=", "states", ".", "SCHEDULED", ",", "channel", "=", "mq_channel", ",", "queue", "=", "'%s-enq-to-sync'", "%", "self", ".", "_sid", ",", "profiler", "=", "local_prof", ",", "logger", "=", "self", ".", "_logger", ")", "self", ".", "_logger", ".", "debug", "(", "'Task %s published to pending queue'", "%", "task", ".", "uid", ")", "if", "scheduled_stages", ":", "for", "executable_stage", "in", "scheduled_stages", ":", "transition", "(", "obj", "=", "executable_stage", ",", "obj_type", "=", "'Stage'", ",", "new_state", "=", "states", ".", "SCHEDULED", ",", "channel", "=", "mq_channel", ",", "queue", "=", "'%s-enq-to-sync'", "%", "self", ".", "_sid", ",", "profiler", "=", "local_prof", ",", "logger", "=", "self", ".", "_logger", ")", "# Appease pika cos it thinks the connection is dead", "now", "=", "time", ".", "time", "(", ")", "if", "now", "-", "last", ">=", "self", ".", "_rmq_ping_interval", ":", "mq_connection", ".", "process_data_events", "(", ")", "last", "=", "now", "self", ".", "_logger", ".", "info", "(", "'Enqueue thread terminated'", ")", "mq_connection", ".", "close", "(", ")", "local_prof", ".", "prof", "(", "'terminating enqueue-thread'", ",", "uid", "=", "self", ".", "_uid", ")", "except", "KeyboardInterrupt", ":", "self", ".", "_logger", ".", "exception", "(", "'Execution interrupted by user (you probably hit Ctrl+C), '", "+", "'trying to cancel enqueuer thread gracefully...'", ")", "mq_connection", ".", "close", "(", ")", "raise", "KeyboardInterrupt", "except", "Exception", ",", "ex", ":", "self", ".", "_logger", ".", "exception", "(", "'Error in enqueue-thread: %s'", "%", "ex", ")", "try", ":", "mq_connection", ".", "close", "(", ")", "except", "Exception", "as", "ex", ":", "self", ".", "_logger", ".", "warning", "(", "'mq_connection not created, %s'", "%", "ex", ")", "raise" ]
**Purpose**: This is the function that is run in the enqueue thread. This function extracts Tasks from the copy of workflow that exists in the WFprocessor object and pushes them to the queues in the pending_q list. Since this thread works on the copy of the workflow, every state update to the Task, Stage and Pipeline is communicated back to the AppManager (master process) via the 'sync_with_master' function that has dedicated queues to communicate with the master. Details: Termination condition of this thread is set by the wfp process.
[ "**", "Purpose", "**", ":", "This", "is", "the", "function", "that", "is", "run", "in", "the", "enqueue", "thread", ".", "This", "function", "extracts", "Tasks", "from", "the", "copy", "of", "workflow", "that", "exists", "in", "the", "WFprocessor", "object", "and", "pushes", "them", "to", "the", "queues", "in", "the", "pending_q", "list", ".", "Since", "this", "thread", "works", "on", "the", "copy", "of", "the", "workflow", "every", "state", "update", "to", "the", "Task", "Stage", "and", "Pipeline", "is", "communicated", "back", "to", "the", "AppManager", "(", "master", "process", ")", "via", "the", "sync_with_master", "function", "that", "has", "dedicated", "queues", "to", "communicate", "with", "the", "master", "." ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/appman/wfprocessor.py#L106-L271
train
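The core of the enqueue loop above is a bulk publish: task dicts are batched, JSON-encoded, and sent as one message. A minimal pika sketch of that step, assuming a local broker and placeholder queue names rather than EnTK's session-derived ones:

import json
import pika

def publish_workload(tasks, queue='pending-0', host='localhost'):
    conn = pika.BlockingConnection(pika.ConnectionParameters(host=host))
    channel = conn.channel()
    channel.queue_declare(queue=queue)
    # one message carries the whole batch, as in the record above
    channel.basic_publish(exchange='', routing_key=queue,
                          body=json.dumps(list(tasks)))
    conn.close()

# publish_workload([{'uid': 'task.0000', 'state': 'SCHEDULED'}])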
radical-cybertools/radical.entk
src/radical/entk/appman/wfprocessor.py
WFprocessor._dequeue
def _dequeue(self, local_prof): """ **Purpose**: This is the function that is run in the dequeue thread. This function extracts Tasks from the completed queues and updates the copy of workflow that exists in the WFprocessor object. Since this thread works on the copy of the workflow, every state update to the Task, Stage and Pipeline is communicated back to the AppManager (master process) via the 'sync_with_master' function that has dedicated queues to communicate with the master. Details: Termination condition of this thread is set by the wfp process. """ try: local_prof.prof('dequeue-thread started', uid=self._uid) self._logger.info('Dequeue thread started') mq_connection = pika.BlockingConnection( pika.ConnectionParameters(host=self._mq_hostname, port=self._port)) mq_channel = mq_connection.channel() last = time.time() while not self._dequeue_thread_terminate.is_set(): try: method_frame, header_frame, body = mq_channel.basic_get( queue=self._completed_queue[0]) if body: # Get task from the message completed_task = Task() completed_task.from_dict(json.loads(body)) self._logger.info( 'Got finished task %s from queue' % (completed_task.uid)) transition(obj=completed_task, obj_type='Task', new_state=states.DEQUEUEING, channel=mq_channel, queue='%s-deq-to-sync' % self._sid, profiler=local_prof, logger=self._logger) # Traverse the entire workflow to find out the correct Task for pipe in self._workflow: with pipe.lock: if ((not pipe.completed) and (not pipe.state == states.SUSPENDED)): if completed_task.parent_pipeline['uid'] == pipe.uid: self._logger.debug( 'Found parent pipeline: %s' % pipe.uid) for stage in pipe.stages: if completed_task.parent_stage['uid'] == stage.uid: self._logger.debug( 'Found parent stage: %s' % (stage.uid)) transition(obj=completed_task, obj_type='Task', new_state=states.DEQUEUED, channel=mq_channel, queue='%s-deq-to-sync' % self._sid, profiler=local_prof, logger=self._logger) if not completed_task.exit_code: completed_task.state = states.DONE else: completed_task.state = states.FAILED for task in stage.tasks: if task.uid == completed_task.uid: task.state = str( completed_task.state) if (task.state == states.FAILED) and (self._resubmit_failed): task.state = states.INITIAL transition(obj=task, obj_type='Task', new_state=task.state, channel=mq_channel, queue='%s-deq-to-sync' % self._sid, profiler=local_prof, logger=self._logger) if stage._check_stage_complete(): transition(obj=stage, obj_type='Stage', new_state=states.DONE, channel=mq_channel, queue='%s-deq-to-sync' % self._sid, profiler=local_prof, logger=self._logger) # Check if Stage has a post-exec that needs to be # executed if stage.post_exec: try: self._logger.info('Executing post-exec for stage %s' % stage.uid) self._prof.prof('Adap: executing post-exec', uid=self._uid) stage.post_exec() self._logger.info( 'Post-exec executed for stage %s' % stage.uid) self._prof.prof( 'Adap: post-exec executed', uid=self._uid) except Exception, ex: self._logger.exception('Execution failed in post_exec of stage %s' % stage.uid) raise pipe._increment_stage() if pipe.completed: transition(obj=pipe, obj_type='Pipeline', new_state=states.DONE, channel=mq_channel, queue='%s-deq-to-sync' % self._sid, profiler=local_prof, logger=self._logger) # Found the task and processed it -- no more iterations needed break # Found the stage and processed it -- no more iterations needed break # Found the pipeline and processed it -- no more iterations needed break mq_channel.basic_ack( delivery_tag=method_frame.delivery_tag) # Appease pika cos it thinks the connection is dead now = time.time() if now - last >= self._rmq_ping_interval: mq_connection.process_data_events() last = now except Exception, ex: self._logger.exception( 'Unable to receive message from completed queue: %s' % ex) raise self._logger.info('Terminated dequeue thread') mq_connection.close() local_prof.prof('terminating dequeue-thread', uid=self._uid) except KeyboardInterrupt: self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' + 'trying to exit gracefully...') mq_connection.close() raise KeyboardInterrupt except Exception, ex: self._logger.exception('Error in dequeue-thread: %s' % ex) try: mq_connection.close() except: self._logger.warning('mq_connection not created') raise EnTKError(ex)
python
def _dequeue(self, local_prof): """ **Purpose**: This is the function that is run in the dequeue thread. This function extracts Tasks from the completed queues and updates the copy of workflow that exists in the WFprocessor object. Since this thread works on the copy of the workflow, every state update to the Task, Stage and Pipeline is communicated back to the AppManager (master process) via the 'sync_with_master' function that has dedicated queues to communicate with the master. Details: Termination condition of this thread is set by the wfp process. """ try: local_prof.prof('dequeue-thread started', uid=self._uid) self._logger.info('Dequeue thread started') mq_connection = pika.BlockingConnection( pika.ConnectionParameters(host=self._mq_hostname, port=self._port)) mq_channel = mq_connection.channel() last = time.time() while not self._dequeue_thread_terminate.is_set(): try: method_frame, header_frame, body = mq_channel.basic_get( queue=self._completed_queue[0]) if body: # Get task from the message completed_task = Task() completed_task.from_dict(json.loads(body)) self._logger.info( 'Got finished task %s from queue' % (completed_task.uid)) transition(obj=completed_task, obj_type='Task', new_state=states.DEQUEUEING, channel=mq_channel, queue='%s-deq-to-sync' % self._sid, profiler=local_prof, logger=self._logger) # Traverse the entire workflow to find out the correct Task for pipe in self._workflow: with pipe.lock: if ((not pipe.completed) and (not pipe.state == states.SUSPENDED)): if completed_task.parent_pipeline['uid'] == pipe.uid: self._logger.debug( 'Found parent pipeline: %s' % pipe.uid) for stage in pipe.stages: if completed_task.parent_stage['uid'] == stage.uid: self._logger.debug( 'Found parent stage: %s' % (stage.uid)) transition(obj=completed_task, obj_type='Task', new_state=states.DEQUEUED, channel=mq_channel, queue='%s-deq-to-sync' % self._sid, profiler=local_prof, logger=self._logger) if not completed_task.exit_code: completed_task.state = states.DONE else: completed_task.state = states.FAILED for task in stage.tasks: if task.uid == completed_task.uid: task.state = str( completed_task.state) if (task.state == states.FAILED) and (self._resubmit_failed): task.state = states.INITIAL transition(obj=task, obj_type='Task', new_state=task.state, channel=mq_channel, queue='%s-deq-to-sync' % self._sid, profiler=local_prof, logger=self._logger) if stage._check_stage_complete(): transition(obj=stage, obj_type='Stage', new_state=states.DONE, channel=mq_channel, queue='%s-deq-to-sync' % self._sid, profiler=local_prof, logger=self._logger) # Check if Stage has a post-exec that needs to be # executed if stage.post_exec: try: self._logger.info('Executing post-exec for stage %s' % stage.uid) self._prof.prof('Adap: executing post-exec', uid=self._uid) stage.post_exec() self._logger.info( 'Post-exec executed for stage %s' % stage.uid) self._prof.prof( 'Adap: post-exec executed', uid=self._uid) except Exception, ex: self._logger.exception('Execution failed in post_exec of stage %s' % stage.uid) raise pipe._increment_stage() if pipe.completed: transition(obj=pipe, obj_type='Pipeline', new_state=states.DONE, channel=mq_channel, queue='%s-deq-to-sync' % self._sid, profiler=local_prof, logger=self._logger) # Found the task and processed it -- no more iterations needed break # Found the stage and processed it -- no more iterations needed break # Found the pipeline and processed it -- no more iterations needed break mq_channel.basic_ack( delivery_tag=method_frame.delivery_tag) # Appease pika cos it thinks the connection is dead now = time.time() if now - last >= self._rmq_ping_interval: mq_connection.process_data_events() last = now except Exception, ex: self._logger.exception( 'Unable to receive message from completed queue: %s' % ex) raise self._logger.info('Terminated dequeue thread') mq_connection.close() local_prof.prof('terminating dequeue-thread', uid=self._uid) except KeyboardInterrupt: self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' + 'trying to exit gracefully...') mq_connection.close() raise KeyboardInterrupt except Exception, ex: self._logger.exception('Error in dequeue-thread: %s' % ex) try: mq_connection.close() except: self._logger.warning('mq_connection not created') raise EnTKError(ex)
[ "def", "_dequeue", "(", "self", ",", "local_prof", ")", ":", "try", ":", "local_prof", ".", "prof", "(", "'dequeue-thread started'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_logger", ".", "info", "(", "'Dequeue thread started'", ")", "mq_connection", "=", "pika", ".", "BlockingConnection", "(", "pika", ".", "ConnectionParameters", "(", "host", "=", "self", ".", "_mq_hostname", ",", "port", "=", "self", ".", "_port", ")", ")", "mq_channel", "=", "mq_connection", ".", "channel", "(", ")", "last", "=", "time", ".", "time", "(", ")", "while", "not", "self", ".", "_dequeue_thread_terminate", ".", "is_set", "(", ")", ":", "try", ":", "method_frame", ",", "header_frame", ",", "body", "=", "mq_channel", ".", "basic_get", "(", "queue", "=", "self", ".", "_completed_queue", "[", "0", "]", ")", "if", "body", ":", "# Get task from the message", "completed_task", "=", "Task", "(", ")", "completed_task", ".", "from_dict", "(", "json", ".", "loads", "(", "body", ")", ")", "self", ".", "_logger", ".", "info", "(", "'Got finished task %s from queue'", "%", "(", "completed_task", ".", "uid", ")", ")", "transition", "(", "obj", "=", "completed_task", ",", "obj_type", "=", "'Task'", ",", "new_state", "=", "states", ".", "DEQUEUEING", ",", "channel", "=", "mq_channel", ",", "queue", "=", "'%s-deq-to-sync'", "%", "self", ".", "_sid", ",", "profiler", "=", "local_prof", ",", "logger", "=", "self", ".", "_logger", ")", "# Traverse the entire workflow to find out the correct Task", "for", "pipe", "in", "self", ".", "_workflow", ":", "with", "pipe", ".", "lock", ":", "if", "(", "(", "not", "pipe", ".", "completed", ")", "and", "(", "not", "pipe", ".", "state", "==", "states", ".", "SUSPENDED", ")", ")", ":", "if", "completed_task", ".", "parent_pipeline", "[", "'uid'", "]", "==", "pipe", ".", "uid", ":", "self", ".", "_logger", ".", "debug", "(", "'Found parent pipeline: %s'", "%", "pipe", ".", "uid", ")", "for", "stage", "in", "pipe", ".", "stages", ":", "if", "completed_task", ".", "parent_stage", "[", "'uid'", "]", "==", "stage", ".", "uid", ":", "self", ".", "_logger", ".", "debug", "(", "'Found parent stage: %s'", "%", "(", "stage", ".", "uid", ")", ")", "transition", "(", "obj", "=", "completed_task", ",", "obj_type", "=", "'Task'", ",", "new_state", "=", "states", ".", "DEQUEUED", ",", "channel", "=", "mq_channel", ",", "queue", "=", "'%s-deq-to-sync'", "%", "self", ".", "_sid", ",", "profiler", "=", "local_prof", ",", "logger", "=", "self", ".", "_logger", ")", "if", "not", "completed_task", ".", "exit_code", ":", "completed_task", ".", "state", "=", "states", ".", "DONE", "else", ":", "completed_task", ".", "state", "=", "states", ".", "FAILED", "for", "task", "in", "stage", ".", "tasks", ":", "if", "task", ".", "uid", "==", "completed_task", ".", "uid", ":", "task", ".", "state", "=", "str", "(", "completed_task", ".", "state", ")", "if", "(", "task", ".", "state", "==", "states", ".", "FAILED", ")", "and", "(", "self", ".", "_resubmit_failed", ")", ":", "task", ".", "state", "=", "states", ".", "INITIAL", "transition", "(", "obj", "=", "task", ",", "obj_type", "=", "'Task'", ",", "new_state", "=", "task", ".", "state", ",", "channel", "=", "mq_channel", ",", "queue", "=", "'%s-deq-to-sync'", "%", "self", ".", "_sid", ",", "profiler", "=", "local_prof", ",", "logger", "=", "self", ".", "_logger", ")", "if", "stage", ".", "_check_stage_complete", "(", ")", ":", "transition", "(", "obj", "=", "stage", ",", "obj_type", "=", "'Stage'", ",", "new_state", "=", "states", ".", "DONE", ",", "channel", "=", "mq_channel", ",", "queue", "=", "'%s-deq-to-sync'", "%", "self", ".", "_sid", ",", "profiler", "=", "local_prof", ",", "logger", "=", "self", ".", "_logger", ")", "# Check if Stage has a post-exec that needs to be", "# executed", "if", "stage", ".", "post_exec", ":", "try", ":", "self", ".", "_logger", ".", "info", "(", "'Executing post-exec for stage %s'", "%", "stage", ".", "uid", ")", "self", ".", "_prof", ".", "prof", "(", "'Adap: executing post-exec'", ",", "uid", "=", "self", ".", "_uid", ")", "stage", ".", "post_exec", "(", ")", "self", ".", "_logger", ".", "info", "(", "'Post-exec executed for stage %s'", "%", "stage", ".", "uid", ")", "self", ".", "_prof", ".", "prof", "(", "'Adap: post-exec executed'", ",", "uid", "=", "self", ".", "_uid", ")", "except", "Exception", ",", "ex", ":", "self", ".", "_logger", ".", "exception", "(", "'Execution failed in post_exec of stage %s'", "%", "stage", ".", "uid", ")", "raise", "pipe", ".", "_increment_stage", "(", ")", "if", "pipe", ".", "completed", ":", "transition", "(", "obj", "=", "pipe", ",", "obj_type", "=", "'Pipeline'", ",", "new_state", "=", "states", ".", "DONE", ",", "channel", "=", "mq_channel", ",", "queue", "=", "'%s-deq-to-sync'", "%", "self", ".", "_sid", ",", "profiler", "=", "local_prof", ",", "logger", "=", "self", ".", "_logger", ")", "# Found the task and processed it -- no more iterations needed", "break", "# Found the stage and processed it -- no more iterations needed", "break", "# Found the pipeline and processed it -- no more iterations needed", "break", "mq_channel", ".", "basic_ack", "(", "delivery_tag", "=", "method_frame", ".", "delivery_tag", ")", "# Appease pika cos it thinks the connection is dead", "now", "=", "time", ".", "time", "(", ")", "if", "now", "-", "last", ">=", "self", ".", "_rmq_ping_interval", ":", "mq_connection", ".", "process_data_events", "(", ")", "last", "=", "now", "except", "Exception", ",", "ex", ":", "self", ".", "_logger", ".", "exception", "(", "'Unable to receive message from completed queue: %s'", "%", "ex", ")", "raise", "self", ".", "_logger", ".", "info", "(", "'Terminated dequeue thread'", ")", "mq_connection", ".", "close", "(", ")", "local_prof", ".", "prof", "(", "'terminating dequeue-thread'", ",", "uid", "=", "self", ".", "_uid", ")", "except", "KeyboardInterrupt", ":", "self", ".", "_logger", ".", "exception", "(", "'Execution interrupted by user (you probably hit Ctrl+C), '", "+", "'trying to exit gracefully...'", ")", "mq_connection", ".", "close", "(", ")", "raise", "KeyboardInterrupt", "except", "Exception", ",", "ex", ":", "self", ".", "_logger", ".", "exception", "(", "'Error in dequeue-thread: %s'", "%", "ex", ")", "try", ":", "mq_connection", ".", "close", "(", ")", "except", ":", "self", ".", "_logger", ".", "warning", "(", "'mq_connection not created'", ")", "raise", "EnTKError", "(", "ex", ")" ]
**Purpose**: This is the function that is run in the dequeue thread. This function extracts Tasks from the completed queues and updates the copy of workflow that exists in the WFprocessor object. Since this thread works on the copy of the workflow, every state update to the Task, Stage and Pipeline is communicated back to the AppManager (master process) via the 'sync_with_master' function that has dedicated queues to communicate with the master. Details: Termination condition of this thread is set by the wfp process.
[ "**", "Purpose", "**", ":", "This", "is", "the", "function", "that", "is", "run", "in", "the", "dequeue", "thread", ".", "This", "function", "extracts", "Tasks", "from", "the", "completed", "queus", "and", "updates", "the", "copy", "of", "workflow", "that", "exists", "in", "the", "WFprocessor", "object", ".", "Since", "this", "thread", "works", "on", "the", "copy", "of", "the", "workflow", "every", "state", "update", "to", "the", "Task", "Stage", "and", "Pipeline", "is", "communicated", "back", "to", "the", "AppManager", "(", "master", "process", ")", "via", "the", "sync_with_master", "function", "that", "has", "dedicated", "queues", "to", "communicate", "with", "the", "master", "." ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/appman/wfprocessor.py#L273-L456
train
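A minimal sketch of the poll/ack/keepalive loop the dequeue thread above implements. Queue name, host and port are placeholders, a reachable RabbitMQ broker is assumed, and drain_completed is a hypothetical name; the real thread additionally walks the workflow copy and publishes state transitions for every Task, Stage and Pipeline it touches.

import json
import time

import pika


def drain_completed(queue_name, terminate, host='localhost', port=5672,
                    ping_interval=60):
    # Poll the completed queue until the terminate event is set.
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=host, port=port))
    channel = connection.channel()
    last_ping = time.time()
    try:
        while not terminate.is_set():
            method, _header, body = channel.basic_get(queue=queue_name)
            if body:
                task_dict = json.loads(body)
                print('completed task: %s' % task_dict.get('uid'))
                channel.basic_ack(delivery_tag=method.delivery_tag)
            # Appease pika: keep the blocking connection alive while idle.
            now = time.time()
            if now - last_ping >= ping_interval:
                connection.process_data_events()
                last_ping = now
    finally:
        connection.close()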
radical-cybertools/radical.entk
src/radical/entk/appman/wfprocessor.py
WFprocessor._wfp
def _wfp(self): """ **Purpose**: This is the function executed in the wfp process. The function is used to simply create and spawn two threads: enqueue, dequeue. The enqueue thread pushes ready tasks to the queues in the pending_q slow list whereas the dequeue thread pulls completed tasks from the queues in the completed_q. This function is also responsible for the termination of these threads and hence blocking. """ try: local_prof = ru.Profiler( name='radical.entk.%s' % self._uid + '-proc', path=self._path) local_prof.prof('wfp process started', uid=self._uid) self._logger.info('WFprocessor started') # Process should run till terminate condtion is encountered while (not self._wfp_terminate.is_set()): try: # Start dequeue thread if (not self._dequeue_thread) or (not self._dequeue_thread.is_alive()): local_prof.prof( 'creating dequeue-thread', uid=self._uid) self._dequeue_thread = threading.Thread( target=self._dequeue, args=(local_prof,), name='dequeue-thread') self._logger.info('Starting dequeue-thread') local_prof.prof( 'starting dequeue-thread', uid=self._uid) self._dequeue_thread.start() # Start enqueue thread if (not self._enqueue_thread) or (not self._enqueue_thread.is_alive()): local_prof.prof( 'creating enqueue-thread', uid=self._uid) self._enqueue_thread = threading.Thread( target=self._enqueue, args=(local_prof,), name='enqueue-thread') self._logger.info('Starting enqueue-thread') local_prof.prof( 'starting enqueue-thread', uid=self._uid) self._enqueue_thread.start() except Exception, ex: self._logger.exception('WFProcessor interrupted') raise local_prof.prof('start termination', uid=self._uid) self._logger.info('Terminating enqueue-thread') self._enqueue_thread_terminate.set() self._enqueue_thread.join() self._logger.info('Terminating dequeue-thread') self._dequeue_thread_terminate.set() self._dequeue_thread.join() local_prof.prof('termination done', uid=self._uid) local_prof.prof('terminating wfp process', uid=self._uid) local_prof.close() except KeyboardInterrupt: self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' + 'trying to cancel wfprocessor process gracefully...') if self._enqueue_thread: if not self._enqueue_thread_terminate.is_set(): self._logger.info('Terminating enqueue-thread') self._enqueue_thread_terminate.set() self._enqueue_thread.join() if self._dequeue_thread: if not self._dequeue_thread_terminate.is_set(): self._logger.info('Terminating dequeue-thread') self._dequeue_thread_terminate.set() self._dequeue_thread.join() self._logger.info('WFprocessor process terminated') raise KeyboardInterrupt except Exception, ex: self._logger.exception( 'Error in wfp process: %s. \n Closing enqueue, dequeue threads' % ex) if self._enqueue_thread: if not self._enqueue_thread_terminate.is_set(): self._logger.info('Terminating enqueue-thread') self._enqueue_thread_terminate.set() self._enqueue_thread.join() if self._dequeue_thread: if not self._dequeue_thread_terminate.is_set(): self._logger.info('Terminating dequeue-thread') self._dequeue_thread_terminate.set() self._dequeue_thread.join() self._logger.info('WFprocessor process terminated') self._logger.exception('%s failed with %s'%(self._uid, ex)) raise EnTKError(ex)
python
def _wfp(self): """ **Purpose**: This is the function executed in the wfp process. The function is used to simply create and spawn two threads: enqueue, dequeue. The enqueue thread pushes ready tasks to the queues in the pending_q slow list whereas the dequeue thread pulls completed tasks from the queues in the completed_q. This function is also responsible for the termination of these threads and hence blocking. """ try: local_prof = ru.Profiler( name='radical.entk.%s' % self._uid + '-proc', path=self._path) local_prof.prof('wfp process started', uid=self._uid) self._logger.info('WFprocessor started') # Process should run till terminate condtion is encountered while (not self._wfp_terminate.is_set()): try: # Start dequeue thread if (not self._dequeue_thread) or (not self._dequeue_thread.is_alive()): local_prof.prof( 'creating dequeue-thread', uid=self._uid) self._dequeue_thread = threading.Thread( target=self._dequeue, args=(local_prof,), name='dequeue-thread') self._logger.info('Starting dequeue-thread') local_prof.prof( 'starting dequeue-thread', uid=self._uid) self._dequeue_thread.start() # Start enqueue thread if (not self._enqueue_thread) or (not self._enqueue_thread.is_alive()): local_prof.prof( 'creating enqueue-thread', uid=self._uid) self._enqueue_thread = threading.Thread( target=self._enqueue, args=(local_prof,), name='enqueue-thread') self._logger.info('Starting enqueue-thread') local_prof.prof( 'starting enqueue-thread', uid=self._uid) self._enqueue_thread.start() except Exception, ex: self._logger.exception('WFProcessor interrupted') raise local_prof.prof('start termination', uid=self._uid) self._logger.info('Terminating enqueue-thread') self._enqueue_thread_terminate.set() self._enqueue_thread.join() self._logger.info('Terminating dequeue-thread') self._dequeue_thread_terminate.set() self._dequeue_thread.join() local_prof.prof('termination done', uid=self._uid) local_prof.prof('terminating wfp process', uid=self._uid) local_prof.close() except KeyboardInterrupt: self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' + 'trying to cancel wfprocessor process gracefully...') if self._enqueue_thread: if not self._enqueue_thread_terminate.is_set(): self._logger.info('Terminating enqueue-thread') self._enqueue_thread_terminate.set() self._enqueue_thread.join() if self._dequeue_thread: if not self._dequeue_thread_terminate.is_set(): self._logger.info('Terminating dequeue-thread') self._dequeue_thread_terminate.set() self._dequeue_thread.join() self._logger.info('WFprocessor process terminated') raise KeyboardInterrupt except Exception, ex: self._logger.exception( 'Error in wfp process: %s. \n Closing enqueue, dequeue threads' % ex) if self._enqueue_thread: if not self._enqueue_thread_terminate.is_set(): self._logger.info('Terminating enqueue-thread') self._enqueue_thread_terminate.set() self._enqueue_thread.join() if self._dequeue_thread: if not self._dequeue_thread_terminate.is_set(): self._logger.info('Terminating dequeue-thread') self._dequeue_thread_terminate.set() self._dequeue_thread.join() self._logger.info('WFprocessor process terminated') self._logger.exception('%s failed with %s'%(self._uid, ex)) raise EnTKError(ex)
[ "def", "_wfp", "(", "self", ")", ":", "try", ":", "local_prof", "=", "ru", ".", "Profiler", "(", "name", "=", "'radical.entk.%s'", "%", "self", ".", "_uid", "+", "'-proc'", ",", "path", "=", "self", ".", "_path", ")", "local_prof", ".", "prof", "(", "'wfp process started'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_logger", ".", "info", "(", "'WFprocessor started'", ")", "# Process should run till terminate condtion is encountered", "while", "(", "not", "self", ".", "_wfp_terminate", ".", "is_set", "(", ")", ")", ":", "try", ":", "# Start dequeue thread", "if", "(", "not", "self", ".", "_dequeue_thread", ")", "or", "(", "not", "self", ".", "_dequeue_thread", ".", "is_alive", "(", ")", ")", ":", "local_prof", ".", "prof", "(", "'creating dequeue-thread'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_dequeue_thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_dequeue", ",", "args", "=", "(", "local_prof", ",", ")", ",", "name", "=", "'dequeue-thread'", ")", "self", ".", "_logger", ".", "info", "(", "'Starting dequeue-thread'", ")", "local_prof", ".", "prof", "(", "'starting dequeue-thread'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_dequeue_thread", ".", "start", "(", ")", "# Start enqueue thread", "if", "(", "not", "self", ".", "_enqueue_thread", ")", "or", "(", "not", "self", ".", "_enqueue_thread", ".", "is_alive", "(", ")", ")", ":", "local_prof", ".", "prof", "(", "'creating enqueue-thread'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_enqueue_thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_enqueue", ",", "args", "=", "(", "local_prof", ",", ")", ",", "name", "=", "'enqueue-thread'", ")", "self", ".", "_logger", ".", "info", "(", "'Starting enqueue-thread'", ")", "local_prof", ".", "prof", "(", "'starting enqueue-thread'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_enqueue_thread", ".", "start", "(", ")", "except", "Exception", ",", "ex", ":", "self", ".", "_logger", ".", "exception", "(", "'WFProcessor interrupted'", ")", "raise", "local_prof", ".", "prof", "(", "'start termination'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_logger", ".", "info", "(", "'Terminating enqueue-thread'", ")", "self", ".", "_enqueue_thread_terminate", ".", "set", "(", ")", "self", ".", "_enqueue_thread", ".", "join", "(", ")", "self", ".", "_logger", ".", "info", "(", "'Terminating dequeue-thread'", ")", "self", ".", "_dequeue_thread_terminate", ".", "set", "(", ")", "self", ".", "_dequeue_thread", ".", "join", "(", ")", "local_prof", ".", "prof", "(", "'termination done'", ",", "uid", "=", "self", ".", "_uid", ")", "local_prof", ".", "prof", "(", "'terminating wfp process'", ",", "uid", "=", "self", ".", "_uid", ")", "local_prof", ".", "close", "(", ")", "except", "KeyboardInterrupt", ":", "self", ".", "_logger", ".", "exception", "(", "'Execution interrupted by user (you probably hit Ctrl+C), '", "+", "'trying to cancel wfprocessor process gracefully...'", ")", "if", "self", ".", "_enqueue_thread", ":", "if", "not", "self", ".", "_enqueue_thread_terminate", ".", "is_set", "(", ")", ":", "self", ".", "_logger", ".", "info", "(", "'Terminating enqueue-thread'", ")", "self", ".", "_enqueue_thread_terminate", ".", "set", "(", ")", "self", ".", "_enqueue_thread", ".", "join", "(", ")", "if", "self", ".", "_dequeue_thread", ":", "if", "not", "self", ".", "_dequeue_thread_terminate", ".", "is_set", "(", ")", ":", "self", ".", "_logger", ".", "info", "(", "'Terminating 
dequeue-thread'", ")", "self", ".", "_dequeue_thread_terminate", ".", "set", "(", ")", "self", ".", "_dequeue_thread", ".", "join", "(", ")", "self", ".", "_logger", ".", "info", "(", "'WFprocessor process terminated'", ")", "raise", "KeyboardInterrupt", "except", "Exception", ",", "ex", ":", "self", ".", "_logger", ".", "exception", "(", "'Error in wfp process: %s. \\n Closing enqueue, dequeue threads'", "%", "ex", ")", "if", "self", ".", "_enqueue_thread", ":", "if", "not", "self", ".", "_enqueue_thread_terminate", ".", "is_set", "(", ")", ":", "self", ".", "_logger", ".", "info", "(", "'Terminating enqueue-thread'", ")", "self", ".", "_enqueue_thread_terminate", ".", "set", "(", ")", "self", ".", "_enqueue_thread", ".", "join", "(", ")", "if", "self", ".", "_dequeue_thread", ":", "if", "not", "self", ".", "_dequeue_thread_terminate", ".", "is_set", "(", ")", ":", "self", ".", "_logger", ".", "info", "(", "'Terminating dequeue-thread'", ")", "self", ".", "_dequeue_thread_terminate", ".", "set", "(", ")", "self", ".", "_dequeue_thread", ".", "join", "(", ")", "self", ".", "_logger", ".", "info", "(", "'WFprocessor process terminated'", ")", "self", ".", "_logger", ".", "exception", "(", "'%s failed with %s'", "%", "(", "self", ".", "_uid", ",", "ex", ")", ")", "raise", "EnTKError", "(", "ex", ")" ]
**Purpose**: This is the function executed in the wfp process. The function is used to simply create and spawn two threads: enqueue, dequeue. The enqueue thread pushes ready tasks to the queues in the pending_q slow list whereas the dequeue thread pulls completed tasks from the queues in the completed_q. This function is also responsible for the termination of these threads and hence blocking.
[ "**", "Purpose", "**", ":", "This", "is", "the", "function", "executed", "in", "the", "wfp", "process", ".", "The", "function", "is", "used", "to", "simply", "create", "and", "spawn", "two", "threads", ":", "enqueue", "dequeue", ".", "The", "enqueue", "thread", "pushes", "ready", "tasks", "to", "the", "queues", "in", "the", "pending_q", "slow", "list", "whereas", "the", "dequeue", "thread", "pulls", "completed", "tasks", "from", "the", "queues", "in", "the", "completed_q", ".", "This", "function", "is", "also", "responsible", "for", "the", "termination", "of", "these", "threads", "and", "hence", "blocking", "." ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/appman/wfprocessor.py#L458-L569
train
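At its core _wfp is a supervisor loop: keep two worker threads alive until a terminate event fires, then signal and join them. A generic sketch of that pattern, assuming each worker callable watches its own stop flag (supervise and the worker mapping are hypothetical names):

import threading
import time


def supervise(workers, terminate, poll=0.1):
    # workers maps thread names to callables,
    # e.g. {'enqueue': fn1, 'dequeue': fn2}.
    threads = dict.fromkeys(workers)
    while not terminate.is_set():
        for name, target in workers.items():
            thread = threads[name]
            if thread is None or not thread.is_alive():
                thread = threading.Thread(target=target, name=name)
                thread.start()
                threads[name] = thread
        time.sleep(poll)        # avoid a busy spin between liveness checks
    for thread in threads.values():
        if thread is not None:
            thread.join()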
radical-cybertools/radical.entk
src/radical/entk/appman/wfprocessor.py
WFprocessor.start_processor
def start_processor(self): """ **Purpose**: Method to start the wfp process. The wfp function is not to be accessed directly. The function is started in a separate process using this method. """ if not self._wfp_process: try: self._prof.prof('creating wfp process', uid=self._uid) self._wfp_process = Process( target=self._wfp, name='wfprocessor') self._enqueue_thread = None self._dequeue_thread = None self._enqueue_thread_terminate = threading.Event() self._dequeue_thread_terminate = threading.Event() self._wfp_terminate = Event() self._logger.info('Starting WFprocessor process') self._prof.prof('starting wfp process', uid=self._uid) self._wfp_process.start() return True except Exception, ex: self._logger.exception('WFprocessor not started') self.terminate_processor() raise else: self._logger.warn( 'Wfp process already running, attempted to restart!')
python
def start_processor(self): """ **Purpose**: Method to start the wfp process. The wfp function is not to be accessed directly. The function is started in a separate process using this method. """ if not self._wfp_process: try: self._prof.prof('creating wfp process', uid=self._uid) self._wfp_process = Process( target=self._wfp, name='wfprocessor') self._enqueue_thread = None self._dequeue_thread = None self._enqueue_thread_terminate = threading.Event() self._dequeue_thread_terminate = threading.Event() self._wfp_terminate = Event() self._logger.info('Starting WFprocessor process') self._prof.prof('starting wfp process', uid=self._uid) self._wfp_process.start() return True except Exception, ex: self._logger.exception('WFprocessor not started') self.terminate_processor() raise else: self._logger.warn( 'Wfp process already running, attempted to restart!')
[ "def", "start_processor", "(", "self", ")", ":", "if", "not", "self", ".", "_wfp_process", ":", "try", ":", "self", ".", "_prof", ".", "prof", "(", "'creating wfp process'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_wfp_process", "=", "Process", "(", "target", "=", "self", ".", "_wfp", ",", "name", "=", "'wfprocessor'", ")", "self", ".", "_enqueue_thread", "=", "None", "self", ".", "_dequeue_thread", "=", "None", "self", ".", "_enqueue_thread_terminate", "=", "threading", ".", "Event", "(", ")", "self", ".", "_dequeue_thread_terminate", "=", "threading", ".", "Event", "(", ")", "self", ".", "_wfp_terminate", "=", "Event", "(", ")", "self", ".", "_logger", ".", "info", "(", "'Starting WFprocessor process'", ")", "self", ".", "_prof", ".", "prof", "(", "'starting wfp process'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_wfp_process", ".", "start", "(", ")", "return", "True", "except", "Exception", ",", "ex", ":", "self", ".", "_logger", ".", "exception", "(", "'WFprocessor not started'", ")", "self", ".", "terminate_processor", "(", ")", "raise", "else", ":", "self", ".", "_logger", ".", "warn", "(", "'Wfp process already running, attempted to restart!'", ")" ]
**Purpose**: Method to start the wfp process. The wfp function is not to be accessed directly. The function is started in a separate process using this method.
[ "**", "Purpose", "**", ":", "Method", "to", "start", "the", "wfp", "process", ".", "The", "wfp", "function", "is", "not", "to", "be", "accessed", "directly", ".", "The", "function", "is", "started", "in", "a", "separate", "process", "using", "this", "method", "." ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/appman/wfprocessor.py#L575-L610
train
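start_processor reduces to "spawn the child only if none is tracked". A stripped-down illustration of that guard; Runner and its attributes are hypothetical, not the EnTK API:

from multiprocessing import Event, Process


class Runner(object):

    def __init__(self):
        self._proc = None
        self._terminate = Event()

    def start(self, target):
        if self._proc is not None:
            return False            # already running: warn-and-skip behaviour
        self._proc = Process(target=target, name='worker')
        self._proc.start()
        return True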
radical-cybertools/radical.entk
src/radical/entk/appman/wfprocessor.py
WFprocessor.terminate_processor
def terminate_processor(self): """ **Purpose**: Method to terminate the wfp process. This method is blocking as it waits for the wfp process to terminate (aka join). """ try: if self.check_processor(): self._logger.debug( 'Attempting to end WFprocessor... event: %s' % self._wfp_terminate.is_set()) self._wfp_terminate.set() self._wfp_process.join() self._wfp_process = None self._logger.debug('WFprocessor process terminated') else: self._logger.debug('WFprocessor process already terminated') self._prof.prof('wfp process terminated', uid=self._uid) self._prof.close() except Exception, ex: self._logger.exception('Could not terminate wfprocessor process') raise
python
def terminate_processor(self): """ **Purpose**: Method to terminate the wfp process. This method is blocking as it waits for the wfp process to terminate (aka join). """ try: if self.check_processor(): self._logger.debug( 'Attempting to end WFprocessor... event: %s' % self._wfp_terminate.is_set()) self._wfp_terminate.set() self._wfp_process.join() self._wfp_process = None self._logger.debug('WFprocessor process terminated') else: self._logger.debug('WFprocessor process already terminated') self._prof.prof('wfp process terminated', uid=self._uid) self._prof.close() except Exception, ex: self._logger.exception('Could not terminate wfprocessor process') raise
[ "def", "terminate_processor", "(", "self", ")", ":", "try", ":", "if", "self", ".", "check_processor", "(", ")", ":", "self", ".", "_logger", ".", "debug", "(", "'Attempting to end WFprocessor... event: %s'", "%", "self", ".", "_wfp_terminate", ".", "is_set", "(", ")", ")", "self", ".", "_wfp_terminate", ".", "set", "(", ")", "self", ".", "_wfp_process", ".", "join", "(", ")", "self", ".", "_wfp_process", "=", "None", "self", ".", "_logger", ".", "debug", "(", "'WFprocessor process terminated'", ")", "else", ":", "self", ".", "_logger", ".", "debug", "(", "'WFprocessor process already terminated'", ")", "self", ".", "_prof", ".", "prof", "(", "'wfp process terminated'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_prof", ".", "close", "(", ")", "except", "Exception", ",", "ex", ":", "self", ".", "_logger", ".", "exception", "(", "'Could not terminate wfprocessor process'", ")", "raise" ]
**Purpose**: Method to terminate the wfp process. This method is blocking as it waits for the wfp process to terminate (aka join).
[ "**", "Purpose", "**", ":", "Method", "to", "terminate", "the", "wfp", "process", ".", "This", "method", "is", "blocking", "as", "it", "waits", "for", "the", "wfp", "process", "to", "terminate", "(", "aka", "join", ")", "." ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/appman/wfprocessor.py#L612-L636
train
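The matching teardown is set-then-join: flip the shared event so the child's loop exits, then block on join, which is exactly the "blocking" the docstring warns about. Continuing the hypothetical Runner sketch from the start_processor record:

    def stop(self):
        if self._proc is None:
            return                  # already terminated
        self._terminate.set()       # the child loop polls this event
        self._proc.join()           # blocks until the child exits
        self._proc = None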
radical-cybertools/radical.entk
src/radical/entk/appman/wfprocessor.py
WFprocessor.workflow_incomplete
def workflow_incomplete(self): """ **Purpose**: Method to check if the workflow execution is incomplete. """ try: for pipe in self._workflow: with pipe.lock: if pipe.completed: pass else: return True return False except Exception, ex: self._logger.exception( 'Could not check if workflow is incomplete, error:%s' % ex) raise
python
def workflow_incomplete(self): """ **Purpose**: Method to check if the workflow execution is incomplete. """ try: for pipe in self._workflow: with pipe.lock: if pipe.completed: pass else: return True return False except Exception, ex: self._logger.exception( 'Could not check if workflow is incomplete, error:%s' % ex) raise
[ "def", "workflow_incomplete", "(", "self", ")", ":", "try", ":", "for", "pipe", "in", "self", ".", "_workflow", ":", "with", "pipe", ".", "lock", ":", "if", "pipe", ".", "completed", ":", "pass", "else", ":", "return", "True", "return", "False", "except", "Exception", ",", "ex", ":", "self", ".", "_logger", ".", "exception", "(", "'Could not check if workflow is incomplete, error:%s'", "%", "ex", ")", "raise" ]
**Purpose**: Method to check if the workflow execution is incomplete.
[ "**", "Purpose", "**", ":", "Method", "to", "check", "if", "the", "workflow", "execution", "is", "incomplete", "." ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/appman/wfprocessor.py#L638-L655
train
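workflow_incomplete is "any pipeline not yet completed", read under each pipeline's lock. A runnable sketch with a tiny stand-in pipeline, assuming only the .lock and .completed attributes used in the record:

import threading


class _Pipe(object):
    # Minimal stand-in so the sketch runs without EnTK.
    def __init__(self, completed):
        self.lock = threading.Lock()
        self.completed = completed


def workflow_incomplete(pipelines):
    for pipe in pipelines:
        with pipe.lock:
            if not pipe.completed:
                return True
    return False


assert workflow_incomplete([_Pipe(True), _Pipe(False)])
assert not workflow_incomplete([_Pipe(True)])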
google/dotty
efilter/protocols/repeated.py
meld
def meld(*values): """Return the repeated value, or the first value if there's only one. This is a convenience function, equivalent to calling getvalue(repeated(x)) to get x. This function skips over instances of None in values (None is not allowed in repeated variables). Examples: meld("foo", "bar") # => ListRepetition("foo", "bar") meld("foo", "foo") # => ListRepetition("foo", "foo") meld("foo", None) # => "foo" meld(None) # => None """ values = [x for x in values if x is not None] if not values: return None result = repeated(*values) if isrepeating(result): return result return getvalue(result)
python
def meld(*values): """Return the repeated value, or the first value if there's only one. This is a convenience function, equivalent to calling getvalue(repeated(x)) to get x. This function skips over instances of None in values (None is not allowed in repeated variables). Examples: meld("foo", "bar") # => ListRepetition("foo", "bar") meld("foo", "foo") # => ListRepetition("foo", "foo") meld("foo", None) # => "foo" meld(None) # => None """ values = [x for x in values if x is not None] if not values: return None result = repeated(*values) if isrepeating(result): return result return getvalue(result)
[ "def", "meld", "(", "*", "values", ")", ":", "values", "=", "[", "x", "for", "x", "in", "values", "if", "x", "is", "not", "None", "]", "if", "not", "values", ":", "return", "None", "result", "=", "repeated", "(", "*", "values", ")", "if", "isrepeating", "(", "result", ")", ":", "return", "result", "return", "getvalue", "(", "result", ")" ]
Return the repeated value, or the first value if there's only one. This is a convenience function, equivalent to calling getvalue(repeated(x)) to get x. This function skips over instances of None in values (None is not allowed in repeated variables). Examples: meld("foo", "bar") # => ListRepetition("foo", "bar") meld("foo", "foo") # => ListRepetition("foo", "foo") meld("foo", None) # => "foo" meld(None) # => None
[ "Return", "the", "repeated", "value", "or", "the", "first", "value", "if", "there", "s", "only", "one", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/protocols/repeated.py#L55-L78
train
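The meld semantics can be replayed over plain lists; the list below stands in for efilter's ListRepetition, so this is a behavioural sketch rather than the library implementation:

def meld(*values):
    values = [v for v in values if v is not None]   # None is skipped
    if not values:
        return None
    if len(values) == 1:
        return values[0]            # a single value is returned unwrapped
    return list(values)             # stand-in for ListRepetition(*values)


assert meld("foo", None) == "foo"
assert meld(None) is None
assert meld("foo", "bar") == ["foo", "bar"]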
google/dotty
efilter/protocols/repeated.py
getvalue
def getvalue(x): """Return the single value of x or raise TypError if more than one value.""" if isrepeating(x): raise TypeError( "Ambiguous call to getvalue for %r which has more than one value." % x) for value in getvalues(x): return value
python
def getvalue(x): """Return the single value of x or raise TypError if more than one value.""" if isrepeating(x): raise TypeError( "Ambiguous call to getvalue for %r which has more than one value." % x) for value in getvalues(x): return value
[ "def", "getvalue", "(", "x", ")", ":", "if", "isrepeating", "(", "x", ")", ":", "raise", "TypeError", "(", "\"Ambiguous call to getvalue for %r which has more than one value.\"", "%", "x", ")", "for", "value", "in", "getvalues", "(", "x", ")", ":", "return", "value" ]
Return the single value of x or raise TypeError if more than one value.
[ "Return", "the", "single", "value", "of", "x", "or", "raise", "TypeError", "if", "more", "than", "one", "value", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/protocols/repeated.py#L120-L128
train
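getvalue is the inverse operation: unwrap a single value but refuse ambiguity. The same list stand-in gives a runnable sketch, with isinstance(x, list) playing the role of isrepeating:

def getvalue(x):
    if isinstance(x, list) and len(x) > 1:
        raise TypeError("Ambiguous call to getvalue for %r." % (x,))
    if isinstance(x, list):
        return x[0] if x else None
    return x


assert getvalue("foo") == "foo"
assert getvalue(["foo"]) == "foo"
try:
    getvalue(["foo", "bar"])
except TypeError:
    pass
else:
    raise AssertionError("expected TypeError")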
radical-cybertools/radical.entk
src/radical/entk/task/task.py
Task.luid
def luid(self): """ Unique ID of the current task (fully qualified). example: >>> task.luid pipe.0001.stage.0004.task.0234 :getter: Returns the fully qualified uid of the current task :type: String """ p_elem = self.parent_pipeline.get('name') if not p_elem: p_elem = self.parent_pipeline['uid'] s_elem = self.parent_stage.get('name') if not s_elem: s_elem = self.parent_stage['uid'] t_elem = self.name if not t_elem: t_elem = self.uid return '%s.%s.%s' % (p_elem, s_elem, t_elem)
python
def luid(self): """ Unique ID of the current task (fully qualified). example: >>> task.luid pipe.0001.stage.0004.task.0234 :getter: Returns the fully qualified uid of the current task :type: String """ p_elem = self.parent_pipeline.get('name') if not p_elem: p_elem = self.parent_pipeline['uid'] s_elem = self.parent_stage.get('name') if not s_elem: s_elem = self.parent_stage['uid'] t_elem = self.name if not t_elem: t_elem = self.uid return '%s.%s.%s' % (p_elem, s_elem, t_elem)
[ "def", "luid", "(", "self", ")", ":", "p_elem", "=", "self", ".", "parent_pipeline", ".", "get", "(", "'name'", ")", "if", "not", "p_elem", ":", "p_elem", "=", "self", ".", "parent_pipeline", "[", "'uid'", "]", "s_elem", "=", "self", ".", "parent_stage", ".", "get", "(", "'name'", ")", "if", "not", "s_elem", ":", "s_elem", "=", "self", ".", "parent_stage", "[", "'uid'", "]", "t_elem", "=", "self", ".", "name", "if", "not", "t_elem", ":", "t_elem", "=", "self", ".", "uid", "return", "'%s.%s.%s'", "%", "(", "p_elem", ",", "s_elem", ",", "t_elem", ")" ]
Unique ID of the current task (fully qualified). example: >>> task.luid pipe.0001.stage.0004.task.0234 :getter: Returns the fully qualified uid of the current task :type: String
[ "Unique", "ID", "of", "the", "current", "task", "(", "fully", "qualified", ")", "." ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/task/task.py#L88-L111
train
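luid is three name-or-uid fallbacks joined with dots. Extracted as a free function for illustration; the dict arguments mirror parent_pipeline and parent_stage from the record:

def luid(parent_pipeline, parent_stage, name, uid):
    p_elem = parent_pipeline.get('name') or parent_pipeline['uid']
    s_elem = parent_stage.get('name') or parent_stage['uid']
    t_elem = name or uid
    return '%s.%s.%s' % (p_elem, s_elem, t_elem)


assert luid({'uid': 'pipe.0001'}, {'uid': 'stage.0004'},
            None, 'task.0234') == 'pipe.0001.stage.0004.task.0234'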
radical-cybertools/radical.entk
src/radical/entk/task/task.py
Task.to_dict
def to_dict(self): """ Convert current Task into a dictionary :return: python dictionary """ task_desc_as_dict = { 'uid': self._uid, 'name': self._name, 'state': self._state, 'state_history': self._state_history, 'pre_exec': self._pre_exec, 'executable': self._executable, 'arguments': self._arguments, 'post_exec': self._post_exec, 'cpu_reqs': self._cpu_reqs, 'gpu_reqs': self._gpu_reqs, 'lfs_per_process': self._lfs_per_process, 'upload_input_data': self._upload_input_data, 'copy_input_data': self._copy_input_data, 'link_input_data': self._link_input_data, 'move_input_data': self._move_input_data, 'copy_output_data': self._copy_output_data, 'move_output_data': self._move_output_data, 'download_output_data': self._download_output_data, 'stdout': self._stdout, 'stderr': self._stderr, 'exit_code': self._exit_code, 'path': self._path, 'tag': self._tag, 'parent_stage': self._p_stage, 'parent_pipeline': self._p_pipeline, } return task_desc_as_dict
python
def to_dict(self): """ Convert current Task into a dictionary :return: python dictionary """ task_desc_as_dict = { 'uid': self._uid, 'name': self._name, 'state': self._state, 'state_history': self._state_history, 'pre_exec': self._pre_exec, 'executable': self._executable, 'arguments': self._arguments, 'post_exec': self._post_exec, 'cpu_reqs': self._cpu_reqs, 'gpu_reqs': self._gpu_reqs, 'lfs_per_process': self._lfs_per_process, 'upload_input_data': self._upload_input_data, 'copy_input_data': self._copy_input_data, 'link_input_data': self._link_input_data, 'move_input_data': self._move_input_data, 'copy_output_data': self._copy_output_data, 'move_output_data': self._move_output_data, 'download_output_data': self._download_output_data, 'stdout': self._stdout, 'stderr': self._stderr, 'exit_code': self._exit_code, 'path': self._path, 'tag': self._tag, 'parent_stage': self._p_stage, 'parent_pipeline': self._p_pipeline, } return task_desc_as_dict
[ "def", "to_dict", "(", "self", ")", ":", "task_desc_as_dict", "=", "{", "'uid'", ":", "self", ".", "_uid", ",", "'name'", ":", "self", ".", "_name", ",", "'state'", ":", "self", ".", "_state", ",", "'state_history'", ":", "self", ".", "_state_history", ",", "'pre_exec'", ":", "self", ".", "_pre_exec", ",", "'executable'", ":", "self", ".", "_executable", ",", "'arguments'", ":", "self", ".", "_arguments", ",", "'post_exec'", ":", "self", ".", "_post_exec", ",", "'cpu_reqs'", ":", "self", ".", "_cpu_reqs", ",", "'gpu_reqs'", ":", "self", ".", "_gpu_reqs", ",", "'lfs_per_process'", ":", "self", ".", "_lfs_per_process", ",", "'upload_input_data'", ":", "self", ".", "_upload_input_data", ",", "'copy_input_data'", ":", "self", ".", "_copy_input_data", ",", "'link_input_data'", ":", "self", ".", "_link_input_data", ",", "'move_input_data'", ":", "self", ".", "_move_input_data", ",", "'copy_output_data'", ":", "self", ".", "_copy_output_data", ",", "'move_output_data'", ":", "self", ".", "_move_output_data", ",", "'download_output_data'", ":", "self", ".", "_download_output_data", ",", "'stdout'", ":", "self", ".", "_stdout", ",", "'stderr'", ":", "self", ".", "_stderr", ",", "'exit_code'", ":", "self", ".", "_exit_code", ",", "'path'", ":", "self", ".", "_path", ",", "'tag'", ":", "self", ".", "_tag", ",", "'parent_stage'", ":", "self", ".", "_p_stage", ",", "'parent_pipeline'", ":", "self", ".", "_p_pipeline", ",", "}", "return", "task_desc_as_dict" ]
Convert current Task into a dictionary :return: python dictionary
[ "Convert", "current", "Task", "into", "a", "dictionary" ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/task/task.py#L724-L764
train
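to_dict exists so a Task can cross process and queue boundaries: elsewhere in this codebase the task manager publishes json.dumps(task.to_dict()) and the dequeue thread rebuilds with from_dict(json.loads(body)). A toy round trip over a hypothetical minimal description:

import json

desc = {'uid': 'task.0000', 'executable': '/bin/date',
        'arguments': [], 'state': 'DESCRIBED'}
wire = json.dumps(desc)             # what gets published to the RabbitMQ queue
assert json.loads(wire) == desc     # what the receiving side reconstructs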
radical-cybertools/radical.entk
src/radical/entk/task/task.py
Task.from_dict
def from_dict(self, d): """ Create a Task from a dictionary. The change is in inplace. :argument: python dictionary :return: None """ if 'uid' in d: if d['uid']: self._uid = d['uid'] if 'name' in d: if d['name']: self._name = d['name'] if 'state' in d: if isinstance(d['state'], str) or isinstance(d['state'], unicode): self._state = d['state'] else: raise TypeError(entity='state', expected_type=str, actual_type=type(d['state'])) else: self._state = states.INITIAL if 'state_history' in d: if isinstance(d['state_history'], list): self._state_history = d['state_history'] else: raise TypeError(entity='state_history', expected_type=list, actual_type=type( d['state_history'])) if 'pre_exec' in d: if isinstance(d['pre_exec'], list): self._pre_exec = d['pre_exec'] else: raise TypeError(expected_type=list, actual_type=type(d['pre_exec'])) if 'executable' in d: if isinstance(d['executable'], str) or isinstance(d['executable'], unicode): self._executable = d['executable'] else: raise TypeError(expected_type=str, actual_type=type(d['executable'])) if 'arguments' in d: if isinstance(d['arguments'], list): self._arguments = d['arguments'] else: raise TypeError(expected_type=list, actual_type=type(d['arguments'])) if 'post_exec' in d: if isinstance(d['post_exec'], list): self._post_exec = d['post_exec'] else: raise TypeError(expected_type=list, actual_type=type(d['post_exec'])) if 'cpu_reqs' in d: if isinstance(d['cpu_reqs'], dict): self._cpu_reqs = d['cpu_reqs'] else: raise TypeError(expected_type=dict, actual_type=type(d['cpu_reqs'])) if 'gpu_reqs' in d: if isinstance(d['gpu_reqs'], dict): self._gpu_reqs = d['gpu_reqs'] else: raise TypeError(expected_type=dict, actual_type=type(d['gpu_reqs'])) if 'lfs_per_process' in d: if d['lfs_per_process']: if isinstance(d['lfs_per_process'], int): self._lfs_per_process = d['lfs_per_process'] else: raise TypeError(expected_type=int, actual_type=type(d['lfs_per_process'])) if 'upload_input_data' in d: if isinstance(d['upload_input_data'], list): self._upload_input_data = d['upload_input_data'] else: raise TypeError(expected_type=list, actual_type=type(d['upload_input_data'])) if 'copy_input_data' in d: if isinstance(d['copy_input_data'], list): self._copy_input_data = d['copy_input_data'] else: raise TypeError(expected_type=list, actual_type=type(d['copy_input_data'])) if 'link_input_data' in d: if isinstance(d['link_input_data'], list): self._link_input_data = d['link_input_data'] else: raise TypeError(expected_type=list, actual_type=type(d['link_input_data'])) if 'move_input_data' in d: if isinstance(d['move_input_data'], list): self._move_input_data = d['move_input_data'] else: raise TypeError(expected_type=list, actual_type=type(d['move_input_data'])) if 'copy_output_data' in d: if isinstance(d['copy_output_data'], list): self._copy_output_data = d['copy_output_data'] else: raise TypeError(expected_type=list, actual_type=type(d['copy_output_data'])) if 'move_output_data' in d: if isinstance(d['move_output_data'], list): self._move_output_data = d['move_output_data'] else: raise TypeError(expected_type=list, actual_type=type(d['move_output_data'])) if 'download_output_data' in d: if isinstance(d['download_output_data'], list): self._download_output_data = d['download_output_data'] else: raise TypeError(expected_type=list, actual_type=type( d['download_output_data'])) if 'stdout' in d: if d['stdout']: if isinstance(d['stdout'], str) or isinstance(d['stdout'], unicode): self._stdout = d['stdout'] else: raise TypeError(expected_type=str, 
actual_type=type(d['stdout'])) if 'stderr' in d: if d['stderr']: if isinstance(d['stderr'], str) or isinstance(d['stderr'], unicode): self._stderr = d['stderr'] else: raise TypeError(expected_type=str, actual_type=type(d['stderr'])) if 'exit_code' in d: if d['exit_code']: if isinstance(d['exit_code'], int): self._exit_code = d['exit_code'] else: raise TypeError( entity='exit_code', expected_type=int, actual_type=type(d['exit_code'])) if 'path' in d: if d['path']: if isinstance(d['path'], str) or isinstance(d['path'], unicode): self._path = d['path'] else: raise TypeError(entity='path', expected_type=str, actual_type=type(d['path'])) if 'tag' in d: if d['tag']: if isinstance(d['tag'], str) or isinstance(d['tag'], unicode): self._tag = str(d['tag']) else: raise TypeError(expected_type=str, actual_type=type(d['tag'])) if 'parent_stage' in d: if isinstance(d['parent_stage'], dict): self._p_stage = d['parent_stage'] else: raise TypeError( entity='parent_stage', expected_type=dict, actual_type=type(d['parent_stage'])) if 'parent_pipeline' in d: if isinstance(d['parent_pipeline'], dict): self._p_pipeline = d['parent_pipeline'] else: raise TypeError(entity='parent_pipeline', expected_type=dict, actual_type=type( d['parent_pipeline']))
python
def from_dict(self, d): """ Create a Task from a dictionary. The change is in inplace. :argument: python dictionary :return: None """ if 'uid' in d: if d['uid']: self._uid = d['uid'] if 'name' in d: if d['name']: self._name = d['name'] if 'state' in d: if isinstance(d['state'], str) or isinstance(d['state'], unicode): self._state = d['state'] else: raise TypeError(entity='state', expected_type=str, actual_type=type(d['state'])) else: self._state = states.INITIAL if 'state_history' in d: if isinstance(d['state_history'], list): self._state_history = d['state_history'] else: raise TypeError(entity='state_history', expected_type=list, actual_type=type( d['state_history'])) if 'pre_exec' in d: if isinstance(d['pre_exec'], list): self._pre_exec = d['pre_exec'] else: raise TypeError(expected_type=list, actual_type=type(d['pre_exec'])) if 'executable' in d: if isinstance(d['executable'], str) or isinstance(d['executable'], unicode): self._executable = d['executable'] else: raise TypeError(expected_type=str, actual_type=type(d['executable'])) if 'arguments' in d: if isinstance(d['arguments'], list): self._arguments = d['arguments'] else: raise TypeError(expected_type=list, actual_type=type(d['arguments'])) if 'post_exec' in d: if isinstance(d['post_exec'], list): self._post_exec = d['post_exec'] else: raise TypeError(expected_type=list, actual_type=type(d['post_exec'])) if 'cpu_reqs' in d: if isinstance(d['cpu_reqs'], dict): self._cpu_reqs = d['cpu_reqs'] else: raise TypeError(expected_type=dict, actual_type=type(d['cpu_reqs'])) if 'gpu_reqs' in d: if isinstance(d['gpu_reqs'], dict): self._gpu_reqs = d['gpu_reqs'] else: raise TypeError(expected_type=dict, actual_type=type(d['gpu_reqs'])) if 'lfs_per_process' in d: if d['lfs_per_process']: if isinstance(d['lfs_per_process'], int): self._lfs_per_process = d['lfs_per_process'] else: raise TypeError(expected_type=int, actual_type=type(d['lfs_per_process'])) if 'upload_input_data' in d: if isinstance(d['upload_input_data'], list): self._upload_input_data = d['upload_input_data'] else: raise TypeError(expected_type=list, actual_type=type(d['upload_input_data'])) if 'copy_input_data' in d: if isinstance(d['copy_input_data'], list): self._copy_input_data = d['copy_input_data'] else: raise TypeError(expected_type=list, actual_type=type(d['copy_input_data'])) if 'link_input_data' in d: if isinstance(d['link_input_data'], list): self._link_input_data = d['link_input_data'] else: raise TypeError(expected_type=list, actual_type=type(d['link_input_data'])) if 'move_input_data' in d: if isinstance(d['move_input_data'], list): self._move_input_data = d['move_input_data'] else: raise TypeError(expected_type=list, actual_type=type(d['move_input_data'])) if 'copy_output_data' in d: if isinstance(d['copy_output_data'], list): self._copy_output_data = d['copy_output_data'] else: raise TypeError(expected_type=list, actual_type=type(d['copy_output_data'])) if 'move_output_data' in d: if isinstance(d['move_output_data'], list): self._move_output_data = d['move_output_data'] else: raise TypeError(expected_type=list, actual_type=type(d['move_output_data'])) if 'download_output_data' in d: if isinstance(d['download_output_data'], list): self._download_output_data = d['download_output_data'] else: raise TypeError(expected_type=list, actual_type=type( d['download_output_data'])) if 'stdout' in d: if d['stdout']: if isinstance(d['stdout'], str) or isinstance(d['stdout'], unicode): self._stdout = d['stdout'] else: raise TypeError(expected_type=str, 
actual_type=type(d['stdout'])) if 'stderr' in d: if d['stderr']: if isinstance(d['stderr'], str) or isinstance(d['stderr'], unicode): self._stderr = d['stderr'] else: raise TypeError(expected_type=str, actual_type=type(d['stderr'])) if 'exit_code' in d: if d['exit_code']: if isinstance(d['exit_code'], int): self._exit_code = d['exit_code'] else: raise TypeError( entity='exit_code', expected_type=int, actual_type=type(d['exit_code'])) if 'path' in d: if d['path']: if isinstance(d['path'], str) or isinstance(d['path'], unicode): self._path = d['path'] else: raise TypeError(entity='path', expected_type=str, actual_type=type(d['path'])) if 'tag' in d: if d['tag']: if isinstance(d['tag'], str) or isinstance(d['tag'], unicode): self._tag = str(d['tag']) else: raise TypeError(expected_type=str, actual_type=type(d['tag'])) if 'parent_stage' in d: if isinstance(d['parent_stage'], dict): self._p_stage = d['parent_stage'] else: raise TypeError( entity='parent_stage', expected_type=dict, actual_type=type(d['parent_stage'])) if 'parent_pipeline' in d: if isinstance(d['parent_pipeline'], dict): self._p_pipeline = d['parent_pipeline'] else: raise TypeError(entity='parent_pipeline', expected_type=dict, actual_type=type( d['parent_pipeline']))
[ "def", "from_dict", "(", "self", ",", "d", ")", ":", "if", "'uid'", "in", "d", ":", "if", "d", "[", "'uid'", "]", ":", "self", ".", "_uid", "=", "d", "[", "'uid'", "]", "if", "'name'", "in", "d", ":", "if", "d", "[", "'name'", "]", ":", "self", ".", "_name", "=", "d", "[", "'name'", "]", "if", "'state'", "in", "d", ":", "if", "isinstance", "(", "d", "[", "'state'", "]", ",", "str", ")", "or", "isinstance", "(", "d", "[", "'state'", "]", ",", "unicode", ")", ":", "self", ".", "_state", "=", "d", "[", "'state'", "]", "else", ":", "raise", "TypeError", "(", "entity", "=", "'state'", ",", "expected_type", "=", "str", ",", "actual_type", "=", "type", "(", "d", "[", "'state'", "]", ")", ")", "else", ":", "self", ".", "_state", "=", "states", ".", "INITIAL", "if", "'state_history'", "in", "d", ":", "if", "isinstance", "(", "d", "[", "'state_history'", "]", ",", "list", ")", ":", "self", ".", "_state_history", "=", "d", "[", "'state_history'", "]", "else", ":", "raise", "TypeError", "(", "entity", "=", "'state_history'", ",", "expected_type", "=", "list", ",", "actual_type", "=", "type", "(", "d", "[", "'state_history'", "]", ")", ")", "if", "'pre_exec'", "in", "d", ":", "if", "isinstance", "(", "d", "[", "'pre_exec'", "]", ",", "list", ")", ":", "self", ".", "_pre_exec", "=", "d", "[", "'pre_exec'", "]", "else", ":", "raise", "TypeError", "(", "expected_type", "=", "list", ",", "actual_type", "=", "type", "(", "d", "[", "'pre_exec'", "]", ")", ")", "if", "'executable'", "in", "d", ":", "if", "isinstance", "(", "d", "[", "'executable'", "]", ",", "str", ")", "or", "isinstance", "(", "d", "[", "'executable'", "]", ",", "unicode", ")", ":", "self", ".", "_executable", "=", "d", "[", "'executable'", "]", "else", ":", "raise", "TypeError", "(", "expected_type", "=", "str", ",", "actual_type", "=", "type", "(", "d", "[", "'executable'", "]", ")", ")", "if", "'arguments'", "in", "d", ":", "if", "isinstance", "(", "d", "[", "'arguments'", "]", ",", "list", ")", ":", "self", ".", "_arguments", "=", "d", "[", "'arguments'", "]", "else", ":", "raise", "TypeError", "(", "expected_type", "=", "list", ",", "actual_type", "=", "type", "(", "d", "[", "'arguments'", "]", ")", ")", "if", "'post_exec'", "in", "d", ":", "if", "isinstance", "(", "d", "[", "'post_exec'", "]", ",", "list", ")", ":", "self", ".", "_post_exec", "=", "d", "[", "'post_exec'", "]", "else", ":", "raise", "TypeError", "(", "expected_type", "=", "list", ",", "actual_type", "=", "type", "(", "d", "[", "'post_exec'", "]", ")", ")", "if", "'cpu_reqs'", "in", "d", ":", "if", "isinstance", "(", "d", "[", "'cpu_reqs'", "]", ",", "dict", ")", ":", "self", ".", "_cpu_reqs", "=", "d", "[", "'cpu_reqs'", "]", "else", ":", "raise", "TypeError", "(", "expected_type", "=", "dict", ",", "actual_type", "=", "type", "(", "d", "[", "'cpu_reqs'", "]", ")", ")", "if", "'gpu_reqs'", "in", "d", ":", "if", "isinstance", "(", "d", "[", "'gpu_reqs'", "]", ",", "dict", ")", ":", "self", ".", "_gpu_reqs", "=", "d", "[", "'gpu_reqs'", "]", "else", ":", "raise", "TypeError", "(", "expected_type", "=", "dict", ",", "actual_type", "=", "type", "(", "d", "[", "'gpu_reqs'", "]", ")", ")", "if", "'lfs_per_process'", "in", "d", ":", "if", "d", "[", "'lfs_per_process'", "]", ":", "if", "isinstance", "(", "d", "[", "'lfs_per_process'", "]", ",", "int", ")", ":", "self", ".", "_lfs_per_process", "=", "d", "[", "'lfs_per_process'", "]", "else", ":", "raise", "TypeError", "(", "expected_type", "=", "int", ",", "actual_type", "=", "type", "(", "d", "[", "'lfs_per_process'", "]", 
")", ")", "if", "'upload_input_data'", "in", "d", ":", "if", "isinstance", "(", "d", "[", "'upload_input_data'", "]", ",", "list", ")", ":", "self", ".", "_upload_input_data", "=", "d", "[", "'upload_input_data'", "]", "else", ":", "raise", "TypeError", "(", "expected_type", "=", "list", ",", "actual_type", "=", "type", "(", "d", "[", "'upload_input_data'", "]", ")", ")", "if", "'copy_input_data'", "in", "d", ":", "if", "isinstance", "(", "d", "[", "'copy_input_data'", "]", ",", "list", ")", ":", "self", ".", "_copy_input_data", "=", "d", "[", "'copy_input_data'", "]", "else", ":", "raise", "TypeError", "(", "expected_type", "=", "list", ",", "actual_type", "=", "type", "(", "d", "[", "'copy_input_data'", "]", ")", ")", "if", "'link_input_data'", "in", "d", ":", "if", "isinstance", "(", "d", "[", "'link_input_data'", "]", ",", "list", ")", ":", "self", ".", "_link_input_data", "=", "d", "[", "'link_input_data'", "]", "else", ":", "raise", "TypeError", "(", "expected_type", "=", "list", ",", "actual_type", "=", "type", "(", "d", "[", "'link_input_data'", "]", ")", ")", "if", "'move_input_data'", "in", "d", ":", "if", "isinstance", "(", "d", "[", "'move_input_data'", "]", ",", "list", ")", ":", "self", ".", "_move_input_data", "=", "d", "[", "'move_input_data'", "]", "else", ":", "raise", "TypeError", "(", "expected_type", "=", "list", ",", "actual_type", "=", "type", "(", "d", "[", "'move_input_data'", "]", ")", ")", "if", "'copy_output_data'", "in", "d", ":", "if", "isinstance", "(", "d", "[", "'copy_output_data'", "]", ",", "list", ")", ":", "self", ".", "_copy_output_data", "=", "d", "[", "'copy_output_data'", "]", "else", ":", "raise", "TypeError", "(", "expected_type", "=", "list", ",", "actual_type", "=", "type", "(", "d", "[", "'copy_output_data'", "]", ")", ")", "if", "'move_output_data'", "in", "d", ":", "if", "isinstance", "(", "d", "[", "'move_output_data'", "]", ",", "list", ")", ":", "self", ".", "_move_output_data", "=", "d", "[", "'move_output_data'", "]", "else", ":", "raise", "TypeError", "(", "expected_type", "=", "list", ",", "actual_type", "=", "type", "(", "d", "[", "'move_output_data'", "]", ")", ")", "if", "'download_output_data'", "in", "d", ":", "if", "isinstance", "(", "d", "[", "'download_output_data'", "]", ",", "list", ")", ":", "self", ".", "_download_output_data", "=", "d", "[", "'download_output_data'", "]", "else", ":", "raise", "TypeError", "(", "expected_type", "=", "list", ",", "actual_type", "=", "type", "(", "d", "[", "'download_output_data'", "]", ")", ")", "if", "'stdout'", "in", "d", ":", "if", "d", "[", "'stdout'", "]", ":", "if", "isinstance", "(", "d", "[", "'stdout'", "]", ",", "str", ")", "or", "isinstance", "(", "d", "[", "'stdout'", "]", ",", "unicode", ")", ":", "self", ".", "_stdout", "=", "d", "[", "'stdout'", "]", "else", ":", "raise", "TypeError", "(", "expected_type", "=", "str", ",", "actual_type", "=", "type", "(", "d", "[", "'stdout'", "]", ")", ")", "if", "'stderr'", "in", "d", ":", "if", "d", "[", "'stderr'", "]", ":", "if", "isinstance", "(", "d", "[", "'stderr'", "]", ",", "str", ")", "or", "isinstance", "(", "d", "[", "'stderr'", "]", ",", "unicode", ")", ":", "self", ".", "_stderr", "=", "d", "[", "'stderr'", "]", "else", ":", "raise", "TypeError", "(", "expected_type", "=", "str", ",", "actual_type", "=", "type", "(", "d", "[", "'stderr'", "]", ")", ")", "if", "'exit_code'", "in", "d", ":", "if", "d", "[", "'exit_code'", "]", ":", "if", "isinstance", "(", "d", "[", "'exit_code'", "]", ",", "int", ")", ":", "self", ".", 
"_exit_code", "=", "d", "[", "'exit_code'", "]", "else", ":", "raise", "TypeError", "(", "entity", "=", "'exit_code'", ",", "expected_type", "=", "int", ",", "actual_type", "=", "type", "(", "d", "[", "'exit_code'", "]", ")", ")", "if", "'path'", "in", "d", ":", "if", "d", "[", "'path'", "]", ":", "if", "isinstance", "(", "d", "[", "'path'", "]", ",", "str", ")", "or", "isinstance", "(", "d", "[", "'path'", "]", ",", "unicode", ")", ":", "self", ".", "_path", "=", "d", "[", "'path'", "]", "else", ":", "raise", "TypeError", "(", "entity", "=", "'path'", ",", "expected_type", "=", "str", ",", "actual_type", "=", "type", "(", "d", "[", "'path'", "]", ")", ")", "if", "'tag'", "in", "d", ":", "if", "d", "[", "'tag'", "]", ":", "if", "isinstance", "(", "d", "[", "'tag'", "]", ",", "str", ")", "or", "isinstance", "(", "d", "[", "'tag'", "]", ",", "unicode", ")", ":", "self", ".", "_tag", "=", "str", "(", "d", "[", "'tag'", "]", ")", "else", ":", "raise", "TypeError", "(", "expected_type", "=", "str", ",", "actual_type", "=", "type", "(", "d", "[", "'tag'", "]", ")", ")", "if", "'parent_stage'", "in", "d", ":", "if", "isinstance", "(", "d", "[", "'parent_stage'", "]", ",", "dict", ")", ":", "self", ".", "_p_stage", "=", "d", "[", "'parent_stage'", "]", "else", ":", "raise", "TypeError", "(", "entity", "=", "'parent_stage'", ",", "expected_type", "=", "dict", ",", "actual_type", "=", "type", "(", "d", "[", "'parent_stage'", "]", ")", ")", "if", "'parent_pipeline'", "in", "d", ":", "if", "isinstance", "(", "d", "[", "'parent_pipeline'", "]", ",", "dict", ")", ":", "self", ".", "_p_pipeline", "=", "d", "[", "'parent_pipeline'", "]", "else", ":", "raise", "TypeError", "(", "entity", "=", "'parent_pipeline'", ",", "expected_type", "=", "dict", ",", "actual_type", "=", "type", "(", "d", "[", "'parent_pipeline'", "]", ")", ")" ]
Create a Task from a dictionary. The change is in place. :argument: python dictionary :return: None
[ "Create", "a", "Task", "from", "a", "dictionary", ".", "The", "change", "is", "in", "place", "." ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/task/task.py#L766-L948
train
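from_dict repeats one guard per field: if the key is present, type-check the value, else keep the default. That boilerplate could be captured once in a helper; take below is a hypothetical sketch, not part of EnTK:

def take(d, key, expected, default=None):
    # Return d[key] when present and well typed, else the default.
    if key not in d:
        return default
    value = d[key]
    if not isinstance(value, expected):
        raise TypeError('%s: expected %s, got %s'
                        % (key, expected.__name__, type(value).__name__))
    return value


desc = {'arguments': ['-l'], 'cpu_reqs': {'processes': 1}}
assert take(desc, 'arguments', list) == ['-l']
assert take(desc, 'pre_exec', list, default=[]) == []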
radical-cybertools/radical.entk
src/radical/entk/task/task.py
Task._assign_uid
def _assign_uid(self, sid): """ Purpose: Assign a uid to the current object based on the sid passed """ self._uid = ru.generate_id( 'task.%(item_counter)04d', ru.ID_CUSTOM, namespace=sid)
python
def _assign_uid(self, sid): """ Purpose: Assign a uid to the current object based on the sid passed """ self._uid = ru.generate_id( 'task.%(item_counter)04d', ru.ID_CUSTOM, namespace=sid)
[ "def", "_assign_uid", "(", "self", ",", "sid", ")", ":", "self", ".", "_uid", "=", "ru", ".", "generate_id", "(", "'task.%(item_counter)04d'", ",", "ru", ".", "ID_CUSTOM", ",", "namespace", "=", "sid", ")" ]
Purpose: Assign a uid to the current object based on the sid passed
[ "Purpose", ":", "Assign", "a", "uid", "to", "the", "current", "object", "based", "on", "the", "sid", "passed" ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/task/task.py#L954-L959
train
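ru.generate_id with ru.ID_CUSTOM and a namespace behaves like a per-session counter behind the 'task.%(item_counter)04d' template. A dependency-free stand-in that mimics the numbering; the zero-based start is an assumption:

import itertools

_counters = {}


def generate_task_uid(sid):
    # One counter per namespace, mimicking ru.generate_id(..., namespace=sid).
    counter = _counters.setdefault(sid, itertools.count())
    return 'task.%04d' % next(counter)


assert generate_task_uid('s1') == 'task.0000'
assert generate_task_uid('s1') == 'task.0001'
assert generate_task_uid('s2') == 'task.0000'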
radical-cybertools/radical.entk
src/radical/entk/task/task.py
Task._validate
def _validate(self): """ Purpose: Validate that the state of the task is 'DESCRIBED' and that an executable has been specified for the task. """ if self._state is not states.INITIAL: raise ValueError(obj=self._uid, attribute='state', expected_value=states.INITIAL, actual_value=self._state) if not self._executable: raise MissingError(obj=self._uid, missing_attribute='executable')
python
def _validate(self): """ Purpose: Validate that the state of the task is 'DESCRIBED' and that an executable has been specified for the task. """ if self._state is not states.INITIAL: raise ValueError(obj=self._uid, attribute='state', expected_value=states.INITIAL, actual_value=self._state) if not self._executable: raise MissingError(obj=self._uid, missing_attribute='executable')
[ "def", "_validate", "(", "self", ")", ":", "if", "self", ".", "_state", "is", "not", "states", ".", "INITIAL", ":", "raise", "ValueError", "(", "obj", "=", "self", ".", "_uid", ",", "attribute", "=", "'state'", ",", "expected_value", "=", "states", ".", "INITIAL", ",", "actual_value", "=", "self", ".", "_state", ")", "if", "not", "self", ".", "_executable", ":", "raise", "MissingError", "(", "obj", "=", "self", ".", "_uid", ",", "missing_attribute", "=", "'executable'", ")" ]
Purpose: Validate that the state of the task is 'DESCRIBED' and that an executable has been specified for the task.
[ "Purpose", ":", "Validate", "that", "the", "state", "of", "the", "task", "is", "DESCRIBED", "and", "that", "an", "executable", "has", "been", "specified", "for", "the", "task", "." ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/task/task.py#L961-L975
train
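_validate checks two preconditions: the task is still in its initial state and names an executable. A plain-exception sketch, assuming states.INITIAL equals 'DESCRIBED' as the docstring suggests (EnTK raises its own ValueError/MissingError variants with richer keyword arguments):

INITIAL = 'DESCRIBED'


def validate(state, executable):
    if state != INITIAL:
        raise ValueError('state: expected %s, got %s' % (INITIAL, state))
    if not executable:
        raise AttributeError('missing attribute: executable')


validate('DESCRIBED', '/bin/date')   # passes silently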
radical-cybertools/radical.entk
src/radical/entk/execman/rp/task_manager.py
TaskManager._process_tasks
def _process_tasks(self, task_queue, rmgr, logger, mq_hostname, port, local_prof, sid): ''' **Purpose**: The new thread that gets spawned by the main tmgr process invokes this function. This function receives tasks from 'task_queue' and submits them to the RADICAL Pilot RTS. ''' placeholder_dict = dict() def load_placeholder(task, rts_uid): parent_pipeline = str(task.parent_pipeline['name']) parent_stage = str(task.parent_stage['name']) if parent_pipeline not in placeholder_dict: placeholder_dict[parent_pipeline] = dict() if parent_stage not in placeholder_dict[parent_pipeline]: placeholder_dict[parent_pipeline][parent_stage] = dict() if None not in [parent_pipeline, parent_stage, task.name]: placeholder_dict[parent_pipeline][parent_stage][str(task.name)] = {'path': str(task.path), 'rts_uid': rts_uid} def unit_state_cb(unit, state): try: logger.debug('Unit %s in state %s' % (unit.uid, unit.state)) if unit.state in rp.FINAL: # Acquire a connection+channel to the rmq server mq_connection = pika.BlockingConnection( pika.ConnectionParameters(host=mq_hostname, port=port)) mq_channel = mq_connection.channel() task = None task = create_task_from_cu(unit, local_prof) transition(obj=task, obj_type='Task', new_state=states.COMPLETED, channel=mq_channel, queue='%s-cb-to-sync' % sid, profiler=local_prof, logger=logger) load_placeholder(task, unit.uid) task_as_dict = json.dumps(task.to_dict()) mq_channel.basic_publish(exchange='', routing_key='%s-completedq-1' % sid, body=task_as_dict # properties=pika.BasicProperties( # make message persistent # delivery_mode = 2, # ) ) logger.info('Pushed task %s with state %s to completed queue %s-completedq-1' % (task.uid, task.state, sid)) mq_connection.close() except KeyboardInterrupt: logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' + 'trying to exit callback thread gracefully...') raise KeyboardInterrupt except Exception, ex: logger.exception('Error in RP callback thread: %s' % ex) umgr = rp.UnitManager(session=rmgr._session) umgr.add_pilots(rmgr.pilot) umgr.register_callback(unit_state_cb) try: while not self._tmgr_terminate.is_set(): body = None try: body = task_queue.get(block=True, timeout=10) except Queue.Empty: # Ignore empty exception, we don't always have new tasks to run pass if body: task_queue.task_done() bulk_tasks = list() bulk_cuds = list() for task in body: t = Task() t.from_dict(task) bulk_tasks.append(t) bulk_cuds.append(create_cud_from_task( t, placeholder_dict, local_prof)) mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=mq_hostname, port=port)) mq_channel = mq_connection.channel() transition(obj=t, obj_type='Task', new_state=states.SUBMITTING, channel=mq_channel, queue='%s-tmgr-to-sync' % sid, profiler=local_prof, logger=logger) mq_connection.close() umgr.submit_units(bulk_cuds) for task in bulk_tasks: mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=mq_hostname, port=port)) mq_channel = mq_connection.channel() transition(obj=task, obj_type='Task', new_state=states.SUBMITTED, channel=mq_channel, queue='%s-tmgr-to-sync' % sid, profiler=local_prof, logger=logger) mq_connection.close() except KeyboardInterrupt as ex: logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' + 'trying to cancel task processor gracefully...') except Exception as ex: logger.exception('%s failed with %s'%(self._uid, ex)) raise EnTKError(ex)
python
def _process_tasks(self, task_queue, rmgr, logger, mq_hostname, port, local_prof, sid): ''' **Purpose**: The new thread that gets spawned by the main tmgr process invokes this function. This function receives tasks from 'task_queue' and submits them to the RADICAL Pilot RTS. ''' placeholder_dict = dict() def load_placeholder(task, rts_uid): parent_pipeline = str(task.parent_pipeline['name']) parent_stage = str(task.parent_stage['name']) if parent_pipeline not in placeholder_dict: placeholder_dict[parent_pipeline] = dict() if parent_stage not in placeholder_dict[parent_pipeline]: placeholder_dict[parent_pipeline][parent_stage] = dict() if None not in [parent_pipeline, parent_stage, task.name]: placeholder_dict[parent_pipeline][parent_stage][str(task.name)] = {'path': str(task.path), 'rts_uid': rts_uid} def unit_state_cb(unit, state): try: logger.debug('Unit %s in state %s' % (unit.uid, unit.state)) if unit.state in rp.FINAL: # Acquire a connection+channel to the rmq server mq_connection = pika.BlockingConnection( pika.ConnectionParameters(host=mq_hostname, port=port)) mq_channel = mq_connection.channel() task = None task = create_task_from_cu(unit, local_prof) transition(obj=task, obj_type='Task', new_state=states.COMPLETED, channel=mq_channel, queue='%s-cb-to-sync' % sid, profiler=local_prof, logger=logger) load_placeholder(task, unit.uid) task_as_dict = json.dumps(task.to_dict()) mq_channel.basic_publish(exchange='', routing_key='%s-completedq-1' % sid, body=task_as_dict # properties=pika.BasicProperties( # make message persistent # delivery_mode = 2, # ) ) logger.info('Pushed task %s with state %s to completed queue %s-completedq-1' % (task.uid, task.state, sid)) mq_connection.close() except KeyboardInterrupt: logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' + 'trying to exit callback thread gracefully...') raise KeyboardInterrupt except Exception, ex: logger.exception('Error in RP callback thread: %s' % ex) umgr = rp.UnitManager(session=rmgr._session) umgr.add_pilots(rmgr.pilot) umgr.register_callback(unit_state_cb) try: while not self._tmgr_terminate.is_set(): body = None try: body = task_queue.get(block=True, timeout=10) except Queue.Empty: # Ignore empty exception, we don't always have new tasks to run pass if body: task_queue.task_done() bulk_tasks = list() bulk_cuds = list() for task in body: t = Task() t.from_dict(task) bulk_tasks.append(t) bulk_cuds.append(create_cud_from_task( t, placeholder_dict, local_prof)) mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=mq_hostname, port=port)) mq_channel = mq_connection.channel() transition(obj=t, obj_type='Task', new_state=states.SUBMITTING, channel=mq_channel, queue='%s-tmgr-to-sync' % sid, profiler=local_prof, logger=logger) mq_connection.close() umgr.submit_units(bulk_cuds) for task in bulk_tasks: mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=mq_hostname, port=port)) mq_channel = mq_connection.channel() transition(obj=task, obj_type='Task', new_state=states.SUBMITTED, channel=mq_channel, queue='%s-tmgr-to-sync' % sid, profiler=local_prof, logger=logger) mq_connection.close() except KeyboardInterrupt as ex: logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' + 'trying to cancel task processor gracefully...') except Exception as ex: logger.exception('%s failed with %s'%(self._uid, ex)) raise EnTKError(ex)
[ "def", "_process_tasks", "(", "self", ",", "task_queue", ",", "rmgr", ",", "logger", ",", "mq_hostname", ",", "port", ",", "local_prof", ",", "sid", ")", ":", "placeholder_dict", "=", "dict", "(", ")", "def", "load_placeholder", "(", "task", ",", "rts_uid", ")", ":", "parent_pipeline", "=", "str", "(", "task", ".", "parent_pipeline", "[", "'name'", "]", ")", "parent_stage", "=", "str", "(", "task", ".", "parent_stage", "[", "'name'", "]", ")", "if", "parent_pipeline", "not", "in", "placeholder_dict", ":", "placeholder_dict", "[", "parent_pipeline", "]", "=", "dict", "(", ")", "if", "parent_stage", "not", "in", "placeholder_dict", "[", "parent_pipeline", "]", ":", "placeholder_dict", "[", "parent_pipeline", "]", "[", "parent_stage", "]", "=", "dict", "(", ")", "if", "None", "not", "in", "[", "parent_pipeline", ",", "parent_stage", ",", "task", ".", "name", "]", ":", "placeholder_dict", "[", "parent_pipeline", "]", "[", "parent_stage", "]", "[", "str", "(", "task", ".", "name", ")", "]", "=", "{", "'path'", ":", "str", "(", "task", ".", "path", ")", ",", "'rts_uid'", ":", "rts_uid", "}", "def", "unit_state_cb", "(", "unit", ",", "state", ")", ":", "try", ":", "logger", ".", "debug", "(", "'Unit %s in state %s'", "%", "(", "unit", ".", "uid", ",", "unit", ".", "state", ")", ")", "if", "unit", ".", "state", "in", "rp", ".", "FINAL", ":", "# Acquire a connection+channel to the rmq server", "mq_connection", "=", "pika", ".", "BlockingConnection", "(", "pika", ".", "ConnectionParameters", "(", "host", "=", "mq_hostname", ",", "port", "=", "port", ")", ")", "mq_channel", "=", "mq_connection", ".", "channel", "(", ")", "task", "=", "None", "task", "=", "create_task_from_cu", "(", "unit", ",", "local_prof", ")", "transition", "(", "obj", "=", "task", ",", "obj_type", "=", "'Task'", ",", "new_state", "=", "states", ".", "COMPLETED", ",", "channel", "=", "mq_channel", ",", "queue", "=", "'%s-cb-to-sync'", "%", "sid", ",", "profiler", "=", "local_prof", ",", "logger", "=", "logger", ")", "load_placeholder", "(", "task", ",", "unit", ".", "uid", ")", "task_as_dict", "=", "json", ".", "dumps", "(", "task", ".", "to_dict", "(", ")", ")", "mq_channel", ".", "basic_publish", "(", "exchange", "=", "''", ",", "routing_key", "=", "'%s-completedq-1'", "%", "sid", ",", "body", "=", "task_as_dict", "# properties=pika.BasicProperties(", "# make message persistent", "# delivery_mode = 2,", "# )", ")", "logger", ".", "info", "(", "'Pushed task %s with state %s to completed queue %s-completedq-1'", "%", "(", "task", ".", "uid", ",", "task", ".", "state", ",", "sid", ")", ")", "mq_connection", ".", "close", "(", ")", "except", "KeyboardInterrupt", ":", "logger", ".", "exception", "(", "'Execution interrupted by user (you probably hit Ctrl+C), '", "+", "'trying to exit callback thread gracefully...'", ")", "raise", "KeyboardInterrupt", "except", "Exception", ",", "ex", ":", "logger", ".", "exception", "(", "'Error in RP callback thread: %s'", "%", "ex", ")", "umgr", "=", "rp", ".", "UnitManager", "(", "session", "=", "rmgr", ".", "_session", ")", "umgr", ".", "add_pilots", "(", "rmgr", ".", "pilot", ")", "umgr", ".", "register_callback", "(", "unit_state_cb", ")", "try", ":", "while", "not", "self", ".", "_tmgr_terminate", ".", "is_set", "(", ")", ":", "body", "=", "None", "try", ":", "body", "=", "task_queue", ".", "get", "(", "block", "=", "True", ",", "timeout", "=", "10", ")", "except", "Queue", ".", "Empty", ":", "# Ignore empty exception, we don't always have new tasks to run", "pass", "if", "body", ":", 
"task_queue", ".", "task_done", "(", ")", "bulk_tasks", "=", "list", "(", ")", "bulk_cuds", "=", "list", "(", ")", "for", "task", "in", "body", ":", "t", "=", "Task", "(", ")", "t", ".", "from_dict", "(", "task", ")", "bulk_tasks", ".", "append", "(", "t", ")", "bulk_cuds", ".", "append", "(", "create_cud_from_task", "(", "t", ",", "placeholder_dict", ",", "local_prof", ")", ")", "mq_connection", "=", "pika", ".", "BlockingConnection", "(", "pika", ".", "ConnectionParameters", "(", "host", "=", "mq_hostname", ",", "port", "=", "port", ")", ")", "mq_channel", "=", "mq_connection", ".", "channel", "(", ")", "transition", "(", "obj", "=", "t", ",", "obj_type", "=", "'Task'", ",", "new_state", "=", "states", ".", "SUBMITTING", ",", "channel", "=", "mq_channel", ",", "queue", "=", "'%s-tmgr-to-sync'", "%", "sid", ",", "profiler", "=", "local_prof", ",", "logger", "=", "logger", ")", "mq_connection", ".", "close", "(", ")", "umgr", ".", "submit_units", "(", "bulk_cuds", ")", "for", "task", "in", "bulk_tasks", ":", "mq_connection", "=", "pika", ".", "BlockingConnection", "(", "pika", ".", "ConnectionParameters", "(", "host", "=", "mq_hostname", ",", "port", "=", "port", ")", ")", "mq_channel", "=", "mq_connection", ".", "channel", "(", ")", "transition", "(", "obj", "=", "task", ",", "obj_type", "=", "'Task'", ",", "new_state", "=", "states", ".", "SUBMITTED", ",", "channel", "=", "mq_channel", ",", "queue", "=", "'%s-tmgr-to-sync'", "%", "sid", ",", "profiler", "=", "local_prof", ",", "logger", "=", "logger", ")", "mq_connection", ".", "close", "(", ")", "except", "KeyboardInterrupt", "as", "ex", ":", "logger", ".", "exception", "(", "'Execution interrupted by user (you probably hit Ctrl+C), '", "+", "'trying to cancel task processor gracefully...'", ")", "except", "Exception", "as", "ex", ":", "logger", ".", "exception", "(", "'%s failed with %s'", "%", "(", "self", ".", "_uid", ",", "ex", ")", ")", "raise", "EnTKError", "(", "ex", ")" ]
**Purpose**: The new thread that gets spawned by the main tmgr process invokes this function. This function receives tasks from 'task_queue' and submits them to the RADICAL Pilot RTS.
[ "**", "Purpose", "**", ":", "The", "new", "thread", "that", "gets", "spawned", "by", "the", "main", "tmgr", "process", "invokes", "this", "function", ".", "This", "function", "receives", "tasks", "from", "task_queue", "and", "submits", "them", "to", "the", "RADICAL", "Pilot", "RTS", "." ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/execman/rp/task_manager.py#L187-L330
train
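The consumer loop in the record above reduces to a drain-batch-submit pattern: block on the intra-process queue with a timeout, then hand the whole batch to the runtime system at once. A minimal, dependency-free sketch of that pattern (the function names here are illustrative, not radical.entk API):

import Queue  # Python 2 stdlib, matching the record above; named 'queue' on Python 3

def drain_and_submit(task_queue, terminate, submit_bulk):
    # Loop until the owner signals shutdown via the 'terminate' Event.
    while not terminate.is_set():
        try:
            body = task_queue.get(block=True, timeout=10)
        except Queue.Empty:
            continue  # no new tasks this round; poll again
        task_queue.task_done()
        submit_bulk(list(body))  # submit the whole batch in one call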
google/dotty
efilter/transforms/infer_type.py
infer_type
def infer_type(expr, scope): """Try to infer the type of x[y] if y is a known value (literal).""" # Do we know what the key even is? if isinstance(expr.key, ast.Literal): key = expr.key.value else: return protocol.AnyType container_type = infer_type(expr.value, scope) try: # Associative types are not subject to scoping rules so we can just # reflect using IAssociative. return associative.reflect(container_type, key) or protocol.AnyType except NotImplementedError: return protocol.AnyType
python
def infer_type(expr, scope): """Try to infer the type of x[y] if y is a known value (literal).""" # Do we know what the key even is? if isinstance(expr.key, ast.Literal): key = expr.key.value else: return protocol.AnyType container_type = infer_type(expr.value, scope) try: # Associative types are not subject to scoping rules so we can just # reflect using IAssociative. return associative.reflect(container_type, key) or protocol.AnyType except NotImplementedError: return protocol.AnyType
[ "def", "infer_type", "(", "expr", ",", "scope", ")", ":", "# Do we know what the key even is?", "if", "isinstance", "(", "expr", ".", "key", ",", "ast", ".", "Literal", ")", ":", "key", "=", "expr", ".", "key", ".", "value", "else", ":", "return", "protocol", ".", "AnyType", "container_type", "=", "infer_type", "(", "expr", ".", "value", ",", "scope", ")", "try", ":", "# Associative types are not subject to scoping rules so we can just", "# reflect using IAssociative.", "return", "associative", ".", "reflect", "(", "container_type", ",", "key", ")", "or", "protocol", ".", "AnyType", "except", "NotImplementedError", ":", "return", "protocol", ".", "AnyType" ]
Try to infer the type of x[y] if y is a known value (literal).
[ "Try", "to", "infer", "the", "type", "of", "x", "[", "y", "]", "if", "y", "is", "a", "known", "value", "(", "literal", ")", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/infer_type.py#L108-L123
train
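The guard above only reflects when the subscript key is a compile-time literal. A rough stand-alone analogue, with a plain dict playing the IAssociative role and a sentinel standing in for protocol.AnyType (names are illustrative):

ANY = object()  # stand-in for protocol.AnyType

def reflect_subscript(container_fields, key_is_literal, key):
    if not key_is_literal:
        return ANY  # unknown key: give up gracefully
    return container_fields.get(key, ANY)

assert reflect_subscript({"pid": int}, True, "pid") is int
assert reflect_subscript({"pid": int}, False, "pid") is ANY  # non-literal key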
google/dotty
efilter/transforms/infer_type.py
infer_type
def infer_type(expr, scope): """Try to infer the type of x.y if y is a known value (literal).""" # Do we know what the member is? if isinstance(expr.member, ast.Literal): member = expr.member.value else: return protocol.AnyType container_type = infer_type(expr.obj, scope) try: # We are not using lexical scope here on purpose - we want to see what # the type of the member is only on the container_type. return structured.reflect(container_type, member) or protocol.AnyType except NotImplementedError: return protocol.AnyType
python
def infer_type(expr, scope): """Try to infer the type of x.y if y is a known value (literal).""" # Do we know what the member is? if isinstance(expr.member, ast.Literal): member = expr.member.value else: return protocol.AnyType container_type = infer_type(expr.obj, scope) try: # We are not using lexical scope here on purpose - we want to see what # the type of the member is only on the container_type. return structured.reflect(container_type, member) or protocol.AnyType except NotImplementedError: return protocol.AnyType
[ "def", "infer_type", "(", "expr", ",", "scope", ")", ":", "# Do we know what the member is?", "if", "isinstance", "(", "expr", ".", "member", ",", "ast", ".", "Literal", ")", ":", "member", "=", "expr", ".", "member", ".", "value", "else", ":", "return", "protocol", ".", "AnyType", "container_type", "=", "infer_type", "(", "expr", ".", "obj", ",", "scope", ")", "try", ":", "# We are not using lexical scope here on purpose - we want to see what", "# the type of the member is only on the container_type.", "return", "structured", ".", "reflect", "(", "container_type", ",", "member", ")", "or", "protocol", ".", "AnyType", "except", "NotImplementedError", ":", "return", "protocol", ".", "AnyType" ]
Try to infer the type of x.y if y is a known value (literal).
[ "Try", "to", "infer", "the", "type", "of", "x", ".", "y", "if", "y", "is", "a", "known", "value", "(", "literal", ")", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/infer_type.py#L127-L142
train
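Member access follows the same shape, but reflection is deliberately limited to the container type itself, with no lexical scope involved. A toy illustration of that restriction (the ProcessType and its members dict are hypothetical):

ANY = object()  # stand-in for protocol.AnyType

class ProcessType(object):
    members = {"name": str, "pid": int}  # what the type exposes for reflection

def reflect_member(container_type, member):
    try:
        return container_type.members.get(member) or ANY
    except AttributeError:  # type exposes no member information
        return ANY

assert reflect_member(ProcessType, "pid") is int
assert reflect_member(ProcessType, "argv") is ANY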
radical-cybertools/radical.entk
src/radical/entk/execman/mock/task_manager.py
TaskManager._tmgr
def _tmgr(self, uid, rmgr, logger, mq_hostname, port, pending_queue, completed_queue): """ **Purpose**: Method to be run by the tmgr process. This method receives a Task from the pending_queue and submits it to the RTS. Currently, it also converts Tasks into CUDs and CUs into (partially described) Tasks. This conversion is necessary since the current RTS is RADICAL Pilot. Once Tasks are recovered from a CU, they are then pushed to the completed_queue. At all state transititons, they are synced (blocking) with the AppManager in the master process. In addition the tmgr also receives heartbeat 'request' msgs from the heartbeat-req queue. It responds with a 'response' message to the 'heartbeart-res' queue. **Details**: The AppManager can re-invoke the tmgr process with this function if the execution of the workflow is still incomplete. There is also population of a dictionary, placeholder_dict, which stores the path of each of the tasks on the remote machine. """ try: def heartbeat_response(mq_channel): try: # Get request from heartbeat-req for heartbeat response hb_method_frame, hb_props, hb_body = mq_channel.basic_get( queue=self._hb_request_q) if hb_body: logger.info('Received heartbeat request') mq_channel.basic_publish(exchange='', routing_key=self._hb_response_q, properties=pika.BasicProperties( correlation_id=hb_props.correlation_id), body='response') logger.info('Sent heartbeat response') mq_channel.basic_ack( delivery_tag=hb_method_frame.delivery_tag) except Exception, ex: logger.exception( 'Failed to respond to heartbeat request, error: %s' % ex) raise local_prof = ru.Profiler( name='radical.entk.%s' % self._uid + '-proc', path=self._path) local_prof.prof('tmgr process started', uid=self._uid) logger.info('Task Manager process started') # Thread should run till terminate condtion is encountered mq_connection = pika.BlockingConnection( pika.ConnectionParameters(host=mq_hostname, port=port)) mq_channel = mq_connection.channel() # Queue for communication between threads of this process task_queue = Queue.Queue() # Start second thread to receive tasks and push to RTS self._rts_runner = threading.Thread(target=self._process_tasks, args=(task_queue, rmgr, logger, mq_hostname, port, local_prof, self._sid)) self._rts_runner.start() local_prof.prof('tmgr infrastructure setup done', uid=uid) last = time.time() while not self._tmgr_terminate.is_set(): try: method_frame, header_frame, body = mq_channel.basic_get( queue=pending_queue[0]) if body: body = json.loads(body) task_queue.put(body) mq_channel.basic_ack( delivery_tag=method_frame.delivery_tag) heartbeat_response(mq_channel) except Exception, ex: logger.exception('Error in task execution: %s' % ex) raise except KeyboardInterrupt: logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' + 'trying to cancel tmgr process gracefully...') except Exception, ex: logger.exception('%s failed with %s'%(self._uid, ex)) raise EnTKError(ex) finally: local_prof.prof('terminating tmgr process', uid=uid) if self._rts_runner: self._rts_runner.join() mq_connection.close() local_prof.close()
python
def _tmgr(self, uid, rmgr, logger, mq_hostname, port, pending_queue, completed_queue): """ **Purpose**: Method to be run by the tmgr process. This method receives a Task from the pending_queue and submits it to the RTS. Currently, it also converts Tasks into CUDs and CUs into (partially described) Tasks. This conversion is necessary since the current RTS is RADICAL Pilot. Once Tasks are recovered from a CU, they are then pushed to the completed_queue. At all state transititons, they are synced (blocking) with the AppManager in the master process. In addition the tmgr also receives heartbeat 'request' msgs from the heartbeat-req queue. It responds with a 'response' message to the 'heartbeart-res' queue. **Details**: The AppManager can re-invoke the tmgr process with this function if the execution of the workflow is still incomplete. There is also population of a dictionary, placeholder_dict, which stores the path of each of the tasks on the remote machine. """ try: def heartbeat_response(mq_channel): try: # Get request from heartbeat-req for heartbeat response hb_method_frame, hb_props, hb_body = mq_channel.basic_get( queue=self._hb_request_q) if hb_body: logger.info('Received heartbeat request') mq_channel.basic_publish(exchange='', routing_key=self._hb_response_q, properties=pika.BasicProperties( correlation_id=hb_props.correlation_id), body='response') logger.info('Sent heartbeat response') mq_channel.basic_ack( delivery_tag=hb_method_frame.delivery_tag) except Exception, ex: logger.exception( 'Failed to respond to heartbeat request, error: %s' % ex) raise local_prof = ru.Profiler( name='radical.entk.%s' % self._uid + '-proc', path=self._path) local_prof.prof('tmgr process started', uid=self._uid) logger.info('Task Manager process started') # Thread should run till terminate condtion is encountered mq_connection = pika.BlockingConnection( pika.ConnectionParameters(host=mq_hostname, port=port)) mq_channel = mq_connection.channel() # Queue for communication between threads of this process task_queue = Queue.Queue() # Start second thread to receive tasks and push to RTS self._rts_runner = threading.Thread(target=self._process_tasks, args=(task_queue, rmgr, logger, mq_hostname, port, local_prof, self._sid)) self._rts_runner.start() local_prof.prof('tmgr infrastructure setup done', uid=uid) last = time.time() while not self._tmgr_terminate.is_set(): try: method_frame, header_frame, body = mq_channel.basic_get( queue=pending_queue[0]) if body: body = json.loads(body) task_queue.put(body) mq_channel.basic_ack( delivery_tag=method_frame.delivery_tag) heartbeat_response(mq_channel) except Exception, ex: logger.exception('Error in task execution: %s' % ex) raise except KeyboardInterrupt: logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' + 'trying to cancel tmgr process gracefully...') except Exception, ex: logger.exception('%s failed with %s'%(self._uid, ex)) raise EnTKError(ex) finally: local_prof.prof('terminating tmgr process', uid=uid) if self._rts_runner: self._rts_runner.join() mq_connection.close() local_prof.close()
[ "def", "_tmgr", "(", "self", ",", "uid", ",", "rmgr", ",", "logger", ",", "mq_hostname", ",", "port", ",", "pending_queue", ",", "completed_queue", ")", ":", "try", ":", "def", "heartbeat_response", "(", "mq_channel", ")", ":", "try", ":", "# Get request from heartbeat-req for heartbeat response", "hb_method_frame", ",", "hb_props", ",", "hb_body", "=", "mq_channel", ".", "basic_get", "(", "queue", "=", "self", ".", "_hb_request_q", ")", "if", "hb_body", ":", "logger", ".", "info", "(", "'Received heartbeat request'", ")", "mq_channel", ".", "basic_publish", "(", "exchange", "=", "''", ",", "routing_key", "=", "self", ".", "_hb_response_q", ",", "properties", "=", "pika", ".", "BasicProperties", "(", "correlation_id", "=", "hb_props", ".", "correlation_id", ")", ",", "body", "=", "'response'", ")", "logger", ".", "info", "(", "'Sent heartbeat response'", ")", "mq_channel", ".", "basic_ack", "(", "delivery_tag", "=", "hb_method_frame", ".", "delivery_tag", ")", "except", "Exception", ",", "ex", ":", "logger", ".", "exception", "(", "'Failed to respond to heartbeat request, error: %s'", "%", "ex", ")", "raise", "local_prof", "=", "ru", ".", "Profiler", "(", "name", "=", "'radical.entk.%s'", "%", "self", ".", "_uid", "+", "'-proc'", ",", "path", "=", "self", ".", "_path", ")", "local_prof", ".", "prof", "(", "'tmgr process started'", ",", "uid", "=", "self", ".", "_uid", ")", "logger", ".", "info", "(", "'Task Manager process started'", ")", "# Thread should run till terminate condtion is encountered", "mq_connection", "=", "pika", ".", "BlockingConnection", "(", "pika", ".", "ConnectionParameters", "(", "host", "=", "mq_hostname", ",", "port", "=", "port", ")", ")", "mq_channel", "=", "mq_connection", ".", "channel", "(", ")", "# Queue for communication between threads of this process", "task_queue", "=", "Queue", ".", "Queue", "(", ")", "# Start second thread to receive tasks and push to RTS", "self", ".", "_rts_runner", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_process_tasks", ",", "args", "=", "(", "task_queue", ",", "rmgr", ",", "logger", ",", "mq_hostname", ",", "port", ",", "local_prof", ",", "self", ".", "_sid", ")", ")", "self", ".", "_rts_runner", ".", "start", "(", ")", "local_prof", ".", "prof", "(", "'tmgr infrastructure setup done'", ",", "uid", "=", "uid", ")", "last", "=", "time", ".", "time", "(", ")", "while", "not", "self", ".", "_tmgr_terminate", ".", "is_set", "(", ")", ":", "try", ":", "method_frame", ",", "header_frame", ",", "body", "=", "mq_channel", ".", "basic_get", "(", "queue", "=", "pending_queue", "[", "0", "]", ")", "if", "body", ":", "body", "=", "json", ".", "loads", "(", "body", ")", "task_queue", ".", "put", "(", "body", ")", "mq_channel", ".", "basic_ack", "(", "delivery_tag", "=", "method_frame", ".", "delivery_tag", ")", "heartbeat_response", "(", "mq_channel", ")", "except", "Exception", ",", "ex", ":", "logger", ".", "exception", "(", "'Error in task execution: %s'", "%", "ex", ")", "raise", "except", "KeyboardInterrupt", ":", "logger", ".", "exception", "(", "'Execution interrupted by user (you probably hit Ctrl+C), '", "+", "'trying to cancel tmgr process gracefully...'", ")", "except", "Exception", ",", "ex", ":", "logger", ".", "exception", "(", "'%s failed with %s'", "%", "(", "self", ".", "_uid", ",", "ex", ")", ")", "raise", "EnTKError", "(", "ex", ")", "finally", ":", "local_prof", ".", "prof", "(", "'terminating tmgr process'", ",", "uid", "=", "uid", ")", "if", "self", ".", "_rts_runner", ":", "self", ".", 
"_rts_runner", ".", "join", "(", ")", "mq_connection", ".", "close", "(", ")", "local_prof", ".", "close", "(", ")" ]
**Purpose**: Method to be run by the tmgr process. This method receives a Task from the pending_queue and submits it to the RTS. Currently, it also converts Tasks into CUDs and CUs into (partially described) Tasks. This conversion is necessary since the current RTS is RADICAL Pilot. Once Tasks are recovered from a CU, they are then pushed to the completed_queue. At all state transitions, they are synced (blocking) with the AppManager in the master process. In addition, the tmgr receives heartbeat 'request' messages from the heartbeat-req queue and responds with a 'response' message on the 'heartbeat-res' queue. **Details**: The AppManager can re-invoke the tmgr process with this function if the execution of the workflow is still incomplete. The method also populates a dictionary, placeholder_dict, which stores the path of each task on the remote machine.
[ "**", "Purpose", "**", ":", "Method", "to", "be", "run", "by", "the", "tmgr", "process", ".", "This", "method", "receives", "a", "Task", "from", "the", "pending_queue", "and", "submits", "it", "to", "the", "RTS", ".", "Currently", "it", "also", "converts", "Tasks", "into", "CUDs", "and", "CUs", "into", "(", "partially", "described", ")", "Tasks", ".", "This", "conversion", "is", "necessary", "since", "the", "current", "RTS", "is", "RADICAL", "Pilot", ".", "Once", "Tasks", "are", "recovered", "from", "a", "CU", "they", "are", "then", "pushed", "to", "the", "completed_queue", ".", "At", "all", "state", "transititons", "they", "are", "synced", "(", "blocking", ")", "with", "the", "AppManager", "in", "the", "master", "process", "." ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/execman/mock/task_manager.py#L59-L171
train
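The heartbeat handshake buried in _tmgr above is self-contained enough to factor out: poll the request queue, echo the correlation id back on the response queue, then ack the request. A sketch using the same pika calls the record already relies on (the queue names are caller-supplied, and the function name is illustrative):

import pika

def answer_heartbeat(mq_channel, request_q, response_q):
    method_frame, props, body = mq_channel.basic_get(queue=request_q)
    if not body:
        return False  # no heartbeat request pending
    mq_channel.basic_publish(
        exchange='',
        routing_key=response_q,
        properties=pika.BasicProperties(correlation_id=props.correlation_id),
        body='response')
    mq_channel.basic_ack(delivery_tag=method_frame.delivery_tag)
    return True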
radical-cybertools/radical.entk
src/radical/entk/execman/mock/task_manager.py
TaskManager.start_manager
def start_manager(self): """ **Purpose**: Method to start the tmgr process. The tmgr function is not to be accessed directly. The function is started in a separate thread using this method. """ if not self._tmgr_process: try: self._prof.prof('creating tmgr process', uid=self._uid) self._tmgr_terminate = Event() self._tmgr_process = Process(target=self._tmgr, name='task-manager', args=( self._uid, self._rmgr, self._logger, self._mq_hostname, self._port, self._pending_queue, self._completed_queue) ) self._logger.info('Starting task manager process') self._prof.prof('starting tmgr process', uid=self._uid) self._tmgr_process.start() return True except Exception, ex: self._logger.exception('Task manager not started, error: %s' % ex) self.terminate_manager() raise else: self._logger.warn( 'tmgr process already running, but attempted to restart!')
python
def start_manager(self): """ **Purpose**: Method to start the tmgr process. The tmgr function is not to be accessed directly. The function is started in a separate thread using this method. """ if not self._tmgr_process: try: self._prof.prof('creating tmgr process', uid=self._uid) self._tmgr_terminate = Event() self._tmgr_process = Process(target=self._tmgr, name='task-manager', args=( self._uid, self._rmgr, self._logger, self._mq_hostname, self._port, self._pending_queue, self._completed_queue) ) self._logger.info('Starting task manager process') self._prof.prof('starting tmgr process', uid=self._uid) self._tmgr_process.start() return True except Exception, ex: self._logger.exception('Task manager not started, error: %s' % ex) self.terminate_manager() raise else: self._logger.warn( 'tmgr process already running, but attempted to restart!')
[ "def", "start_manager", "(", "self", ")", ":", "if", "not", "self", ".", "_tmgr_process", ":", "try", ":", "self", ".", "_prof", ".", "prof", "(", "'creating tmgr process'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_tmgr_terminate", "=", "Event", "(", ")", "self", ".", "_tmgr_process", "=", "Process", "(", "target", "=", "self", ".", "_tmgr", ",", "name", "=", "'task-manager'", ",", "args", "=", "(", "self", ".", "_uid", ",", "self", ".", "_rmgr", ",", "self", ".", "_logger", ",", "self", ".", "_mq_hostname", ",", "self", ".", "_port", ",", "self", ".", "_pending_queue", ",", "self", ".", "_completed_queue", ")", ")", "self", ".", "_logger", ".", "info", "(", "'Starting task manager process'", ")", "self", ".", "_prof", ".", "prof", "(", "'starting tmgr process'", ",", "uid", "=", "self", ".", "_uid", ")", "self", ".", "_tmgr_process", ".", "start", "(", ")", "return", "True", "except", "Exception", ",", "ex", ":", "self", ".", "_logger", ".", "exception", "(", "'Task manager not started, error: %s'", "%", "ex", ")", "self", ".", "terminate_manager", "(", ")", "raise", "else", ":", "self", ".", "_logger", ".", "warn", "(", "'tmgr process already running, but attempted to restart!'", ")" ]
**Purpose**: Method to start the tmgr process. The tmgr function is not to be accessed directly. It is started in a separate process using this method.
[ "**", "Purpose", "**", ":", "Method", "to", "start", "the", "tmgr", "process", ".", "The", "tmgr", "function", "is", "not", "to", "be", "accessed", "directly", ".", "The", "function", "is", "started", "in", "a", "separate", "thread", "using", "this", "method", "." ]
945f6c93c9a62db90ad191b306418d5c1cdd9d24
https://github.com/radical-cybertools/radical.entk/blob/945f6c93c9a62db90ad191b306418d5c1cdd9d24/src/radical/entk/execman/mock/task_manager.py#L277-L317
train
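Stripped of logging and profiling, start_manager is a start-once guard around a multiprocessing.Process plus a shared terminate Event. The bare pattern, with illustrative names:

from multiprocessing import Event, Process

class OneShotManager(object):
    def __init__(self):
        self._proc = None
        self._terminate = None

    def start(self, target):
        if self._proc:  # already running: refuse to respawn
            return False
        self._terminate = Event()
        self._proc = Process(target=target, args=(self._terminate,))
        self._proc.start()
        return True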
google/dotty
efilter/parsers/common/grammar.py
keyword
def keyword(tokens, expected): """Case-insensitive keyword match.""" try: token = next(iter(tokens)) except StopIteration: return if token and token.name == "symbol" and token.value.lower() == expected: return TokenMatch(None, token.value, (token,))
python
def keyword(tokens, expected): """Case-insensitive keyword match.""" try: token = next(iter(tokens)) except StopIteration: return if token and token.name == "symbol" and token.value.lower() == expected: return TokenMatch(None, token.value, (token,))
[ "def", "keyword", "(", "tokens", ",", "expected", ")", ":", "try", ":", "token", "=", "next", "(", "iter", "(", "tokens", ")", ")", "except", "StopIteration", ":", "return", "if", "token", "and", "token", ".", "name", "==", "\"symbol\"", "and", "token", ".", "value", ".", "lower", "(", ")", "==", "expected", ":", "return", "TokenMatch", "(", "None", ",", "token", ".", "value", ",", "(", "token", ",", ")", ")" ]
Case-insensitive keyword match.
[ "Case", "-", "insensitive", "keyword", "match", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/common/grammar.py#L234-L242
train
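A usage sketch for the matcher above, assuming the efilter package is installed; the namedtuple below merely mimics the two fields (name, value) the function actually reads:

import collections
from efilter.parsers.common.grammar import keyword

Tok = collections.namedtuple("Tok", "name value start end")

assert keyword([Tok("symbol", "SELECT", 0, 6)], "select").value == "SELECT"
assert keyword([Tok("literal", 42, 0, 2)], "select") is None  # wrong token kind
assert keyword([], "select") is None                          # empty stream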
google/dotty
efilter/parsers/common/grammar.py
multi_keyword
def multi_keyword(tokens, keyword_parts): """Match a case-insensitive keyword consisting of multiple tokens.""" tokens = iter(tokens) matched_tokens = [] limit = len(keyword_parts) for idx in six.moves.range(limit): try: token = next(tokens) except StopIteration: return if (not token or token.name != "symbol" or token.value.lower() != keyword_parts[idx]): return matched_tokens.append(token) return TokenMatch(None, token.value, matched_tokens)
python
def multi_keyword(tokens, keyword_parts): """Match a case-insensitive keyword consisting of multiple tokens.""" tokens = iter(tokens) matched_tokens = [] limit = len(keyword_parts) for idx in six.moves.range(limit): try: token = next(tokens) except StopIteration: return if (not token or token.name != "symbol" or token.value.lower() != keyword_parts[idx]): return matched_tokens.append(token) return TokenMatch(None, token.value, matched_tokens)
[ "def", "multi_keyword", "(", "tokens", ",", "keyword_parts", ")", ":", "tokens", "=", "iter", "(", "tokens", ")", "matched_tokens", "=", "[", "]", "limit", "=", "len", "(", "keyword_parts", ")", "for", "idx", "in", "six", ".", "moves", ".", "range", "(", "limit", ")", ":", "try", ":", "token", "=", "next", "(", "tokens", ")", "except", "StopIteration", ":", "return", "if", "(", "not", "token", "or", "token", ".", "name", "!=", "\"symbol\"", "or", "token", ".", "value", ".", "lower", "(", ")", "!=", "keyword_parts", "[", "idx", "]", ")", ":", "return", "matched_tokens", ".", "append", "(", "token", ")", "return", "TokenMatch", "(", "None", ",", "token", ".", "value", ",", "matched_tokens", ")" ]
Match a case-insensitive keyword consisting of multiple tokens.
[ "Match", "a", "case", "-", "insensitive", "keyword", "consisting", "of", "multiple", "tokens", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/common/grammar.py#L245-L263
train
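multi_keyword consumes exactly one symbol token per keyword part, which is how DottySQL handles multi-word SQL keywords such as "order by". A quick check, with the same stand-in token shape redefined here for self-containment:

import collections
from efilter.parsers.common.grammar import multi_keyword

Tok = collections.namedtuple("Tok", "name value start end")
toks = [Tok("symbol", "ORDER", 0, 5), Tok("symbol", "by", 6, 8)]

assert multi_keyword(toks, ("order", "by")).value == "by"  # value: last token
assert multi_keyword(toks, ("group", "by")) is None        # first part differs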
google/dotty
efilter/parsers/common/grammar.py
prefix
def prefix(tokens, operator_table): """Match a prefix of an operator.""" operator, matched_tokens = operator_table.prefix.match(tokens) if operator: return TokenMatch(operator, None, matched_tokens)
python
def prefix(tokens, operator_table): """Match a prefix of an operator.""" operator, matched_tokens = operator_table.prefix.match(tokens) if operator: return TokenMatch(operator, None, matched_tokens)
[ "def", "prefix", "(", "tokens", ",", "operator_table", ")", ":", "operator", ",", "matched_tokens", "=", "operator_table", ".", "prefix", ".", "match", "(", "tokens", ")", "if", "operator", ":", "return", "TokenMatch", "(", "operator", ",", "None", ",", "matched_tokens", ")" ]
Match a prefix of an operator.
[ "Match", "a", "prefix", "of", "an", "operator", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/common/grammar.py#L281-L285
train
google/dotty
efilter/parsers/common/grammar.py
infix
def infix(tokens, operator_table): """Match an infix of an operator.""" operator, matched_tokens = operator_table.infix.match(tokens) if operator: return TokenMatch(operator, None, matched_tokens)
python
def infix(tokens, operator_table): """Match an infix of an operator.""" operator, matched_tokens = operator_table.infix.match(tokens) if operator: return TokenMatch(operator, None, matched_tokens)
[ "def", "infix", "(", "tokens", ",", "operator_table", ")", ":", "operator", ",", "matched_tokens", "=", "operator_table", ".", "infix", ".", "match", "(", "tokens", ")", "if", "operator", ":", "return", "TokenMatch", "(", "operator", ",", "None", ",", "matched_tokens", ")" ]
Match an infix of an operator.
[ "Match", "an", "infix", "of", "an", "operator", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/common/grammar.py#L288-L292
train
google/dotty
efilter/parsers/common/grammar.py
suffix
def suffix(tokens, operator_table): """Match a suffix of an operator.""" operator, matched_tokens = operator_table.suffix.match(tokens) if operator: return TokenMatch(operator, None, matched_tokens)
python
def suffix(tokens, operator_table): """Match a suffix of an operator.""" operator, matched_tokens = operator_table.suffix.match(tokens) if operator: return TokenMatch(operator, None, matched_tokens)
[ "def", "suffix", "(", "tokens", ",", "operator_table", ")", ":", "operator", ",", "matched_tokens", "=", "operator_table", ".", "suffix", ".", "match", "(", "tokens", ")", "if", "operator", ":", "return", "TokenMatch", "(", "operator", ",", "None", ",", "matched_tokens", ")" ]
Match a suffix of an operator.
[ "Match", "a", "suffix", "of", "an", "operator", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/common/grammar.py#L295-L299
train
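The three dispatchers above (prefix, infix, suffix) assume nothing about the operator table beyond sub-tables that expose match(tokens) -> (operator, matched_tokens). A stub table makes that contract concrete (the stub classes are illustrative, not efilter API):

import collections
from efilter.parsers.common.grammar import infix

Tok = collections.namedtuple("Tok", "name value start end")

class StubSubTable(object):  # minimal match() contract
    def __init__(self, op):
        self.op = op
    def match(self, tokens):
        toks = tuple(tokens)[:1]
        if toks and toks[0].value == self.op:
            return self.op, toks  # 'operator' may be any truthy object
        return None, None

class StubTable(object):
    prefix = StubSubTable("-")
    infix = StubSubTable("+")
    suffix = StubSubTable("]")

assert infix([Tok("symbol", "+", 0, 1)], StubTable()).operator == "+"
assert infix([Tok("symbol", "*", 0, 1)], StubTable()) is None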
google/dotty
efilter/parsers/common/grammar.py
token_name
def token_name(tokens, expected): """Match a token name (type).""" try: token = next(iter(tokens)) except StopIteration: return if token and token.name == expected: return TokenMatch(None, token.value, (token,))
python
def token_name(tokens, expected): """Match a token name (type).""" try: token = next(iter(tokens)) except StopIteration: return if token and token.name == expected: return TokenMatch(None, token.value, (token,))
[ "def", "token_name", "(", "tokens", ",", "expected", ")", ":", "try", ":", "token", "=", "next", "(", "iter", "(", "tokens", ")", ")", "except", "StopIteration", ":", "return", "if", "token", "and", "token", ".", "name", "==", "expected", ":", "return", "TokenMatch", "(", "None", ",", "token", ".", "value", ",", "(", "token", ",", ")", ")" ]
Match a token name (type).
[ "Match", "a", "token", "name", "(", "type", ")", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/common/grammar.py#L302-L310
train
google/dotty
efilter/parsers/common/grammar.py
match_tokens
def match_tokens(expected_tokens): """Generate a grammar function that will match 'expected_tokens' only.""" if isinstance(expected_tokens, Token): # Match a single token. def _grammar_func(tokens): try: next_token = next(iter(tokens)) except StopIteration: return if next_token == expected_tokens: return TokenMatch(None, next_token.value, (next_token,)) elif isinstance(expected_tokens, tuple): # Match multiple tokens. match_len = len(expected_tokens) def _grammar_func(tokens): upcoming = tuple(itertools.islice(tokens, match_len)) if upcoming == expected_tokens: return TokenMatch(None, None, upcoming) else: raise TypeError( "'expected_tokens' must be an instance of Token or a tuple " "thereof. Got %r." % expected_tokens) return _grammar_func
python
def match_tokens(expected_tokens): """Generate a grammar function that will match 'expected_tokens' only.""" if isinstance(expected_tokens, Token): # Match a single token. def _grammar_func(tokens): try: next_token = next(iter(tokens)) except StopIteration: return if next_token == expected_tokens: return TokenMatch(None, next_token.value, (next_token,)) elif isinstance(expected_tokens, tuple): # Match multiple tokens. match_len = len(expected_tokens) def _grammar_func(tokens): upcoming = tuple(itertools.islice(tokens, match_len)) if upcoming == expected_tokens: return TokenMatch(None, None, upcoming) else: raise TypeError( "'expected_tokens' must be an instance of Token or a tuple " "thereof. Got %r." % expected_tokens) return _grammar_func
[ "def", "match_tokens", "(", "expected_tokens", ")", ":", "if", "isinstance", "(", "expected_tokens", ",", "Token", ")", ":", "# Match a single token.", "def", "_grammar_func", "(", "tokens", ")", ":", "try", ":", "next_token", "=", "next", "(", "iter", "(", "tokens", ")", ")", "except", "StopIteration", ":", "return", "if", "next_token", "==", "expected_tokens", ":", "return", "TokenMatch", "(", "None", ",", "next_token", ".", "value", ",", "(", "next_token", ",", ")", ")", "elif", "isinstance", "(", "expected_tokens", ",", "tuple", ")", ":", "# Match multiple tokens.", "match_len", "=", "len", "(", "expected_tokens", ")", "def", "_grammar_func", "(", "tokens", ")", ":", "upcoming", "=", "tuple", "(", "itertools", ".", "islice", "(", "tokens", ",", "match_len", ")", ")", "if", "upcoming", "==", "expected_tokens", ":", "return", "TokenMatch", "(", "None", ",", "None", ",", "upcoming", ")", "else", ":", "raise", "TypeError", "(", "\"'expected_tokens' must be an instance of Token or a tuple \"", "\"thereof. Got %r.\"", "%", "expected_tokens", ")", "return", "_grammar_func" ]
Generate a grammar function that will match 'expected_tokens' only.
[ "Generate", "a", "grammar", "function", "that", "will", "match", "expected_tokens", "only", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/common/grammar.py#L313-L338
train
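match_tokens is a small factory: a single Token yields an equality matcher, a tuple yields a sequence matcher. The single-token branch needs the module's own Token type (it is an isinstance check), so this sketch only exercises the tuple branch, with field-compatible stand-ins:

import collections
from efilter.parsers.common.grammar import match_tokens

Tok = collections.namedtuple("Tok", "name value start end")

pair = (Tok("symbol", "order", 0, 5), Tok("symbol", "by", 6, 8))
match_pair = match_tokens(pair)

assert match_pair(list(pair)) is not None                        # exact sequence
assert match_pair([pair[0], Tok("symbol", "asc", 9, 12)]) is None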
bradmontgomery/django-redis-metrics
redis_metrics/utils.py
set_metric
def set_metric(slug, value, category=None, expire=None, date=None): """Create/Increment a metric.""" get_r().set_metric(slug, value, category=category, expire=expire, date=date)
python
def set_metric(slug, value, category=None, expire=None, date=None): """Create/Increment a metric.""" get_r().set_metric(slug, value, category=category, expire=expire, date=date)
[ "def", "set_metric", "(", "slug", ",", "value", ",", "category", "=", "None", ",", "expire", "=", "None", ",", "date", "=", "None", ")", ":", "get_r", "(", ")", ".", "set_metric", "(", "slug", ",", "value", ",", "category", "=", "category", ",", "expire", "=", "expire", ",", "date", "=", "date", ")" ]
Create/Increment a metric.
[ "Create", "/", "Increment", "a", "metric", "." ]
2c92332920113d28c39234b949aa496b39a091d1
https://github.com/bradmontgomery/django-redis-metrics/blob/2c92332920113d28c39234b949aa496b39a091d1/redis_metrics/utils.py#L18-L20
train
bradmontgomery/django-redis-metrics
redis_metrics/utils.py
metric
def metric(slug, num=1, category=None, expire=None, date=None): """Create/Increment a metric.""" get_r().metric(slug, num=num, category=category, expire=expire, date=date)
python
def metric(slug, num=1, category=None, expire=None, date=None): """Create/Increment a metric.""" get_r().metric(slug, num=num, category=category, expire=expire, date=date)
[ "def", "metric", "(", "slug", ",", "num", "=", "1", ",", "category", "=", "None", ",", "expire", "=", "None", ",", "date", "=", "None", ")", ":", "get_r", "(", ")", ".", "metric", "(", "slug", ",", "num", "=", "num", ",", "category", "=", "category", ",", "expire", "=", "expire", ",", "date", "=", "date", ")" ]
Create/Increment a metric.
[ "Create", "/", "Increment", "a", "metric", "." ]
2c92332920113d28c39234b949aa496b39a091d1
https://github.com/bradmontgomery/django-redis-metrics/blob/2c92332920113d28c39234b949aa496b39a091d1/redis_metrics/utils.py#L23-L25
train
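Typical call sites for the two helpers above, e.g. from a Django view or signal handler (the handler name and slugs are illustrative):

from redis_metrics.utils import metric, set_metric

def on_user_signup(user):
    # Counter-style: bump today's 'new-users' count by one.
    metric('new-users', num=1, category='Accounts')
    # Set-style: overwrite the stored value for the slug.
    set_metric('latest-uid', user.id, category='Accounts')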
google/dotty
efilter/parsers/dottysql/parser.py
Parser.expression
def expression(self, previous_precedence=0): """An expression is an atom or an infix expression. Grammar (sort of, actually a precedence-climbing parser): expression = atom [ binary_operator expression ] . Args: previous_precedence: What operator precedence should we start with? """ lhs = self.atom() return self.operator(lhs, previous_precedence)
python
def expression(self, previous_precedence=0): """An expression is an atom or an infix expression. Grammar (sort of, actually a precedence-climbing parser): expression = atom [ binary_operator expression ] . Args: previous_precedence: What operator precedence should we start with? """ lhs = self.atom() return self.operator(lhs, previous_precedence)
[ "def", "expression", "(", "self", ",", "previous_precedence", "=", "0", ")", ":", "lhs", "=", "self", ".", "atom", "(", ")", "return", "self", ".", "operator", "(", "lhs", ",", "previous_precedence", ")" ]
An expression is an atom or an infix expression. Grammar (sort of, actually a precedence-climbing parser): expression = atom [ binary_operator expression ] . Args: previous_precedence: What operator precedence should we start with?
[ "An", "expression", "is", "an", "atom", "or", "an", "infix", "expression", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/dottysql/parser.py#L143-L154
train
google/dotty
efilter/parsers/dottysql/parser.py
Parser.atom
def atom(self): """Parse an atom, which is most things. Grammar: atom = [ prefix ] ( select_expression | any_expression | func_application | let_expr | var | literal | list | "(" expression ")" ) . """ # Parameter replacement with literals. if self.tokens.accept(grammar.param): return self.param() # Let expressions (let(x = 5, y = 10) x + y) if self.tokens.accept(grammar.let): return self.let() # At the top level, we try to see if we are recursing into an SQL query. if self.tokens.accept(grammar.select): return self.select() # A SELECT query can also start with 'ANY'. if self.tokens.accept(grammar.select_any): return self.select_any() # Explicitly reject any keywords from SQL other than SELECT and ANY. # If we don't do this they will match as valid symbols (variables) # and that might be confusing to the user. self.tokens.reject(grammar.sql_keyword) # Match if-else before other things that consume symbols. if self.tokens.accept(grammar.if_if): return self.if_if() # Operators must be matched first because the same symbols could also # be vars or applications. if self.tokens.accept(grammar.prefix): operator = self.tokens.matched.operator start = self.tokens.matched.start expr = self.expression(operator.precedence) return operator.handler(expr, start=start, end=expr.end, source=self.original) if self.tokens.accept(grammar.literal): return ast.Literal(self.tokens.matched.value, source=self.original, start=self.tokens.matched.start, end=self.tokens.matched.end) # Match builtin pseudo-functions before functions and vars to prevent # overrides. if self.tokens.accept(grammar.builtin): return self.builtin(self.tokens.matched.value) # Match applications before vars, because obviously. if self.tokens.accept(grammar.application): return self.application( ast.Var(self.tokens.matched.value, source=self.original, start=self.tokens.matched.start, end=self.tokens.matched.first.end)) if self.tokens.accept(common_grammar.symbol): return ast.Var(self.tokens.matched.value, source=self.original, start=self.tokens.matched.start, end=self.tokens.matched.end) if self.tokens.accept(common_grammar.lparen): # Parens will contain one or more expressions. If there are several # expressions, separated by commas, then they are a repeated value. # # Unlike lists, repeated values must all be of the same type, # otherwise evaluation of the query will fail at runtime (or # type-check time, for simple cases.) start = self.tokens.matched.start expressions = [self.expression()] while self.tokens.accept(common_grammar.comma): expressions.append(self.expression()) self.tokens.expect(common_grammar.rparen) if len(expressions) == 1: return expressions[0] else: return ast.Repeat(*expressions, source=self.original, start=start, end=self.tokens.matched.end) if self.tokens.accept(common_grammar.lbracket): return self.list() # We've run out of things we know the next atom could be. If there is # still input left then it's illegal syntax. If there is nothing then # the input cuts off when we still need an atom. Either is an error. if self.tokens.peek(0): return self.error( "Was not expecting %r here." % self.tokens.peek(0).name, start_token=self.tokens.peek(0)) else: return self.error("Unexpected end of input.")
python
def atom(self): """Parse an atom, which is most things. Grammar: atom = [ prefix ] ( select_expression | any_expression | func_application | let_expr | var | literal | list | "(" expression ")" ) . """ # Parameter replacement with literals. if self.tokens.accept(grammar.param): return self.param() # Let expressions (let(x = 5, y = 10) x + y) if self.tokens.accept(grammar.let): return self.let() # At the top level, we try to see if we are recursing into an SQL query. if self.tokens.accept(grammar.select): return self.select() # A SELECT query can also start with 'ANY'. if self.tokens.accept(grammar.select_any): return self.select_any() # Explicitly reject any keywords from SQL other than SELECT and ANY. # If we don't do this they will match as valid symbols (variables) # and that might be confusing to the user. self.tokens.reject(grammar.sql_keyword) # Match if-else before other things that consume symbols. if self.tokens.accept(grammar.if_if): return self.if_if() # Operators must be matched first because the same symbols could also # be vars or applications. if self.tokens.accept(grammar.prefix): operator = self.tokens.matched.operator start = self.tokens.matched.start expr = self.expression(operator.precedence) return operator.handler(expr, start=start, end=expr.end, source=self.original) if self.tokens.accept(grammar.literal): return ast.Literal(self.tokens.matched.value, source=self.original, start=self.tokens.matched.start, end=self.tokens.matched.end) # Match builtin pseudo-functions before functions and vars to prevent # overrides. if self.tokens.accept(grammar.builtin): return self.builtin(self.tokens.matched.value) # Match applications before vars, because obviously. if self.tokens.accept(grammar.application): return self.application( ast.Var(self.tokens.matched.value, source=self.original, start=self.tokens.matched.start, end=self.tokens.matched.first.end)) if self.tokens.accept(common_grammar.symbol): return ast.Var(self.tokens.matched.value, source=self.original, start=self.tokens.matched.start, end=self.tokens.matched.end) if self.tokens.accept(common_grammar.lparen): # Parens will contain one or more expressions. If there are several # expressions, separated by commas, then they are a repeated value. # # Unlike lists, repeated values must all be of the same type, # otherwise evaluation of the query will fail at runtime (or # type-check time, for simple cases.) start = self.tokens.matched.start expressions = [self.expression()] while self.tokens.accept(common_grammar.comma): expressions.append(self.expression()) self.tokens.expect(common_grammar.rparen) if len(expressions) == 1: return expressions[0] else: return ast.Repeat(*expressions, source=self.original, start=start, end=self.tokens.matched.end) if self.tokens.accept(common_grammar.lbracket): return self.list() # We've run out of things we know the next atom could be. If there is # still input left then it's illegal syntax. If there is nothing then # the input cuts off when we still need an atom. Either is an error. if self.tokens.peek(0): return self.error( "Was not expecting %r here." % self.tokens.peek(0).name, start_token=self.tokens.peek(0)) else: return self.error("Unexpected end of input.")
[ "def", "atom", "(", "self", ")", ":", "# Parameter replacement with literals.", "if", "self", ".", "tokens", ".", "accept", "(", "grammar", ".", "param", ")", ":", "return", "self", ".", "param", "(", ")", "# Let expressions (let(x = 5, y = 10) x + y)", "if", "self", ".", "tokens", ".", "accept", "(", "grammar", ".", "let", ")", ":", "return", "self", ".", "let", "(", ")", "# At the top level, we try to see if we are recursing into an SQL query.", "if", "self", ".", "tokens", ".", "accept", "(", "grammar", ".", "select", ")", ":", "return", "self", ".", "select", "(", ")", "# A SELECT query can also start with 'ANY'.", "if", "self", ".", "tokens", ".", "accept", "(", "grammar", ".", "select_any", ")", ":", "return", "self", ".", "select_any", "(", ")", "# Explicitly reject any keywords from SQL other than SELECT and ANY.", "# If we don't do this they will match as valid symbols (variables)", "# and that might be confusing to the user.", "self", ".", "tokens", ".", "reject", "(", "grammar", ".", "sql_keyword", ")", "# Match if-else before other things that consume symbols.", "if", "self", ".", "tokens", ".", "accept", "(", "grammar", ".", "if_if", ")", ":", "return", "self", ".", "if_if", "(", ")", "# Operators must be matched first because the same symbols could also", "# be vars or applications.", "if", "self", ".", "tokens", ".", "accept", "(", "grammar", ".", "prefix", ")", ":", "operator", "=", "self", ".", "tokens", ".", "matched", ".", "operator", "start", "=", "self", ".", "tokens", ".", "matched", ".", "start", "expr", "=", "self", ".", "expression", "(", "operator", ".", "precedence", ")", "return", "operator", ".", "handler", "(", "expr", ",", "start", "=", "start", ",", "end", "=", "expr", ".", "end", ",", "source", "=", "self", ".", "original", ")", "if", "self", ".", "tokens", ".", "accept", "(", "grammar", ".", "literal", ")", ":", "return", "ast", ".", "Literal", "(", "self", ".", "tokens", ".", "matched", ".", "value", ",", "source", "=", "self", ".", "original", ",", "start", "=", "self", ".", "tokens", ".", "matched", ".", "start", ",", "end", "=", "self", ".", "tokens", ".", "matched", ".", "end", ")", "# Match builtin pseudo-functions before functions and vars to prevent", "# overrides.", "if", "self", ".", "tokens", ".", "accept", "(", "grammar", ".", "builtin", ")", ":", "return", "self", ".", "builtin", "(", "self", ".", "tokens", ".", "matched", ".", "value", ")", "# Match applications before vars, because obviously.", "if", "self", ".", "tokens", ".", "accept", "(", "grammar", ".", "application", ")", ":", "return", "self", ".", "application", "(", "ast", ".", "Var", "(", "self", ".", "tokens", ".", "matched", ".", "value", ",", "source", "=", "self", ".", "original", ",", "start", "=", "self", ".", "tokens", ".", "matched", ".", "start", ",", "end", "=", "self", ".", "tokens", ".", "matched", ".", "first", ".", "end", ")", ")", "if", "self", ".", "tokens", ".", "accept", "(", "common_grammar", ".", "symbol", ")", ":", "return", "ast", ".", "Var", "(", "self", ".", "tokens", ".", "matched", ".", "value", ",", "source", "=", "self", ".", "original", ",", "start", "=", "self", ".", "tokens", ".", "matched", ".", "start", ",", "end", "=", "self", ".", "tokens", ".", "matched", ".", "end", ")", "if", "self", ".", "tokens", ".", "accept", "(", "common_grammar", ".", "lparen", ")", ":", "# Parens will contain one or more expressions. 
If there are several", "# expressions, separated by commas, then they are a repeated value.", "#", "# Unlike lists, repeated values must all be of the same type,", "# otherwise evaluation of the query will fail at runtime (or", "# type-check time, for simple cases.)", "start", "=", "self", ".", "tokens", ".", "matched", ".", "start", "expressions", "=", "[", "self", ".", "expression", "(", ")", "]", "while", "self", ".", "tokens", ".", "accept", "(", "common_grammar", ".", "comma", ")", ":", "expressions", ".", "append", "(", "self", ".", "expression", "(", ")", ")", "self", ".", "tokens", ".", "expect", "(", "common_grammar", ".", "rparen", ")", "if", "len", "(", "expressions", ")", "==", "1", ":", "return", "expressions", "[", "0", "]", "else", ":", "return", "ast", ".", "Repeat", "(", "*", "expressions", ",", "source", "=", "self", ".", "original", ",", "start", "=", "start", ",", "end", "=", "self", ".", "tokens", ".", "matched", ".", "end", ")", "if", "self", ".", "tokens", ".", "accept", "(", "common_grammar", ".", "lbracket", ")", ":", "return", "self", ".", "list", "(", ")", "# We've run out of things we know the next atom could be. If there is", "# still input left then it's illegal syntax. If there is nothing then", "# the input cuts off when we still need an atom. Either is an error.", "if", "self", ".", "tokens", ".", "peek", "(", "0", ")", ":", "return", "self", ".", "error", "(", "\"Was not expecting %r here.\"", "%", "self", ".", "tokens", ".", "peek", "(", "0", ")", ".", "name", ",", "start_token", "=", "self", ".", "tokens", ".", "peek", "(", "0", ")", ")", "else", ":", "return", "self", ".", "error", "(", "\"Unexpected end of input.\"", ")" ]
Parse an atom, which is most things. Grammar: atom = [ prefix ] ( select_expression | any_expression | func_application | let_expr | var | literal | list | "(" expression ")" ) .
[ "Parse", "an", "atom", "which", "is", "most", "things", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/dottysql/parser.py#L156-L259
train
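One detail of atom() worth pulling out: a parenthesized, comma-separated group collapses to the inner expression when there is exactly one, and becomes a Repeat node otherwise. Distilled with stand-in types (not the real efilter ast classes):

class Repeat(object):  # stand-in for ast.Repeat
    def __init__(self, *values):
        self.values = values

def close_paren(expressions):
    if len(expressions) == 1:
        return expressions[0]      # parens were just grouping
    return Repeat(*expressions)    # (a, b, c) is a repeated value

assert close_paren([42]) == 42
assert isinstance(close_paren([1, 2, 3]), Repeat)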
google/dotty
efilter/parsers/dottysql/parser.py
Parser.accept_operator
def accept_operator(self, precedence): """Accept the next binary operator only if it's of higher precedence.""" match = grammar.infix(self.tokens) if not match: return if match.operator.precedence < precedence: return # The next thing is an operator that we want. Now match it for real. return self.tokens.accept(grammar.infix)
python
def accept_operator(self, precedence): """Accept the next binary operator only if it's of higher precedence.""" match = grammar.infix(self.tokens) if not match: return if match.operator.precedence < precedence: return # The next thing is an operator that we want. Now match it for real. return self.tokens.accept(grammar.infix)
[ "def", "accept_operator", "(", "self", ",", "precedence", ")", ":", "match", "=", "grammar", ".", "infix", "(", "self", ".", "tokens", ")", "if", "not", "match", ":", "return", "if", "match", ".", "operator", ".", "precedence", "<", "precedence", ":", "return", "# The next thing is an operator that we want. Now match it for real.", "return", "self", ".", "tokens", ".", "accept", "(", "grammar", ".", "infix", ")" ]
Accept the next binary operator only if it's of higher precedence.
[ "Accept", "the", "next", "binary", "operator", "only", "if", "it", "s", "of", "higher", "precedence", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/dottysql/parser.py#L315-L325
train
google/dotty
efilter/parsers/dottysql/parser.py
Parser.operator
def operator(self, lhs, min_precedence): """Climb operator precedence as long as there are operators. This function implements a basic precedence climbing parser to deal with binary operators in a sane fashion. The outer loop will keep spinning as long as the next token is an operator with a precedence of at least 'min_precedence', parsing operands as atoms (which, in turn, recurse into 'expression' which recurses back into 'operator'). This supports both left- and right-associativity. The only part of the code that's not a regular precedence-climber deals with mixfix operators. A mixfix operator in DottySQL consists of an infix part and a suffix (they are still binary, they just have a terminator). """ # Spin as long as the next token is an operator of higher # precedence. (This may not do anything, which is fine.) while self.accept_operator(precedence=min_precedence): operator = self.tokens.matched.operator # If we're parsing a mixfix operator we can keep going until # the suffix. if operator.suffix: rhs = self.expression() self.tokens.expect(common_grammar.match_tokens(operator.suffix)) rhs.end = self.tokens.matched.end elif operator.name == ".": # The dot operator changes the meaning of RHS. rhs = self.dot_rhs() else: # The right hand side is an atom, which might turn out to be # an expression. Isn't recursion exciting? rhs = self.atom() # Keep going as long as the next token is an infix operator of # higher precedence. next_min_precedence = operator.precedence if operator.assoc == "left": next_min_precedence += 1 while self.tokens.match(grammar.infix): if (self.tokens.matched.operator.precedence < next_min_precedence): break rhs = self.operator(rhs, self.tokens.matched.operator.precedence) lhs = operator.handler(lhs, rhs, start=lhs.start, end=rhs.end, source=self.original) return lhs
python
def operator(self, lhs, min_precedence): """Climb operator precedence as long as there are operators. This function implements a basic precedence climbing parser to deal with binary operators in a sane fashion. The outer loop will keep spinning as long as the next token is an operator with a precedence of at least 'min_precedence', parsing operands as atoms (which, in turn, recurse into 'expression' which recurses back into 'operator'). This supports both left- and right-associativity. The only part of the code that's not a regular precedence-climber deals with mixfix operators. A mixfix operator in DottySQL consists of an infix part and a suffix (they are still binary, they just have a terminator). """ # Spin as long as the next token is an operator of higher # precedence. (This may not do anything, which is fine.) while self.accept_operator(precedence=min_precedence): operator = self.tokens.matched.operator # If we're parsing a mixfix operator we can keep going until # the suffix. if operator.suffix: rhs = self.expression() self.tokens.expect(common_grammar.match_tokens(operator.suffix)) rhs.end = self.tokens.matched.end elif operator.name == ".": # The dot operator changes the meaning of RHS. rhs = self.dot_rhs() else: # The right hand side is an atom, which might turn out to be # an expression. Isn't recursion exciting? rhs = self.atom() # Keep going as long as the next token is an infix operator of # higher precedence. next_min_precedence = operator.precedence if operator.assoc == "left": next_min_precedence += 1 while self.tokens.match(grammar.infix): if (self.tokens.matched.operator.precedence < next_min_precedence): break rhs = self.operator(rhs, self.tokens.matched.operator.precedence) lhs = operator.handler(lhs, rhs, start=lhs.start, end=rhs.end, source=self.original) return lhs
[ "def", "operator", "(", "self", ",", "lhs", ",", "min_precedence", ")", ":", "# Spin as long as the next token is an operator of higher", "# precedence. (This may not do anything, which is fine.)", "while", "self", ".", "accept_operator", "(", "precedence", "=", "min_precedence", ")", ":", "operator", "=", "self", ".", "tokens", ".", "matched", ".", "operator", "# If we're parsing a mixfix operator we can keep going until", "# the suffix.", "if", "operator", ".", "suffix", ":", "rhs", "=", "self", ".", "expression", "(", ")", "self", ".", "tokens", ".", "expect", "(", "common_grammar", ".", "match_tokens", "(", "operator", ".", "suffix", ")", ")", "rhs", ".", "end", "=", "self", ".", "tokens", ".", "matched", ".", "end", "elif", "operator", ".", "name", "==", "\".\"", ":", "# The dot operator changes the meaning of RHS.", "rhs", "=", "self", ".", "dot_rhs", "(", ")", "else", ":", "# The right hand side is an atom, which might turn out to be", "# an expression. Isn't recursion exciting?", "rhs", "=", "self", ".", "atom", "(", ")", "# Keep going as long as the next token is an infix operator of", "# higher precedence.", "next_min_precedence", "=", "operator", ".", "precedence", "if", "operator", ".", "assoc", "==", "\"left\"", ":", "next_min_precedence", "+=", "1", "while", "self", ".", "tokens", ".", "match", "(", "grammar", ".", "infix", ")", ":", "if", "(", "self", ".", "tokens", ".", "matched", ".", "operator", ".", "precedence", "<", "next_min_precedence", ")", ":", "break", "rhs", "=", "self", ".", "operator", "(", "rhs", ",", "self", ".", "tokens", ".", "matched", ".", "operator", ".", "precedence", ")", "lhs", "=", "operator", ".", "handler", "(", "lhs", ",", "rhs", ",", "start", "=", "lhs", ".", "start", ",", "end", "=", "rhs", ".", "end", ",", "source", "=", "self", ".", "original", ")", "return", "lhs" ]
Climb operator precedence as long as there are operators.

This function implements a basic precedence climbing parser to deal
with binary operators in a sane fashion. The outer loop will keep
spinning as long as the next token is an operator with a precedence
of at least 'min_precedence', parsing operands as atoms (which, in
turn, recurse into 'expression' which recurses back into 'operator').

This supports both left- and right-associativity. The only part of
the code that's not a regular precedence-climber deals with mixfix
operators. A mixfix operator in DottySQL consists of an infix part
and a suffix (they are still binary, they just have a terminator).
[ "Climb", "operator", "precedence", "as", "long", "as", "there", "are", "operators", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/dottysql/parser.py#L327-L377
train
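The precedence-climbing loop above is easier to follow against a toy grammar. Below is a minimal, self-contained climber over single-digit arithmetic; the token format and operator table are illustrative assumptions, not DottySQL's.

# Minimal precedence climber: (precedence, associativity) per operator.
OPS = {"+": (1, "left"), "-": (1, "left"), "*": (2, "left"), "^": (3, "right")}

def parse(tokens, min_precedence=1):
    lhs = tokens.pop(0)  # operands are single characters in this sketch
    while tokens and tokens[0] in OPS and OPS[tokens[0]][0] >= min_precedence:
        op = tokens.pop(0)
        prec, assoc = OPS[op]
        # Left-associative operators demand strictly higher precedence on
        # the right, exactly like next_min_precedence += 1 above.
        rhs = parse(tokens, prec + 1 if assoc == "left" else prec)
        lhs = (op, lhs, rhs)
    return lhs

print(parse(list("1+2*3^4")))  # ('+', '1', ('*', '2', ('^', '3', '4')))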
google/dotty
efilter/parsers/dottysql/parser.py
Parser.dot_rhs
def dot_rhs(self):
    """Match the right-hand side of a dot (.) operator.

    The RHS must be a symbol token, but it is interpreted as a literal
    string (because that's what goes in the AST of Resolve.)
    """
    self.tokens.expect(common_grammar.symbol)
    return ast.Literal(self.tokens.matched.value,
                       start=self.tokens.matched.start,
                       end=self.tokens.matched.end,
                       source=self.original)
python
def dot_rhs(self):
    """Match the right-hand side of a dot (.) operator.

    The RHS must be a symbol token, but it is interpreted as a literal
    string (because that's what goes in the AST of Resolve.)
    """
    self.tokens.expect(common_grammar.symbol)
    return ast.Literal(self.tokens.matched.value,
                       start=self.tokens.matched.start,
                       end=self.tokens.matched.end,
                       source=self.original)
[ "def", "dot_rhs", "(", "self", ")", ":", "self", ".", "tokens", ".", "expect", "(", "common_grammar", ".", "symbol", ")", "return", "ast", ".", "Literal", "(", "self", ".", "tokens", ".", "matched", ".", "value", ",", "start", "=", "self", ".", "tokens", ".", "matched", ".", "start", ",", "end", "=", "self", ".", "tokens", ".", "matched", ".", "end", ",", "source", "=", "self", ".", "original", ")" ]
Match the right-hand side of a dot (.) operator.

The RHS must be a symbol token, but it is interpreted as a literal
string (because that's what goes in the AST of Resolve.)
[ "Match", "the", "right", "-", "hand", "side", "of", "a", "dot", "(", ".", ")", "operator", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/dottysql/parser.py#L379-L388
train
google/dotty
efilter/parsers/dottysql/parser.py
Parser.select
def select(self):
    """First part of an SQL query."""
    # Try to match the asterisk, any or list of vars.
    if self.tokens.accept(grammar.select_any):
        return self.select_any()

    if self.tokens.accept(grammar.select_all):
        # The FROM after SELECT * is required.
        self.tokens.expect(grammar.select_from)
        return self.select_from()

    return self.select_what()
python
def select(self):
    """First part of an SQL query."""
    # Try to match the asterisk, any or list of vars.
    if self.tokens.accept(grammar.select_any):
        return self.select_any()

    if self.tokens.accept(grammar.select_all):
        # The FROM after SELECT * is required.
        self.tokens.expect(grammar.select_from)
        return self.select_from()

    return self.select_what()
[ "def", "select", "(", "self", ")", ":", "# Try to match the asterisk, any or list of vars.", "if", "self", ".", "tokens", ".", "accept", "(", "grammar", ".", "select_any", ")", ":", "return", "self", ".", "select_any", "(", ")", "if", "self", ".", "tokens", ".", "accept", "(", "grammar", ".", "select_all", ")", ":", "# The FROM after SELECT * is required.", "self", ".", "tokens", ".", "expect", "(", "grammar", ".", "select_from", ")", "return", "self", ".", "select_from", "(", ")", "return", "self", ".", "select_what", "(", ")" ]
First part of an SQL query.
[ "First", "part", "of", "an", "SQL", "query", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/dottysql/parser.py#L392-L403
train
google/dotty
efilter/parsers/dottysql/parser.py
Parser._guess_name_of
def _guess_name_of(self, expr):
    """Tries to guess what variable name 'expr' ends in.

    This is a heuristic that roughly emulates what most SQL databases
    name columns, based on selected variable names or applied functions.
    """
    if isinstance(expr, ast.Var):
        return expr.value

    if isinstance(expr, ast.Resolve):
        # We know the RHS of resolve is a Literal because that's what
        # Parser.dot_rhs does.
        return expr.rhs.value

    if isinstance(expr, ast.Select) and isinstance(expr.rhs, ast.Literal):
        name = self._guess_name_of(expr.lhs)
        if name is not None:
            return "%s_%s" % (name, expr.rhs.value)

    if isinstance(expr, ast.Apply) and isinstance(expr.func, ast.Var):
        return expr.func.value
python
def _guess_name_of(self, expr):
    """Tries to guess what variable name 'expr' ends in.

    This is a heuristic that roughly emulates what most SQL databases
    name columns, based on selected variable names or applied functions.
    """
    if isinstance(expr, ast.Var):
        return expr.value

    if isinstance(expr, ast.Resolve):
        # We know the RHS of resolve is a Literal because that's what
        # Parser.dot_rhs does.
        return expr.rhs.value

    if isinstance(expr, ast.Select) and isinstance(expr.rhs, ast.Literal):
        name = self._guess_name_of(expr.lhs)
        if name is not None:
            return "%s_%s" % (name, expr.rhs.value)

    if isinstance(expr, ast.Apply) and isinstance(expr.func, ast.Var):
        return expr.func.value
[ "def", "_guess_name_of", "(", "self", ",", "expr", ")", ":", "if", "isinstance", "(", "expr", ",", "ast", ".", "Var", ")", ":", "return", "expr", ".", "value", "if", "isinstance", "(", "expr", ",", "ast", ".", "Resolve", ")", ":", "# We know the RHS of resolve is a Literal because that's what", "# Parser.dot_rhs does.", "return", "expr", ".", "rhs", ".", "value", "if", "isinstance", "(", "expr", ",", "ast", ".", "Select", ")", "and", "isinstance", "(", "expr", ".", "rhs", ",", "ast", ".", "Literal", ")", ":", "name", "=", "self", ".", "_guess_name_of", "(", "expr", ".", "lhs", ")", "if", "name", "is", "not", "None", ":", "return", "\"%s_%s\"", "%", "(", "name", ",", "expr", ".", "rhs", ".", "value", ")", "if", "isinstance", "(", "expr", ",", "ast", ".", "Apply", ")", "and", "isinstance", "(", "expr", ".", "func", ",", "ast", ".", "Var", ")", ":", "return", "expr", ".", "func", ".", "value" ]
Tries to guess what variable name 'expr' ends in.

This is a heuristic that roughly emulates what most SQL databases
name columns, based on selected variable names or applied functions.
[ "Tries", "to", "guess", "what", "variable", "name", "expr", "ends", "in", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/dottysql/parser.py#L439-L459
train
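To make the heuristic concrete, here is a standalone replica that uses plain tuples in place of efilter's AST classes (an assumption so the sketch runs without the library):

def guess_name(expr):
    kind = expr[0]
    if kind == "var":        # SELECT proc          -> "proc"
        return expr[1]
    if kind == "resolve":    # SELECT proc.name     -> "name"
        return expr[2]
    if kind == "select":     # SELECT proc["pid"]   -> "proc_pid"
        base = guess_name(expr[1])
        if base is not None:
            return "%s_%s" % (base, expr[2])
    if kind == "apply":      # SELECT count(lines)  -> "count"
        return expr[1]

print(guess_name(("select", ("var", "proc"), "pid")))  # proc_pid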
google/dotty
efilter/parsers/dottysql/parser.py
Parser.select_limit
def select_limit(self, source_expression):
    """Match LIMIT take [OFFSET drop]."""
    start = self.tokens.matched.start

    # The expression right after LIMIT is the count to take.
    limit_count_expression = self.expression()

    # Optional OFFSET follows.
    if self.tokens.accept(grammar.select_offset):
        offset_start = self.tokens.matched.start
        offset_end = self.tokens.matched.end

        # Next thing is the count to drop.
        offset_count_expression = self.expression()

        # We have a new source expression, which is drop(count, original).
        offset_source_expression = ast.Apply(
            ast.Var("drop", start=offset_start, end=offset_end,
                    source=self.original),
            offset_count_expression,
            source_expression,
            start=offset_start, end=offset_count_expression.end,
            source=self.original)

        # Drop before taking, because obviously.
        source_expression = offset_source_expression

    limit_expression = ast.Apply(
        ast.Var("take", start=start, end=limit_count_expression.end,
                source=self.original),
        limit_count_expression,
        source_expression,
        start=start, end=self.tokens.matched.end,
        source=self.original)

    return limit_expression
python
def select_limit(self, source_expression):
    """Match LIMIT take [OFFSET drop]."""
    start = self.tokens.matched.start

    # The expression right after LIMIT is the count to take.
    limit_count_expression = self.expression()

    # Optional OFFSET follows.
    if self.tokens.accept(grammar.select_offset):
        offset_start = self.tokens.matched.start
        offset_end = self.tokens.matched.end

        # Next thing is the count to drop.
        offset_count_expression = self.expression()

        # We have a new source expression, which is drop(count, original).
        offset_source_expression = ast.Apply(
            ast.Var("drop", start=offset_start, end=offset_end,
                    source=self.original),
            offset_count_expression,
            source_expression,
            start=offset_start, end=offset_count_expression.end,
            source=self.original)

        # Drop before taking, because obviously.
        source_expression = offset_source_expression

    limit_expression = ast.Apply(
        ast.Var("take", start=start, end=limit_count_expression.end,
                source=self.original),
        limit_count_expression,
        source_expression,
        start=start, end=self.tokens.matched.end,
        source=self.original)

    return limit_expression
[ "def", "select_limit", "(", "self", ",", "source_expression", ")", ":", "start", "=", "self", ".", "tokens", ".", "matched", ".", "start", "# The expression right after LIMIT is the count to take.", "limit_count_expression", "=", "self", ".", "expression", "(", ")", "# Optional OFFSET follows.", "if", "self", ".", "tokens", ".", "accept", "(", "grammar", ".", "select_offset", ")", ":", "offset_start", "=", "self", ".", "tokens", ".", "matched", ".", "start", "offset_end", "=", "self", ".", "tokens", ".", "matched", ".", "end", "# Next thing is the count to drop.", "offset_count_expression", "=", "self", ".", "expression", "(", ")", "# We have a new source expression, which is drop(count, original).", "offset_source_expression", "=", "ast", ".", "Apply", "(", "ast", ".", "Var", "(", "\"drop\"", ",", "start", "=", "offset_start", ",", "end", "=", "offset_end", ",", "source", "=", "self", ".", "original", ")", ",", "offset_count_expression", ",", "source_expression", ",", "start", "=", "offset_start", ",", "end", "=", "offset_count_expression", ".", "end", ",", "source", "=", "self", ".", "original", ")", "# Drop before taking, because obviously.", "source_expression", "=", "offset_source_expression", "limit_expression", "=", "ast", ".", "Apply", "(", "ast", ".", "Var", "(", "\"take\"", ",", "start", "=", "start", ",", "end", "=", "limit_count_expression", ".", "end", ",", "source", "=", "self", ".", "original", ")", ",", "limit_count_expression", ",", "source_expression", ",", "start", "=", "start", ",", "end", "=", "self", ".", "tokens", ".", "matched", ".", "end", ",", "source", "=", "self", ".", "original", ")", "return", "limit_expression" ]
Match LIMIT take [OFFSET drop].
[ "Match", "LIMIT", "take", "[", "OFFSET", "drop", "]", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/dottysql/parser.py#L573-L607
train
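In other words, LIMIT/OFFSET desugars to take(limit, drop(offset, source)). The helpers below are stand-ins over a plain list (the real query runs through efilter's solver, so treat this purely as an illustration of the nesting order):

def drop(n, rows):
    return rows[n:]

def take(n, rows):
    return rows[:n]

rows = list(range(20))
# "... LIMIT 10 OFFSET 5" -- drop is applied first, then take.
print(take(10, drop(5, rows)))  # [5, 6, 7, 8, 9, 10, 11, 12, 13, 14]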
google/dotty
efilter/parsers/dottysql/parser.py
Parser.builtin
def builtin(self, keyword):
    """Parse the pseudo-function application subgrammar."""
    # The match includes the lparen token, so the keyword is just the first
    # token in the match, not the whole thing.
    keyword_start = self.tokens.matched.first.start
    keyword_end = self.tokens.matched.first.end

    self.tokens.expect(common_grammar.lparen)
    if self.tokens.matched.start != keyword_end:
        return self.error(
            "No whitespace allowed between function and lparen.",
            start_token=self.tokens.matched.first)

    expr_type = grammar.BUILTINS[keyword.lower()]
    arguments = [self.expression()]
    while self.tokens.accept(common_grammar.comma):
        arguments.append(self.expression())

    self.tokens.expect(common_grammar.rparen)

    if expr_type.arity and expr_type.arity != len(arguments):
        return self.error(
            "%s expects %d arguments, but was passed %d." % (
                keyword, expr_type.arity, len(arguments)),
            start_token=self.tokens.matched.first)

    return expr_type(*arguments, start=keyword_start,
                     end=self.tokens.matched.end, source=self.original)
python
def builtin(self, keyword):
    """Parse the pseudo-function application subgrammar."""
    # The match includes the lparen token, so the keyword is just the first
    # token in the match, not the whole thing.
    keyword_start = self.tokens.matched.first.start
    keyword_end = self.tokens.matched.first.end

    self.tokens.expect(common_grammar.lparen)
    if self.tokens.matched.start != keyword_end:
        return self.error(
            "No whitespace allowed between function and lparen.",
            start_token=self.tokens.matched.first)

    expr_type = grammar.BUILTINS[keyword.lower()]
    arguments = [self.expression()]
    while self.tokens.accept(common_grammar.comma):
        arguments.append(self.expression())

    self.tokens.expect(common_grammar.rparen)

    if expr_type.arity and expr_type.arity != len(arguments):
        return self.error(
            "%s expects %d arguments, but was passed %d." % (
                keyword, expr_type.arity, len(arguments)),
            start_token=self.tokens.matched.first)

    return expr_type(*arguments, start=keyword_start,
                     end=self.tokens.matched.end, source=self.original)
[ "def", "builtin", "(", "self", ",", "keyword", ")", ":", "# The match includes the lparen token, so the keyword is just the first", "# token in the match, not the whole thing.", "keyword_start", "=", "self", ".", "tokens", ".", "matched", ".", "first", ".", "start", "keyword_end", "=", "self", ".", "tokens", ".", "matched", ".", "first", ".", "end", "self", ".", "tokens", ".", "expect", "(", "common_grammar", ".", "lparen", ")", "if", "self", ".", "tokens", ".", "matched", ".", "start", "!=", "keyword_end", ":", "return", "self", ".", "error", "(", "\"No whitespace allowed between function and lparen.\"", ",", "start_token", "=", "self", ".", "tokens", ".", "matched", ".", "first", ")", "expr_type", "=", "grammar", ".", "BUILTINS", "[", "keyword", ".", "lower", "(", ")", "]", "arguments", "=", "[", "self", ".", "expression", "(", ")", "]", "while", "self", ".", "tokens", ".", "accept", "(", "common_grammar", ".", "comma", ")", ":", "arguments", ".", "append", "(", "self", ".", "expression", "(", ")", ")", "self", ".", "tokens", ".", "expect", "(", "common_grammar", ".", "rparen", ")", "if", "expr_type", ".", "arity", "and", "expr_type", ".", "arity", "!=", "len", "(", "arguments", ")", ":", "return", "self", ".", "error", "(", "\"%s expects %d arguments, but was passed %d.\"", "%", "(", "keyword", ",", "expr_type", ".", "arity", ",", "len", "(", "arguments", ")", ")", ",", "start_token", "=", "self", ".", "tokens", ".", "matched", ".", "first", ")", "return", "expr_type", "(", "*", "arguments", ",", "start", "=", "keyword_start", ",", "end", "=", "self", ".", "tokens", ".", "matched", ".", "end", ",", "source", "=", "self", ".", "original", ")" ]
Parse the pseudo-function application subgrammar.
[ "Parse", "the", "pseudo", "-", "function", "application", "subgrammar", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/dottysql/parser.py#L611-L638
train
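The arity check at the end is the interesting part. A dependency-free sketch of the same dispatch-and-validate pattern follows, with an invented one-entry BUILTINS table (efilter's real table maps keywords to expression types, not plain callables):

BUILTINS = {"reverse": (lambda xs: xs[::-1], 1)}  # keyword -> (handler, arity)

def call_builtin(keyword, args):
    handler, arity = BUILTINS[keyword.lower()]
    if arity and arity != len(args):
        raise TypeError("%s expects %d arguments, but was passed %d."
                        % (keyword, arity, len(args)))
    return handler(*args)

print(call_builtin("REVERSE", [[1, 2, 3]]))  # [3, 2, 1]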
google/dotty
efilter/parsers/dottysql/parser.py
Parser.application
def application(self, func):
    """Parse the function application subgrammar.

    Function application can, conceptually, be thought of as a mixfix
    operator, similar to the way array subscripting works. However, it is
    not clear at this point whether we want to allow it to work as such,
    because doing so would permit queries to, at runtime, select methods
    out of an arbitrary object and then call them.

    While there is a function whitelist and preventing this sort of thing
    in the syntax isn't a security feature, it still seems like the syntax
    should make it clear what the intended use of application is.

    If we later decide to extend DottySQL to allow function application
    over an arbitrary LHS expression then that syntax would be a strict
    superset of the current syntax and backwards compatible.
    """
    start = self.tokens.matched.start
    if self.tokens.accept(common_grammar.rparen):
        # That was easy.
        return ast.Apply(func, start=start, end=self.tokens.matched.end,
                         source=self.original)

    arguments = [self.expression()]
    while self.tokens.accept(common_grammar.comma):
        arguments.append(self.expression())

    self.tokens.expect(common_grammar.rparen)
    return ast.Apply(func, *arguments, start=start,
                     end=self.tokens.matched.end, source=self.original)
python
def application(self, func):
    """Parse the function application subgrammar.

    Function application can, conceptually, be thought of as a mixfix
    operator, similar to the way array subscripting works. However, it is
    not clear at this point whether we want to allow it to work as such,
    because doing so would permit queries to, at runtime, select methods
    out of an arbitrary object and then call them.

    While there is a function whitelist and preventing this sort of thing
    in the syntax isn't a security feature, it still seems like the syntax
    should make it clear what the intended use of application is.

    If we later decide to extend DottySQL to allow function application
    over an arbitrary LHS expression then that syntax would be a strict
    superset of the current syntax and backwards compatible.
    """
    start = self.tokens.matched.start
    if self.tokens.accept(common_grammar.rparen):
        # That was easy.
        return ast.Apply(func, start=start, end=self.tokens.matched.end,
                         source=self.original)

    arguments = [self.expression()]
    while self.tokens.accept(common_grammar.comma):
        arguments.append(self.expression())

    self.tokens.expect(common_grammar.rparen)
    return ast.Apply(func, *arguments, start=start,
                     end=self.tokens.matched.end, source=self.original)
[ "def", "application", "(", "self", ",", "func", ")", ":", "start", "=", "self", ".", "tokens", ".", "matched", ".", "start", "if", "self", ".", "tokens", ".", "accept", "(", "common_grammar", ".", "rparen", ")", ":", "# That was easy.", "return", "ast", ".", "Apply", "(", "func", ",", "start", "=", "start", ",", "end", "=", "self", ".", "tokens", ".", "matched", ".", "end", ",", "source", "=", "self", ".", "original", ")", "arguments", "=", "[", "self", ".", "expression", "(", ")", "]", "while", "self", ".", "tokens", ".", "accept", "(", "common_grammar", ".", "comma", ")", ":", "arguments", ".", "append", "(", "self", ".", "expression", "(", ")", ")", "self", ".", "tokens", ".", "expect", "(", "common_grammar", ".", "rparen", ")", "return", "ast", ".", "Apply", "(", "func", ",", "*", "arguments", ",", "start", "=", "start", ",", "end", "=", "self", ".", "tokens", ".", "matched", ".", "end", ",", "source", "=", "self", ".", "original", ")" ]
Parse the function application subgrammar.

Function application can, conceptually, be thought of as a mixfix
operator, similar to the way array subscripting works. However, it is
not clear at this point whether we want to allow it to work as such,
because doing so would permit queries to, at runtime, select methods
out of an arbitrary object and then call them.

While there is a function whitelist and preventing this sort of thing
in the syntax isn't a security feature, it still seems like the syntax
should make it clear what the intended use of application is.

If we later decide to extend DottySQL to allow function application
over an arbitrary LHS expression then that syntax would be a strict
superset of the current syntax and backwards compatible.
[ "Parse", "the", "function", "application", "subgrammar", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/dottysql/parser.py#L666-L695
train
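The argument-list loop (one expression, then zero or more comma-prefixed expressions, then the closing paren) is a recurring shape in this parser. Reduced to a plain token list purely for illustration:

def parse_args(tokens):
    if tokens[0] == ")":          # empty call: f()
        tokens.pop(0)
        return []
    args = [tokens.pop(0)]        # a real parser would call expression() here
    while tokens[0] == ",":
        tokens.pop(0)
        args.append(tokens.pop(0))
    assert tokens.pop(0) == ")"
    return args

print(parse_args(["a", ",", "b", ")"]))  # ['a', 'b']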
google/dotty
efilter/parsers/dottysql/parser.py
Parser.list
def list(self):
    """Parse a list (tuple) which can contain any combination of types."""
    start = self.tokens.matched.start
    if self.tokens.accept(common_grammar.rbracket):
        return ast.Tuple(start=start, end=self.tokens.matched.end,
                         source=self.original)

    elements = [self.expression()]
    while self.tokens.accept(common_grammar.comma):
        elements.append(self.expression())

    self.tokens.expect(common_grammar.rbracket)
    return ast.Tuple(*elements, start=start, end=self.tokens.matched.end,
                     source=self.original)
python
def list(self):
    """Parse a list (tuple) which can contain any combination of types."""
    start = self.tokens.matched.start
    if self.tokens.accept(common_grammar.rbracket):
        return ast.Tuple(start=start, end=self.tokens.matched.end,
                         source=self.original)

    elements = [self.expression()]
    while self.tokens.accept(common_grammar.comma):
        elements.append(self.expression())

    self.tokens.expect(common_grammar.rbracket)
    return ast.Tuple(*elements, start=start, end=self.tokens.matched.end,
                     source=self.original)
[ "def", "list", "(", "self", ")", ":", "start", "=", "self", ".", "tokens", ".", "matched", ".", "start", "if", "self", ".", "tokens", ".", "accept", "(", "common_grammar", ".", "rbracket", ")", ":", "return", "ast", ".", "Tuple", "(", "start", "=", "start", ",", "end", "=", "self", ".", "tokens", ".", "matched", ".", "end", ",", "source", "=", "self", ".", "original", ")", "elements", "=", "[", "self", ".", "expression", "(", ")", "]", "while", "self", ".", "tokens", ".", "accept", "(", "common_grammar", ".", "comma", ")", ":", "elements", ".", "append", "(", "self", ".", "expression", "(", ")", ")", "self", ".", "tokens", ".", "expect", "(", "common_grammar", ".", "rbracket", ")", "return", "ast", ".", "Tuple", "(", "*", "elements", ",", "start", "=", "start", ",", "end", "=", "self", ".", "tokens", ".", "matched", ".", "end", ",", "source", "=", "self", ".", "original", ")" ]
Parse a list (tuple) which can contain any combination of types.
[ "Parse", "a", "list", "(", "tuple", ")", "which", "can", "contain", "any", "combination", "of", "types", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/dottysql/parser.py#L699-L714
train
google/dotty
efilter/ext/row_tuple.py
RowTuple.get_singleton
def get_singleton(self):
    """If the row only has one column, return that value; otherwise raise.

    Raises:
        ValueError, if count of columns is not 1.
    """
    only_value = None
    for value in six.itervalues(self.ordered_dict):
        # This loop will raise if it runs more than once.
        if only_value is not None:
            raise ValueError("%r is not a singleton." % self)

        only_value = value

    if only_value is self.__UnsetSentinel or only_value is None:
        raise ValueError("%r is empty." % self)

    return only_value
python
def get_singleton(self):
    """If the row only has one column, return that value; otherwise raise.

    Raises:
        ValueError, if count of columns is not 1.
    """
    only_value = None
    for value in six.itervalues(self.ordered_dict):
        # This loop will raise if it runs more than once.
        if only_value is not None:
            raise ValueError("%r is not a singleton." % self)

        only_value = value

    if only_value is self.__UnsetSentinel or only_value is None:
        raise ValueError("%r is empty." % self)

    return only_value
[ "def", "get_singleton", "(", "self", ")", ":", "only_value", "=", "None", "for", "value", "in", "six", ".", "itervalues", "(", "self", ".", "ordered_dict", ")", ":", "# This loop will raise if it runs more than once.", "if", "only_value", "is", "not", "None", ":", "raise", "ValueError", "(", "\"%r is not a singleton.\"", "%", "self", ")", "only_value", "=", "value", "if", "only_value", "is", "self", ".", "__UnsetSentinel", "or", "only_value", "is", "None", ":", "raise", "ValueError", "(", "\"%r is empty.\"", "%", "self", ")", "return", "only_value" ]
If the row only has one column, return that value; otherwise raise.

Raises:
    ValueError, if count of columns is not 1.
[ "If", "the", "row", "only", "has", "one", "column", "return", "that", "value", ";", "otherwise", "raise", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/ext/row_tuple.py#L93-L110
train
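A dependency-free illustration of the singleton contract, using a plain dict in place of RowTuple (an assumption so the sketch stands alone):

def get_singleton(row):
    values = list(row.values())
    if len(values) != 1:
        raise ValueError("%r is not a singleton." % row)
    if values[0] is None:
        raise ValueError("%r is empty." % row)
    return values[0]

print(get_singleton({"pid": 42}))            # 42
# get_singleton({"pid": 1, "name": "init"})  -> ValueError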
bradmontgomery/django-redis-metrics
redis_metrics/management/commands/system_metric.py
Command._cpu
def _cpu(self):
    """Record CPU usage."""
    value = int(psutil.cpu_percent())
    set_metric("cpu", value, category=self.category)
    gauge("cpu", value)
python
def _cpu(self):
    """Record CPU usage."""
    value = int(psutil.cpu_percent())
    set_metric("cpu", value, category=self.category)
    gauge("cpu", value)
[ "def", "_cpu", "(", "self", ")", ":", "value", "=", "int", "(", "psutil", ".", "cpu_percent", "(", ")", ")", "set_metric", "(", "\"cpu\"", ",", "value", ",", "category", "=", "self", ".", "category", ")", "gauge", "(", "\"cpu\"", ",", "value", ")" ]
Record CPU usage.
[ "Record", "CPU", "usage", "." ]
2c92332920113d28c39234b949aa496b39a091d1
https://github.com/bradmontgomery/django-redis-metrics/blob/2c92332920113d28c39234b949aa496b39a091d1/redis_metrics/management/commands/system_metric.py#L83-L87
train
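One caveat worth knowing when reading the recorder above: psutil.cpu_percent() with no interval compares against the previous call, so the very first sample in a process is meaningless (often 0.0). A small sketch of sampling it directly:

import psutil

psutil.cpu_percent()                          # prime the baseline reading
value = int(psutil.cpu_percent(interval=1))   # blocking one-second sample
print(value)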
bradmontgomery/django-redis-metrics
redis_metrics/management/commands/system_metric.py
Command._mem
def _mem(self):
    """Record Memory usage."""
    value = int(psutil.virtual_memory().percent)
    set_metric("memory", value, category=self.category)
    gauge("memory", value)
python
def _mem(self):
    """Record Memory usage."""
    value = int(psutil.virtual_memory().percent)
    set_metric("memory", value, category=self.category)
    gauge("memory", value)
[ "def", "_mem", "(", "self", ")", ":", "value", "=", "int", "(", "psutil", ".", "virtual_memory", "(", ")", ".", "percent", ")", "set_metric", "(", "\"memory\"", ",", "value", ",", "category", "=", "self", ".", "category", ")", "gauge", "(", "\"memory\"", ",", "value", ")" ]
Record Memory usage.
[ "Record", "Memory", "usage", "." ]
2c92332920113d28c39234b949aa496b39a091d1
https://github.com/bradmontgomery/django-redis-metrics/blob/2c92332920113d28c39234b949aa496b39a091d1/redis_metrics/management/commands/system_metric.py#L89-L93
train
bradmontgomery/django-redis-metrics
redis_metrics/management/commands/system_metric.py
Command._disk
def _disk(self):
    """Record Disk usage."""
    mountpoints = [
        p.mountpoint for p in psutil.disk_partitions()
        if p.device.endswith(self.device)
    ]
    if len(mountpoints) != 1:
        raise CommandError("Unknown device: {0}".format(self.device))

    value = int(psutil.disk_usage(mountpoints[0]).percent)
    set_metric("disk-{0}".format(self.device), value, category=self.category)
    gauge("disk-{0}".format(self.device), value)
python
def _disk(self):
    """Record Disk usage."""
    mountpoints = [
        p.mountpoint for p in psutil.disk_partitions()
        if p.device.endswith(self.device)
    ]
    if len(mountpoints) != 1:
        raise CommandError("Unknown device: {0}".format(self.device))

    value = int(psutil.disk_usage(mountpoints[0]).percent)
    set_metric("disk-{0}".format(self.device), value, category=self.category)
    gauge("disk-{0}".format(self.device), value)
[ "def", "_disk", "(", "self", ")", ":", "mountpoints", "=", "[", "p", ".", "mountpoint", "for", "p", "in", "psutil", ".", "disk_partitions", "(", ")", "if", "p", ".", "device", ".", "endswith", "(", "self", ".", "device", ")", "]", "if", "len", "(", "mountpoints", ")", "!=", "1", ":", "raise", "CommandError", "(", "\"Unknown device: {0}\"", ".", "format", "(", "self", ".", "device", ")", ")", "value", "=", "int", "(", "psutil", ".", "disk_usage", "(", "mountpoints", "[", "0", "]", ")", ".", "percent", ")", "set_metric", "(", "\"disk-{0}\"", ".", "format", "(", "self", ".", "device", ")", ",", "value", ",", "category", "=", "self", ".", "category", ")", "gauge", "(", "\"disk-{0}\"", ".", "format", "(", "self", ".", "device", ")", ",", "value", ")" ]
Record Disk usage.
[ "Record", "Disk", "usage", "." ]
2c92332920113d28c39234b949aa496b39a091d1
https://github.com/bradmontgomery/django-redis-metrics/blob/2c92332920113d28c39234b949aa496b39a091d1/redis_metrics/management/commands/system_metric.py#L95-L106
train
bradmontgomery/django-redis-metrics
redis_metrics/management/commands/system_metric.py
Command._net
def _net(self):
    """Record Network usage."""
    data = psutil.network_io_counters(pernic=True)
    if self.device not in data:
        raise CommandError("Unknown device: {0}".format(self.device))

    # Network bytes sent
    value = data[self.device].bytes_sent
    metric("net-{0}-sent".format(self.device), value, category=self.category)
    gauge("net-{0}-sent".format(self.device), value)

    # Network bytes received
    value = data[self.device].bytes_recv
    metric("net-{0}-recv".format(self.device), value, category=self.category)
python
def _net(self):
    """Record Network usage."""
    data = psutil.network_io_counters(pernic=True)
    if self.device not in data:
        raise CommandError("Unknown device: {0}".format(self.device))

    # Network bytes sent
    value = data[self.device].bytes_sent
    metric("net-{0}-sent".format(self.device), value, category=self.category)
    gauge("net-{0}-sent".format(self.device), value)

    # Network bytes received
    value = data[self.device].bytes_recv
    metric("net-{0}-recv".format(self.device), value, category=self.category)
[ "def", "_net", "(", "self", ")", ":", "data", "=", "psutil", ".", "network_io_counters", "(", "pernic", "=", "True", ")", "if", "self", ".", "device", "not", "in", "data", ":", "raise", "CommandError", "(", "\"Unknown device: {0}\"", ".", "format", "(", "self", ".", "device", ")", ")", "# Network bytes sent", "value", "=", "data", "[", "self", ".", "device", "]", ".", "bytes_sent", "metric", "(", "\"net-{0}-sent\"", ".", "format", "(", "self", ".", "device", ")", ",", "value", ",", "category", "=", "self", ".", "category", ")", "gauge", "(", "\"net-{0}-sent\"", ".", "format", "(", "self", ".", "device", ")", ",", "value", ")", "# Network bytes received", "value", "=", "data", "[", "self", ".", "device", "]", ".", "bytes_recv", "metric", "(", "\"net-{0}-recv\"", ".", "format", "(", "self", ".", "device", ")", ",", "value", ",", "category", "=", "self", ".", "category", ")" ]
Record Network usage.
[ "Record", "Network", "usage", "." ]
2c92332920113d28c39234b949aa496b39a091d1
https://github.com/bradmontgomery/django-redis-metrics/blob/2c92332920113d28c39234b949aa496b39a091d1/redis_metrics/management/commands/system_metric.py#L108-L121
train
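Note that psutil later renamed network_io_counters() to net_io_counters(); the call above reflects the psutil API at the time this code was written. Reading the same per-interface counters against the modern name:

import psutil

counters = psutil.net_io_counters(pernic=True)
for nic, stats in counters.items():
    print(nic, stats.bytes_sent, stats.bytes_recv)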
google/dotty
efilter/protocol.py
implements
def implements(obj, protocol):
    """Does the object 'obj' implement the 'protocol'?"""
    if isinstance(obj, type):
        raise TypeError("First argument to implements must be an instance. "
                        "Got %r." % obj)

    return isinstance(obj, protocol) or issubclass(AnyType, protocol)
python
def implements(obj, protocol):
    """Does the object 'obj' implement the 'protocol'?"""
    if isinstance(obj, type):
        raise TypeError("First argument to implements must be an instance. "
                        "Got %r." % obj)

    return isinstance(obj, protocol) or issubclass(AnyType, protocol)
[ "def", "implements", "(", "obj", ",", "protocol", ")", ":", "if", "isinstance", "(", "obj", ",", "type", ")", ":", "raise", "TypeError", "(", "\"First argument to implements must be an instance. \"", "\"Got %r.\"", "%", "obj", ")", "return", "isinstance", "(", "obj", ",", "protocol", ")", "or", "issubclass", "(", "AnyType", ",", "protocol", ")" ]
Does the object 'obj' implement the 'protocol'?
[ "Does", "the", "object", "obj", "implement", "the", "prococol", "?" ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/protocol.py#L73-L78
train
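The instance/type split between implements() and isa() mirrors isinstance/issubclass. The sketch below reproduces it with a plain abc.ABC standing in for Protocol (an assumption that keeps the example runnable without efilter):

import abc

class IGreeter(abc.ABC):
    pass

class Greeter(object):
    def greet(self):
        return "hi"

IGreeter.register(Greeter)
print(isinstance(Greeter(), IGreeter))   # True -- what implements() checks
print(issubclass(Greeter, IGreeter))     # True -- what isa() checks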
google/dotty
efilter/protocol.py
isa
def isa(cls, protocol):
    """Does the type 'cls' participate in the 'protocol'?"""
    if not isinstance(cls, type):
        raise TypeError("First argument to isa must be a type. Got %s." %
                        repr(cls))

    if not isinstance(protocol, type):
        raise TypeError(("Second argument to isa must be a type or a Protocol. "
                         "Got an instance of %r.") % type(protocol))

    return issubclass(cls, protocol) or issubclass(AnyType, protocol)
python
def isa(cls, protocol):
    """Does the type 'cls' participate in the 'protocol'?"""
    if not isinstance(cls, type):
        raise TypeError("First argument to isa must be a type. Got %s." %
                        repr(cls))

    if not isinstance(protocol, type):
        raise TypeError(("Second argument to isa must be a type or a Protocol. "
                         "Got an instance of %r.") % type(protocol))

    return issubclass(cls, protocol) or issubclass(AnyType, protocol)
[ "def", "isa", "(", "cls", ",", "protocol", ")", ":", "if", "not", "isinstance", "(", "cls", ",", "type", ")", ":", "raise", "TypeError", "(", "\"First argument to isa must be a type. Got %s.\"", "%", "repr", "(", "cls", ")", ")", "if", "not", "isinstance", "(", "protocol", ",", "type", ")", ":", "raise", "TypeError", "(", "(", "\"Second argument to isa must be a type or a Protocol. \"", "\"Got an instance of %r.\"", ")", "%", "type", "(", "protocol", ")", ")", "return", "issubclass", "(", "cls", ",", "protocol", ")", "or", "issubclass", "(", "AnyType", ",", "protocol", ")" ]
Does the type 'cls' participate in the 'protocol'?
[ "Does", "the", "type", "cls", "participate", "in", "the", "protocol", "?" ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/protocol.py#L81-L90
train
google/dotty
efilter/protocol.py
Protocol.implemented
def implemented(cls, for_type):
    """Assert that protocol 'cls' is implemented for type 'for_type'.

    This will cause 'for_type' to be registered with the protocol 'cls'.
    Subsequently, protocol.isa(for_type, cls) will return True, as will
    isinstance, issubclass and others.

    Raises:
        TypeError if 'for_type' doesn't implement all required functions.
    """
    for function in cls.required():
        if not function.implemented_for_type(for_type):
            raise TypeError(
                "%r doesn't implement %r so it cannot participate in "
                "the protocol %r." %
                (for_type, function.func.__name__, cls))

    cls.register(for_type)
python
def implemented(cls, for_type):
    """Assert that protocol 'cls' is implemented for type 'for_type'.

    This will cause 'for_type' to be registered with the protocol 'cls'.
    Subsequently, protocol.isa(for_type, cls) will return True, as will
    isinstance, issubclass and others.

    Raises:
        TypeError if 'for_type' doesn't implement all required functions.
    """
    for function in cls.required():
        if not function.implemented_for_type(for_type):
            raise TypeError(
                "%r doesn't implement %r so it cannot participate in "
                "the protocol %r." %
                (for_type, function.func.__name__, cls))

    cls.register(for_type)
[ "def", "implemented", "(", "cls", ",", "for_type", ")", ":", "for", "function", "in", "cls", ".", "required", "(", ")", ":", "if", "not", "function", ".", "implemented_for_type", "(", "for_type", ")", ":", "raise", "TypeError", "(", "\"%r doesn't implement %r so it cannot participate in \"", "\"the protocol %r.\"", "%", "(", "for_type", ",", "function", ".", "func", ".", "__name__", ",", "cls", ")", ")", "cls", ".", "register", "(", "for_type", ")" ]
Assert that protocol 'cls' is implemented for type 'for_type'.

This will cause 'for_type' to be registered with the protocol 'cls'.
Subsequently, protocol.isa(for_type, cls) will return True, as will
isinstance, issubclass and others.

Raises:
    TypeError if 'for_type' doesn't implement all required functions.
[ "Assert", "that", "protocol", "cls", "is", "implemented", "for", "type", "for_type", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/protocol.py#L126-L144
train
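The check-then-register flow can be replicated without efilter's multimethod machinery; the sketch below uses a plain tuple of required method names (an assumption, since the real Protocol derives them from its multimethod functions):

import abc

class IRepeated(abc.ABC):
    REQUIRED = ("getvalues",)

    @classmethod
    def implemented(cls, for_type):
        for name in cls.REQUIRED:
            if not callable(getattr(for_type, name, None)):
                raise TypeError("%r doesn't implement %r so it cannot "
                                "participate in the protocol %r."
                                % (for_type, name, cls))
        cls.register(for_type)

class ValueBag(object):
    def getvalues(self):
        return [1, 2, 3]

IRepeated.implemented(for_type=ValueBag)
print(isinstance(ValueBag(), IRepeated))  # True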
google/dotty
efilter/protocol.py
Protocol.__get_type_args
def __get_type_args(for_type=None, for_types=None):
    """Parse the arguments and return a tuple of types to implement for.

    Raises:
        ValueError or TypeError as appropriate.
    """
    if for_type:
        if for_types:
            raise ValueError("Cannot pass both for_type and for_types.")

        for_types = (for_type,)
    elif for_types:
        if not isinstance(for_types, tuple):
            raise TypeError("for_types must be passed as a tuple of "
                            "types (classes).")
    else:
        raise ValueError("Must pass either for_type or for_types.")

    return for_types
python
def __get_type_args(for_type=None, for_types=None):
    """Parse the arguments and return a tuple of types to implement for.

    Raises:
        ValueError or TypeError as appropriate.
    """
    if for_type:
        if for_types:
            raise ValueError("Cannot pass both for_type and for_types.")

        for_types = (for_type,)
    elif for_types:
        if not isinstance(for_types, tuple):
            raise TypeError("for_types must be passed as a tuple of "
                            "types (classes).")
    else:
        raise ValueError("Must pass either for_type or for_types.")

    return for_types
[ "def", "__get_type_args", "(", "for_type", "=", "None", ",", "for_types", "=", "None", ")", ":", "if", "for_type", ":", "if", "for_types", ":", "raise", "ValueError", "(", "\"Cannot pass both for_type and for_types.\"", ")", "for_types", "=", "(", "for_type", ",", ")", "elif", "for_types", ":", "if", "not", "isinstance", "(", "for_types", ",", "tuple", ")", ":", "raise", "TypeError", "(", "\"for_types must be passed as a tuple of \"", "\"types (classes).\"", ")", "else", ":", "raise", "ValueError", "(", "\"Must pass either for_type or for_types.\"", ")", "return", "for_types" ]
Parse the arguments and return a tuple of types to implement for.

Raises:
    ValueError or TypeError as appropriate.
[ "Parse", "the", "arguments", "and", "return", "a", "tuple", "of", "types", "to", "implement", "for", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/protocol.py#L147-L164
train
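This helper defines the for_type/for_types calling convention shared by implement(), implicit_static() and implicit_dynamic(). Its contract, restated as a standalone function for illustration:

def get_type_args(for_type=None, for_types=None):
    if for_type:
        if for_types:
            raise ValueError("Cannot pass both for_type and for_types.")
        return (for_type,)
    if for_types:
        if not isinstance(for_types, tuple):
            raise TypeError("for_types must be passed as a tuple of types.")
        return for_types
    raise ValueError("Must pass either for_type or for_types.")

print(get_type_args(for_type=int))           # (<class 'int'>,)
print(get_type_args(for_types=(int, str)))   # (<class 'int'>, <class 'str'>)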
google/dotty
efilter/protocol.py
Protocol.implicit_static
def implicit_static(cls, for_type=None, for_types=None):
    """Automatically generate implementations for a type.

    Implement the protocol for the 'for_type' type by dispatching each
    member function of the protocol to an instance method of the same
    name declared on the type 'for_type'.

    Arguments:
        for_type: The type to implicitly implement the protocol with.

    Raises:
        TypeError if not all implementations are provided by 'for_type'.
    """
    for type_ in cls.__get_type_args(for_type, for_types):
        implementations = {}
        for function in cls.required():
            method = getattr(type_, function.__name__, None)
            if not callable(method):
                raise TypeError(
                    "%s.implicit invocation on type %r is missing instance "
                    "method %r." % (cls.__name__, type_, function.__name__))

            implementations[function] = method

        for function in cls.optional():
            method = getattr(type_, function.__name__, None)
            if callable(method):
                implementations[function] = method

        return cls.implement(for_type=type_,
                             implementations=implementations)
python
def implicit_static(cls, for_type=None, for_types=None):
    """Automatically generate implementations for a type.

    Implement the protocol for the 'for_type' type by dispatching each
    member function of the protocol to an instance method of the same
    name declared on the type 'for_type'.

    Arguments:
        for_type: The type to implicitly implement the protocol with.

    Raises:
        TypeError if not all implementations are provided by 'for_type'.
    """
    for type_ in cls.__get_type_args(for_type, for_types):
        implementations = {}
        for function in cls.required():
            method = getattr(type_, function.__name__, None)
            if not callable(method):
                raise TypeError(
                    "%s.implicit invocation on type %r is missing instance "
                    "method %r." % (cls.__name__, type_, function.__name__))

            implementations[function] = method

        for function in cls.optional():
            method = getattr(type_, function.__name__, None)
            if callable(method):
                implementations[function] = method

        return cls.implement(for_type=type_,
                             implementations=implementations)
[ "def", "implicit_static", "(", "cls", ",", "for_type", "=", "None", ",", "for_types", "=", "None", ")", ":", "for", "type_", "in", "cls", ".", "__get_type_args", "(", "for_type", ",", "for_types", ")", ":", "implementations", "=", "{", "}", "for", "function", "in", "cls", ".", "required", "(", ")", ":", "method", "=", "getattr", "(", "type_", ",", "function", ".", "__name__", ",", "None", ")", "if", "not", "callable", "(", "method", ")", ":", "raise", "TypeError", "(", "\"%s.implicit invokation on type %r is missing instance \"", "\"method %r.\"", "%", "(", "cls", ".", "__name__", ",", "type_", ",", "function", ".", "__name__", ")", ")", "implementations", "[", "function", "]", "=", "method", "for", "function", "in", "cls", ".", "optional", "(", ")", ":", "method", "=", "getattr", "(", "type_", ",", "function", ".", "__name__", ",", "None", ")", "if", "callable", "(", "method", ")", ":", "implementations", "[", "function", "]", "=", "method", "return", "cls", ".", "implement", "(", "for_type", "=", "type_", ",", "implementations", "=", "implementations", ")" ]
Automatically generate implementations for a type.

Implement the protocol for the 'for_type' type by dispatching each
member function of the protocol to an instance method of the same
name declared on the type 'for_type'.

Arguments:
    for_type: The type to implicitly implement the protocol with.

Raises:
    TypeError if not all implementations are provided by 'for_type'.
[ "Automatically", "generate", "implementations", "for", "a", "type", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/protocol.py#L187-L219
train
google/dotty
efilter/protocol.py
Protocol._build_late_dispatcher
def _build_late_dispatcher(func_name):
    """Return a function that calls method 'func_name' on objects.

    This is useful for building late-bound dynamic dispatch.

    Arguments:
        func_name: The name of the instance method that should be called.

    Returns:
        A function that takes an 'obj' parameter, followed by *args and
        returns the result of calling the instance method with the same
        name as the contents of 'func_name' on the 'obj' object with the
        arguments from *args.
    """
    def _late_dynamic_dispatcher(obj, *args):
        method = getattr(obj, func_name, None)
        if not callable(method):
            raise NotImplementedError(
                "Instance method %r is not implemented by %r." % (
                    func_name, obj))

        return method(*args)

    return _late_dynamic_dispatcher
python
def _build_late_dispatcher(func_name):
    """Return a function that calls method 'func_name' on objects.

    This is useful for building late-bound dynamic dispatch.

    Arguments:
        func_name: The name of the instance method that should be called.

    Returns:
        A function that takes an 'obj' parameter, followed by *args and
        returns the result of calling the instance method with the same
        name as the contents of 'func_name' on the 'obj' object with the
        arguments from *args.
    """
    def _late_dynamic_dispatcher(obj, *args):
        method = getattr(obj, func_name, None)
        if not callable(method):
            raise NotImplementedError(
                "Instance method %r is not implemented by %r." % (
                    func_name, obj))

        return method(*args)

    return _late_dynamic_dispatcher
[ "def", "_build_late_dispatcher", "(", "func_name", ")", ":", "def", "_late_dynamic_dispatcher", "(", "obj", ",", "*", "args", ")", ":", "method", "=", "getattr", "(", "obj", ",", "func_name", ",", "None", ")", "if", "not", "callable", "(", "method", ")", ":", "raise", "NotImplementedError", "(", "\"Instance method %r is not implemented by %r.\"", "%", "(", "func_name", ",", "obj", ")", ")", "return", "method", "(", "*", "args", ")", "return", "_late_dynamic_dispatcher" ]
Return a function that calls method 'func_name' on objects.

This is useful for building late-bound dynamic dispatch.

Arguments:
    func_name: The name of the instance method that should be called.

Returns:
    A function that takes an 'obj' parameter, followed by *args and
    returns the result of calling the instance method with the same
    name as the contents of 'func_name' on the 'obj' object with the
    arguments from *args.
[ "Return", "a", "function", "that", "calls", "method", "func_name", "on", "objects", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/protocol.py#L222-L245
train
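Isolated from the class, the late-dispatch closure looks like this; because the attribute lookup happens at call time, types defined after the dispatcher is built still work:

def build_late_dispatcher(func_name):
    def dispatcher(obj, *args):
        method = getattr(obj, func_name, None)
        if not callable(method):
            raise NotImplementedError(
                "Instance method %r is not implemented by %r." % (func_name, obj))
        return method(*args)
    return dispatcher

getvalues = build_late_dispatcher("getvalues")

class Bag(object):  # defined after the dispatcher -- still dispatches fine
    def getvalues(self):
        return [1, 2]

print(getvalues(Bag()))  # [1, 2]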
google/dotty
efilter/protocol.py
Protocol.implicit_dynamic
def implicit_dynamic(cls, for_type=None, for_types=None):
    """Automatically generate late dynamic dispatchers to type.

    This is similar to 'implicit_static', except instead of binding the
    instance methods, it generates a dispatcher that will call whatever
    instance method of the same name happens to be available at time of
    dispatch.

    This has the obvious advantage of supporting arbitrary subclasses,
    but can do no verification at bind time.

    Arguments:
        for_type: The type to implicitly implement the protocol with.
    """
    for type_ in cls.__get_type_args(for_type, for_types):
        implementations = {}
        for function in cls.functions():
            implementations[function] = cls._build_late_dispatcher(
                func_name=function.__name__)

        cls.implement(for_type=type_, implementations=implementations)
python
def implicit_dynamic(cls, for_type=None, for_types=None):
    """Automatically generate late dynamic dispatchers to type.

    This is similar to 'implicit_static', except instead of binding the
    instance methods, it generates a dispatcher that will call whatever
    instance method of the same name happens to be available at time of
    dispatch.

    This has the obvious advantage of supporting arbitrary subclasses,
    but can do no verification at bind time.

    Arguments:
        for_type: The type to implicitly implement the protocol with.
    """
    for type_ in cls.__get_type_args(for_type, for_types):
        implementations = {}
        for function in cls.functions():
            implementations[function] = cls._build_late_dispatcher(
                func_name=function.__name__)

        cls.implement(for_type=type_, implementations=implementations)
[ "def", "implicit_dynamic", "(", "cls", ",", "for_type", "=", "None", ",", "for_types", "=", "None", ")", ":", "for", "type_", "in", "cls", ".", "__get_type_args", "(", "for_type", ",", "for_types", ")", ":", "implementations", "=", "{", "}", "for", "function", "in", "cls", ".", "functions", "(", ")", ":", "implementations", "[", "function", "]", "=", "cls", ".", "_build_late_dispatcher", "(", "func_name", "=", "function", ".", "__name__", ")", "cls", ".", "implement", "(", "for_type", "=", "type_", ",", "implementations", "=", "implementations", ")" ]
Automatically generate late dynamic dispatchers to type.

This is similar to 'implicit_static', except instead of binding the
instance methods, it generates a dispatcher that will call whatever
instance method of the same name happens to be available at time of
dispatch.

This has the obvious advantage of supporting arbitrary subclasses,
but can do no verification at bind time.

Arguments:
    for_type: The type to implicitly implement the protocol with.
[ "Automatically", "generate", "late", "dynamic", "dispatchers", "to", "type", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/protocol.py#L248-L268
train
google/dotty
efilter/protocol.py
Protocol.implement
def implement(cls, implementations, for_type=None, for_types=None):
    """Provide protocol implementation for a type.

    Register all implementations of multimethod functions in this
    protocol and add the type into the abstract base class of the
    protocol.

    Arguments:
        implementations: A dict of (function, implementation), where each
            function is multimethod and each implementation is a callable.
        for_type: The concrete type implementations apply to.
        for_types: Same as for_type, but takes a tuple of types.

        You may not supply both for_type and for_types for obvious reasons.

    Raises:
        ValueError for arguments.
        TypeError if not all implementations are provided or if there
            are issues related to polymorphism (e.g. attempting to
            implement a non-multimethod function).
    """
    for type_ in cls.__get_type_args(for_type, for_types):
        cls._implement_for_type(for_type=type_,
                                implementations=implementations)
python
def implement(cls, implementations, for_type=None, for_types=None):
    """Provide protocol implementation for a type.

    Register all implementations of multimethod functions in this
    protocol and add the type into the abstract base class of the
    protocol.

    Arguments:
        implementations: A dict of (function, implementation), where each
            function is multimethod and each implementation is a callable.
        for_type: The concrete type implementations apply to.
        for_types: Same as for_type, but takes a tuple of types.

        You may not supply both for_type and for_types for obvious reasons.

    Raises:
        ValueError for arguments.
        TypeError if not all implementations are provided or if there
            are issues related to polymorphism (e.g. attempting to
            implement a non-multimethod function).
    """
    for type_ in cls.__get_type_args(for_type, for_types):
        cls._implement_for_type(for_type=type_,
                                implementations=implementations)
[ "def", "implement", "(", "cls", ",", "implementations", ",", "for_type", "=", "None", ",", "for_types", "=", "None", ")", ":", "for", "type_", "in", "cls", ".", "__get_type_args", "(", "for_type", ",", "for_types", ")", ":", "cls", ".", "_implement_for_type", "(", "for_type", "=", "type_", ",", "implementations", "=", "implementations", ")" ]
Provide protocol implementation for a type.

Register all implementations of multimethod functions in this
protocol and add the type into the abstract base class of the
protocol.

Arguments:
    implementations: A dict of (function, implementation), where each
        function is multimethod and each implementation is a callable.
    for_type: The concrete type implementations apply to.
    for_types: Same as for_type, but takes a tuple of types.

    You may not supply both for_type and for_types for obvious reasons.

Raises:
    ValueError for arguments.
    TypeError if not all implementations are provided or if there
        are issues related to polymorphism (e.g. attempting to
        implement a non-multimethod function).
[ "Provide", "protocol", "implementation", "for", "a", "type", "." ]
b145131499be0c4b755fc2e2ac19be11a50bce6a
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/protocol.py#L271-L294
train