column                       type           range
repository_name              stringlengths  5-67
func_path_in_repository      stringlengths  4-234
func_name                    stringlengths  0-314
whole_func_string            stringlengths  52-3.87M
language                     stringclasses  6 values
func_code_string             stringlengths  52-3.87M
func_code_tokens             listlengths    15-672k
func_documentation_string    stringlengths  1-47.2k
func_documentation_tokens    listlengths    1-3.92k
split_name                   stringclasses  1 value
func_code_url                stringlengths  85-339
Jaymon/endpoints
endpoints/http.py
Request.body
def body(self): """return the raw version of the body""" body = None if self.body_input: body = self.body_input.read(int(self.get_header('content-length', -1))) return body
python
def body(self): """return the raw version of the body""" body = None if self.body_input: body = self.body_input.read(int(self.get_header('content-length', -1))) return body
[ "def", "body", "(", "self", ")", ":", "body", "=", "None", "if", "self", ".", "body_input", ":", "body", "=", "self", ".", "body_input", ".", "read", "(", "int", "(", "self", ".", "get_header", "(", "'content-length'", ",", "-", "1", ")", ")", ")",...
return the raw version of the body
[ "return", "the", "raw", "version", "of", "the", "body" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/http.py#L1004-L1010
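Request.body above reads exactly Content-Length bytes from the WSGI-style input stream. A minimal standalone sketch of that pattern, assuming a plain dict of lower-cased headers and a file-like body_input; read_raw_body is an illustrative name, not part of the endpoints API:

import io

def read_raw_body(body_input, headers):
    # read exactly Content-Length bytes; a missing header becomes -1,
    # which makes file.read() consume whatever is left in the stream
    if not body_input:
        return None
    length = int(headers.get("content-length", -1))
    return body_input.read(length)

raw = read_raw_body(io.BytesIO(b'{"foo": "bar"}'), {"content-length": "14"})
print(raw)  # b'{"foo": "bar"}'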
Jaymon/endpoints
endpoints/http.py
Request.body_kwargs
def body_kwargs(self): """ the request body, if this is a POST request this tries to do the right thing with the body, so if you have set the body and the content type is json, then it will return the body json decoded, if you need the original string body, use body example -- self.body = '{"foo":{"name":"bar"}}' b = self.body_kwargs # dict with: {"foo": { "name": "bar"}} print self.body # string with: '{"foo":{"name":"bar"}}' """ body_kwargs = {} ct = self.get_header("content-type") if ct: ct = ct.lower() if ct.rfind("json") >= 0: body = self.body if body: body_kwargs = json.loads(body) else: if self.body_input: body = RequestBody( fp=self.body_input, headers=self.headers, environ=self.environ #environ=self.raw_request ) body_kwargs = dict(body) else: body = self.body if body: body_kwargs = self._parse_query_str(body) return body_kwargs
python
def body_kwargs(self): """ the request body, if this is a POST request this tries to do the right thing with the body, so if you have set the body and the content type is json, then it will return the body json decoded, if you need the original string body, use body example -- self.body = '{"foo":{"name":"bar"}}' b = self.body_kwargs # dict with: {"foo": { "name": "bar"}} print self.body # string with: '{"foo":{"name":"bar"}}' """ body_kwargs = {} ct = self.get_header("content-type") if ct: ct = ct.lower() if ct.rfind("json") >= 0: body = self.body if body: body_kwargs = json.loads(body) else: if self.body_input: body = RequestBody( fp=self.body_input, headers=self.headers, environ=self.environ #environ=self.raw_request ) body_kwargs = dict(body) else: body = self.body if body: body_kwargs = self._parse_query_str(body) return body_kwargs
[ "def", "body_kwargs", "(", "self", ")", ":", "body_kwargs", "=", "{", "}", "ct", "=", "self", ".", "get_header", "(", "\"content-type\"", ")", "if", "ct", ":", "ct", "=", "ct", ".", "lower", "(", ")", "if", "ct", ".", "rfind", "(", "\"json\"", ")",...
the request body, if this is a POST request this tries to do the right thing with the body, so if you have set the body and the content type is json, then it will return the body json decoded, if you need the original string body, use body example -- self.body = '{"foo":{"name":"bar"}}' b = self.body_kwargs # dict with: {"foo": { "name": "bar"}} print self.body # string with: '{"foo":{"name":"bar"}}'
[ "the", "request", "body", "if", "this", "is", "a", "POST", "request" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/http.py#L1021-L1059
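Request.body_kwargs dispatches on the Content-Type header: JSON bodies are json-decoded, everything else is treated as form data. A standard-library-only sketch of that dispatch, assuming the body is already a string; decode_body is a made-up helper, and the real method uses its RequestBody wrapper for multipart/form input:

import json
from urllib.parse import parse_qs

def decode_body(raw, content_type=""):
    # JSON bodies are decoded with json.loads, anything else is parsed
    # as a query string; single-value keys are unwrapped from their list
    if "json" in content_type.lower():
        return json.loads(raw) if raw else {}
    pairs = parse_qs(raw, keep_blank_values=True)
    return {k: v[0] if len(v) == 1 else v for k, v in pairs.items()}

print(decode_body('{"foo": {"name": "bar"}}', "application/json"))
print(decode_body("foo=1&bar=2&bar=3", "application/x-www-form-urlencoded"))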
Jaymon/endpoints
endpoints/http.py
Request.kwargs
def kwargs(self): """combine GET and POST params to be passed to the controller""" kwargs = dict(self.query_kwargs) kwargs.update(self.body_kwargs) return kwargs
python
def kwargs(self): """combine GET and POST params to be passed to the controller""" kwargs = dict(self.query_kwargs) kwargs.update(self.body_kwargs) return kwargs
[ "def", "kwargs", "(", "self", ")", ":", "kwargs", "=", "dict", "(", "self", ".", "query_kwargs", ")", "kwargs", ".", "update", "(", "self", ".", "body_kwargs", ")", "return", "kwargs" ]
combine GET and POST params to be passed to the controller
[ "combine", "GET", "and", "POST", "params", "to", "be", "passed", "to", "the", "controller" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/http.py#L1068-L1073
Jaymon/endpoints
endpoints/http.py
Request.version
def version(self, content_type="*/*"): """ versioning is based off of this post http://urthen.github.io/2013/05/09/ways-to-version-your-api/ """ v = "" accept_header = self.get_header('accept', "") if accept_header: a = AcceptHeader(accept_header) for mt in a.filter(content_type): v = mt[2].get("version", "") if v: break return v
python
def version(self, content_type="*/*"): """ versioning is based off of this post http://urthen.github.io/2013/05/09/ways-to-version-your-api/ """ v = "" accept_header = self.get_header('accept', "") if accept_header: a = AcceptHeader(accept_header) for mt in a.filter(content_type): v = mt[2].get("version", "") if v: break return v
[ "def", "version", "(", "self", ",", "content_type", "=", "\"*/*\"", ")", ":", "v", "=", "\"\"", "accept_header", "=", "self", ".", "get_header", "(", "'accept'", ",", "\"\"", ")", "if", "accept_header", ":", "a", "=", "AcceptHeader", "(", "accept_header", ...
versioning is based off of this post http://urthen.github.io/2013/05/09/ways-to-version-your-api/
[ "versioning", "is", "based", "off", "of", "this", "post", "http", ":", "//", "urthen", ".", "github", ".", "io", "/", "2013", "/", "05", "/", "09", "/", "ways", "-", "to", "-", "version", "-", "your", "-", "api", "/" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/http.py#L1079-L1092
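Request.version pulls a version value out of the Accept header (the approach described in the linked post), relying on the AcceptHeader class that is not shown here. A rough sketch of the same idea, assuming the convention is a media-type parameter such as Accept: application/json;version=v1; parse_accept_version is an invented name, and the real AcceptHeader also handles quality sorting and wildcards:

def parse_accept_version(accept_header, media_type="application/json"):
    # look for a ;version=... parameter on a matching media type
    for entry in accept_header.split(","):
        parts = [p.strip() for p in entry.split(";")]
        if parts[0] in (media_type, "*/*"):
            params = dict(p.split("=", 1) for p in parts[1:] if "=" in p)
            if "version" in params:
                return params["version"]
    return ""

print(parse_accept_version("application/json;version=v1"))  # v1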
Jaymon/endpoints
endpoints/http.py
Request.get_auth_bearer
def get_auth_bearer(self): """return the bearer token in the authorization header if it exists""" access_token = '' auth_header = self.get_header('authorization') if auth_header: m = re.search(r"^Bearer\s+(\S+)$", auth_header, re.I) if m: access_token = m.group(1) return access_token
python
def get_auth_bearer(self): """return the bearer token in the authorization header if it exists""" access_token = '' auth_header = self.get_header('authorization') if auth_header: m = re.search(r"^Bearer\s+(\S+)$", auth_header, re.I) if m: access_token = m.group(1) return access_token
[ "def", "get_auth_bearer", "(", "self", ")", ":", "access_token", "=", "''", "auth_header", "=", "self", ".", "get_header", "(", "'authorization'", ")", "if", "auth_header", ":", "m", "=", "re", ".", "search", "(", "r\"^Bearer\\s+(\\S+)$\"", ",", "auth_header",...
return the bearer token in the authorization header if it exists
[ "return", "the", "bearer", "token", "in", "the", "authorization", "header", "if", "it", "exists" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/http.py#L1103-L1111
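The bearer-token extraction above is a single regex over the Authorization header. The same check as a standalone function using only the standard library; the function name shadows the method name purely for illustration:

import re

def get_auth_bearer(auth_header):
    # the token is whatever non-whitespace follows "Bearer", case-insensitively
    m = re.search(r"^Bearer\s+(\S+)$", auth_header or "", re.I)
    return m.group(1) if m else ""

print(get_auth_bearer("Bearer abc123"))       # abc123
print(get_auth_bearer("Basic dXNlcjpwYXNz"))  # prints an empty string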
Jaymon/endpoints
endpoints/http.py
Request.get_auth_basic
def get_auth_basic(self): """return the username and password of a basic auth header if it exists""" username = '' password = '' auth_header = self.get_header('authorization') if auth_header: m = re.search(r"^Basic\s+(\S+)$", auth_header, re.I) if m: auth_str = Base64.decode(m.group(1)) username, password = auth_str.split(':', 1) return username, password
python
def get_auth_basic(self): """return the username and password of a basic auth header if it exists""" username = '' password = '' auth_header = self.get_header('authorization') if auth_header: m = re.search(r"^Basic\s+(\S+)$", auth_header, re.I) if m: auth_str = Base64.decode(m.group(1)) username, password = auth_str.split(':', 1) return username, password
[ "def", "get_auth_basic", "(", "self", ")", ":", "username", "=", "''", "password", "=", "''", "auth_header", "=", "self", ".", "get_header", "(", "'authorization'", ")", "if", "auth_header", ":", "m", "=", "re", ".", "search", "(", "r\"^Basic\\s+(\\S+)$\"", ...
return the username and password of a basic auth header if it exists
[ "return", "the", "username", "and", "password", "of", "a", "basic", "auth", "header", "if", "it", "exists" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/http.py#L1113-L1124
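Request.get_auth_basic decodes the base64 credential blob and splits it on the first colon. The snippet uses the project's Base64 helper; this sketch swaps in the standard-library base64 module, which should behave the same for ASCII credentials:

import base64
import re

def get_auth_basic(auth_header):
    # credentials are base64("username:password"); split only on the first colon
    m = re.search(r"^Basic\s+(\S+)$", auth_header or "", re.I)
    if not m:
        return "", ""
    decoded = base64.b64decode(m.group(1)).decode("utf-8")
    username, _, password = decoded.partition(":")
    return username, password

token = base64.b64encode(b"alice:s3cret").decode("ascii")
print(get_auth_basic("Basic " + token))  # ('alice', 's3cret')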
Jaymon/endpoints
endpoints/http.py
Response.code
def code(self): """the http status code to return to the client, by default, 200 if a body is present otherwise 204""" code = getattr(self, '_code', None) if not code: if self.has_body(): code = 200 else: code = 204 return code
python
def code(self): """the http status code to return to the client, by default, 200 if a body is present otherwise 204""" code = getattr(self, '_code', None) if not code: if self.has_body(): code = 200 else: code = 204 return code
[ "def", "code", "(", "self", ")", ":", "code", "=", "getattr", "(", "self", ",", "'_code'", ",", "None", ")", "if", "not", "code", ":", "if", "self", ".", "has_body", "(", ")", ":", "code", "=", "200", "else", ":", "code", "=", "204", "return", ...
the http status code to return to the client, by default, 200 if a body is present otherwise 204
[ "the", "http", "status", "code", "to", "return", "to", "the", "client", "by", "default", "200", "if", "a", "body", "is", "present", "otherwise", "204" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/http.py#L1144-L1153
Jaymon/endpoints
endpoints/http.py
Response.normalize_body
def normalize_body(self, b): """return the body as a string, formatted to the appropriate content type :param b: mixed, the current raw body :returns: unicode string """ if b is None: return '' if self.is_json(): # TODO ??? # I don't like this, if we have a content type but it isn't one # of the supported ones we were returning the exception, which threw # Jarid off, but now it just returns a string, which is not best either # my thought is we could have a body_type_subtype method that would # make it possible to easily handle custom types # eg, "application/json" would become: self.body_application_json(b, is_error) b = json.dumps(b, cls=ResponseBody) else: # just return a string representation of body if no content type b = String(b, self.encoding) return b
python
def normalize_body(self, b): """return the body as a string, formatted to the appropriate content type :param b: mixed, the current raw body :returns: unicode string """ if b is None: return '' if self.is_json(): # TODO ??? # I don't like this, if we have a content type but it isn't one # of the supported ones we were returning the exception, which threw # Jarid off, but now it just returns a string, which is not best either # my thought is we could have a body_type_subtype method that would # make it possible to easily handle custom types # eg, "application/json" would become: self.body_application_json(b, is_error) b = json.dumps(b, cls=ResponseBody) else: # just return a string representation of body if no content type b = String(b, self.encoding) return b
[ "def", "normalize_body", "(", "self", ",", "b", ")", ":", "if", "b", "is", "None", ":", "return", "''", "if", "self", ".", "is_json", "(", ")", ":", "# TODO ???", "# I don't like this, if we have a content type but it isn't one", "# of the supported ones we were retur...
return the body as a string, formatted to the appropriate content type :param b: mixed, the current raw body :returns: unicode string
[ "return", "the", "body", "as", "a", "string", "formatted", "to", "the", "appropriate", "content", "type" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/http.py#L1218-L1240
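Response.normalize_body serializes the body according to the response content type. A cut-down sketch of the same branching, assuming json.dumps and str in place of the project's ResponseBody encoder and String wrapper; normalize_body here is a free function rather than a method:

import json

def normalize_body(body, content_type="application/json"):
    # None becomes an empty string, JSON responses are dumped,
    # anything else falls back to a plain string conversion
    if body is None:
        return ""
    if "json" in content_type.lower():
        return json.dumps(body)
    return body if isinstance(body, str) else str(body)

print(normalize_body({"ok": True}))       # {"ok": true}
print(normalize_body(None))               # (empty string)
print(normalize_body(42, "text/plain"))   # 42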
Jaymon/endpoints
endpoints/decorators/base.py
TargetDecorator.normalize_target_params
def normalize_target_params(self, request, controller_args, controller_kwargs): """get params ready for calling target this method exists because child classes might only really need certain params passed to the method, this allows the child classes to decided what their target methods need :param request: the http.Request instance for this specific request :param controller_args: the arguments that will be passed to the controller :param controller_kwargs: the key/val arguments that will be passed to the controller, these usually come from query strings and post bodies :returns: a tuple (list, dict) that correspond to the *args, **kwargs that will be passed to the target() method """ return [], dict( request=request, controller_args=controller_args, controller_kwargs=controller_kwargs )
python
def normalize_target_params(self, request, controller_args, controller_kwargs): """get params ready for calling target this method exists because child classes might only really need certain params passed to the method, this allows the child classes to decided what their target methods need :param request: the http.Request instance for this specific request :param controller_args: the arguments that will be passed to the controller :param controller_kwargs: the key/val arguments that will be passed to the controller, these usually come from query strings and post bodies :returns: a tuple (list, dict) that correspond to the *args, **kwargs that will be passed to the target() method """ return [], dict( request=request, controller_args=controller_args, controller_kwargs=controller_kwargs )
[ "def", "normalize_target_params", "(", "self", ",", "request", ",", "controller_args", ",", "controller_kwargs", ")", ":", "return", "[", "]", ",", "dict", "(", "request", "=", "request", ",", "controller_args", "=", "controller_args", ",", "controller_kwargs", ...
get params ready for calling target this method exists because child classes might only really need certain params passed to the method, this allows the child classes to decided what their target methods need :param request: the http.Request instance for this specific request :param controller_args: the arguments that will be passed to the controller :param controller_kwargs: the key/val arguments that will be passed to the controller, these usually come from query strings and post bodies :returns: a tuple (list, dict) that correspond to the *args, **kwargs that will be passed to the target() method
[ "get", "params", "ready", "for", "calling", "target" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/decorators/base.py#L19-L37
Jaymon/endpoints
endpoints/decorators/base.py
TargetDecorator.handle_target
def handle_target(self, request, controller_args, controller_kwargs): """Internal method for this class handles normalizing the passed in values from the decorator using .normalize_target_params() and then passes them to the set .target() """ try: param_args, param_kwargs = self.normalize_target_params( request=request, controller_args=controller_args, controller_kwargs=controller_kwargs ) ret = self.target(*param_args, **param_kwargs) if not ret: raise ValueError("{} check failed".format(self.__class__.__name__)) except CallError: raise except Exception as e: self.handle_error(e)
python
def handle_target(self, request, controller_args, controller_kwargs): """Internal method for this class handles normalizing the passed in values from the decorator using .normalize_target_params() and then passes them to the set .target() """ try: param_args, param_kwargs = self.normalize_target_params( request=request, controller_args=controller_args, controller_kwargs=controller_kwargs ) ret = self.target(*param_args, **param_kwargs) if not ret: raise ValueError("{} check failed".format(self.__class__.__name__)) except CallError: raise except Exception as e: self.handle_error(e)
[ "def", "handle_target", "(", "self", ",", "request", ",", "controller_args", ",", "controller_kwargs", ")", ":", "try", ":", "param_args", ",", "param_kwargs", "=", "self", ".", "normalize_target_params", "(", "request", "=", "request", ",", "controller_args", "...
Internal method for this class handles normalizing the passed in values from the decorator using .normalize_target_params() and then passes them to the set .target()
[ "Internal", "method", "for", "this", "class" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/decorators/base.py#L50-L70
Jaymon/endpoints
endpoints/decorators/base.py
TargetDecorator.decorate
def decorate(self, func, target, *anoop, **kwnoop): """decorate the passed in func calling target when func is called :param func: the function being decorated :param target: the target that will be run when func is called :returns: the decorated func """ if target: self.target = target def decorated(decorated_self, *args, **kwargs): self.handle_target( request=decorated_self.request, controller_args=args, controller_kwargs=kwargs ) return func(decorated_self, *args, **kwargs) return decorated
python
def decorate(self, func, target, *anoop, **kwnoop): """decorate the passed in func calling target when func is called :param func: the function being decorated :param target: the target that will be run when func is called :returns: the decorated func """ if target: self.target = target def decorated(decorated_self, *args, **kwargs): self.handle_target( request=decorated_self.request, controller_args=args, controller_kwargs=kwargs ) return func(decorated_self, *args, **kwargs) return decorated
[ "def", "decorate", "(", "self", ",", "func", ",", "target", ",", "*", "anoop", ",", "*", "*", "kwnoop", ")", ":", "if", "target", ":", "self", ".", "target", "=", "target", "def", "decorated", "(", "decorated_self", ",", "*", "args", ",", "*", "*",...
decorate the passed in func calling target when func is called :param func: the function being decorated :param target: the target that will be run when func is called :returns: the decorated func
[ "decorate", "the", "passed", "in", "func", "calling", "target", "when", "func", "is", "called" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/decorators/base.py#L72-L90
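TargetDecorator.decorate wraps a controller method so a target check runs against the request before the method itself. A simplified, self-contained version of that shape; require_check, the lambda target, and the bare ValueError are illustrative, since the real class routes failures through handle_error and CallError:

import functools

class require_check(object):
    """run a target check before the decorated controller method"""
    def __init__(self, target):
        self.target = target

    def __call__(self, func):
        @functools.wraps(func)
        def decorated(controller, *args, **kwargs):
            # the check sees the request plus the args headed to the controller
            if not self.target(controller.request, args, kwargs):
                raise ValueError("{} check failed".format(type(self).__name__))
            return func(controller, *args, **kwargs)
        return decorated

# usage sketch on a controller method:
#   @require_check(lambda request, args, kwargs: "id" in kwargs)
#   def GET(self, **kwargs): ...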
Jaymon/endpoints
endpoints/decorators/__init__.py
param.normalize_flags
def normalize_flags(self, flags): """normalize the flags to make sure needed values are there after this method is called self.flags is available :param flags: the flags that will be normalized """ flags['type'] = flags.get('type', None) paction = flags.get('action', 'store') if paction == 'store_false': flags['default'] = True flags['type'] = bool elif paction == 'store_true': flags['default'] = False flags['type'] = bool prequired = False if 'default' in flags else flags.get('required', True) flags["action"] = paction flags["required"] = prequired self.flags = flags
python
def normalize_flags(self, flags): """normalize the flags to make sure needed values are there after this method is called self.flags is available :param flags: the flags that will be normalized """ flags['type'] = flags.get('type', None) paction = flags.get('action', 'store') if paction == 'store_false': flags['default'] = True flags['type'] = bool elif paction == 'store_true': flags['default'] = False flags['type'] = bool prequired = False if 'default' in flags else flags.get('required', True) flags["action"] = paction flags["required"] = prequired self.flags = flags
[ "def", "normalize_flags", "(", "self", ",", "flags", ")", ":", "flags", "[", "'type'", "]", "=", "flags", ".", "get", "(", "'type'", ",", "None", ")", "paction", "=", "flags", ".", "get", "(", "'action'", ",", "'store'", ")", "if", "paction", "==", ...
normalize the flags to make sure needed values are there after this method is called self.flags is available :param flags: the flags that will be normalized
[ "normalize", "the", "flags", "to", "make", "sure", "needed", "values", "are", "there" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/decorators/__init__.py#L259-L280
Jaymon/endpoints
endpoints/decorators/__init__.py
param.normalize_type
def normalize_type(self, names): """Decide if this param is an arg or a kwarg and set appropriate internal flags""" self.name = names[0] self.is_kwarg = False self.is_arg = False self.names = [] try: # http://stackoverflow.com/a/16488383/5006 uses ask forgiveness because # of py2/3 differences of integer check self.index = int(self.name) self.name = "" self.is_arg = True except ValueError: self.is_kwarg = True self.names = names
python
def normalize_type(self, names): """Decide if this param is an arg or a kwarg and set appropriate internal flags""" self.name = names[0] self.is_kwarg = False self.is_arg = False self.names = [] try: # http://stackoverflow.com/a/16488383/5006 uses ask forgiveness because # of py2/3 differences of integer check self.index = int(self.name) self.name = "" self.is_arg = True except ValueError: self.is_kwarg = True self.names = names
[ "def", "normalize_type", "(", "self", ",", "names", ")", ":", "self", ".", "name", "=", "names", "[", "0", "]", "self", ".", "is_kwarg", "=", "False", "self", ".", "is_arg", "=", "False", "self", ".", "names", "=", "[", "]", "try", ":", "# http://s...
Decide if this param is an arg or a kwarg and set appropriate internal flags
[ "Decide", "if", "this", "param", "is", "an", "arg", "or", "a", "kwarg", "and", "set", "appropriate", "internal", "flags" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/decorators/__init__.py#L282-L298
Jaymon/endpoints
endpoints/decorators/__init__.py
param.normalize_param
def normalize_param(self, slf, args, kwargs): """this is where all the magic happens, this will try and find the param and put its value in kwargs if it has a default and stuff""" if self.is_kwarg: kwargs = self.normalize_kwarg(slf.request, kwargs) else: args = self.normalize_arg(slf.request, args) return slf, args, kwargs
python
def normalize_param(self, slf, args, kwargs): """this is where all the magic happens, this will try and find the param and put its value in kwargs if it has a default and stuff""" if self.is_kwarg: kwargs = self.normalize_kwarg(slf.request, kwargs) else: args = self.normalize_arg(slf.request, args) return slf, args, kwargs
[ "def", "normalize_param", "(", "self", ",", "slf", ",", "args", ",", "kwargs", ")", ":", "if", "self", ".", "is_kwarg", ":", "kwargs", "=", "self", ".", "normalize_kwarg", "(", "slf", ".", "request", ",", "kwargs", ")", "else", ":", "args", "=", "sel...
this is where all the magic happens, this will try and find the param and put its value in kwargs if it has a default and stuff
[ "this", "is", "where", "all", "the", "magic", "happens", "this", "will", "try", "and", "find", "the", "param", "and", "put", "its", "value", "in", "kwargs", "if", "it", "has", "a", "default", "and", "stuff" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/decorators/__init__.py#L314-L321
Jaymon/endpoints
endpoints/decorators/__init__.py
param.find_kwarg
def find_kwarg(self, request, names, required, default, kwargs): """actually try to retrieve names key from params dict :param request: the current request instance, handy for child classes :param names: the names this kwarg can be :param required: True if a name has to be found in kwargs :param default: the default value if name isn't found :param kwargs: the kwargs that will be used to find the value :returns: tuple, found_name, val where found_name is the actual name kwargs contained """ val = default found_name = '' for name in names: if name in kwargs: val = kwargs[name] found_name = name break if not found_name and required: raise ValueError("required param {} does not exist".format(self.name)) return found_name, val
python
def find_kwarg(self, request, names, required, default, kwargs): """actually try to retrieve names key from params dict :param request: the current request instance, handy for child classes :param names: the names this kwarg can be :param required: True if a name has to be found in kwargs :param default: the default value if name isn't found :param kwargs: the kwargs that will be used to find the value :returns: tuple, found_name, val where found_name is the actual name kwargs contained """ val = default found_name = '' for name in names: if name in kwargs: val = kwargs[name] found_name = name break if not found_name and required: raise ValueError("required param {} does not exist".format(self.name)) return found_name, val
[ "def", "find_kwarg", "(", "self", ",", "request", ",", "names", ",", "required", ",", "default", ",", "kwargs", ")", ":", "val", "=", "default", "found_name", "=", "''", "for", "name", "in", "names", ":", "if", "name", "in", "kwargs", ":", "val", "="...
actually try to retrieve names key from params dict :param request: the current request instance, handy for child classes :param names: the names this kwarg can be :param required: True if a name has to be found in kwargs :param default: the default value if name isn't found :param kwargs: the kwargs that will be used to find the value :returns: tuple, found_name, val where found_name is the actual name kwargs contained
[ "actually", "try", "to", "retrieve", "names", "key", "from", "params", "dict" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/decorators/__init__.py#L355-L376
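param.find_kwarg walks the allowed alias names and returns the first one present, falling back to the default or raising when the param is required. The same lookup as a free function, with the request argument dropped since this sketch does not need it:

def find_kwarg(names, kwargs, default=None, required=False):
    # return (found_name, value) for the first alias that appears in kwargs
    for name in names:
        if name in kwargs:
            return name, kwargs[name]
    if required:
        raise ValueError("required param {} does not exist".format(names[0]))
    return "", default

print(find_kwarg(["page", "p"], {"p": 2}, default=1))  # ('p', 2)
print(find_kwarg(["page", "p"], {}, default=1))        # ('', 1)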
Jaymon/endpoints
endpoints/decorators/__init__.py
param.normalize_val
def normalize_val(self, request, val): """This will take the value and make sure it meets expectations :param request: the current request instance :param val: the raw value pulled from kwargs or args :returns: val that has met all param checks :raises: ValueError if val fails any checks """ flags = self.flags paction = flags['action'] ptype = flags['type'] pchoices = flags.get('choices', None) allow_empty = flags.get('allow_empty', False) min_size = flags.get('min_size', None) max_size = flags.get('max_size', None) regex = flags.get('regex', None) if paction in set(['store_list']): if isinstance(val, list) and len(val) > 1: raise ValueError("too many values for param") if isinstance(val, basestring): val = list(val.split(',')) else: val = list(val) elif paction in set(['append', 'append_list']): if not isinstance(val, list): val = [val] if paction == 'append_list': vs = [] for v in val: if isinstance(v, basestring): vs.extend(v.split(',')) else: vs.append(v) val = vs else: if paction not in set(['store', 'store_false', 'store_true']): raise RuntimeError('unknown param action {}'.format(paction)) if regex: failed = False if isinstance(regex, basestring): if not re.search(regex, val): failed = True else: if not regex.search(val): failed = True if failed: raise ValueError("param failed regex check") if ptype: if isinstance(val, list) and ptype != list: val = list(map(ptype, val)) else: if isinstance(ptype, type): if issubclass(ptype, bool): if val in set(['true', 'True', '1']): val = True elif val in set(['false', 'False', '0']): val = False else: val = ptype(val) elif issubclass(ptype, str): charset = request.encoding if is_py2: val = ptype(ByteString(val, charset)) else: val = ptype(String(val, charset)) # if charset and isinstance(val, unicode): # val = val.encode(charset) # else: # val = ptype(val) else: val = ptype(val) else: val = ptype(val) if pchoices: if isinstance(val, list) and ptype != list: for v in val: if v not in pchoices: raise ValueError("param value {} not in choices {}".format(v, pchoices)) else: if val not in pchoices: raise ValueError("param value {} not in choices {}".format(val, pchoices)) # at some point this if statement is just going to be too ridiculous # FieldStorage check is because of this bug https://bugs.python.org/issue19097 if not isinstance(val, cgi.FieldStorage): if not allow_empty and val is not False and not val: if 'default' not in flags: raise ValueError("param was empty") if min_size is not None: failed = False if isinstance(val, (int, float)): if val < min_size: failed = True else: if len(val) < min_size: failed = True if failed: raise ValueError("param was smaller than {}".format(min_size)) if max_size is not None: failed = False if isinstance(val, (int, float)): if val > max_size: failed = True else: if len(val) > max_size: failed = True if failed: raise ValueError("param was bigger than {}".format(max_size)) return val
python
def normalize_val(self, request, val): """This will take the value and make sure it meets expectations :param request: the current request instance :param val: the raw value pulled from kwargs or args :returns: val that has met all param checks :raises: ValueError if val fails any checks """ flags = self.flags paction = flags['action'] ptype = flags['type'] pchoices = flags.get('choices', None) allow_empty = flags.get('allow_empty', False) min_size = flags.get('min_size', None) max_size = flags.get('max_size', None) regex = flags.get('regex', None) if paction in set(['store_list']): if isinstance(val, list) and len(val) > 1: raise ValueError("too many values for param") if isinstance(val, basestring): val = list(val.split(',')) else: val = list(val) elif paction in set(['append', 'append_list']): if not isinstance(val, list): val = [val] if paction == 'append_list': vs = [] for v in val: if isinstance(v, basestring): vs.extend(v.split(',')) else: vs.append(v) val = vs else: if paction not in set(['store', 'store_false', 'store_true']): raise RuntimeError('unknown param action {}'.format(paction)) if regex: failed = False if isinstance(regex, basestring): if not re.search(regex, val): failed = True else: if not regex.search(val): failed = True if failed: raise ValueError("param failed regex check") if ptype: if isinstance(val, list) and ptype != list: val = list(map(ptype, val)) else: if isinstance(ptype, type): if issubclass(ptype, bool): if val in set(['true', 'True', '1']): val = True elif val in set(['false', 'False', '0']): val = False else: val = ptype(val) elif issubclass(ptype, str): charset = request.encoding if is_py2: val = ptype(ByteString(val, charset)) else: val = ptype(String(val, charset)) # if charset and isinstance(val, unicode): # val = val.encode(charset) # else: # val = ptype(val) else: val = ptype(val) else: val = ptype(val) if pchoices: if isinstance(val, list) and ptype != list: for v in val: if v not in pchoices: raise ValueError("param value {} not in choices {}".format(v, pchoices)) else: if val not in pchoices: raise ValueError("param value {} not in choices {}".format(val, pchoices)) # at some point this if statement is just going to be too ridiculous # FieldStorage check is because of this bug https://bugs.python.org/issue19097 if not isinstance(val, cgi.FieldStorage): if not allow_empty and val is not False and not val: if 'default' not in flags: raise ValueError("param was empty") if min_size is not None: failed = False if isinstance(val, (int, float)): if val < min_size: failed = True else: if len(val) < min_size: failed = True if failed: raise ValueError("param was smaller than {}".format(min_size)) if max_size is not None: failed = False if isinstance(val, (int, float)): if val > max_size: failed = True else: if len(val) > max_size: failed = True if failed: raise ValueError("param was bigger than {}".format(max_size)) return val
[ "def", "normalize_val", "(", "self", ",", "request", ",", "val", ")", ":", "flags", "=", "self", ".", "flags", "paction", "=", "flags", "[", "'action'", "]", "ptype", "=", "flags", "[", "'type'", "]", "pchoices", "=", "flags", ".", "get", "(", "'choi...
This will take the value and make sure it meets expectations :param request: the current request instance :param val: the raw value pulled from kwargs or args :returns: val that has met all param checks :raises: ValueError if val fails any checks
[ "This", "will", "take", "the", "value", "and", "make", "sure", "it", "meets", "expectations" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/decorators/__init__.py#L405-L529
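normalize_val layers several checks on a raw value: regex, type coercion, choices, and size bounds. A heavily reduced sketch keeping only those four checks in the same order, without the list/append actions or the py2/py3 string handling; validate_param is an invented name:

import re

def validate_param(val, ptype=None, choices=None, regex=None,
                   min_size=None, max_size=None):
    # regex is checked against the raw value, then the value is coerced
    if regex and not re.search(regex, val):
        raise ValueError("param failed regex check")
    if ptype:
        val = ptype(val)
    if choices and val not in choices:
        raise ValueError("param value {} not in choices {}".format(val, choices))
    # numbers are compared directly, everything else by length
    size = val if isinstance(val, (int, float)) else len(val)
    if min_size is not None and size < min_size:
        raise ValueError("param was smaller than {}".format(min_size))
    if max_size is not None and size > max_size:
        raise ValueError("param was bigger than {}".format(max_size))
    return val

print(validate_param("7", ptype=int, choices=[5, 6, 7], max_size=10))  # 7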
Jaymon/endpoints
endpoints/interface/uwsgi/client.py
WebsocketClient.open
def open(cls, *args, **kwargs): """just something to make it easier to quickly open a connection, do something and then close it""" c = cls(*args, **kwargs) c.connect() try: yield c finally: c.close()
python
def open(cls, *args, **kwargs): """just something to make it easier to quickly open a connection, do something and then close it""" c = cls(*args, **kwargs) c.connect() try: yield c finally: c.close()
[ "def", "open", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "c", "=", "cls", "(", "*", "args", ",", "*", "*", "kwargs", ")", "c", ".", "connect", "(", ")", "try", ":", "yield", "c", "finally", ":", "c", ".", "close", "("...
just something to make it easier to quickly open a connection, do something and then close it
[ "just", "something", "to", "make", "it", "easier", "to", "quickly", "open", "a", "connection", "do", "something", "and", "then", "close", "it" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/interface/uwsgi/client.py#L74-L83
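WebsocketClient.open is a generator classmethod meant to be used as a context manager: connect on entry, close on exit no matter what. The same pattern written with contextlib.contextmanager; open_client and the constructor arguments in the usage comment are assumptions, not the endpoints API:

import contextlib

@contextlib.contextmanager
def open_client(client_factory, *args, **kwargs):
    # connect on entry and always close on exit, even if the body raises
    client = client_factory(*args, **kwargs)
    client.connect()
    try:
        yield client
    finally:
        client.close()

# usage sketch:
#   with open_client(WebsocketClient, host="localhost:8080") as c:
#       c.fetch("GET", "/")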
Jaymon/endpoints
endpoints/interface/uwsgi/client.py
WebsocketClient.connect
def connect(self, path="", headers=None, query=None, timeout=0, **kwargs): """ make the actual connection to the websocket :param headers: dict, key/val pairs of any headers to add to connection, if you would like to override headers just pass in an empty value :param query: dict, any query string params you want to send up with the connection url :returns: Payload, this will return the CONNECT response from the websocket """ ret = None ws_url = self.get_fetch_url(path, query) ws_headers = self.get_fetch_headers("GET", headers) ws_headers = ['{}: {}'.format(h[0], h[1]) for h in ws_headers.items() if h[1]] timeout = self.get_timeout(timeout=timeout, **kwargs) self.set_trace(kwargs.pop("trace", False)) #pout.v(websocket_url, websocket_headers, self.query_kwargs, self.headers) try: logger.debug("{} connecting to {}".format(self.client_id, ws_url)) self.ws = websocket.create_connection( ws_url, header=ws_headers, timeout=timeout, sslopt={'cert_reqs':ssl.CERT_NONE}, ) ret = self.recv_callback(callback=lambda r: r.uuid == "CONNECT") if ret.code >= 400: raise IOError("Failed to connect with code {}".format(ret.code)) # self.headers = headers # self.query_kwargs = query_kwargs except websocket.WebSocketTimeoutException: raise IOError("Failed to connect within {} seconds".format(timeout)) except websocket.WebSocketException as e: raise IOError("Failed to connect with error: {}".format(e)) except socket.error as e: # this is an IOError, I just wanted to be aware of that, most common # problem is: [Errno 111] Connection refused raise return ret
python
def connect(self, path="", headers=None, query=None, timeout=0, **kwargs): """ make the actual connection to the websocket :param headers: dict, key/val pairs of any headers to add to connection, if you would like to override headers just pass in an empty value :param query: dict, any query string params you want to send up with the connection url :returns: Payload, this will return the CONNECT response from the websocket """ ret = None ws_url = self.get_fetch_url(path, query) ws_headers = self.get_fetch_headers("GET", headers) ws_headers = ['{}: {}'.format(h[0], h[1]) for h in ws_headers.items() if h[1]] timeout = self.get_timeout(timeout=timeout, **kwargs) self.set_trace(kwargs.pop("trace", False)) #pout.v(websocket_url, websocket_headers, self.query_kwargs, self.headers) try: logger.debug("{} connecting to {}".format(self.client_id, ws_url)) self.ws = websocket.create_connection( ws_url, header=ws_headers, timeout=timeout, sslopt={'cert_reqs':ssl.CERT_NONE}, ) ret = self.recv_callback(callback=lambda r: r.uuid == "CONNECT") if ret.code >= 400: raise IOError("Failed to connect with code {}".format(ret.code)) # self.headers = headers # self.query_kwargs = query_kwargs except websocket.WebSocketTimeoutException: raise IOError("Failed to connect within {} seconds".format(timeout)) except websocket.WebSocketException as e: raise IOError("Failed to connect with error: {}".format(e)) except socket.error as e: # this is an IOError, I just wanted to be aware of that, most common # problem is: [Errno 111] Connection refused raise return ret
[ "def", "connect", "(", "self", ",", "path", "=", "\"\"", ",", "headers", "=", "None", ",", "query", "=", "None", ",", "timeout", "=", "0", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "None", "ws_url", "=", "self", ".", "get_fetch_url", "(", "p...
make the actual connection to the websocket :param headers: dict, key/val pairs of any headers to add to connection, if you would like to override headers just pass in an empty value :param query: dict, any query string params you want to send up with the connection url :returns: Payload, this will return the CONNECT response from the websocket
[ "make", "the", "actual", "connection", "to", "the", "websocket" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/interface/uwsgi/client.py#L105-L151
Jaymon/endpoints
endpoints/interface/uwsgi/client.py
WebsocketClient.fetch
def fetch(self, method, path, query=None, body=None, timeout=0, **kwargs): """send a Message :param method: string, something like "POST" or "GET" :param path: string, the path part of a uri (eg, /foo/bar) :param body: dict, what you want to send to "method path" :param timeout: integer, how long to wait before failing trying to send """ ret = None if not query: query = {} if not body: body = {} query.update(body) # body takes precedence body = query self.send_count += 1 payload = self.get_fetch_request(method, path, body) attempts = 1 max_attempts = self.attempts success = False while not success: kwargs['timeout'] = timeout try: try: if not self.connected: self.connect(path) with self.wstimeout(**kwargs) as timeout: kwargs['timeout'] = timeout logger.debug('{} send {} attempt {}/{} with timeout {}'.format( self.client_id, payload.uuid, attempts, max_attempts, timeout )) sent_bits = self.ws.send(payload.payload) logger.debug('{} sent {} bytes'.format(self.client_id, sent_bits)) if sent_bits: ret = self.fetch_response(payload, **kwargs) if ret: success = True except websocket.WebSocketConnectionClosedException as e: self.ws.shutdown() raise IOError("connection is not open but reported it was open: {}".format(e)) except (IOError, TypeError) as e: logger.debug('{} error on send attempt {}: {}'.format(self.client_id, attempts, e)) success = False finally: if not success: attempts += 1 if attempts > max_attempts: raise else: timeout *= 2 if (attempts / max_attempts) > 0.50: logger.debug( "{} closing and re-opening connection for next attempt".format(self.client_id) ) self.close() return ret
python
def fetch(self, method, path, query=None, body=None, timeout=0, **kwargs): """send a Message :param method: string, something like "POST" or "GET" :param path: string, the path part of a uri (eg, /foo/bar) :param body: dict, what you want to send to "method path" :param timeout: integer, how long to wait before failing trying to send """ ret = None if not query: query = {} if not body: body = {} query.update(body) # body takes precedence body = query self.send_count += 1 payload = self.get_fetch_request(method, path, body) attempts = 1 max_attempts = self.attempts success = False while not success: kwargs['timeout'] = timeout try: try: if not self.connected: self.connect(path) with self.wstimeout(**kwargs) as timeout: kwargs['timeout'] = timeout logger.debug('{} send {} attempt {}/{} with timeout {}'.format( self.client_id, payload.uuid, attempts, max_attempts, timeout )) sent_bits = self.ws.send(payload.payload) logger.debug('{} sent {} bytes'.format(self.client_id, sent_bits)) if sent_bits: ret = self.fetch_response(payload, **kwargs) if ret: success = True except websocket.WebSocketConnectionClosedException as e: self.ws.shutdown() raise IOError("connection is not open but reported it was open: {}".format(e)) except (IOError, TypeError) as e: logger.debug('{} error on send attempt {}: {}'.format(self.client_id, attempts, e)) success = False finally: if not success: attempts += 1 if attempts > max_attempts: raise else: timeout *= 2 if (attempts / max_attempts) > 0.50: logger.debug( "{} closing and re-opening connection for next attempt".format(self.client_id) ) self.close() return ret
[ "def", "fetch", "(", "self", ",", "method", ",", "path", ",", "query", "=", "None", ",", "body", "=", "None", ",", "timeout", "=", "0", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "None", "if", "not", "query", ":", "query", "=", "{", "}", ...
send a Message :param method: string, something like "POST" or "GET" :param path: string, the path part of a uri (eg, /foo/bar) :param body: dict, what you want to send to "method path" :param timeout: integer, how long to wait before failing trying to send
[ "send", "a", "Message" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/interface/uwsgi/client.py#L161-L226
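WebsocketClient.fetch retries a failed send, doubling the timeout on each attempt and re-raising once the attempt budget is spent (it also reconnects partway through, which is omitted here). A minimal retry loop showing just that timeout-doubling behaviour; send_with_retries and the flaky test function are made up for the demo:

def send_with_retries(send_once, attempts=3, timeout=2.0):
    # on failure, double the timeout and try again; re-raise the last
    # error once every attempt has been used up
    last_error = None
    for attempt in range(1, attempts + 1):
        try:
            return send_once(timeout)
        except IOError as e:
            last_error = e
            timeout *= 2
    raise last_error

calls = []
def flaky(timeout):
    calls.append(timeout)
    if len(calls) < 3:
        raise IOError("transient failure")
    return "ok"

print(send_with_retries(flaky), calls)  # ok [2.0, 4.0, 8.0]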
Jaymon/endpoints
endpoints/interface/uwsgi/client.py
WebsocketClient.fetch_response
def fetch_response(self, req_payload, **kwargs): """payload has been sent, do anything else you need to do (eg, wait for response?) :param req_payload: Payload, the payload sent to the server :returns: Payload, the response payload """ if req_payload.uuid: uuids = set([req_payload.uuid, "CONNECT"]) def callback(res_payload): #pout.v(req_payload, res_payload) #ret = req_payload.uuid == res_payload.uuid or res_payload.uuid == "CONNECT" ret = res_payload.uuid in uuids if ret: logger.debug('{} received {} response for {}'.format( self.client_id, res_payload.code, res_payload.uuid, )) return ret res_payload = self.recv_callback(callback, **kwargs) return res_payload
python
def fetch_response(self, req_payload, **kwargs): """payload has been sent, do anything else you need to do (eg, wait for response?) :param req_payload: Payload, the payload sent to the server :returns: Payload, the response payload """ if req_payload.uuid: uuids = set([req_payload.uuid, "CONNECT"]) def callback(res_payload): #pout.v(req_payload, res_payload) #ret = req_payload.uuid == res_payload.uuid or res_payload.uuid == "CONNECT" ret = res_payload.uuid in uuids if ret: logger.debug('{} received {} response for {}'.format( self.client_id, res_payload.code, res_payload.uuid, )) return ret res_payload = self.recv_callback(callback, **kwargs) return res_payload
[ "def", "fetch_response", "(", "self", ",", "req_payload", ",", "*", "*", "kwargs", ")", ":", "if", "req_payload", ".", "uuid", ":", "uuids", "=", "set", "(", "[", "req_payload", ".", "uuid", ",", "\"CONNECT\"", "]", ")", "def", "callback", "(", "res_pa...
payload has been sent, do anything else you need to do (eg, wait for response?) :param req_payload: Payload, the payload sent to the server :returns: Payload, the response payload
[ "payload", "has", "been", "sent", "do", "anything", "else", "you", "need", "to", "do", "(", "eg", "wait", "for", "response?", ")" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/interface/uwsgi/client.py#L228-L250
Jaymon/endpoints
endpoints/interface/uwsgi/client.py
WebsocketClient.ping
def ping(self, timeout=0, **kwargs): """THIS DOES NOT WORK, UWSGI DOES NOT RESPOND TO PINGS""" # http://stackoverflow.com/a/2257449/5006 def rand_id(size=8, chars=string.ascii_uppercase + string.digits): return ''.join(random.choice(chars) for _ in range(size)) payload = rand_id() self.ws.ping(payload) opcode, data = self.recv_raw(timeout, [websocket.ABNF.OPCODE_PONG], **kwargs) if data != payload: raise IOError("Pinged server but did not receive correct pong")
python
def ping(self, timeout=0, **kwargs): """THIS DOES NOT WORK, UWSGI DOES NOT RESPOND TO PINGS""" # http://stackoverflow.com/a/2257449/5006 def rand_id(size=8, chars=string.ascii_uppercase + string.digits): return ''.join(random.choice(chars) for _ in range(size)) payload = rand_id() self.ws.ping(payload) opcode, data = self.recv_raw(timeout, [websocket.ABNF.OPCODE_PONG], **kwargs) if data != payload: raise IOError("Pinged server but did not receive correct pong")
[ "def", "ping", "(", "self", ",", "timeout", "=", "0", ",", "*", "*", "kwargs", ")", ":", "# http://stackoverflow.com/a/2257449/5006", "def", "rand_id", "(", "size", "=", "8", ",", "chars", "=", "string", ".", "ascii_uppercase", "+", "string", ".", "digits"...
THIS DOES NOT WORK, UWSGI DOES NOT RESPOND TO PINGS
[ "THIS", "DOES", "NOT", "WORK", "UWSGI", "DOES", "NOT", "RESPOND", "TO", "PINGS" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/interface/uwsgi/client.py#L252-L263
Jaymon/endpoints
endpoints/interface/uwsgi/client.py
WebsocketClient.recv_raw
def recv_raw(self, timeout, opcodes, **kwargs): """this is very internal, it will return the raw opcode and data if they match the passed in opcodes""" orig_timeout = self.get_timeout(timeout) timeout = orig_timeout while timeout > 0.0: start = time.time() if not self.connected: self.connect(timeout=timeout, **kwargs) with self.wstimeout(timeout, **kwargs) as timeout: logger.debug('{} waiting to receive for {} seconds'.format(self.client_id, timeout)) try: opcode, data = self.ws.recv_data() if opcode in opcodes: timeout = 0.0 break else: if opcode == websocket.ABNF.OPCODE_CLOSE: raise websocket.WebSocketConnectionClosedException() except websocket.WebSocketTimeoutException: pass except websocket.WebSocketConnectionClosedException: # bug in Websocket.recv_data(), this should be done by Websocket try: self.ws.shutdown() except AttributeError: pass #raise EOFError("websocket closed by server and reconnection did nothing") if timeout: stop = time.time() timeout -= (stop - start) else: break if timeout < 0.0: raise IOError("recv timed out in {} seconds".format(orig_timeout)) return opcode, data
python
def recv_raw(self, timeout, opcodes, **kwargs): """this is very internal, it will return the raw opcode and data if they match the passed in opcodes""" orig_timeout = self.get_timeout(timeout) timeout = orig_timeout while timeout > 0.0: start = time.time() if not self.connected: self.connect(timeout=timeout, **kwargs) with self.wstimeout(timeout, **kwargs) as timeout: logger.debug('{} waiting to receive for {} seconds'.format(self.client_id, timeout)) try: opcode, data = self.ws.recv_data() if opcode in opcodes: timeout = 0.0 break else: if opcode == websocket.ABNF.OPCODE_CLOSE: raise websocket.WebSocketConnectionClosedException() except websocket.WebSocketTimeoutException: pass except websocket.WebSocketConnectionClosedException: # bug in Websocket.recv_data(), this should be done by Websocket try: self.ws.shutdown() except AttributeError: pass #raise EOFError("websocket closed by server and reconnection did nothing") if timeout: stop = time.time() timeout -= (stop - start) else: break if timeout < 0.0: raise IOError("recv timed out in {} seconds".format(orig_timeout)) return opcode, data
[ "def", "recv_raw", "(", "self", ",", "timeout", ",", "opcodes", ",", "*", "*", "kwargs", ")", ":", "orig_timeout", "=", "self", ".", "get_timeout", "(", "timeout", ")", "timeout", "=", "orig_timeout", "while", "timeout", ">", "0.0", ":", "start", "=", ...
this is very internal, it will return the raw opcode and data if they match the passed in opcodes
[ "this", "is", "very", "internal", "it", "will", "return", "the", "raw", "opcode", "and", "data", "if", "they", "match", "the", "passed", "in", "opcodes" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/interface/uwsgi/client.py#L265-L307
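recv_raw keeps receiving inside a shrinking time budget: each pass subtracts its own elapsed time from the remaining timeout and gives up once the budget is spent. A stripped-down version of that loop using a monotonic clock; recv_until and poll are illustrative names, and the opcode filtering and reconnection of the real method are left out:

import time

def recv_until(poll, timeout=5.0):
    # keep polling until something arrives or the time budget runs out;
    # poll(remaining) should return None when nothing was received in time
    remaining = timeout
    while remaining > 0.0:
        start = time.monotonic()
        result = poll(remaining)
        if result is not None:
            return result
        remaining -= time.monotonic() - start
    raise IOError("recv timed out in {} seconds".format(timeout))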
Jaymon/endpoints
endpoints/interface/uwsgi/client.py
WebsocketClient.get_fetch_response
def get_fetch_response(self, raw): """This just makes the payload instance more HTTPClient like""" p = Payload(raw) p._body = p.body return p
python
def get_fetch_response(self, raw): """This just makes the payload instance more HTTPClient like""" p = Payload(raw) p._body = p.body return p
[ "def", "get_fetch_response", "(", "self", ",", "raw", ")", ":", "p", "=", "Payload", "(", "raw", ")", "p", ".", "_body", "=", "p", ".", "body", "return", "p" ]
This just makes the payload instance more HTTPClient like
[ "This", "just", "makes", "the", "payload", "instance", "more", "HTTPClient", "like" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/interface/uwsgi/client.py#L309-L313
Jaymon/endpoints
endpoints/interface/uwsgi/client.py
WebsocketClient.recv
def recv(self, timeout=0, **kwargs): """this will receive data and convert it into a message, really this is more of an internal method, it is used in recv_callback and recv_msg""" opcode, data = self.recv_raw(timeout, [websocket.ABNF.OPCODE_TEXT], **kwargs) return self.get_fetch_response(data)
python
def recv(self, timeout=0, **kwargs): """this will receive data and convert it into a message, really this is more of an internal method, it is used in recv_callback and recv_msg""" opcode, data = self.recv_raw(timeout, [websocket.ABNF.OPCODE_TEXT], **kwargs) return self.get_fetch_response(data)
[ "def", "recv", "(", "self", ",", "timeout", "=", "0", ",", "*", "*", "kwargs", ")", ":", "opcode", ",", "data", "=", "self", ".", "recv_raw", "(", "timeout", ",", "[", "websocket", ".", "ABNF", ".", "OPCODE_TEXT", "]", ",", "*", "*", "kwargs", ")...
this will receive data and convert it into a message, really this is more of an internal method, it is used in recv_callback and recv_msg
[ "this", "will", "receive", "data", "and", "convert", "it", "into", "a", "message", "really", "this", "is", "more", "of", "an", "internal", "method", "it", "is", "used", "in", "recv_callback", "and", "recv_msg" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/interface/uwsgi/client.py#L315-L319
Jaymon/endpoints
endpoints/interface/uwsgi/client.py
WebsocketClient.recv_callback
def recv_callback(self, callback, **kwargs): """receive messages and validate them with the callback, if the callback returns True then the message is valid and will be returned, if False then this will try and receive another message until timeout is 0""" payload = None timeout = self.get_timeout(**kwargs) full_timeout = timeout while timeout > 0.0: kwargs['timeout'] = timeout start = time.time() payload = self.recv(**kwargs) if callback(payload): break payload = None stop = time.time() elapsed = stop - start timeout -= elapsed if not payload: raise IOError("recv_callback timed out in {}".format(full_timeout)) return payload
python
def recv_callback(self, callback, **kwargs): """receive messages and validate them with the callback, if the callback returns True then the message is valid and will be returned, if False then this will try and receive another message until timeout is 0""" payload = None timeout = self.get_timeout(**kwargs) full_timeout = timeout while timeout > 0.0: kwargs['timeout'] = timeout start = time.time() payload = self.recv(**kwargs) if callback(payload): break payload = None stop = time.time() elapsed = stop - start timeout -= elapsed if not payload: raise IOError("recv_callback timed out in {}".format(full_timeout)) return payload
[ "def", "recv_callback", "(", "self", ",", "callback", ",", "*", "*", "kwargs", ")", ":", "payload", "=", "None", "timeout", "=", "self", ".", "get_timeout", "(", "*", "*", "kwargs", ")", "full_timeout", "=", "timeout", "while", "timeout", ">", "0.0", "...
receive messages and validate them with the callback, if the callback returns True then the message is valid and will be returned, if False then this will try and receive another message until timeout is 0
[ "receive", "messages", "and", "validate", "them", "with", "the", "callback", "if", "the", "callback", "returns", "True", "then", "the", "message", "is", "valid", "and", "will", "be", "returned", "if", "False", "then", "this", "will", "try", "and", "receive",...
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/interface/uwsgi/client.py#L321-L342
Jaymon/endpoints
endpoints/call.py
Call.create_controller
def create_controller(self): """Create a controller to handle the request :returns: Controller, this Controller instance should be able to handle the request """ body = None req = self.request res = self.response rou = self.router con = None controller_info = {} try: controller_info = rou.find(req, res) except IOError as e: logger.warning(str(e), exc_info=True) raise CallError( 408, "The client went away before the request body was retrieved." ) except (ImportError, AttributeError, TypeError) as e: exc_info = sys.exc_info() logger.warning(str(e), exc_info=exc_info) raise CallError( 404, "{} not found because of {} \"{}\" on {}:{}".format( req.path, exc_info[0].__name__, str(e), os.path.basename(exc_info[2].tb_frame.f_code.co_filename), exc_info[2].tb_lineno ) ) else: con = controller_info['class_instance'] return con
python
def create_controller(self): """Create a controller to handle the request :returns: Controller, this Controller instance should be able to handle the request """ body = None req = self.request res = self.response rou = self.router con = None controller_info = {} try: controller_info = rou.find(req, res) except IOError as e: logger.warning(str(e), exc_info=True) raise CallError( 408, "The client went away before the request body was retrieved." ) except (ImportError, AttributeError, TypeError) as e: exc_info = sys.exc_info() logger.warning(str(e), exc_info=exc_info) raise CallError( 404, "{} not found because of {} \"{}\" on {}:{}".format( req.path, exc_info[0].__name__, str(e), os.path.basename(exc_info[2].tb_frame.f_code.co_filename), exc_info[2].tb_lineno ) ) else: con = controller_info['class_instance'] return con
[ "def", "create_controller", "(", "self", ")", ":", "body", "=", "None", "req", "=", "self", ".", "request", "res", "=", "self", ".", "response", "rou", "=", "self", ".", "router", "con", "=", "None", "controller_info", "=", "{", "}", "try", ":", "con...
Create a controller to handle the request :returns: Controller, this Controller instance should be able to handle the request
[ "Create", "a", "controller", "to", "handle", "the", "request" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/call.py#L43-L83
Jaymon/endpoints
endpoints/call.py
Call.handle
def handle(self): """Called from the interface to actually handle the request.""" body = None req = self.request res = self.response rou = self.router con = None start = time.time() try: con = self.create_controller() con.call = self self.controller = con if not self.quiet: con.log_start(start) # the controller handle method will manipulate self.response, it first # tries to find a handle_HTTP_METHOD method, if it can't find that it # will default to the handle method (which is implemented on Controller). # method arguments are passed in so child classes can add decorators # just like the HTTP_METHOD that will actually handle the request controller_args, controller_kwargs = con.find_method_params() controller_method = getattr(con, "handle_{}".format(req.method), None) if not controller_method: controller_method = getattr(con, "handle") if not self.quiet: logger.debug("Using handle method: {}.{}".format( con.__class__.__name__, controller_method.__name__ )) controller_method(*controller_args, **controller_kwargs) except Exception as e: self.handle_error(e) # this will manipulate self.response finally: if res.code == 204: res.headers.pop('Content-Type', None) res.body = None # just to be sure since body could've been "" if con: if not self.quiet: con.log_stop(start) return res
python
def handle(self): """Called from the interface to actually handle the request.""" body = None req = self.request res = self.response rou = self.router con = None start = time.time() try: con = self.create_controller() con.call = self self.controller = con if not self.quiet: con.log_start(start) # the controller handle method will manipulate self.response, it first # tries to find a handle_HTTP_METHOD method, if it can't find that it # will default to the handle method (which is implemented on Controller). # method arguments are passed in so child classes can add decorators # just like the HTTP_METHOD that will actually handle the request controller_args, controller_kwargs = con.find_method_params() controller_method = getattr(con, "handle_{}".format(req.method), None) if not controller_method: controller_method = getattr(con, "handle") if not self.quiet: logger.debug("Using handle method: {}.{}".format( con.__class__.__name__, controller_method.__name__ )) controller_method(*controller_args, **controller_kwargs) except Exception as e: self.handle_error(e) # this will manipulate self.response finally: if res.code == 204: res.headers.pop('Content-Type', None) res.body = None # just to be sure since body could've been "" if con: if not self.quiet: con.log_stop(start) return res
[ "def", "handle", "(", "self", ")", ":", "body", "=", "None", "req", "=", "self", ".", "request", "res", "=", "self", ".", "response", "rou", "=", "self", ".", "router", "con", "=", "None", "start", "=", "time", ".", "time", "(", ")", "try", ":", ...
Called from the interface to actually handle the request.
[ "Called", "from", "the", "interface", "to", "actually", "handle", "the", "request", "." ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/call.py#L85-L130
Jaymon/endpoints
endpoints/call.py
Call.handle_error
def handle_error(self, e, **kwargs): """if an exception is raised while trying to handle the request it will go through this method This method will set the response body and then also call Controller.handle_error for further customization if the Controller is available :param e: Exception, the error that was raised :param **kwargs: dict, any other information that might be handy """ req = self.request res = self.response con = self.controller if isinstance(e, CallStop): logger.info(str(e), exc_info=True) res.code = e.code res.add_headers(e.headers) res.body = e.body elif isinstance(e, Redirect): logger.info(str(e), exc_info=True) res.code = e.code res.add_headers(e.headers) res.body = None elif isinstance(e, (AccessDenied, CallError)): logger.warning(str(e), exc_info=True) res.code = e.code res.add_headers(e.headers) res.body = e elif isinstance(e, NotImplementedError): logger.warning(str(e), exc_info=True) res.code = 501 res.body = e elif isinstance(e, TypeError): e_msg = unicode(e) if e_msg.startswith(req.method) and 'argument' in e_msg: logger.debug(e_msg, exc_info=True) logger.warning( " ".join([ "Either the path arguments ({} args) or the keyword arguments", "({} args) for {}.{} do not match the {} handling method's", "definition" ]).format( len(req.controller_info["method_args"]), len(req.controller_info["method_kwargs"]), req.controller_info['module_name'], req.controller_info['class_name'], req.method ) ) res.code = 405 else: logger.exception(e) res.code = 500 res.body = e else: logger.exception(e) res.code = 500 res.body = e if con: error_method = getattr(con, "handle_{}_error".format(req.method), None) if not error_method: error_method = getattr(con, "handle_error") logger.debug("Using error method: {}.{}".format( con.__class__.__name__, error_method.__name__ )) error_method(e, **kwargs)
python
def handle_error(self, e, **kwargs): """if an exception is raised while trying to handle the request it will go through this method This method will set the response body and then also call Controller.handle_error for further customization if the Controller is available :param e: Exception, the error that was raised :param **kwargs: dict, any other information that might be handy """ req = self.request res = self.response con = self.controller if isinstance(e, CallStop): logger.info(str(e), exc_info=True) res.code = e.code res.add_headers(e.headers) res.body = e.body elif isinstance(e, Redirect): logger.info(str(e), exc_info=True) res.code = e.code res.add_headers(e.headers) res.body = None elif isinstance(e, (AccessDenied, CallError)): logger.warning(str(e), exc_info=True) res.code = e.code res.add_headers(e.headers) res.body = e elif isinstance(e, NotImplementedError): logger.warning(str(e), exc_info=True) res.code = 501 res.body = e elif isinstance(e, TypeError): e_msg = unicode(e) if e_msg.startswith(req.method) and 'argument' in e_msg: logger.debug(e_msg, exc_info=True) logger.warning( " ".join([ "Either the path arguments ({} args) or the keyword arguments", "({} args) for {}.{} do not match the {} handling method's", "definition" ]).format( len(req.controller_info["method_args"]), len(req.controller_info["method_kwargs"]), req.controller_info['module_name'], req.controller_info['class_name'], req.method ) ) res.code = 405 else: logger.exception(e) res.code = 500 res.body = e else: logger.exception(e) res.code = 500 res.body = e if con: error_method = getattr(con, "handle_{}_error".format(req.method), None) if not error_method: error_method = getattr(con, "handle_error") logger.debug("Using error method: {}.{}".format( con.__class__.__name__, error_method.__name__ )) error_method(e, **kwargs)
[ "def", "handle_error", "(", "self", ",", "e", ",", "*", "*", "kwargs", ")", ":", "req", "=", "self", ".", "request", "res", "=", "self", ".", "response", "con", "=", "self", ".", "controller", "if", "isinstance", "(", "e", ",", "CallStop", ")", ":"...
If an exception is raised while trying to handle the request it will go through this method. This method will set the response body and then also call Controller.handle_error for further customization if the Controller is available. :param e: Exception, the error that was raised :param **kwargs: dict, any other information that might be handy
[ "if", "an", "exception", "is", "raised", "while", "trying", "to", "handle", "the", "request", "it", "will", "go", "through", "this", "method" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/call.py#L132-L208
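The fallback chain above (first handle_{METHOD}_error, then handle_error) means a controller can customize error handling per HTTP verb. A minimal sketch, assuming the Controller base class is importable from the endpoints package; the Invoice controller and its fields are made up for the example:

    from endpoints import Controller

    class Invoice(Controller):
        def GET(self, invoice_id):
            raise ValueError("unknown invoice {}".format(invoice_id))

        def handle_GET_error(self, e, **kwargs):
            # picked first for errors raised while handling GET requests
            self.response.code = 400
            self.response.body = {"error": str(e)}

        def handle_error(self, e, **kwargs):
            # fallback hook for every other HTTP method
            self.response.code = 500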
Jaymon/endpoints
endpoints/call.py
Router.module_names
def module_names(self): """get all the modules in the controller_prefix :returns: set, a set of string module names """ controller_prefix = self.controller_prefix _module_name_cache = self._module_name_cache if controller_prefix in _module_name_cache: return _module_name_cache[controller_prefix] module = self.get_module(controller_prefix) if hasattr(module, "__path__"): # path attr exists so this is a package modules = self.find_modules(module.__path__[0], controller_prefix) else: # we have a lonely .py file modules = set([controller_prefix]) _module_name_cache.setdefault(controller_prefix, {}) _module_name_cache[controller_prefix] = modules return modules
python
def module_names(self): """get all the modules in the controller_prefix :returns: set, a set of string module names """ controller_prefix = self.controller_prefix _module_name_cache = self._module_name_cache if controller_prefix in _module_name_cache: return _module_name_cache[controller_prefix] module = self.get_module(controller_prefix) if hasattr(module, "__path__"): # path attr exists so this is a package modules = self.find_modules(module.__path__[0], controller_prefix) else: # we have a lonely .py file modules = set([controller_prefix]) _module_name_cache.setdefault(controller_prefix, {}) _module_name_cache[controller_prefix] = modules return modules
[ "def", "module_names", "(", "self", ")", ":", "controller_prefix", "=", "self", ".", "controller_prefix", "_module_name_cache", "=", "self", ".", "_module_name_cache", "if", "controller_prefix", "in", "_module_name_cache", ":", "return", "_module_name_cache", "[", "co...
get all the modules in the controller_prefix :returns: set, a set of string module names
[ "get", "all", "the", "modules", "in", "the", "controller_prefix" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/call.py#L224-L247
Jaymon/endpoints
endpoints/call.py
Router.modules
def modules(self): """Returns an iterator of the actual modules, not just their names :returns: generator, each module under self.controller_prefix """ for modname in self.module_names: module = importlib.import_module(modname) yield module
python
def modules(self): """Returns an iterator of the actual modules, not just their names :returns: generator, each module under self.controller_prefix """ for modname in self.module_names: module = importlib.import_module(modname) yield module
[ "def", "modules", "(", "self", ")", ":", "for", "modname", "in", "self", ".", "module_names", ":", "module", "=", "importlib", ".", "import_module", "(", "modname", ")", "yield", "module" ]
Returns an iterator of the actual modules, not just their names :returns: generator, each module under self.controller_prefix
[ "Returns", "an", "iterator", "of", "the", "actual", "modules", "not", "just", "their", "names" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/call.py#L250-L257
Jaymon/endpoints
endpoints/call.py
Router.find_modules
def find_modules(self, path, prefix): """recursive method that will find all the submodules of the given module at prefix with path""" modules = set([prefix]) # https://docs.python.org/2/library/pkgutil.html#pkgutil.iter_modules for module_info in pkgutil.iter_modules([path]): # we want to ignore any "private" modules if module_info[1].startswith('_'): continue module_prefix = ".".join([prefix, module_info[1]]) if module_info[2]: # module is a package submodules = self.find_modules(os.path.join(path, module_info[1]), module_prefix) modules.update(submodules) else: modules.add(module_prefix) return modules
python
def find_modules(self, path, prefix): """recursive method that will find all the submodules of the given module at prefix with path""" modules = set([prefix]) # https://docs.python.org/2/library/pkgutil.html#pkgutil.iter_modules for module_info in pkgutil.iter_modules([path]): # we want to ignore any "private" modules if module_info[1].startswith('_'): continue module_prefix = ".".join([prefix, module_info[1]]) if module_info[2]: # module is a package submodules = self.find_modules(os.path.join(path, module_info[1]), module_prefix) modules.update(submodules) else: modules.add(module_prefix) return modules
[ "def", "find_modules", "(", "self", ",", "path", ",", "prefix", ")", ":", "modules", "=", "set", "(", "[", "prefix", "]", ")", "# https://docs.python.org/2/library/pkgutil.html#pkgutil.iter_modules", "for", "module_info", "in", "pkgutil", ".", "iter_modules", "(", ...
Recursive method that finds all the submodules of the module named `prefix`, located at `path`
[ "recursive", "method", "that", "will", "find", "all", "the", "submodules", "of", "the", "given", "module", "at", "prefix", "with", "path" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/call.py#L317-L336
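As a quick illustration of what a single level of that pkgutil walk yields, the snippet below lists the submodules of the stdlib json package, used here only as a stand-in for a controller package:

    import json
    import pkgutil

    # each entry is (finder, module name, is_package); find_modules recurses
    # into entries whose is_package flag is True and skips names that start
    # with an underscore
    for _, name, is_pkg in pkgutil.iter_modules(json.__path__):
        print(name, is_pkg)   # e.g. decoder False, encoder False, tool False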
Jaymon/endpoints
endpoints/call.py
Router.get_module_name
def get_module_name(self, path_args): """returns the module_name and remaining path args. return -- tuple -- (module_name, path_args)""" controller_prefix = self.controller_prefix cset = self.module_names module_name = controller_prefix mod_name = module_name while path_args: mod_name += "." + path_args[0] if mod_name in cset: module_name = mod_name path_args.pop(0) else: break return module_name, path_args
python
def get_module_name(self, path_args): """returns the module_name and remaining path args. return -- tuple -- (module_name, path_args)""" controller_prefix = self.controller_prefix cset = self.module_names module_name = controller_prefix mod_name = module_name while path_args: mod_name += "." + path_args[0] if mod_name in cset: module_name = mod_name path_args.pop(0) else: break return module_name, path_args
[ "def", "get_module_name", "(", "self", ",", "path_args", ")", ":", "controller_prefix", "=", "self", ".", "controller_prefix", "cset", "=", "self", ".", "module_names", "module_name", "=", "controller_prefix", "mod_name", "=", "module_name", "while", "path_args", ...
returns the module_name and remaining path args. return -- tuple -- (module_name, path_args)
[ "returns", "the", "module_name", "and", "remaining", "path", "args", "." ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/call.py#L338-L354
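A standalone sketch of the longest-prefix match performed above; the module set and the path arguments are hypothetical:

    module_names = {"controllers", "controllers.users", "controllers.users.admin"}

    def longest_module_match(controller_prefix, path_args, cset):
        # consume path segments for as long as they still name a known module
        module_name = controller_prefix
        mod_name = module_name
        path_args = list(path_args)
        while path_args:
            mod_name += "." + path_args[0]
            if mod_name in cset:
                module_name = mod_name
                path_args.pop(0)
            else:
                break
        return module_name, path_args

    # a request for /users/admin/5 arrives as path args ["users", "admin", "5"]
    print(longest_module_match("controllers", ["users", "admin", "5"], module_names))
    # -> ('controllers.users.admin', ['5'])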
Jaymon/endpoints
endpoints/call.py
Router.get_class
def get_class(self, module, class_name): """try and get the class_name from the module and make sure it is a valid controller""" # let's get the class class_object = getattr(module, class_name, None) if not class_object or not issubclass(class_object, Controller): class_object = None return class_object
python
def get_class(self, module, class_name): """try and get the class_name from the module and make sure it is a valid controller""" # let's get the class class_object = getattr(module, class_name, None) if not class_object or not issubclass(class_object, Controller): class_object = None return class_object
[ "def", "get_class", "(", "self", ",", "module", ",", "class_name", ")", ":", "# let's get the class", "class_object", "=", "getattr", "(", "module", ",", "class_name", ",", "None", ")", "if", "not", "class_object", "or", "not", "issubclass", "(", "class_object...
try and get the class_name from the module and make sure it is a valid controller
[ "try", "and", "get", "the", "class_name", "from", "the", "module", "and", "make", "sure", "it", "is", "a", "valid", "controller" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/call.py#L360-L368
Jaymon/endpoints
endpoints/call.py
Controller.OPTIONS
def OPTIONS(self, *args, **kwargs): """Handles CORS requests for this controller if self.cors is False then this will raise a 405, otherwise it sets everything necessary to satisfy the request in self.response """ if not self.cors: raise CallError(405) req = self.request origin = req.get_header('origin') if not origin: raise CallError(400, 'Need Origin header') call_headers = [ ('Access-Control-Request-Headers', 'Access-Control-Allow-Headers'), ('Access-Control-Request-Method', 'Access-Control-Allow-Methods') ] for req_header, res_header in call_headers: v = req.get_header(req_header) if v: self.response.set_header(res_header, v) else: raise CallError(400, 'Need {} header'.format(req_header)) other_headers = { 'Access-Control-Allow-Credentials': 'true', 'Access-Control-Max-Age': 3600 } self.response.add_headers(other_headers)
python
def OPTIONS(self, *args, **kwargs): """Handles CORS requests for this controller if self.cors is False then this will raise a 405, otherwise it sets everything necessary to satisfy the request in self.response """ if not self.cors: raise CallError(405) req = self.request origin = req.get_header('origin') if not origin: raise CallError(400, 'Need Origin header') call_headers = [ ('Access-Control-Request-Headers', 'Access-Control-Allow-Headers'), ('Access-Control-Request-Method', 'Access-Control-Allow-Methods') ] for req_header, res_header in call_headers: v = req.get_header(req_header) if v: self.response.set_header(res_header, v) else: raise CallError(400, 'Need {} header'.format(req_header)) other_headers = { 'Access-Control-Allow-Credentials': 'true', 'Access-Control-Max-Age': 3600 } self.response.add_headers(other_headers)
[ "def", "OPTIONS", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "cors", ":", "raise", "CallError", "(", "405", ")", "req", "=", "self", ".", "request", "origin", "=", "req", ".", "get_header", "(", "'...
Handles CORS requests for this controller. If self.cors is False then this will raise a 405; otherwise it sets everything necessary to satisfy the request in self.response
[ "Handles", "CORS", "requests", "for", "this", "controller" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/call.py#L439-L468
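A preflight request that would satisfy the checks above might look like the following sketch; the URL is hypothetical, and the response headers listed in the comments are the ones set by OPTIONS plus the Access-Control-Allow-Origin header added by set_cors_common_headers:

    import requests

    resp = requests.options(
        "http://localhost:8000/foo",
        headers={
            "Origin": "http://example.com",
            "Access-Control-Request-Method": "POST",
            "Access-Control-Request-Headers": "Content-Type",
        },
    )
    # With cors enabled the requested values are echoed back:
    #   Access-Control-Allow-Origin: http://example.com
    #   Access-Control-Allow-Methods: POST
    #   Access-Control-Allow-Headers: Content-Type
    #   Access-Control-Allow-Credentials: true
    #   Access-Control-Max-Age: 3600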
Jaymon/endpoints
endpoints/call.py
Controller.set_cors_common_headers
def set_cors_common_headers(self): """ This will set the headers that are needed for any cors request (OPTIONS or real) """ if not self.cors: return req = self.request origin = req.get_header('origin') if origin: self.response.set_header('Access-Control-Allow-Origin', origin)
python
def set_cors_common_headers(self): """ This will set the headers that are needed for any cors request (OPTIONS or real) """ if not self.cors: return req = self.request origin = req.get_header('origin') if origin: self.response.set_header('Access-Control-Allow-Origin', origin)
[ "def", "set_cors_common_headers", "(", "self", ")", ":", "if", "not", "self", ".", "cors", ":", "return", "req", "=", "self", ".", "request", "origin", "=", "req", ".", "get_header", "(", "'origin'", ")", "if", "origin", ":", "self", ".", "response", "...
This will set the headers that are needed for any cors request (OPTIONS or real)
[ "This", "will", "set", "the", "headers", "that", "are", "needed", "for", "any", "cors", "request", "(", "OPTIONS", "or", "real", ")" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/call.py#L470-L479
Jaymon/endpoints
endpoints/call.py
Controller.handle
def handle(self, *controller_args, **controller_kwargs): """handles the request and returns the response This should set any response information directly onto self.response this method has the same signature as the request handling methods (eg, GET, POST) so subclasses can override this method and add decorators :param *controller_args: tuple, the path arguments that will be passed to the request handling method (eg, GET, POST) :param **controller_kwargs: dict, the query and body params merged together """ req = self.request res = self.response res.set_header('Content-Type', "{};charset={}".format( self.content_type, self.encoding )) encoding = req.accept_encoding res.encoding = encoding if encoding else self.encoding res_method_name = "" controller_methods = self.find_methods() #controller_args, controller_kwargs = self.find_method_params() for controller_method_name, controller_method in controller_methods: try: logger.debug("Attempting to handle request with {}.{}.{}".format( req.controller_info['module_name'], req.controller_info['class_name'], controller_method_name )) res.body = controller_method( *controller_args, **controller_kwargs ) res_method_name = controller_method_name break except VersionError as e: logger.debug("Request {}.{}.{} failed version check [{} not in {}]".format( req.controller_info['module_name'], req.controller_info['class_name'], controller_method_name, e.request_version, e.versions )) except RouteError: logger.debug("Request {}.{}.{} failed routing check".format( req.controller_info['module_name'], req.controller_info['class_name'], controller_method_name )) if not res_method_name: # https://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1 # An origin server SHOULD return the status code 405 (Method Not Allowed) # if the method is known by the origin server but not allowed for the # requested resource raise CallError(405, "Could not find a method to satisfy {}".format( req.path ))
python
def handle(self, *controller_args, **controller_kwargs): """handles the request and returns the response This should set any response information directly onto self.response this method has the same signature as the request handling methods (eg, GET, POST) so subclasses can override this method and add decorators :param *controller_args: tuple, the path arguments that will be passed to the request handling method (eg, GET, POST) :param **controller_kwargs: dict, the query and body params merged together """ req = self.request res = self.response res.set_header('Content-Type', "{};charset={}".format( self.content_type, self.encoding )) encoding = req.accept_encoding res.encoding = encoding if encoding else self.encoding res_method_name = "" controller_methods = self.find_methods() #controller_args, controller_kwargs = self.find_method_params() for controller_method_name, controller_method in controller_methods: try: logger.debug("Attempting to handle request with {}.{}.{}".format( req.controller_info['module_name'], req.controller_info['class_name'], controller_method_name )) res.body = controller_method( *controller_args, **controller_kwargs ) res_method_name = controller_method_name break except VersionError as e: logger.debug("Request {}.{}.{} failed version check [{} not in {}]".format( req.controller_info['module_name'], req.controller_info['class_name'], controller_method_name, e.request_version, e.versions )) except RouteError: logger.debug("Request {}.{}.{} failed routing check".format( req.controller_info['module_name'], req.controller_info['class_name'], controller_method_name )) if not res_method_name: # https://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1 # An origin server SHOULD return the status code 405 (Method Not Allowed) # if the method is known by the origin server but not allowed for the # requested resource raise CallError(405, "Could not find a method to satisfy {}".format( req.path ))
[ "def", "handle", "(", "self", ",", "*", "controller_args", ",", "*", "*", "controller_kwargs", ")", ":", "req", "=", "self", ".", "request", "res", "=", "self", ".", "response", "res", ".", "set_header", "(", "'Content-Type'", ",", "\"{};charset={}\"", "."...
Handles the request and returns the response. This should set any response information directly onto self.response. This method has the same signature as the request handling methods (eg, GET, POST) so subclasses can override this method and add decorators. :param *controller_args: tuple, the path arguments that will be passed to the request handling method (eg, GET, POST) :param **controller_kwargs: dict, the query and body params merged together
[ "handles", "the", "request", "and", "returns", "the", "response" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/call.py#L481-L543
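Because handle has the same signature as the request handling methods, a subclass can wrap every request by overriding it and delegating to the parent implementation. A sketch, with the Orders controller and the header name made up for the example:

    from endpoints import Controller

    class Orders(Controller):
        def handle(self, *controller_args, **controller_kwargs):
            # runs for every HTTP method before the normal dispatch
            self.response.set_header("X-Handled-By", "Orders")
            return super(Orders, self).handle(*controller_args, **controller_kwargs)

        def GET(self):
            return {"orders": []}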
Jaymon/endpoints
endpoints/call.py
Controller.find_methods
def find_methods(self): """Find the methods that could satisfy this request This will go through and find any method that starts with the request.method, so if the request was GET /foo then this would find any methods that start with GET https://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html :returns: list of tuples (method_name, method), all the found methods """ methods = [] req = self.request method_name = req.method.upper() method_names = set() members = inspect.getmembers(self) for member_name, member in members: if member_name.startswith(method_name): if member: methods.append((member_name, member)) method_names.add(member_name) if len(methods) == 0: # https://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1 # and 501 (Not Implemented) if the method is unrecognized or not # implemented by the origin server logger.warning("No methods to handle {} found".format(method_name), exc_info=True) raise CallError(501, "{} {} not implemented".format(req.method, req.path)) elif len(methods) > 1 and method_name in method_names: raise ValueError( " ".join([ "A multi method {} request should not have any methods named {}.", "Instead, all {} methods should use use an appropriate decorator", "like @route or @version and have a unique name starting with {}_" ]).format( method_name, method_name, method_name, method_name ) ) return methods
python
def find_methods(self): """Find the methods that could satisfy this request This will go through and find any method that starts with the request.method, so if the request was GET /foo then this would find any methods that start with GET https://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html :returns: list of tuples (method_name, method), all the found methods """ methods = [] req = self.request method_name = req.method.upper() method_names = set() members = inspect.getmembers(self) for member_name, member in members: if member_name.startswith(method_name): if member: methods.append((member_name, member)) method_names.add(member_name) if len(methods) == 0: # https://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1 # and 501 (Not Implemented) if the method is unrecognized or not # implemented by the origin server logger.warning("No methods to handle {} found".format(method_name), exc_info=True) raise CallError(501, "{} {} not implemented".format(req.method, req.path)) elif len(methods) > 1 and method_name in method_names: raise ValueError( " ".join([ "A multi method {} request should not have any methods named {}.", "Instead, all {} methods should use use an appropriate decorator", "like @route or @version and have a unique name starting with {}_" ]).format( method_name, method_name, method_name, method_name ) ) return methods
[ "def", "find_methods", "(", "self", ")", ":", "methods", "=", "[", "]", "req", "=", "self", ".", "request", "method_name", "=", "req", ".", "method", ".", "upper", "(", ")", "method_names", "=", "set", "(", ")", "members", "=", "inspect", ".", "getme...
Find the methods that could satisfy this request. This will go through and find any method that starts with the request.method, so if the request was GET /foo then this would find any methods that start with GET. https://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html :returns: list of tuples (method_name, method), all the found methods
[ "Find", "the", "methods", "that", "could", "satisfy", "this", "request" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/call.py#L554-L598
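The name matching above simply collects every member whose name starts with the request method, which is easy to see with inspect on a toy class (not a real Controller):

    import inspect

    class Toy(object):
        def GET(self): pass
        def GET_v2(self): pass      # in practice this would carry a decorator such as @version
        def POST(self): pass

    matches = [name for name, _ in inspect.getmembers(Toy, inspect.isfunction)
               if name.startswith("GET")]
    print(matches)   # ['GET', 'GET_v2'] -- both are candidates for a GET request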
Jaymon/endpoints
endpoints/call.py
Controller.find_method_params
def find_method_params(self): """Return the method params :returns: tuple (args, kwargs) that will be passed as *args, **kwargs """ req = self.request args = req.controller_info["method_args"] kwargs = req.controller_info["method_kwargs"] return args, kwargs
python
def find_method_params(self): """Return the method params :returns: tuple (args, kwargs) that will be passed as *args, **kwargs """ req = self.request args = req.controller_info["method_args"] kwargs = req.controller_info["method_kwargs"] return args, kwargs
[ "def", "find_method_params", "(", "self", ")", ":", "req", "=", "self", ".", "request", "args", "=", "req", ".", "controller_info", "[", "\"method_args\"", "]", "kwargs", "=", "req", ".", "controller_info", "[", "\"method_kwargs\"", "]", "return", "args", ",...
Return the method params :returns: tuple (args, kwargs) that will be passed as *args, **kwargs
[ "Return", "the", "method", "params" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/call.py#L600-L608
Jaymon/endpoints
endpoints/call.py
Controller.log_start
def log_start(self, start): """log all the headers and stuff at the start of the request""" if not logger.isEnabledFor(logging.INFO): return try: req = self.request logger.info("REQUEST {} {}?{}".format(req.method, req.path, req.query)) logger.info(datetime.datetime.strftime(datetime.datetime.utcnow(), "DATE %Y-%m-%dT%H:%M:%S.%f")) ip = req.ip if ip: logger.info("\tIP ADDRESS: {}".format(ip)) if 'authorization' in req.headers: logger.info('AUTH {}'.format(req.headers['authorization'])) ignore_hs = set([ 'accept-language', 'accept-encoding', 'connection', 'authorization', 'host', 'x-forwarded-for' ]) hs = ["Request Headers..."] for k, v in req.headers.items(): if k not in ignore_hs: hs.append("\t{}: {}".format(k, v)) logger.info(os.linesep.join(hs)) except Exception as e: logger.warn(e, exc_info=True)
python
def log_start(self, start): """log all the headers and stuff at the start of the request""" if not logger.isEnabledFor(logging.INFO): return try: req = self.request logger.info("REQUEST {} {}?{}".format(req.method, req.path, req.query)) logger.info(datetime.datetime.strftime(datetime.datetime.utcnow(), "DATE %Y-%m-%dT%H:%M:%S.%f")) ip = req.ip if ip: logger.info("\tIP ADDRESS: {}".format(ip)) if 'authorization' in req.headers: logger.info('AUTH {}'.format(req.headers['authorization'])) ignore_hs = set([ 'accept-language', 'accept-encoding', 'connection', 'authorization', 'host', 'x-forwarded-for' ]) hs = ["Request Headers..."] for k, v in req.headers.items(): if k not in ignore_hs: hs.append("\t{}: {}".format(k, v)) logger.info(os.linesep.join(hs)) except Exception as e: logger.warn(e, exc_info=True)
[ "def", "log_start", "(", "self", ",", "start", ")", ":", "if", "not", "logger", ".", "isEnabledFor", "(", "logging", ".", "INFO", ")", ":", "return", "try", ":", "req", "=", "self", ".", "request", "logger", ".", "info", "(", "\"REQUEST {} {}?{}\"", "....
log all the headers and stuff at the start of the request
[ "log", "all", "the", "headers", "and", "stuff", "at", "the", "start", "of", "the", "request" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/call.py#L610-L643
Jaymon/endpoints
endpoints/call.py
Controller.log_stop
def log_stop(self, start): """log a summary line on how the request went""" if not logger.isEnabledFor(logging.INFO): return stop = time.time() get_elapsed = lambda start, stop, multiplier, rnd: round(abs(stop - start) * float(multiplier), rnd) elapsed = get_elapsed(start, stop, 1000.00, 1) total = "%0.1f ms" % (elapsed) logger.info("RESPONSE {} {} in {}".format(self.response.code, self.response.status, total))
python
def log_stop(self, start): """log a summary line on how the request went""" if not logger.isEnabledFor(logging.INFO): return stop = time.time() get_elapsed = lambda start, stop, multiplier, rnd: round(abs(stop - start) * float(multiplier), rnd) elapsed = get_elapsed(start, stop, 1000.00, 1) total = "%0.1f ms" % (elapsed) logger.info("RESPONSE {} {} in {}".format(self.response.code, self.response.status, total))
[ "def", "log_stop", "(", "self", ",", "start", ")", ":", "if", "not", "logger", ".", "isEnabledFor", "(", "logging", ".", "INFO", ")", ":", "return", "stop", "=", "time", ".", "time", "(", ")", "get_elapsed", "=", "lambda", "start", ",", "stop", ",", ...
log a summary line on how the request went
[ "log", "a", "summary", "line", "on", "how", "the", "request", "went" ]
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/call.py#L645-L653
ksbg/sparklanes
sparklanes/_framework/lane.py
build_lane_from_yaml
def build_lane_from_yaml(path): """Builds a `sparklanes.Lane` object from a YAML definition file. Parameters ---------- path: str Path to the YAML definition file Returns ------- Lane Lane, built according to definition in YAML file """ # Open with open(path, 'rb') as yaml_definition: definition = yaml.load(yaml_definition) # Validate schema try: validate_schema(definition) except SchemaError as exc: raise LaneSchemaError(**exc.__dict__) def build(lb_def, branch=False): """Function to recursively build the `sparklanes.Lane` object from a YAML definition""" init_kwargs = {k: lb_def[k] for k in (a for a in ('run_parallel', 'name') if a in lb_def)} lane_or_branch = Lane(**init_kwargs) if not branch else Branch(**init_kwargs) for task in lb_def['tasks']: if 'branch' in task: branch_def = task['branch'] lane_or_branch.add(build(branch_def, True)) else: sep = task['class'].rfind('.') if sep == -1: raise LaneImportError('Class must include its parent module') mdl = task['class'][:sep] cls_ = task['class'][sep + 1:] try: cls = getattr(import_module(mdl), cls_) except ImportError: raise LaneImportError('Could not find module %s' % mdl) except AttributeError: raise LaneImportError('Could not find class %s' % cls_) args = task['args'] if 'args' in task else [] args = [args] if not isinstance(args, list) else args kwargs = task['kwargs'] if 'kwargs' in task else {} lane_or_branch.add(cls, *args, **kwargs) return lane_or_branch return build(definition['lane'])
python
def build_lane_from_yaml(path): """Builds a `sparklanes.Lane` object from a YAML definition file. Parameters ---------- path: str Path to the YAML definition file Returns ------- Lane Lane, built according to definition in YAML file """ # Open with open(path, 'rb') as yaml_definition: definition = yaml.load(yaml_definition) # Validate schema try: validate_schema(definition) except SchemaError as exc: raise LaneSchemaError(**exc.__dict__) def build(lb_def, branch=False): """Function to recursively build the `sparklanes.Lane` object from a YAML definition""" init_kwargs = {k: lb_def[k] for k in (a for a in ('run_parallel', 'name') if a in lb_def)} lane_or_branch = Lane(**init_kwargs) if not branch else Branch(**init_kwargs) for task in lb_def['tasks']: if 'branch' in task: branch_def = task['branch'] lane_or_branch.add(build(branch_def, True)) else: sep = task['class'].rfind('.') if sep == -1: raise LaneImportError('Class must include its parent module') mdl = task['class'][:sep] cls_ = task['class'][sep + 1:] try: cls = getattr(import_module(mdl), cls_) except ImportError: raise LaneImportError('Could not find module %s' % mdl) except AttributeError: raise LaneImportError('Could not find class %s' % cls_) args = task['args'] if 'args' in task else [] args = [args] if not isinstance(args, list) else args kwargs = task['kwargs'] if 'kwargs' in task else {} lane_or_branch.add(cls, *args, **kwargs) return lane_or_branch return build(definition['lane'])
[ "def", "build_lane_from_yaml", "(", "path", ")", ":", "# Open", "with", "open", "(", "path", ",", "'rb'", ")", "as", "yaml_definition", ":", "definition", "=", "yaml", ".", "load", "(", "yaml_definition", ")", "# Validate schema", "try", ":", "validate_schema"...
Builds a `sparklanes.Lane` object from a YAML definition file. Parameters ---------- path: str Path to the YAML definition file Returns ------- Lane Lane, built according to definition in YAML file
[ "Builds", "a", "sparklanes", ".", "Lane", "object", "from", "a", "YAML", "definition", "file", "." ]
train
https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/lane.py#L165-L218
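For reference, a definition file that the build function above would accept could look like the snippet below. The task class paths are hypothetical; the keys (lane, name, run_parallel, tasks, class, args, kwargs, branch) are the ones the parser reads:

    lane:
      name: Example lane
      tasks:
        - class: my_tasks.Extract
          kwargs:
            path: data.csv
        - branch:
            name: Parallel cleaning
            run_parallel: true
            tasks:
              - class: my_tasks.CleanA
              - class: my_tasks.CleanB
        - class: my_tasks.Load
          args: [out.csv]

Saved to disk and passed to build_lane_from_yaml, this would yield a Lane whose run() executes Extract, then CleanA and CleanB (in parallel, inside the branch), then Load.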
ksbg/sparklanes
sparklanes/_framework/lane.py
Lane.__validate_task
def __validate_task(self, cls, entry_mtd_name, args, kwargs): """Checks if a class is a task, i.e. if it has been decorated with `sparklanes.Task`, and if the supplied args/kwargs match the signature of the task's entry method. Parameters ---------- cls : LaneTask entry_mtd_name : str Name of the method, which is called when the task is run args : list kwargs : dict """ if not isclass(cls) or not issubclass(cls, LaneTask): raise TypeError('Tried to add non-Task `%s` to a Lane. Are you sure the task was ' 'decorated with `sparklanes.Task`?' % str(cls)) validate_params(cls, entry_mtd_name, *args, **kwargs)
python
def __validate_task(self, cls, entry_mtd_name, args, kwargs): """Checks if a class is a task, i.e. if it has been decorated with `sparklanes.Task`, and if the supplied args/kwargs match the signature of the task's entry method. Parameters ---------- cls : LaneTask entry_mtd_name : str Name of the method, which is called when the task is run args : list kwargs : dict """ if not isclass(cls) or not issubclass(cls, LaneTask): raise TypeError('Tried to add non-Task `%s` to a Lane. Are you sure the task was ' 'decorated with `sparklanes.Task`?' % str(cls)) validate_params(cls, entry_mtd_name, *args, **kwargs)
[ "def", "__validate_task", "(", "self", ",", "cls", ",", "entry_mtd_name", ",", "args", ",", "kwargs", ")", ":", "if", "not", "isclass", "(", "cls", ")", "or", "not", "issubclass", "(", "cls", ",", "LaneTask", ")", ":", "raise", "TypeError", "(", "'Trie...
Checks if a class is a task, i.e. if it has been decorated with `sparklanes.Task`, and if the supplied args/kwargs match the signature of the task's entry method. Parameters ---------- cls : LaneTask entry_mtd_name : str Name of the method, which is called when the task is run args : list kwargs : dict
[ "Checks", "if", "a", "class", "is", "a", "task", "i", ".", "e", ".", "if", "it", "has", "been", "decorated", "with", "sparklanes", ".", "Task", "and", "if", "the", "supplied", "args", "/", "kwargs", "match", "the", "signature", "of", "the", "task", "...
train
https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/lane.py#L68-L84
ksbg/sparklanes
sparklanes/_framework/lane.py
Lane.add
def add(self, cls_or_branch, *args, **kwargs): """Adds a task or branch to the lane. Parameters ---------- cls_or_branch : Class *args Variable length argument list to be passed to `cls_or_branch` during instantiation **kwargs Variable length keyword arguments to be passed to `cls_or_branch` during instantiation Returns ------- self: Returns `self` to allow method chaining """ if isinstance(cls_or_branch, Branch): self.tasks.append(cls_or_branch) # Add branch with already validated tasks else: # Validate self.__validate_task(cls_or_branch, '__init__', args, kwargs) # Append self.tasks.append({'cls_or_branch': cls_or_branch, 'args': args, 'kwargs': kwargs}) return self
python
def add(self, cls_or_branch, *args, **kwargs): """Adds a task or branch to the lane. Parameters ---------- cls_or_branch : Class *args Variable length argument list to be passed to `cls_or_branch` during instantiation **kwargs Variable length keyword arguments to be passed to `cls_or_branch` during instantiation Returns ------- self: Returns `self` to allow method chaining """ if isinstance(cls_or_branch, Branch): self.tasks.append(cls_or_branch) # Add branch with already validated tasks else: # Validate self.__validate_task(cls_or_branch, '__init__', args, kwargs) # Append self.tasks.append({'cls_or_branch': cls_or_branch, 'args': args, 'kwargs': kwargs}) return self
[ "def", "add", "(", "self", ",", "cls_or_branch", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "cls_or_branch", ",", "Branch", ")", ":", "self", ".", "tasks", ".", "append", "(", "cls_or_branch", ")", "# Add branch with al...
Adds a task or branch to the lane. Parameters ---------- cls_or_branch : Class *args Variable length argument list to be passed to `cls_or_branch` during instantiation **kwargs Variable length keyword arguments to be passed to `cls_or_branch` during instantiation Returns ------- self: Returns `self` to allow method chaining
[ "Adds", "a", "task", "or", "branch", "to", "the", "lane", "." ]
train
https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/lane.py#L86-L109
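Since add returns the lane itself, tasks can be chained. A small sketch, assuming Lane and Task are importable from the sparklanes package and that the Task decorator takes the name of the entry method (as entry_mtd_name above suggests); the task classes and their arguments are made up:

    from sparklanes import Lane, Task

    @Task('extract')                   # 'extract' is the entry method run by the task
    class ExtractTask(object):
        def __init__(self, path):      # add() validates these args against __init__
            self.path = path

        def extract(self):
            print('extracting %s' % self.path)

    @Task('load')
    class LoadTask(object):
        def load(self):
            print('loading')

    lane = Lane(name='ETL').add(ExtractTask, 'data.csv').add(LoadTask)
    lane.run()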
ksbg/sparklanes
sparklanes/_framework/lane.py
Lane.run
def run(self): """Executes the tasks in the lane in the order in which they have been added, unless `self.run_parallel` is True, then a thread is spawned for each task and executed in parallel (note that task threads are still spawned in the order in which they were added). """ logger = make_default_logger(INTERNAL_LOGGER_NAME) logger.info('\n%s\nExecuting `%s`\n%s\n', '-'*80, self.name, '-'*80) logger.info('\n%s', str(self)) threads = [] if not self.tasks: raise LaneExecutionError('No tasks to execute!') for task_def_or_branch in self.tasks: if isinstance(task_def_or_branch, Branch): task_def_or_branch.run() elif isinstance(task_def_or_branch['cls_or_branch'], Branch): # Nested Branch task_def_or_branch['cls_or_branch'].run() else: task = task_def_or_branch['cls_or_branch'](*task_def_or_branch['args'], **task_def_or_branch['kwargs']) if self.run_parallel: threads.append(LaneTaskThread(task)) else: task() if threads: for thread in threads: thread.start() for thread in threads: thread.join() logger.info('\n%s\nFinished executing `%s`\n%s', '-'*80, self.name, '-'*80) return self
python
def run(self): """Executes the tasks in the lane in the order in which they have been added, unless `self.run_parallel` is True, then a thread is spawned for each task and executed in parallel (note that task threads are still spawned in the order in which they were added). """ logger = make_default_logger(INTERNAL_LOGGER_NAME) logger.info('\n%s\nExecuting `%s`\n%s\n', '-'*80, self.name, '-'*80) logger.info('\n%s', str(self)) threads = [] if not self.tasks: raise LaneExecutionError('No tasks to execute!') for task_def_or_branch in self.tasks: if isinstance(task_def_or_branch, Branch): task_def_or_branch.run() elif isinstance(task_def_or_branch['cls_or_branch'], Branch): # Nested Branch task_def_or_branch['cls_or_branch'].run() else: task = task_def_or_branch['cls_or_branch'](*task_def_or_branch['args'], **task_def_or_branch['kwargs']) if self.run_parallel: threads.append(LaneTaskThread(task)) else: task() if threads: for thread in threads: thread.start() for thread in threads: thread.join() logger.info('\n%s\nFinished executing `%s`\n%s', '-'*80, self.name, '-'*80) return self
[ "def", "run", "(", "self", ")", ":", "logger", "=", "make_default_logger", "(", "INTERNAL_LOGGER_NAME", ")", "logger", ".", "info", "(", "'\\n%s\\nExecuting `%s`\\n%s\\n'", ",", "'-'", "*", "80", ",", "self", ".", "name", ",", "'-'", "*", "80", ")", "logge...
Executes the tasks in the lane in the order in which they have been added, unless `self.run_parallel` is True, in which case a thread is spawned for each task and the tasks are executed in parallel (note that task threads are still spawned in the order in which they were added).
[ "Executes", "the", "tasks", "in", "the", "lane", "in", "the", "order", "in", "which", "they", "have", "been", "added", "unless", "self", ".", "run_parallel", "is", "True", "then", "a", "thread", "is", "spawned", "for", "each", "task", "and", "executed", ...
train
https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/lane.py#L111-L146
cbclab/MOT
examples/german_tank_problem.py
get_historical_data
def get_historical_data(nmr_problems): """Get the historical tank data. Args: nmr_problems (int): the number of problems Returns: tuple: (observations, nmr_tanks_ground_truth) """ observations = np.tile(np.array([[10, 256, 202, 97]]), (nmr_problems, 1)) nmr_tanks_ground_truth = np.ones((nmr_problems,)) * 276 return observations, nmr_tanks_ground_truth
python
def get_historical_data(nmr_problems): """Get the historical tank data. Args: nmr_problems (int): the number of problems Returns: tuple: (observations, nmr_tanks_ground_truth) """ observations = np.tile(np.array([[10, 256, 202, 97]]), (nmr_problems, 1)) nmr_tanks_ground_truth = np.ones((nmr_problems,)) * 276 return observations, nmr_tanks_ground_truth
[ "def", "get_historical_data", "(", "nmr_problems", ")", ":", "observations", "=", "np", ".", "tile", "(", "np", ".", "array", "(", "[", "[", "10", ",", "256", ",", "202", ",", "97", "]", "]", ")", ",", "(", "nmr_problems", ",", "1", ")", ")", "nm...
Get the historical tank data. Args: nmr_problems (int): the number of problems Returns: tuple: (observations, nmr_tanks_ground_truth)
[ "Get", "the", "historical", "tank", "data", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/examples/german_tank_problem.py#L15-L26
cbclab/MOT
examples/german_tank_problem.py
get_simulated_data
def get_simulated_data(nmr_problems): """Simulate some data. This returns the simulated tank observations and the corresponding ground truth maximum number of tanks. Args: nmr_problems (int): the number of problems Returns: tuple: (observations, nmr_tanks_ground_truth) """ # The number of tanks we observe per problem nmr_observed_tanks = 10 # Generate some maximum number of tanks. Basically the ground truth of the estimation problem. nmr_tanks_ground_truth = normal(nmr_problems, 1, mean=250, std=30, ctype='uint') # Generate some random tank observations observations = uniform(nmr_problems, nmr_observed_tanks, low=0, high=nmr_tanks_ground_truth, ctype='uint') return observations, nmr_tanks_ground_truth
python
def get_simulated_data(nmr_problems): """Simulate some data. This returns the simulated tank observations and the corresponding ground truth maximum number of tanks. Args: nmr_problems (int): the number of problems Returns: tuple: (observations, nmr_tanks_ground_truth) """ # The number of tanks we observe per problem nmr_observed_tanks = 10 # Generate some maximum number of tanks. Basically the ground truth of the estimation problem. nmr_tanks_ground_truth = normal(nmr_problems, 1, mean=250, std=30, ctype='uint') # Generate some random tank observations observations = uniform(nmr_problems, nmr_observed_tanks, low=0, high=nmr_tanks_ground_truth, ctype='uint') return observations, nmr_tanks_ground_truth
[ "def", "get_simulated_data", "(", "nmr_problems", ")", ":", "# The number of tanks we observe per problem", "nmr_observed_tanks", "=", "10", "# Generate some maximum number of tanks. Basically the ground truth of the estimation problem.", "nmr_tanks_ground_truth", "=", "normal", "(", "...
Simulate some data. This returns the simulated tank observations and the corresponding ground truth maximum number of tanks. Args: nmr_problems (int): the number of problems Returns: tuple: (observations, nmr_tanks_ground_truth)
[ "Simulate", "some", "data", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/examples/german_tank_problem.py#L29-L49
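A NumPy-only sketch of the same simulation; the original uses MOT's normal/uniform helpers, which also accept a per-problem upper bound and return device-friendly arrays:

    import numpy as np

    nmr_problems, nmr_observed_tanks = 3, 10
    rng = np.random.default_rng(0)

    # ground-truth maximum number of tanks per problem, roughly N(250, 30)
    nmr_tanks_ground_truth = rng.normal(250, 30, size=(nmr_problems, 1)).astype(np.uint32)

    # uniform tank observations in [0, ground truth) for every problem
    observations = (rng.random((nmr_problems, nmr_observed_tanks))
                    * nmr_tanks_ground_truth).astype(np.uint32)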
cbclab/MOT
mot/cl_routines/numerical_differentiation.py
estimate_hessian
def estimate_hessian(objective_func, parameters, lower_bounds=None, upper_bounds=None, step_ratio=2, nmr_steps=5, max_step_sizes=None, data=None, cl_runtime_info=None): """Estimate and return the upper triangular elements of the Hessian of the given function at the given parameters. This calculates the Hessian using central difference (using a 2nd order Taylor expansion) with a Richardson extrapolation over the proposed sequence of steps. If enough steps are given, we apply a Wynn epsilon extrapolation on top of the Richardson extrapolated results. If more steps are left, we return the estimate with the lowest error, taking into account outliers using a median filter. The Hessian is evaluated at the steps: .. math:: \quad ((f(x + d_j e_j + d_k e_k) - f(x + d_j e_j - d_k e_k)) - (f(x - d_j e_j + d_k e_k) - f(x - d_j e_j - d_k e_k)) / (4 d_j d_k) where :math:`e_j` is a vector where element :math:`j` is one and the rest are zero and :math:`d_j` is a scalar spacing :math:`steps_j`. Steps are generated according to an exponentially diminishing ratio, defined as: steps = max_step * step_ratio**-i, i = 0,1,..,nmr_steps-1. Where the maximum step can be provided. For example, a maximum step of 2 with a step ratio of 2, computed for 4 steps gives: [2.0, 1.0, 0.5, 0.25]. If lower and upper bounds are given, we use as maximum step size the largest step size that fits between the Hessian point and the boundaries. The steps define the order of the estimation, with 2 steps resulting in a O(h^2) estimate, 3 steps resulting in a O(h^4) estimate and 4 or more steps resulting in a O(h^6) derivative estimate. Args: objective_func (mot.lib.cl_function.CLFunction): The function we want to differentiate. A CL function with the signature: .. code-block:: c double <func_name>(local const mot_float_type* const x, void* data); The objective function has the same signature as the minimization function in MOT. For the numerical hessian, the ``objective_list`` parameter is ignored. parameters (ndarray): The parameters at which to evaluate the gradient. A (d, p) matrix with d problems, and p parameters lower_bounds (list or None): a list of length (p,) for p parameters with the lower bounds. Each element of the list can be a scalar or a vector (of the same length as the number of problem instances). To disable bounds for this parameter use -np.inf. upper_bounds (list or None): a list of length (p,) for p parameters with the upper bounds. Each element of the list can be a scalar or a vector (of the same length as the number of problem instances). To disable bounds for this parameter use np.inf. step_ratio (float): the ratio at which the steps diminish. nmr_steps (int): the number of steps we will generate. We will calculate the derivative for each of these step sizes and extrapolate the best step size from among them. The minimum number of steps is 1. max_step_sizes (float or ndarray or None): the maximum step size, or the maximum step size per parameter. If None is given, we use 0.1 for all parameters. If a float is given, we use that for all parameters. If a list is given, it should be of the same length as the number of parameters. data (mot.lib.kernel_data.KernelData): the user provided data for the ``void* data`` pointer. cl_runtime_info (mot.configuration.CLRuntimeInfo): the runtime information Returns: ndarray: per problem instance a vector with the upper triangular elements of the Hessian matrix. This array can hold NaN's, for elements where the Hessian failed to approximate. 
""" if len(parameters.shape) == 1: parameters = parameters[None, :] nmr_voxels = parameters.shape[0] nmr_params = parameters.shape[1] nmr_derivatives = nmr_params * (nmr_params + 1) // 2 initial_step = _get_initial_step(parameters, lower_bounds, upper_bounds, max_step_sizes) kernel_data = { 'parameters': Array(parameters, ctype='mot_float_type'), 'initial_step': Array(initial_step, ctype='float'), 'derivatives': Zeros((nmr_voxels, nmr_derivatives), 'double'), 'errors': Zeros((nmr_voxels, nmr_derivatives), 'double'), 'x_tmp': LocalMemory('mot_float_type', nmr_params), 'data': data, 'scratch': LocalMemory('double', nmr_steps + (nmr_steps - 1) + nmr_steps) } hessian_kernel = SimpleCLFunction.from_string(''' void _numdiff_hessian( global mot_float_type* parameters, global float* initial_step, global double* derivatives, global double* errors, local mot_float_type* x_tmp, void* data, local double* scratch){ if(get_local_id(0) == 0){ for(uint i = 0; i < ''' + str(nmr_params) + '''; i++){ x_tmp[i] = parameters[i]; } } barrier(CLK_LOCAL_MEM_FENCE); double f_x_input = ''' + objective_func.get_cl_function_name() + '''(x_tmp, data); // upper triangle loop uint coord_ind = 0; for(int i = 0; i < ''' + str(nmr_params) + '''; i++){ for(int j = i; j < ''' + str(nmr_params) + '''; j++){ _numdiff_hessian_element( data, x_tmp, f_x_input, i, j, initial_step, derivatives + coord_ind, errors + coord_ind, scratch); coord_ind++; } } } ''', dependencies=[objective_func, _get_numdiff_hessian_element_func(objective_func, nmr_steps, step_ratio)]) hessian_kernel.evaluate(kernel_data, nmr_voxels, use_local_reduction=True, cl_runtime_info=cl_runtime_info) return kernel_data['derivatives'].get_data()
python
def estimate_hessian(objective_func, parameters, lower_bounds=None, upper_bounds=None, step_ratio=2, nmr_steps=5, max_step_sizes=None, data=None, cl_runtime_info=None): """Estimate and return the upper triangular elements of the Hessian of the given function at the given parameters. This calculates the Hessian using central difference (using a 2nd order Taylor expansion) with a Richardson extrapolation over the proposed sequence of steps. If enough steps are given, we apply a Wynn epsilon extrapolation on top of the Richardson extrapolated results. If more steps are left, we return the estimate with the lowest error, taking into account outliers using a median filter. The Hessian is evaluated at the steps: .. math:: \quad ((f(x + d_j e_j + d_k e_k) - f(x + d_j e_j - d_k e_k)) - (f(x - d_j e_j + d_k e_k) - f(x - d_j e_j - d_k e_k)) / (4 d_j d_k) where :math:`e_j` is a vector where element :math:`j` is one and the rest are zero and :math:`d_j` is a scalar spacing :math:`steps_j`. Steps are generated according to an exponentially diminishing ratio, defined as: steps = max_step * step_ratio**-i, i = 0,1,..,nmr_steps-1. Where the maximum step can be provided. For example, a maximum step of 2 with a step ratio of 2, computed for 4 steps gives: [2.0, 1.0, 0.5, 0.25]. If lower and upper bounds are given, we use as maximum step size the largest step size that fits between the Hessian point and the boundaries. The steps define the order of the estimation, with 2 steps resulting in a O(h^2) estimate, 3 steps resulting in a O(h^4) estimate and 4 or more steps resulting in a O(h^6) derivative estimate. Args: objective_func (mot.lib.cl_function.CLFunction): The function we want to differentiate. A CL function with the signature: .. code-block:: c double <func_name>(local const mot_float_type* const x, void* data); The objective function has the same signature as the minimization function in MOT. For the numerical hessian, the ``objective_list`` parameter is ignored. parameters (ndarray): The parameters at which to evaluate the gradient. A (d, p) matrix with d problems, and p parameters lower_bounds (list or None): a list of length (p,) for p parameters with the lower bounds. Each element of the list can be a scalar or a vector (of the same length as the number of problem instances). To disable bounds for this parameter use -np.inf. upper_bounds (list or None): a list of length (p,) for p parameters with the upper bounds. Each element of the list can be a scalar or a vector (of the same length as the number of problem instances). To disable bounds for this parameter use np.inf. step_ratio (float): the ratio at which the steps diminish. nmr_steps (int): the number of steps we will generate. We will calculate the derivative for each of these step sizes and extrapolate the best step size from among them. The minimum number of steps is 1. max_step_sizes (float or ndarray or None): the maximum step size, or the maximum step size per parameter. If None is given, we use 0.1 for all parameters. If a float is given, we use that for all parameters. If a list is given, it should be of the same length as the number of parameters. data (mot.lib.kernel_data.KernelData): the user provided data for the ``void* data`` pointer. cl_runtime_info (mot.configuration.CLRuntimeInfo): the runtime information Returns: ndarray: per problem instance a vector with the upper triangular elements of the Hessian matrix. This array can hold NaN's, for elements where the Hessian failed to approximate. 
""" if len(parameters.shape) == 1: parameters = parameters[None, :] nmr_voxels = parameters.shape[0] nmr_params = parameters.shape[1] nmr_derivatives = nmr_params * (nmr_params + 1) // 2 initial_step = _get_initial_step(parameters, lower_bounds, upper_bounds, max_step_sizes) kernel_data = { 'parameters': Array(parameters, ctype='mot_float_type'), 'initial_step': Array(initial_step, ctype='float'), 'derivatives': Zeros((nmr_voxels, nmr_derivatives), 'double'), 'errors': Zeros((nmr_voxels, nmr_derivatives), 'double'), 'x_tmp': LocalMemory('mot_float_type', nmr_params), 'data': data, 'scratch': LocalMemory('double', nmr_steps + (nmr_steps - 1) + nmr_steps) } hessian_kernel = SimpleCLFunction.from_string(''' void _numdiff_hessian( global mot_float_type* parameters, global float* initial_step, global double* derivatives, global double* errors, local mot_float_type* x_tmp, void* data, local double* scratch){ if(get_local_id(0) == 0){ for(uint i = 0; i < ''' + str(nmr_params) + '''; i++){ x_tmp[i] = parameters[i]; } } barrier(CLK_LOCAL_MEM_FENCE); double f_x_input = ''' + objective_func.get_cl_function_name() + '''(x_tmp, data); // upper triangle loop uint coord_ind = 0; for(int i = 0; i < ''' + str(nmr_params) + '''; i++){ for(int j = i; j < ''' + str(nmr_params) + '''; j++){ _numdiff_hessian_element( data, x_tmp, f_x_input, i, j, initial_step, derivatives + coord_ind, errors + coord_ind, scratch); coord_ind++; } } } ''', dependencies=[objective_func, _get_numdiff_hessian_element_func(objective_func, nmr_steps, step_ratio)]) hessian_kernel.evaluate(kernel_data, nmr_voxels, use_local_reduction=True, cl_runtime_info=cl_runtime_info) return kernel_data['derivatives'].get_data()
[ "def", "estimate_hessian", "(", "objective_func", ",", "parameters", ",", "lower_bounds", "=", "None", ",", "upper_bounds", "=", "None", ",", "step_ratio", "=", "2", ",", "nmr_steps", "=", "5", ",", "max_step_sizes", "=", "None", ",", "data", "=", "None", ...
Estimate and return the upper triangular elements of the Hessian of the given function at the given parameters. This calculates the Hessian using central difference (using a 2nd order Taylor expansion) with a Richardson extrapolation over the proposed sequence of steps. If enough steps are given, we apply a Wynn epsilon extrapolation on top of the Richardson extrapolated results. If more steps are left, we return the estimate with the lowest error, taking into account outliers using a median filter. The Hessian is evaluated at the steps: .. math:: \quad ((f(x + d_j e_j + d_k e_k) - f(x + d_j e_j - d_k e_k)) - (f(x - d_j e_j + d_k e_k) - f(x - d_j e_j - d_k e_k)) / (4 d_j d_k) where :math:`e_j` is a vector where element :math:`j` is one and the rest are zero and :math:`d_j` is a scalar spacing :math:`steps_j`. Steps are generated according to an exponentially diminishing ratio, defined as: steps = max_step * step_ratio**-i, i = 0,1,..,nmr_steps-1. Where the maximum step can be provided. For example, a maximum step of 2 with a step ratio of 2, computed for 4 steps gives: [2.0, 1.0, 0.5, 0.25]. If lower and upper bounds are given, we use as maximum step size the largest step size that fits between the Hessian point and the boundaries. The steps define the order of the estimation, with 2 steps resulting in a O(h^2) estimate, 3 steps resulting in a O(h^4) estimate and 4 or more steps resulting in a O(h^6) derivative estimate. Args: objective_func (mot.lib.cl_function.CLFunction): The function we want to differentiate. A CL function with the signature: .. code-block:: c double <func_name>(local const mot_float_type* const x, void* data); The objective function has the same signature as the minimization function in MOT. For the numerical hessian, the ``objective_list`` parameter is ignored. parameters (ndarray): The parameters at which to evaluate the gradient. A (d, p) matrix with d problems, and p parameters lower_bounds (list or None): a list of length (p,) for p parameters with the lower bounds. Each element of the list can be a scalar or a vector (of the same length as the number of problem instances). To disable bounds for this parameter use -np.inf. upper_bounds (list or None): a list of length (p,) for p parameters with the upper bounds. Each element of the list can be a scalar or a vector (of the same length as the number of problem instances). To disable bounds for this parameter use np.inf. step_ratio (float): the ratio at which the steps diminish. nmr_steps (int): the number of steps we will generate. We will calculate the derivative for each of these step sizes and extrapolate the best step size from among them. The minimum number of steps is 1. max_step_sizes (float or ndarray or None): the maximum step size, or the maximum step size per parameter. If None is given, we use 0.1 for all parameters. If a float is given, we use that for all parameters. If a list is given, it should be of the same length as the number of parameters. data (mot.lib.kernel_data.KernelData): the user provided data for the ``void* data`` pointer. cl_runtime_info (mot.configuration.CLRuntimeInfo): the runtime information Returns: ndarray: per problem instance a vector with the upper triangular elements of the Hessian matrix. This array can hold NaN's, for elements where the Hessian failed to approximate.
[ "Estimate", "and", "return", "the", "upper", "triangular", "elements", "of", "the", "Hessian", "of", "the", "given", "function", "at", "the", "given", "parameters", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/cl_routines/numerical_differentiation.py#L15-L135
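A hedged usage sketch of the estimate_hessian record above (it assumes MOT and a working OpenCL runtime are installed; the objective body and parameter values are made up for illustration):

import numpy as np
from mot.lib.cl_function import SimpleCLFunction
from mot.cl_routines.numerical_differentiation import estimate_hessian

# A toy objective written as a CL function with the documented signature.
objective = SimpleCLFunction.from_string('''
    double toy_objective(local const mot_float_type* const x, void* data){
        return x[0] * x[0] + 2 * x[1] * x[1];
    }
''')

# Ten problem instances, two parameters each.
parameters = np.random.rand(10, 2)

# Per instance, the upper triangular Hessian elements (expected shape (10, 3) for two parameters).
hessian_upper = estimate_hessian(objective, parameters, nmr_steps=5)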
cbclab/MOT
mot/cl_routines/numerical_differentiation.py
_get_numdiff_hessian_element_func
def _get_numdiff_hessian_element_func(objective_func, nmr_steps, step_ratio): """Return a function to compute one element of the Hessian matrix.""" return SimpleCLFunction.from_string(''' /** * Compute the Hessian using (possibly) multiple steps with various interpolations. */ void _numdiff_hessian_element( void* data, local mot_float_type* x_tmp, mot_float_type f_x_input, uint px, uint py, global float* initial_step, global double* derivative, global double* error, local double* scratch){ const uint nmr_steps = ''' + str(nmr_steps) + '''; uint nmr_steps_remaining = nmr_steps; local double* scratch_ind = scratch; local double* steps = scratch_ind; scratch_ind += nmr_steps; local double* errors = scratch_ind; scratch_ind += nmr_steps - 1; local double* steps_tmp = scratch_ind; scratch_ind += nmr_steps; if(get_local_id(0) == 0){ for(int i = 0; i < nmr_steps - 1; i++){ errors[i] = 0; } } barrier(CLK_LOCAL_MEM_FENCE); _numdiff_hessian_steps(data, x_tmp, f_x_input, px, py, steps, initial_step); if(nmr_steps_remaining > 1){ nmr_steps_remaining = _numdiff_hessian_richardson_extrapolation(steps); barrier(CLK_LOCAL_MEM_FENCE); } if(nmr_steps_remaining >= 3){ nmr_steps_remaining = _numdiff_wynn_extrapolation(steps, errors, nmr_steps_remaining); barrier(CLK_LOCAL_MEM_FENCE); } if(nmr_steps_remaining > 1){ _numdiff_find_best_step(steps, errors, steps_tmp, nmr_steps_remaining); barrier(CLK_LOCAL_MEM_FENCE); } if(get_local_id(0) == 0){ *derivative = steps[0]; *error = errors[0]; } } ''', dependencies=[ _get_numdiff_hessian_steps_func(objective_func, nmr_steps, step_ratio), _get_numdiff_hessian_richardson_extrapolation_func(nmr_steps, step_ratio), _get_numdiff_wynn_extrapolation_func(), _get_numdiff_find_best_step_func() ])
python
def _get_numdiff_hessian_element_func(objective_func, nmr_steps, step_ratio): """Return a function to compute one element of the Hessian matrix.""" return SimpleCLFunction.from_string(''' /** * Compute the Hessian using (possibly) multiple steps with various interpolations. */ void _numdiff_hessian_element( void* data, local mot_float_type* x_tmp, mot_float_type f_x_input, uint px, uint py, global float* initial_step, global double* derivative, global double* error, local double* scratch){ const uint nmr_steps = ''' + str(nmr_steps) + '''; uint nmr_steps_remaining = nmr_steps; local double* scratch_ind = scratch; local double* steps = scratch_ind; scratch_ind += nmr_steps; local double* errors = scratch_ind; scratch_ind += nmr_steps - 1; local double* steps_tmp = scratch_ind; scratch_ind += nmr_steps; if(get_local_id(0) == 0){ for(int i = 0; i < nmr_steps - 1; i++){ errors[i] = 0; } } barrier(CLK_LOCAL_MEM_FENCE); _numdiff_hessian_steps(data, x_tmp, f_x_input, px, py, steps, initial_step); if(nmr_steps_remaining > 1){ nmr_steps_remaining = _numdiff_hessian_richardson_extrapolation(steps); barrier(CLK_LOCAL_MEM_FENCE); } if(nmr_steps_remaining >= 3){ nmr_steps_remaining = _numdiff_wynn_extrapolation(steps, errors, nmr_steps_remaining); barrier(CLK_LOCAL_MEM_FENCE); } if(nmr_steps_remaining > 1){ _numdiff_find_best_step(steps, errors, steps_tmp, nmr_steps_remaining); barrier(CLK_LOCAL_MEM_FENCE); } if(get_local_id(0) == 0){ *derivative = steps[0]; *error = errors[0]; } } ''', dependencies=[ _get_numdiff_hessian_steps_func(objective_func, nmr_steps, step_ratio), _get_numdiff_hessian_richardson_extrapolation_func(nmr_steps, step_ratio), _get_numdiff_wynn_extrapolation_func(), _get_numdiff_find_best_step_func() ])
[ "def", "_get_numdiff_hessian_element_func", "(", "objective_func", ",", "nmr_steps", ",", "step_ratio", ")", ":", "return", "SimpleCLFunction", ".", "from_string", "(", "'''\n /**\n * Compute the Hessian using (possibly) multiple steps with various interpolations. \n ...
Return a function to compute one element of the Hessian matrix.
[ "Return", "a", "function", "to", "compute", "one", "element", "of", "the", "Hessian", "matrix", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/cl_routines/numerical_differentiation.py#L138-L191
cbclab/MOT
mot/cl_routines/numerical_differentiation.py
_get_numdiff_hessian_steps_func
def _get_numdiff_hessian_steps_func(objective_func, nmr_steps, step_ratio): """Get a function to compute the multiple step sizes for a single element of the Hessian.""" return SimpleCLFunction.from_string(''' /** * Compute one element of the Hessian for a number of steps. * * This uses the initial steps in the data structure, indexed by the parameters to change (px, py). * * Args: * data: the data container * x_tmp: the array with the input parameters, needs to be writable, although it will return * the same values. * f_x_input: the objective function value at the original set of parameters * px: the index of the first parameter to perturbate * py: the index of the second parameter to perturbate * steps: storage location for the output steps * initial_step: the initial steps, array of same length as x_temp */ void _numdiff_hessian_steps(void* data, local mot_float_type* x_tmp, mot_float_type f_x_input, uint px, uint py, local double* steps, global float* initial_step){ double step_x; double step_y; double tmp; bool is_first_workitem = get_local_id(0) == 0; if(px == py){ for(uint step_ind = 0; step_ind < ''' + str(nmr_steps) + '''; step_ind++){ step_x = initial_step[px] / pown(''' + str(float(step_ratio)) + ''', step_ind); tmp = ( _numdiff_hessian_eval_step_mono(data, x_tmp, px, 2 * step_x) + _numdiff_hessian_eval_step_mono(data, x_tmp, px, -2 * step_x) - 2 * f_x_input ) / (4 * step_x * step_x); if(is_first_workitem){ steps[step_ind] = tmp; } } } else{ for(uint step_ind = 0; step_ind < ''' + str(nmr_steps) + '''; step_ind++){ step_x = initial_step[px] / pown(''' + str(float(step_ratio)) + ''', step_ind); step_y = initial_step[py] / pown(''' + str(float(step_ratio)) + ''', step_ind); tmp = ( _numdiff_hessian_eval_step_bi(data, x_tmp, px, step_x, py, step_y) - _numdiff_hessian_eval_step_bi(data, x_tmp, px, step_x, py, -step_y) - _numdiff_hessian_eval_step_bi(data, x_tmp, px, -step_x, py, step_y) + _numdiff_hessian_eval_step_bi(data, x_tmp, px, -step_x, py, -step_y) ) / (4 * step_x * step_y); if(is_first_workitem){ steps[step_ind] = tmp; } } } } ''', dependencies=[SimpleCLFunction.from_string(''' /** * Evaluate the model with a perturbation in one dimensions. * * Args: * data: the data container * x_tmp: the array with the input parameters, needs to be writable, although it will return * the same values. * perturb_dim0: the index (into the x_tmp parameters) of the parameter to perturbate * perturb_0: the added perturbation of the index corresponding to ``perturb_dim_0`` * * Returns: * the function evaluated at the parameters plus their perturbation. */ double _numdiff_hessian_eval_step_mono( void* data, local mot_float_type* x_tmp, uint perturb_dim_0, mot_float_type perturb_0){ mot_float_type old_0; double return_val; if(get_local_id(0) == 0){ old_0 = x_tmp[perturb_dim_0]; x_tmp[perturb_dim_0] += perturb_0; } barrier(CLK_LOCAL_MEM_FENCE); return_val = ''' + objective_func.get_cl_function_name() + '''(x_tmp, data); barrier(CLK_LOCAL_MEM_FENCE); if(get_local_id(0) == 0){ x_tmp[perturb_dim_0] = old_0; } barrier(CLK_LOCAL_MEM_FENCE); return return_val; } '''), SimpleCLFunction.from_string(''' /** * Evaluate the model with a perturbation in two dimensions. * * Args: * data: the data container * x_tmp: the array with the input parameters, needs to be writable, although it will return * the same values. 
* perturb_dim_0: the index (into the x_tmp parameters) of the first parameter to perturbate * perturb_0: the added perturbation of the index corresponding to ``perturb_dim_0`` * perturb_dim_1: the index (into the x_tmp parameters) of the second parameter to perturbate * perturb_1: the added perturbation of the index corresponding to ``perturb_dim_1`` * * Returns: * the function evaluated at the parameters plus their perturbation. */ double _numdiff_hessian_eval_step_bi( void* data, local mot_float_type* x_tmp, uint perturb_dim_0, mot_float_type perturb_0, uint perturb_dim_1, mot_float_type perturb_1){ mot_float_type old_0; mot_float_type old_1; double return_val; if(get_local_id(0) == 0){ old_0 = x_tmp[perturb_dim_0]; old_1 = x_tmp[perturb_dim_1]; x_tmp[perturb_dim_0] += perturb_0; x_tmp[perturb_dim_1] += perturb_1; } barrier(CLK_LOCAL_MEM_FENCE); return_val = ''' + objective_func.get_cl_function_name() + '''(x_tmp, data); barrier(CLK_LOCAL_MEM_FENCE); if(get_local_id(0) == 0){ x_tmp[perturb_dim_0] = old_0; x_tmp[perturb_dim_1] = old_1; } barrier(CLK_LOCAL_MEM_FENCE); return return_val; } ''')])
python
def _get_numdiff_hessian_steps_func(objective_func, nmr_steps, step_ratio): """Get a function to compute the multiple step sizes for a single element of the Hessian.""" return SimpleCLFunction.from_string(''' /** * Compute one element of the Hessian for a number of steps. * * This uses the initial steps in the data structure, indexed by the parameters to change (px, py). * * Args: * data: the data container * x_tmp: the array with the input parameters, needs to be writable, although it will return * the same values. * f_x_input: the objective function value at the original set of parameters * px: the index of the first parameter to perturbate * py: the index of the second parameter to perturbate * steps: storage location for the output steps * initial_step: the initial steps, array of same length as x_temp */ void _numdiff_hessian_steps(void* data, local mot_float_type* x_tmp, mot_float_type f_x_input, uint px, uint py, local double* steps, global float* initial_step){ double step_x; double step_y; double tmp; bool is_first_workitem = get_local_id(0) == 0; if(px == py){ for(uint step_ind = 0; step_ind < ''' + str(nmr_steps) + '''; step_ind++){ step_x = initial_step[px] / pown(''' + str(float(step_ratio)) + ''', step_ind); tmp = ( _numdiff_hessian_eval_step_mono(data, x_tmp, px, 2 * step_x) + _numdiff_hessian_eval_step_mono(data, x_tmp, px, -2 * step_x) - 2 * f_x_input ) / (4 * step_x * step_x); if(is_first_workitem){ steps[step_ind] = tmp; } } } else{ for(uint step_ind = 0; step_ind < ''' + str(nmr_steps) + '''; step_ind++){ step_x = initial_step[px] / pown(''' + str(float(step_ratio)) + ''', step_ind); step_y = initial_step[py] / pown(''' + str(float(step_ratio)) + ''', step_ind); tmp = ( _numdiff_hessian_eval_step_bi(data, x_tmp, px, step_x, py, step_y) - _numdiff_hessian_eval_step_bi(data, x_tmp, px, step_x, py, -step_y) - _numdiff_hessian_eval_step_bi(data, x_tmp, px, -step_x, py, step_y) + _numdiff_hessian_eval_step_bi(data, x_tmp, px, -step_x, py, -step_y) ) / (4 * step_x * step_y); if(is_first_workitem){ steps[step_ind] = tmp; } } } } ''', dependencies=[SimpleCLFunction.from_string(''' /** * Evaluate the model with a perturbation in one dimensions. * * Args: * data: the data container * x_tmp: the array with the input parameters, needs to be writable, although it will return * the same values. * perturb_dim0: the index (into the x_tmp parameters) of the parameter to perturbate * perturb_0: the added perturbation of the index corresponding to ``perturb_dim_0`` * * Returns: * the function evaluated at the parameters plus their perturbation. */ double _numdiff_hessian_eval_step_mono( void* data, local mot_float_type* x_tmp, uint perturb_dim_0, mot_float_type perturb_0){ mot_float_type old_0; double return_val; if(get_local_id(0) == 0){ old_0 = x_tmp[perturb_dim_0]; x_tmp[perturb_dim_0] += perturb_0; } barrier(CLK_LOCAL_MEM_FENCE); return_val = ''' + objective_func.get_cl_function_name() + '''(x_tmp, data); barrier(CLK_LOCAL_MEM_FENCE); if(get_local_id(0) == 0){ x_tmp[perturb_dim_0] = old_0; } barrier(CLK_LOCAL_MEM_FENCE); return return_val; } '''), SimpleCLFunction.from_string(''' /** * Evaluate the model with a perturbation in two dimensions. * * Args: * data: the data container * x_tmp: the array with the input parameters, needs to be writable, although it will return * the same values. 
* perturb_dim_0: the index (into the x_tmp parameters) of the first parameter to perturbate * perturb_0: the added perturbation of the index corresponding to ``perturb_dim_0`` * perturb_dim_1: the index (into the x_tmp parameters) of the second parameter to perturbate * perturb_1: the added perturbation of the index corresponding to ``perturb_dim_1`` * * Returns: * the function evaluated at the parameters plus their perturbation. */ double _numdiff_hessian_eval_step_bi( void* data, local mot_float_type* x_tmp, uint perturb_dim_0, mot_float_type perturb_0, uint perturb_dim_1, mot_float_type perturb_1){ mot_float_type old_0; mot_float_type old_1; double return_val; if(get_local_id(0) == 0){ old_0 = x_tmp[perturb_dim_0]; old_1 = x_tmp[perturb_dim_1]; x_tmp[perturb_dim_0] += perturb_0; x_tmp[perturb_dim_1] += perturb_1; } barrier(CLK_LOCAL_MEM_FENCE); return_val = ''' + objective_func.get_cl_function_name() + '''(x_tmp, data); barrier(CLK_LOCAL_MEM_FENCE); if(get_local_id(0) == 0){ x_tmp[perturb_dim_0] = old_0; x_tmp[perturb_dim_1] = old_1; } barrier(CLK_LOCAL_MEM_FENCE); return return_val; } ''')])
[ "def", "_get_numdiff_hessian_steps_func", "(", "objective_func", ",", "nmr_steps", ",", "step_ratio", ")", ":", "return", "SimpleCLFunction", ".", "from_string", "(", "'''\n /**\n * Compute one element of the Hessian for a number of steps.\n * \n * This u...
Get a function to compute the multiple step sizes for a single element of the Hessian.
[ "Get", "a", "function", "to", "compute", "the", "multiple", "step", "sizes", "for", "a", "single", "element", "of", "the", "Hessian", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/cl_routines/numerical_differentiation.py#L194-L338
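The diagonal and mixed central-difference formulas used in the CL code above can be checked in plain NumPy; the test function and step size below are made up for the illustration:

import numpy as np

def f(x):
    # Toy objective with a known Hessian: d2f/dx0^2 = 2, d2f/dx0dx1 = 3.
    return x[0] ** 2 + 3 * x[0] * x[1] + np.sin(x[1])

x = np.array([0.4, 0.7])
h = 1e-3
e0 = np.array([1.0, 0.0])
e1 = np.array([0.0, 1.0])

# Diagonal element: (f(x + 2h e0) + f(x - 2h e0) - 2 f(x)) / (4 h^2)
d2_dx0 = (f(x + 2 * h * e0) + f(x - 2 * h * e0) - 2 * f(x)) / (4 * h * h)

# Mixed element: (f(x+h e0+h e1) - f(x+h e0-h e1) - f(x-h e0+h e1) + f(x-h e0-h e1)) / (4 h^2)
d2_dx0dx1 = (f(x + h * e0 + h * e1) - f(x + h * e0 - h * e1)
             - f(x - h * e0 + h * e1) + f(x - h * e0 - h * e1)) / (4 * h * h)

print(d2_dx0, d2_dx0dx1)  # approximately 2.0 and 3.0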
cbclab/MOT
mot/cl_routines/numerical_differentiation.py
_get_initial_step
def _get_initial_step(parameters, lower_bounds, upper_bounds, max_step_sizes): """Get an initial step size to use for every parameter. This chooses the step sizes based on the maximum step size and the lower and upper bounds. Args: parameters (ndarray): The parameters at which to evaluate the gradient. A (d, p) matrix with d problems, p parameters and n samples. lower_bounds (list): lower bounds upper_bounds (list): upper bounds max_step_sizes (list or None): the maximum step size, or the maximum step size per parameter. Defaults to 0.1 Returns: ndarray: for every problem instance the vector with the initial step size for each parameter. """ nmr_params = parameters.shape[1] initial_step = np.zeros_like(parameters) if max_step_sizes is None: max_step_sizes = 0.1 if isinstance(max_step_sizes, Number): max_step_sizes = [max_step_sizes] * nmr_params max_step_sizes = np.array(max_step_sizes) for ind in range(parameters.shape[1]): minimum_allowed_step = np.minimum(np.abs(parameters[:, ind] - lower_bounds[ind]), np.abs(upper_bounds[ind] - parameters[:, ind])) initial_step[:, ind] = np.minimum(minimum_allowed_step, max_step_sizes[ind]) return initial_step / 2.
python
def _get_initial_step(parameters, lower_bounds, upper_bounds, max_step_sizes): """Get an initial step size to use for every parameter. This chooses the step sizes based on the maximum step size and the lower and upper bounds. Args: parameters (ndarray): The parameters at which to evaluate the gradient. A (d, p) matrix with d problems, p parameters and n samples. lower_bounds (list): lower bounds upper_bounds (list): upper bounds max_step_sizes (list or None): the maximum step size, or the maximum step size per parameter. Defaults to 0.1 Returns: ndarray: for every problem instance the vector with the initial step size for each parameter. """ nmr_params = parameters.shape[1] initial_step = np.zeros_like(parameters) if max_step_sizes is None: max_step_sizes = 0.1 if isinstance(max_step_sizes, Number): max_step_sizes = [max_step_sizes] * nmr_params max_step_sizes = np.array(max_step_sizes) for ind in range(parameters.shape[1]): minimum_allowed_step = np.minimum(np.abs(parameters[:, ind] - lower_bounds[ind]), np.abs(upper_bounds[ind] - parameters[:, ind])) initial_step[:, ind] = np.minimum(minimum_allowed_step, max_step_sizes[ind]) return initial_step / 2.
[ "def", "_get_initial_step", "(", "parameters", ",", "lower_bounds", ",", "upper_bounds", ",", "max_step_sizes", ")", ":", "nmr_params", "=", "parameters", ".", "shape", "[", "1", "]", "initial_step", "=", "np", ".", "zeros_like", "(", "parameters", ")", "if", ...
Get an initial step size to use for every parameter. This chooses the step sizes based on the maximum step size and the lower and upper bounds. Args: parameters (ndarray): The parameters at which to evaluate the gradient. A (d, p) matrix with d problems and p parameters. lower_bounds (list): lower bounds upper_bounds (list): upper bounds max_step_sizes (list or None): the maximum step size, or the maximum step size per parameter. Defaults to 0.1 Returns: ndarray: for every problem instance the vector with the initial step size for each parameter.
[ "Get", "an", "initial", "step", "size", "to", "use", "for", "every", "parameter", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/cl_routines/numerical_differentiation.py#L597-L627
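As a plain NumPy illustration of the step-size selection performed by _get_initial_step above (the values are made up): each initial step is half of the smaller of the maximum step size and the distance to the nearest bound.

import numpy as np

parameters = np.array([[0.3], [0.95]])   # two problem instances, one parameter
lower, upper, max_step = 0.0, 1.0, 0.1

# Distance to the nearest bound, per problem instance.
room_to_bounds = np.minimum(np.abs(parameters[:, 0] - lower),
                            np.abs(upper - parameters[:, 0]))
initial_step = np.minimum(room_to_bounds, max_step) / 2.

print(initial_step)  # [0.05  0.025]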
cbclab/MOT
mot/configuration.py
SimpleConfigAction.apply
def apply(self): """Apply the current action to the current runtime configuration.""" self._old_config = {k: v for k, v in _config.items()} self._apply()
python
def apply(self): """Apply the current action to the current runtime configuration.""" self._old_config = {k: v for k, v in _config.items()} self._apply()
[ "def", "apply", "(", "self", ")", ":", "self", ".", "_old_config", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "_config", ".", "items", "(", ")", "}", "self", ".", "_apply", "(", ")" ]
Apply the current action to the current runtime configuration.
[ "Apply", "the", "current", "action", "to", "the", "current", "runtime", "configuration", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/configuration.py#L156-L159
cbclab/MOT
mot/configuration.py
SimpleConfigAction.unapply
def unapply(self): """Reset the current configuration to the previous state.""" for key, value in self._old_config.items(): _config[key] = value
python
def unapply(self): """Reset the current configuration to the previous state.""" for key, value in self._old_config.items(): _config[key] = value
[ "def", "unapply", "(", "self", ")", ":", "for", "key", ",", "value", "in", "self", ".", "_old_config", ".", "items", "(", ")", ":", "_config", "[", "key", "]", "=", "value" ]
Reset the current configuration to the previous state.
[ "Reset", "the", "current", "configuration", "to", "the", "previous", "state", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/configuration.py#L161-L164
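The apply/unapply pair above is a snapshot-and-restore pattern over a module-level configuration dict. A self-contained sketch of the same idea, independent of MOT's actual configuration actions:

_config = {'precision': 'double', 'device': 0}

class ConfigAction(object):
    """Snapshot the configuration on apply, restore it on unapply."""

    def __init__(self, **updates):
        self._updates = updates
        self._old_config = None

    def apply(self):
        self._old_config = dict(_config)   # shallow snapshot of the current state
        _config.update(self._updates)

    def unapply(self):
        for key, value in self._old_config.items():
            _config[key] = value

action = ConfigAction(device=1)
action.apply()
print(_config['device'])   # 1
action.unapply()
print(_config['device'])   # 0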
ksbg/sparklanes
sparklanes/_framework/task.py
Task
def Task(entry): # pylint: disable=invalid-name """ Decorator with which classes, who act as tasks in a `Lane`, must be decorated. When a class is being decorated, it becomes a child of `LaneTask`. Parameters ---------- entry: The name of the task's "main" method, i.e. the method which is executed when task is run Returns ------- wrapper (function): The actual decorator function """ if not isinstance(entry, string_types): # In the event that no argument is supplied to the decorator, python passes the decorated # class itself as an argument. That way, we can detect if no argument (or an argument of # invalid type) was supplied. This allows passing of `entry` as both a named kwarg, and # as an arg. Isn't neat, but for now it suffices. raise TypeError('When decorating a class with `Task`, a single string argument must be ' 'supplied, which specifies the "main" task method, i.e. the class\'s entry ' 'point to the task.') else: def wrapper(cls): """The actual decorator function""" if isclass(cls): if not hasattr(cls, entry): # Check if cls has the specified entry method raise TypeError('Method `%s` not found in class `%s`.' % (entry, cls.__name__)) # We will have to inspect the task class's `__init__` method later (by inspecting # the arg signature, before it is instantiated). In various circumstances, classes # will not have an unbound `__init__` method. Let's deal with that now already, by # assigning an empty, unbound `__init__` method manually, in order to prevent # errors later on during method inspection (not an issue in Python 3): # - Whenever a class is not defined as a new-style class in Python 2.7, i.e. a # sub-class of object, and it does not have a `__init__` method definition, the # class will not have an attribute `__init__` # - If a class misses a `__init__` method definition, but is defined as a # new-style class, attribute `__init__` will be of type `slot wrapper`, which # cannot be inspected (and it also doesn't seem possible to check if a method is of # type `slot wrapper`, which is why we manually define one). if not hasattr(cls, '__init__') or cls.__init__ == object.__init__: init = MethodType(lambda self: None, None, cls) \ if PY2 else MethodType(lambda self: None, cls) setattr(cls, '__init__', init) # Check for attributes that will be overwritten, in order to warn the user reserved_attributes = ('__getattr__', '__call__', '_entry_mtd', 'cache', 'uncache', 'clear_cache', '_log_lock') for attr in dir(cls): if attr in reserved_attributes: make_default_logger(INTERNAL_LOGGER_NAME).warning( 'Attribute `%s` of class `%s` will be overwritten when decorated with ' '`sparklanes.Task`! Avoid assigning any of the following attributes ' '`%s`', attr, cls.__name__, str(reserved_attributes) ) assignments = {'_entry_mtd': entry, '__getattr__': lambda self, name: TaskCache.get(name), '__init__': cls.__init__, '_log_lock': Lock()} for attr in WRAPPER_ASSIGNMENTS: try: assignments[attr] = getattr(cls, attr) except AttributeError: pass # Build task as a subclass of LaneTask return type('Task_%s' % cls.__name__, (LaneTask, cls, object), assignments) else: raise TypeError('Only classes can be decorated with `Task`') return wrapper
python
def Task(entry): # pylint: disable=invalid-name """ Decorator with which classes, who act as tasks in a `Lane`, must be decorated. When a class is being decorated, it becomes a child of `LaneTask`. Parameters ---------- entry: The name of the task's "main" method, i.e. the method which is executed when task is run Returns ------- wrapper (function): The actual decorator function """ if not isinstance(entry, string_types): # In the event that no argument is supplied to the decorator, python passes the decorated # class itself as an argument. That way, we can detect if no argument (or an argument of # invalid type) was supplied. This allows passing of `entry` as both a named kwarg, and # as an arg. Isn't neat, but for now it suffices. raise TypeError('When decorating a class with `Task`, a single string argument must be ' 'supplied, which specifies the "main" task method, i.e. the class\'s entry ' 'point to the task.') else: def wrapper(cls): """The actual decorator function""" if isclass(cls): if not hasattr(cls, entry): # Check if cls has the specified entry method raise TypeError('Method `%s` not found in class `%s`.' % (entry, cls.__name__)) # We will have to inspect the task class's `__init__` method later (by inspecting # the arg signature, before it is instantiated). In various circumstances, classes # will not have an unbound `__init__` method. Let's deal with that now already, by # assigning an empty, unbound `__init__` method manually, in order to prevent # errors later on during method inspection (not an issue in Python 3): # - Whenever a class is not defined as a new-style class in Python 2.7, i.e. a # sub-class of object, and it does not have a `__init__` method definition, the # class will not have an attribute `__init__` # - If a class misses a `__init__` method definition, but is defined as a # new-style class, attribute `__init__` will be of type `slot wrapper`, which # cannot be inspected (and it also doesn't seem possible to check if a method is of # type `slot wrapper`, which is why we manually define one). if not hasattr(cls, '__init__') or cls.__init__ == object.__init__: init = MethodType(lambda self: None, None, cls) \ if PY2 else MethodType(lambda self: None, cls) setattr(cls, '__init__', init) # Check for attributes that will be overwritten, in order to warn the user reserved_attributes = ('__getattr__', '__call__', '_entry_mtd', 'cache', 'uncache', 'clear_cache', '_log_lock') for attr in dir(cls): if attr in reserved_attributes: make_default_logger(INTERNAL_LOGGER_NAME).warning( 'Attribute `%s` of class `%s` will be overwritten when decorated with ' '`sparklanes.Task`! Avoid assigning any of the following attributes ' '`%s`', attr, cls.__name__, str(reserved_attributes) ) assignments = {'_entry_mtd': entry, '__getattr__': lambda self, name: TaskCache.get(name), '__init__': cls.__init__, '_log_lock': Lock()} for attr in WRAPPER_ASSIGNMENTS: try: assignments[attr] = getattr(cls, attr) except AttributeError: pass # Build task as a subclass of LaneTask return type('Task_%s' % cls.__name__, (LaneTask, cls, object), assignments) else: raise TypeError('Only classes can be decorated with `Task`') return wrapper
[ "def", "Task", "(", "entry", ")", ":", "# pylint: disable=invalid-name", "if", "not", "isinstance", "(", "entry", ",", "string_types", ")", ":", "# In the event that no argument is supplied to the decorator, python passes the decorated", "# class itself as an argument. That way, we...
Decorator with which classes that act as tasks in a `Lane` must be decorated. When a class is being decorated, it becomes a child of `LaneTask`. Parameters ---------- entry: The name of the task's "main" method, i.e. the method which is executed when the task is run Returns ------- wrapper (function): The actual decorator function
[ "Decorator", "with", "which", "classes", "who", "act", "as", "tasks", "in", "a", "Lane", "must", "be", "decorated", ".", "When", "a", "class", "is", "being", "decorated", "it", "becomes", "a", "child", "of", "LaneTask", "." ]
train
https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/task.py#L18-L89
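A hedged usage sketch of the Task decorator above (it assumes Task is re-exported by the top-level sparklanes package; the class, its entry method and the cached attribute name are made up):

from sparklanes import Task  # assumed top-level re-export of the decorator above

@Task('extract')
class ExtractNumbers(object):
    def __init__(self, n):
        self.n = n

    def extract(self):
        numbers = list(range(self.n))
        # Cache the result so downstream tasks can read it as `self.numbers`.
        self.cache('numbers', numbers)

The decorated class becomes a subclass of LaneTask, so calling the task runs the 'extract' entry method, and the cached value is later readable by downstream tasks through the __getattr__ hook installed by the decorator.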
ksbg/sparklanes
sparklanes/_framework/task.py
LaneTask.cache
def cache(self, name, val, overwrite=True): """Assigns an attribute reference to all subsequent tasks. For example, if a task caches a DataFrame `df` using `self.cache('some_df', df)`, all tasks that follow can access the DataFrame using `self.some_df`. Note that manually assigned attributes that share the same name have precedence over cached attributes. Parameters ---------- name : str Name of the attribute val Attribute value overwrite : bool Indicates if the attribute shall be overwritten, or not (if `False`, and a cached attribute with the given name already exists, `sparklanes.errors.CacheError` will be thrown). """ if name in TaskCache.cached and not overwrite: raise CacheError('Object with name `%s` already in cache.' % name) TaskCache.cached[name] = val
python
def cache(self, name, val, overwrite=True): """Assigns an attribute reference to all subsequent tasks. For example, if a task caches a DataFrame `df` using `self.cache('some_df', df)`, all tasks that follow can access the DataFrame using `self.some_df`. Note that manually assigned attributes that share the same name have precedence over cached attributes. Parameters ---------- name : str Name of the attribute val Attribute value overwrite : bool Indicates if the attribute shall be overwritten, or not (if `False`, and a cached attribute with the given name already exists, `sparklanes.errors.CacheError` will be thrown). """ if name in TaskCache.cached and not overwrite: raise CacheError('Object with name `%s` already in cache.' % name) TaskCache.cached[name] = val
[ "def", "cache", "(", "self", ",", "name", ",", "val", ",", "overwrite", "=", "True", ")", ":", "if", "name", "in", "TaskCache", ".", "cached", "and", "not", "overwrite", ":", "raise", "CacheError", "(", "'Object with name `%s` already in cache.'", "%", "name...
Assigns an attribute reference to all subsequent tasks. For example, if a task caches a DataFrame `df` using `self.cache('some_df', df)`, all tasks that follow can access the DataFrame using `self.some_df`. Note that manually assigned attributes that share the same name have precedence over cached attributes. Parameters ---------- name : str Name of the attribute val Attribute value overwrite : bool Indicates if the attribute shall be overwritten, or not (if `False`, and a cached attribute with the given name already exists, `sparklanes.errors.CacheError` will be thrown).
[ "Assigns", "an", "attribute", "reference", "to", "all", "subsequent", "tasks", ".", "For", "example", "if", "a", "task", "caches", "a", "DataFrame", "df", "using", "self", ".", "cache", "(", "some_df", "df", ")", "all", "tasks", "that", "follow", "can", ...
train
https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/task.py#L121-L140
ksbg/sparklanes
sparklanes/_framework/task.py
LaneTaskThread.run
def run(self): """Overwrites `threading.Thread.run`, to allow handling of exceptions thrown by threads from within the main app.""" self.exc = None try: self.task() except BaseException: self.exc = sys.exc_info()
python
def run(self): """Overwrites `threading.Thread.run`, to allow handling of exceptions thrown by threads from within the main app.""" self.exc = None try: self.task() except BaseException: self.exc = sys.exc_info()
[ "def", "run", "(", "self", ")", ":", "self", ".", "exc", "=", "None", "try", ":", "self", ".", "task", "(", ")", "except", "BaseException", ":", "self", ".", "exc", "=", "sys", ".", "exc_info", "(", ")" ]
Overwrites `threading.Thread.run`, to allow handling of exceptions thrown by threads from within the main app.
[ "Overwrites", "threading", ".", "Thread", ".", "run", "to", "allow", "handling", "of", "exceptions", "thrown", "by", "threads", "from", "within", "the", "main", "app", "." ]
train
https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/task.py#L170-L177
ksbg/sparklanes
sparklanes/_framework/task.py
LaneTaskThread.join
def join(self, timeout=None): """Overwrites `threading.Thread.join`, to allow handling of exceptions thrown by threads from within the main app.""" Thread.join(self, timeout=timeout) if self.exc: msg = "Thread '%s' threw an exception `%s`: %s" \ % (self.getName(), self.exc[0].__name__, self.exc[1]) new_exc = LaneExecutionError(msg) if PY3: raise new_exc.with_traceback(self.exc[2]) # pylint: disable=no-member else: raise (new_exc.__class__, new_exc, self.exc[2])
python
def join(self, timeout=None): """Overwrites `threading.Thread.join`, to allow handling of exceptions thrown by threads from within the main app.""" Thread.join(self, timeout=timeout) if self.exc: msg = "Thread '%s' threw an exception `%s`: %s" \ % (self.getName(), self.exc[0].__name__, self.exc[1]) new_exc = LaneExecutionError(msg) if PY3: raise new_exc.with_traceback(self.exc[2]) # pylint: disable=no-member else: raise (new_exc.__class__, new_exc, self.exc[2])
[ "def", "join", "(", "self", ",", "timeout", "=", "None", ")", ":", "Thread", ".", "join", "(", "self", ",", "timeout", "=", "timeout", ")", "if", "self", ".", "exc", ":", "msg", "=", "\"Thread '%s' threw an exception `%s`: %s\"", "%", "(", "self", ".", ...
Overwrites `threading.Thread.join`, to allow handling of exceptions thrown by threads from within the main app.
[ "Overwrites", "threading", ".", "Thread", ".", "join", "to", "allow", "handling", "of", "exceptions", "thrown", "by", "threads", "from", "within", "the", "main", "app", "." ]
train
https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/task.py#L179-L191
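The run/join overrides above capture an exception raised inside the worker thread and re-raise it in the thread that joins. A minimal, framework-independent Python 3 sketch of that pattern:

import sys
from threading import Thread

class PropagatingThread(Thread):
    """Thread that stores any exception from run() and re-raises it on join()."""

    def run(self):
        self.exc = None
        try:
            if self._target:
                self._target(*self._args, **self._kwargs)
        except BaseException:
            self.exc = sys.exc_info()

    def join(self, timeout=None):
        Thread.join(self, timeout=timeout)
        if self.exc:
            raise RuntimeError('Thread %r failed' % self.name).with_traceback(self.exc[2])

t = PropagatingThread(target=lambda: 1 / 0)
t.start()
try:
    t.join()
except RuntimeError as exc:
    print('caught:', exc)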
cbclab/MOT
docs/conf.py
mock_decorator
def mock_decorator(*args, **kwargs): """Mocked decorator, needed in the case we need to mock a decorator""" def _called_decorator(dec_func): @wraps(dec_func) def _decorator(*args, **kwargs): return dec_func() return _decorator return _called_decorator
python
def mock_decorator(*args, **kwargs): """Mocked decorator, needed in the case we need to mock a decorator""" def _called_decorator(dec_func): @wraps(dec_func) def _decorator(*args, **kwargs): return dec_func() return _decorator return _called_decorator
[ "def", "mock_decorator", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "_called_decorator", "(", "dec_func", ")", ":", "@", "wraps", "(", "dec_func", ")", "def", "_decorator", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return...
Mocked decorator, needed in the case we need to mock a decorator
[ "Mocked", "decorator", "needed", "in", "the", "case", "we", "need", "to", "mock", "a", "decorator" ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/docs/conf.py#L28-L35
cbclab/MOT
docs/conf.py
import_mock
def import_mock(name, *args, **kwargs): """Mock all modules starting with one of the mock_modules names.""" if any(name.startswith(s) for s in mock_modules): return MockModule() return orig_import(name, *args, **kwargs)
python
def import_mock(name, *args, **kwargs): """Mock all modules starting with one of the mock_modules names.""" if any(name.startswith(s) for s in mock_modules): return MockModule() return orig_import(name, *args, **kwargs)
[ "def", "import_mock", "(", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "any", "(", "name", ".", "startswith", "(", "s", ")", "for", "s", "in", "mock_modules", ")", ":", "return", "MockModule", "(", ")", "return", "orig_import",...
Mock all modules starting with one of the mock_modules names.
[ "Mock", "all", "modules", "starting", "with", "one", "of", "the", "mock_modules", "names", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/docs/conf.py#L59-L63
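The import hook above lets documentation builds run without heavyweight dependencies. A self-contained sketch of the technique; the module names in mock_modules are illustrative, not MOT's actual list:

import builtins

mock_modules = ['pyopencl', 'some_heavy_dependency']

class MockModule(object):
    """Object that absorbs any attribute access or call."""
    def __getattr__(self, name):
        return MockModule()
    def __call__(self, *args, **kwargs):
        return MockModule()

orig_import = builtins.__import__

def import_mock(name, *args, **kwargs):
    # Return a mock for any module whose name starts with one of the mocked prefixes.
    if any(name.startswith(s) for s in mock_modules):
        return MockModule()
    return orig_import(name, *args, **kwargs)

builtins.__import__ = import_mock

import pyopencl  # now resolves to a MockModule instance
print(type(pyopencl).__name__)  # MockModule

builtins.__import__ = orig_import  # restore the real import machinery afterwards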
cbclab/MOT
mot/lib/cl_function.py
apply_cl_function
def apply_cl_function(cl_function, kernel_data, nmr_instances, use_local_reduction=False, cl_runtime_info=None): """Run the given function/procedure on the given set of data. This class will wrap the given CL function in a kernel call and execute that that for every data instance using the provided kernel data. This class will respect the read write setting of the kernel data elements such that output can be written back to the according kernel data elements. Args: cl_function (mot.lib.cl_function.CLFunction): the function to run on the datasets. Either a name function tuple or an actual CLFunction object. kernel_data (dict[str: mot.lib.kernel_data.KernelData]): the data to use as input to the function. nmr_instances (int): the number of parallel threads to run (used as ``global_size``) use_local_reduction (boolean): set this to True if you want to use local memory reduction in your CL procedure. If this is set to True we will multiply the global size (given by the nmr_instances) by the work group sizes. cl_runtime_info (mot.configuration.CLRuntimeInfo): the runtime information """ cl_runtime_info = cl_runtime_info or CLRuntimeInfo() cl_environments = cl_runtime_info.cl_environments for param in cl_function.get_parameters(): if param.name not in kernel_data: names = [param.name for param in cl_function.get_parameters()] missing_names = [name for name in names if name not in kernel_data] raise ValueError('Some parameters are missing an input value, ' 'required parameters are: {}, missing inputs are: {}'.format(names, missing_names)) if cl_function.get_return_type() != 'void': kernel_data['_results'] = Zeros((nmr_instances,), cl_function.get_return_type()) workers = [] for ind, cl_environment in enumerate(cl_environments): worker = _ProcedureWorker(cl_environment, cl_runtime_info.compile_flags, cl_function, kernel_data, cl_runtime_info.double_precision, use_local_reduction) workers.append(worker) def enqueue_batch(batch_size, offset): items_per_worker = [batch_size // len(cl_environments) for _ in range(len(cl_environments) - 1)] items_per_worker.append(batch_size - sum(items_per_worker)) for ind, worker in enumerate(workers): worker.calculate(offset, offset + items_per_worker[ind]) offset += items_per_worker[ind] worker.cl_queue.flush() for worker in workers: worker.cl_queue.finish() return offset total_offset = 0 for batch_start, batch_end in split_in_batches(nmr_instances, 1e4 * len(workers)): total_offset = enqueue_batch(batch_end - batch_start, total_offset) if cl_function.get_return_type() != 'void': return kernel_data['_results'].get_data()
python
def apply_cl_function(cl_function, kernel_data, nmr_instances, use_local_reduction=False, cl_runtime_info=None): """Run the given function/procedure on the given set of data. This class will wrap the given CL function in a kernel call and execute that that for every data instance using the provided kernel data. This class will respect the read write setting of the kernel data elements such that output can be written back to the according kernel data elements. Args: cl_function (mot.lib.cl_function.CLFunction): the function to run on the datasets. Either a name function tuple or an actual CLFunction object. kernel_data (dict[str: mot.lib.kernel_data.KernelData]): the data to use as input to the function. nmr_instances (int): the number of parallel threads to run (used as ``global_size``) use_local_reduction (boolean): set this to True if you want to use local memory reduction in your CL procedure. If this is set to True we will multiply the global size (given by the nmr_instances) by the work group sizes. cl_runtime_info (mot.configuration.CLRuntimeInfo): the runtime information """ cl_runtime_info = cl_runtime_info or CLRuntimeInfo() cl_environments = cl_runtime_info.cl_environments for param in cl_function.get_parameters(): if param.name not in kernel_data: names = [param.name for param in cl_function.get_parameters()] missing_names = [name for name in names if name not in kernel_data] raise ValueError('Some parameters are missing an input value, ' 'required parameters are: {}, missing inputs are: {}'.format(names, missing_names)) if cl_function.get_return_type() != 'void': kernel_data['_results'] = Zeros((nmr_instances,), cl_function.get_return_type()) workers = [] for ind, cl_environment in enumerate(cl_environments): worker = _ProcedureWorker(cl_environment, cl_runtime_info.compile_flags, cl_function, kernel_data, cl_runtime_info.double_precision, use_local_reduction) workers.append(worker) def enqueue_batch(batch_size, offset): items_per_worker = [batch_size // len(cl_environments) for _ in range(len(cl_environments) - 1)] items_per_worker.append(batch_size - sum(items_per_worker)) for ind, worker in enumerate(workers): worker.calculate(offset, offset + items_per_worker[ind]) offset += items_per_worker[ind] worker.cl_queue.flush() for worker in workers: worker.cl_queue.finish() return offset total_offset = 0 for batch_start, batch_end in split_in_batches(nmr_instances, 1e4 * len(workers)): total_offset = enqueue_batch(batch_end - batch_start, total_offset) if cl_function.get_return_type() != 'void': return kernel_data['_results'].get_data()
[ "def", "apply_cl_function", "(", "cl_function", ",", "kernel_data", ",", "nmr_instances", ",", "use_local_reduction", "=", "False", ",", "cl_runtime_info", "=", "None", ")", ":", "cl_runtime_info", "=", "cl_runtime_info", "or", "CLRuntimeInfo", "(", ")", "cl_environ...
Run the given function/procedure on the given set of data. This function will wrap the given CL function in a kernel call and execute it for every data instance using the provided kernel data. It will respect the read/write settings of the kernel data elements such that output can be written back to the corresponding kernel data elements. Args: cl_function (mot.lib.cl_function.CLFunction): the function to run on the datasets. Either a name function tuple or an actual CLFunction object. kernel_data (dict[str: mot.lib.kernel_data.KernelData]): the data to use as input to the function. nmr_instances (int): the number of parallel threads to run (used as ``global_size``) use_local_reduction (boolean): set this to True if you want to use local memory reduction in your CL procedure. If this is set to True we will multiply the global size (given by the nmr_instances) by the work group sizes. cl_runtime_info (mot.configuration.CLRuntimeInfo): the runtime information
[ "Run", "the", "given", "function", "/", "procedure", "on", "the", "given", "set", "of", "data", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/cl_function.py#L578-L633
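A hedged sketch of calling apply_cl_function (it assumes MOT and an OpenCL runtime; the CL function is made up, and it deliberately takes no parameters so that no kernel data objects are needed — real uses pass a kernel_data dict with one mot.lib.kernel_data entry per CL parameter):

from mot.lib.cl_function import SimpleCLFunction, apply_cl_function

# Non-void return type, so apply_cl_function collects one result per instance.
func = SimpleCLFunction.from_string('''
    double the_answer(){
        return 42.0;
    }
''')

results = apply_cl_function(func, {}, nmr_instances=10)
print(results)  # expected: an array of ten 42.0 values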
cbclab/MOT
mot/lib/cl_function.py
SimpleCLFunction.from_string
def from_string(cls, cl_function, dependencies=()): """Parse the given CL function into a SimpleCLFunction object. Args: cl_function (str): the function we wish to turn into an object dependencies (list or tuple of CLLibrary): The list of CL libraries this function depends on Returns: SimpleCLFunction: the CL data type for this parameter declaration """ return_type, function_name, parameter_list, body = split_cl_function(cl_function) return SimpleCLFunction(return_type, function_name, parameter_list, body, dependencies=dependencies)
python
def from_string(cls, cl_function, dependencies=()): """Parse the given CL function into a SimpleCLFunction object. Args: cl_function (str): the function we wish to turn into an object dependencies (list or tuple of CLLibrary): The list of CL libraries this function depends on Returns: SimpleCLFunction: the CL data type for this parameter declaration """ return_type, function_name, parameter_list, body = split_cl_function(cl_function) return SimpleCLFunction(return_type, function_name, parameter_list, body, dependencies=dependencies)
[ "def", "from_string", "(", "cls", ",", "cl_function", ",", "dependencies", "=", "(", ")", ")", ":", "return_type", ",", "function_name", ",", "parameter_list", ",", "body", "=", "split_cl_function", "(", "cl_function", ")", "return", "SimpleCLFunction", "(", "...
Parse the given CL function into a SimpleCLFunction object. Args: cl_function (str): the function we wish to turn into an object dependencies (list or tuple of CLLibrary): The list of CL libraries this function depends on Returns: SimpleCLFunction: the CL function object parsed from the given string
[ "Parse", "the", "given", "CL", "function", "into", "a", "SimpleCLFunction", "object", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/cl_function.py#L155-L166
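A small sketch of parsing a CL function from a string (the CL body is illustrative); the accessor methods used here are the ones that appear elsewhere in this file:

from mot.lib.cl_function import SimpleCLFunction

func = SimpleCLFunction.from_string('''
    double square(double x){
        return x * x;
    }
''')

print(func.get_cl_function_name())  # 'square'
print(func.get_return_type())       # 'double'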
cbclab/MOT
mot/lib/cl_function.py
SimpleCLFunction._get_parameter_signatures
def _get_parameter_signatures(self): """Get the signature of the parameters for the CL function declaration. This should return the list of signatures of the parameters for use inside the function signature. Returns: list: the signatures of the parameters for the use in the CL code. """ declarations = [] for p in self.get_parameters(): new_p = p.get_renamed(p.name.replace('.', '_')) declarations.append(new_p.get_declaration()) return declarations
python
def _get_parameter_signatures(self): """Get the signature of the parameters for the CL function declaration. This should return the list of signatures of the parameters for use inside the function signature. Returns: list: the signatures of the parameters for the use in the CL code. """ declarations = [] for p in self.get_parameters(): new_p = p.get_renamed(p.name.replace('.', '_')) declarations.append(new_p.get_declaration()) return declarations
[ "def", "_get_parameter_signatures", "(", "self", ")", ":", "declarations", "=", "[", "]", "for", "p", "in", "self", ".", "get_parameters", "(", ")", ":", "new_p", "=", "p", ".", "get_renamed", "(", "p", ".", "name", ".", "replace", "(", "'.'", ",", "...
Get the signature of the parameters for the CL function declaration. This should return the list of signatures of the parameters for use inside the function signature. Returns: list: the signatures of the parameters for use in the CL code.
[ "Get", "the", "signature", "of", "the", "parameters", "for", "the", "CL", "function", "declaration", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/cl_function.py#L253-L265
cbclab/MOT
mot/lib/cl_function.py
SimpleCLFunction._get_cl_dependency_code
def _get_cl_dependency_code(self): """Get the CL code for all the dependencies. Returns: str: The CL code of all the dependencies. """ code = '' for d in self._dependencies: code += d.get_cl_code() + "\n" return code
python
def _get_cl_dependency_code(self): """Get the CL code for all the dependencies. Returns: str: The CL code of all the dependencies. """ code = '' for d in self._dependencies: code += d.get_cl_code() + "\n" return code
[ "def", "_get_cl_dependency_code", "(", "self", ")", ":", "code", "=", "''", "for", "d", "in", "self", ".", "_dependencies", ":", "code", "+=", "d", ".", "get_cl_code", "(", ")", "+", "\"\\n\"", "return", "code" ]
Get the CL code for all the dependencies. Returns: str: The CL code of all the dependencies.
[ "Get", "the", "CL", "code", "for", "all", "the", "CL", "code", "for", "all", "the", "dependencies", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/cl_function.py#L267-L276
cbclab/MOT
mot/lib/cl_function.py
_ProcedureWorker._build_kernel
def _build_kernel(self, kernel_source, compile_flags=()): """Convenience function for building the kernel for this worker. Args: kernel_source (str): the kernel source to use for building the kernel Returns: cl.Program: a compiled CL kernel """ return cl.Program(self._cl_context, kernel_source).build(' '.join(compile_flags))
python
def _build_kernel(self, kernel_source, compile_flags=()): """Convenience function for building the kernel for this worker. Args: kernel_source (str): the kernel source to use for building the kernel Returns: cl.Program: a compiled CL kernel """ return cl.Program(self._cl_context, kernel_source).build(' '.join(compile_flags))
[ "def", "_build_kernel", "(", "self", ",", "kernel_source", ",", "compile_flags", "=", "(", ")", ")", ":", "return", "cl", ".", "Program", "(", "self", ".", "_cl_context", ",", "kernel_source", ")", ".", "build", "(", "' '", ".", "join", "(", "compile_fla...
Convenience function for building the kernel for this worker. Args: kernel_source (str): the kernel source to use for building the kernel Returns: cl.Program: a compiled CL kernel
[ "Convenience", "function", "for", "building", "the", "kernel", "for", "this", "worker", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/cl_function.py#L706-L715
cbclab/MOT
mot/lib/cl_function.py
_ProcedureWorker._get_kernel_arguments
def _get_kernel_arguments(self): """Get the list of kernel arguments for loading the kernel data elements into the kernel. This will use the sorted keys for looping through the kernel input items. Returns: list of str: the list of parameter definitions """ declarations = [] for name, data in self._kernel_data.items(): declarations.extend(data.get_kernel_parameters('_' + name)) return declarations
python
def _get_kernel_arguments(self): """Get the list of kernel arguments for loading the kernel data elements into the kernel. This will use the sorted keys for looping through the kernel input items. Returns: list of str: the list of parameter definitions """ declarations = [] for name, data in self._kernel_data.items(): declarations.extend(data.get_kernel_parameters('_' + name)) return declarations
[ "def", "_get_kernel_arguments", "(", "self", ")", ":", "declarations", "=", "[", "]", "for", "name", ",", "data", "in", "self", ".", "_kernel_data", ".", "items", "(", ")", ":", "declarations", ".", "extend", "(", "data", ".", "get_kernel_parameters", "(",...
Get the list of kernel arguments for loading the kernel data elements into the kernel. This will use the sorted keys for looping through the kernel input items. Returns: list of str: the list of parameter definitions
[ "Get", "the", "list", "of", "kernel", "arguments", "for", "loading", "the", "kernel", "data", "elements", "into", "the", "kernel", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/cl_function.py#L752-L763
cbclab/MOT
mot/lib/cl_function.py
_ProcedureWorker.get_scalar_arg_dtypes
def get_scalar_arg_dtypes(self): """Get the location and types of the input scalars. Returns: list: for every kernel input element either None if the data is a buffer or the numpy data type if it is a scalar. """ dtypes = [] for name, data in self._kernel_data.items(): dtypes.extend(data.get_scalar_arg_dtypes()) return dtypes
python
def get_scalar_arg_dtypes(self): """Get the location and types of the input scalars. Returns: list: for every kernel input element either None if the data is a buffer or the numpy data type if it is a scalar. """ dtypes = [] for name, data in self._kernel_data.items(): dtypes.extend(data.get_scalar_arg_dtypes()) return dtypes
[ "def", "get_scalar_arg_dtypes", "(", "self", ")", ":", "dtypes", "=", "[", "]", "for", "name", ",", "data", "in", "self", ".", "_kernel_data", ".", "items", "(", ")", ":", "dtypes", ".", "extend", "(", "data", ".", "get_scalar_arg_dtypes", "(", ")", ")...
Get the location and types of the input scalars. Returns: list: for every kernel input element either None if the data is a buffer or the numpy data type if it is a scalar.
[ "Get", "the", "location", "and", "types", "of", "the", "input", "scalars", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/cl_function.py#L765-L775
ksbg/sparklanes
sparklanes/_framework/spark.py
SparkContextAndSessionContainer.set_sc
def set_sc(cls, master=None, appName=None, sparkHome=None, pyFiles=None, environment=None, batchSize=0, serializer=PickleSerializer(), conf=None, gateway=None, jsc=None, profiler_cls=BasicProfiler): """Creates and initializes a new `SparkContext` (the old one will be stopped). Argument signature is copied from `pyspark.SparkContext <https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.SparkContext>`_. """ if cls.sc is not None: cls.sc.stop() cls.sc = SparkContext(master, appName, sparkHome, pyFiles, environment, batchSize, serializer, conf, gateway, jsc, profiler_cls) cls.__init_spark()
python
def set_sc(cls, master=None, appName=None, sparkHome=None, pyFiles=None, environment=None, batchSize=0, serializer=PickleSerializer(), conf=None, gateway=None, jsc=None, profiler_cls=BasicProfiler): """Creates and initializes a new `SparkContext` (the old one will be stopped). Argument signature is copied from `pyspark.SparkContext <https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.SparkContext>`_. """ if cls.sc is not None: cls.sc.stop() cls.sc = SparkContext(master, appName, sparkHome, pyFiles, environment, batchSize, serializer, conf, gateway, jsc, profiler_cls) cls.__init_spark()
[ "def", "set_sc", "(", "cls", ",", "master", "=", "None", ",", "appName", "=", "None", ",", "sparkHome", "=", "None", ",", "pyFiles", "=", "None", ",", "environment", "=", "None", ",", "batchSize", "=", "0", ",", "serializer", "=", "PickleSerializer", "...
Creates and initializes a new `SparkContext` (the old one will be stopped). Argument signature is copied from `pyspark.SparkContext <https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.SparkContext>`_.
[ "Creates", "and", "initializes", "a", "new", "SparkContext", "(", "the", "old", "one", "will", "be", "stopped", ")", ".", "Argument", "signature", "is", "copied", "from", "pyspark", ".", "SparkContext", "<https", ":", "//", "spark", ".", "apache", ".", "or...
train
https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/spark.py#L24-L36
ksbg/sparklanes
sparklanes/_framework/spark.py
SparkContextAndSessionContainer.set_spark
def set_spark(cls, master=None, appName=None, conf=None, hive_support=False): """Creates and initializes a new `SparkSession`. Argument signature is copied from `pyspark.sql.SparkSession <https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.SparkSession>`_. """ sess = SparkSession.builder if master: sess.master(master) if appName: sess.appName(appName) if conf: sess.config(conf=conf) if hive_support: sess.enableHiveSupport() cls.spark = sess.getOrCreate()
python
def set_spark(cls, master=None, appName=None, conf=None, hive_support=False): """Creates and initializes a new `SparkSession`. Argument signature is copied from `pyspark.sql.SparkSession <https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.SparkSession>`_. """ sess = SparkSession.builder if master: sess.master(master) if appName: sess.appName(appName) if conf: sess.config(conf=conf) if hive_support: sess.enableHiveSupport() cls.spark = sess.getOrCreate()
[ "def", "set_spark", "(", "cls", ",", "master", "=", "None", ",", "appName", "=", "None", ",", "conf", "=", "None", ",", "hive_support", "=", "False", ")", ":", "sess", "=", "SparkSession", ".", "builder", "if", "master", ":", "sess", ".", "master", "...
Creates and initializes a new `SparkSession`. Argument signature is copied from `pyspark.sql.SparkSession <https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.SparkSession>`_.
[ "Creates", "and", "initializes", "a", "new", "SparkSession", ".", "Argument", "signature", "is", "copied", "from", "pyspark", ".", "sql", ".", "SparkSession", "<https", ":", "//", "spark", ".", "apache", ".", "org", "/", "docs", "/", "latest", "/", "api", ...
train
https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/spark.py#L39-L54
ksbg/sparklanes
sparklanes/_submit/submit.py
_package_and_submit
def _package_and_submit(args): """ Packages and submits a job, which is defined in a YAML file, to Spark. Parameters ---------- args (List): Command-line arguments """ args = _parse_and_validate_args(args) logging.debug(args) dist = __make_tmp_dir() try: __package_dependencies(dist_dir=dist, additional_reqs=args['requirements'], silent=args['silent']) __package_app(tasks_pkg=args['package'], dist_dir=dist, custom_main=args['main'], extra_data=args['extra_data']) __run_spark_submit(lane_yaml=args['yaml'], dist_dir=dist, spark_home=args['spark_home'], spark_args=args['spark_args'], silent=args['silent']) except Exception as exc: __clean_up(dist) raise exc __clean_up(dist)
python
def _package_and_submit(args): """ Packages and submits a job, which is defined in a YAML file, to Spark. Parameters ---------- args (List): Command-line arguments """ args = _parse_and_validate_args(args) logging.debug(args) dist = __make_tmp_dir() try: __package_dependencies(dist_dir=dist, additional_reqs=args['requirements'], silent=args['silent']) __package_app(tasks_pkg=args['package'], dist_dir=dist, custom_main=args['main'], extra_data=args['extra_data']) __run_spark_submit(lane_yaml=args['yaml'], dist_dir=dist, spark_home=args['spark_home'], spark_args=args['spark_args'], silent=args['silent']) except Exception as exc: __clean_up(dist) raise exc __clean_up(dist)
[ "def", "_package_and_submit", "(", "args", ")", ":", "args", "=", "_parse_and_validate_args", "(", "args", ")", "logging", ".", "debug", "(", "args", ")", "dist", "=", "__make_tmp_dir", "(", ")", "try", ":", "__package_dependencies", "(", "dist_dir", "=", "d...
Packages and submits a job, which is defined in a YAML file, to Spark. Parameters ---------- args (List): Command-line arguments
[ "Packages", "and", "submits", "a", "job", "which", "is", "defined", "in", "a", "YAML", "file", "to", "Spark", "." ]
train
https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_submit/submit.py#L19-L47
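A hedged sketch of driving the submission helper above directly with command-line-style arguments (the paths are placeholders; the flags are the ones defined in _parse_and_validate_args below, and in practice this is normally reached through the package's console entry point):

from sparklanes._submit.submit import _package_and_submit

_package_and_submit([
    '-y', 'lane.yaml',         # lane definition file
    '-p', 'my_tasks',          # python package containing the Task classes
    '-r', 'requirements.txt',  # extra dependencies to package for Spark
    '--silent',
])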
ksbg/sparklanes
sparklanes/_submit/submit.py
_parse_and_validate_args
def _parse_and_validate_args(args): """ Parse and validate arguments. During validation, it is checked whether the given files/directories exist, while also converting relative paths to absolute ones. Parameters ---------- args (List): Command-line arguments """ class ExtendAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): if getattr(namespace, self.dest, None) is None: setattr(namespace, self.dest, []) getattr(namespace, self.dest).extend(values) parser = argparse.ArgumentParser(description='Submitting a lane to spark.') parser.add_argument('-y', '--yaml', type=str, required=True, help='Path to the yaml definition file.') parser.add_argument('-p', '--package', type=str, required=True, help='Path to the python package containing your tasks.') parser.add_argument('-r', '--requirements', type=str, required=False, help='Path to a `requirements.txt` specifying any additional dependencies ' 'of your tasks.') parser.add_argument('-e', '--extra-data', nargs='*', required=False, action=ExtendAction, help='Path to any additional files or directories that should be packaged ' 'and sent to Spark.') parser.add_argument('-m', '--main', type=str, required=False, help='Path to a custom main python file') parser.add_argument('-d', '--spark-home', type=str, required=False, help='Custom path to the directory containing your Spark installation. If ' 'none is given, sparklanes will try to use the `spark-submit` command ' 'from your PATH') parser.add_argument('-s', '--spark-args', nargs='*', required=False, help='Any additional arguments that should be sent to Spark via ' 'spark-submit. ' '(e.g. `--spark-args executor-memory=20G total-executor-cores=100`)') parser.add_argument('--silent', help='If set, no output will be sent to console', action='store_true') args = parser.parse_args(args).__dict__ # Check/fix files/dirs for param in ('package', 'spark_home'): args[param] = __validate_and_fix_path(args[param], check_dir=True) for param in ('yaml', 'requirements', 'main'): args[param] = __validate_and_fix_path(args[param], check_file=True) if args['extra_data']: for i in range(len(args['extra_data'])): args['extra_data'][i] = __validate_and_fix_path(args['extra_data'][i], check_file=True, check_dir=True) # Check if python package if not os.path.isfile(os.path.join(args['package'], '__init__.py')): raise SystemExit('Could not confirm `%s` is a python package. Make sure it contains an ' '`__init__.py`.') # Check/fix spark args if args['spark_args']: args['spark_args'] = __validate_and_fix_spark_args(args['spark_args']) return args
python
def _parse_and_validate_args(args): """ Parse and validate arguments. During validation, it is checked whether the given files/directories exist, while also converting relative paths to absolute ones. Parameters ---------- args (List): Command-line arguments """ class ExtendAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): if getattr(namespace, self.dest, None) is None: setattr(namespace, self.dest, []) getattr(namespace, self.dest).extend(values) parser = argparse.ArgumentParser(description='Submitting a lane to spark.') parser.add_argument('-y', '--yaml', type=str, required=True, help='Path to the yaml definition file.') parser.add_argument('-p', '--package', type=str, required=True, help='Path to the python package containing your tasks.') parser.add_argument('-r', '--requirements', type=str, required=False, help='Path to a `requirements.txt` specifying any additional dependencies ' 'of your tasks.') parser.add_argument('-e', '--extra-data', nargs='*', required=False, action=ExtendAction, help='Path to any additional files or directories that should be packaged ' 'and sent to Spark.') parser.add_argument('-m', '--main', type=str, required=False, help='Path to a custom main python file') parser.add_argument('-d', '--spark-home', type=str, required=False, help='Custom path to the directory containing your Spark installation. If ' 'none is given, sparklanes will try to use the `spark-submit` command ' 'from your PATH') parser.add_argument('-s', '--spark-args', nargs='*', required=False, help='Any additional arguments that should be sent to Spark via ' 'spark-submit. ' '(e.g. `--spark-args executor-memory=20G total-executor-cores=100`)') parser.add_argument('--silent', help='If set, no output will be sent to console', action='store_true') args = parser.parse_args(args).__dict__ # Check/fix files/dirs for param in ('package', 'spark_home'): args[param] = __validate_and_fix_path(args[param], check_dir=True) for param in ('yaml', 'requirements', 'main'): args[param] = __validate_and_fix_path(args[param], check_file=True) if args['extra_data']: for i in range(len(args['extra_data'])): args['extra_data'][i] = __validate_and_fix_path(args['extra_data'][i], check_file=True, check_dir=True) # Check if python package if not os.path.isfile(os.path.join(args['package'], '__init__.py')): raise SystemExit('Could not confirm `%s` is a python package. Make sure it contains an ' '`__init__.py`.') # Check/fix spark args if args['spark_args']: args['spark_args'] = __validate_and_fix_spark_args(args['spark_args']) return args
[ "def", "_parse_and_validate_args", "(", "args", ")", ":", "class", "ExtendAction", "(", "argparse", ".", "Action", ")", ":", "def", "__call__", "(", "self", ",", "parser", ",", "namespace", ",", "values", ",", "option_string", "=", "None", ")", ":", "if", ...
Parse and validate arguments. During validation, it is checked whether the given files/directories exist, while also converting relative paths to absolute ones. Parameters ---------- args (List): Command-line arguments
[ "Parse", "and", "validate", "arguments", ".", "During", "validation", "it", "is", "checked", "whether", "the", "given", "files", "/", "directories", "exist", "while", "also", "converting", "relative", "paths", "to", "absolute", "ones", "." ]
train
https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_submit/submit.py#L50-L109
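A short usage sketch for _parse_and_validate_args above; the file and directory names are placeholders, not taken from the source, and the paths must actually exist because validation resolves them to absolute paths:

argv = ['-y', 'lane.yml', '-p', 'tasks',
        '-s', 'master=local[4]', 'deploy-mode=client',
        '--silent']
parsed = _parse_and_validate_args(argv)
# parsed['yaml'] and parsed['package'] now hold absolute paths,
# parsed['silent'] is True, and parsed['spark_args'] has been rewritten
# into spark-submit form by __validate_and_fix_spark_args.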
ksbg/sparklanes
sparklanes/_submit/submit.py
__validate_and_fix_path
def __validate_and_fix_path(path, check_file=False, check_dir=False): """Check if a file/directory exists and converts relative paths to absolute ones""" # pylint: disable=superfluous-parens if path is None: return path else: if not (os.path.isfile(path) if check_file else False) \ and not (os.path.isdir(path) if check_dir else False): raise SystemExit('Path `%s` does not exist' % path) if not os.path.isabs(path): path = os.path.abspath(os.path.join(os.path.abspath(os.curdir), path)) return path
python
def __validate_and_fix_path(path, check_file=False, check_dir=False): """Check if a file/directory exists and converts relative paths to absolute ones""" # pylint: disable=superfluous-parens if path is None: return path else: if not (os.path.isfile(path) if check_file else False) \ and not (os.path.isdir(path) if check_dir else False): raise SystemExit('Path `%s` does not exist' % path) if not os.path.isabs(path): path = os.path.abspath(os.path.join(os.path.abspath(os.curdir), path)) return path
[ "def", "__validate_and_fix_path", "(", "path", ",", "check_file", "=", "False", ",", "check_dir", "=", "False", ")", ":", "# pylint: disable=superfluous-parens", "if", "path", "is", "None", ":", "return", "path", "else", ":", "if", "not", "(", "os", ".", "pa...
Check if a file/directory exists and converts relative paths to absolute ones
[ "Check", "if", "a", "file", "/", "directory", "exists", "and", "converts", "relative", "paths", "to", "absolute", "ones" ]
train
https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_submit/submit.py#L112-L124
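Hedged examples of the behaviour of __validate_and_fix_path, assuming a relative directory ./tasks exists in the current working directory (the names are illustrative only):

__validate_and_fix_path(None)                             # -> None, nothing to check
__validate_and_fix_path('tasks', check_dir=True)          # -> absolute path to ./tasks
__validate_and_fix_path('missing.yml', check_file=True)   # raises SystemExit: path does not exist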
ksbg/sparklanes
sparklanes/_submit/submit.py
__validate_and_fix_spark_args
def __validate_and_fix_spark_args(spark_args): """ Prepares spark arguments. In the command-line script, they are passed as for example `-s master=local[4] deploy-mode=client verbose`, which would be passed to spark-submit as `--master local[4] --deploy-mode client --verbose` Parameters ---------- spark_args (List): List of spark arguments Returns ------- fixed_args (List): List of fixed and validated spark arguments """ pattern = re.compile(r'[\w\-_]+=.+') fixed_args = [] for arg in spark_args: if arg not in SPARK_SUBMIT_FLAGS: if not pattern.match(arg): raise SystemExit('Spark argument `%s` does not seem to be in the correct format ' '`ARG_NAME=ARG_VAL`, and is also not recognized to be one of the' 'valid spark-submit flags (%s).' % (arg, str(SPARK_SUBMIT_FLAGS))) eq_pos = arg.find('=') fixed_args.append('--' + arg[:eq_pos]) fixed_args.append(arg[eq_pos + 1:]) else: fixed_args.append('--' + arg) return fixed_args
python
def __validate_and_fix_spark_args(spark_args): """ Prepares spark arguments. In the command-line script, they are passed as for example `-s master=local[4] deploy-mode=client verbose`, which would be passed to spark-submit as `--master local[4] --deploy-mode client --verbose` Parameters ---------- spark_args (List): List of spark arguments Returns ------- fixed_args (List): List of fixed and validated spark arguments """ pattern = re.compile(r'[\w\-_]+=.+') fixed_args = [] for arg in spark_args: if arg not in SPARK_SUBMIT_FLAGS: if not pattern.match(arg): raise SystemExit('Spark argument `%s` does not seem to be in the correct format ' '`ARG_NAME=ARG_VAL`, and is also not recognized to be one of the' 'valid spark-submit flags (%s).' % (arg, str(SPARK_SUBMIT_FLAGS))) eq_pos = arg.find('=') fixed_args.append('--' + arg[:eq_pos]) fixed_args.append(arg[eq_pos + 1:]) else: fixed_args.append('--' + arg) return fixed_args
[ "def", "__validate_and_fix_spark_args", "(", "spark_args", ")", ":", "pattern", "=", "re", ".", "compile", "(", "r'[\\w\\-_]+=.+'", ")", "fixed_args", "=", "[", "]", "for", "arg", "in", "spark_args", ":", "if", "arg", "not", "in", "SPARK_SUBMIT_FLAGS", ":", ...
Prepares spark arguments. In the command-line script, they are passed as for example `-s master=local[4] deploy-mode=client verbose`, which would be passed to spark-submit as `--master local[4] --deploy-mode client --verbose` Parameters ---------- spark_args (List): List of spark arguments Returns ------- fixed_args (List): List of fixed and validated spark arguments
[ "Prepares", "spark", "arguments", ".", "In", "the", "command", "-", "line", "script", "they", "are", "passed", "as", "for", "example", "-", "s", "master", "=", "local", "[", "4", "]", "deploy", "-", "mode", "=", "client", "verbose", "which", "would", "...
train
https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_submit/submit.py#L127-L155
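A sketch of the rewrite performed above, assuming 'verbose' is among the flags listed in SPARK_SUBMIT_FLAGS (that constant is defined elsewhere in the module and is not shown in this excerpt):

spark_args = ['master=local[4]', 'deploy-mode=client', 'verbose']
__validate_and_fix_spark_args(spark_args)
# -> ['--master', 'local[4]', '--deploy-mode', 'client', '--verbose']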
ksbg/sparklanes
sparklanes/_submit/submit.py
__package_dependencies
def __package_dependencies(dist_dir, additional_reqs, silent): """ Installs the app's dependencies from pip and packages them (as zip), to be submitted to spark. Parameters ---------- dist_dir (str): Path to directory where the packaged libs shall be located additional_reqs (str): Path to a requirements.txt, containing any of the app's additional requirements silent (bool): Flag indicating whether pip output should be printed to console """ logging.info('Packaging dependencies') libs_dir = os.path.join(dist_dir, 'libs') if not os.path.isdir(libs_dir): os.mkdir(libs_dir) # Get requirements req_txt = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'requirements-submit.txt') with open(req_txt, 'r') as req: requirements = req.read().splitlines() if additional_reqs: with open(additional_reqs, 'r') as req: for row in req: requirements.append(row) # Remove duplicates requirements = list(set(requirements)) # Install devnull = open(os.devnull, 'w') outp = {'stderr': STDOUT, 'stdout': devnull} if silent else {} for pkg in requirements: cmd = ['pip', 'install', pkg, '-t', libs_dir] logging.debug('Calling `%s`', str(cmd)) call(cmd, **outp) devnull.close() # Package shutil.make_archive(libs_dir, 'zip', libs_dir, './')
python
def __package_dependencies(dist_dir, additional_reqs, silent): """ Installs the app's dependencies from pip and packages them (as zip), to be submitted to spark. Parameters ---------- dist_dir (str): Path to directory where the packaged libs shall be located additional_reqs (str): Path to a requirements.txt, containing any of the app's additional requirements silent (bool): Flag indicating whether pip output should be printed to console """ logging.info('Packaging dependencies') libs_dir = os.path.join(dist_dir, 'libs') if not os.path.isdir(libs_dir): os.mkdir(libs_dir) # Get requirements req_txt = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'requirements-submit.txt') with open(req_txt, 'r') as req: requirements = req.read().splitlines() if additional_reqs: with open(additional_reqs, 'r') as req: for row in req: requirements.append(row) # Remove duplicates requirements = list(set(requirements)) # Install devnull = open(os.devnull, 'w') outp = {'stderr': STDOUT, 'stdout': devnull} if silent else {} for pkg in requirements: cmd = ['pip', 'install', pkg, '-t', libs_dir] logging.debug('Calling `%s`', str(cmd)) call(cmd, **outp) devnull.close() # Package shutil.make_archive(libs_dir, 'zip', libs_dir, './')
[ "def", "__package_dependencies", "(", "dist_dir", ",", "additional_reqs", ",", "silent", ")", ":", "logging", ".", "info", "(", "'Packaging dependencies'", ")", "libs_dir", "=", "os", ".", "path", ".", "join", "(", "dist_dir", ",", "'libs'", ")", "if", "not"...
Installs the app's dependencies from pip and packages them (as zip), to be submitted to spark. Parameters ---------- dist_dir (str): Path to directory where the packaged libs shall be located additional_reqs (str): Path to a requirements.txt, containing any of the app's additional requirements silent (bool): Flag indicating whether pip output should be printed to console
[ "Installs", "the", "app", "s", "dependencies", "from", "pip", "and", "packages", "them", "(", "as", "zip", ")", "to", "be", "submitted", "to", "spark", "." ]
train
https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_submit/submit.py#L172-L210
ksbg/sparklanes
sparklanes/_submit/submit.py
__package_app
def __package_app(tasks_pkg, dist_dir, custom_main=None, extra_data=None): """ Packages the `tasks_pkg` (as zip) to `dist_dir`. Also copies the 'main' python file to `dist_dir`, to be submitted to spark. Same for `extra_data`. Parameters ---------- tasks_pkg (str): Path to the python package containing tasks dist_dir (str): Path to the directory where the packaged code should be stored custom_main (str): Path to a custom 'main' python file. extra_data (List[str]): List containing paths to files/directories that should also be packaged and submitted to spark """ logging.info('Packaging application') # Package tasks tasks_dir_splits = os.path.split(os.path.realpath(tasks_pkg)) shutil.make_archive(os.path.join(dist_dir, 'tasks'), 'zip', tasks_dir_splits[0], tasks_dir_splits[1]) # Package main.py if custom_main is None: from . import _main main_path = _main.__file__ if main_path[-3:] == 'pyc': main_path = main_path[:-1] shutil.copy(os.path.realpath(main_path), os.path.join(dist_dir, 'main.py')) else: shutil.copy(os.path.realpath(custom_main), os.path.join(dist_dir, 'main.py')) # Package _framework shutil.make_archive(os.path.join(dist_dir, '_framework'), 'zip', os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'), './sparklanes/') # Package extra data if extra_data: for dat in extra_data: real_path = os.path.realpath(dat) target = os.path.join(dist_dir, os.path.split(real_path)[1]) if os.path.isfile(real_path): shutil.copy(real_path, target) elif os.path.isdir(real_path): shutil.copytree(real_path, target) else: raise IOError('File `%s` not found at `%s`.' % (dat, real_path))
python
def __package_app(tasks_pkg, dist_dir, custom_main=None, extra_data=None): """ Packages the `tasks_pkg` (as zip) to `dist_dir`. Also copies the 'main' python file to `dist_dir`, to be submitted to spark. Same for `extra_data`. Parameters ---------- tasks_pkg (str): Path to the python package containing tasks dist_dir (str): Path to the directory where the packaged code should be stored custom_main (str): Path to a custom 'main' python file. extra_data (List[str]): List containing paths to files/directories that should also be packaged and submitted to spark """ logging.info('Packaging application') # Package tasks tasks_dir_splits = os.path.split(os.path.realpath(tasks_pkg)) shutil.make_archive(os.path.join(dist_dir, 'tasks'), 'zip', tasks_dir_splits[0], tasks_dir_splits[1]) # Package main.py if custom_main is None: from . import _main main_path = _main.__file__ if main_path[-3:] == 'pyc': main_path = main_path[:-1] shutil.copy(os.path.realpath(main_path), os.path.join(dist_dir, 'main.py')) else: shutil.copy(os.path.realpath(custom_main), os.path.join(dist_dir, 'main.py')) # Package _framework shutil.make_archive(os.path.join(dist_dir, '_framework'), 'zip', os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'), './sparklanes/') # Package extra data if extra_data: for dat in extra_data: real_path = os.path.realpath(dat) target = os.path.join(dist_dir, os.path.split(real_path)[1]) if os.path.isfile(real_path): shutil.copy(real_path, target) elif os.path.isdir(real_path): shutil.copytree(real_path, target) else: raise IOError('File `%s` not found at `%s`.' % (dat, real_path))
[ "def", "__package_app", "(", "tasks_pkg", ",", "dist_dir", ",", "custom_main", "=", "None", ",", "extra_data", "=", "None", ")", ":", "logging", ".", "info", "(", "'Packaging application'", ")", "# Package tasks", "tasks_dir_splits", "=", "os", ".", "path", "....
Packages the `tasks_pkg` (as zip) to `dist_dir`. Also copies the 'main' python file to `dist_dir`, to be submitted to spark. Same for `extra_data`. Parameters ---------- tasks_pkg (str): Path to the python package containing tasks dist_dir (str): Path to the directory where the packaged code should be stored custom_main (str): Path to a custom 'main' python file. extra_data (List[str]): List containing paths to files/directories that should also be packaged and submitted to spark
[ "Packages", "the", "tasks_pkg", "(", "as", "zip", ")", "to", "dist_dir", ".", "Also", "copies", "the", "main", "python", "file", "to", "dist_dir", "to", "be", "submitted", "to", "spark", ".", "Same", "for", "extra_data", "." ]
train
https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_submit/submit.py#L213-L263
ksbg/sparklanes
sparklanes/_submit/submit.py
__run_spark_submit
def __run_spark_submit(lane_yaml, dist_dir, spark_home, spark_args, silent): """ Submits the packaged application to spark using a `spark-submit` subprocess Parameters ---------- lane_yaml (str): Path to the YAML lane definition file dist_dir (str): Path to the directory where the packaged code is located spark_args (str): String of any additional spark config args to be passed when submitting silent (bool): Flag indicating whether job output should be printed to console """ # spark-submit binary cmd = ['spark-submit' if spark_home is None else os.path.join(spark_home, 'bin/spark-submit')] # Supplied spark arguments if spark_args: cmd += spark_args # Packaged App & lane cmd += ['--py-files', 'libs.zip,_framework.zip,tasks.zip', 'main.py'] cmd += ['--lane', lane_yaml] logging.info('Submitting to Spark') logging.debug(str(cmd)) # Submit devnull = open(os.devnull, 'w') outp = {'stderr': STDOUT, 'stdout': devnull} if silent else {} call(cmd, cwd=dist_dir, env=MY_ENV, **outp) devnull.close()
python
def __run_spark_submit(lane_yaml, dist_dir, spark_home, spark_args, silent): """ Submits the packaged application to spark using a `spark-submit` subprocess Parameters ---------- lane_yaml (str): Path to the YAML lane definition file dist_dir (str): Path to the directory where the packaged code is located spark_args (str): String of any additional spark config args to be passed when submitting silent (bool): Flag indicating whether job output should be printed to console """ # spark-submit binary cmd = ['spark-submit' if spark_home is None else os.path.join(spark_home, 'bin/spark-submit')] # Supplied spark arguments if spark_args: cmd += spark_args # Packaged App & lane cmd += ['--py-files', 'libs.zip,_framework.zip,tasks.zip', 'main.py'] cmd += ['--lane', lane_yaml] logging.info('Submitting to Spark') logging.debug(str(cmd)) # Submit devnull = open(os.devnull, 'w') outp = {'stderr': STDOUT, 'stdout': devnull} if silent else {} call(cmd, cwd=dist_dir, env=MY_ENV, **outp) devnull.close()
[ "def", "__run_spark_submit", "(", "lane_yaml", ",", "dist_dir", ",", "spark_home", ",", "spark_args", ",", "silent", ")", ":", "# spark-submit binary", "cmd", "=", "[", "'spark-submit'", "if", "spark_home", "is", "None", "else", "os", ".", "path", ".", "join",...
Submits the packaged application to spark using a `spark-submit` subprocess Parameters ---------- lane_yaml (str): Path to the YAML lane definition file dist_dir (str): Path to the directory where the packaged code is located spark_args (str): String of any additional spark config args to be passed when submitting silent (bool): Flag indicating whether job output should be printed to console
[ "Submits", "the", "packaged", "application", "to", "spark", "using", "a", "spark", "-", "submit", "subprocess" ]
train
https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_submit/submit.py#L266-L295
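For illustration, with spark_home left unset and spark_args already rewritten to ['--master', 'local[4]'], the argument list handed to call() would look roughly as follows (the lane file name is a placeholder):

cmd = ['spark-submit',
       '--master', 'local[4]',
       '--py-files', 'libs.zip,_framework.zip,tasks.zip', 'main.py',
       '--lane', 'lane.yml']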
cbclab/MOT
mot/lib/utils.py
add_include_guards
def add_include_guards(cl_str, guard_name=None): """Add include guards to the given string. If you are including the same body of CL code multiple times in a Kernel, it is important to add include guards (https://en.wikipedia.org/wiki/Include_guard) around them to prevent the kernel from registering the function twice. Args: cl_str (str): the piece of CL code as a string to which we add the include guards guard_name (str): the name of the C pre-processor guard. If not given we use the MD5 hash of the given cl string. Returns: str: the same string but then with include guards around them. """ if not guard_name: guard_name = 'GUARD_' + hashlib.md5(cl_str.encode('utf-8')).hexdigest() return ''' # ifndef {guard_name} # define {guard_name} {func_str} # endif // {guard_name} '''.format(func_str=cl_str, guard_name=guard_name)
python
def add_include_guards(cl_str, guard_name=None): """Add include guards to the given string. If you are including the same body of CL code multiple times in a Kernel, it is important to add include guards (https://en.wikipedia.org/wiki/Include_guard) around them to prevent the kernel from registering the function twice. Args: cl_str (str): the piece of CL code as a string to which we add the include guards guard_name (str): the name of the C pre-processor guard. If not given we use the MD5 hash of the given cl string. Returns: str: the same string but then with include guards around them. """ if not guard_name: guard_name = 'GUARD_' + hashlib.md5(cl_str.encode('utf-8')).hexdigest() return ''' # ifndef {guard_name} # define {guard_name} {func_str} # endif // {guard_name} '''.format(func_str=cl_str, guard_name=guard_name)
[ "def", "add_include_guards", "(", "cl_str", ",", "guard_name", "=", "None", ")", ":", "if", "not", "guard_name", ":", "guard_name", "=", "'GUARD_'", "+", "hashlib", ".", "md5", "(", "cl_str", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "("...
Add include guards to the given string. If you are including the same body of CL code multiple times in a Kernel, it is important to add include guards (https://en.wikipedia.org/wiki/Include_guard) around them to prevent the kernel from registering the function twice. Args: cl_str (str): the piece of CL code as a string to which we add the include guards guard_name (str): the name of the C pre-processor guard. If not given we use the MD5 hash of the given cl string. Returns: str: the same string but then with include guards around them.
[ "Add", "include", "guards", "to", "the", "given", "string", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/utils.py#L21-L44
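A minimal sketch of what add_include_guards produces; the guard name below is schematic, since the real default is 'GUARD_' plus the MD5 hex digest of the snippet:

guarded = add_include_guards('double square(double x){ return x * x; }')
# guarded is roughly:
#   # ifndef GUARD_<md5 of the snippet>
#   # define GUARD_<md5 of the snippet>
#   double square(double x){ return x * x; }
#   # endif // GUARD_<md5 of the snippet>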
cbclab/MOT
mot/lib/utils.py
ctype_to_dtype
def ctype_to_dtype(cl_type, mot_float_type='float'): """Get the numpy dtype of the given cl_type string. Args: cl_type (str): the CL data type to match, for example 'float' or 'float4'. mot_float_type (str): the C name of the ``mot_float_type``. The dtype will be looked up recursively. Returns: dtype: the numpy datatype """ if is_vector_ctype(cl_type): raw_type, vector_length = split_vector_ctype(cl_type) if raw_type == 'mot_float_type': if is_vector_ctype(mot_float_type): raw_type, _ = split_vector_ctype(mot_float_type) else: raw_type = mot_float_type vector_type = raw_type + str(vector_length) return getattr(cl_array.vec, vector_type) else: if cl_type == 'mot_float_type': cl_type = mot_float_type data_types = [ ('char', np.int8), ('uchar', np.uint8), ('short', np.int16), ('ushort', np.uint16), ('int', np.int32), ('uint', np.uint32), ('long', np.int64), ('ulong', np.uint64), ('float', np.float32), ('double', np.float64), ] for ctype, dtype in data_types: if ctype == cl_type: return dtype
python
def ctype_to_dtype(cl_type, mot_float_type='float'): """Get the numpy dtype of the given cl_type string. Args: cl_type (str): the CL data type to match, for example 'float' or 'float4'. mot_float_type (str): the C name of the ``mot_float_type``. The dtype will be looked up recursively. Returns: dtype: the numpy datatype """ if is_vector_ctype(cl_type): raw_type, vector_length = split_vector_ctype(cl_type) if raw_type == 'mot_float_type': if is_vector_ctype(mot_float_type): raw_type, _ = split_vector_ctype(mot_float_type) else: raw_type = mot_float_type vector_type = raw_type + str(vector_length) return getattr(cl_array.vec, vector_type) else: if cl_type == 'mot_float_type': cl_type = mot_float_type data_types = [ ('char', np.int8), ('uchar', np.uint8), ('short', np.int16), ('ushort', np.uint16), ('int', np.int32), ('uint', np.uint32), ('long', np.int64), ('ulong', np.uint64), ('float', np.float32), ('double', np.float64), ] for ctype, dtype in data_types: if ctype == cl_type: return dtype
[ "def", "ctype_to_dtype", "(", "cl_type", ",", "mot_float_type", "=", "'float'", ")", ":", "if", "is_vector_ctype", "(", "cl_type", ")", ":", "raw_type", ",", "vector_length", "=", "split_vector_ctype", "(", "cl_type", ")", "if", "raw_type", "==", "'mot_float_typ...
Get the numpy dtype of the given cl_type string. Args: cl_type (str): the CL data type to match, for example 'float' or 'float4'. mot_float_type (str): the C name of the ``mot_float_type``. The dtype will be looked up recursively. Returns: dtype: the numpy datatype
[ "Get", "the", "numpy", "dtype", "of", "the", "given", "cl_type", "string", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/utils.py#L60-L99
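Some example lookups, assuming pyopencl and numpy are importable as in the module (cl_array.vec supplies the vector dtypes):

ctype_to_dtype('float')                      # -> numpy.float32
ctype_to_dtype('double4')                    # -> pyopencl.array.vec.double4
ctype_to_dtype('mot_float_type', 'double')   # -> numpy.float64, mot_float_type resolved to double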
cbclab/MOT
mot/lib/utils.py
convert_data_to_dtype
def convert_data_to_dtype(data, data_type, mot_float_type='float'): """Convert the given input data to the correct numpy type. Args: data (ndarray): The value to convert to the correct numpy type data_type (str): the data type we need to convert the data to mot_float_type (str): the data type of the current ``mot_float_type`` Returns: ndarray: the input data but then converted to the desired numpy data type """ scalar_dtype = ctype_to_dtype(data_type, mot_float_type) if isinstance(data, numbers.Number): data = scalar_dtype(data) if is_vector_ctype(data_type): shape = data.shape dtype = ctype_to_dtype(data_type, mot_float_type) ve = np.zeros(shape[:-1], dtype=dtype) if len(shape) == 1: for vector_ind in range(shape[0]): ve[0][vector_ind] = data[vector_ind] elif len(shape) == 2: for i in range(data.shape[0]): for vector_ind in range(data.shape[1]): ve[i][vector_ind] = data[i, vector_ind] elif len(shape) == 3: for i in range(data.shape[0]): for j in range(data.shape[1]): for vector_ind in range(data.shape[2]): ve[i, j][vector_ind] = data[i, j, vector_ind] return np.require(ve, requirements=['C', 'A', 'O']) return np.require(data, scalar_dtype, ['C', 'A', 'O'])
python
def convert_data_to_dtype(data, data_type, mot_float_type='float'): """Convert the given input data to the correct numpy type. Args: data (ndarray): The value to convert to the correct numpy type data_type (str): the data type we need to convert the data to mot_float_type (str): the data type of the current ``mot_float_type`` Returns: ndarray: the input data but then converted to the desired numpy data type """ scalar_dtype = ctype_to_dtype(data_type, mot_float_type) if isinstance(data, numbers.Number): data = scalar_dtype(data) if is_vector_ctype(data_type): shape = data.shape dtype = ctype_to_dtype(data_type, mot_float_type) ve = np.zeros(shape[:-1], dtype=dtype) if len(shape) == 1: for vector_ind in range(shape[0]): ve[0][vector_ind] = data[vector_ind] elif len(shape) == 2: for i in range(data.shape[0]): for vector_ind in range(data.shape[1]): ve[i][vector_ind] = data[i, vector_ind] elif len(shape) == 3: for i in range(data.shape[0]): for j in range(data.shape[1]): for vector_ind in range(data.shape[2]): ve[i, j][vector_ind] = data[i, j, vector_ind] return np.require(ve, requirements=['C', 'A', 'O']) return np.require(data, scalar_dtype, ['C', 'A', 'O'])
[ "def", "convert_data_to_dtype", "(", "data", ",", "data_type", ",", "mot_float_type", "=", "'float'", ")", ":", "scalar_dtype", "=", "ctype_to_dtype", "(", "data_type", ",", "mot_float_type", ")", "if", "isinstance", "(", "data", ",", "numbers", ".", "Number", ...
Convert the given input data to the correct numpy type. Args: data (ndarray): The value to convert to the correct numpy type data_type (str): the data type we need to convert the data to mot_float_type (str): the data type of the current ``mot_float_type`` Returns: ndarray: the input data but then converted to the desired numpy data type
[ "Convert", "the", "given", "input", "data", "to", "the", "correct", "numpy", "type", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/utils.py#L102-L137
cbclab/MOT
mot/lib/utils.py
split_vector_ctype
def split_vector_ctype(ctype): """Split a vector ctype into a raw ctype and the vector length. If the given ctype is not a vector type, we raise an error. I Args: ctype (str): the ctype to possibly split into a raw ctype and the vector length Returns: tuple: the raw ctype and the vector length """ if not is_vector_ctype(ctype): raise ValueError('The given ctype is not a vector type.') for vector_length in [2, 3, 4, 8, 16]: if ctype.endswith(str(vector_length)): vector_str_len = len(str(vector_length)) return ctype[:-vector_str_len], int(ctype[-vector_str_len:])
python
def split_vector_ctype(ctype): """Split a vector ctype into a raw ctype and the vector length. If the given ctype is not a vector type, we raise an error. I Args: ctype (str): the ctype to possibly split into a raw ctype and the vector length Returns: tuple: the raw ctype and the vector length """ if not is_vector_ctype(ctype): raise ValueError('The given ctype is not a vector type.') for vector_length in [2, 3, 4, 8, 16]: if ctype.endswith(str(vector_length)): vector_str_len = len(str(vector_length)) return ctype[:-vector_str_len], int(ctype[-vector_str_len:])
[ "def", "split_vector_ctype", "(", "ctype", ")", ":", "if", "not", "is_vector_ctype", "(", "ctype", ")", ":", "raise", "ValueError", "(", "'The given ctype is not a vector type.'", ")", "for", "vector_length", "in", "[", "2", ",", "3", ",", "4", ",", "8", ","...
Split a vector ctype into a raw ctype and the vector length. If the given ctype is not a vector type, we raise an error. I Args: ctype (str): the ctype to possibly split into a raw ctype and the vector length Returns: tuple: the raw ctype and the vector length
[ "Split", "a", "vector", "ctype", "into", "a", "raw", "ctype", "and", "the", "vector", "length", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/utils.py#L140-L156
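Examples of the split, assuming is_vector_ctype (defined elsewhere in the module) recognises the usual OpenCL vector suffixes:

split_vector_ctype('float4')     # -> ('float', 4)
split_vector_ctype('double16')   # -> ('double', 16)
split_vector_ctype('int')        # raises ValueError: not a vector type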
cbclab/MOT
mot/lib/utils.py
device_type_from_string
def device_type_from_string(cl_device_type_str): """Converts values like ``gpu`` to a pyopencl device type string. Supported values are: ``accelerator``, ``cpu``, ``custom``, ``gpu``. If ``all`` is given, None is returned. Args: cl_device_type_str (str): The string we want to convert to a device type. Returns: cl.device_type: the pyopencl device type. """ cl_device_type_str = cl_device_type_str.upper() if hasattr(cl.device_type, cl_device_type_str): return getattr(cl.device_type, cl_device_type_str) return None
python
def device_type_from_string(cl_device_type_str): """Converts values like ``gpu`` to a pyopencl device type string. Supported values are: ``accelerator``, ``cpu``, ``custom``, ``gpu``. If ``all`` is given, None is returned. Args: cl_device_type_str (str): The string we want to convert to a device type. Returns: cl.device_type: the pyopencl device type. """ cl_device_type_str = cl_device_type_str.upper() if hasattr(cl.device_type, cl_device_type_str): return getattr(cl.device_type, cl_device_type_str) return None
[ "def", "device_type_from_string", "(", "cl_device_type_str", ")", ":", "cl_device_type_str", "=", "cl_device_type_str", ".", "upper", "(", ")", "if", "hasattr", "(", "cl", ".", "device_type", ",", "cl_device_type_str", ")", ":", "return", "getattr", "(", "cl", "...
Converts values like ``gpu`` to a pyopencl device type string. Supported values are: ``accelerator``, ``cpu``, ``custom``, ``gpu``. If ``all`` is given, None is returned. Args: cl_device_type_str (str): The string we want to convert to a device type. Returns: cl.device_type: the pyopencl device type.
[ "Converts", "values", "like", "gpu", "to", "a", "pyopencl", "device", "type", "string", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/utils.py#L171-L185
cbclab/MOT
mot/lib/utils.py
get_float_type_def
def get_float_type_def(double_precision, include_complex=True): """Get the model floating point type definition. Args: double_precision (boolean): if True we will use the double type for the mot_float_type type. Else, we will use the single precision float type for the mot_float_type type. include_complex (boolean): if we include support for complex numbers Returns: str: defines the mot_float_type types, the epsilon and the MIN and MAX values. """ if include_complex: with open(os.path.abspath(resource_filename('mot', 'data/opencl/complex.h')), 'r') as f: complex_number_support = f.read() else: complex_number_support = '' scipy_constants = ''' #define MACHEP DBL_EPSILON #define MAXLOG log(DBL_MAX) #define LANCZOS_G 6.024680040776729583740234375 /* taken from Scipy */ #define EULER 0.577215664901532860606512090082402431 /* Euler constant, from Scipy */ ''' if double_precision: return ''' #if __OPENCL_VERSION__ <= CL_VERSION_1_1 #pragma OPENCL EXTENSION cl_khr_fp64 : enable #endif #define PYOPENCL_DEFINE_CDOUBLE typedef double mot_float_type; typedef double2 mot_float_type2; typedef double4 mot_float_type4; typedef double8 mot_float_type8; typedef double16 mot_float_type16; #define MOT_EPSILON DBL_EPSILON #define MOT_MIN DBL_MIN #define MOT_MAX DBL_MAX ''' + scipy_constants + complex_number_support else: return ''' #if __OPENCL_VERSION__ <= CL_VERSION_1_1 #pragma OPENCL EXTENSION cl_khr_fp64 : enable #endif typedef float mot_float_type; typedef float2 mot_float_type2; typedef float4 mot_float_type4; typedef float8 mot_float_type8; typedef float16 mot_float_type16; #define MOT_EPSILON FLT_EPSILON #define MOT_MIN FLT_MIN #define MOT_MAX FLT_MAX ''' + scipy_constants + complex_number_support
python
def get_float_type_def(double_precision, include_complex=True): """Get the model floating point type definition. Args: double_precision (boolean): if True we will use the double type for the mot_float_type type. Else, we will use the single precision float type for the mot_float_type type. include_complex (boolean): if we include support for complex numbers Returns: str: defines the mot_float_type types, the epsilon and the MIN and MAX values. """ if include_complex: with open(os.path.abspath(resource_filename('mot', 'data/opencl/complex.h')), 'r') as f: complex_number_support = f.read() else: complex_number_support = '' scipy_constants = ''' #define MACHEP DBL_EPSILON #define MAXLOG log(DBL_MAX) #define LANCZOS_G 6.024680040776729583740234375 /* taken from Scipy */ #define EULER 0.577215664901532860606512090082402431 /* Euler constant, from Scipy */ ''' if double_precision: return ''' #if __OPENCL_VERSION__ <= CL_VERSION_1_1 #pragma OPENCL EXTENSION cl_khr_fp64 : enable #endif #define PYOPENCL_DEFINE_CDOUBLE typedef double mot_float_type; typedef double2 mot_float_type2; typedef double4 mot_float_type4; typedef double8 mot_float_type8; typedef double16 mot_float_type16; #define MOT_EPSILON DBL_EPSILON #define MOT_MIN DBL_MIN #define MOT_MAX DBL_MAX ''' + scipy_constants + complex_number_support else: return ''' #if __OPENCL_VERSION__ <= CL_VERSION_1_1 #pragma OPENCL EXTENSION cl_khr_fp64 : enable #endif typedef float mot_float_type; typedef float2 mot_float_type2; typedef float4 mot_float_type4; typedef float8 mot_float_type8; typedef float16 mot_float_type16; #define MOT_EPSILON FLT_EPSILON #define MOT_MIN FLT_MIN #define MOT_MAX FLT_MAX ''' + scipy_constants + complex_number_support
[ "def", "get_float_type_def", "(", "double_precision", ",", "include_complex", "=", "True", ")", ":", "if", "include_complex", ":", "with", "open", "(", "os", ".", "path", ".", "abspath", "(", "resource_filename", "(", "'mot'", ",", "'data/opencl/complex.h'", ")"...
Get the model floating point type definition. Args: double_precision (boolean): if True we will use the double type for the mot_float_type type. Else, we will use the single precision float type for the mot_float_type type. include_complex (boolean): if we include support for complex numbers Returns: str: defines the mot_float_type types, the epsilon and the MIN and MAX values.
[ "Get", "the", "model", "floating", "point", "type", "definition", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/utils.py#L201-L258
cbclab/MOT
mot/lib/utils.py
topological_sort
def topological_sort(data): """Topological sort the given dictionary structure. Args: data (dict); dictionary structure where the value is a list of dependencies for that given key. For example: ``{'a': (), 'b': ('a',)}``, where ``a`` depends on nothing and ``b`` depends on ``a``. Returns: tuple: the dependencies in constructor order """ def check_self_dependencies(input_data): """Check if there are self dependencies within a node. Self dependencies are for example: ``{'a': ('a',)}``. Args: input_data (dict): the input data. Of a structure similar to {key: (list of values), ...}. Raises: ValueError: if there are indeed self dependencies """ for k, v in input_data.items(): if k in v: raise ValueError('Self-dependency, {} depends on itself.'.format(k)) def prepare_input_data(input_data): """Prepares the input data by making sets of the dependencies. This automatically removes redundant items. Args: input_data (dict): the input data. Of a structure similar to {key: (list of values), ...}. Returns: dict: a copy of the input dict but with sets instead of lists for the dependencies. """ return {k: set(v) for k, v in input_data.items()} def find_items_without_dependencies(input_data): """This searches the dependencies of all the items for items that have no dependencies. For example, suppose the input is: ``{'a': ('b',)}``, then ``a`` depends on ``b`` and ``b`` depends on nothing. This class returns ``(b,)`` in this example. Args: input_data (dict): the input data. Of a structure similar to {key: (list of values), ...}. Returns: list: the list of items without any dependency. """ return list(reduce(set.union, input_data.values()) - set(input_data.keys())) def add_empty_dependencies(data): items_without_dependencies = find_items_without_dependencies(data) data.update({item: set() for item in items_without_dependencies}) def get_sorted(input_data): data = input_data while True: ordered = set(item for item, dep in data.items() if len(dep) == 0) if not ordered: break yield ordered data = {item: (dep - ordered) for item, dep in data.items() if item not in ordered} if len(data) != 0: raise ValueError('Cyclic dependencies exist ' 'among these items: {}'.format(', '.join(repr(x) for x in data.items()))) check_self_dependencies(data) if not len(data): return [] data_copy = prepare_input_data(data) add_empty_dependencies(data_copy) result = [] for d in get_sorted(data_copy): try: d = sorted(d) except TypeError: d = list(d) result.extend(d) return result
python
def topological_sort(data): """Topological sort the given dictionary structure. Args: data (dict); dictionary structure where the value is a list of dependencies for that given key. For example: ``{'a': (), 'b': ('a',)}``, where ``a`` depends on nothing and ``b`` depends on ``a``. Returns: tuple: the dependencies in constructor order """ def check_self_dependencies(input_data): """Check if there are self dependencies within a node. Self dependencies are for example: ``{'a': ('a',)}``. Args: input_data (dict): the input data. Of a structure similar to {key: (list of values), ...}. Raises: ValueError: if there are indeed self dependencies """ for k, v in input_data.items(): if k in v: raise ValueError('Self-dependency, {} depends on itself.'.format(k)) def prepare_input_data(input_data): """Prepares the input data by making sets of the dependencies. This automatically removes redundant items. Args: input_data (dict): the input data. Of a structure similar to {key: (list of values), ...}. Returns: dict: a copy of the input dict but with sets instead of lists for the dependencies. """ return {k: set(v) for k, v in input_data.items()} def find_items_without_dependencies(input_data): """This searches the dependencies of all the items for items that have no dependencies. For example, suppose the input is: ``{'a': ('b',)}``, then ``a`` depends on ``b`` and ``b`` depends on nothing. This class returns ``(b,)`` in this example. Args: input_data (dict): the input data. Of a structure similar to {key: (list of values), ...}. Returns: list: the list of items without any dependency. """ return list(reduce(set.union, input_data.values()) - set(input_data.keys())) def add_empty_dependencies(data): items_without_dependencies = find_items_without_dependencies(data) data.update({item: set() for item in items_without_dependencies}) def get_sorted(input_data): data = input_data while True: ordered = set(item for item, dep in data.items() if len(dep) == 0) if not ordered: break yield ordered data = {item: (dep - ordered) for item, dep in data.items() if item not in ordered} if len(data) != 0: raise ValueError('Cyclic dependencies exist ' 'among these items: {}'.format(', '.join(repr(x) for x in data.items()))) check_self_dependencies(data) if not len(data): return [] data_copy = prepare_input_data(data) add_empty_dependencies(data_copy) result = [] for d in get_sorted(data_copy): try: d = sorted(d) except TypeError: d = list(d) result.extend(d) return result
[ "def", "topological_sort", "(", "data", ")", ":", "def", "check_self_dependencies", "(", "input_data", ")", ":", "\"\"\"Check if there are self dependencies within a node.\n\n Self dependencies are for example: ``{'a': ('a',)}``.\n\n Args:\n input_data (dict): the in...
Topological sort the given dictionary structure. Args: data (dict); dictionary structure where the value is a list of dependencies for that given key. For example: ``{'a': (), 'b': ('a',)}``, where ``a`` depends on nothing and ``b`` depends on ``a``. Returns: tuple: the dependencies in constructor order
[ "Topological", "sort", "the", "given", "dictionary", "structure", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/utils.py#L261-L345
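A worked example using the dependency-dict format described in the docstring, where each key lists the items it depends on:

deps = {'d': ('b', 'c'), 'b': ('a',), 'c': ('a',), 'a': ()}
topological_sort(deps)   # -> ['a', 'b', 'c', 'd'], dependencies come before dependants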
cbclab/MOT
mot/lib/utils.py
is_scalar
def is_scalar(value): """Test if the given value is a scalar. This function also works with memory mapped array values, in contrast to the numpy is_scalar method. Args: value: the value to test for being a scalar value Returns: boolean: if the given value is a scalar or not """ return np.isscalar(value) or (isinstance(value, np.ndarray) and (len(np.squeeze(value).shape) == 0))
python
def is_scalar(value): """Test if the given value is a scalar. This function also works with memory mapped array values, in contrast to the numpy is_scalar method. Args: value: the value to test for being a scalar value Returns: boolean: if the given value is a scalar or not """ return np.isscalar(value) or (isinstance(value, np.ndarray) and (len(np.squeeze(value).shape) == 0))
[ "def", "is_scalar", "(", "value", ")", ":", "return", "np", ".", "isscalar", "(", "value", ")", "or", "(", "isinstance", "(", "value", ",", "np", ".", "ndarray", ")", "and", "(", "len", "(", "np", ".", "squeeze", "(", "value", ")", ".", "shape", ...
Test if the given value is a scalar. This function also works with memory mapped array values, in contrast to the numpy is_scalar method. Args: value: the value to test for being a scalar value Returns: boolean: if the given value is a scalar or not
[ "Test", "if", "the", "given", "value", "is", "a", "scalar", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/utils.py#L348-L359
cbclab/MOT
mot/lib/utils.py
all_elements_equal
def all_elements_equal(value): """Checks if all elements in the given value are equal to each other. If the input is a single value the result is trivial. If not, we compare all the values to see if they are exactly the same. Args: value (ndarray or number): a numpy array or a single number. Returns: bool: true if all elements are equal to each other, false otherwise """ if is_scalar(value): return True return np.array(value == value.flatten()[0]).all()
python
def all_elements_equal(value): """Checks if all elements in the given value are equal to each other. If the input is a single value the result is trivial. If not, we compare all the values to see if they are exactly the same. Args: value (ndarray or number): a numpy array or a single number. Returns: bool: true if all elements are equal to each other, false otherwise """ if is_scalar(value): return True return np.array(value == value.flatten()[0]).all()
[ "def", "all_elements_equal", "(", "value", ")", ":", "if", "is_scalar", "(", "value", ")", ":", "return", "True", "return", "np", ".", "array", "(", "value", "==", "value", ".", "flatten", "(", ")", "[", "0", "]", ")", ".", "all", "(", ")" ]
Checks if all elements in the given value are equal to each other. If the input is a single value the result is trivial. If not, we compare all the values to see if they are exactly the same. Args: value (ndarray or number): a numpy array or a single number. Returns: bool: true if all elements are equal to each other, false otherwise
[ "Checks", "if", "all", "elements", "in", "the", "given", "value", "are", "equal", "to", "each", "other", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/utils.py#L362-L376
cbclab/MOT
mot/lib/utils.py
get_single_value
def get_single_value(value): """Get a single value out of the given value. This is meant to be used after a call to :func:`all_elements_equal` that returned True. With this function we return a single number from the input value. Args: value (ndarray or number): a numpy array or a single number. Returns: number: a single number from the input Raises: ValueError: if not all elements are equal """ if not all_elements_equal(value): raise ValueError('Not all values are equal to each other.') if is_scalar(value): return value return value.item(0)
python
def get_single_value(value): """Get a single value out of the given value. This is meant to be used after a call to :func:`all_elements_equal` that returned True. With this function we return a single number from the input value. Args: value (ndarray or number): a numpy array or a single number. Returns: number: a single number from the input Raises: ValueError: if not all elements are equal """ if not all_elements_equal(value): raise ValueError('Not all values are equal to each other.') if is_scalar(value): return value return value.item(0)
[ "def", "get_single_value", "(", "value", ")", ":", "if", "not", "all_elements_equal", "(", "value", ")", ":", "raise", "ValueError", "(", "'Not all values are equal to each other.'", ")", "if", "is_scalar", "(", "value", ")", ":", "return", "value", "return", "v...
Get a single value out of the given value. This is meant to be used after a call to :func:`all_elements_equal` that returned True. With this function we return a single number from the input value. Args: value (ndarray or number): a numpy array or a single number. Returns: number: a single number from the input Raises: ValueError: if not all elements are equal
[ "Get", "a", "single", "value", "out", "of", "the", "given", "value", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/utils.py#L379-L399
cbclab/MOT
mot/lib/utils.py
all_logging_disabled
def all_logging_disabled(highest_level=logging.CRITICAL): """Disable all logging temporarily. A context manager that will prevent any logging messages triggered during the body from being processed. Args: highest_level: the maximum logging level that is being blocked """ previous_level = logging.root.manager.disable logging.disable(highest_level) try: yield finally: logging.disable(previous_level)
python
def all_logging_disabled(highest_level=logging.CRITICAL): """Disable all logging temporarily. A context manager that will prevent any logging messages triggered during the body from being processed. Args: highest_level: the maximum logging level that is being blocked """ previous_level = logging.root.manager.disable logging.disable(highest_level) try: yield finally: logging.disable(previous_level)
[ "def", "all_logging_disabled", "(", "highest_level", "=", "logging", ".", "CRITICAL", ")", ":", "previous_level", "=", "logging", ".", "root", ".", "manager", ".", "disable", "logging", ".", "disable", "(", "highest_level", ")", "try", ":", "yield", "finally",...
Disable all logging temporarily. A context manager that will prevent any logging messages triggered during the body from being processed. Args: highest_level: the maximum logging level that is being blocked
[ "Disable", "all", "logging", "temporarily", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/utils.py#L403-L416
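Typical use as a context manager; the generator body implies the function is wrapped with contextlib.contextmanager in the source, which is not visible in this excerpt:

import logging
with all_logging_disabled():
    logging.getLogger(__name__).error('this message is suppressed')
logging.getLogger(__name__).warning('logging works again outside the block')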
cbclab/MOT
mot/lib/utils.py
split_in_batches
def split_in_batches(nmr_elements, max_batch_size): """Split the total number of elements into batches of the specified maximum size. Examples:: split_in_batches(30, 8) -> [(0, 8), (8, 15), (16, 23), (24, 29)] for batch_start, batch_end in split_in_batches(2000, 100): array[batch_start:batch_end] Yields: tuple: the start and end point of the next batch """ offset = 0 elements_left = nmr_elements while elements_left > 0: next_batch = (offset, offset + min(elements_left, max_batch_size)) yield next_batch batch_size = min(elements_left, max_batch_size) elements_left -= batch_size offset += batch_size
python
def split_in_batches(nmr_elements, max_batch_size): """Split the total number of elements into batches of the specified maximum size. Examples:: split_in_batches(30, 8) -> [(0, 8), (8, 15), (16, 23), (24, 29)] for batch_start, batch_end in split_in_batches(2000, 100): array[batch_start:batch_end] Yields: tuple: the start and end point of the next batch """ offset = 0 elements_left = nmr_elements while elements_left > 0: next_batch = (offset, offset + min(elements_left, max_batch_size)) yield next_batch batch_size = min(elements_left, max_batch_size) elements_left -= batch_size offset += batch_size
[ "def", "split_in_batches", "(", "nmr_elements", ",", "max_batch_size", ")", ":", "offset", "=", "0", "elements_left", "=", "nmr_elements", "while", "elements_left", ">", "0", ":", "next_batch", "=", "(", "offset", ",", "offset", "+", "min", "(", "elements_left...
Split the total number of elements into batches of the specified maximum size. Examples:: split_in_batches(30, 8) -> [(0, 8), (8, 15), (16, 23), (24, 29)] for batch_start, batch_end in split_in_batches(2000, 100): array[batch_start:batch_end] Yields: tuple: the start and end point of the next batch
[ "Split", "the", "total", "number", "of", "elements", "into", "batches", "of", "the", "specified", "maximum", "size", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/utils.py#L460-L480
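Tracing the generator as written, the end index of each batch is exclusive and the last batch is simply smaller, so the call from the docstring yields slightly different tuples than the ones printed there:

list(split_in_batches(30, 8))
# -> [(0, 8), (8, 16), (16, 24), (24, 30)]
for start, end in split_in_batches(2000, 100):
    process(array[start:end])   # process and array are placeholders for the caller's own code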
cbclab/MOT
mot/lib/utils.py
covariance_to_correlations
def covariance_to_correlations(covariance): """Transform a covariance matrix into a correlations matrix. This can be seen as dividing a covariance matrix by the outer product of the diagonal. As post processing we replace the infinities and the NaNs with zeros and clip the result to [-1, 1]. Args: covariance (ndarray): a matrix of shape (n, p, p) with for n problems the covariance matrix of shape (p, p). Returns: ndarray: the correlations matrix """ diagonal_ind = np.arange(covariance.shape[1]) diagonal_els = covariance[:, diagonal_ind, diagonal_ind] result = covariance / np.sqrt(diagonal_els[:, :, None] * diagonal_els[:, None, :]) result[np.isinf(result)] = 0 return np.clip(np.nan_to_num(result), -1, 1)
python
def covariance_to_correlations(covariance): """Transform a covariance matrix into a correlations matrix. This can be seen as dividing a covariance matrix by the outer product of the diagonal. As post processing we replace the infinities and the NaNs with zeros and clip the result to [-1, 1]. Args: covariance (ndarray): a matrix of shape (n, p, p) with for n problems the covariance matrix of shape (p, p). Returns: ndarray: the correlations matrix """ diagonal_ind = np.arange(covariance.shape[1]) diagonal_els = covariance[:, diagonal_ind, diagonal_ind] result = covariance / np.sqrt(diagonal_els[:, :, None] * diagonal_els[:, None, :]) result[np.isinf(result)] = 0 return np.clip(np.nan_to_num(result), -1, 1)
[ "def", "covariance_to_correlations", "(", "covariance", ")", ":", "diagonal_ind", "=", "np", ".", "arange", "(", "covariance", ".", "shape", "[", "1", "]", ")", "diagonal_els", "=", "covariance", "[", ":", ",", "diagonal_ind", ",", "diagonal_ind", "]", "resu...
Transform a covariance matrix into a correlations matrix. This can be seen as dividing a covariance matrix by the outer product of the diagonal. As post processing we replace the infinities and the NaNs with zeros and clip the result to [-1, 1]. Args: covariance (ndarray): a matrix of shape (n, p, p) with for n problems the covariance matrix of shape (p, p). Returns: ndarray: the correlations matrix
[ "Transform", "a", "covariance", "matrix", "into", "a", "correlations", "matrix", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/utils.py#L483-L500
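A small numeric check of the normalisation, using numpy as np as in the module; each covariance entry is divided by the square root of the product of the corresponding diagonal entries:

import numpy as np
cov = np.array([[[4.0, 2.0],
                 [2.0, 9.0]]])              # one problem, 2x2 covariance matrix
covariance_to_correlations(cov)
# -> [[[1.0, 0.333...], [0.333..., 1.0]]]   since 2 / sqrt(4 * 9) == 1/3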
cbclab/MOT
mot/lib/utils.py
multiprocess_mapping
def multiprocess_mapping(func, iterable): """Multiprocess mapping the given function on the given iterable. This only works in Linux and Mac systems since Windows has no forking capability. On Windows we fall back on single processing. Also, if we reach memory limits we fall back on single cpu processing. Args: func (func): the function to apply iterable (iterable): the iterable with the elements we want to apply the function on """ if os.name == 'nt': # In Windows there is no fork. return list(map(func, iterable)) try: p = multiprocessing.Pool() return_data = list(p.imap(func, iterable)) p.close() p.join() return return_data except OSError: return list(map(func, iterable))
python
def multiprocess_mapping(func, iterable): """Multiprocess mapping the given function on the given iterable. This only works in Linux and Mac systems since Windows has no forking capability. On Windows we fall back on single processing. Also, if we reach memory limits we fall back on single cpu processing. Args: func (func): the function to apply iterable (iterable): the iterable with the elements we want to apply the function on """ if os.name == 'nt': # In Windows there is no fork. return list(map(func, iterable)) try: p = multiprocessing.Pool() return_data = list(p.imap(func, iterable)) p.close() p.join() return return_data except OSError: return list(map(func, iterable))
[ "def", "multiprocess_mapping", "(", "func", ",", "iterable", ")", ":", "if", "os", ".", "name", "==", "'nt'", ":", "# In Windows there is no fork.", "return", "list", "(", "map", "(", "func", ",", "iterable", ")", ")", "try", ":", "p", "=", "multiprocessin...
Multiprocess mapping the given function on the given iterable. This only works in Linux and Mac systems since Windows has no forking capability. On Windows we fall back on single processing. Also, if we reach memory limits we fall back on single cpu processing. Args: func (func): the function to apply iterable (iterable): the iterable with the elements we want to apply the function on
[ "Multiprocess", "mapping", "the", "given", "function", "on", "the", "given", "iterable", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/utils.py#L503-L522
cbclab/MOT
mot/lib/utils.py
parse_cl_function
def parse_cl_function(cl_code, dependencies=()): """Parse the given OpenCL string to a single SimpleCLFunction. If the string contains more than one function, we will return only the last, with all the other added as a dependency. Args: cl_code (str): the input string containing one or more functions. dependencies (Iterable[CLCodeObject]): The list of CL libraries this function depends on Returns: mot.lib.cl_function.SimpleCLFunction: the CL function for the last function in the given strings. """ from mot.lib.cl_function import SimpleCLFunction def separate_cl_functions(input_str): """Separate all the OpenCL functions. This creates a list of strings, with for each function found the OpenCL code. Args: input_str (str): the string containing one or more functions. Returns: list: a list of strings, with one string per found CL function. """ class Semantics: def __init__(self): self._functions = [] def result(self, ast): return self._functions def arglist(self, ast): return '({})'.format(', '.join(ast)) def function(self, ast): def join(items): result = '' for item in items: if isinstance(item, str): result += item else: result += join(item) return result self._functions.append(join(ast).strip()) return ast return _extract_cl_functions_parser.parse(input_str, semantics=Semantics()) functions = separate_cl_functions(cl_code) return SimpleCLFunction.from_string(functions[-1], dependencies=list(dependencies or []) + [ SimpleCLFunction.from_string(s) for s in functions[:-1]])
python
def parse_cl_function(cl_code, dependencies=()): """Parse the given OpenCL string to a single SimpleCLFunction. If the string contains more than one function, we will return only the last, with all the other added as a dependency. Args: cl_code (str): the input string containing one or more functions. dependencies (Iterable[CLCodeObject]): The list of CL libraries this function depends on Returns: mot.lib.cl_function.SimpleCLFunction: the CL function for the last function in the given strings. """ from mot.lib.cl_function import SimpleCLFunction def separate_cl_functions(input_str): """Separate all the OpenCL functions. This creates a list of strings, with for each function found the OpenCL code. Args: input_str (str): the string containing one or more functions. Returns: list: a list of strings, with one string per found CL function. """ class Semantics: def __init__(self): self._functions = [] def result(self, ast): return self._functions def arglist(self, ast): return '({})'.format(', '.join(ast)) def function(self, ast): def join(items): result = '' for item in items: if isinstance(item, str): result += item else: result += join(item) return result self._functions.append(join(ast).strip()) return ast return _extract_cl_functions_parser.parse(input_str, semantics=Semantics()) functions = separate_cl_functions(cl_code) return SimpleCLFunction.from_string(functions[-1], dependencies=list(dependencies or []) + [ SimpleCLFunction.from_string(s) for s in functions[:-1]])
[ "def", "parse_cl_function", "(", "cl_code", ",", "dependencies", "=", "(", ")", ")", ":", "from", "mot", ".", "lib", ".", "cl_function", "import", "SimpleCLFunction", "def", "separate_cl_functions", "(", "input_str", ")", ":", "\"\"\"Separate all the OpenCL function...
Parse the given OpenCL string to a single SimpleCLFunction. If the string contains more than one function, we will return only the last, with all the others added as dependencies. Args: cl_code (str): the input string containing one or more functions. dependencies (Iterable[CLCodeObject]): The list of CL libraries this function depends on Returns: mot.lib.cl_function.SimpleCLFunction: the CL function for the last function in the given string.
[ "Parse", "the", "given", "OpenCL", "string", "to", "a", "single", "SimpleCLFunction", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/utils.py#L546-L600
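A small, hedged sketch of parse_cl_function based only on the signature and docstring above. The OpenCL source is an illustrative example; the last function in the string is assumed to become the returned SimpleCLFunction, with the earlier one attached as a dependency.

from mot.lib.utils import parse_cl_function

cl_source = '''
double square(double x){
    return x * x;
}

double sum_of_squares(double a, double b){
    return square(a) + square(b);
}
'''

# Returns a SimpleCLFunction for sum_of_squares(); square() is added to its
# dependency list so the generated kernel code stays self-contained.
func = parse_cl_function(cl_source)
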
cbclab/MOT
mot/lib/utils.py
split_cl_function
def split_cl_function(cl_str): """Split an CL function into a return type, function name, parameters list and the body. Args: cl_str (str): the CL code to parse and plit into components Returns: tuple: string elements for the return type, function name, parameter list and the body """ class Semantics: def __init__(self): self._return_type = '' self._function_name = '' self._parameter_list = [] self._cl_body = '' def result(self, ast): return self._return_type, self._function_name, self._parameter_list, self._cl_body def address_space(self, ast): self._return_type = ast.strip() + ' ' return ast def data_type(self, ast): self._return_type += ''.join(ast).strip() return ast def function_name(self, ast): self._function_name = ast.strip() return ast def arglist(self, ast): if ast != '()': self._parameter_list = ast return ast def body(self, ast): def join(items): result = '' for item in items: if isinstance(item, str): result += item else: result += join(item) return result self._cl_body = join(ast).strip()[1:-1] return ast return _split_cl_function_parser.parse(cl_str, semantics=Semantics())
python
def split_cl_function(cl_str): """Split an CL function into a return type, function name, parameters list and the body. Args: cl_str (str): the CL code to parse and plit into components Returns: tuple: string elements for the return type, function name, parameter list and the body """ class Semantics: def __init__(self): self._return_type = '' self._function_name = '' self._parameter_list = [] self._cl_body = '' def result(self, ast): return self._return_type, self._function_name, self._parameter_list, self._cl_body def address_space(self, ast): self._return_type = ast.strip() + ' ' return ast def data_type(self, ast): self._return_type += ''.join(ast).strip() return ast def function_name(self, ast): self._function_name = ast.strip() return ast def arglist(self, ast): if ast != '()': self._parameter_list = ast return ast def body(self, ast): def join(items): result = '' for item in items: if isinstance(item, str): result += item else: result += join(item) return result self._cl_body = join(ast).strip()[1:-1] return ast return _split_cl_function_parser.parse(cl_str, semantics=Semantics())
[ "def", "split_cl_function", "(", "cl_str", ")", ":", "class", "Semantics", ":", "def", "__init__", "(", "self", ")", ":", "self", ".", "_return_type", "=", "''", "self", ".", "_function_name", "=", "''", "self", ".", "_parameter_list", "=", "[", "]", "se...
Split a CL function into a return type, function name, parameter list and the body. Args: cl_str (str): the CL code to parse and split into components Returns: tuple: string elements for the return type, function name, parameter list and the body
[ "Split", "a", "CL", "function", "into", "a", "return", "type", "function", "name", "parameter", "list", "and", "the", "body", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/utils.py#L603-L653
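A hedged sketch of split_cl_function. The exact whitespace of the returned body and the formatting of the parameter list depend on the parser, so the comment below only indicates the rough shape of the result; the input CL snippet is illustrative.

from mot.lib.utils import split_cl_function

return_type, name, params, body = split_cl_function(
    'double add(double a, double b){ return a + b; }')

# Roughly: return_type -> 'double', name -> 'add',
# params -> the two parameter declarations, body -> 'return a + b;'
print(return_type, name, params, body)
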
ksbg/sparklanes
sparklanes/_framework/log.py
make_default_logger
def make_default_logger(name=INTERNAL_LOGGER_NAME, level=logging.INFO, fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s'): """Create a logger with the default configuration""" logger = logging.getLogger(name) logger.setLevel(level) if not logger.handlers: handler = logging.StreamHandler(sys.stderr) handler.setLevel(level) formatter = logging.Formatter(fmt) handler.setFormatter(formatter) logger.addHandler(handler) return logger
python
def make_default_logger(name=INTERNAL_LOGGER_NAME, level=logging.INFO, fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s'): """Create a logger with the default configuration""" logger = logging.getLogger(name) logger.setLevel(level) if not logger.handlers: handler = logging.StreamHandler(sys.stderr) handler.setLevel(level) formatter = logging.Formatter(fmt) handler.setFormatter(formatter) logger.addHandler(handler) return logger
[ "def", "make_default_logger", "(", "name", "=", "INTERNAL_LOGGER_NAME", ",", "level", "=", "logging", ".", "INFO", ",", "fmt", "=", "'%(asctime)s - %(name)s - %(levelname)s - %(message)s'", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "name", ")", "l...
Create a logger with the default configuration
[ "Create", "a", "logger", "with", "the", "default", "configuration" ]
train
https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/log.py#L8-L20
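A minimal sketch of make_default_logger; the import path is taken from the record's file path, and the logger name below is only an illustrative override of the INTERNAL_LOGGER_NAME default shown in the signature.

import logging

from sparklanes._framework.log import make_default_logger

# Creates (or reuses) a logger that writes to stderr with the default
# timestamped format; repeated calls do not attach duplicate handlers.
log = make_default_logger(name='my_lane_logger', level=logging.DEBUG)
log.info('starting lane')
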
cbclab/MOT
mot/lib/cl_environments.py
CLEnvironment.is_gpu
def is_gpu(self): """Check if the device associated with this environment is a GPU. Returns: boolean: True if the device is an GPU, false otherwise. """ return self._device.get_info(cl.device_info.TYPE) == cl.device_type.GPU
python
def is_gpu(self): """Check if the device associated with this environment is a GPU. Returns: boolean: True if the device is an GPU, false otherwise. """ return self._device.get_info(cl.device_info.TYPE) == cl.device_type.GPU
[ "def", "is_gpu", "(", "self", ")", ":", "return", "self", ".", "_device", ".", "get_info", "(", "cl", ".", "device_info", ".", "TYPE", ")", "==", "cl", ".", "device_type", ".", "GPU" ]
Check if the device associated with this environment is a GPU. Returns: boolean: True if the device is a GPU, False otherwise.
[ "Check", "if", "the", "device", "associated", "with", "this", "environment", "is", "a", "GPU", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/cl_environments.py#L79-L85
cbclab/MOT
mot/lib/cl_environments.py
CLEnvironment.is_cpu
def is_cpu(self): """Check if the device associated with this environment is a CPU. Returns: boolean: True if the device is an CPU, false otherwise. """ return self._device.get_info(cl.device_info.TYPE) == cl.device_type.CPU
python
def is_cpu(self): """Check if the device associated with this environment is a CPU. Returns: boolean: True if the device is an CPU, false otherwise. """ return self._device.get_info(cl.device_info.TYPE) == cl.device_type.CPU
[ "def", "is_cpu", "(", "self", ")", ":", "return", "self", ".", "_device", ".", "get_info", "(", "cl", ".", "device_info", ".", "TYPE", ")", "==", "cl", ".", "device_type", ".", "CPU" ]
Check if the device associated with this environment is a CPU. Returns: boolean: True if the device is a CPU, False otherwise.
[ "Check", "if", "the", "device", "associated", "with", "this", "environment", "is", "a", "CPU", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/cl_environments.py#L88-L94
cbclab/MOT
mot/lib/cl_environments.py
CLEnvironmentFactory.single_device
def single_device(cl_device_type='GPU', platform=None, fallback_to_any_device_type=False): """Get a list containing a single device environment, for a device of the given type on the given platform. This will only fetch devices that support double (possibly only double with a pragma defined, but still, it should support double). Args: cl_device_type (cl.device_type.* or string): The type of the device we want, can be a opencl device type or a string matching 'GPU', 'CPU' or 'ALL'. platform (opencl platform): The opencl platform to select the devices from fallback_to_any_device_type (boolean): If True, try to fallback to any possible device in the system. Returns: list of CLEnvironment: List with one element, the CL runtime environment requested. """ if isinstance(cl_device_type, str): cl_device_type = device_type_from_string(cl_device_type) device = None if platform is None: platforms = cl.get_platforms() else: platforms = [platform] for platform in platforms: devices = platform.get_devices(device_type=cl_device_type) for dev in devices: if device_supports_double(dev): try: env = CLEnvironment(platform, dev) return [env] except cl.RuntimeError: pass if not device: if fallback_to_any_device_type: return cl.get_platforms()[0].get_devices() else: raise ValueError('No devices of the specified type ({}) found.'.format( cl.device_type.to_string(cl_device_type))) raise ValueError('No suitable OpenCL device found.')
python
def single_device(cl_device_type='GPU', platform=None, fallback_to_any_device_type=False): """Get a list containing a single device environment, for a device of the given type on the given platform. This will only fetch devices that support double (possibly only double with a pragma defined, but still, it should support double). Args: cl_device_type (cl.device_type.* or string): The type of the device we want, can be a opencl device type or a string matching 'GPU', 'CPU' or 'ALL'. platform (opencl platform): The opencl platform to select the devices from fallback_to_any_device_type (boolean): If True, try to fallback to any possible device in the system. Returns: list of CLEnvironment: List with one element, the CL runtime environment requested. """ if isinstance(cl_device_type, str): cl_device_type = device_type_from_string(cl_device_type) device = None if platform is None: platforms = cl.get_platforms() else: platforms = [platform] for platform in platforms: devices = platform.get_devices(device_type=cl_device_type) for dev in devices: if device_supports_double(dev): try: env = CLEnvironment(platform, dev) return [env] except cl.RuntimeError: pass if not device: if fallback_to_any_device_type: return cl.get_platforms()[0].get_devices() else: raise ValueError('No devices of the specified type ({}) found.'.format( cl.device_type.to_string(cl_device_type))) raise ValueError('No suitable OpenCL device found.')
[ "def", "single_device", "(", "cl_device_type", "=", "'GPU'", ",", "platform", "=", "None", ",", "fallback_to_any_device_type", "=", "False", ")", ":", "if", "isinstance", "(", "cl_device_type", ",", "str", ")", ":", "cl_device_type", "=", "device_type_from_string"...
Get a list containing a single device environment, for a device of the given type on the given platform. This will only fetch devices that support double (possibly only double with a pragma defined, but still, it should support double). Args: cl_device_type (cl.device_type.* or string): The type of the device we want, can be an opencl device type or a string matching 'GPU', 'CPU' or 'ALL'. platform (opencl platform): The opencl platform to select the devices from fallback_to_any_device_type (boolean): If True, try to fall back to any possible device in the system. Returns: list of CLEnvironment: List with one element, the CL runtime environment requested.
[ "Get", "a", "list", "containing", "a", "single", "device", "environment", "for", "a", "device", "of", "the", "given", "type", "on", "the", "given", "platform", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/cl_environments.py#L164-L207
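A hedged usage sketch for CLEnvironmentFactory.single_device; what it actually returns depends on the OpenCL platforms and devices installed on the machine, and the argument values below are illustrative.

from mot.lib.cl_environments import CLEnvironmentFactory

# Ask for one double-precision capable GPU; if none is found, fall back to
# any available device type instead of raising a ValueError.
envs = CLEnvironmentFactory.single_device(cl_device_type='GPU',
                                          fallback_to_any_device_type=True)
print(envs[0])
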
cbclab/MOT
mot/lib/cl_environments.py
CLEnvironmentFactory.all_devices
def all_devices(cl_device_type=None, platform=None): """Get multiple device environments, optionally only of the indicated type. This will only fetch devices that support double point precision. Args: cl_device_type (cl.device_type.* or string): The type of the device we want, can be a opencl device type or a string matching 'GPU' or 'CPU'. platform (opencl platform): The opencl platform to select the devices from Returns: list of CLEnvironment: List with the CL device environments. """ if isinstance(cl_device_type, str): cl_device_type = device_type_from_string(cl_device_type) runtime_list = [] if platform is None: platforms = cl.get_platforms() else: platforms = [platform] for platform in platforms: if cl_device_type: devices = platform.get_devices(device_type=cl_device_type) else: devices = platform.get_devices() for device in devices: if device_supports_double(device): env = CLEnvironment(platform, device) runtime_list.append(env) return runtime_list
python
def all_devices(cl_device_type=None, platform=None): """Get multiple device environments, optionally only of the indicated type. This will only fetch devices that support double point precision. Args: cl_device_type (cl.device_type.* or string): The type of the device we want, can be a opencl device type or a string matching 'GPU' or 'CPU'. platform (opencl platform): The opencl platform to select the devices from Returns: list of CLEnvironment: List with the CL device environments. """ if isinstance(cl_device_type, str): cl_device_type = device_type_from_string(cl_device_type) runtime_list = [] if platform is None: platforms = cl.get_platforms() else: platforms = [platform] for platform in platforms: if cl_device_type: devices = platform.get_devices(device_type=cl_device_type) else: devices = platform.get_devices() for device in devices: if device_supports_double(device): env = CLEnvironment(platform, device) runtime_list.append(env) return runtime_list
[ "def", "all_devices", "(", "cl_device_type", "=", "None", ",", "platform", "=", "None", ")", ":", "if", "isinstance", "(", "cl_device_type", ",", "str", ")", ":", "cl_device_type", "=", "device_type_from_string", "(", "cl_device_type", ")", "runtime_list", "=", ...
Get multiple device environments, optionally only of the indicated type. This will only fetch devices that support double precision. Args: cl_device_type (cl.device_type.* or string): The type of the device we want, can be an opencl device type or a string matching 'GPU' or 'CPU'. platform (opencl platform): The opencl platform to select the devices from Returns: list of CLEnvironment: List with the CL device environments.
[ "Get", "multiple", "device", "environments", "optionally", "only", "of", "the", "indicated", "type", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/cl_environments.py#L210-L244
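A short sketch of all_devices, again hedged on the local OpenCL installation; the platform.name attribute used below is the one this module itself reads in smart_device_selection.

from mot.lib.cl_environments import CLEnvironmentFactory

# Every double-precision capable device, across all platforms; pass
# cl_device_type='CPU' or 'GPU' to restrict the device type.
for env in CLEnvironmentFactory.all_devices():
    print(env.platform.name)
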
cbclab/MOT
mot/lib/cl_environments.py
CLEnvironmentFactory.smart_device_selection
def smart_device_selection(preferred_device_type=None): """Get a list of device environments that is suitable for use in MOT. Basically this gets the total list of devices using all_devices() and applies a filter on it. This filter does the following: 1) if the 'AMD Accelerated Parallel Processing' is available remove all environments using the 'Clover' platform. More things may be implemented in the future. Args: preferred_device_type (str): the preferred device type, one of 'CPU', 'GPU' or 'APU'. If no devices of this type can be found, we will use any other device available. Returns: list of CLEnvironment: List with the CL device environments. """ cl_environments = CLEnvironmentFactory.all_devices(cl_device_type=preferred_device_type) platform_names = [env.platform.name for env in cl_environments] has_amd_pro_platform = any('AMD Accelerated Parallel Processing' in name for name in platform_names) if has_amd_pro_platform: return list(filter(lambda env: 'Clover' not in env.platform.name, cl_environments)) if preferred_device_type is not None and not len(cl_environments): return CLEnvironmentFactory.all_devices() return cl_environments
python
def smart_device_selection(preferred_device_type=None): """Get a list of device environments that is suitable for use in MOT. Basically this gets the total list of devices using all_devices() and applies a filter on it. This filter does the following: 1) if the 'AMD Accelerated Parallel Processing' is available remove all environments using the 'Clover' platform. More things may be implemented in the future. Args: preferred_device_type (str): the preferred device type, one of 'CPU', 'GPU' or 'APU'. If no devices of this type can be found, we will use any other device available. Returns: list of CLEnvironment: List with the CL device environments. """ cl_environments = CLEnvironmentFactory.all_devices(cl_device_type=preferred_device_type) platform_names = [env.platform.name for env in cl_environments] has_amd_pro_platform = any('AMD Accelerated Parallel Processing' in name for name in platform_names) if has_amd_pro_platform: return list(filter(lambda env: 'Clover' not in env.platform.name, cl_environments)) if preferred_device_type is not None and not len(cl_environments): return CLEnvironmentFactory.all_devices() return cl_environments
[ "def", "smart_device_selection", "(", "preferred_device_type", "=", "None", ")", ":", "cl_environments", "=", "CLEnvironmentFactory", ".", "all_devices", "(", "cl_device_type", "=", "preferred_device_type", ")", "platform_names", "=", "[", "env", ".", "platform", ".",...
Get a list of device environments that is suitable for use in MOT. Basically this gets the total list of devices using all_devices() and applies a filter on it. This filter does the following: 1) if the 'AMD Accelerated Parallel Processing' platform is available, remove all environments using the 'Clover' platform. More things may be implemented in the future. Args: preferred_device_type (str): the preferred device type, one of 'CPU', 'GPU' or 'APU'. If no devices of this type can be found, we will use any other device available. Returns: list of CLEnvironment: List with the CL device environments.
[ "Get", "a", "list", "of", "device", "environments", "that", "is", "suitable", "for", "use", "in", "MOT", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/cl_environments.py#L247-L275
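A sketch of the convenience selector; the result depends on the installed drivers, and the Clover filtering only applies when the proprietary AMD platform is also present.

from mot.lib.cl_environments import CLEnvironmentFactory

# Prefer GPUs but silently fall back to any double-capable device;
# Clover-based environments are dropped when 'AMD Accelerated Parallel
# Processing' is available as well.
envs = CLEnvironmentFactory.smart_device_selection(preferred_device_type='GPU')
for env in envs:
    print(env.platform.name)
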
cbclab/MOT
mot/mcmc_diagnostics.py
multivariate_ess
def multivariate_ess(samples, batch_size_generator=None): r"""Estimate the multivariate Effective Sample Size for the samples of every problem. This essentially applies :func:`estimate_multivariate_ess` to every problem. Args: samples (ndarray, dict or generator): either a matrix of shape (d, p, n) with d problems, p parameters and n samples, or a dictionary with for every parameter a matrix with shape (d, n) or, finally, a generator function that yields sample arrays of shape (p, n). batch_size_generator (MultiVariateESSBatchSizeGenerator): the batch size generator, tells us how many batches and of which size we use in estimating the minimum ESS. Returns: ndarray: the multivariate ESS per problem """ samples_generator = _get_sample_generator(samples) return np.array(multiprocess_mapping(_MultivariateESSMultiProcessing(batch_size_generator), samples_generator()))
python
def multivariate_ess(samples, batch_size_generator=None): r"""Estimate the multivariate Effective Sample Size for the samples of every problem. This essentially applies :func:`estimate_multivariate_ess` to every problem. Args: samples (ndarray, dict or generator): either a matrix of shape (d, p, n) with d problems, p parameters and n samples, or a dictionary with for every parameter a matrix with shape (d, n) or, finally, a generator function that yields sample arrays of shape (p, n). batch_size_generator (MultiVariateESSBatchSizeGenerator): the batch size generator, tells us how many batches and of which size we use in estimating the minimum ESS. Returns: ndarray: the multivariate ESS per problem """ samples_generator = _get_sample_generator(samples) return np.array(multiprocess_mapping(_MultivariateESSMultiProcessing(batch_size_generator), samples_generator()))
[ "def", "multivariate_ess", "(", "samples", ",", "batch_size_generator", "=", "None", ")", ":", "samples_generator", "=", "_get_sample_generator", "(", "samples", ")", "return", "np", ".", "array", "(", "multiprocess_mapping", "(", "_MultivariateESSMultiProcessing", "(...
Estimate the multivariate Effective Sample Size for the samples of every problem. This essentially applies :func:`estimate_multivariate_ess` to every problem. Args: samples (ndarray, dict or generator): either a matrix of shape (d, p, n) with d problems, p parameters and n samples, or a dictionary with for every parameter a matrix with shape (d, n) or, finally, a generator function that yields sample arrays of shape (p, n). batch_size_generator (MultiVariateESSBatchSizeGenerator): the batch size generator, tells us how many batches and of which size we use in estimating the minimum ESS. Returns: ndarray: the multivariate ESS per problem
[ "Estimate", "the", "multivariate", "Effective", "Sample", "Size", "for", "the", "samples", "of", "every", "problem", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/mcmc_diagnostics.py#L21-L37
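An input-layout sketch for multivariate_ess using synthetic chains; the (d, p, n) convention comes straight from the docstring, and real MCMC output would replace the random data used here.

import numpy as np
from mot.mcmc_diagnostics import multivariate_ess

# 4 problems, 3 parameters, 2000 samples each: shape (d, p, n).
samples = np.random.normal(size=(4, 3, 2000))
ess = multivariate_ess(samples)
print(ess.shape)  # one multivariate ESS per problem -> (4,)
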
cbclab/MOT
mot/mcmc_diagnostics.py
univariate_ess
def univariate_ess(samples, method='standard_error', **kwargs): r"""Estimate the univariate Effective Sample Size for the samples of every problem. This computes the ESS using: .. math:: ESS(X) = n * \frac{\lambda^{2}}{\sigma^{2}} Where :math:`\lambda` is the standard deviation of the chain and :math:`\sigma` is estimated using the monte carlo standard error (which in turn is, by default, estimated using a batch means estimator). Args: samples (ndarray, dict or generator): either a matrix of shape (d, p, n) with d problems, p parameters and n samples, or a dictionary with for every parameter a matrix with shape (d, n) or, finally, a generator function that yields sample arrays of shape (p, n). method (str): one of 'autocorrelation' or 'standard_error' defaults to 'standard_error'. If 'autocorrelation' is chosen we apply the function: :func:`estimate_univariate_ess_autocorrelation`, if 'standard_error` is choosen we apply the function: :func:`estimate_univariate_ess_standard_error`. **kwargs: passed to the chosen compute method Returns: ndarray: a matrix of size (d, p) with for every problem and every parameter an ESS. References: * Flegal, J.M., Haran, M., and Jones, G.L. (2008). "Markov chain Monte Carlo: Can We Trust the Third Significant Figure?". Statistical Science, 23, p. 250-260. * Marc S. Meketon and Bruce Schmeiser. 1984. Overlapping batch means: something for nothing?. In Proceedings of the 16th conference on Winter simulation (WSC '84), Sallie Sheppard (Ed.). IEEE Press, Piscataway, NJ, USA, 226-230. """ samples_generator = _get_sample_generator(samples) return np.array(multiprocess_mapping(_UnivariateESSMultiProcessing(method, **kwargs), samples_generator()))
python
def univariate_ess(samples, method='standard_error', **kwargs): r"""Estimate the univariate Effective Sample Size for the samples of every problem. This computes the ESS using: .. math:: ESS(X) = n * \frac{\lambda^{2}}{\sigma^{2}} Where :math:`\lambda` is the standard deviation of the chain and :math:`\sigma` is estimated using the monte carlo standard error (which in turn is, by default, estimated using a batch means estimator). Args: samples (ndarray, dict or generator): either a matrix of shape (d, p, n) with d problems, p parameters and n samples, or a dictionary with for every parameter a matrix with shape (d, n) or, finally, a generator function that yields sample arrays of shape (p, n). method (str): one of 'autocorrelation' or 'standard_error' defaults to 'standard_error'. If 'autocorrelation' is chosen we apply the function: :func:`estimate_univariate_ess_autocorrelation`, if 'standard_error` is choosen we apply the function: :func:`estimate_univariate_ess_standard_error`. **kwargs: passed to the chosen compute method Returns: ndarray: a matrix of size (d, p) with for every problem and every parameter an ESS. References: * Flegal, J.M., Haran, M., and Jones, G.L. (2008). "Markov chain Monte Carlo: Can We Trust the Third Significant Figure?". Statistical Science, 23, p. 250-260. * Marc S. Meketon and Bruce Schmeiser. 1984. Overlapping batch means: something for nothing?. In Proceedings of the 16th conference on Winter simulation (WSC '84), Sallie Sheppard (Ed.). IEEE Press, Piscataway, NJ, USA, 226-230. """ samples_generator = _get_sample_generator(samples) return np.array(multiprocess_mapping(_UnivariateESSMultiProcessing(method, **kwargs), samples_generator()))
[ "def", "univariate_ess", "(", "samples", ",", "method", "=", "'standard_error'", ",", "*", "*", "kwargs", ")", ":", "samples_generator", "=", "_get_sample_generator", "(", "samples", ")", "return", "np", ".", "array", "(", "multiprocess_mapping", "(", "_Univaria...
Estimate the univariate Effective Sample Size for the samples of every problem. This computes the ESS using: .. math:: ESS(X) = n * \frac{\lambda^{2}}{\sigma^{2}} Where :math:`\lambda` is the standard deviation of the chain and :math:`\sigma` is estimated using the monte carlo standard error (which in turn is, by default, estimated using a batch means estimator). Args: samples (ndarray, dict or generator): either a matrix of shape (d, p, n) with d problems, p parameters and n samples, or a dictionary with for every parameter a matrix with shape (d, n) or, finally, a generator function that yields sample arrays of shape (p, n). method (str): one of 'autocorrelation' or 'standard_error', defaults to 'standard_error'. If 'autocorrelation' is chosen we apply the function: :func:`estimate_univariate_ess_autocorrelation`, if 'standard_error' is chosen we apply the function: :func:`estimate_univariate_ess_standard_error`. **kwargs: passed to the chosen compute method Returns: ndarray: a matrix of size (d, p) with for every problem and every parameter an ESS. References: * Flegal, J.M., Haran, M., and Jones, G.L. (2008). "Markov chain Monte Carlo: Can We Trust the Third Significant Figure?". Statistical Science, 23, p. 250-260. * Marc S. Meketon and Bruce Schmeiser. 1984. Overlapping batch means: something for nothing?. In Proceedings of the 16th conference on Winter simulation (WSC '84), Sallie Sheppard (Ed.). IEEE Press, Piscataway, NJ, USA, 226-230.
[ "Estimate", "the", "univariate", "Effective", "Sample", "Size", "for", "the", "samples", "of", "every", "problem", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/mcmc_diagnostics.py#L50-L82
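The same synthetic layout for the univariate variant; 'standard_error' is the default method named in the signature, and the sample data is illustrative.

import numpy as np
from mot.mcmc_diagnostics import univariate_ess

samples = np.random.normal(size=(4, 3, 2000))   # (problems, parameters, samples)
ess = univariate_ess(samples, method='standard_error')
print(ess.shape)  # one ESS per problem and parameter -> (4, 3)
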
cbclab/MOT
mot/mcmc_diagnostics.py
_get_sample_generator
def _get_sample_generator(samples): """Get a sample generator from the given polymorphic input. Args: samples (ndarray, dict or generator): either an matrix of shape (d, p, n) with d problems, p parameters and n samples, or a dictionary with for every parameter a matrix with shape (d, n) or, finally, a generator function that yields sample arrays of shape (p, n). Returns: generator: a generator that yields a matrix of size (p, n) for every problem in the input. """ if isinstance(samples, Mapping): def samples_generator(): for ind in range(samples[list(samples.keys())[0]].shape[0]): yield np.array([samples[s][ind, :] for s in sorted(samples)]) elif isinstance(samples, np.ndarray): def samples_generator(): for ind in range(samples.shape[0]): yield samples[ind] else: samples_generator = samples return samples_generator
python
def _get_sample_generator(samples): """Get a sample generator from the given polymorphic input. Args: samples (ndarray, dict or generator): either an matrix of shape (d, p, n) with d problems, p parameters and n samples, or a dictionary with for every parameter a matrix with shape (d, n) or, finally, a generator function that yields sample arrays of shape (p, n). Returns: generator: a generator that yields a matrix of size (p, n) for every problem in the input. """ if isinstance(samples, Mapping): def samples_generator(): for ind in range(samples[list(samples.keys())[0]].shape[0]): yield np.array([samples[s][ind, :] for s in sorted(samples)]) elif isinstance(samples, np.ndarray): def samples_generator(): for ind in range(samples.shape[0]): yield samples[ind] else: samples_generator = samples return samples_generator
[ "def", "_get_sample_generator", "(", "samples", ")", ":", "if", "isinstance", "(", "samples", ",", "Mapping", ")", ":", "def", "samples_generator", "(", ")", ":", "for", "ind", "in", "range", "(", "samples", "[", "list", "(", "samples", ".", "keys", "(",...
Get a sample generator from the given polymorphic input. Args: samples (ndarray, dict or generator): either a matrix of shape (d, p, n) with d problems, p parameters and n samples, or a dictionary with for every parameter a matrix with shape (d, n) or, finally, a generator function that yields sample arrays of shape (p, n). Returns: generator: a generator that yields a matrix of size (p, n) for every problem in the input.
[ "Get", "a", "sample", "generator", "from", "the", "given", "polymorphic", "input", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/mcmc_diagnostics.py#L105-L126
cbclab/MOT
mot/mcmc_diagnostics.py
get_auto_correlation
def get_auto_correlation(chain, lag): r"""Estimates the auto correlation for the given chain (1d vector) with the given lag. Given a lag :math:`k`, the auto correlation coefficient :math:`\rho_{k}` is estimated as: .. math:: \hat{\rho}_{k} = \frac{E[(X_{t} - \mu)(X_{t + k} - \mu)]}{\sigma^{2}} Please note that this equation only works for lags :math:`k < n` where :math:`n` is the number of samples in the chain. Args: chain (ndarray): the vector with the samples lag (int): the lag to use in the autocorrelation computation Returns: float: the autocorrelation with the given lag """ normalized_chain = chain - np.mean(chain, dtype=np.float64) lagged_mean = np.mean(normalized_chain[:len(chain) - lag] * normalized_chain[lag:], dtype=np.float64) return lagged_mean / np.var(chain, dtype=np.float64)
python
def get_auto_correlation(chain, lag): r"""Estimates the auto correlation for the given chain (1d vector) with the given lag. Given a lag :math:`k`, the auto correlation coefficient :math:`\rho_{k}` is estimated as: .. math:: \hat{\rho}_{k} = \frac{E[(X_{t} - \mu)(X_{t + k} - \mu)]}{\sigma^{2}} Please note that this equation only works for lags :math:`k < n` where :math:`n` is the number of samples in the chain. Args: chain (ndarray): the vector with the samples lag (int): the lag to use in the autocorrelation computation Returns: float: the autocorrelation with the given lag """ normalized_chain = chain - np.mean(chain, dtype=np.float64) lagged_mean = np.mean(normalized_chain[:len(chain) - lag] * normalized_chain[lag:], dtype=np.float64) return lagged_mean / np.var(chain, dtype=np.float64)
[ "def", "get_auto_correlation", "(", "chain", ",", "lag", ")", ":", "normalized_chain", "=", "chain", "-", "np", ".", "mean", "(", "chain", ",", "dtype", "=", "np", ".", "float64", ")", "lagged_mean", "=", "np", ".", "mean", "(", "normalized_chain", "[", ...
Estimates the auto correlation for the given chain (1d vector) with the given lag. Given a lag :math:`k`, the auto correlation coefficient :math:`\rho_{k}` is estimated as: .. math:: \hat{\rho}_{k} = \frac{E[(X_{t} - \mu)(X_{t + k} - \mu)]}{\sigma^{2}} Please note that this equation only works for lags :math:`k < n` where :math:`n` is the number of samples in the chain. Args: chain (ndarray): the vector with the samples lag (int): the lag to use in the autocorrelation computation Returns: float: the autocorrelation with the given lag
[ "Estimates", "the", "auto", "correlation", "for", "the", "given", "chain", "(", "1d", "vector", ")", "with", "the", "given", "lag", "." ]
train
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/mcmc_diagnostics.py#L129-L150
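A NumPy cross-check of the lag-k estimator shown above; it simply re-evaluates the same expression E[(X_t - mu)(X_{t+k} - mu)] / sigma^2 directly on a synthetic chain, so it should agree with the library call up to floating-point noise.

import numpy as np
from mot.mcmc_diagnostics import get_auto_correlation

chain = np.random.normal(size=10000)
lag = 5

# Direct evaluation of the same estimator for comparison.
centered = chain - chain.mean()
rho_manual = np.mean(centered[:-lag] * centered[lag:]) / chain.var()

rho = get_auto_correlation(chain, lag)
print(np.isclose(rho, rho_manual))  # expected: True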