repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
BreakingBytes/simkit
simkit/core/models.py
Model.add
def add(self, layer, items): """ Add items in model. """ for k in items.iterkeys(): if k in self.model[layer]: raise Exception('item %s is already in layer %s' % (k, layer)) self.model[layer].update(items) # this should also update Layer.layer, the layer data # same as calling layer constructor # so now just need to add items to the layer for k, v in items.iteritems(): getattr(self, layer).add(k, v['module'], v.get('package'))
python
def add(self, layer, items): """ Add items in model. """ for k in items.iterkeys(): if k in self.model[layer]: raise Exception('item %s is already in layer %s' % (k, layer)) self.model[layer].update(items) # this should also update Layer.layer, the layer data # same as calling layer constructor # so now just need to add items to the layer for k, v in items.iteritems(): getattr(self, layer).add(k, v['module'], v.get('package'))
[ "def", "add", "(", "self", ",", "layer", ",", "items", ")", ":", "for", "k", "in", "items", ".", "iterkeys", "(", ")", ":", "if", "k", "in", "self", ".", "model", "[", "layer", "]", ":", "raise", "Exception", "(", "'item %s is already in layer %s'", ...
Add items in model.
[ "Add", "items", "in", "model", "." ]
205163d879d3880b6c9ef609f1b723a58773026b
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/models.py#L266-L278
train
49,400
BreakingBytes/simkit
simkit/core/models.py
Model.delete
def delete(self, layer, items): """ Delete items in model. """ # Use edit to get the layer obj containing item items = _listify(items) # make items a list if it's not layer_obj = self.edit(layer, dict.fromkeys(items), delete=True) for k in items: if k in layer_obj.layer: layer_obj.delete(k) else: raise AttributeError('item %s missing from layer %s' % (k, layer))
python
def delete(self, layer, items): """ Delete items in model. """ # Use edit to get the layer obj containing item items = _listify(items) # make items a list if it's not layer_obj = self.edit(layer, dict.fromkeys(items), delete=True) for k in items: if k in layer_obj.layer: layer_obj.delete(k) else: raise AttributeError('item %s missing from layer %s' % (k, layer))
[ "def", "delete", "(", "self", ",", "layer", ",", "items", ")", ":", "# Use edit to get the layer obj containing item", "items", "=", "_listify", "(", "items", ")", "# make items a list if it's not", "layer_obj", "=", "self", ".", "edit", "(", "layer", ",", "dict",...
Delete items in model.
[ "Delete", "items", "in", "model", "." ]
205163d879d3880b6c9ef609f1b723a58773026b
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/models.py#L280-L292
train
49,401
BreakingBytes/simkit
simkit/core/models.py
Model.save
def save(self, modelfile, layer=None): """ Save a model file. :param modelfile: The name of the json file to save. :type modelfile: str :param layer: Optionally save only specified layer. :type layer: str """ if layer: obj = {layer: self.model[layer]} else: obj = self.model with open(modelfile, 'w') as fp: json.dump(obj, fp, indent=2, sort_keys=True)
python
def save(self, modelfile, layer=None): """ Save a model file. :param modelfile: The name of the json file to save. :type modelfile: str :param layer: Optionally save only specified layer. :type layer: str """ if layer: obj = {layer: self.model[layer]} else: obj = self.model with open(modelfile, 'w') as fp: json.dump(obj, fp, indent=2, sort_keys=True)
[ "def", "save", "(", "self", ",", "modelfile", ",", "layer", "=", "None", ")", ":", "if", "layer", ":", "obj", "=", "{", "layer", ":", "self", ".", "model", "[", "layer", "]", "}", "else", ":", "obj", "=", "self", ".", "model", "with", "open", "...
Save a model file. :param modelfile: The name of the json file to save. :type modelfile: str :param layer: Optionally save only specified layer. :type layer: str
[ "Save", "a", "model", "file", "." ]
205163d879d3880b6c9ef609f1b723a58773026b
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/models.py#L301-L315
train
49,402
BreakingBytes/simkit
simkit/core/models.py
Model.command
def command(self, cmd, progress_hook=None, *args, **kwargs): """ Execute a model command. :param cmd: Name of the command. :param progress_hook: A function to which progress updates are passed. """ cmds = cmd.split(None, 1) # split commands and simulations sim_names = cmds[1:] # simulations if not sim_names: sim_names = self.cmd_layer.reg.iterkeys() for sim_name in sim_names: sim_cmd = getattr(self.cmd_layer.reg[sim_name], cmd) sim_cmd(self, progress_hook=progress_hook, *args, **kwargs)
python
def command(self, cmd, progress_hook=None, *args, **kwargs): """ Execute a model command. :param cmd: Name of the command. :param progress_hook: A function to which progress updates are passed. """ cmds = cmd.split(None, 1) # split commands and simulations sim_names = cmds[1:] # simulations if not sim_names: sim_names = self.cmd_layer.reg.iterkeys() for sim_name in sim_names: sim_cmd = getattr(self.cmd_layer.reg[sim_name], cmd) sim_cmd(self, progress_hook=progress_hook, *args, **kwargs)
[ "def", "command", "(", "self", ",", "cmd", ",", "progress_hook", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "cmds", "=", "cmd", ".", "split", "(", "None", ",", "1", ")", "# split commands and simulations", "sim_names", "=", "cmds...
Execute a model command. :param cmd: Name of the command. :param progress_hook: A function to which progress updates are passed.
[ "Execute", "a", "model", "command", "." ]
205163d879d3880b6c9ef609f1b723a58773026b
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/models.py#L331-L344
train
49,403
volafiled/python-volapi
volapi/user.py
User.login
def login(self, password): """Attempts to log in as the current user with given password""" if self.logged_in: raise RuntimeError("User already logged in!") params = {"name": self.nick, "password": password} resp = self.conn.make_api_call("login", params) if "error" in resp: raise RuntimeError( f"Login failed: {resp['error'].get('message') or resp['error']}" ) self.session = resp["session"] self.conn.make_call("useSession", self.session) self.conn.cookies.update({"session": self.session}) self.logged_in = True return True
python
def login(self, password): """Attempts to log in as the current user with given password""" if self.logged_in: raise RuntimeError("User already logged in!") params = {"name": self.nick, "password": password} resp = self.conn.make_api_call("login", params) if "error" in resp: raise RuntimeError( f"Login failed: {resp['error'].get('message') or resp['error']}" ) self.session = resp["session"] self.conn.make_call("useSession", self.session) self.conn.cookies.update({"session": self.session}) self.logged_in = True return True
[ "def", "login", "(", "self", ",", "password", ")", ":", "if", "self", ".", "logged_in", ":", "raise", "RuntimeError", "(", "\"User already logged in!\"", ")", "params", "=", "{", "\"name\"", ":", "self", ".", "nick", ",", "\"password\"", ":", "password", "...
Attempts to log in as the current user with given password
[ "Attempts", "to", "log", "in", "as", "the", "current", "user", "with", "given", "password" ]
5f0bc03dbde703264ac6ed494e2050761f688a3e
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/user.py#L18-L34
train
49,404
volafiled/python-volapi
volapi/user.py
User.login_transplant
def login_transplant(self, other): """Attempts to carry over the login state from another room""" if not other.logged_in: raise ValueError("Other room is not logged in") cookie = other.session if not cookie: raise ValueError("Other room has no cookie") self.conn.cookies.update({"session": cookie}) self.session = cookie self.logged_in = True return True
python
def login_transplant(self, other): """Attempts to carry over the login state from another room""" if not other.logged_in: raise ValueError("Other room is not logged in") cookie = other.session if not cookie: raise ValueError("Other room has no cookie") self.conn.cookies.update({"session": cookie}) self.session = cookie self.logged_in = True return True
[ "def", "login_transplant", "(", "self", ",", "other", ")", ":", "if", "not", "other", ".", "logged_in", ":", "raise", "ValueError", "(", "\"Other room is not logged in\"", ")", "cookie", "=", "other", ".", "session", "if", "not", "cookie", ":", "raise", "Val...
Attempts to carry over the login state from another room
[ "Attempts", "to", "carry", "over", "the", "login", "state", "from", "another", "room" ]
5f0bc03dbde703264ac6ed494e2050761f688a3e
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/user.py#L36-L47
train
49,405
volafiled/python-volapi
volapi/user.py
User.logout
def logout(self): """Logs your user out""" if not self.logged_in: raise RuntimeError("User is not logged in") if self.conn.connected: params = {"room": self.conn.room.room_id} resp = self.conn.make_api_call("logout", params) if not resp.get("success", False): raise RuntimeError( f"Logout unsuccessful: " f"{resp['error'].get('message') or resp['error']}" ) self.conn.make_call("logout", params) self.conn.cookies.pop("session") self.logged_in = False
python
def logout(self): """Logs your user out""" if not self.logged_in: raise RuntimeError("User is not logged in") if self.conn.connected: params = {"room": self.conn.room.room_id} resp = self.conn.make_api_call("logout", params) if not resp.get("success", False): raise RuntimeError( f"Logout unsuccessful: " f"{resp['error'].get('message') or resp['error']}" ) self.conn.make_call("logout", params) self.conn.cookies.pop("session") self.logged_in = False
[ "def", "logout", "(", "self", ")", ":", "if", "not", "self", ".", "logged_in", ":", "raise", "RuntimeError", "(", "\"User is not logged in\"", ")", "if", "self", ".", "conn", ".", "connected", ":", "params", "=", "{", "\"room\"", ":", "self", ".", "conn"...
Logs your user out
[ "Logs", "your", "user", "out" ]
5f0bc03dbde703264ac6ed494e2050761f688a3e
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/user.py#L49-L64
train
49,406
volafiled/python-volapi
volapi/user.py
User.register
def register(self, password): """Registers the current user with the given password.""" if len(password) < 8: raise ValueError("Password must be at least 8 characters.") params = {"name": self.nick, "password": password} resp = self.conn.make_api_call("register", params) if "error" in resp: raise RuntimeError(f"{resp['error'].get('message') or resp['error']}") self.conn.make_call("useSession", resp["session"]) self.conn.cookies.update({"session": resp["session"]}) self.logged_in = True
python
def register(self, password): """Registers the current user with the given password.""" if len(password) < 8: raise ValueError("Password must be at least 8 characters.") params = {"name": self.nick, "password": password} resp = self.conn.make_api_call("register", params) if "error" in resp: raise RuntimeError(f"{resp['error'].get('message') or resp['error']}") self.conn.make_call("useSession", resp["session"]) self.conn.cookies.update({"session": resp["session"]}) self.logged_in = True
[ "def", "register", "(", "self", ",", "password", ")", ":", "if", "len", "(", "password", ")", "<", "8", ":", "raise", "ValueError", "(", "\"Password must be at least 8 characters.\"", ")", "params", "=", "{", "\"name\"", ":", "self", ".", "nick", ",", "\"p...
Registers the current user with the given password.
[ "Registers", "the", "current", "user", "with", "the", "given", "password", "." ]
5f0bc03dbde703264ac6ed494e2050761f688a3e
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/user.py#L77-L91
train
49,407
volafiled/python-volapi
volapi/user.py
User.__verify_username
def __verify_username(self, username): """Raises an exception if the given username is not valid.""" if len(username) > self.__max_length or len(username) < 3: raise ValueError( f"Username must be between 3 and {self.__max_length} characters." ) if any(c not in string.ascii_letters + string.digits for c in username): raise ValueError("Usernames can only contain alphanumeric characters.")
python
def __verify_username(self, username): """Raises an exception if the given username is not valid.""" if len(username) > self.__max_length or len(username) < 3: raise ValueError( f"Username must be between 3 and {self.__max_length} characters." ) if any(c not in string.ascii_letters + string.digits for c in username): raise ValueError("Usernames can only contain alphanumeric characters.")
[ "def", "__verify_username", "(", "self", ",", "username", ")", ":", "if", "len", "(", "username", ")", ">", "self", ".", "__max_length", "or", "len", "(", "username", ")", "<", "3", ":", "raise", "ValueError", "(", "f\"Username must be between 3 and {self.__ma...
Raises an exception if the given username is not valid.
[ "Raises", "an", "exception", "if", "the", "given", "username", "is", "not", "valid", "." ]
5f0bc03dbde703264ac6ed494e2050761f688a3e
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/user.py#L93-L101
train
49,408
Yelp/uwsgi_metrics
uwsgi_metrics/reservoir.py
Reservoir.update
def update(self, value, timestamp=None): """Add a value to the reservoir. :param value: the value to be added :param timestamp: the epoch timestamp of the value in seconds, defaults to the current timestamp if not specified. """ if timestamp is None: timestamp = self.current_time_in_fractional_seconds() self.rescale_if_needed() priority = self.weight(timestamp - self.start_time) / random.random() self.values[priority] = value if len(self.values) > self.size: self.values.remove_min()
python
def update(self, value, timestamp=None): """Add a value to the reservoir. :param value: the value to be added :param timestamp: the epoch timestamp of the value in seconds, defaults to the current timestamp if not specified. """ if timestamp is None: timestamp = self.current_time_in_fractional_seconds() self.rescale_if_needed() priority = self.weight(timestamp - self.start_time) / random.random() self.values[priority] = value if len(self.values) > self.size: self.values.remove_min()
[ "def", "update", "(", "self", ",", "value", ",", "timestamp", "=", "None", ")", ":", "if", "timestamp", "is", "None", ":", "timestamp", "=", "self", ".", "current_time_in_fractional_seconds", "(", ")", "self", ".", "rescale_if_needed", "(", ")", "priority", ...
Add a value to the reservoir. :param value: the value to be added :param timestamp: the epoch timestamp of the value in seconds, defaults to the current timestamp if not specified.
[ "Add", "a", "value", "to", "the", "reservoir", "." ]
534966fd461ff711aecd1e3d4caaafdc23ac33f0
https://github.com/Yelp/uwsgi_metrics/blob/534966fd461ff711aecd1e3d4caaafdc23ac33f0/uwsgi_metrics/reservoir.py#L66-L80
train
49,409
Jaymon/prom
prom/utils.py
get_objects
def get_objects(classpath, calling_classpath=""): """ given a classpath like foo.bar.Baz return module foo.bar and class Baz objects .. seealso:: https://docs.python.org/2.5/whatsnew/pep-328.html https://www.python.org/dev/peps/pep-0328/ :param classpath: string, the full python class path (includes modules), a classpath is something like foo.bar.Che where Che is the class definied in the foo.bar module :param calling_classpath: string, if classpath is relative (eg, ..foo.Bar) then this is needed to resolve the relative classpath, it is usually the path of the module that is calling get_objects() :returns: tuple, (module, class) """ # if classpath.startswith("."): # rel_count = len(re.match("^\.+", classpath).group(0)) # if calling_classpath: # calling_count = calling_classpath.count(".") # if rel_count > calling_count: # raise ValueError( # "Attempting relative import passed calling_classpath {}".format( # calling_classpath # ) # ) # # bits = calling_classpath.rsplit('.', rel_count) # parent_classpath = bits[0] # classpath = ".".join([parent_classpath, classpath[rel_count:]]) # # else: # raise ValueError("Attempting relative import without calling_classpath") # module_name, class_name = classpath.rsplit('.', 1) module = importlib.import_module(module_name, calling_classpath) try: klass = getattr(module, class_name) except AttributeError: raise AttributeError("module {} has no attribute {} parsing {}".format( module.__name__, class_name, classpath )) return module, klass
python
def get_objects(classpath, calling_classpath=""): """ given a classpath like foo.bar.Baz return module foo.bar and class Baz objects .. seealso:: https://docs.python.org/2.5/whatsnew/pep-328.html https://www.python.org/dev/peps/pep-0328/ :param classpath: string, the full python class path (includes modules), a classpath is something like foo.bar.Che where Che is the class definied in the foo.bar module :param calling_classpath: string, if classpath is relative (eg, ..foo.Bar) then this is needed to resolve the relative classpath, it is usually the path of the module that is calling get_objects() :returns: tuple, (module, class) """ # if classpath.startswith("."): # rel_count = len(re.match("^\.+", classpath).group(0)) # if calling_classpath: # calling_count = calling_classpath.count(".") # if rel_count > calling_count: # raise ValueError( # "Attempting relative import passed calling_classpath {}".format( # calling_classpath # ) # ) # # bits = calling_classpath.rsplit('.', rel_count) # parent_classpath = bits[0] # classpath = ".".join([parent_classpath, classpath[rel_count:]]) # # else: # raise ValueError("Attempting relative import without calling_classpath") # module_name, class_name = classpath.rsplit('.', 1) module = importlib.import_module(module_name, calling_classpath) try: klass = getattr(module, class_name) except AttributeError: raise AttributeError("module {} has no attribute {} parsing {}".format( module.__name__, class_name, classpath )) return module, klass
[ "def", "get_objects", "(", "classpath", ",", "calling_classpath", "=", "\"\"", ")", ":", "# if classpath.startswith(\".\"):", "# rel_count = len(re.match(\"^\\.+\", classpath).group(0))", "# if calling_classpath:", "# calling_count = calling_classpath.count(...
given a classpath like foo.bar.Baz return module foo.bar and class Baz objects .. seealso:: https://docs.python.org/2.5/whatsnew/pep-328.html https://www.python.org/dev/peps/pep-0328/ :param classpath: string, the full python class path (includes modules), a classpath is something like foo.bar.Che where Che is the class definied in the foo.bar module :param calling_classpath: string, if classpath is relative (eg, ..foo.Bar) then this is needed to resolve the relative classpath, it is usually the path of the module that is calling get_objects() :returns: tuple, (module, class)
[ "given", "a", "classpath", "like", "foo", ".", "bar", ".", "Baz", "return", "module", "foo", ".", "bar", "and", "class", "Baz", "objects" ]
b7ad2c259eca198da03e1e4bc7d95014c168c361
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/utils.py#L178-L224
train
49,410
Jaymon/prom
prom/utils.py
make_dict
def make_dict(fields, fields_kwargs): """lot's of methods take a dict or kwargs, this combines those Basically, we do a lot of def method(fields, **kwargs) and we want to merge those into one super dict with kwargs taking precedence, this does that fields -- dict -- a passed in dict fields_kwargs -- dict -- usually a **kwargs dict from another function return -- dict -- a merged fields and fields_kwargs """ ret = {} if fields: ret.update(fields) if fields_kwargs: ret.update(fields_kwargs) return ret
python
def make_dict(fields, fields_kwargs): """lot's of methods take a dict or kwargs, this combines those Basically, we do a lot of def method(fields, **kwargs) and we want to merge those into one super dict with kwargs taking precedence, this does that fields -- dict -- a passed in dict fields_kwargs -- dict -- usually a **kwargs dict from another function return -- dict -- a merged fields and fields_kwargs """ ret = {} if fields: ret.update(fields) if fields_kwargs: ret.update(fields_kwargs) return ret
[ "def", "make_dict", "(", "fields", ",", "fields_kwargs", ")", ":", "ret", "=", "{", "}", "if", "fields", ":", "ret", ".", "update", "(", "fields", ")", "if", "fields_kwargs", ":", "ret", ".", "update", "(", "fields_kwargs", ")", "return", "ret" ]
lot's of methods take a dict or kwargs, this combines those Basically, we do a lot of def method(fields, **kwargs) and we want to merge those into one super dict with kwargs taking precedence, this does that fields -- dict -- a passed in dict fields_kwargs -- dict -- usually a **kwargs dict from another function return -- dict -- a merged fields and fields_kwargs
[ "lot", "s", "of", "methods", "take", "a", "dict", "or", "kwargs", "this", "combines", "those" ]
b7ad2c259eca198da03e1e4bc7d95014c168c361
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/utils.py#L237-L255
train
49,411
Jaymon/prom
prom/utils.py
Stream.write_line
def write_line(self, line, count=1): """writes the line and count newlines after the line""" self.write(line) self.write_newlines(count)
python
def write_line(self, line, count=1): """writes the line and count newlines after the line""" self.write(line) self.write_newlines(count)
[ "def", "write_line", "(", "self", ",", "line", ",", "count", "=", "1", ")", ":", "self", ".", "write", "(", "line", ")", "self", ".", "write_newlines", "(", "count", ")" ]
writes the line and count newlines after the line
[ "writes", "the", "line", "and", "count", "newlines", "after", "the", "line" ]
b7ad2c259eca198da03e1e4bc7d95014c168c361
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/utils.py#L35-L38
train
49,412
Jaymon/prom
prom/utils.py
PriorityQueue.add
def add(self, key, val, priority=None): """add a value to the queue with priority, using the key to know uniqueness key -- str -- this is used to determine if val already exists in the queue, if key is already in the queue, then the val will be replaced in the queue with the new priority val -- mixed -- the value to add to the queue priority -- int -- the priority of val """ if key in self.item_finder: self.remove(key) else: # keep the queue contained if self.full(): raise OverflowError("Queue is full") if priority is None: priority = next(self.counter) item = [priority, key, val] self.item_finder[key] = item heapq.heappush(self.pq, item)
python
def add(self, key, val, priority=None): """add a value to the queue with priority, using the key to know uniqueness key -- str -- this is used to determine if val already exists in the queue, if key is already in the queue, then the val will be replaced in the queue with the new priority val -- mixed -- the value to add to the queue priority -- int -- the priority of val """ if key in self.item_finder: self.remove(key) else: # keep the queue contained if self.full(): raise OverflowError("Queue is full") if priority is None: priority = next(self.counter) item = [priority, key, val] self.item_finder[key] = item heapq.heappush(self.pq, item)
[ "def", "add", "(", "self", ",", "key", ",", "val", ",", "priority", "=", "None", ")", ":", "if", "key", "in", "self", ".", "item_finder", ":", "self", ".", "remove", "(", "key", ")", "else", ":", "# keep the queue contained", "if", "self", ".", "full...
add a value to the queue with priority, using the key to know uniqueness key -- str -- this is used to determine if val already exists in the queue, if key is already in the queue, then the val will be replaced in the queue with the new priority val -- mixed -- the value to add to the queue priority -- int -- the priority of val
[ "add", "a", "value", "to", "the", "queue", "with", "priority", "using", "the", "key", "to", "know", "uniqueness" ]
b7ad2c259eca198da03e1e4bc7d95014c168c361
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/utils.py#L111-L134
train
49,413
Jaymon/prom
prom/utils.py
PriorityQueue.remove
def remove(self, key): """remove the value found at key from the queue""" item = self.item_finder.pop(key) item[-1] = None self.removed_count += 1
python
def remove(self, key): """remove the value found at key from the queue""" item = self.item_finder.pop(key) item[-1] = None self.removed_count += 1
[ "def", "remove", "(", "self", ",", "key", ")", ":", "item", "=", "self", ".", "item_finder", ".", "pop", "(", "key", ")", "item", "[", "-", "1", "]", "=", "None", "self", ".", "removed_count", "+=", "1" ]
remove the value found at key from the queue
[ "remove", "the", "value", "found", "at", "key", "from", "the", "queue" ]
b7ad2c259eca198da03e1e4bc7d95014c168c361
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/utils.py#L136-L140
train
49,414
Jaymon/prom
prom/utils.py
PriorityQueue.full
def full(self): """Return True if the queue is full""" if not self.size: return False return len(self.pq) == (self.size + self.removed_count)
python
def full(self): """Return True if the queue is full""" if not self.size: return False return len(self.pq) == (self.size + self.removed_count)
[ "def", "full", "(", "self", ")", ":", "if", "not", "self", ".", "size", ":", "return", "False", "return", "len", "(", "self", ".", "pq", ")", "==", "(", "self", ".", "size", "+", "self", ".", "removed_count", ")" ]
Return True if the queue is full
[ "Return", "True", "if", "the", "queue", "is", "full" ]
b7ad2c259eca198da03e1e4bc7d95014c168c361
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/utils.py#L161-L164
train
49,415
guaix-ucm/pyemir
emirdrp/recipes/image/checks.py
check_photometry_categorize
def check_photometry_categorize(x, y, levels, tags=None): '''Put every point in its category. levels must be sorted.''' x = numpy.asarray(x) y = numpy.asarray(y) ys = y.copy() ys.sort() # Mean of the upper half m = ys[len(ys) // 2:].mean() y /= m m = 1.0 s = ys[len(ys) // 2:].std() result = [] if tags is None: tags = list(six.moves.range(len(levels) + 1)) for l, t in zip(levels, tags): indc = y < l if indc.any(): x1 = x[indc] y1 = y[indc] result.append((x1, y1, t)) x = x[~indc] y = y[~indc] else: result.append((x, y, tags[-1])) return result, (m, s)
python
def check_photometry_categorize(x, y, levels, tags=None): '''Put every point in its category. levels must be sorted.''' x = numpy.asarray(x) y = numpy.asarray(y) ys = y.copy() ys.sort() # Mean of the upper half m = ys[len(ys) // 2:].mean() y /= m m = 1.0 s = ys[len(ys) // 2:].std() result = [] if tags is None: tags = list(six.moves.range(len(levels) + 1)) for l, t in zip(levels, tags): indc = y < l if indc.any(): x1 = x[indc] y1 = y[indc] result.append((x1, y1, t)) x = x[~indc] y = y[~indc] else: result.append((x, y, tags[-1])) return result, (m, s)
[ "def", "check_photometry_categorize", "(", "x", ",", "y", ",", "levels", ",", "tags", "=", "None", ")", ":", "x", "=", "numpy", ".", "asarray", "(", "x", ")", "y", "=", "numpy", ".", "asarray", "(", "y", ")", "ys", "=", "y", ".", "copy", "(", "...
Put every point in its category. levels must be sorted.
[ "Put", "every", "point", "in", "its", "category", "." ]
fef6bbabcb13f80123cafd1800a0f508a3c21702
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/recipes/image/checks.py#L173-L203
train
49,416
IdentityPython/oidcendpoint
src/oidcendpoint/endpoint.py
Endpoint.client_authentication
def client_authentication(self, request, auth=None, **kwargs): """ Do client authentication :param endpoint_context: A :py:class:`oidcendpoint.endpoint_context.SrvInfo` instance :param request: Parsed request, a self.request_cls class instance :param authn: Authorization info :return: client_id or raise and exception """ return verify_client(self.endpoint_context, request, auth)
python
def client_authentication(self, request, auth=None, **kwargs): """ Do client authentication :param endpoint_context: A :py:class:`oidcendpoint.endpoint_context.SrvInfo` instance :param request: Parsed request, a self.request_cls class instance :param authn: Authorization info :return: client_id or raise and exception """ return verify_client(self.endpoint_context, request, auth)
[ "def", "client_authentication", "(", "self", ",", "request", ",", "auth", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "verify_client", "(", "self", ".", "endpoint_context", ",", "request", ",", "auth", ")" ]
Do client authentication :param endpoint_context: A :py:class:`oidcendpoint.endpoint_context.SrvInfo` instance :param request: Parsed request, a self.request_cls class instance :param authn: Authorization info :return: client_id or raise and exception
[ "Do", "client", "authentication" ]
6c1d729d51bfb6332816117fe476073df7a1d823
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/endpoint.py#L146-L157
train
49,417
IdentityPython/oidcendpoint
src/oidcendpoint/endpoint.py
Endpoint.construct
def construct(self, response_args, request, **kwargs): """ Construct the response :param response_args: response arguments :param request: The parsed request, a self.request_cls class instance :param kwargs: Extra keyword arguments :return: An instance of the self.response_cls class """ response_args = self.do_pre_construct(response_args, request, **kwargs) # logger.debug("kwargs: %s" % sanitize(kwargs)) response = self.response_cls(**response_args) return self.do_post_construct(response, request, **kwargs)
python
def construct(self, response_args, request, **kwargs): """ Construct the response :param response_args: response arguments :param request: The parsed request, a self.request_cls class instance :param kwargs: Extra keyword arguments :return: An instance of the self.response_cls class """ response_args = self.do_pre_construct(response_args, request, **kwargs) # logger.debug("kwargs: %s" % sanitize(kwargs)) response = self.response_cls(**response_args) return self.do_post_construct(response, request, **kwargs)
[ "def", "construct", "(", "self", ",", "response_args", ",", "request", ",", "*", "*", "kwargs", ")", ":", "response_args", "=", "self", ".", "do_pre_construct", "(", "response_args", ",", "request", ",", "*", "*", "kwargs", ")", "# logger.debug(\"kwargs: %s\" ...
Construct the response :param response_args: response arguments :param request: The parsed request, a self.request_cls class instance :param kwargs: Extra keyword arguments :return: An instance of the self.response_cls class
[ "Construct", "the", "response" ]
6c1d729d51bfb6332816117fe476073df7a1d823
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/endpoint.py#L189-L203
train
49,418
IdentityPython/oidcendpoint
src/oidcendpoint/oidc/authorization.py
inputs
def inputs(form_args): """ Creates list of input elements """ element = [] for name, value in form_args.items(): element.append( '<input type="hidden" name="{}" value="{}"/>'.format(name, value)) return "\n".join(element)
python
def inputs(form_args): """ Creates list of input elements """ element = [] for name, value in form_args.items(): element.append( '<input type="hidden" name="{}" value="{}"/>'.format(name, value)) return "\n".join(element)
[ "def", "inputs", "(", "form_args", ")", ":", "element", "=", "[", "]", "for", "name", ",", "value", "in", "form_args", ".", "items", "(", ")", ":", "element", ".", "append", "(", "'<input type=\"hidden\" name=\"{}\" value=\"{}\"/>'", ".", "format", "(", "nam...
Creates list of input elements
[ "Creates", "list", "of", "input", "elements" ]
6c1d729d51bfb6332816117fe476073df7a1d823
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/oidc/authorization.py#L53-L61
train
49,419
IdentityPython/oidcendpoint
src/oidcendpoint/oidc/authorization.py
verify_uri
def verify_uri(endpoint_context, request, uri_type, client_id=None): """ A redirect URI MUST NOT contain a fragment MAY contain query component :param endpoint_context: :param request: :param uri_type: redirect_uri/post_logout_redirect_uri :return: An error response if the redirect URI is faulty otherwise None """ try: _cid = request["client_id"] except KeyError: _cid = client_id if not _cid: logger.error('No client id found') raise UnknownClient('No client_id provided') _redirect_uri = unquote(request[uri_type]) part = urlparse(_redirect_uri) if part.fragment: raise URIError("Contains fragment") (_base, _query) = splitquery(_redirect_uri) if _query: _query = parse_qs(_query) match = False try: values = endpoint_context.cdb[_cid]['{}s'.format(uri_type)] except KeyError: raise ValueError('No registered {}'.format(uri_type)) else: for regbase, rquery in values: # The URI MUST exactly match one of the Redirection URI if _base == regbase: # every registered query component must exist in the uri if rquery: if not _query: raise ValueError('Missing query part') for key, vals in rquery.items(): if key not in _query: raise ValueError('"{}" not in query part'.format(key)) for val in vals: if val not in _query[key]: raise ValueError('{}={} value not in query part'.format(key, val)) # and vice versa, every query component in the uri # must be registered if _query: if not rquery: raise ValueError('No registered query part') for key, vals in _query.items(): if key not in rquery: raise ValueError('"{}" extra in query part'.format(key)) for val in vals: if val not in rquery[key]: raise ValueError('Extra {}={} value in query part'.format(key, val)) match = True break if not match: raise RedirectURIError("Doesn't match any registered uris")
python
def verify_uri(endpoint_context, request, uri_type, client_id=None): """ A redirect URI MUST NOT contain a fragment MAY contain query component :param endpoint_context: :param request: :param uri_type: redirect_uri/post_logout_redirect_uri :return: An error response if the redirect URI is faulty otherwise None """ try: _cid = request["client_id"] except KeyError: _cid = client_id if not _cid: logger.error('No client id found') raise UnknownClient('No client_id provided') _redirect_uri = unquote(request[uri_type]) part = urlparse(_redirect_uri) if part.fragment: raise URIError("Contains fragment") (_base, _query) = splitquery(_redirect_uri) if _query: _query = parse_qs(_query) match = False try: values = endpoint_context.cdb[_cid]['{}s'.format(uri_type)] except KeyError: raise ValueError('No registered {}'.format(uri_type)) else: for regbase, rquery in values: # The URI MUST exactly match one of the Redirection URI if _base == regbase: # every registered query component must exist in the uri if rquery: if not _query: raise ValueError('Missing query part') for key, vals in rquery.items(): if key not in _query: raise ValueError('"{}" not in query part'.format(key)) for val in vals: if val not in _query[key]: raise ValueError('{}={} value not in query part'.format(key, val)) # and vice versa, every query component in the uri # must be registered if _query: if not rquery: raise ValueError('No registered query part') for key, vals in _query.items(): if key not in rquery: raise ValueError('"{}" extra in query part'.format(key)) for val in vals: if val not in rquery[key]: raise ValueError('Extra {}={} value in query part'.format(key, val)) match = True break if not match: raise RedirectURIError("Doesn't match any registered uris")
[ "def", "verify_uri", "(", "endpoint_context", ",", "request", ",", "uri_type", ",", "client_id", "=", "None", ")", ":", "try", ":", "_cid", "=", "request", "[", "\"client_id\"", "]", "except", "KeyError", ":", "_cid", "=", "client_id", "if", "not", "_cid",...
A redirect URI MUST NOT contain a fragment MAY contain query component :param endpoint_context: :param request: :param uri_type: redirect_uri/post_logout_redirect_uri :return: An error response if the redirect URI is faulty otherwise None
[ "A", "redirect", "URI", "MUST", "NOT", "contain", "a", "fragment", "MAY", "contain", "query", "component" ]
6c1d729d51bfb6332816117fe476073df7a1d823
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/oidc/authorization.py#L100-L168
train
49,420
IdentityPython/oidcendpoint
src/oidcendpoint/oidc/authorization.py
get_uri
def get_uri(endpoint_context, request, uri_type): """ verify that the redirect URI is reasonable :param endpoint_context: :param request: The Authorization request :param uri_type: 'redirect_uri' or 'post_logout_redirect_uri' :return: redirect_uri """ if uri_type in request: verify_uri(endpoint_context, request, uri_type) uri = request[uri_type] else: try: _specs = endpoint_context.cdb[ str(request["client_id"])]["{}s".format(uri_type)] except KeyError: raise ParameterError( "Missing {} and none registered".format(uri_type)) else: if len(_specs) > 1: raise ParameterError( "Missing {} and more than one registered".format(uri_type)) else: uri = join_query(*_specs[0]) return uri
python
def get_uri(endpoint_context, request, uri_type): """ verify that the redirect URI is reasonable :param endpoint_context: :param request: The Authorization request :param uri_type: 'redirect_uri' or 'post_logout_redirect_uri' :return: redirect_uri """ if uri_type in request: verify_uri(endpoint_context, request, uri_type) uri = request[uri_type] else: try: _specs = endpoint_context.cdb[ str(request["client_id"])]["{}s".format(uri_type)] except KeyError: raise ParameterError( "Missing {} and none registered".format(uri_type)) else: if len(_specs) > 1: raise ParameterError( "Missing {} and more than one registered".format(uri_type)) else: uri = join_query(*_specs[0]) return uri
[ "def", "get_uri", "(", "endpoint_context", ",", "request", ",", "uri_type", ")", ":", "if", "uri_type", "in", "request", ":", "verify_uri", "(", "endpoint_context", ",", "request", ",", "uri_type", ")", "uri", "=", "request", "[", "uri_type", "]", "else", ...
verify that the redirect URI is reasonable :param endpoint_context: :param request: The Authorization request :param uri_type: 'redirect_uri' or 'post_logout_redirect_uri' :return: redirect_uri
[ "verify", "that", "the", "redirect", "URI", "is", "reasonable" ]
6c1d729d51bfb6332816117fe476073df7a1d823
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/oidc/authorization.py#L184-L209
train
49,421
IdentityPython/oidcendpoint
src/oidcendpoint/oidc/authorization.py
Authorization.post_authentication
def post_authentication(self, user, request, sid, **kwargs): """ Things that are done after a successful authentication. :param user: :param request: :param sid: :param kwargs: :return: A dictionary with 'response_args' """ response_info = {} # Do the authorization try: permission = self.endpoint_context.authz( user, client_id=request['client_id']) except ToOld as err: return self.error_response( response_info, 'access_denied', 'Authentication to old {}'.format(err.args)) except Exception as err: return self.error_response(response_info, 'access_denied', '{}'.format(err.args)) else: try: self.endpoint_context.sdb.update(sid, permission=permission) except Exception as err: return self.error_response(response_info, 'server_error', '{}'.format(err.args)) logger.debug("response type: %s" % request["response_type"]) if self.endpoint_context.sdb.is_session_revoked(sid): return self.error_response(response_info, "access_denied", "Session is revoked") response_info = create_authn_response(self, request, sid) try: redirect_uri = get_uri(self.endpoint_context, request, 'redirect_uri') except (RedirectURIError, ParameterError) as err: return self.error_response(response_info, 'invalid_request', '{}'.format(err.args)) else: response_info['return_uri'] = redirect_uri # Must not use HTTP unless implicit grant type and native application # info = self.aresp_check(response_info['response_args'], request) # if isinstance(info, ResponseMessage): # return info _cookie = new_cookie( self.endpoint_context, sub=user, sid=sid, state=request['state'], client_id=request['client_id'], cookie_name=self.endpoint_context.cookie_name['session']) # Now about the response_mode. Should not be set if it's obvious # from the response_type. Knows about 'query', 'fragment' and # 'form_post'. 
if "response_mode" in request: try: response_info = self.response_mode(request, **response_info) except InvalidRequest as err: return self.error_response(response_info, 'invalid_request', '{}'.format(err.args)) response_info['cookie'] = [_cookie] return response_info
python
def post_authentication(self, user, request, sid, **kwargs): """ Things that are done after a successful authentication. :param user: :param request: :param sid: :param kwargs: :return: A dictionary with 'response_args' """ response_info = {} # Do the authorization try: permission = self.endpoint_context.authz( user, client_id=request['client_id']) except ToOld as err: return self.error_response( response_info, 'access_denied', 'Authentication to old {}'.format(err.args)) except Exception as err: return self.error_response(response_info, 'access_denied', '{}'.format(err.args)) else: try: self.endpoint_context.sdb.update(sid, permission=permission) except Exception as err: return self.error_response(response_info, 'server_error', '{}'.format(err.args)) logger.debug("response type: %s" % request["response_type"]) if self.endpoint_context.sdb.is_session_revoked(sid): return self.error_response(response_info, "access_denied", "Session is revoked") response_info = create_authn_response(self, request, sid) try: redirect_uri = get_uri(self.endpoint_context, request, 'redirect_uri') except (RedirectURIError, ParameterError) as err: return self.error_response(response_info, 'invalid_request', '{}'.format(err.args)) else: response_info['return_uri'] = redirect_uri # Must not use HTTP unless implicit grant type and native application # info = self.aresp_check(response_info['response_args'], request) # if isinstance(info, ResponseMessage): # return info _cookie = new_cookie( self.endpoint_context, sub=user, sid=sid, state=request['state'], client_id=request['client_id'], cookie_name=self.endpoint_context.cookie_name['session']) # Now about the response_mode. Should not be set if it's obvious # from the response_type. Knows about 'query', 'fragment' and # 'form_post'. 
if "response_mode" in request: try: response_info = self.response_mode(request, **response_info) except InvalidRequest as err: return self.error_response(response_info, 'invalid_request', '{}'.format(err.args)) response_info['cookie'] = [_cookie] return response_info
[ "def", "post_authentication", "(", "self", ",", "user", ",", "request", ",", "sid", ",", "*", "*", "kwargs", ")", ":", "response_info", "=", "{", "}", "# Do the authorization", "try", ":", "permission", "=", "self", ".", "endpoint_context", ".", "authz", "...
Things that are done after a successful authentication. :param user: :param request: :param sid: :param kwargs: :return: A dictionary with 'response_args'
[ "Things", "that", "are", "done", "after", "a", "successful", "authentication", "." ]
6c1d729d51bfb6332816117fe476073df7a1d823
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/oidc/authorization.py#L568-L640
train
49,422
IdentityPython/oidcendpoint
src/oidcendpoint/oidc/authorization.py
Authorization.authz_part2
def authz_part2(self, user, authn_event, request, **kwargs): """ After the authentication this is where you should end up :param user: :param request: The Authorization Request :param sid: Session key :param kwargs: possible other parameters :return: A redirect to the redirect_uri of the client """ sid = setup_session(self.endpoint_context, request, user, authn_event=authn_event) try: resp_info = self.post_authentication(user, request, sid, **kwargs) except Exception as err: return self.error_response({}, 'server_error', err) if "check_session_iframe" in self.endpoint_context.provider_info: ec = self.endpoint_context salt = rndstr() if ec.sdb.is_session_revoked(sid): pass else: authn_event = ec.sdb.get_authentication_event(sid) # use the last session _state = json.dumps({'authn_time': authn_event['authn_time']}) session_cookie = ec.cookie_dealer.create_cookie( json.dumps(_state), typ="session", cookie_name=ec.cookie_name['session_management']) opbs = session_cookie[ec.cookie_name['session_management']] _session_state = compute_session_state(opbs.value, salt, request["client_id"], resp_info['return_uri']) if 'cookie' in resp_info: if isinstance(resp_info['cookie'], list): resp_info['cookie'].append(session_cookie) else: append_cookie(resp_info['cookie'], session_cookie) else: resp_info['cookie'] = session_cookie resp_info['response_args']['session_state'] = _session_state # Mix-Up mitigation resp_info['response_args']['iss'] = self.endpoint_context.issuer resp_info['response_args']['client_id'] = request['client_id'] return resp_info
python
def authz_part2(self, user, authn_event, request, **kwargs): """ After the authentication this is where you should end up :param user: :param request: The Authorization Request :param sid: Session key :param kwargs: possible other parameters :return: A redirect to the redirect_uri of the client """ sid = setup_session(self.endpoint_context, request, user, authn_event=authn_event) try: resp_info = self.post_authentication(user, request, sid, **kwargs) except Exception as err: return self.error_response({}, 'server_error', err) if "check_session_iframe" in self.endpoint_context.provider_info: ec = self.endpoint_context salt = rndstr() if ec.sdb.is_session_revoked(sid): pass else: authn_event = ec.sdb.get_authentication_event(sid) # use the last session _state = json.dumps({'authn_time': authn_event['authn_time']}) session_cookie = ec.cookie_dealer.create_cookie( json.dumps(_state), typ="session", cookie_name=ec.cookie_name['session_management']) opbs = session_cookie[ec.cookie_name['session_management']] _session_state = compute_session_state(opbs.value, salt, request["client_id"], resp_info['return_uri']) if 'cookie' in resp_info: if isinstance(resp_info['cookie'], list): resp_info['cookie'].append(session_cookie) else: append_cookie(resp_info['cookie'], session_cookie) else: resp_info['cookie'] = session_cookie resp_info['response_args']['session_state'] = _session_state # Mix-Up mitigation resp_info['response_args']['iss'] = self.endpoint_context.issuer resp_info['response_args']['client_id'] = request['client_id'] return resp_info
[ "def", "authz_part2", "(", "self", ",", "user", ",", "authn_event", ",", "request", ",", "*", "*", "kwargs", ")", ":", "sid", "=", "setup_session", "(", "self", ".", "endpoint_context", ",", "request", ",", "user", ",", "authn_event", "=", "authn_event", ...
After the authentication this is where you should end up :param user: :param request: The Authorization Request :param sid: Session key :param kwargs: possible other parameters :return: A redirect to the redirect_uri of the client
[ "After", "the", "authentication", "this", "is", "where", "you", "should", "end", "up" ]
6c1d729d51bfb6332816117fe476073df7a1d823
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/oidc/authorization.py#L642-L693
train
49,423
IdentityPython/oidcendpoint
src/oidcendpoint/oidc/authorization.py
Authorization.process_request
def process_request(self, request_info=None, **kwargs): """ The AuthorizationRequest endpoint :param request_info: The authorization request as a dictionary :return: dictionary """ if isinstance(request_info, AuthorizationErrorResponse): return request_info _cid = request_info["client_id"] cinfo = self.endpoint_context.cdb[_cid] try: cookie = kwargs['cookie'] except KeyError: cookie = '' else: del kwargs['cookie'] if proposed_user(request_info): kwargs['req_user'] = proposed_user(request_info) else: try: _login_hint = request_info['login_hint'] except KeyError: pass else: if self.endpoint_context.login_hint_lookup: kwargs['req_user'] = self.endpoint_context.login_hint_lookup[ _login_hint] info = self.setup_auth(request_info, request_info["redirect_uri"], cinfo, cookie, **kwargs) if 'error' in info: return info try: _function = info['function'] except KeyError: # already authenticated logger.debug("- authenticated -") logger.debug("AREQ keys: %s" % request_info.keys()) res = self.authz_part2(info['user'], info['authn_event'], request_info, cookie=cookie) return res else: try: # Run the authentication function return { 'http_response': _function(**info['args']), 'return_uri': request_info["redirect_uri"] } except Exception as err: logger.exception(err) return {'http_response': 'Internal error: {}'.format(err)}
python
def process_request(self, request_info=None, **kwargs): """ The AuthorizationRequest endpoint :param request_info: The authorization request as a dictionary :return: dictionary """ if isinstance(request_info, AuthorizationErrorResponse): return request_info _cid = request_info["client_id"] cinfo = self.endpoint_context.cdb[_cid] try: cookie = kwargs['cookie'] except KeyError: cookie = '' else: del kwargs['cookie'] if proposed_user(request_info): kwargs['req_user'] = proposed_user(request_info) else: try: _login_hint = request_info['login_hint'] except KeyError: pass else: if self.endpoint_context.login_hint_lookup: kwargs['req_user'] = self.endpoint_context.login_hint_lookup[ _login_hint] info = self.setup_auth(request_info, request_info["redirect_uri"], cinfo, cookie, **kwargs) if 'error' in info: return info try: _function = info['function'] except KeyError: # already authenticated logger.debug("- authenticated -") logger.debug("AREQ keys: %s" % request_info.keys()) res = self.authz_part2(info['user'], info['authn_event'], request_info, cookie=cookie) return res else: try: # Run the authentication function return { 'http_response': _function(**info['args']), 'return_uri': request_info["redirect_uri"] } except Exception as err: logger.exception(err) return {'http_response': 'Internal error: {}'.format(err)}
[ "def", "process_request", "(", "self", ",", "request_info", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "request_info", ",", "AuthorizationErrorResponse", ")", ":", "return", "request_info", "_cid", "=", "request_info", "[", "\"cli...
The AuthorizationRequest endpoint :param request_info: The authorization request as a dictionary :return: dictionary
[ "The", "AuthorizationRequest", "endpoint" ]
6c1d729d51bfb6332816117fe476073df7a1d823
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/oidc/authorization.py#L695-L751
train
49,424
IdentityPython/oidcendpoint
src/oidcendpoint/session.py
setup_session
def setup_session(endpoint_context, areq, uid, client_id='', acr='', salt='salt', authn_event=None): """ Setting up a user session :param endpoint_context: :param areq: :param uid: :param acr: :param client_id: :param salt: :param authn_event: A already made AuthnEvent :return: """ if authn_event is None and acr: authn_event = AuthnEvent(uid=uid, salt=salt, authn_info=acr, authn_time=time.time()) if not client_id: client_id = areq['client_id'] sid = endpoint_context.sdb.create_authz_session(authn_event, areq, client_id=client_id, uid=uid) endpoint_context.sdb.do_sub(sid, uid, '') return sid
python
def setup_session(endpoint_context, areq, uid, client_id='', acr='', salt='salt', authn_event=None): """ Setting up a user session :param endpoint_context: :param areq: :param uid: :param acr: :param client_id: :param salt: :param authn_event: A already made AuthnEvent :return: """ if authn_event is None and acr: authn_event = AuthnEvent(uid=uid, salt=salt, authn_info=acr, authn_time=time.time()) if not client_id: client_id = areq['client_id'] sid = endpoint_context.sdb.create_authz_session(authn_event, areq, client_id=client_id, uid=uid) endpoint_context.sdb.do_sub(sid, uid, '') return sid
[ "def", "setup_session", "(", "endpoint_context", ",", "areq", ",", "uid", ",", "client_id", "=", "''", ",", "acr", "=", "''", ",", "salt", "=", "'salt'", ",", "authn_event", "=", "None", ")", ":", "if", "authn_event", "is", "None", "and", "acr", ":", ...
Setting up a user session :param endpoint_context: :param areq: :param uid: :param acr: :param client_id: :param salt: :param authn_event: A already made AuthnEvent :return:
[ "Setting", "up", "a", "user", "session" ]
6c1d729d51bfb6332816117fe476073df7a1d823
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/session.py#L44-L69
train
49,425
IdentityPython/oidcendpoint
src/oidcendpoint/session.py
SessionDB.update
def update(self, sid, **kwargs): """ Add attribute value assertion to a special session :param sid: Session ID :param kwargs: """ item = self[sid] for attribute, value in kwargs.items(): item[attribute] = value self[sid] = item
python
def update(self, sid, **kwargs): """ Add attribute value assertion to a special session :param sid: Session ID :param kwargs: """ item = self[sid] for attribute, value in kwargs.items(): item[attribute] = value self[sid] = item
[ "def", "update", "(", "self", ",", "sid", ",", "*", "*", "kwargs", ")", ":", "item", "=", "self", "[", "sid", "]", "for", "attribute", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "item", "[", "attribute", "]", "=", "value", "self",...
Add attribute value assertion to a special session :param sid: Session ID :param kwargs:
[ "Add", "attribute", "value", "assertion", "to", "a", "special", "session" ]
6c1d729d51bfb6332816117fe476073df7a1d823
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/session.py#L204-L214
train
49,426
IdentityPython/oidcendpoint
src/oidcendpoint/session.py
SessionDB.update_by_token
def update_by_token(self, token, **kwargs): """ Updated the session info. Any type of known token can be used :param token: code/access token/refresh token/... :param kwargs: Key word arguements """ _sid = self.handler.sid(token) return self.update(_sid, **kwargs)
python
def update_by_token(self, token, **kwargs): """ Updated the session info. Any type of known token can be used :param token: code/access token/refresh token/... :param kwargs: Key word arguements """ _sid = self.handler.sid(token) return self.update(_sid, **kwargs)
[ "def", "update_by_token", "(", "self", ",", "token", ",", "*", "*", "kwargs", ")", ":", "_sid", "=", "self", ".", "handler", ".", "sid", "(", "token", ")", "return", "self", ".", "update", "(", "_sid", ",", "*", "*", "kwargs", ")" ]
Updated the session info. Any type of known token can be used :param token: code/access token/refresh token/... :param kwargs: Key word arguements
[ "Updated", "the", "session", "info", ".", "Any", "type", "of", "known", "token", "can", "be", "used" ]
6c1d729d51bfb6332816117fe476073df7a1d823
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/session.py#L216-L224
train
49,427
IdentityPython/oidcendpoint
src/oidcendpoint/session.py
SessionDB.replace_token
def replace_token(self, sid, sinfo, token_type): """ Replace an old refresh_token with a new one :param sid: session ID :param sinfo: session info :param token_type: What type of tokens should be replaced :return: Updated session info """ try: # Mint a new one refresh_token = self.handler[token_type](sid, sinfo=sinfo) except KeyError: pass else: # blacklist the old is there is one try: self.handler[token_type].black_list(sinfo[token_type]) except KeyError: pass sinfo[token_type] = refresh_token return sinfo
python
def replace_token(self, sid, sinfo, token_type): """ Replace an old refresh_token with a new one :param sid: session ID :param sinfo: session info :param token_type: What type of tokens should be replaced :return: Updated session info """ try: # Mint a new one refresh_token = self.handler[token_type](sid, sinfo=sinfo) except KeyError: pass else: # blacklist the old is there is one try: self.handler[token_type].black_list(sinfo[token_type]) except KeyError: pass sinfo[token_type] = refresh_token return sinfo
[ "def", "replace_token", "(", "self", ",", "sid", ",", "sinfo", ",", "token_type", ")", ":", "try", ":", "# Mint a new one", "refresh_token", "=", "self", ".", "handler", "[", "token_type", "]", "(", "sid", ",", "sinfo", "=", "sinfo", ")", "except", "KeyE...
Replace an old refresh_token with a new one :param sid: session ID :param sinfo: session info :param token_type: What type of tokens should be replaced :return: Updated session info
[ "Replace", "an", "old", "refresh_token", "with", "a", "new", "one" ]
6c1d729d51bfb6332816117fe476073df7a1d823
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/session.py#L268-L291
train
49,428
IdentityPython/oidcendpoint
src/oidcendpoint/session.py
SessionDB.refresh_token
def refresh_token(self, token, new_refresh=False): """ Issue a new access token using a valid refresh token :param token: Refresh token :param new_refresh: Whether a new refresh token should be minted or not :return: Dictionary with session info :raises: ExpiredToken for invalid refresh token WrongTokenType for wrong token type """ try: _tinfo = self.handler['refresh_token'].info(token) except KeyError: return False if is_expired(int(_tinfo['exp'])) or _tinfo['black_listed']: raise ExpiredToken() _sid = _tinfo['sid'] session_info = self[_sid] session_info = self.replace_token(_sid, session_info, 'access_token') session_info["token_type"] = self.handler['access_token'].token_type if new_refresh: session_info = self.replace_token(_sid, session_info, 'refresh_token') self[_sid] = session_info return session_info
python
def refresh_token(self, token, new_refresh=False): """ Issue a new access token using a valid refresh token :param token: Refresh token :param new_refresh: Whether a new refresh token should be minted or not :return: Dictionary with session info :raises: ExpiredToken for invalid refresh token WrongTokenType for wrong token type """ try: _tinfo = self.handler['refresh_token'].info(token) except KeyError: return False if is_expired(int(_tinfo['exp'])) or _tinfo['black_listed']: raise ExpiredToken() _sid = _tinfo['sid'] session_info = self[_sid] session_info = self.replace_token(_sid, session_info, 'access_token') session_info["token_type"] = self.handler['access_token'].token_type if new_refresh: session_info = self.replace_token(_sid, session_info, 'refresh_token') self[_sid] = session_info return session_info
[ "def", "refresh_token", "(", "self", ",", "token", ",", "new_refresh", "=", "False", ")", ":", "try", ":", "_tinfo", "=", "self", ".", "handler", "[", "'refresh_token'", "]", ".", "info", "(", "token", ")", "except", "KeyError", ":", "return", "False", ...
Issue a new access token using a valid refresh token :param token: Refresh token :param new_refresh: Whether a new refresh token should be minted or not :return: Dictionary with session info :raises: ExpiredToken for invalid refresh token WrongTokenType for wrong token type
[ "Issue", "a", "new", "access", "token", "using", "a", "valid", "refresh", "token" ]
6c1d729d51bfb6332816117fe476073df7a1d823
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/session.py#L356-L387
train
49,429
IdentityPython/oidcendpoint
src/oidcendpoint/session.py
SessionDB.is_token_valid
def is_token_valid(self, token): """ Checks validity of a given token :param token: Access or refresh token """ try: _tinfo = self.handler.info(token) except KeyError: return False if is_expired(int(_tinfo['exp'])) or _tinfo['black_listed']: return False # Dependent on what state the session is in. session_info = self[_tinfo['sid']] if session_info["oauth_state"] == "authz": if _tinfo['handler'] != self.handler['code']: return False elif session_info["oauth_state"] == "token": if _tinfo['handler'] != self.handler['access_token']: return False return True
python
def is_token_valid(self, token): """ Checks validity of a given token :param token: Access or refresh token """ try: _tinfo = self.handler.info(token) except KeyError: return False if is_expired(int(_tinfo['exp'])) or _tinfo['black_listed']: return False # Dependent on what state the session is in. session_info = self[_tinfo['sid']] if session_info["oauth_state"] == "authz": if _tinfo['handler'] != self.handler['code']: return False elif session_info["oauth_state"] == "token": if _tinfo['handler'] != self.handler['access_token']: return False return True
[ "def", "is_token_valid", "(", "self", ",", "token", ")", ":", "try", ":", "_tinfo", "=", "self", ".", "handler", ".", "info", "(", "token", ")", "except", "KeyError", ":", "return", "False", "if", "is_expired", "(", "int", "(", "_tinfo", "[", "'exp'", ...
Checks validity of a given token :param token: Access or refresh token
[ "Checks", "validity", "of", "a", "given", "token" ]
6c1d729d51bfb6332816117fe476073df7a1d823
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/session.py#L389-L414
train
49,430
IdentityPython/oidcendpoint
src/oidcendpoint/session.py
SessionDB.revoke_token
def revoke_token(self, token, token_type=''): """ Revokes access token :param token: access token """ if token_type: self.handler[token_type].black_list(token) else: self.handler.black_list(token)
python
def revoke_token(self, token, token_type=''): """ Revokes access token :param token: access token """ if token_type: self.handler[token_type].black_list(token) else: self.handler.black_list(token)
[ "def", "revoke_token", "(", "self", ",", "token", ",", "token_type", "=", "''", ")", ":", "if", "token_type", ":", "self", ".", "handler", "[", "token_type", "]", ".", "black_list", "(", "token", ")", "else", ":", "self", ".", "handler", ".", "black_li...
Revokes access token :param token: access token
[ "Revokes", "access", "token" ]
6c1d729d51bfb6332816117fe476073df7a1d823
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/session.py#L416-L425
train
49,431
IdentityPython/oidcendpoint
src/oidcendpoint/session.py
SessionDB.revoke_session
def revoke_session(self, sid='', token=''): """ Mark session as revoked but also explicitly revoke all issued tokens :param token: any token connected to the session :param sid: Session identifier """ if not sid: if token: sid = self.handler.sid(token) else: raise ValueError('Need one of "sid" or "token"') for typ in ['access_token', 'refresh_token', 'code']: try: self.revoke_token(self[sid][typ], typ) except KeyError: # If no such token has been issued pass self.update(sid, revoked=True)
python
def revoke_session(self, sid='', token=''): """ Mark session as revoked but also explicitly revoke all issued tokens :param token: any token connected to the session :param sid: Session identifier """ if not sid: if token: sid = self.handler.sid(token) else: raise ValueError('Need one of "sid" or "token"') for typ in ['access_token', 'refresh_token', 'code']: try: self.revoke_token(self[sid][typ], typ) except KeyError: # If no such token has been issued pass self.update(sid, revoked=True)
[ "def", "revoke_session", "(", "self", ",", "sid", "=", "''", ",", "token", "=", "''", ")", ":", "if", "not", "sid", ":", "if", "token", ":", "sid", "=", "self", ".", "handler", ".", "sid", "(", "token", ")", "else", ":", "raise", "ValueError", "(...
Mark session as revoked but also explicitly revoke all issued tokens :param token: any token connected to the session :param sid: Session identifier
[ "Mark", "session", "as", "revoked", "but", "also", "explicitly", "revoke", "all", "issued", "tokens" ]
6c1d729d51bfb6332816117fe476073df7a1d823
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/session.py#L435-L454
train
49,432
IdentityPython/oidcendpoint
src/oidcendpoint/user_authn/authn_context.py
pick_auth
def pick_auth(endpoint_context, areq, all=False): """ Pick authentication method :param areq: AuthorizationRequest instance :return: A dictionary with the authentication method and its authn class ref """ acrs = [] try: if len(endpoint_context.authn_broker) == 1: return endpoint_context.authn_broker.default() if "acr_values" in areq: if not isinstance(areq["acr_values"], list): areq["acr_values"] = [areq["acr_values"]] acrs = areq["acr_values"] else: # same as any try: acrs = areq["claims"]["id_token"]["acr"]["values"] except KeyError: try: _ith = areq[verified_claim_name("id_token_hint")] except KeyError: try: _hint = areq['login_hint'] except KeyError: pass else: if endpoint_context.login_hint2acrs: acrs = endpoint_context.login_hint2acrs(_hint) else: try: acrs = [_ith['acr']] except KeyError: pass if not acrs: return endpoint_context.authn_broker.default() for acr in acrs: res = endpoint_context.authn_broker.pick(acr) logger.debug("Picked AuthN broker for ACR %s: %s" % ( str(acr), str(res))) if res: if all: return res else: # Return the first guess by pick. return res[0] except KeyError as exc: logger.debug( "An error occurred while picking the authN broker: %s" % str(exc)) return None
python
def pick_auth(endpoint_context, areq, all=False): """ Pick authentication method :param areq: AuthorizationRequest instance :return: A dictionary with the authentication method and its authn class ref """ acrs = [] try: if len(endpoint_context.authn_broker) == 1: return endpoint_context.authn_broker.default() if "acr_values" in areq: if not isinstance(areq["acr_values"], list): areq["acr_values"] = [areq["acr_values"]] acrs = areq["acr_values"] else: # same as any try: acrs = areq["claims"]["id_token"]["acr"]["values"] except KeyError: try: _ith = areq[verified_claim_name("id_token_hint")] except KeyError: try: _hint = areq['login_hint'] except KeyError: pass else: if endpoint_context.login_hint2acrs: acrs = endpoint_context.login_hint2acrs(_hint) else: try: acrs = [_ith['acr']] except KeyError: pass if not acrs: return endpoint_context.authn_broker.default() for acr in acrs: res = endpoint_context.authn_broker.pick(acr) logger.debug("Picked AuthN broker for ACR %s: %s" % ( str(acr), str(res))) if res: if all: return res else: # Return the first guess by pick. return res[0] except KeyError as exc: logger.debug( "An error occurred while picking the authN broker: %s" % str(exc)) return None
[ "def", "pick_auth", "(", "endpoint_context", ",", "areq", ",", "all", "=", "False", ")", ":", "acrs", "=", "[", "]", "try", ":", "if", "len", "(", "endpoint_context", ".", "authn_broker", ")", "==", "1", ":", "return", "endpoint_context", ".", "authn_bro...
Pick authentication method :param areq: AuthorizationRequest instance :return: A dictionary with the authentication method and its authn class ref
[ "Pick", "authentication", "method" ]
6c1d729d51bfb6332816117fe476073df7a1d823
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/user_authn/authn_context.py#L117-L172
train
49,433
IdentityPython/oidcendpoint
src/oidcendpoint/user_authn/authn_context.py
AuthnBroker.get_method
def get_method(self, cls_name): """ Generator that returns all registered authenticators based on a specific authentication class. :param acr: Authentication Class :return: generator """ for id, spec in self.db.items(): if spec["method"].__class__.__name__ == cls_name: yield spec["method"]
python
def get_method(self, cls_name): """ Generator that returns all registered authenticators based on a specific authentication class. :param acr: Authentication Class :return: generator """ for id, spec in self.db.items(): if spec["method"].__class__.__name__ == cls_name: yield spec["method"]
[ "def", "get_method", "(", "self", ",", "cls_name", ")", ":", "for", "id", ",", "spec", "in", "self", ".", "db", ".", "items", "(", ")", ":", "if", "spec", "[", "\"method\"", "]", ".", "__class__", ".", "__name__", "==", "cls_name", ":", "yield", "s...
Generator that returns all registered authenticators based on a specific authentication class. :param acr: Authentication Class :return: generator
[ "Generator", "that", "returns", "all", "registered", "authenticators", "based", "on", "a", "specific", "authentication", "class", "." ]
6c1d729d51bfb6332816117fe476073df7a1d823
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/user_authn/authn_context.py#L63-L73
train
49,434
IdentityPython/oidcendpoint
src/oidcendpoint/user_authn/authn_context.py
AuthnBroker.pick
def pick(self, acr=None): """ Given the authentication context find zero or more authn methods that could be used. :param acr: The authentication class reference requested :return: An URL """ if acr is None: # Anything else doesn't make sense return self.db.values() else: return self._pick_by_class_ref(acr)
python
def pick(self, acr=None): """ Given the authentication context find zero or more authn methods that could be used. :param acr: The authentication class reference requested :return: An URL """ if acr is None: # Anything else doesn't make sense return self.db.values() else: return self._pick_by_class_ref(acr)
[ "def", "pick", "(", "self", ",", "acr", "=", "None", ")", ":", "if", "acr", "is", "None", ":", "# Anything else doesn't make sense", "return", "self", ".", "db", ".", "values", "(", ")", "else", ":", "return", "self", ".", "_pick_by_class_ref", "(", "acr...
Given the authentication context find zero or more authn methods that could be used. :param acr: The authentication class reference requested :return: An URL
[ "Given", "the", "authentication", "context", "find", "zero", "or", "more", "authn", "methods", "that", "could", "be", "used", "." ]
6c1d729d51bfb6332816117fe476073df7a1d823
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/user_authn/authn_context.py#L78-L91
train
49,435
guaix-ucm/pyemir
emirdrp/util/sextractor.py
SExtractor.update_config
def update_config(self): """ Update the configuration files according to the current in-memory SExtractor configuration. """ # -- Write filter configuration file # First check the filter itself filter = self.config['FILTER_MASK'] rows = len(filter) cols = len(filter[0]) # May raise ValueError, OK filter_f = __builtin__.open(self.config['FILTER_NAME'], 'w') filter_f.write("CONV NORM\n") filter_f.write("# %dx%d Generated from sextractor.py module.\n" % (rows, cols)) for row in filter: filter_f.write(" ".join(map(repr, row))) filter_f.write("\n") filter_f.close() # -- Write parameter list file parameters_f = __builtin__.open(self.config['PARAMETERS_NAME'], 'w') for parameter in self.config['PARAMETERS_LIST']: print(parameter, file=parameters_f) parameters_f.close() # -- Write NNW configuration file nnw_f = __builtin__.open(self.config['STARNNW_NAME'], 'w') nnw_f.write(nnw_config) nnw_f.close() # -- Write main configuration file main_f = __builtin__.open(self.config['CONFIG_FILE'], 'w') for key in self.config.keys(): if (key in SExtractor._SE_config_special_keys): continue if (key == "PHOT_AUTOPARAMS"): # tuple instead of a single value value = " ".join(map(str, self.config[key])) else: value = str(self.config[key]) print(("%-16s %-16s # %s" % (key, value, SExtractor._SE_config[key]['comment'])), file=main_f) main_f.close()
python
def update_config(self): """ Update the configuration files according to the current in-memory SExtractor configuration. """ # -- Write filter configuration file # First check the filter itself filter = self.config['FILTER_MASK'] rows = len(filter) cols = len(filter[0]) # May raise ValueError, OK filter_f = __builtin__.open(self.config['FILTER_NAME'], 'w') filter_f.write("CONV NORM\n") filter_f.write("# %dx%d Generated from sextractor.py module.\n" % (rows, cols)) for row in filter: filter_f.write(" ".join(map(repr, row))) filter_f.write("\n") filter_f.close() # -- Write parameter list file parameters_f = __builtin__.open(self.config['PARAMETERS_NAME'], 'w') for parameter in self.config['PARAMETERS_LIST']: print(parameter, file=parameters_f) parameters_f.close() # -- Write NNW configuration file nnw_f = __builtin__.open(self.config['STARNNW_NAME'], 'w') nnw_f.write(nnw_config) nnw_f.close() # -- Write main configuration file main_f = __builtin__.open(self.config['CONFIG_FILE'], 'w') for key in self.config.keys(): if (key in SExtractor._SE_config_special_keys): continue if (key == "PHOT_AUTOPARAMS"): # tuple instead of a single value value = " ".join(map(str, self.config[key])) else: value = str(self.config[key]) print(("%-16s %-16s # %s" % (key, value, SExtractor._SE_config[key]['comment'])), file=main_f) main_f.close()
[ "def", "update_config", "(", "self", ")", ":", "# -- Write filter configuration file", "# First check the filter itself", "filter", "=", "self", ".", "config", "[", "'FILTER_MASK'", "]", "rows", "=", "len", "(", "filter", ")", "cols", "=", "len", "(", "filter", ...
Update the configuration files according to the current in-memory SExtractor configuration.
[ "Update", "the", "configuration", "files", "according", "to", "the", "current", "in", "-", "memory", "SExtractor", "configuration", "." ]
fef6bbabcb13f80123cafd1800a0f508a3c21702
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/util/sextractor.py#L478-L531
train
49,436
guaix-ucm/pyemir
emirdrp/util/sextractor.py
SExtractor.run
def run(self, file, updateconfig=True, clean=False, path=None): """ Run SExtractor. If updateconfig is True (default), the configuration files will be updated before running SExtractor. If clean is True (default: False), configuration files (if any) will be deleted after SExtractor terminates. """ if updateconfig: self.update_config() # Try to find SExtractor program # This will raise an exception if it failed self.program, self.version = self.setup(path) commandline = ( self.program + " -c " + self.config['CONFIG_FILE'] + " " + file) # print commandline rcode = os.system(commandline) if (rcode): raise SExtractorException( "SExtractor command [%s] failed." % commandline ) if clean: self.clean()
python
def run(self, file, updateconfig=True, clean=False, path=None): """ Run SExtractor. If updateconfig is True (default), the configuration files will be updated before running SExtractor. If clean is True (default: False), configuration files (if any) will be deleted after SExtractor terminates. """ if updateconfig: self.update_config() # Try to find SExtractor program # This will raise an exception if it failed self.program, self.version = self.setup(path) commandline = ( self.program + " -c " + self.config['CONFIG_FILE'] + " " + file) # print commandline rcode = os.system(commandline) if (rcode): raise SExtractorException( "SExtractor command [%s] failed." % commandline ) if clean: self.clean()
[ "def", "run", "(", "self", ",", "file", ",", "updateconfig", "=", "True", ",", "clean", "=", "False", ",", "path", "=", "None", ")", ":", "if", "updateconfig", ":", "self", ".", "update_config", "(", ")", "# Try to find SExtractor program", "# This will rais...
Run SExtractor. If updateconfig is True (default), the configuration files will be updated before running SExtractor. If clean is True (default: False), configuration files (if any) will be deleted after SExtractor terminates.
[ "Run", "SExtractor", "." ]
fef6bbabcb13f80123cafd1800a0f508a3c21702
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/util/sextractor.py#L533-L565
train
49,437
guaix-ucm/pyemir
emirdrp/processing/wcs.py
offsets_from_wcs
def offsets_from_wcs(frames, pixref): '''Compute offsets between frames using WCS information. :parameter frames: sequence of FITS filenames or file descriptors :parameter pixref: numpy array used as reference pixel The sky world coordinates are computed on *pixref* using the WCS of the first frame in the sequence. Then, the pixel coordinates of the reference sky world-coordinates are computed for the rest of the frames. The results is a numpy array with the difference between the computed pixel value and the reference pixel. The first line of the array is [0, 0], being the offset from the first image to itself. ''' result = numpy.zeros((len(frames), pixref.shape[1])) with frames[0].open() as hdulist: wcsh = wcs.WCS(hdulist[0].header) skyref = wcsh.wcs_pix2world(pixref, 1) for idx, frame in enumerate(frames[1:]): with frame.open() as hdulist: wcsh = wcs.WCS(hdulist[0].header) pixval = wcsh.wcs_world2pix(skyref, 1) result[idx + 1] = -(pixval[0] - pixref[0]) return result
python
def offsets_from_wcs(frames, pixref): '''Compute offsets between frames using WCS information. :parameter frames: sequence of FITS filenames or file descriptors :parameter pixref: numpy array used as reference pixel The sky world coordinates are computed on *pixref* using the WCS of the first frame in the sequence. Then, the pixel coordinates of the reference sky world-coordinates are computed for the rest of the frames. The results is a numpy array with the difference between the computed pixel value and the reference pixel. The first line of the array is [0, 0], being the offset from the first image to itself. ''' result = numpy.zeros((len(frames), pixref.shape[1])) with frames[0].open() as hdulist: wcsh = wcs.WCS(hdulist[0].header) skyref = wcsh.wcs_pix2world(pixref, 1) for idx, frame in enumerate(frames[1:]): with frame.open() as hdulist: wcsh = wcs.WCS(hdulist[0].header) pixval = wcsh.wcs_world2pix(skyref, 1) result[idx + 1] = -(pixval[0] - pixref[0]) return result
[ "def", "offsets_from_wcs", "(", "frames", ",", "pixref", ")", ":", "result", "=", "numpy", ".", "zeros", "(", "(", "len", "(", "frames", ")", ",", "pixref", ".", "shape", "[", "1", "]", ")", ")", "with", "frames", "[", "0", "]", ".", "open", "(",...
Compute offsets between frames using WCS information. :parameter frames: sequence of FITS filenames or file descriptors :parameter pixref: numpy array used as reference pixel The sky world coordinates are computed on *pixref* using the WCS of the first frame in the sequence. Then, the pixel coordinates of the reference sky world-coordinates are computed for the rest of the frames. The results is a numpy array with the difference between the computed pixel value and the reference pixel. The first line of the array is [0, 0], being the offset from the first image to itself.
[ "Compute", "offsets", "between", "frames", "using", "WCS", "information", "." ]
fef6bbabcb13f80123cafd1800a0f508a3c21702
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wcs.py#L24-L54
train
49,438
BeyondTheClouds/enoslib
enoslib/infra/enos_g5k/api.py
_check_deployed_nodes
def _check_deployed_nodes(nodes): """This is borrowed from execo.""" deployed = [] undeployed = [] cmd = "! (mount | grep -E '^/dev/[[:alpha:]]+2 on / ')" deployed_check = get_execo_remote( cmd, nodes, DEFAULT_CONN_PARAMS) for p in deployed_check.processes: p.nolog_exit_code = True p.nolog_timeout = True p.nolog_error = True p.timeout = 10 deployed_check.run() for p in deployed_check.processes: if p.ok: deployed.append(p.host.address) else: undeployed.append(p.host.address) return deployed, undeployed
python
def _check_deployed_nodes(nodes): """This is borrowed from execo.""" deployed = [] undeployed = [] cmd = "! (mount | grep -E '^/dev/[[:alpha:]]+2 on / ')" deployed_check = get_execo_remote( cmd, nodes, DEFAULT_CONN_PARAMS) for p in deployed_check.processes: p.nolog_exit_code = True p.nolog_timeout = True p.nolog_error = True p.timeout = 10 deployed_check.run() for p in deployed_check.processes: if p.ok: deployed.append(p.host.address) else: undeployed.append(p.host.address) return deployed, undeployed
[ "def", "_check_deployed_nodes", "(", "nodes", ")", ":", "deployed", "=", "[", "]", "undeployed", "=", "[", "]", "cmd", "=", "\"! (mount | grep -E '^/dev/[[:alpha:]]+2 on / ')\"", "deployed_check", "=", "get_execo_remote", "(", "cmd", ",", "nodes", ",", "DEFAULT_CONN...
This is borrowed from execo.
[ "This", "is", "borrowed", "from", "execo", "." ]
fb00be58e56a7848cfe482187d659744919fe2f7
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_g5k/api.py#L30-L54
train
49,439
BeyondTheClouds/enoslib
enoslib/infra/enos_g5k/api.py
Resources.get_networks
def get_networks(self): """Get the networks assoiated with the resource description. Returns list of tuple roles, network """ networks = self.c_resources["networks"] result = [] for net in networks: _c_network = net.get("_c_network") if _c_network is None: continue roles = utils.get_roles_as_list(net) result.append((roles, _c_network)) return result
python
def get_networks(self): """Get the networks assoiated with the resource description. Returns list of tuple roles, network """ networks = self.c_resources["networks"] result = [] for net in networks: _c_network = net.get("_c_network") if _c_network is None: continue roles = utils.get_roles_as_list(net) result.append((roles, _c_network)) return result
[ "def", "get_networks", "(", "self", ")", ":", "networks", "=", "self", ".", "c_resources", "[", "\"networks\"", "]", "result", "=", "[", "]", "for", "net", "in", "networks", ":", "_c_network", "=", "net", ".", "get", "(", "\"_c_network\"", ")", "if", "...
Get the networks assoiated with the resource description. Returns list of tuple roles, network
[ "Get", "the", "networks", "assoiated", "with", "the", "resource", "description", "." ]
fb00be58e56a7848cfe482187d659744919fe2f7
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_g5k/api.py#L167-L181
train
49,440
BeyondTheClouds/enoslib
enoslib/infra/enos_g5k/api.py
Resources.get_roles
def get_roles(self): """Get the roles associated with the hosts. Returns dict of role -> [host] """ machines = self.c_resources["machines"] result = {} for desc in machines: roles = utils.get_roles_as_list(desc) hosts = self._denormalize(desc) for role in roles: result.setdefault(role, []) result[role].extend(hosts) return result
python
def get_roles(self): """Get the roles associated with the hosts. Returns dict of role -> [host] """ machines = self.c_resources["machines"] result = {} for desc in machines: roles = utils.get_roles_as_list(desc) hosts = self._denormalize(desc) for role in roles: result.setdefault(role, []) result[role].extend(hosts) return result
[ "def", "get_roles", "(", "self", ")", ":", "machines", "=", "self", ".", "c_resources", "[", "\"machines\"", "]", "result", "=", "{", "}", "for", "desc", "in", "machines", ":", "roles", "=", "utils", ".", "get_roles_as_list", "(", "desc", ")", "hosts", ...
Get the roles associated with the hosts. Returns dict of role -> [host]
[ "Get", "the", "roles", "associated", "with", "the", "hosts", "." ]
fb00be58e56a7848cfe482187d659744919fe2f7
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_g5k/api.py#L183-L198
train
49,441
Fuyukai/asyncwebsockets
asyncwebsockets/websocket.py
Websocket.start_client
async def start_client(self, sock: anyio.abc.SocketStream, addr, path: str, headers: Optional[List] = None, subprotocols: Optional[List[str]] = None): """Start a client WS connection on this socket. Returns: the AcceptConnection message. """ self._sock = sock self._connection = WSConnection(ConnectionType.CLIENT) if headers is None: headers = [] if subprotocols is None: subprotocols = [] data = self._connection.send( Request( host=addr[0], target=path, extra_headers=headers, subprotocols=subprotocols)) await self._sock.send_all(data) assert self._scope is None self._scope = True try: event = await self._next_event() if not isinstance(event, AcceptConnection): raise ConnectionError("Failed to establish a connection", event) return event finally: self._scope = None
python
async def start_client(self, sock: anyio.abc.SocketStream, addr, path: str, headers: Optional[List] = None, subprotocols: Optional[List[str]] = None): """Start a client WS connection on this socket. Returns: the AcceptConnection message. """ self._sock = sock self._connection = WSConnection(ConnectionType.CLIENT) if headers is None: headers = [] if subprotocols is None: subprotocols = [] data = self._connection.send( Request( host=addr[0], target=path, extra_headers=headers, subprotocols=subprotocols)) await self._sock.send_all(data) assert self._scope is None self._scope = True try: event = await self._next_event() if not isinstance(event, AcceptConnection): raise ConnectionError("Failed to establish a connection", event) return event finally: self._scope = None
[ "async", "def", "start_client", "(", "self", ",", "sock", ":", "anyio", ".", "abc", ".", "SocketStream", ",", "addr", ",", "path", ":", "str", ",", "headers", ":", "Optional", "[", "List", "]", "=", "None", ",", "subprotocols", ":", "Optional", "[", ...
Start a client WS connection on this socket. Returns: the AcceptConnection message.
[ "Start", "a", "client", "WS", "connection", "on", "this", "socket", "." ]
e33e75fd51ce5ae0feac244e8407d2672c5b4745
https://github.com/Fuyukai/asyncwebsockets/blob/e33e75fd51ce5ae0feac244e8407d2672c5b4745/asyncwebsockets/websocket.py#L42-L75
train
49,442
Fuyukai/asyncwebsockets
asyncwebsockets/websocket.py
Websocket.start_server
async def start_server(self, sock: anyio.abc.SocketStream, filter=None): # pylint: disable=W0622 """Start a server WS connection on this socket. Filter: an async callable that gets passed the initial Request. It may return an AcceptConnection message, a bool, or a string (the subprotocol to use). Returns: the Request message. """ assert self._scope is None self._scope = True self._sock = sock self._connection = WSConnection(ConnectionType.SERVER) try: event = await self._next_event() if not isinstance(event, Request): raise ConnectionError("Failed to establish a connection", event) msg = None if filter is not None: msg = await filter(event) if not msg: msg = RejectConnection() elif msg is True: msg = None elif isinstance(msg, str): msg = AcceptConnection(subprotocol=msg) if not msg: msg = AcceptConnection(subprotocol=event.subprotocols[0]) data = self._connection.send(msg) await self._sock.send_all(data) if not isinstance(msg, AcceptConnection): raise ConnectionError("Not accepted", msg) finally: self._scope = None
python
async def start_server(self, sock: anyio.abc.SocketStream, filter=None): # pylint: disable=W0622 """Start a server WS connection on this socket. Filter: an async callable that gets passed the initial Request. It may return an AcceptConnection message, a bool, or a string (the subprotocol to use). Returns: the Request message. """ assert self._scope is None self._scope = True self._sock = sock self._connection = WSConnection(ConnectionType.SERVER) try: event = await self._next_event() if not isinstance(event, Request): raise ConnectionError("Failed to establish a connection", event) msg = None if filter is not None: msg = await filter(event) if not msg: msg = RejectConnection() elif msg is True: msg = None elif isinstance(msg, str): msg = AcceptConnection(subprotocol=msg) if not msg: msg = AcceptConnection(subprotocol=event.subprotocols[0]) data = self._connection.send(msg) await self._sock.send_all(data) if not isinstance(msg, AcceptConnection): raise ConnectionError("Not accepted", msg) finally: self._scope = None
[ "async", "def", "start_server", "(", "self", ",", "sock", ":", "anyio", ".", "abc", ".", "SocketStream", ",", "filter", "=", "None", ")", ":", "# pylint: disable=W0622", "assert", "self", ".", "_scope", "is", "None", "self", ".", "_scope", "=", "True", "...
Start a server WS connection on this socket. Filter: an async callable that gets passed the initial Request. It may return an AcceptConnection message, a bool, or a string (the subprotocol to use). Returns: the Request message.
[ "Start", "a", "server", "WS", "connection", "on", "this", "socket", "." ]
e33e75fd51ce5ae0feac244e8407d2672c5b4745
https://github.com/Fuyukai/asyncwebsockets/blob/e33e75fd51ce5ae0feac244e8407d2672c5b4745/asyncwebsockets/websocket.py#L77-L111
train
49,443
Fuyukai/asyncwebsockets
asyncwebsockets/websocket.py
Websocket._next_event
async def _next_event(self): """ Gets the next event. """ while True: for event in self._connection.events(): if isinstance(event, Message): # check if we need to buffer if event.message_finished: return self._wrap_data(self._gather_buffers(event)) self._buffer(event) break # exit for loop else: return event data = await self._sock.receive_some(4096) if not data: return CloseConnection(code=500, reason="Socket closed") self._connection.receive_data(data)
python
async def _next_event(self): """ Gets the next event. """ while True: for event in self._connection.events(): if isinstance(event, Message): # check if we need to buffer if event.message_finished: return self._wrap_data(self._gather_buffers(event)) self._buffer(event) break # exit for loop else: return event data = await self._sock.receive_some(4096) if not data: return CloseConnection(code=500, reason="Socket closed") self._connection.receive_data(data)
[ "async", "def", "_next_event", "(", "self", ")", ":", "while", "True", ":", "for", "event", "in", "self", ".", "_connection", ".", "events", "(", ")", ":", "if", "isinstance", "(", "event", ",", "Message", ")", ":", "# check if we need to buffer", "if", ...
Gets the next event.
[ "Gets", "the", "next", "event", "." ]
e33e75fd51ce5ae0feac244e8407d2672c5b4745
https://github.com/Fuyukai/asyncwebsockets/blob/e33e75fd51ce5ae0feac244e8407d2672c5b4745/asyncwebsockets/websocket.py#L113-L131
train
49,444
Fuyukai/asyncwebsockets
asyncwebsockets/websocket.py
Websocket.close
async def close(self, code: int = 1006, reason: str = "Connection closed"): """ Closes the websocket. """ if self._closed: return self._closed = True if self._scope is not None: await self._scope.cancel() # cancel any outstanding listeners data = self._connection.send(CloseConnection(code=code, reason=reason)) await self._sock.send_all(data) # No, we don't wait for the correct reply await self._sock.close()
python
async def close(self, code: int = 1006, reason: str = "Connection closed"): """ Closes the websocket. """ if self._closed: return self._closed = True if self._scope is not None: await self._scope.cancel() # cancel any outstanding listeners data = self._connection.send(CloseConnection(code=code, reason=reason)) await self._sock.send_all(data) # No, we don't wait for the correct reply await self._sock.close()
[ "async", "def", "close", "(", "self", ",", "code", ":", "int", "=", "1006", ",", "reason", ":", "str", "=", "\"Connection closed\"", ")", ":", "if", "self", ".", "_closed", ":", "return", "self", ".", "_closed", "=", "True", "if", "self", ".", "_scop...
Closes the websocket.
[ "Closes", "the", "websocket", "." ]
e33e75fd51ce5ae0feac244e8407d2672c5b4745
https://github.com/Fuyukai/asyncwebsockets/blob/e33e75fd51ce5ae0feac244e8407d2672c5b4745/asyncwebsockets/websocket.py#L133-L150
train
49,445
Fuyukai/asyncwebsockets
asyncwebsockets/websocket.py
Websocket.send
async def send(self, data: Union[bytes, str], final: bool = True): """ Sends some data down the connection. """ MsgType = TextMessage if isinstance(data, str) else BytesMessage data = MsgType(data=data, message_finished=final) data = self._connection.send(event=data) await self._sock.send_all(data)
python
async def send(self, data: Union[bytes, str], final: bool = True): """ Sends some data down the connection. """ MsgType = TextMessage if isinstance(data, str) else BytesMessage data = MsgType(data=data, message_finished=final) data = self._connection.send(event=data) await self._sock.send_all(data)
[ "async", "def", "send", "(", "self", ",", "data", ":", "Union", "[", "bytes", ",", "str", "]", ",", "final", ":", "bool", "=", "True", ")", ":", "MsgType", "=", "TextMessage", "if", "isinstance", "(", "data", ",", "str", ")", "else", "BytesMessage", ...
Sends some data down the connection.
[ "Sends", "some", "data", "down", "the", "connection", "." ]
e33e75fd51ce5ae0feac244e8407d2672c5b4745
https://github.com/Fuyukai/asyncwebsockets/blob/e33e75fd51ce5ae0feac244e8407d2672c5b4745/asyncwebsockets/websocket.py#L152-L159
train
49,446
Fuyukai/asyncwebsockets
asyncwebsockets/websocket.py
Websocket._buffer
def _buffer(self, event: Message): """ Buffers an event, if applicable. """ if isinstance(event, BytesMessage): self._byte_buffer.write(event.data) elif isinstance(event, TextMessage): self._string_buffer.write(event.data)
python
def _buffer(self, event: Message): """ Buffers an event, if applicable. """ if isinstance(event, BytesMessage): self._byte_buffer.write(event.data) elif isinstance(event, TextMessage): self._string_buffer.write(event.data)
[ "def", "_buffer", "(", "self", ",", "event", ":", "Message", ")", ":", "if", "isinstance", "(", "event", ",", "BytesMessage", ")", ":", "self", ".", "_byte_buffer", ".", "write", "(", "event", ".", "data", ")", "elif", "isinstance", "(", "event", ",", ...
Buffers an event, if applicable.
[ "Buffers", "an", "event", "if", "applicable", "." ]
e33e75fd51ce5ae0feac244e8407d2672c5b4745
https://github.com/Fuyukai/asyncwebsockets/blob/e33e75fd51ce5ae0feac244e8407d2672c5b4745/asyncwebsockets/websocket.py#L161-L168
train
49,447
Fuyukai/asyncwebsockets
asyncwebsockets/websocket.py
Websocket._gather_buffers
def _gather_buffers(self, event: Message): """ Gathers all the data from a buffer. """ if isinstance(event, BytesMessage): buf = self._byte_buffer else: buf = self._string_buffer # yay for code shortening buf.write(event.data) buf.seek(0) data = buf.read() buf.seek(0) buf.truncate() return data
python
def _gather_buffers(self, event: Message): """ Gathers all the data from a buffer. """ if isinstance(event, BytesMessage): buf = self._byte_buffer else: buf = self._string_buffer # yay for code shortening buf.write(event.data) buf.seek(0) data = buf.read() buf.seek(0) buf.truncate() return data
[ "def", "_gather_buffers", "(", "self", ",", "event", ":", "Message", ")", ":", "if", "isinstance", "(", "event", ",", "BytesMessage", ")", ":", "buf", "=", "self", ".", "_byte_buffer", "else", ":", "buf", "=", "self", ".", "_string_buffer", "# yay for code...
Gathers all the data from a buffer.
[ "Gathers", "all", "the", "data", "from", "a", "buffer", "." ]
e33e75fd51ce5ae0feac244e8407d2672c5b4745
https://github.com/Fuyukai/asyncwebsockets/blob/e33e75fd51ce5ae0feac244e8407d2672c5b4745/asyncwebsockets/websocket.py#L170-L185
train
49,448
Fuyukai/asyncwebsockets
asyncwebsockets/websocket.py
Websocket._wrap_data
def _wrap_data(data: Union[str, bytes]): """ Wraps data into the right event. """ MsgType = TextMessage if isinstance(data, str) else BytesMessage return MsgType(data=data, frame_finished=True, message_finished=True)
python
def _wrap_data(data: Union[str, bytes]): """ Wraps data into the right event. """ MsgType = TextMessage if isinstance(data, str) else BytesMessage return MsgType(data=data, frame_finished=True, message_finished=True)
[ "def", "_wrap_data", "(", "data", ":", "Union", "[", "str", ",", "bytes", "]", ")", ":", "MsgType", "=", "TextMessage", "if", "isinstance", "(", "data", ",", "str", ")", "else", "BytesMessage", "return", "MsgType", "(", "data", "=", "data", ",", "frame...
Wraps data into the right event.
[ "Wraps", "data", "into", "the", "right", "event", "." ]
e33e75fd51ce5ae0feac244e8407d2672c5b4745
https://github.com/Fuyukai/asyncwebsockets/blob/e33e75fd51ce5ae0feac244e8407d2672c5b4745/asyncwebsockets/websocket.py#L188-L193
train
49,449
Jaymon/prom
prom/interface/sqlite.py
SQLite.get_field_SQL
def get_field_SQL(self, field_name, field): """ returns the SQL for a given field with full type information http://www.sqlite.org/datatype3.html field_name -- string -- the field's name field -- Field() -- the set options for the field return -- string -- the field type (eg, foo BOOL NOT NULL) """ field_type = "" is_pk = field.options.get('pk', False) if issubclass(field.type, bool): field_type = 'BOOLEAN' elif issubclass(field.type, long): if is_pk: field_type = 'INTEGER PRIMARY KEY' else: field_type = 'BIGINT' elif issubclass(field.type, int): field_type = 'INTEGER' if is_pk: field_type += ' PRIMARY KEY' elif issubclass(field.type, basestring): fo = field.options if field.is_ref(): # TODO -- 7-8-17 - this isn't a great way to do this, ideally the Field instance # would combine all the options of both the current field and the # foreign key field and return all those when Field.options is called # (with the current field's options taking precedence) but there are # lots of circular dependency things that happen when one field is # trying to get the schema of another field and I don't have time # to sort it all out right now ref_s = field.schema fo = ref_s.pk.options if 'size' in fo: field_type = 'CHARACTER({})'.format(fo['size']) elif 'max_size' in fo: field_type = 'VARCHAR({})'.format(fo['max_size']) else: field_type = 'TEXT' if fo.get('ignore_case', False): field_type += ' COLLATE NOCASE' if is_pk: field_type += ' PRIMARY KEY' elif issubclass(field.type, datetime.datetime): #field_type = 'DATETIME' field_type = 'TIMESTAMP' elif issubclass(field.type, datetime.date): field_type = 'DATE' elif issubclass(field.type, float): field_type = 'REAL' size = field.options.get('size', field.options.get('max_size', 0)) if size > 6: field_type = 'DOUBLE PRECISION' elif issubclass(field.type, decimal.Decimal): field_type = 'NUMERIC' elif issubclass(field.type, bytearray): field_type = 'BLOB' else: raise ValueError('unknown python type: {}'.format(field.type.__name__)) if 
field.required: field_type += ' NOT NULL' else: field_type += ' NULL' if not is_pk: if field.is_ref(): ref_s = field.schema if field.required: # strong ref, it deletes on fk row removal field_type += ' REFERENCES {} ({}) ON UPDATE CASCADE ON DELETE CASCADE'.format( ref_s, ref_s.pk.name ) else: # weak ref, it sets column to null on fk row removal field_type += ' REFERENCES {} ({}) ON UPDATE CASCADE ON DELETE SET NULL'.format( ref_s, ref_s.pk.name ) return '{} {}'.format(self._normalize_name(field_name), field_type)
python
def get_field_SQL(self, field_name, field): """ returns the SQL for a given field with full type information http://www.sqlite.org/datatype3.html field_name -- string -- the field's name field -- Field() -- the set options for the field return -- string -- the field type (eg, foo BOOL NOT NULL) """ field_type = "" is_pk = field.options.get('pk', False) if issubclass(field.type, bool): field_type = 'BOOLEAN' elif issubclass(field.type, long): if is_pk: field_type = 'INTEGER PRIMARY KEY' else: field_type = 'BIGINT' elif issubclass(field.type, int): field_type = 'INTEGER' if is_pk: field_type += ' PRIMARY KEY' elif issubclass(field.type, basestring): fo = field.options if field.is_ref(): # TODO -- 7-8-17 - this isn't a great way to do this, ideally the Field instance # would combine all the options of both the current field and the # foreign key field and return all those when Field.options is called # (with the current field's options taking precedence) but there are # lots of circular dependency things that happen when one field is # trying to get the schema of another field and I don't have time # to sort it all out right now ref_s = field.schema fo = ref_s.pk.options if 'size' in fo: field_type = 'CHARACTER({})'.format(fo['size']) elif 'max_size' in fo: field_type = 'VARCHAR({})'.format(fo['max_size']) else: field_type = 'TEXT' if fo.get('ignore_case', False): field_type += ' COLLATE NOCASE' if is_pk: field_type += ' PRIMARY KEY' elif issubclass(field.type, datetime.datetime): #field_type = 'DATETIME' field_type = 'TIMESTAMP' elif issubclass(field.type, datetime.date): field_type = 'DATE' elif issubclass(field.type, float): field_type = 'REAL' size = field.options.get('size', field.options.get('max_size', 0)) if size > 6: field_type = 'DOUBLE PRECISION' elif issubclass(field.type, decimal.Decimal): field_type = 'NUMERIC' elif issubclass(field.type, bytearray): field_type = 'BLOB' else: raise ValueError('unknown python type: {}'.format(field.type.__name__)) if 
field.required: field_type += ' NOT NULL' else: field_type += ' NULL' if not is_pk: if field.is_ref(): ref_s = field.schema if field.required: # strong ref, it deletes on fk row removal field_type += ' REFERENCES {} ({}) ON UPDATE CASCADE ON DELETE CASCADE'.format( ref_s, ref_s.pk.name ) else: # weak ref, it sets column to null on fk row removal field_type += ' REFERENCES {} ({}) ON UPDATE CASCADE ON DELETE SET NULL'.format( ref_s, ref_s.pk.name ) return '{} {}'.format(self._normalize_name(field_name), field_type)
[ "def", "get_field_SQL", "(", "self", ",", "field_name", ",", "field", ")", ":", "field_type", "=", "\"\"", "is_pk", "=", "field", ".", "options", ".", "get", "(", "'pk'", ",", "False", ")", "if", "issubclass", "(", "field", ".", "type", ",", "bool", ...
returns the SQL for a given field with full type information http://www.sqlite.org/datatype3.html field_name -- string -- the field's name field -- Field() -- the set options for the field return -- string -- the field type (eg, foo BOOL NOT NULL)
[ "returns", "the", "SQL", "for", "a", "given", "field", "with", "full", "type", "information" ]
b7ad2c259eca198da03e1e4bc7d95014c168c361
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/interface/sqlite.py#L266-L362
train
49,450
Jaymon/prom
prom/interface/sqlite.py
SQLite._get_fields
def _get_fields(self, table_name, **kwargs): """return all the fields for the given table""" ret = {} query_str = 'PRAGMA table_info({})'.format(self._normalize_table_name(table_name)) fields = self._query(query_str, **kwargs) #pout.v([dict(d) for d in fields]) query_str = 'PRAGMA foreign_key_list({})'.format(self._normalize_table_name(table_name)) fks = {f["from"]: f for f in self._query(query_str, **kwargs)} #pout.v([dict(d) for d in fks.values()]) pg_types = { "INTEGER": int, "BIGINT": long, "DOUBLE PRECISION": float, "FLOAT": float, "REAL": float, "NUMERIC": decimal.Decimal, "BOOLEAN": bool, "DATE": datetime.date, "TIMESTAMP": datetime.datetime, "CHARACTER": str, "VARCHAR": str, "TEXT": str, "BLOB": bytearray, } # the rows we can set: field_type, name, field_required, min_size, max_size, # size, unique, pk, <foreign key info> # These keys will roughly correspond with schema.Field # TODO -- we could actually use "type" to get the size because SQLite returns # a value like VARCHAR[32] for row in fields: field = { "name": row["name"], "field_required": bool(row["notnull"]) or bool(row["pk"]), "pk": bool(row["pk"]), } for tname, ty in pg_types.items(): if row["type"].startswith(tname): field["field_type"] = ty break if field["pk"] and field["field_type"] is int: # we compensate for SQLite internally setting pk to int field["field_type"] = long if row["name"] in fks: field["schema_table_name"] = fks[row["name"]]["table"] field["ref_table_name"] = fks[row["name"]]["table"] ret[field["name"]] = field return ret
python
def _get_fields(self, table_name, **kwargs): """return all the fields for the given table""" ret = {} query_str = 'PRAGMA table_info({})'.format(self._normalize_table_name(table_name)) fields = self._query(query_str, **kwargs) #pout.v([dict(d) for d in fields]) query_str = 'PRAGMA foreign_key_list({})'.format(self._normalize_table_name(table_name)) fks = {f["from"]: f for f in self._query(query_str, **kwargs)} #pout.v([dict(d) for d in fks.values()]) pg_types = { "INTEGER": int, "BIGINT": long, "DOUBLE PRECISION": float, "FLOAT": float, "REAL": float, "NUMERIC": decimal.Decimal, "BOOLEAN": bool, "DATE": datetime.date, "TIMESTAMP": datetime.datetime, "CHARACTER": str, "VARCHAR": str, "TEXT": str, "BLOB": bytearray, } # the rows we can set: field_type, name, field_required, min_size, max_size, # size, unique, pk, <foreign key info> # These keys will roughly correspond with schema.Field # TODO -- we could actually use "type" to get the size because SQLite returns # a value like VARCHAR[32] for row in fields: field = { "name": row["name"], "field_required": bool(row["notnull"]) or bool(row["pk"]), "pk": bool(row["pk"]), } for tname, ty in pg_types.items(): if row["type"].startswith(tname): field["field_type"] = ty break if field["pk"] and field["field_type"] is int: # we compensate for SQLite internally setting pk to int field["field_type"] = long if row["name"] in fks: field["schema_table_name"] = fks[row["name"]]["table"] field["ref_table_name"] = fks[row["name"]]["table"] ret[field["name"]] = field return ret
[ "def", "_get_fields", "(", "self", ",", "table_name", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "}", "query_str", "=", "'PRAGMA table_info({})'", ".", "format", "(", "self", ".", "_normalize_table_name", "(", "table_name", ")", ")", "fields", "="...
return all the fields for the given table
[ "return", "all", "the", "fields", "for", "the", "given", "table" ]
b7ad2c259eca198da03e1e4bc7d95014c168c361
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/interface/sqlite.py#L473-L527
train
49,451
Jaymon/prom
prom/interface/sqlite.py
SQLite._normalize_date_SQL
def _normalize_date_SQL(self, field_name, field_kwargs, symbol): """ allow extracting information from date http://www.sqlite.org/lang_datefunc.html """ fstrs = [] k_opts = { 'day': "CAST(strftime('%d', {}) AS integer)", 'hour': "CAST(strftime('%H', {}) AS integer)", 'doy': "CAST(strftime('%j', {}) AS integer)", # day of year 'julian_day': "strftime('%J', {})", # YYYY-MM-DD 'month': "CAST(strftime('%m', {}) AS integer)", 'minute': "CAST(strftime('%M', {}) AS integer)", 'dow': "CAST(strftime('%w', {}) AS integer)", # day of week 0 = sunday 'week': "CAST(strftime('%W', {}) AS integer)", 'year': "CAST(strftime('%Y', {}) AS integer)" } for k, v in field_kwargs.items(): fstrs.append([k_opts[k].format(self._normalize_name(field_name)), self.val_placeholder, v]) return fstrs
python
def _normalize_date_SQL(self, field_name, field_kwargs, symbol): """ allow extracting information from date http://www.sqlite.org/lang_datefunc.html """ fstrs = [] k_opts = { 'day': "CAST(strftime('%d', {}) AS integer)", 'hour': "CAST(strftime('%H', {}) AS integer)", 'doy': "CAST(strftime('%j', {}) AS integer)", # day of year 'julian_day': "strftime('%J', {})", # YYYY-MM-DD 'month': "CAST(strftime('%m', {}) AS integer)", 'minute': "CAST(strftime('%M', {}) AS integer)", 'dow': "CAST(strftime('%w', {}) AS integer)", # day of week 0 = sunday 'week': "CAST(strftime('%W', {}) AS integer)", 'year': "CAST(strftime('%Y', {}) AS integer)" } for k, v in field_kwargs.items(): fstrs.append([k_opts[k].format(self._normalize_name(field_name)), self.val_placeholder, v]) return fstrs
[ "def", "_normalize_date_SQL", "(", "self", ",", "field_name", ",", "field_kwargs", ",", "symbol", ")", ":", "fstrs", "=", "[", "]", "k_opts", "=", "{", "'day'", ":", "\"CAST(strftime('%d', {}) AS integer)\"", ",", "'hour'", ":", "\"CAST(strftime('%H', {}) AS integer...
allow extracting information from date http://www.sqlite.org/lang_datefunc.html
[ "allow", "extracting", "information", "from", "date" ]
b7ad2c259eca198da03e1e4bc7d95014c168c361
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/interface/sqlite.py#L529-L551
train
49,452
Jaymon/prom
prom/interface/sqlite.py
SQLite._normalize_sort_SQL
def _normalize_sort_SQL(self, field_name, field_vals, sort_dir_str): """ allow sorting by a set of values http://stackoverflow.com/questions/3303851/sqlite-and-custom-order-by """ fvi = None if sort_dir_str == 'ASC': fvi = (t for t in enumerate(field_vals)) else: fvi = (t for t in enumerate(reversed(field_vals))) query_sort_str = [' CASE {}'.format(self._normalize_name(field_name))] query_args = [] for i, v in fvi: query_sort_str.append(' WHEN {} THEN {}'.format(self.val_placeholder, i)) query_args.append(v) query_sort_str.append(' END') query_sort_str = "\n".join(query_sort_str) return query_sort_str, query_args
python
def _normalize_sort_SQL(self, field_name, field_vals, sort_dir_str): """ allow sorting by a set of values http://stackoverflow.com/questions/3303851/sqlite-and-custom-order-by """ fvi = None if sort_dir_str == 'ASC': fvi = (t for t in enumerate(field_vals)) else: fvi = (t for t in enumerate(reversed(field_vals))) query_sort_str = [' CASE {}'.format(self._normalize_name(field_name))] query_args = [] for i, v in fvi: query_sort_str.append(' WHEN {} THEN {}'.format(self.val_placeholder, i)) query_args.append(v) query_sort_str.append(' END') query_sort_str = "\n".join(query_sort_str) return query_sort_str, query_args
[ "def", "_normalize_sort_SQL", "(", "self", ",", "field_name", ",", "field_vals", ",", "sort_dir_str", ")", ":", "fvi", "=", "None", "if", "sort_dir_str", "==", "'ASC'", ":", "fvi", "=", "(", "t", "for", "t", "in", "enumerate", "(", "field_vals", ")", ")"...
allow sorting by a set of values http://stackoverflow.com/questions/3303851/sqlite-and-custom-order-by
[ "allow", "sorting", "by", "a", "set", "of", "values" ]
b7ad2c259eca198da03e1e4bc7d95014c168c361
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/interface/sqlite.py#L553-L574
train
49,453
BreakingBytes/simkit
simkit/core/calculations.py
CalcRegistry.register
def register(self, new_calc, *args, **kwargs): """ Register calculations and meta data. * ``dependencies`` - list of prerequisite calculations * ``always_calc`` - ``True`` if calculation ignores thresholds * ``frequency`` - frequency of calculation in intervals or units of time :param new_calc: register new calculation """ kwargs.update(zip(self.meta_names, args)) # dependencies should be a list of other calculations if isinstance(kwargs['dependencies'], basestring): kwargs['dependencies'] = [kwargs['dependencies']] # call super method, now meta can be passed as args or kwargs. super(CalcRegistry, self).register(new_calc, **kwargs)
python
def register(self, new_calc, *args, **kwargs): """ Register calculations and meta data. * ``dependencies`` - list of prerequisite calculations * ``always_calc`` - ``True`` if calculation ignores thresholds * ``frequency`` - frequency of calculation in intervals or units of time :param new_calc: register new calculation """ kwargs.update(zip(self.meta_names, args)) # dependencies should be a list of other calculations if isinstance(kwargs['dependencies'], basestring): kwargs['dependencies'] = [kwargs['dependencies']] # call super method, now meta can be passed as args or kwargs. super(CalcRegistry, self).register(new_calc, **kwargs)
[ "def", "register", "(", "self", ",", "new_calc", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "zip", "(", "self", ".", "meta_names", ",", "args", ")", ")", "# dependencies should be a list of other calculations", "if", ...
Register calculations and meta data. * ``dependencies`` - list of prerequisite calculations * ``always_calc`` - ``True`` if calculation ignores thresholds * ``frequency`` - frequency of calculation in intervals or units of time :param new_calc: register new calculation
[ "Register", "calculations", "and", "meta", "data", "." ]
205163d879d3880b6c9ef609f1b723a58773026b
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/calculations.py#L41-L56
train
49,454
guaix-ucm/pyemir
emirdrp/processing/bars.py
slits_to_ds9_reg
def slits_to_ds9_reg(ds9reg, slits): """Transform fiber traces to ds9-region format. Parameters ---------- ds9reg : BinaryIO Handle to output file name in ds9-region format. """ # open output file and insert header ds9reg.write('# Region file format: DS9 version 4.1\n') ds9reg.write( 'global color=green dashlist=8 3 width=1 font="helvetica 10 ' 'normal roman" select=1 highlite=1 dash=0 fixed=0 edit=1 ' 'move=1 delete=1 include=1 source=1\n') ds9reg.write('physical\n') for idx, slit in enumerate(slits, 1): xpos1, y2, xpos2, y2, xpos2, y1, xpos1, y1 = slit xc = 0.5 * (xpos1 + xpos2) + 1 yc = 0.5 * (y1 + y2) + 1 xd = (xpos2 - xpos1) yd = (y2 - y1) ds9reg.write('box({0},{1},{2},{3},0)\n'.format(xc, yc, xd, yd)) ds9reg.write('# text({0},{1}) color=red text={{{2}}}\n'.format(xpos1 - 5, yc, idx)) ds9reg.write('# text({0},{1}) color=red text={{{2}}}\n'.format(xpos2 + 5, yc, idx + EMIR_NBARS))
python
def slits_to_ds9_reg(ds9reg, slits): """Transform fiber traces to ds9-region format. Parameters ---------- ds9reg : BinaryIO Handle to output file name in ds9-region format. """ # open output file and insert header ds9reg.write('# Region file format: DS9 version 4.1\n') ds9reg.write( 'global color=green dashlist=8 3 width=1 font="helvetica 10 ' 'normal roman" select=1 highlite=1 dash=0 fixed=0 edit=1 ' 'move=1 delete=1 include=1 source=1\n') ds9reg.write('physical\n') for idx, slit in enumerate(slits, 1): xpos1, y2, xpos2, y2, xpos2, y1, xpos1, y1 = slit xc = 0.5 * (xpos1 + xpos2) + 1 yc = 0.5 * (y1 + y2) + 1 xd = (xpos2 - xpos1) yd = (y2 - y1) ds9reg.write('box({0},{1},{2},{3},0)\n'.format(xc, yc, xd, yd)) ds9reg.write('# text({0},{1}) color=red text={{{2}}}\n'.format(xpos1 - 5, yc, idx)) ds9reg.write('# text({0},{1}) color=red text={{{2}}}\n'.format(xpos2 + 5, yc, idx + EMIR_NBARS))
[ "def", "slits_to_ds9_reg", "(", "ds9reg", ",", "slits", ")", ":", "# open output file and insert header", "ds9reg", ".", "write", "(", "'# Region file format: DS9 version 4.1\\n'", ")", "ds9reg", ".", "write", "(", "'global color=green dashlist=8 3 width=1 font=\"helvetica 10 '...
Transform fiber traces to ds9-region format. Parameters ---------- ds9reg : BinaryIO Handle to output file name in ds9-region format.
[ "Transform", "fiber", "traces", "to", "ds9", "-", "region", "format", "." ]
fef6bbabcb13f80123cafd1800a0f508a3c21702
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/bars.py#L192-L218
train
49,455
BreakingBytes/simkit
simkit/core/simulations.py
id_maker
def id_maker(obj): """ Makes an ID from the object's class name and the datetime now in ISO format. :param obj: the class from which to make the ID :return: ID """ dtfmt = '%Y%m%d-%H%M%S' return '%s-%s' % (obj.__class__.__name__, datetime.now().strftime(dtfmt))
python
def id_maker(obj): """ Makes an ID from the object's class name and the datetime now in ISO format. :param obj: the class from which to make the ID :return: ID """ dtfmt = '%Y%m%d-%H%M%S' return '%s-%s' % (obj.__class__.__name__, datetime.now().strftime(dtfmt))
[ "def", "id_maker", "(", "obj", ")", ":", "dtfmt", "=", "'%Y%m%d-%H%M%S'", "return", "'%s-%s'", "%", "(", "obj", ".", "__class__", ".", "__name__", ",", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "dtfmt", ")", ")" ]
Makes an ID from the object's class name and the datetime now in ISO format. :param obj: the class from which to make the ID :return: ID
[ "Makes", "an", "ID", "from", "the", "object", "s", "class", "name", "and", "the", "datetime", "now", "in", "ISO", "format", "." ]
205163d879d3880b6c9ef609f1b723a58773026b
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/simulations.py#L38-L46
train
49,456
BreakingBytes/simkit
simkit/core/simulations.py
SimRegistry.register
def register(self, sim, *args, **kwargs): """ register simulation and metadata. * ``commands`` - list of methods to callable from model :param sim: new simulation """ kwargs.update(zip(self.meta_names, args)) # call super method, now meta can be passed as args or kwargs. super(SimRegistry, self).register(sim, **kwargs)
python
def register(self, sim, *args, **kwargs): """ register simulation and metadata. * ``commands`` - list of methods to callable from model :param sim: new simulation """ kwargs.update(zip(self.meta_names, args)) # call super method, now meta can be passed as args or kwargs. super(SimRegistry, self).register(sim, **kwargs)
[ "def", "register", "(", "self", ",", "sim", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "zip", "(", "self", ".", "meta_names", ",", "args", ")", ")", "# call super method, now meta can be passed as args or kwargs.", "su...
register simulation and metadata. * ``commands`` - list of methods to callable from model :param sim: new simulation
[ "register", "simulation", "and", "metadata", "." ]
205163d879d3880b6c9ef609f1b723a58773026b
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/simulations.py#L111-L121
train
49,457
BreakingBytes/simkit
simkit/core/simulations.py
Simulation.check_data
def check_data(self, data): """ Check if data loaded for all sources in data layer. :param data: data layer from model :type data: :class:`~simkit.core.layer.Data` :return: dictionary of data sources and objects or `None` if not loaded """ data_objs = { data_src: data.objects.get(data_src) for data_src in data.layer } self._is_data_loaded = all(data_objs.values()) return data_objs
python
def check_data(self, data): """ Check if data loaded for all sources in data layer. :param data: data layer from model :type data: :class:`~simkit.core.layer.Data` :return: dictionary of data sources and objects or `None` if not loaded """ data_objs = { data_src: data.objects.get(data_src) for data_src in data.layer } self._is_data_loaded = all(data_objs.values()) return data_objs
[ "def", "check_data", "(", "self", ",", "data", ")", ":", "data_objs", "=", "{", "data_src", ":", "data", ".", "objects", ".", "get", "(", "data_src", ")", "for", "data_src", "in", "data", ".", "layer", "}", "self", ".", "_is_data_loaded", "=", "all", ...
Check if data loaded for all sources in data layer. :param data: data layer from model :type data: :class:`~simkit.core.layer.Data` :return: dictionary of data sources and objects or `None` if not loaded
[ "Check", "if", "data", "loaded", "for", "all", "sources", "in", "data", "layer", "." ]
205163d879d3880b6c9ef609f1b723a58773026b
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/simulations.py#L303-L315
train
49,458
BreakingBytes/simkit
simkit/core/simulations.py
Simulation.initialize
def initialize(self, calc_reg): """ Initialize the simulation. Organize calculations by dependency. :param calc_reg: Calculation registry. :type calc_reg: :class:`~simkit.core.calculation.CalcRegistry` """ self._isinitialized = True # TODO: if calculations are edited, loaded, added, etc. then reset self.calc_order = topological_sort(calc_reg.dependencies)
python
def initialize(self, calc_reg): """ Initialize the simulation. Organize calculations by dependency. :param calc_reg: Calculation registry. :type calc_reg: :class:`~simkit.core.calculation.CalcRegistry` """ self._isinitialized = True # TODO: if calculations are edited, loaded, added, etc. then reset self.calc_order = topological_sort(calc_reg.dependencies)
[ "def", "initialize", "(", "self", ",", "calc_reg", ")", ":", "self", ".", "_isinitialized", "=", "True", "# TODO: if calculations are edited, loaded, added, etc. then reset", "self", ".", "calc_order", "=", "topological_sort", "(", "calc_reg", ".", "dependencies", ")" ]
Initialize the simulation. Organize calculations by dependency. :param calc_reg: Calculation registry. :type calc_reg: :class:`~simkit.core.calculation.CalcRegistry`
[ "Initialize", "the", "simulation", ".", "Organize", "calculations", "by", "dependency", "." ]
205163d879d3880b6c9ef609f1b723a58773026b
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/simulations.py#L317-L327
train
49,459
BreakingBytes/simkit
simkit/core/simulations.py
Simulation.index_iterator
def index_iterator(self): """ Generator that resumes from same index, or restarts from sent index. """ idx = 0 # index while idx < self.number_intervals: new_idx = yield idx idx += 1 if new_idx: idx = new_idx - 1
python
def index_iterator(self): """ Generator that resumes from same index, or restarts from sent index. """ idx = 0 # index while idx < self.number_intervals: new_idx = yield idx idx += 1 if new_idx: idx = new_idx - 1
[ "def", "index_iterator", "(", "self", ")", ":", "idx", "=", "0", "# index", "while", "idx", "<", "self", ".", "number_intervals", ":", "new_idx", "=", "yield", "idx", "idx", "+=", "1", "if", "new_idx", ":", "idx", "=", "new_idx", "-", "1" ]
Generator that resumes from same index, or restarts from sent index.
[ "Generator", "that", "resumes", "from", "same", "index", "or", "restarts", "from", "sent", "index", "." ]
205163d879d3880b6c9ef609f1b723a58773026b
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/simulations.py#L329-L338
train
49,460
guaix-ucm/pyemir
emirdrp/processing/wavecal/rectwv_coeff_to_ds9.py
save_ds9
def save_ds9(output, filename): """Save ds9 region output info filename. Parameters ---------- output : str String containing the full output to be exported as a ds9 region file. filename : str Output file name. """ ds9_file = open(filename, 'wt') ds9_file.write(output) ds9_file.close()
python
def save_ds9(output, filename): """Save ds9 region output info filename. Parameters ---------- output : str String containing the full output to be exported as a ds9 region file. filename : str Output file name. """ ds9_file = open(filename, 'wt') ds9_file.write(output) ds9_file.close()
[ "def", "save_ds9", "(", "output", ",", "filename", ")", ":", "ds9_file", "=", "open", "(", "filename", ",", "'wt'", ")", "ds9_file", ".", "write", "(", "output", ")", "ds9_file", ".", "close", "(", ")" ]
Save ds9 region output info filename. Parameters ---------- output : str String containing the full output to be exported as a ds9 region file. filename : str Output file name.
[ "Save", "ds9", "region", "output", "info", "filename", "." ]
fef6bbabcb13f80123cafd1800a0f508a3c21702
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wavecal/rectwv_coeff_to_ds9.py#L41-L56
train
49,461
guaix-ucm/pyemir
emirdrp/processing/wavecal/rectwv_coeff_to_ds9.py
save_four_ds9
def save_four_ds9(rectwv_coeff, debugplot=0): """Save the 4 possible ds9 region files. Parameters ---------- rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration. debugplot : int Debugging level for messages and plots. For details see 'numina.array.display.pause_debugplot.py'. """ for limits, rectified, suffix in zip( ['frontiers', 'frontiers', 'boundaries', 'boundaries'], [False, True, False, True], ['rawimage', 'rectified', 'rawimage', 'rectified'] ): output = rectwv_coeff_to_ds9(rectwv_coeff=rectwv_coeff, limits=limits, rectified=rectified) filename = 'ds9_' + limits + '_' + suffix + '.reg' if abs(debugplot) >= 10: print('>>> Saving: ', filename) save_ds9(output, filename)
python
def save_four_ds9(rectwv_coeff, debugplot=0): """Save the 4 possible ds9 region files. Parameters ---------- rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration. debugplot : int Debugging level for messages and plots. For details see 'numina.array.display.pause_debugplot.py'. """ for limits, rectified, suffix in zip( ['frontiers', 'frontiers', 'boundaries', 'boundaries'], [False, True, False, True], ['rawimage', 'rectified', 'rawimage', 'rectified'] ): output = rectwv_coeff_to_ds9(rectwv_coeff=rectwv_coeff, limits=limits, rectified=rectified) filename = 'ds9_' + limits + '_' + suffix + '.reg' if abs(debugplot) >= 10: print('>>> Saving: ', filename) save_ds9(output, filename)
[ "def", "save_four_ds9", "(", "rectwv_coeff", ",", "debugplot", "=", "0", ")", ":", "for", "limits", ",", "rectified", ",", "suffix", "in", "zip", "(", "[", "'frontiers'", ",", "'frontiers'", ",", "'boundaries'", ",", "'boundaries'", "]", ",", "[", "False",...
Save the 4 possible ds9 region files. Parameters ---------- rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration. debugplot : int Debugging level for messages and plots. For details see 'numina.array.display.pause_debugplot.py'.
[ "Save", "the", "4", "possible", "ds9", "region", "files", "." ]
fef6bbabcb13f80123cafd1800a0f508a3c21702
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wavecal/rectwv_coeff_to_ds9.py#L59-L84
train
49,462
guaix-ucm/pyemir
emirdrp/processing/wavecal/rectwv_coeff_to_ds9.py
save_spectral_lines_ds9
def save_spectral_lines_ds9(rectwv_coeff, debugplot=0): """Save expected location of arc and OH airglow to ds9 region files. Parameters ---------- rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration. debugplot : int Debugging level for messages and plots. For details see 'numina.array.display.pause_debugplot.py'. """ for spectral_lines, rectified, suffix in zip( ['arc', 'arc', 'oh', 'oh'], [False, True, False, True], ['rawimage', 'rectified', 'rawimage', 'rectified'] ): output = spectral_lines_to_ds9(rectwv_coeff=rectwv_coeff, spectral_lines=spectral_lines, rectified=rectified) filename = 'ds9_' + spectral_lines + '_' + suffix + '.reg' if abs(debugplot) >= 10: print('>>> Saving: ', filename) save_ds9(output, filename)
python
def save_spectral_lines_ds9(rectwv_coeff, debugplot=0): """Save expected location of arc and OH airglow to ds9 region files. Parameters ---------- rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration. debugplot : int Debugging level for messages and plots. For details see 'numina.array.display.pause_debugplot.py'. """ for spectral_lines, rectified, suffix in zip( ['arc', 'arc', 'oh', 'oh'], [False, True, False, True], ['rawimage', 'rectified', 'rawimage', 'rectified'] ): output = spectral_lines_to_ds9(rectwv_coeff=rectwv_coeff, spectral_lines=spectral_lines, rectified=rectified) filename = 'ds9_' + spectral_lines + '_' + suffix + '.reg' if abs(debugplot) >= 10: print('>>> Saving: ', filename) save_ds9(output, filename)
[ "def", "save_spectral_lines_ds9", "(", "rectwv_coeff", ",", "debugplot", "=", "0", ")", ":", "for", "spectral_lines", ",", "rectified", ",", "suffix", "in", "zip", "(", "[", "'arc'", ",", "'arc'", ",", "'oh'", ",", "'oh'", "]", ",", "[", "False", ",", ...
Save expected location of arc and OH airglow to ds9 region files. Parameters ---------- rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration. debugplot : int Debugging level for messages and plots. For details see 'numina.array.display.pause_debugplot.py'.
[ "Save", "expected", "location", "of", "arc", "and", "OH", "airglow", "to", "ds9", "region", "files", "." ]
fef6bbabcb13f80123cafd1800a0f508a3c21702
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wavecal/rectwv_coeff_to_ds9.py#L239-L264
train
49,463
IdentityPython/oidcendpoint
src/oidcendpoint/endpoint_context.py
EndpointContext.create_providerinfo
def create_providerinfo(self, capabilities): """ Dynamically create the provider info response :param capabilities: :return: """ _pinfo = self.package_capabilities() not_supported = {} for key, val in capabilities.items(): try: allowed = _pinfo[key] except KeyError: _pinfo[key] = val else: if isinstance(allowed, bool): if allowed is False: if val is True: not_supported[key] = True else: _pinfo[key] = val elif isinstance(allowed, str): if val != allowed: not_supported[key] = val elif isinstance(allowed, list): if isinstance(val, str): sv = {val} else: try: sv = set(val) except TypeError: if key == 'response_types_supported': sv = set() for v in val: v.sort() sv.add(' '.join(v)) else: raise else: sv = set() for v in val: vs = v.split(' ') vs.sort() sv.add(' '.join(vs)) sa = set(allowed) if (sv & sa) == sv: _pinfo[key] = list(sv) else: not_supported[key] = list(sv - sa) if not_supported: _msg = "Server doesn't support the following features: {}".format( not_supported) logger.error(_msg) raise ConfigurationError(_msg) if self.jwks_uri and self.keyjar: _pinfo["jwks_uri"] = self.jwks_uri for name, instance in self.endpoint.items(): if name not in ['webfinger', 'provider_info']: _pinfo['{}_endpoint'.format(name)] = instance.full_path return _pinfo
python
def create_providerinfo(self, capabilities): """ Dynamically create the provider info response :param capabilities: :return: """ _pinfo = self.package_capabilities() not_supported = {} for key, val in capabilities.items(): try: allowed = _pinfo[key] except KeyError: _pinfo[key] = val else: if isinstance(allowed, bool): if allowed is False: if val is True: not_supported[key] = True else: _pinfo[key] = val elif isinstance(allowed, str): if val != allowed: not_supported[key] = val elif isinstance(allowed, list): if isinstance(val, str): sv = {val} else: try: sv = set(val) except TypeError: if key == 'response_types_supported': sv = set() for v in val: v.sort() sv.add(' '.join(v)) else: raise else: sv = set() for v in val: vs = v.split(' ') vs.sort() sv.add(' '.join(vs)) sa = set(allowed) if (sv & sa) == sv: _pinfo[key] = list(sv) else: not_supported[key] = list(sv - sa) if not_supported: _msg = "Server doesn't support the following features: {}".format( not_supported) logger.error(_msg) raise ConfigurationError(_msg) if self.jwks_uri and self.keyjar: _pinfo["jwks_uri"] = self.jwks_uri for name, instance in self.endpoint.items(): if name not in ['webfinger', 'provider_info']: _pinfo['{}_endpoint'.format(name)] = instance.full_path return _pinfo
[ "def", "create_providerinfo", "(", "self", ",", "capabilities", ")", ":", "_pinfo", "=", "self", ".", "package_capabilities", "(", ")", "not_supported", "=", "{", "}", "for", "key", ",", "val", "in", "capabilities", ".", "items", "(", ")", ":", "try", ":...
Dynamically create the provider info response :param capabilities: :return:
[ "Dynamically", "create", "the", "provider", "info", "response" ]
6c1d729d51bfb6332816117fe476073df7a1d823
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/endpoint_context.py#L324-L390
train
49,464
TkTech/PyNBT
pynbt/nbt.py
BaseTag._write_utf8
def _write_utf8(write, value): """Writes a length-prefixed UTF-8 string.""" write('h', len(value)) write.io.write(value.encode('utf-8'))
python
def _write_utf8(write, value): """Writes a length-prefixed UTF-8 string.""" write('h', len(value)) write.io.write(value.encode('utf-8'))
[ "def", "_write_utf8", "(", "write", ",", "value", ")", ":", "write", "(", "'h'", ",", "len", "(", "value", ")", ")", "write", ".", "io", ".", "write", "(", "value", ".", "encode", "(", "'utf-8'", ")", ")" ]
Writes a length-prefixed UTF-8 string.
[ "Writes", "a", "length", "-", "prefixed", "UTF", "-", "8", "string", "." ]
060fb0a2b58fc464a637fc108f05bd7274da8e5f
https://github.com/TkTech/PyNBT/blob/060fb0a2b58fc464a637fc108f05bd7274da8e5f/pynbt/nbt.py#L29-L32
train
49,465
TkTech/PyNBT
pynbt/nbt.py
BaseTag.read
def read(cls, read, has_name=True): """ Read the tag in using the reader `rd`. If `has_name` is `False`, skip reading the tag name. """ name = cls._read_utf8(read) if has_name else None if cls is TAG_Compound: # A TAG_Compound is almost identical to Python's native dict() # object, or a Java HashMap. final = {} while True: # Find the type of each tag in a compound in turn. tag = read('b', 1)[0] if tag == 0: # A tag of 0 means we've reached TAG_End, used to terminate # a TAG_Compound. break # We read in each tag in turn, using its name as the key in # the dict (Since a compound cannot have repeating names, # this works fine). tmp = _tags[tag].read(read) final[tmp.name] = tmp return cls(final, name=name) elif cls is TAG_List: # A TAG_List is a very simple homogeneous array, similar to # Python's native list() object, but restricted to a single type. tag_type, length = read('bi', 5) tag_read = _tags[tag_type].read return cls( _tags[tag_type], [tag_read(read, has_name=False) for x in range(0, length)], name=name ) elif cls is TAG_String: # A simple length-prefixed UTF-8 string. value = cls._read_utf8(read) return cls(value, name=name) elif cls is TAG_Byte_Array: # A simple array of (signed) bytes. length = read('i', 4)[0] return cls(read('{0}b'.format(length), length), name=name) elif cls is TAG_Int_Array: # A simple array of (signed) 4-byte integers. length = read('i', 4)[0] return cls(read('{0}i'.format(length), length * 4), name=name) elif cls is TAG_Long_Array: # A simple array of (signed) 8-byte longs. length = read('i', 4)[0] return cls(read('{0}q'.format(length), length * 8), name=name) elif cls is TAG_Byte: # A single (signed) byte. return cls(read('b', 1)[0], name=name) elif cls is TAG_Short: # A single (signed) short. return cls(read('h', 2)[0], name=name) elif cls is TAG_Int: # A signed (signed) 4-byte int. return cls(read('i', 4)[0], name=name) elif cls is TAG_Long: # A single (signed) 8-byte long. 
return cls(read('q', 8)[0], name=name) elif cls is TAG_Float: # A single single-precision floating point value. return cls(read('f', 4)[0], name=name) elif cls is TAG_Double: # A single double-precision floating point value. return cls(read('d', 8)[0], name=name) elif cls is TAG_End: # A End of Compound Tag return cls(read('2b', 2)[0], name=name)
python
def read(cls, read, has_name=True): """ Read the tag in using the reader `rd`. If `has_name` is `False`, skip reading the tag name. """ name = cls._read_utf8(read) if has_name else None if cls is TAG_Compound: # A TAG_Compound is almost identical to Python's native dict() # object, or a Java HashMap. final = {} while True: # Find the type of each tag in a compound in turn. tag = read('b', 1)[0] if tag == 0: # A tag of 0 means we've reached TAG_End, used to terminate # a TAG_Compound. break # We read in each tag in turn, using its name as the key in # the dict (Since a compound cannot have repeating names, # this works fine). tmp = _tags[tag].read(read) final[tmp.name] = tmp return cls(final, name=name) elif cls is TAG_List: # A TAG_List is a very simple homogeneous array, similar to # Python's native list() object, but restricted to a single type. tag_type, length = read('bi', 5) tag_read = _tags[tag_type].read return cls( _tags[tag_type], [tag_read(read, has_name=False) for x in range(0, length)], name=name ) elif cls is TAG_String: # A simple length-prefixed UTF-8 string. value = cls._read_utf8(read) return cls(value, name=name) elif cls is TAG_Byte_Array: # A simple array of (signed) bytes. length = read('i', 4)[0] return cls(read('{0}b'.format(length), length), name=name) elif cls is TAG_Int_Array: # A simple array of (signed) 4-byte integers. length = read('i', 4)[0] return cls(read('{0}i'.format(length), length * 4), name=name) elif cls is TAG_Long_Array: # A simple array of (signed) 8-byte longs. length = read('i', 4)[0] return cls(read('{0}q'.format(length), length * 8), name=name) elif cls is TAG_Byte: # A single (signed) byte. return cls(read('b', 1)[0], name=name) elif cls is TAG_Short: # A single (signed) short. return cls(read('h', 2)[0], name=name) elif cls is TAG_Int: # A signed (signed) 4-byte int. return cls(read('i', 4)[0], name=name) elif cls is TAG_Long: # A single (signed) 8-byte long. 
return cls(read('q', 8)[0], name=name) elif cls is TAG_Float: # A single single-precision floating point value. return cls(read('f', 4)[0], name=name) elif cls is TAG_Double: # A single double-precision floating point value. return cls(read('d', 8)[0], name=name) elif cls is TAG_End: # A End of Compound Tag return cls(read('2b', 2)[0], name=name)
[ "def", "read", "(", "cls", ",", "read", ",", "has_name", "=", "True", ")", ":", "name", "=", "cls", ".", "_read_utf8", "(", "read", ")", "if", "has_name", "else", "None", "if", "cls", "is", "TAG_Compound", ":", "# A TAG_Compound is almost identical to Python...
Read the tag in using the reader `rd`. If `has_name` is `False`, skip reading the tag name.
[ "Read", "the", "tag", "in", "using", "the", "reader", "rd", ".", "If", "has_name", "is", "False", "skip", "reading", "the", "tag", "name", "." ]
060fb0a2b58fc464a637fc108f05bd7274da8e5f
https://github.com/TkTech/PyNBT/blob/060fb0a2b58fc464a637fc108f05bd7274da8e5f/pynbt/nbt.py#L35-L105
train
49,466
TkTech/PyNBT
pynbt/nbt.py
BaseTag.pretty
def pretty(self, indent=0, indent_str=' '): """ Pretty-print a tag in the same general style as Markus's example output. """ return '{0}{1}({2!r}): {3!r}'.format( indent_str * indent, self.__class__.__name__, self.name, self.value )
python
def pretty(self, indent=0, indent_str=' '): """ Pretty-print a tag in the same general style as Markus's example output. """ return '{0}{1}({2!r}): {3!r}'.format( indent_str * indent, self.__class__.__name__, self.name, self.value )
[ "def", "pretty", "(", "self", ",", "indent", "=", "0", ",", "indent_str", "=", "' '", ")", ":", "return", "'{0}{1}({2!r}): {3!r}'", ".", "format", "(", "indent_str", "*", "indent", ",", "self", ".", "__class__", ".", "__name__", ",", "self", ".", "name"...
Pretty-print a tag in the same general style as Markus's example output.
[ "Pretty", "-", "print", "a", "tag", "in", "the", "same", "general", "style", "as", "Markus", "s", "example", "output", "." ]
060fb0a2b58fc464a637fc108f05bd7274da8e5f
https://github.com/TkTech/PyNBT/blob/060fb0a2b58fc464a637fc108f05bd7274da8e5f/pynbt/nbt.py#L153-L163
train
49,467
IdentityPython/oidcendpoint
src/oidcendpoint/token_handler.py
factory
def factory(ec, code=None, token=None, refresh=None, **kwargs): """ Create a token handler :param code: :param token: :param refresh: :return: TokenHandler instance """ TTYPE = {'code': 'A', 'token': 'T', 'refresh': 'R'} args = {} if code: args['code_handler'] = init_token_handler(ec, code, TTYPE['code']) if token: args['access_token_handler'] = init_token_handler(ec, token, TTYPE['token']) if refresh: args['refresh_token_handler'] = init_token_handler(ec, token, TTYPE['refresh']) return TokenHandler(**args)
python
def factory(ec, code=None, token=None, refresh=None, **kwargs): """ Create a token handler :param code: :param token: :param refresh: :return: TokenHandler instance """ TTYPE = {'code': 'A', 'token': 'T', 'refresh': 'R'} args = {} if code: args['code_handler'] = init_token_handler(ec, code, TTYPE['code']) if token: args['access_token_handler'] = init_token_handler(ec, token, TTYPE['token']) if refresh: args['refresh_token_handler'] = init_token_handler(ec, token, TTYPE['refresh']) return TokenHandler(**args)
[ "def", "factory", "(", "ec", ",", "code", "=", "None", ",", "token", "=", "None", ",", "refresh", "=", "None", ",", "*", "*", "kwargs", ")", ":", "TTYPE", "=", "{", "'code'", ":", "'A'", ",", "'token'", ":", "'T'", ",", "'refresh'", ":", "'R'", ...
Create a token handler :param code: :param token: :param refresh: :return: TokenHandler instance
[ "Create", "a", "token", "handler" ]
6c1d729d51bfb6332816117fe476073df7a1d823
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/token_handler.py#L279-L300
train
49,468
IdentityPython/oidcendpoint
src/oidcendpoint/token_handler.py
DefaultToken.info
def info(self, token): """ Return token information. :param token: A token :return: dictionary with info about the token """ _res = dict(zip(['_id', 'type', 'sid', 'exp'], self.split_token(token))) if _res['type'] != self.type: raise WrongTokenType(_res['type']) else: _res['handler'] = self _res['black_listed'] = self.is_black_listed(token) return _res
python
def info(self, token): """ Return token information. :param token: A token :return: dictionary with info about the token """ _res = dict(zip(['_id', 'type', 'sid', 'exp'], self.split_token(token))) if _res['type'] != self.type: raise WrongTokenType(_res['type']) else: _res['handler'] = self _res['black_listed'] = self.is_black_listed(token) return _res
[ "def", "info", "(", "self", ",", "token", ")", ":", "_res", "=", "dict", "(", "zip", "(", "[", "'_id'", ",", "'type'", ",", "'sid'", ",", "'exp'", "]", ",", "self", ".", "split_token", "(", "token", ")", ")", ")", "if", "_res", "[", "'type'", "...
Return token information. :param token: A token :return: dictionary with info about the token
[ "Return", "token", "information", "." ]
6c1d729d51bfb6332816117fe476073df7a1d823
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/token_handler.py#L170-L184
train
49,469
openmicroanalysis/pyxray
pyxray/composition.py
process_wildcard
def process_wildcard(fractions): """ Processes element with a wildcard ``?`` weight fraction and returns composition balanced to 1.0. """ wildcard_zs = set() total_fraction = 0.0 for z, fraction in fractions.items(): if fraction == '?': wildcard_zs.add(z) else: total_fraction += fraction if not wildcard_zs: return fractions balance_fraction = (1.0 - total_fraction) / len(wildcard_zs) for z in wildcard_zs: fractions[z] = balance_fraction return fractions
python
def process_wildcard(fractions): """ Processes element with a wildcard ``?`` weight fraction and returns composition balanced to 1.0. """ wildcard_zs = set() total_fraction = 0.0 for z, fraction in fractions.items(): if fraction == '?': wildcard_zs.add(z) else: total_fraction += fraction if not wildcard_zs: return fractions balance_fraction = (1.0 - total_fraction) / len(wildcard_zs) for z in wildcard_zs: fractions[z] = balance_fraction return fractions
[ "def", "process_wildcard", "(", "fractions", ")", ":", "wildcard_zs", "=", "set", "(", ")", "total_fraction", "=", "0.0", "for", "z", ",", "fraction", "in", "fractions", ".", "items", "(", ")", ":", "if", "fraction", "==", "'?'", ":", "wildcard_zs", ".",...
Processes element with a wildcard ``?`` weight fraction and returns composition balanced to 1.0.
[ "Processes", "element", "with", "a", "wildcard", "?", "weight", "fraction", "and", "returns", "composition", "balanced", "to", "1", ".", "0", "." ]
cae89677f00ebcc0952f94d1ab70e6b35e1a51e9
https://github.com/openmicroanalysis/pyxray/blob/cae89677f00ebcc0952f94d1ab70e6b35e1a51e9/pyxray/composition.py#L20-L40
train
49,470
openmicroanalysis/pyxray
pyxray/composition.py
generate_name
def generate_name(atomic_fractions): """ Generates a name from the composition. The name is generated on the basis of a classical chemical formula. """ if not atomic_fractions: return '' if len(atomic_fractions) == 1: z = list(atomic_fractions.keys())[0] return pyxray.element_symbol(z) symbols = [] fractions = [] for z in sorted(atomic_fractions.keys(), reverse=True): symbols.append(pyxray.element_symbol(z)) fractions.append(Fraction(atomic_fractions[z]).limit_denominator()) # Find gcd of the fractions gcds = [] for a, b in itertools.combinations(fractions, 2): gcds.append(math.gcd(a.denominator, b.denominator)) smallest_gcd = min(gcds) # Write formula name = '' for symbol, fraction in zip(symbols, fractions): mole_fraction = int(fraction * smallest_gcd) if mole_fraction == 0: continue elif mole_fraction == 1: name += "%s" % symbol else: name += '%s%i' % (symbol, mole_fraction) return name
python
def generate_name(atomic_fractions): """ Generates a name from the composition. The name is generated on the basis of a classical chemical formula. """ if not atomic_fractions: return '' if len(atomic_fractions) == 1: z = list(atomic_fractions.keys())[0] return pyxray.element_symbol(z) symbols = [] fractions = [] for z in sorted(atomic_fractions.keys(), reverse=True): symbols.append(pyxray.element_symbol(z)) fractions.append(Fraction(atomic_fractions[z]).limit_denominator()) # Find gcd of the fractions gcds = [] for a, b in itertools.combinations(fractions, 2): gcds.append(math.gcd(a.denominator, b.denominator)) smallest_gcd = min(gcds) # Write formula name = '' for symbol, fraction in zip(symbols, fractions): mole_fraction = int(fraction * smallest_gcd) if mole_fraction == 0: continue elif mole_fraction == 1: name += "%s" % symbol else: name += '%s%i' % (symbol, mole_fraction) return name
[ "def", "generate_name", "(", "atomic_fractions", ")", ":", "if", "not", "atomic_fractions", ":", "return", "''", "if", "len", "(", "atomic_fractions", ")", "==", "1", ":", "z", "=", "list", "(", "atomic_fractions", ".", "keys", "(", ")", ")", "[", "0", ...
Generates a name from the composition. The name is generated on the basis of a classical chemical formula.
[ "Generates", "a", "name", "from", "the", "composition", ".", "The", "name", "is", "generated", "on", "the", "basis", "of", "a", "classical", "chemical", "formula", "." ]
cae89677f00ebcc0952f94d1ab70e6b35e1a51e9
https://github.com/openmicroanalysis/pyxray/blob/cae89677f00ebcc0952f94d1ab70e6b35e1a51e9/pyxray/composition.py#L122-L157
train
49,471
openmicroanalysis/pyxray
pyxray/composition.py
Composition.from_pure
def from_pure(cls, z): """ Creates a pure composition. Args: z (int): atomic number """ return cls(cls._key, {z: 1.0}, {z: 1.0}, pyxray.element_symbol(z))
python
def from_pure(cls, z): """ Creates a pure composition. Args: z (int): atomic number """ return cls(cls._key, {z: 1.0}, {z: 1.0}, pyxray.element_symbol(z))
[ "def", "from_pure", "(", "cls", ",", "z", ")", ":", "return", "cls", "(", "cls", ".", "_key", ",", "{", "z", ":", "1.0", "}", ",", "{", "z", ":", "1.0", "}", ",", "pyxray", ".", "element_symbol", "(", "z", ")", ")" ]
Creates a pure composition. Args: z (int): atomic number
[ "Creates", "a", "pure", "composition", "." ]
cae89677f00ebcc0952f94d1ab70e6b35e1a51e9
https://github.com/openmicroanalysis/pyxray/blob/cae89677f00ebcc0952f94d1ab70e6b35e1a51e9/pyxray/composition.py#L199-L206
train
49,472
delvelabs/hammertime
hammertime/rules/status.py
DetectSoft404._collect_sample
async def _collect_sample(self, url, url_pattern): """ Sample collection is meant to be very tolerant to generic failures as failing to obtain the sample has important consequences on the results. - Multiple retries with longer delays - Larger than usual timeout """ samples = [] urls = [self.path_generator.generate_url(url, url_pattern) for _ in range(self.confirmation_factor)] iterator = asyncio.as_completed([self._fetch_sample(url) for url in urls]) for promise in iterator: try: sig = await promise if sig: samples.append(sig) except RejectRequest as e: pass if not samples: raise StopRequest("Impossible to obtain sample") else: return samples
python
async def _collect_sample(self, url, url_pattern): """ Sample collection is meant to be very tolerant to generic failures as failing to obtain the sample has important consequences on the results. - Multiple retries with longer delays - Larger than usual timeout """ samples = [] urls = [self.path_generator.generate_url(url, url_pattern) for _ in range(self.confirmation_factor)] iterator = asyncio.as_completed([self._fetch_sample(url) for url in urls]) for promise in iterator: try: sig = await promise if sig: samples.append(sig) except RejectRequest as e: pass if not samples: raise StopRequest("Impossible to obtain sample") else: return samples
[ "async", "def", "_collect_sample", "(", "self", ",", "url", ",", "url_pattern", ")", ":", "samples", "=", "[", "]", "urls", "=", "[", "self", ".", "path_generator", ".", "generate_url", "(", "url", ",", "url_pattern", ")", "for", "_", "in", "range", "(...
Sample collection is meant to be very tolerant to generic failures as failing to obtain the sample has important consequences on the results. - Multiple retries with longer delays - Larger than usual timeout
[ "Sample", "collection", "is", "meant", "to", "be", "very", "tolerant", "to", "generic", "failures", "as", "failing", "to", "obtain", "the", "sample", "has", "important", "consequences", "on", "the", "results", "." ]
0b371499869881c7d12ed71f66e0d6ac9f9a3307
https://github.com/delvelabs/hammertime/blob/0b371499869881c7d12ed71f66e0d6ac9f9a3307/hammertime/rules/status.py#L182-L206
train
49,473
guaix-ucm/pyemir
emirdrp/tools/overplot_bounddict.py
get_boundaries
def get_boundaries(bounddict_file, slitlet_number): """Read the bounddict json file and return the polynomial boundaries. Parameters ---------- bounddict_file : file handler File containing the bounddict JSON data. slitlet_number : int Number of slitlet. Returns ------- pol_lower_boundary : numpy polynomial Polynomial defining the lower boundary of the slitlet. pol_upper_boundary : numpy polynomial Polynomial defining the upper boundary of the slitlet. xmin_lower : float Minimum abscissae for the lower boundary. xmax_lower : float Maximum abscissae for the lower boundary. xmin_upper : float Minimum abscissae for the upper boundary. xmax_upper : float Maximum abscissae for the upper boundary. csu_bar_slit_center : float CSU bar slit center (in mm) """ bounddict = json.loads(open(bounddict_file.name).read()) # return values in case the requested slitlet number is not defined pol_lower_boundary = None pol_upper_boundary = None xmin_lower = None xmax_lower = None xmin_upper = None xmax_upper = None csu_bar_slit_center = None # search the slitlet number in bounddict slitlet_label = "slitlet" + str(slitlet_number).zfill(2) if slitlet_label in bounddict['contents'].keys(): list_date_obs = list(bounddict['contents'][slitlet_label].keys()) list_date_obs.sort() num_date_obs = len(list_date_obs) if num_date_obs == 1: date_obs = list_date_obs[0] tmp_dict = bounddict['contents'][slitlet_label][date_obs] pol_lower_boundary = Polynomial(tmp_dict['boundary_coef_lower']) pol_upper_boundary = Polynomial(tmp_dict['boundary_coef_upper']) xmin_lower = tmp_dict['boundary_xmin_lower'] xmax_lower = tmp_dict['boundary_xmax_lower'] xmin_upper = tmp_dict['boundary_xmin_upper'] xmax_upper = tmp_dict['boundary_xmax_upper'] csu_bar_slit_center = tmp_dict['csu_bar_slit_center'] else: raise ValueError("num_date_obs =", num_date_obs, " (must be 1)") else: print("WARNING: slitlet number " + str(slitlet_number) + " is not available in " + bounddict_file.name) # return result return 
pol_lower_boundary, pol_upper_boundary, \ xmin_lower, xmax_lower, xmin_upper, xmax_upper, \ csu_bar_slit_center
python
def get_boundaries(bounddict_file, slitlet_number): """Read the bounddict json file and return the polynomial boundaries. Parameters ---------- bounddict_file : file handler File containing the bounddict JSON data. slitlet_number : int Number of slitlet. Returns ------- pol_lower_boundary : numpy polynomial Polynomial defining the lower boundary of the slitlet. pol_upper_boundary : numpy polynomial Polynomial defining the upper boundary of the slitlet. xmin_lower : float Minimum abscissae for the lower boundary. xmax_lower : float Maximum abscissae for the lower boundary. xmin_upper : float Minimum abscissae for the upper boundary. xmax_upper : float Maximum abscissae for the upper boundary. csu_bar_slit_center : float CSU bar slit center (in mm) """ bounddict = json.loads(open(bounddict_file.name).read()) # return values in case the requested slitlet number is not defined pol_lower_boundary = None pol_upper_boundary = None xmin_lower = None xmax_lower = None xmin_upper = None xmax_upper = None csu_bar_slit_center = None # search the slitlet number in bounddict slitlet_label = "slitlet" + str(slitlet_number).zfill(2) if slitlet_label in bounddict['contents'].keys(): list_date_obs = list(bounddict['contents'][slitlet_label].keys()) list_date_obs.sort() num_date_obs = len(list_date_obs) if num_date_obs == 1: date_obs = list_date_obs[0] tmp_dict = bounddict['contents'][slitlet_label][date_obs] pol_lower_boundary = Polynomial(tmp_dict['boundary_coef_lower']) pol_upper_boundary = Polynomial(tmp_dict['boundary_coef_upper']) xmin_lower = tmp_dict['boundary_xmin_lower'] xmax_lower = tmp_dict['boundary_xmax_lower'] xmin_upper = tmp_dict['boundary_xmin_upper'] xmax_upper = tmp_dict['boundary_xmax_upper'] csu_bar_slit_center = tmp_dict['csu_bar_slit_center'] else: raise ValueError("num_date_obs =", num_date_obs, " (must be 1)") else: print("WARNING: slitlet number " + str(slitlet_number) + " is not available in " + bounddict_file.name) # return result return 
pol_lower_boundary, pol_upper_boundary, \ xmin_lower, xmax_lower, xmin_upper, xmax_upper, \ csu_bar_slit_center
[ "def", "get_boundaries", "(", "bounddict_file", ",", "slitlet_number", ")", ":", "bounddict", "=", "json", ".", "loads", "(", "open", "(", "bounddict_file", ".", "name", ")", ".", "read", "(", ")", ")", "# return values in case the requested slitlet number is not de...
Read the bounddict json file and return the polynomial boundaries. Parameters ---------- bounddict_file : file handler File containing the bounddict JSON data. slitlet_number : int Number of slitlet. Returns ------- pol_lower_boundary : numpy polynomial Polynomial defining the lower boundary of the slitlet. pol_upper_boundary : numpy polynomial Polynomial defining the upper boundary of the slitlet. xmin_lower : float Minimum abscissae for the lower boundary. xmax_lower : float Maximum abscissae for the lower boundary. xmin_upper : float Minimum abscissae for the upper boundary. xmax_upper : float Maximum abscissae for the upper boundary. csu_bar_slit_center : float CSU bar slit center (in mm)
[ "Read", "the", "bounddict", "json", "file", "and", "return", "the", "polynomial", "boundaries", "." ]
fef6bbabcb13f80123cafd1800a0f508a3c21702
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/tools/overplot_bounddict.py#L38-L104
train
49,474
BeyondTheClouds/enoslib
enoslib/infra/enos_g5k/provider.py
_to_enos_roles
def _to_enos_roles(roles): """Transform the roles to use enoslib.host.Host hosts. Args: roles (dict): roles returned by :py:func:`enoslib.infra.provider.Provider.init` """ def to_host(h): extra = {} # create extra_vars for the nics # network_role = ethX for nic, roles in h["nics"]: for role in roles: extra[role] = nic return Host(h["host"], user="root", extra=extra) enos_roles = {} for role, hosts in roles.items(): enos_roles[role] = [to_host(h) for h in hosts] logger.debug(enos_roles) return enos_roles
python
def _to_enos_roles(roles): """Transform the roles to use enoslib.host.Host hosts. Args: roles (dict): roles returned by :py:func:`enoslib.infra.provider.Provider.init` """ def to_host(h): extra = {} # create extra_vars for the nics # network_role = ethX for nic, roles in h["nics"]: for role in roles: extra[role] = nic return Host(h["host"], user="root", extra=extra) enos_roles = {} for role, hosts in roles.items(): enos_roles[role] = [to_host(h) for h in hosts] logger.debug(enos_roles) return enos_roles
[ "def", "_to_enos_roles", "(", "roles", ")", ":", "def", "to_host", "(", "h", ")", ":", "extra", "=", "{", "}", "# create extra_vars for the nics", "# network_role = ethX", "for", "nic", ",", "roles", "in", "h", "[", "\"nics\"", "]", ":", "for", "role", "in...
Transform the roles to use enoslib.host.Host hosts. Args: roles (dict): roles returned by :py:func:`enoslib.infra.provider.Provider.init`
[ "Transform", "the", "roles", "to", "use", "enoslib", ".", "host", ".", "Host", "hosts", "." ]
fb00be58e56a7848cfe482187d659744919fe2f7
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_g5k/provider.py#L12-L34
train
49,475
BeyondTheClouds/enoslib
enoslib/infra/enos_g5k/provider.py
_to_enos_networks
def _to_enos_networks(networks): """Transform the networks returned by deploy5k. Args: networks (dict): networks returned by :py:func:`enoslib.infra.provider.Provider.init` """ nets = [] for roles, network in networks: nets.append(network.to_enos(roles)) logger.debug(nets) return nets
python
def _to_enos_networks(networks): """Transform the networks returned by deploy5k. Args: networks (dict): networks returned by :py:func:`enoslib.infra.provider.Provider.init` """ nets = [] for roles, network in networks: nets.append(network.to_enos(roles)) logger.debug(nets) return nets
[ "def", "_to_enos_networks", "(", "networks", ")", ":", "nets", "=", "[", "]", "for", "roles", ",", "network", "in", "networks", ":", "nets", ".", "append", "(", "network", ".", "to_enos", "(", "roles", ")", ")", "logger", ".", "debug", "(", "nets", "...
Transform the networks returned by deploy5k. Args: networks (dict): networks returned by :py:func:`enoslib.infra.provider.Provider.init`
[ "Transform", "the", "networks", "returned", "by", "deploy5k", "." ]
fb00be58e56a7848cfe482187d659744919fe2f7
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_g5k/provider.py#L37-L48
train
49,476
BeyondTheClouds/enoslib
enoslib/infra/enos_g5k/provider.py
G5k.init
def init(self, force_deploy=False, client=None): """Reserve and deploys the nodes according to the resources section In comparison to the vagrant provider, networks must be characterized as in the networks key. Args: force_deploy (bool): True iff the environment must be redeployed Raises: MissingNetworkError: If one network is missing in comparison to what is claimed. NotEnoughNodesError: If the `min` constraints can't be met. """ _force_deploy = self.provider_conf.force_deploy self.provider_conf.force_deploy = _force_deploy or force_deploy self._provider_conf = self.provider_conf.to_dict() r = api.Resources(self._provider_conf, client=client) r.launch() roles = r.get_roles() networks = r.get_networks() return (_to_enos_roles(roles), _to_enos_networks(networks))
python
def init(self, force_deploy=False, client=None): """Reserve and deploys the nodes according to the resources section In comparison to the vagrant provider, networks must be characterized as in the networks key. Args: force_deploy (bool): True iff the environment must be redeployed Raises: MissingNetworkError: If one network is missing in comparison to what is claimed. NotEnoughNodesError: If the `min` constraints can't be met. """ _force_deploy = self.provider_conf.force_deploy self.provider_conf.force_deploy = _force_deploy or force_deploy self._provider_conf = self.provider_conf.to_dict() r = api.Resources(self._provider_conf, client=client) r.launch() roles = r.get_roles() networks = r.get_networks() return (_to_enos_roles(roles), _to_enos_networks(networks))
[ "def", "init", "(", "self", ",", "force_deploy", "=", "False", ",", "client", "=", "None", ")", ":", "_force_deploy", "=", "self", ".", "provider_conf", ".", "force_deploy", "self", ".", "provider_conf", ".", "force_deploy", "=", "_force_deploy", "or", "forc...
Reserve and deploys the nodes according to the resources section In comparison to the vagrant provider, networks must be characterized as in the networks key. Args: force_deploy (bool): True iff the environment must be redeployed Raises: MissingNetworkError: If one network is missing in comparison to what is claimed. NotEnoughNodesError: If the `min` constraints can't be met.
[ "Reserve", "and", "deploys", "the", "nodes", "according", "to", "the", "resources", "section" ]
fb00be58e56a7848cfe482187d659744919fe2f7
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_g5k/provider.py#L54-L77
train
49,477
BeyondTheClouds/enoslib
enoslib/infra/enos_g5k/provider.py
G5k.destroy
def destroy(self): """Destroys the jobs.""" r = api.Resources(self.provider_conf.to_dict()) # insert force_deploy r.destroy()
python
def destroy(self): """Destroys the jobs.""" r = api.Resources(self.provider_conf.to_dict()) # insert force_deploy r.destroy()
[ "def", "destroy", "(", "self", ")", ":", "r", "=", "api", ".", "Resources", "(", "self", ".", "provider_conf", ".", "to_dict", "(", ")", ")", "# insert force_deploy", "r", ".", "destroy", "(", ")" ]
Destroys the jobs.
[ "Destroys", "the", "jobs", "." ]
fb00be58e56a7848cfe482187d659744919fe2f7
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_g5k/provider.py#L79-L83
train
49,478
openmicroanalysis/pyxray
pyxray/parser/wikipedia.py
WikipediaElementNameParser._find_wikipedia_names
def _find_wikipedia_names(self, name_en): """ Finds all Wikipedia pages referring to the specified name in English and returns a dictionary where the keys are the language code and the values are the titles of the corresponding pages. """ url = 'https://en.wikipedia.org/w/api.php' params = {'action': 'query', 'titles': name_en, 'prop': 'langlinks', 'lllimit': 500, 'format': 'json'} r = requests.get(url, params=params) if not r: raise ValueError('Could not find wikipedia page: {0}'.format(name_en)) out = r.json() names = {} pages = out['query']['pages'] for page in pages: for langlink in pages[page].get('langlinks', []): names[langlink['lang']] = langlink['*'] return names
python
def _find_wikipedia_names(self, name_en): """ Finds all Wikipedia pages referring to the specified name in English and returns a dictionary where the keys are the language code and the values are the titles of the corresponding pages. """ url = 'https://en.wikipedia.org/w/api.php' params = {'action': 'query', 'titles': name_en, 'prop': 'langlinks', 'lllimit': 500, 'format': 'json'} r = requests.get(url, params=params) if not r: raise ValueError('Could not find wikipedia page: {0}'.format(name_en)) out = r.json() names = {} pages = out['query']['pages'] for page in pages: for langlink in pages[page].get('langlinks', []): names[langlink['lang']] = langlink['*'] return names
[ "def", "_find_wikipedia_names", "(", "self", ",", "name_en", ")", ":", "url", "=", "'https://en.wikipedia.org/w/api.php'", "params", "=", "{", "'action'", ":", "'query'", ",", "'titles'", ":", "name_en", ",", "'prop'", ":", "'langlinks'", ",", "'lllimit'", ":", ...
Finds all Wikipedia pages referring to the specified name in English and returns a dictionary where the keys are the language code and the values are the titles of the corresponding pages.
[ "Finds", "all", "Wikipedia", "pages", "referring", "to", "the", "specified", "name", "in", "English", "and", "returns", "a", "dictionary", "where", "the", "keys", "are", "the", "language", "code", "and", "the", "values", "are", "the", "titles", "of", "the", ...
cae89677f00ebcc0952f94d1ab70e6b35e1a51e9
https://github.com/openmicroanalysis/pyxray/blob/cae89677f00ebcc0952f94d1ab70e6b35e1a51e9/pyxray/parser/wikipedia.py#L73-L96
train
49,479
sprockets/sprockets.mixins.http
sprockets/mixins/http/__init__.py
HTTPResponse.append_response
def append_response(self, response): """Append the response to the stack of responses. :param tornado.httpclient.HTTPResponse response: The HTTP response """ self._responses.append(response) if 'Warning' in response.headers: LOGGER.warning( 'HTTP %s %s Warning (%s): %s (attempt %s)', response.request.method, response.request.url, response.code, response.headers['Warning'], len(self._responses))
python
def append_response(self, response): """Append the response to the stack of responses. :param tornado.httpclient.HTTPResponse response: The HTTP response """ self._responses.append(response) if 'Warning' in response.headers: LOGGER.warning( 'HTTP %s %s Warning (%s): %s (attempt %s)', response.request.method, response.request.url, response.code, response.headers['Warning'], len(self._responses))
[ "def", "append_response", "(", "self", ",", "response", ")", ":", "self", ".", "_responses", ".", "append", "(", "response", ")", "if", "'Warning'", "in", "response", ".", "headers", ":", "LOGGER", ".", "warning", "(", "'HTTP %s %s Warning (%s): %s (attempt %s)'...
Append the response to the stack of responses. :param tornado.httpclient.HTTPResponse response: The HTTP response
[ "Append", "the", "response", "to", "the", "stack", "of", "responses", "." ]
982219a10be979668726f573f324415fcf2020c8
https://github.com/sprockets/sprockets.mixins.http/blob/982219a10be979668726f573f324415fcf2020c8/sprockets/mixins/http/__init__.py#L64-L76
train
49,480
sprockets/sprockets.mixins.http
sprockets/mixins/http/__init__.py
HTTPResponse.body
def body(self): """Returns the HTTP response body, deserialized if possible. :rtype: mixed """ if not self._responses: return None if self._responses[-1].code >= 400: return self._error_message() return self._deserialize()
python
def body(self): """Returns the HTTP response body, deserialized if possible. :rtype: mixed """ if not self._responses: return None if self._responses[-1].code >= 400: return self._error_message() return self._deserialize()
[ "def", "body", "(", "self", ")", ":", "if", "not", "self", ".", "_responses", ":", "return", "None", "if", "self", ".", "_responses", "[", "-", "1", "]", ".", "code", ">=", "400", ":", "return", "self", ".", "_error_message", "(", ")", "return", "s...
Returns the HTTP response body, deserialized if possible. :rtype: mixed
[ "Returns", "the", "HTTP", "response", "body", "deserialized", "if", "possible", "." ]
982219a10be979668726f573f324415fcf2020c8
https://github.com/sprockets/sprockets.mixins.http/blob/982219a10be979668726f573f324415fcf2020c8/sprockets/mixins/http/__init__.py#L94-L104
train
49,481
sprockets/sprockets.mixins.http
sprockets/mixins/http/__init__.py
HTTPResponse.links
def links(self): """Return the parsed link header if it was set, returning a list of the links as a dict. :rtype: list(dict()) or None """ if not self._responses: return None if 'Link' in self._responses[-1].headers: links = [] for l in headers.parse_link(self._responses[-1].headers['Link']): link = {'target': l.target} link.update({k: v for (k, v) in l.parameters}) links.append(link) return links
python
def links(self): """Return the parsed link header if it was set, returning a list of the links as a dict. :rtype: list(dict()) or None """ if not self._responses: return None if 'Link' in self._responses[-1].headers: links = [] for l in headers.parse_link(self._responses[-1].headers['Link']): link = {'target': l.target} link.update({k: v for (k, v) in l.parameters}) links.append(link) return links
[ "def", "links", "(", "self", ")", ":", "if", "not", "self", ".", "_responses", ":", "return", "None", "if", "'Link'", "in", "self", ".", "_responses", "[", "-", "1", "]", ".", "headers", ":", "links", "=", "[", "]", "for", "l", "in", "headers", "...
Return the parsed link header if it was set, returning a list of the links as a dict. :rtype: list(dict()) or None
[ "Return", "the", "parsed", "link", "header", "if", "it", "was", "set", "returning", "a", "list", "of", "the", "links", "as", "a", "dict", "." ]
982219a10be979668726f573f324415fcf2020c8
https://github.com/sprockets/sprockets.mixins.http/blob/982219a10be979668726f573f324415fcf2020c8/sprockets/mixins/http/__init__.py#L155-L170
train
49,482
sprockets/sprockets.mixins.http
sprockets/mixins/http/__init__.py
HTTPResponse._decode
def _decode(self, value): """Decode bytes to UTF-8 strings as a singe value, list, or dict. :param mixed value: The value to decode :rtype: mixed """ if isinstance(value, list): return [self._decode(v) for v in value] elif isinstance(value, dict): return {self._decode(k): self._decode(v) for k, v in value.items()} elif isinstance(value, bytes): return value.decode('utf-8') return value
python
def _decode(self, value): """Decode bytes to UTF-8 strings as a singe value, list, or dict. :param mixed value: The value to decode :rtype: mixed """ if isinstance(value, list): return [self._decode(v) for v in value] elif isinstance(value, dict): return {self._decode(k): self._decode(v) for k, v in value.items()} elif isinstance(value, bytes): return value.decode('utf-8') return value
[ "def", "_decode", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "return", "[", "self", ".", "_decode", "(", "v", ")", "for", "v", "in", "value", "]", "elif", "isinstance", "(", "value", ",", "dict", ...
Decode bytes to UTF-8 strings as a singe value, list, or dict. :param mixed value: The value to decode :rtype: mixed
[ "Decode", "bytes", "to", "UTF", "-", "8", "strings", "as", "a", "singe", "value", "list", "or", "dict", "." ]
982219a10be979668726f573f324415fcf2020c8
https://github.com/sprockets/sprockets.mixins.http/blob/982219a10be979668726f573f324415fcf2020c8/sprockets/mixins/http/__init__.py#L196-L210
train
49,483
sprockets/sprockets.mixins.http
sprockets/mixins/http/__init__.py
HTTPResponse._deserialize
def _deserialize(self): """Try and deserialize a response body based upon the specified content type. :rtype: mixed """ if not self._responses or not self._responses[-1].body: return None if 'Content-Type' not in self._responses[-1].headers: return self._responses[-1].body try: content_type = algorithms.select_content_type( [headers.parse_content_type( self._responses[-1].headers['Content-Type'])], AVAILABLE_CONTENT_TYPES) except errors.NoMatch: return self._responses[-1].body if content_type[0] == CONTENT_TYPE_JSON: return self._decode( self._json.loads(self._decode(self._responses[-1].body))) elif content_type[0] == CONTENT_TYPE_MSGPACK: # pragma: nocover return self._decode( self._msgpack.unpackb(self._responses[-1].body))
python
def _deserialize(self): """Try and deserialize a response body based upon the specified content type. :rtype: mixed """ if not self._responses or not self._responses[-1].body: return None if 'Content-Type' not in self._responses[-1].headers: return self._responses[-1].body try: content_type = algorithms.select_content_type( [headers.parse_content_type( self._responses[-1].headers['Content-Type'])], AVAILABLE_CONTENT_TYPES) except errors.NoMatch: return self._responses[-1].body if content_type[0] == CONTENT_TYPE_JSON: return self._decode( self._json.loads(self._decode(self._responses[-1].body))) elif content_type[0] == CONTENT_TYPE_MSGPACK: # pragma: nocover return self._decode( self._msgpack.unpackb(self._responses[-1].body))
[ "def", "_deserialize", "(", "self", ")", ":", "if", "not", "self", ".", "_responses", "or", "not", "self", ".", "_responses", "[", "-", "1", "]", ".", "body", ":", "return", "None", "if", "'Content-Type'", "not", "in", "self", ".", "_responses", "[", ...
Try and deserialize a response body based upon the specified content type. :rtype: mixed
[ "Try", "and", "deserialize", "a", "response", "body", "based", "upon", "the", "specified", "content", "type", "." ]
982219a10be979668726f573f324415fcf2020c8
https://github.com/sprockets/sprockets.mixins.http/blob/982219a10be979668726f573f324415fcf2020c8/sprockets/mixins/http/__init__.py#L212-L236
train
49,484
sprockets/sprockets.mixins.http
sprockets/mixins/http/__init__.py
HTTPResponse._error_message
def _error_message(self): """Try and extract the error message from a HTTP error response. :rtype: str """ body = self._deserialize() return body.get('message', body) if isinstance(body, dict) else body
python
def _error_message(self): """Try and extract the error message from a HTTP error response. :rtype: str """ body = self._deserialize() return body.get('message', body) if isinstance(body, dict) else body
[ "def", "_error_message", "(", "self", ")", ":", "body", "=", "self", ".", "_deserialize", "(", ")", "return", "body", ".", "get", "(", "'message'", ",", "body", ")", "if", "isinstance", "(", "body", ",", "dict", ")", "else", "body" ]
Try and extract the error message from a HTTP error response. :rtype: str
[ "Try", "and", "extract", "the", "error", "message", "from", "a", "HTTP", "error", "response", "." ]
982219a10be979668726f573f324415fcf2020c8
https://github.com/sprockets/sprockets.mixins.http/blob/982219a10be979668726f573f324415fcf2020c8/sprockets/mixins/http/__init__.py#L238-L245
train
49,485
sprockets/sprockets.mixins.http
sprockets/mixins/http/__init__.py
HTTPClientMixin.http_fetch
async def http_fetch(self, url, method='GET', request_headers=None, body=None, content_type=CONTENT_TYPE_MSGPACK, follow_redirects=False, max_redirects=MAX_REDIRECTS, connect_timeout=DEFAULT_CONNECT_TIMEOUT, request_timeout=DEFAULT_REQUEST_TIMEOUT, max_http_attempts=MAX_HTTP_RETRIES, auth_username=None, auth_password=None, user_agent=None, validate_cert=True, allow_nonstandard_methods=False, dont_retry=None): """Perform a HTTP request Will retry up to ``self.MAX_HTTP_RETRIES`` times. :param str url: The URL for the request :param str method: The HTTP request method, defaults to ``GET`` :param dict request_headers: Headers to include in the HTTP request :param mixed body: The HTTP request body to send with the request :param content_type: The mime type to use for requests & responses. Defaults to ``application/msgpack`` :type content_type: :py:class:`~ietfparse.datastructures.ContentType` or str :param bool follow_redirects: Follow HTTP redirects when received :param int max_redirects: Maximum number of redirects to follow, default is 5 :param float connect_timeout: Timeout for initial connection in seconds, default 20 seconds :param float request_timeout: Timeout for entire request in seconds, default 20 seconds :param int max_http_attempts: Maximum number of times to retry a request, default is 3 attempts :param str auth_username: Username for HTTP authentication :param str auth_password: Password for HTTP authentication :param str user_agent: The str used for the ``User-Agent`` header, default used if unspecified. :param bool validate_cert: For HTTPS requests, validate the server's certificate? Default is True :param bool allow_nonstandard_methods: Allow methods that don't adhere to the HTTP spec. :param set dont_retry: A list of status codes that will not be retried if an error is returned. 
Default: set({}) :rtype: HTTPResponse """ response = HTTPResponse() request_headers = self._http_req_apply_default_headers( request_headers, content_type, body) if body: body = self._http_req_body_serialize( body, request_headers['Content-Type']) if not dont_retry: dont_retry = set({}) client = httpclient.AsyncHTTPClient() # Workaround for Tornado defect. if hasattr(client, 'max_clients') and os.getenv('HTTP_MAX_CLIENTS'): client.max_clients = int(os.getenv('HTTP_MAX_CLIENTS')) for attempt in range(0, max_http_attempts): LOGGER.debug('%s %s (Attempt %i of %i) %r', method, url, attempt + 1, max_http_attempts, request_headers) if attempt > 0: request_headers['X-Retry-Attempt'] = str(attempt + 1) try: resp = await client.fetch( url, method=method, headers=request_headers, body=body, auth_username=auth_username, auth_password=auth_password, connect_timeout=connect_timeout, request_timeout=request_timeout, user_agent=user_agent or self._http_req_user_agent(), follow_redirects=follow_redirects, max_redirects=max_redirects, raise_error=False, validate_cert=validate_cert, allow_nonstandard_methods=allow_nonstandard_methods) except (ConnectionError, CurlError, OSError, socket.gaierror) as error: response.append_exception(error) LOGGER.warning( 'HTTP Request Error for %s to %s attempt %i of %i: %s', method, url, attempt + 1, max_http_attempts, error) continue # Keep track of each response response.append_response(resp) # If the response is ok, finish and exit if response.ok: response.finish() return response elif resp.code in dont_retry: break elif resp.code in {423, 429}: await self._http_resp_rate_limited(resp) elif resp.code < 500: LOGGER.debug('HTTP Response Error for %s to %s' 'attempt %i of %i (%s): %s', method, url, resp.code, attempt + 1, max_http_attempts, response.body) response.finish() return response LOGGER.warning( 'HTTP Error for %s to %s, attempt %i of %i (%s): %s', method, url, attempt + 1, max_http_attempts, resp.code, response.body) LOGGER.warning('HTTP %s 
to %s failed after %i attempts', method, url, max_http_attempts) response.finish() return response
python
async def http_fetch(self, url, method='GET', request_headers=None, body=None, content_type=CONTENT_TYPE_MSGPACK, follow_redirects=False, max_redirects=MAX_REDIRECTS, connect_timeout=DEFAULT_CONNECT_TIMEOUT, request_timeout=DEFAULT_REQUEST_TIMEOUT, max_http_attempts=MAX_HTTP_RETRIES, auth_username=None, auth_password=None, user_agent=None, validate_cert=True, allow_nonstandard_methods=False, dont_retry=None): """Perform a HTTP request Will retry up to ``self.MAX_HTTP_RETRIES`` times. :param str url: The URL for the request :param str method: The HTTP request method, defaults to ``GET`` :param dict request_headers: Headers to include in the HTTP request :param mixed body: The HTTP request body to send with the request :param content_type: The mime type to use for requests & responses. Defaults to ``application/msgpack`` :type content_type: :py:class:`~ietfparse.datastructures.ContentType` or str :param bool follow_redirects: Follow HTTP redirects when received :param int max_redirects: Maximum number of redirects to follow, default is 5 :param float connect_timeout: Timeout for initial connection in seconds, default 20 seconds :param float request_timeout: Timeout for entire request in seconds, default 20 seconds :param int max_http_attempts: Maximum number of times to retry a request, default is 3 attempts :param str auth_username: Username for HTTP authentication :param str auth_password: Password for HTTP authentication :param str user_agent: The str used for the ``User-Agent`` header, default used if unspecified. :param bool validate_cert: For HTTPS requests, validate the server's certificate? Default is True :param bool allow_nonstandard_methods: Allow methods that don't adhere to the HTTP spec. :param set dont_retry: A list of status codes that will not be retried if an error is returned. 
Default: set({}) :rtype: HTTPResponse """ response = HTTPResponse() request_headers = self._http_req_apply_default_headers( request_headers, content_type, body) if body: body = self._http_req_body_serialize( body, request_headers['Content-Type']) if not dont_retry: dont_retry = set({}) client = httpclient.AsyncHTTPClient() # Workaround for Tornado defect. if hasattr(client, 'max_clients') and os.getenv('HTTP_MAX_CLIENTS'): client.max_clients = int(os.getenv('HTTP_MAX_CLIENTS')) for attempt in range(0, max_http_attempts): LOGGER.debug('%s %s (Attempt %i of %i) %r', method, url, attempt + 1, max_http_attempts, request_headers) if attempt > 0: request_headers['X-Retry-Attempt'] = str(attempt + 1) try: resp = await client.fetch( url, method=method, headers=request_headers, body=body, auth_username=auth_username, auth_password=auth_password, connect_timeout=connect_timeout, request_timeout=request_timeout, user_agent=user_agent or self._http_req_user_agent(), follow_redirects=follow_redirects, max_redirects=max_redirects, raise_error=False, validate_cert=validate_cert, allow_nonstandard_methods=allow_nonstandard_methods) except (ConnectionError, CurlError, OSError, socket.gaierror) as error: response.append_exception(error) LOGGER.warning( 'HTTP Request Error for %s to %s attempt %i of %i: %s', method, url, attempt + 1, max_http_attempts, error) continue # Keep track of each response response.append_response(resp) # If the response is ok, finish and exit if response.ok: response.finish() return response elif resp.code in dont_retry: break elif resp.code in {423, 429}: await self._http_resp_rate_limited(resp) elif resp.code < 500: LOGGER.debug('HTTP Response Error for %s to %s' 'attempt %i of %i (%s): %s', method, url, resp.code, attempt + 1, max_http_attempts, response.body) response.finish() return response LOGGER.warning( 'HTTP Error for %s to %s, attempt %i of %i (%s): %s', method, url, attempt + 1, max_http_attempts, resp.code, response.body) LOGGER.warning('HTTP %s 
to %s failed after %i attempts', method, url, max_http_attempts) response.finish() return response
[ "async", "def", "http_fetch", "(", "self", ",", "url", ",", "method", "=", "'GET'", ",", "request_headers", "=", "None", ",", "body", "=", "None", ",", "content_type", "=", "CONTENT_TYPE_MSGPACK", ",", "follow_redirects", "=", "False", ",", "max_redirects", ...
Perform a HTTP request Will retry up to ``self.MAX_HTTP_RETRIES`` times. :param str url: The URL for the request :param str method: The HTTP request method, defaults to ``GET`` :param dict request_headers: Headers to include in the HTTP request :param mixed body: The HTTP request body to send with the request :param content_type: The mime type to use for requests & responses. Defaults to ``application/msgpack`` :type content_type: :py:class:`~ietfparse.datastructures.ContentType` or str :param bool follow_redirects: Follow HTTP redirects when received :param int max_redirects: Maximum number of redirects to follow, default is 5 :param float connect_timeout: Timeout for initial connection in seconds, default 20 seconds :param float request_timeout: Timeout for entire request in seconds, default 20 seconds :param int max_http_attempts: Maximum number of times to retry a request, default is 3 attempts :param str auth_username: Username for HTTP authentication :param str auth_password: Password for HTTP authentication :param str user_agent: The str used for the ``User-Agent`` header, default used if unspecified. :param bool validate_cert: For HTTPS requests, validate the server's certificate? Default is True :param bool allow_nonstandard_methods: Allow methods that don't adhere to the HTTP spec. :param set dont_retry: A list of status codes that will not be retried if an error is returned. Default: set({}) :rtype: HTTPResponse
[ "Perform", "a", "HTTP", "request" ]
982219a10be979668726f573f324415fcf2020c8
https://github.com/sprockets/sprockets.mixins.http/blob/982219a10be979668726f573f324415fcf2020c8/sprockets/mixins/http/__init__.py#L265-L392
train
49,486
sprockets/sprockets.mixins.http
sprockets/mixins/http/__init__.py
HTTPClientMixin._http_req_apply_default_headers
def _http_req_apply_default_headers(self, request_headers, content_type, body): """Set default values for common HTTP request headers :param dict request_headers: The HTTP request headers :param content_type: The mime-type used in the request/response :type content_type: :py:class:`ietfparse.datastructures.ContentType` or str :param mixed body: The request body :rtype: dict """ if not request_headers: request_headers = {} request_headers.setdefault( 'Accept', ', '.join([str(ct) for ct in AVAILABLE_CONTENT_TYPES])) if body: request_headers.setdefault( 'Content-Type', str(content_type) or str(CONTENT_TYPE_MSGPACK)) if hasattr(self, 'correlation_id'): request_headers.setdefault( 'Correlation-Id', self.correlation_id) elif hasattr(self, 'request') and \ self.request.headers.get('Correlation-Id'): request_headers.setdefault( 'Correlation-Id', self.request.headers['Correlation-Id']) return request_headers
python
def _http_req_apply_default_headers(self, request_headers, content_type, body): """Set default values for common HTTP request headers :param dict request_headers: The HTTP request headers :param content_type: The mime-type used in the request/response :type content_type: :py:class:`ietfparse.datastructures.ContentType` or str :param mixed body: The request body :rtype: dict """ if not request_headers: request_headers = {} request_headers.setdefault( 'Accept', ', '.join([str(ct) for ct in AVAILABLE_CONTENT_TYPES])) if body: request_headers.setdefault( 'Content-Type', str(content_type) or str(CONTENT_TYPE_MSGPACK)) if hasattr(self, 'correlation_id'): request_headers.setdefault( 'Correlation-Id', self.correlation_id) elif hasattr(self, 'request') and \ self.request.headers.get('Correlation-Id'): request_headers.setdefault( 'Correlation-Id', self.request.headers['Correlation-Id']) return request_headers
[ "def", "_http_req_apply_default_headers", "(", "self", ",", "request_headers", ",", "content_type", ",", "body", ")", ":", "if", "not", "request_headers", ":", "request_headers", "=", "{", "}", "request_headers", ".", "setdefault", "(", "'Accept'", ",", "', '", ...
Set default values for common HTTP request headers :param dict request_headers: The HTTP request headers :param content_type: The mime-type used in the request/response :type content_type: :py:class:`ietfparse.datastructures.ContentType` or str :param mixed body: The request body :rtype: dict
[ "Set", "default", "values", "for", "common", "HTTP", "request", "headers" ]
982219a10be979668726f573f324415fcf2020c8
https://github.com/sprockets/sprockets.mixins.http/blob/982219a10be979668726f573f324415fcf2020c8/sprockets/mixins/http/__init__.py#L394-L420
train
49,487
sprockets/sprockets.mixins.http
sprockets/mixins/http/__init__.py
HTTPClientMixin._http_req_body_serialize
def _http_req_body_serialize(self, body, content_type): """Conditionally serialize the request body value if mime_type is set and it's serializable. :param mixed body: The request body :param str content_type: The content type for the request body :raises: ValueError """ if not body or not isinstance(body, (dict, list)): return body content_type = headers.parse_content_type(content_type) if content_type == CONTENT_TYPE_JSON: return self.__hcm_json.dumps(body) elif content_type == CONTENT_TYPE_MSGPACK: return self.__hcm_msgpack.packb(body) raise ValueError('Unsupported Content Type')
python
def _http_req_body_serialize(self, body, content_type): """Conditionally serialize the request body value if mime_type is set and it's serializable. :param mixed body: The request body :param str content_type: The content type for the request body :raises: ValueError """ if not body or not isinstance(body, (dict, list)): return body content_type = headers.parse_content_type(content_type) if content_type == CONTENT_TYPE_JSON: return self.__hcm_json.dumps(body) elif content_type == CONTENT_TYPE_MSGPACK: return self.__hcm_msgpack.packb(body) raise ValueError('Unsupported Content Type')
[ "def", "_http_req_body_serialize", "(", "self", ",", "body", ",", "content_type", ")", ":", "if", "not", "body", "or", "not", "isinstance", "(", "body", ",", "(", "dict", ",", "list", ")", ")", ":", "return", "body", "content_type", "=", "headers", ".", ...
Conditionally serialize the request body value if mime_type is set and it's serializable. :param mixed body: The request body :param str content_type: The content type for the request body :raises: ValueError
[ "Conditionally", "serialize", "the", "request", "body", "value", "if", "mime_type", "is", "set", "and", "it", "s", "serializable", "." ]
982219a10be979668726f573f324415fcf2020c8
https://github.com/sprockets/sprockets.mixins.http/blob/982219a10be979668726f573f324415fcf2020c8/sprockets/mixins/http/__init__.py#L422-L438
train
49,488
sprockets/sprockets.mixins.http
sprockets/mixins/http/__init__.py
HTTPClientMixin._http_resp_rate_limited
def _http_resp_rate_limited(response): """Extract the ``Retry-After`` header value if the request was rate limited and return a future to sleep for the specified duration. :param tornado.httpclient.HTTPResponse response: The response :rtype: tornado.concurrent.Future """ parsed = parse.urlparse(response.request.url) duration = int(response.headers.get('Retry-After', 3)) LOGGER.warning('Rate Limited by %s, retrying in %i seconds', parsed.netloc, duration) return asyncio.sleep(duration)
python
def _http_resp_rate_limited(response): """Extract the ``Retry-After`` header value if the request was rate limited and return a future to sleep for the specified duration. :param tornado.httpclient.HTTPResponse response: The response :rtype: tornado.concurrent.Future """ parsed = parse.urlparse(response.request.url) duration = int(response.headers.get('Retry-After', 3)) LOGGER.warning('Rate Limited by %s, retrying in %i seconds', parsed.netloc, duration) return asyncio.sleep(duration)
[ "def", "_http_resp_rate_limited", "(", "response", ")", ":", "parsed", "=", "parse", ".", "urlparse", "(", "response", ".", "request", ".", "url", ")", "duration", "=", "int", "(", "response", ".", "headers", ".", "get", "(", "'Retry-After'", ",", "3", "...
Extract the ``Retry-After`` header value if the request was rate limited and return a future to sleep for the specified duration. :param tornado.httpclient.HTTPResponse response: The response :rtype: tornado.concurrent.Future
[ "Extract", "the", "Retry", "-", "After", "header", "value", "if", "the", "request", "was", "rate", "limited", "and", "return", "a", "future", "to", "sleep", "for", "the", "specified", "duration", "." ]
982219a10be979668726f573f324415fcf2020c8
https://github.com/sprockets/sprockets.mixins.http/blob/982219a10be979668726f573f324415fcf2020c8/sprockets/mixins/http/__init__.py#L468-L480
train
49,489
thorgate/tg-utils
tg_utils/lock.py
acquires_lock
def acquires_lock(expires, should_fail=True, should_wait=False, resource=None,
                  prefix=DEFAULT_PREFIX):
    """Decorator ensuring the wrapped function is the unique holder of a
    redis lock while it runs.

    Locks are stored in redis with the given prefix (``lock:`` by default,
    via :data:`DEFAULT_PREFIX`).

    Arguments:
        expires(timedelta|int): Expiry time of the lock, way more than the
            expected run time. Intended as a failsafe clean-up mechanism.
        should_fail(bool): Raise ``RuntimeError`` when the lock cannot be
            acquired; otherwise log a warning and return ``False``.
        should_wait(bool): Block until the lock is released instead of
            failing immediately. Can be overridden per call with a
            ``should_wait`` keyword argument.
        resource(str): Resource identifier; defaults to the wrapped
            function's name.
        prefix(str): Prefix added to the redis key.

    Example:
        Ensure a celery task never executes concurrently::

            @shared_task
            @acquires_lock(60, resource='foo')
            def foo():
                ...
    """
    # Normalize the expiry to a number of seconds.
    if isinstance(expires, timedelta):
        expires = expires.total_seconds()

    def decorate(f):
        # Resolve the final redis key once, when the function is decorated.
        nonlocal resource
        if resource is None:
            resource = f.__name__
        resource = '%s%s' % (prefix, resource)

        @wraps(f)
        def wrapped(*args, **kwargs):
            # The context manager form is always blocking, hence the
            # explicit acquire/release dance below.
            lock = redis_lock.Lock(_redis_conn, resource, expire=expires)
            acquired = False

            # Per-call overrides, consumed before invoking the target so
            # they never leak into its kwargs.
            run_on_failure = kwargs.pop('should_execute_if_lock_fails',
                                        False)
            blocking = kwargs.pop('should_wait', should_wait)

            if blocking:
                logger.debug('Waiting for resource "%s"', resource)

            if lock.acquire(blocking=blocking):
                acquired = True
            else:
                if should_fail:
                    raise RuntimeError("Failed to acquire lock: %s"
                                       % resource)
                logger.warning('Failed to acquire lock: %s', resource)
                if not run_on_failure:
                    return False

            try:
                return f(*args, **kwargs)
            finally:
                # Only release locks we actually hold.
                try:
                    if acquired:
                        lock.release()
                except Exception as exc:
                    logger.exception('Failed to release lock: %s', str(exc),
                                     exc_info=False)

        return wrapped

    return decorate
python
def acquires_lock(expires, should_fail=True, should_wait=False, resource=None, prefix=DEFAULT_PREFIX): """ Decorator to ensure function only runs when it is unique holder of the resource. Any invocations of the functions before the first is done will raise RuntimeError. Locks are stored in redis with prefix: `lock:acquires_lock` Arguments: expires(timedelta|int): Expiry time of lock, way more than expected time to run. Intended as a failsafe clean-up mechanism. should_fail(bool): Should error be raised if failed to acquire lock. should_wait(bool): Should this task wait for lock to be released. resource(str): Resource identifier, by default taken from function name. prefix(str): Change prefix added to redis key (the 'lock:' part will always be added) Example: You have a celery task and you want to ensure it is never executed concurrently: @shared_task @acquire_lock(60, resource='foo') def foo(): ... """ # Seconds from now on if isinstance(expires, timedelta): expires = expires.total_seconds() # This is just a tiny wrapper around redis_lock # 1) acquire lock or fail # 2) run function # 3) release lock def decorator(f): nonlocal resource if resource is None: resource = f.__name__ resource = '%s%s' % (prefix, resource) @wraps(f) def wrapper(*args, **kwargs): # The context manager is annoying and always blocking... 
lock = redis_lock.Lock(_redis_conn, resource, expire=expires) lock_acquired = False # Get default lock blocking mode # Copying to local variable so original variable would not be touched nonlocal should_wait is_blocking = should_wait should_execute_if_lock_fails = False if 'should_execute_if_lock_fails' in kwargs: should_execute_if_lock_fails = kwargs.pop("should_execute_if_lock_fails") # If decorated fn is called with should_wait kwarg # Override lock blocking mode if 'should_wait' in kwargs: is_blocking = kwargs.pop('should_wait') if is_blocking: logger.debug('Waiting for resource "%s"', resource) if not lock.acquire(blocking=is_blocking): if should_fail: raise RuntimeError("Failed to acquire lock: %s" % resource) logger.warning('Failed to acquire lock: %s', resource) if not should_execute_if_lock_fails: return False else: lock_acquired = True try: return f(*args, **kwargs) finally: try: if lock_acquired: lock.release() except Exception as e: logger.exception('Failed to release lock: %s', str(e), exc_info=False) return wrapper return decorator
[ "def", "acquires_lock", "(", "expires", ",", "should_fail", "=", "True", ",", "should_wait", "=", "False", ",", "resource", "=", "None", ",", "prefix", "=", "DEFAULT_PREFIX", ")", ":", "# Seconds from now on", "if", "isinstance", "(", "expires", ",", "timedelt...
Decorator to ensure function only runs when it is unique holder of the resource. Any invocations of the functions before the first is done will raise RuntimeError. Locks are stored in redis with prefix: `lock:acquires_lock` Arguments: expires(timedelta|int): Expiry time of lock, way more than expected time to run. Intended as a failsafe clean-up mechanism. should_fail(bool): Should error be raised if failed to acquire lock. should_wait(bool): Should this task wait for lock to be released. resource(str): Resource identifier, by default taken from function name. prefix(str): Change prefix added to redis key (the 'lock:' part will always be added) Example: You have a celery task and you want to ensure it is never executed concurrently: @shared_task @acquire_lock(60, resource='foo') def foo(): ...
[ "Decorator", "to", "ensure", "function", "only", "runs", "when", "it", "is", "unique", "holder", "of", "the", "resource", "." ]
81e404e837334b241686d9159cc3eb44de509a88
https://github.com/thorgate/tg-utils/blob/81e404e837334b241686d9159cc3eb44de509a88/tg_utils/lock.py#L36-L124
train
49,490
BeyondTheClouds/enoslib
enoslib/infra/enos_openstack/provider.py
get_session
def get_session():
    """Build the keystone session object from the environment.

    NOTE(msimonin): only a basic support is provided, focused on
    Chameleon cloud and its rc files.
    """
    env = os.environ
    if env.get("OS_IDENTITY_API_VERSION") == "3":
        logging.info("Creating a v3 Keystone Session")
        auth = v3.Password(auth_url=env["OS_AUTH_URL"],
                           username=env["OS_USERNAME"],
                           password=env["OS_PASSWORD"],
                           project_id=env["OS_PROJECT_ID"],
                           user_domain_name=env["OS_USER_DOMAIN_NAME"])
    else:
        logging.info("Creating a v2 Keystone Session")
        auth = v2.Password(auth_url=env["OS_AUTH_URL"],
                           username=env["OS_USERNAME"],
                           password=env["OS_PASSWORD"],
                           tenant_id=env["OS_TENANT_ID"])
    return session.Session(auth=auth)
python
def get_session(): """Build the session object.""" # NOTE(msimonin): We provide only a basic support which focus # Chameleon cloud and its rc files if os.environ.get("OS_IDENTITY_API_VERSION") == "3": logging.info("Creating a v3 Keystone Session") auth = v3.Password( auth_url=os.environ["OS_AUTH_URL"], username=os.environ["OS_USERNAME"], password=os.environ["OS_PASSWORD"], project_id=os.environ["OS_PROJECT_ID"], user_domain_name=os.environ["OS_USER_DOMAIN_NAME"] ) else: logging.info("Creating a v2 Keystone Session") auth = v2.Password( auth_url=os.environ["OS_AUTH_URL"], username=os.environ["OS_USERNAME"], password=os.environ["OS_PASSWORD"], tenant_id=os.environ["OS_TENANT_ID"]) return session.Session(auth=auth)
[ "def", "get_session", "(", ")", ":", "# NOTE(msimonin): We provide only a basic support which focus", "# Chameleon cloud and its rc files", "if", "os", ".", "environ", ".", "get", "(", "\"OS_IDENTITY_API_VERSION\"", ")", "==", "\"3\"", ":", "logging", ".", "info", "(", ...
Build the session object.
[ "Build", "the", "session", "object", "." ]
fb00be58e56a7848cfe482187d659744919fe2f7
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_openstack/provider.py#L27-L49
train
49,491
BeyondTheClouds/enoslib
enoslib/infra/enos_openstack/provider.py
check_glance
def check_glance(session, image_name):
    """Check that the base image is available and return its id.

    Fails if the base image isn't added. This means the image should be
    added manually.
    """
    gclient = glance.Client(GLANCE_VERSION, session=session,
                            region_name=os.environ['OS_REGION_NAME'])
    # Keep the id of the first image matching the requested name.
    image_id = None
    for image in gclient.images.list():
        if image['name'] == image_name:
            image_id = image['id']
            break
    if image_id is None:
        logger.error("[glance]: Image %s is missing" % image_name)
        raise Exception("Image %s is missing" % image_name)
    logger.info("[glance]: Using image %s:%s" % (image_name, image_id))
    return image_id
python
def check_glance(session, image_name): """Check that the base image is available. Fails if the base image isn't added. This means the image should be added manually. """ gclient = glance.Client(GLANCE_VERSION, session=session, region_name=os.environ['OS_REGION_NAME']) images = gclient.images.list() name_ids = [{'name': i['name'], 'id': i['id']} for i in images] if image_name not in list(map(itemgetter('name'), name_ids)): logger.error("[glance]: Image %s is missing" % image_name) raise Exception("Image %s is missing" % image_name) else: image = [i for i in name_ids if i['name'] == image_name] image_id = image[0]['id'] logger.info("[glance]: Using image %s:%s" % (image_name, image_id)) return image_id
[ "def", "check_glance", "(", "session", ",", "image_name", ")", ":", "gclient", "=", "glance", ".", "Client", "(", "GLANCE_VERSION", ",", "session", "=", "session", ",", "region_name", "=", "os", ".", "environ", "[", "'OS_REGION_NAME'", "]", ")", "images", ...
Check that the base image is available. Fails if the base image isn't added. This means the image should be added manually.
[ "Check", "that", "the", "base", "image", "is", "available", "." ]
fb00be58e56a7848cfe482187d659744919fe2f7
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_openstack/provider.py#L52-L69
train
49,492
BeyondTheClouds/enoslib
enoslib/infra/enos_openstack/provider.py
check_flavors
def check_flavors(session):
    """Build the flavors mappings.

    Returns the two mappings: name -> id and id -> name.
    """
    nclient = nova.Client(NOVA_VERSION, session=session,
                          region_name=os.environ['OS_REGION_NAME'])
    flavors = nclient.flavors.list()
    to_id = {flavor.name: flavor.id for flavor in flavors}
    to_flavor = {flavor.id: flavor.name for flavor in flavors}
    return to_id, to_flavor
python
def check_flavors(session): """Build the flavors mapping returns the mappings id <-> flavor """ nclient = nova.Client(NOVA_VERSION, session=session, region_name=os.environ['OS_REGION_NAME']) flavors = nclient.flavors.list() to_id = dict(list(map(lambda n: [n.name, n.id], flavors))) to_flavor = dict(list(map(lambda n: [n.id, n.name], flavors))) return to_id, to_flavor
[ "def", "check_flavors", "(", "session", ")", ":", "nclient", "=", "nova", ".", "Client", "(", "NOVA_VERSION", ",", "session", "=", "session", ",", "region_name", "=", "os", ".", "environ", "[", "'OS_REGION_NAME'", "]", ")", "flavors", "=", "nclient", ".", ...
Build the flavors mapping returns the mappings id <-> flavor
[ "Build", "the", "flavors", "mapping" ]
fb00be58e56a7848cfe482187d659744919fe2f7
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_openstack/provider.py#L72-L82
train
49,493
BeyondTheClouds/enoslib
enoslib/infra/enos_openstack/provider.py
wait_for_servers
def wait_for_servers(session, servers):
    """Wait for the servers to be ready.

    Polls nova every 3 seconds until every server is either ACTIVE (with
    addresses assigned) or in ERROR state, then returns the pair
    ``(deployed, undeployed)``.

    Note(msimonin): we don't garantee the SSH connection to be ready.
    """
    nclient = nova.Client(NOVA_VERSION, session=session,
                          region_name=os.environ['OS_REGION_NAME'])
    while True:
        deployed = []
        undeployed = []
        for server in servers:
            current = nclient.servers.get(server.id)
            # A server is usable once it is ACTIVE and has addresses.
            if current.addresses != {} and current.status == 'ACTIVE':
                deployed.append(server)
            if current.status == 'ERROR':
                undeployed.append(server)
        logger.info("[nova]: Polling the Deployment")
        logger.info("[nova]: %s deployed servers" % len(deployed))
        logger.info("[nova]: %s undeployed servers" % len(undeployed))
        if len(deployed) + len(undeployed) >= len(servers):
            return deployed, undeployed
        time.sleep(3)
python
def wait_for_servers(session, servers): """Wait for the servers to be ready. Note(msimonin): we don't garantee the SSH connection to be ready. """ nclient = nova.Client(NOVA_VERSION, session=session, region_name=os.environ['OS_REGION_NAME']) while True: deployed = [] undeployed = [] for server in servers: c = nclient.servers.get(server.id) if c.addresses != {} and c.status == 'ACTIVE': deployed.append(server) if c.status == 'ERROR': undeployed.append(server) logger.info("[nova]: Polling the Deployment") logger.info("[nova]: %s deployed servers" % len(deployed)) logger.info("[nova]: %s undeployed servers" % len(undeployed)) if len(deployed) + len(undeployed) >= len(servers): break time.sleep(3) return deployed, undeployed
[ "def", "wait_for_servers", "(", "session", ",", "servers", ")", ":", "nclient", "=", "nova", ".", "Client", "(", "NOVA_VERSION", ",", "session", "=", "session", ",", "region_name", "=", "os", ".", "environ", "[", "'OS_REGION_NAME'", "]", ")", "while", "Tru...
Wait for the servers to be ready. Note(msimonin): we don't garantee the SSH connection to be ready.
[ "Wait", "for", "the", "servers", "to", "be", "ready", "." ]
fb00be58e56a7848cfe482187d659744919fe2f7
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_openstack/provider.py#L208-L230
train
49,494
BeyondTheClouds/enoslib
enoslib/infra/enos_openstack/provider.py
check_servers
def check_servers(session, machines, extra_prefix="", force_deploy=False,
                  key_name=None, image_id=None, flavors='m1.medium',
                  network=None, ext_net=None, scheduler_hints=None):
    """Checks the servers status for the deployment.

    If needed, it creates new servers and add a floating ip to one of
    them. This server can be used as a gateway to the others.

    :param session: keystone session
    :param machines: machine descriptions; each exposes ``number``,
        ``roles`` and ``flavour``
    :param extra_prefix: extra name fragment distinguishing deployments
    :param force_deploy: delete any matching servers and start fresh
    :param key_name: name of the SSH keypair to inject
    :param image_id: glance image id to boot from
    :param flavors: either a flavor name (str) or the
        ``(name->id, id->name)`` mapping pair from :func:`check_flavors`
    :param network: network description (must have an ``id``)
    :param ext_net: unused here, kept for interface compatibility
    :param scheduler_hints: optional list of hints, applied round-robin
    :returns: the list of (existing or newly created) servers
    :raises Exception: when only part of the wanted servers exist
    """
    scheduler_hints = scheduler_hints or []
    nclient = nova.Client(NOVA_VERSION, session=session,
                          region_name=os.environ['OS_REGION_NAME'])
    servers = nclient.servers.list(
        search_opts={'name': '-'.join([DEFAULT_PREFIX, extra_prefix])})
    wanted = _get_total_wanted_machines(machines)
    if force_deploy:
        for server in servers:
            server.delete()
        servers = []

    if len(servers) == wanted:
        logger.info("[nova]: Reusing existing servers : %s", servers)
        return servers
    elif 0 < len(servers) < wanted:
        # BUGFIX: interpolate the server *count*; the previous code
        # formatted the server list itself into the "%s/%s" message.
        raise Exception("Only %s/%s servers found" % (len(servers), wanted))

    # starting the servers
    total = 0
    for machine in machines:
        number = machine.number
        roles = machine.roles
        logger.info("[nova]: Starting %s servers" % number)
        logger.info("[nova]: for roles %s" % roles)
        logger.info("[nova]: with extra hints %s" % scheduler_hints)
        for _ in range(number):
            flavor = machine.flavour
            # ``flavors`` is either a single flavor name or the mapping
            # pair built by check_flavors().
            if isinstance(flavors, str):
                flavor = flavors
            else:
                flavor_to_id, _ = flavors
                flavor = flavor_to_id[flavor]

            # Spread the hints round-robin over the created servers.
            if scheduler_hints:
                _scheduler_hints = \
                    scheduler_hints[total % len(scheduler_hints)]
            else:
                _scheduler_hints = []

            server = nclient.servers.create(
                name='-'.join([DEFAULT_PREFIX, extra_prefix, str(total)]),
                image=image_id,
                flavor=flavor,
                nics=[{'net-id': network['id']}],
                key_name=key_name,
                security_groups=[SECGROUP_NAME],
                scheduler_hints=_scheduler_hints)
            servers.append(server)
            total = total + 1
    return servers
python
def check_servers(session, machines, extra_prefix="", force_deploy=False, key_name=None, image_id=None, flavors='m1.medium', network=None, ext_net=None, scheduler_hints=None): """Checks the servers status for the deployment. If needed, it creates new servers and add a floating ip to one of them. This server can be used as a gateway to the others. """ scheduler_hints = scheduler_hints or [] nclient = nova.Client(NOVA_VERSION, session=session, region_name=os.environ['OS_REGION_NAME']) servers = nclient.servers.list( search_opts={'name': '-'.join([DEFAULT_PREFIX, extra_prefix])}) wanted = _get_total_wanted_machines(machines) if force_deploy: for server in servers: server.delete() servers = [] if len(servers) == wanted: logger.info("[nova]: Reusing existing servers : %s", servers) return servers elif len(servers) > 0 and len(servers) < wanted: raise Exception("Only %s/%s servers found" % (servers, wanted)) # starting the servers total = 0 for machine in machines: number = machine.number roles = machine.roles logger.info("[nova]: Starting %s servers" % number) logger.info("[nova]: for roles %s" % roles) logger.info("[nova]: with extra hints %s" % scheduler_hints) for _ in range(number): flavor = machine.flavour if isinstance(flavors, str): flavor = flavors else: flavor_to_id, _ = flavors flavor = flavor_to_id[flavor] if scheduler_hints: _scheduler_hints = \ scheduler_hints[total % len(scheduler_hints)] else: _scheduler_hints = [] server = nclient.servers.create( name='-'.join([DEFAULT_PREFIX, extra_prefix, str(total)]), image=image_id, flavor=flavor, nics=[{'net-id': network['id']}], key_name=key_name, security_groups=[SECGROUP_NAME], scheduler_hints=_scheduler_hints) servers.append(server) total = total + 1 return servers
[ "def", "check_servers", "(", "session", ",", "machines", ",", "extra_prefix", "=", "\"\"", ",", "force_deploy", "=", "False", ",", "key_name", "=", "None", ",", "image_id", "=", "None", ",", "flavors", "=", "'m1.medium'", ",", "network", "=", "None", ",", ...
Checks the servers status for the deployment. If needed, it creates new servers and add a floating ip to one of them. This server can be used as a gateway to the others.
[ "Checks", "the", "servers", "status", "for", "the", "deployment", "." ]
fb00be58e56a7848cfe482187d659744919fe2f7
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_openstack/provider.py#L238-L297
train
49,495
BeyondTheClouds/enoslib
enoslib/infra/enos_openstack/provider.py
is_in_current_deployment
def is_in_current_deployment(server, extra_prefix=""):
    """Check if an existing server in the system takes part in the
    current deployment.

    A server belongs to the deployment when its name starts with
    ``DEFAULT_PREFIX`` joined with *extra_prefix* by a dash.
    """
    pattern = r"^%s" % '-'.join([DEFAULT_PREFIX, extra_prefix])
    return re.match(pattern, server.name) is not None
python
def is_in_current_deployment(server, extra_prefix=""): """Check if an existing server in the system take part to the current deployment """ return re.match(r"^%s" % '-'.join([DEFAULT_PREFIX, extra_prefix]), server.name) is not None
[ "def", "is_in_current_deployment", "(", "server", ",", "extra_prefix", "=", "\"\"", ")", ":", "return", "re", ".", "match", "(", "r\"^%s\"", "%", "'-'", ".", "join", "(", "[", "DEFAULT_PREFIX", ",", "extra_prefix", "]", ")", ",", "server", ".", "name", "...
Check if an existing server in the system take part to the current deployment
[ "Check", "if", "an", "existing", "server", "in", "the", "system", "take", "part", "to", "the", "current", "deployment" ]
fb00be58e56a7848cfe482187d659744919fe2f7
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_openstack/provider.py#L321-L326
train
49,496
BeyondTheClouds/enoslib
enoslib/infra/enos_openstack/provider.py
allow_address_pairs
def allow_address_pairs(session, network, subnet):
    """Allow several interfaces to be added and accessed from the other
    machines.

    This is particularly useful when working with virtual ips.
    """
    nclient = neutron.Client('2', session=session,
                             region_name=os.environ['OS_REGION_NAME'])
    ports = nclient.list_ports()
    # BUGFIX: materialize the matching ports in a list. The previous
    # ``filter(...)`` returned a one-shot iterator under Python 3 that the
    # logging call below exhausted, so the update loop never ran.
    ports_to_update = [port for port in ports['ports']
                       if port['network_id'] == network['id']]
    logger.info('[nova]: Allowing address pairs for ports %s' %
                [port['fixed_ips'] for port in ports_to_update])
    for port in ports_to_update:
        try:
            nclient.update_port(port['id'], {
                'port': {
                    'allowed_address_pairs': [{
                        'ip_address': subnet
                    }]
                }
            })
        except Exception:
            # NOTE(msimonin): dhcp and router interface ports seem to have
            # enabled_sec_groups = False which prevents them from being
            # updated; just log a warning and skip them.
            # (logger.warn is a deprecated alias of logger.warning.)
            logger.warning("Can't update port %s" % port)
python
def allow_address_pairs(session, network, subnet): """Allow several interfaces to be added and accessed from the other machines. This is particularly useful when working with virtual ips. """ nclient = neutron.Client('2', session=session, region_name=os.environ['OS_REGION_NAME']) ports = nclient.list_ports() ports_to_update = filter( lambda p: p['network_id'] == network['id'], ports['ports']) logger.info('[nova]: Allowing address pairs for ports %s' % list(map(lambda p: p['fixed_ips'], ports_to_update))) for port in ports_to_update: try: nclient.update_port(port['id'], { 'port': { 'allowed_address_pairs': [{ 'ip_address': subnet }] } }) except Exception: # NOTE(msimonin): dhcp and router interface port # seems to have enabled_sec_groups = False which # prevent them to be updated, just throw a warning # a skip them logger.warn("Can't update port %s" % port)
[ "def", "allow_address_pairs", "(", "session", ",", "network", ",", "subnet", ")", ":", "nclient", "=", "neutron", ".", "Client", "(", "'2'", ",", "session", "=", "session", ",", "region_name", "=", "os", ".", "environ", "[", "'OS_REGION_NAME'", "]", ")", ...
Allow several interfaces to be added and accessed from the other machines. This is particularly useful when working with virtual ips.
[ "Allow", "several", "interfaces", "to", "be", "added", "and", "accessed", "from", "the", "other", "machines", "." ]
fb00be58e56a7848cfe482187d659744919fe2f7
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_openstack/provider.py#L329-L356
train
49,497
BeyondTheClouds/enoslib
enoslib/infra/enos_openstack/provider.py
check_environment
def check_environment(provider_conf):
    """Check all resources needed by Enos.

    Builds the session, validates the image, flavors and network, and
    returns everything in a single dict.
    """
    session = get_session()
    image_id = check_glance(session, provider_conf.image)
    flavor_to_id, id_to_flavor = check_flavors(session)
    ext_net, network, subnet = check_network(
        session,
        provider_conf.configure_network,
        provider_conf.network,
        subnet=provider_conf.subnet,
        dns_nameservers=provider_conf.dns_nameservers,
        allocation_pool=provider_conf.allocation_pool)

    return dict(session=session,
                image_id=image_id,
                flavor_to_id=flavor_to_id,
                id_to_flavor=id_to_flavor,
                ext_net=ext_net,
                network=network,
                subnet=subnet)
python
def check_environment(provider_conf): """Check all ressources needed by Enos.""" session = get_session() image_id = check_glance(session, provider_conf.image) flavor_to_id, id_to_flavor = check_flavors(session) ext_net, network, subnet = check_network( session, provider_conf.configure_network, provider_conf.network, subnet=provider_conf.subnet, dns_nameservers=provider_conf.dns_nameservers, allocation_pool=provider_conf.allocation_pool) return { 'session': session, 'image_id': image_id, 'flavor_to_id': flavor_to_id, 'id_to_flavor': id_to_flavor, 'ext_net': ext_net, 'network': network, 'subnet': subnet }
[ "def", "check_environment", "(", "provider_conf", ")", ":", "session", "=", "get_session", "(", ")", "image_id", "=", "check_glance", "(", "session", ",", "provider_conf", ".", "image", ")", "flavor_to_id", ",", "id_to_flavor", "=", "check_flavors", "(", "sessio...
Check all ressources needed by Enos.
[ "Check", "all", "ressources", "needed", "by", "Enos", "." ]
fb00be58e56a7848cfe482187d659744919fe2f7
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_openstack/provider.py#L359-L380
train
49,498
IdentityPython/oidcendpoint
src/oidcendpoint/userinfo.py
collect_user_info
def collect_user_info(endpoint_context, session, userinfo_claims=None):
    """
    Collect information about a user.
    This can happen in two cases, either when constructing an IdToken or
    when returning user info through the UserInfo endpoint

    :param endpoint_context: Endpoint context (provides the userinfo backend)
    :param session: Session information
    :param userinfo_claims: user info claims
    :return: User info
    """
    authn_req = session['authn_req']

    if userinfo_claims is None:
        uic = scope2claims(authn_req["scope"])

        # Get only keys allowed by user and update the dict if such info
        # is stored in session
        perm_set = session.get('permission')
        if perm_set:
            uic = {key: uic[key] for key in uic if key in perm_set}

        uic = update_claims(session, "userinfo", uic)

        if uic:
            userinfo_claims = Claims(**uic)
        else:
            userinfo_claims = None

    # BUGFIX: userinfo_claims may legitimately be None here (no claims
    # were requested); the previous unconditional ``.to_dict()`` raised
    # AttributeError in that case.
    if userinfo_claims is not None:
        logger.debug(
            "userinfo_claim: %s" % sanitize(userinfo_claims.to_dict()))

    logger.debug("Session info: %s" % sanitize(session))

    authn_event = session['authn_event']
    if authn_event:
        uid = authn_event["uid"]
    else:
        uid = session['uid']

    info = endpoint_context.userinfo(uid, authn_req['client_id'],
                                     userinfo_claims)

    # BUGFIX: guard the membership test too; ``"sub" in None`` raises
    # TypeError when no claims were requested.
    if userinfo_claims is not None and "sub" in userinfo_claims:
        if not claims_match(session["sub"], userinfo_claims["sub"]):
            raise FailedAuthentication("Unmatched sub claim")

    info["sub"] = session["sub"]

    try:
        logger.debug("user_info_response: {}".format(info))
    except UnicodeEncodeError:
        # Best-effort logging only; never let logging break the response.
        try:
            logger.debug(
                "user_info_response: {}".format(info.encode('utf-8')))
        except Exception:
            pass

    return info
python
def collect_user_info(endpoint_context, session, userinfo_claims=None): """ Collect information about a user. This can happen in two cases, either when constructing an IdToken or when returning user info through the UserInfo endpoint :param session: Session information :param userinfo_claims: user info claims :return: User info """ authn_req = session['authn_req'] if userinfo_claims is None: uic = scope2claims(authn_req["scope"]) # Get only keys allowed by user and update the dict if such info # is stored in session perm_set = session.get('permission') if perm_set: uic = {key: uic[key] for key in uic if key in perm_set} uic = update_claims(session, "userinfo", uic) if uic: userinfo_claims = Claims(**uic) else: userinfo_claims = None logger.debug( "userinfo_claim: %s" % sanitize(userinfo_claims.to_dict())) logger.debug("Session info: %s" % sanitize(session)) authn_event = session['authn_event'] if authn_event: uid = authn_event["uid"] else: uid = session['uid'] info = endpoint_context.userinfo(uid, authn_req['client_id'], userinfo_claims) if "sub" in userinfo_claims: if not claims_match(session["sub"], userinfo_claims["sub"]): raise FailedAuthentication("Unmatched sub claim") info["sub"] = session["sub"] try: logger.debug("user_info_response: {}".format(info)) except UnicodeEncodeError: try: logger.debug( "user_info_response: {}".format(info.encode('utf-8'))) except Exception: pass return info
[ "def", "collect_user_info", "(", "endpoint_context", ",", "session", ",", "userinfo_claims", "=", "None", ")", ":", "authn_req", "=", "session", "[", "'authn_req'", "]", "if", "userinfo_claims", "is", "None", ":", "uic", "=", "scope2claims", "(", "authn_req", ...
Collect information about a user. This can happen in two cases, either when constructing an IdToken or when returning user info through the UserInfo endpoint :param session: Session information :param userinfo_claims: user info claims :return: User info
[ "Collect", "information", "about", "a", "user", ".", "This", "can", "happen", "in", "two", "cases", "either", "when", "constructing", "an", "IdToken", "or", "when", "returning", "user", "info", "through", "the", "UserInfo", "endpoint" ]
6c1d729d51bfb6332816117fe476073df7a1d823
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/userinfo.py#L105-L161
train
49,499