code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def set_homepage(self, shop_id, template_id, url=None):
    """Set the merchant homepage for a WiFi shop.

    See http://mp.weixin.qq.com/wiki/6/2732f3cf83947e0e4971aa8797ee9d6a.html

    :param shop_id: shop ID
    :param template_id: template ID; 0 for the default template,
        1 for a custom URL
    :param url: custom link, required when ``template_id`` is 1
    :return: parsed JSON response from the API
    """
    payload = {
        'shop_id': shop_id,
        'template_id': template_id,
    }
    if url:
        # The API expects the custom link wrapped in a `struct` object.
        payload['struct'] = {'url': url}
    return self._post('homepage/set', data=payload)
设置商家主页 详情请参考 http://mp.weixin.qq.com/wiki/6/2732f3cf83947e0e4971aa8797ee9d6a.html :param shop_id: 门店 ID :param template_id: 模板ID,0-默认模板,1-自定义url :param url: 自定义链接,当template_id为1时必填 :return: 返回的 JSON 数据包
Below is the the instruction that describes the task: ### Input: 设置商家主页 详情请参考 http://mp.weixin.qq.com/wiki/6/2732f3cf83947e0e4971aa8797ee9d6a.html :param shop_id: 门店 ID :param template_id: 模板ID,0-默认模板,1-自定义url :param url: 自定义链接,当template_id为1时必填 :return: 返回的 JSON 数据包 ### Response: def set_homepage(self, shop_id, template_id, url=None): """ 设置商家主页 详情请参考 http://mp.weixin.qq.com/wiki/6/2732f3cf83947e0e4971aa8797ee9d6a.html :param shop_id: 门店 ID :param template_id: 模板ID,0-默认模板,1-自定义url :param url: 自定义链接,当template_id为1时必填 :return: 返回的 JSON 数据包 """ data = { 'shop_id': shop_id, 'template_id': template_id, } if url: data['struct'] = {'url': url} return self._post('homepage/set', data=data)
def to_openmath(self, obj):
    """Convert a Python object to its OpenMath representation.

    Registered converters are tried from the most recently added one
    backwards; a converter signals that it cannot handle ``obj`` by
    raising ``CannotConvertError``, in which case the search continues.
    As a last resort an ``__openmath__`` hook on the object itself is
    honoured.

    :raises ValueError: if no converter accepts ``obj``.
    """
    for klass, converter in reversed(self._conv_to_om):
        applicable = klass is None or isinstance(obj, klass)
        if not applicable:
            continue
        try:
            return converter(obj)
        except CannotConvertError:
            # This converter declined; keep searching.
            pass
    if hasattr(obj, '__openmath__'):
        return obj.__openmath__()
    raise ValueError('Cannot convert %r to OpenMath.' % obj)
Convert Python object to OpenMath
Below is the the instruction that describes the task: ### Input: Convert Python object to OpenMath ### Response: def to_openmath(self, obj): """ Convert Python object to OpenMath """ for cl, conv in reversed(self._conv_to_om): if cl is None or isinstance(obj, cl): try: return conv(obj) except CannotConvertError: continue if hasattr(obj, '__openmath__'): return obj.__openmath__() raise ValueError('Cannot convert %r to OpenMath.' % obj)
def pool_to_HW(shape, data_frmt):
    """Reduce an NHWC/NCHW 4-D shape to its [H, W] spatial pair.

    Shapes that are not rank 4 are returned untouched, since they are
    assumed not to follow either layout.
    """
    if len(shape) != 4:
        # Not NHWC/NCHW; leave the shape as-is.
        return shape
    return [shape[2], shape[3]] if data_frmt == 'NCHW' else [shape[1], shape[2]]
Convert from NHWC|NCHW => HW
Below is the the instruction that describes the task: ### Input: Convert from NHWC|NCHW => HW ### Response: def pool_to_HW(shape, data_frmt): """ Convert from NHWC|NCHW => HW """ if len(shape) != 4: return shape # Not NHWC|NCHW, return as is if data_frmt == 'NCHW': return [shape[2], shape[3]] return [shape[1], shape[2]]
def client(self):
    """Return a lazily instantiated pymongo client.

    Connecting performs IO, and under eventlet that IO can yield control
    and let another greenthread race into client creation.  Guarding the
    call with a semaphore therefore ensures only one MongoDB client is
    instantiated per SimplDB instance.
    """
    if not eventlet:
        self._set_client()
    else:
        # Serialize creation: _set_client may yield on IO under eventlet.
        with self.client_lock:
            self._set_client()
    return self._client
Return a lazy-instantiated pymongo client. When running with eventlet, connection causes IO and can result in more than one MongoDB client getting instantiatied, so we wrap the code in a semaphore to make sure only one mongodb client is instantiated per SimplDB class.
Below is the the instruction that describes the task: ### Input: Return a lazy-instantiated pymongo client. When running with eventlet, connection causes IO and can result in more than one MongoDB client getting instantiatied, so we wrap the code in a semaphore to make sure only one mongodb client is instantiated per SimplDB class. ### Response: def client(self): """Return a lazy-instantiated pymongo client. When running with eventlet, connection causes IO and can result in more than one MongoDB client getting instantiatied, so we wrap the code in a semaphore to make sure only one mongodb client is instantiated per SimplDB class. """ if eventlet: with self.client_lock: self._set_client() else: self._set_client() return self._client
def padDigitalData(self, dig_data, n):
    """Right-pad ``dig_data`` with copies of its last element so that the
    result length is a multiple of ``n``.

    :param dig_data: 1-D array of uint32 digital samples
    :param n: block size the output length must be a multiple of
    :return: the original array when already aligned, otherwise a new
        padded array
    """
    n = int(n)
    remainder = len(dig_data) % n
    if remainder == 0:
        # Already a multiple of n: no padding needed.
        return dig_data
    pad = np.full(n - remainder, dig_data[-1], dtype="uint32")
    return np.concatenate((dig_data, pad))
Pad dig_data with its last element so that the new array is a multiple of n.
Below is the the instruction that describes the task: ### Input: Pad dig_data with its last element so that the new array is a multiple of n. ### Response: def padDigitalData(self, dig_data, n): """Pad dig_data with its last element so that the new array is a multiple of n. """ n = int(n) l0 = len(dig_data) if l0 % n == 0: return dig_data # no need of padding else: ladd = n - (l0 % n) dig_data_add = np.zeros(ladd, dtype="uint32") dig_data_add.fill(dig_data[-1]) return np.concatenate((dig_data, dig_data_add))
def send(self, request):
    """Queue a request to be sent to the RPC."""
    if not self._UNARY_REQUESTS:
        self._rpc.send(request)
        return
    try:
        self._send_unary_request(request)
    except exceptions.GoogleAPICallError:
        # Stream requests are best-effort, so a failed unary send is
        # logged rather than propagated.
        _LOGGER.debug(
            "Exception while sending unary RPC. This is typically "
            "non-fatal as stream requests are best-effort.",
            exc_info=True,
        )
Queue a request to be sent to the RPC.
Below is the the instruction that describes the task: ### Input: Queue a request to be sent to the RPC. ### Response: def send(self, request): """Queue a request to be sent to the RPC.""" if self._UNARY_REQUESTS: try: self._send_unary_request(request) except exceptions.GoogleAPICallError: _LOGGER.debug( "Exception while sending unary RPC. This is typically " "non-fatal as stream requests are best-effort.", exc_info=True, ) else: self._rpc.send(request)
async def send_read_acknowledge(
        self, entity, message=None, *, max_id=None, clear_mentions=False):
    """
    Sends a "read acknowledge" (i.e., notifying the given peer that we've
    read their messages, also known as the "double check").

    This effectively marks a message as read (or more than one) in the
    given conversation.

    If neither message nor maximum ID are provided, all messages will be
    marked as read by assuming that ``max_id = 0``.

    Args:
        entity (`entity`):
            The chat where these messages are located.

        message (`list` | `Message <telethon.tl.custom.message.Message>`):
            Either a list of messages or a single message.

        max_id (`int`):
            Overrides messages, until which message should the
            acknowledge should be sent.

        clear_mentions (`bool`):
            Whether the mention badge should be cleared (so that
            there are no more mentions) or not for the given entity.

            If no message is provided, this will be the only action
            taken.
    """
    if max_id is None:
        if message:
            if utils.is_list_like(message):
                max_id = max(msg.id for msg in message)
            else:
                max_id = message.id
        elif not clear_mentions:
            # No message and no explicit ID: mark the whole history as
            # read.  When the caller only asked to clear mentions we
            # leave max_id as None so that clearing mentions is the
            # only action taken (as documented above).
            max_id = 0

    entity = await self.get_input_entity(entity)
    if clear_mentions:
        await self(functions.messages.ReadMentionsRequest(entity))
        if max_id is None:
            return True

    if max_id is not None:
        # Channels use a dedicated request; everything else goes
        # through messages.ReadHistory.
        if isinstance(entity, types.InputPeerChannel):
            return await self(functions.channels.ReadHistoryRequest(
                entity, max_id=max_id))
        else:
            return await self(functions.messages.ReadHistoryRequest(
                entity, max_id=max_id))

    return False
Sends a "read acknowledge" (i.e., notifying the given peer that we've read their messages, also known as the "double check"). This effectively marks a message as read (or more than one) in the given conversation. If neither message nor maximum ID are provided, all messages will be marked as read by assuming that ``max_id = 0``. Args: entity (`entity`): The chat where these messages are located. message (`list` | `Message <telethon.tl.custom.message.Message>`): Either a list of messages or a single message. max_id (`int`): Overrides messages, until which message should the acknowledge should be sent. clear_mentions (`bool`): Whether the mention badge should be cleared (so that there are no more mentions) or not for the given entity. If no message is provided, this will be the only action taken.
Below is the the instruction that describes the task: ### Input: Sends a "read acknowledge" (i.e., notifying the given peer that we've read their messages, also known as the "double check"). This effectively marks a message as read (or more than one) in the given conversation. If neither message nor maximum ID are provided, all messages will be marked as read by assuming that ``max_id = 0``. Args: entity (`entity`): The chat where these messages are located. message (`list` | `Message <telethon.tl.custom.message.Message>`): Either a list of messages or a single message. max_id (`int`): Overrides messages, until which message should the acknowledge should be sent. clear_mentions (`bool`): Whether the mention badge should be cleared (so that there are no more mentions) or not for the given entity. If no message is provided, this will be the only action taken. ### Response: async def send_read_acknowledge( self, entity, message=None, *, max_id=None, clear_mentions=False): """ Sends a "read acknowledge" (i.e., notifying the given peer that we've read their messages, also known as the "double check"). This effectively marks a message as read (or more than one) in the given conversation. If neither message nor maximum ID are provided, all messages will be marked as read by assuming that ``max_id = 0``. Args: entity (`entity`): The chat where these messages are located. message (`list` | `Message <telethon.tl.custom.message.Message>`): Either a list of messages or a single message. max_id (`int`): Overrides messages, until which message should the acknowledge should be sent. clear_mentions (`bool`): Whether the mention badge should be cleared (so that there are no more mentions) or not for the given entity. If no message is provided, this will be the only action taken. 
""" if max_id is None: if not message: max_id = 0 else: if utils.is_list_like(message): max_id = max(msg.id for msg in message) else: max_id = message.id entity = await self.get_input_entity(entity) if clear_mentions: await self(functions.messages.ReadMentionsRequest(entity)) if max_id is None: return True if max_id is not None: if isinstance(entity, types.InputPeerChannel): return await self(functions.channels.ReadHistoryRequest( entity, max_id=max_id)) else: return await self(functions.messages.ReadHistoryRequest( entity, max_id=max_id)) return False
def module(self):
    """The module in which the Class is defined.

    Python equivalent of the CLIPS defglobal-module command.
    """
    # Resolve the defclass's module name through the CLIPS C API, then
    # look the module itself up by that name.
    name = ffi.string(lib.EnvDefclassModule(self._env, self._cls))
    found = lib.EnvFindDefmodule(self._env, name)
    return Module(self._env, found)
The module in which the Class is defined. Python equivalent of the CLIPS defglobal-module command.
Below is the the instruction that describes the task: ### Input: The module in which the Class is defined. Python equivalent of the CLIPS defglobal-module command. ### Response: def module(self): """The module in which the Class is defined. Python equivalent of the CLIPS defglobal-module command. """ modname = ffi.string(lib.EnvDefclassModule(self._env, self._cls)) defmodule = lib.EnvFindDefmodule(self._env, modname) return Module(self._env, defmodule)
def _delete(self, *args, **kwargs): """Wrapper around Requests for DELETE requests Returns: Response: A Requests Response object """ if 'timeout' not in kwargs: kwargs['timeout'] = self.timeout req = self.session.delete(*args, **kwargs) return req
Wrapper around Requests for DELETE requests Returns: Response: A Requests Response object
Below is the the instruction that describes the task: ### Input: Wrapper around Requests for DELETE requests Returns: Response: A Requests Response object ### Response: def _delete(self, *args, **kwargs): """Wrapper around Requests for DELETE requests Returns: Response: A Requests Response object """ if 'timeout' not in kwargs: kwargs['timeout'] = self.timeout req = self.session.delete(*args, **kwargs) return req
def metrics(ty, query, query_type, **kwargs):
    """
    Outputs runtime metrics collected from cocaine-runtime and its services.

    This command shows runtime metrics collected from cocaine-runtime and
    its services during their lifetime. There are four kind of metrics
    available: gauges, counters, meters and timers.

    \b
     - Gauges - an instantaneous measurement of a value.
     - Counters - just a gauge for an atomic integer instance.
     - Meters - measures the rate of events over time (e.g., "requests per
       second"). In addition to the mean rate, meters also track 1-, 5-,
       and 15-minute moving averages.
     - Timers - measures both the rate that a particular piece of code is
       called and the distribution of its duration.

    Every metric in has a unique name, which is just a dotted-name string
    like "connections.count" or "node.queue.size".

    An output type can be configured using --type option. The default one
    results in plain formatting where there is only one depth level. As an
    alternative you can expanded the JSON tree by specifying --type=json
    option. The depth of the result tree depends on metric name which is
    split by dot symbol.

    The result output will be probably too large without any customization.
    To reduce this output there are custom filters, which can be specified
    using --query option. Technically it's a special metrics query language
    (MQL) which supports the following operations and functions:

    \b
     - contains(<expr>, <expr>) - checks whether the result of second
       expression contains in the result of first expression. These
       expressions must resolve in strings. An output type of this
       function is bool.
     - name() - resolves in metric name.
     - type() - resolves in metric type (counter, meter, etc.).
     - tag(<expr>) - extracts custom metric tag and results in string.
     - && - combines several expressions in one, which applies when all
       of them apply.
     - || - combines several expressions in one, which applies when any
       of them apply.
     - == - compares two expressions for equality.
     - != - compares two expressions for an non-equality.
     - Also string literals (alphanumeric with dots) can be used as an
       expressions, for example "name() == locator.connections.accepted".

    Priorities can be specified using braces as in usual math expressions.

    The grammar for this query language is:

    \b
    expr ::= term ((AND | OR) term)*
    term ::= factor ((EQ | NE) factor)*
    factor ::= func | literal | number | LPAREN expr RPAREN
    func ::= literal LPAREN expr (,expr)* RPAREN
    literal ::= alphanum | .
    number ::= <floating point number>

    An example of the query, which returns all meters (for all services)
    and the number of accepted connections for the Locator service:
    "contains(type(), meter) || name() == locator.connections.accepted".
    """
    # Delegate to the shared CLI action dispatcher; the 'metrics' service
    # is reached over a secure (authenticated) channel.
    # NOTE(review): this docstring doubles as the command's --help text,
    # so its wording is user-facing and kept verbatim.
    ctx = Context(**kwargs)
    ctx.execute_action('metrics', **{
        'metrics': ctx.repo.create_secure_service('metrics'),
        'ty': ty,
        'query': query,
        'query_type': query_type,
    })
Outputs runtime metrics collected from cocaine-runtime and its services. This command shows runtime metrics collected from cocaine-runtime and its services during their lifetime. There are four kind of metrics available: gauges, counters, meters and timers. \b - Gauges - an instantaneous measurement of a value. - Counters - just a gauge for an atomic integer instance. - Meters - measures the rate of events over time (e.g., "requests per second"). In addition to the mean rate, meters also track 1-, 5-, and 15-minute moving averages. - Timers - measures both the rate that a particular piece of code is called and the distribution of its duration. Every metric in has a unique name, which is just a dotted-name string like "connections.count" or "node.queue.size". An output type can be configured using --type option. The default one results in plain formatting where there is only one depth level. As an alternative you can expanded the JSON tree by specifying --type=json option. The depth of the result tree depends on metric name which is split by dot symbol. The result output will be probably too large without any customization. To reduce this output there are custom filters, which can be specified using --query option. Technically it's a special metrics query language (MQL) which supports the following operations and functions: \b - contains(<expr>, <expr>) - checks whether the result of second expression contains in the result of first expression. These expressions must resolve in strings. An output type of this function is bool. - name() - resolves in metric name. - type() - resolves in metric type (counter, meter, etc.). - tag(<expr>) - extracts custom metric tag and results in string. - && - combines several expressions in one, which applies when all of them apply. - || - combines several expressions in one, which applies when any of them apply. - == - compares two expressions for equality. - != - compares two expressions for an non-equality. 
- Also string literals (alphanumeric with dots) can be used as an expressions, for example "name() == locator.connections.accepted". Priorities can be specified using braces as in usual math expressions. The grammar for this query language is: \b expr ::= term ((AND | OR) term)* term ::= factor ((EQ | NE) factor)* factor ::= func | literal | number | LPAREN expr RPAREN func ::= literal LPAREN expr (,expr)* RPAREN literal ::= alphanum | . number ::= <floating point number> An example of the query, which returns all meters (for all services) and the number of accepted connections for the Locator service: "contains(type(), meter) || name() == locator.connections.accepted".
Below is the the instruction that describes the task: ### Input: Outputs runtime metrics collected from cocaine-runtime and its services. This command shows runtime metrics collected from cocaine-runtime and its services during their lifetime. There are four kind of metrics available: gauges, counters, meters and timers. \b - Gauges - an instantaneous measurement of a value. - Counters - just a gauge for an atomic integer instance. - Meters - measures the rate of events over time (e.g., "requests per second"). In addition to the mean rate, meters also track 1-, 5-, and 15-minute moving averages. - Timers - measures both the rate that a particular piece of code is called and the distribution of its duration. Every metric in has a unique name, which is just a dotted-name string like "connections.count" or "node.queue.size". An output type can be configured using --type option. The default one results in plain formatting where there is only one depth level. As an alternative you can expanded the JSON tree by specifying --type=json option. The depth of the result tree depends on metric name which is split by dot symbol. The result output will be probably too large without any customization. To reduce this output there are custom filters, which can be specified using --query option. Technically it's a special metrics query language (MQL) which supports the following operations and functions: \b - contains(<expr>, <expr>) - checks whether the result of second expression contains in the result of first expression. These expressions must resolve in strings. An output type of this function is bool. - name() - resolves in metric name. - type() - resolves in metric type (counter, meter, etc.). - tag(<expr>) - extracts custom metric tag and results in string. - && - combines several expressions in one, which applies when all of them apply. - || - combines several expressions in one, which applies when any of them apply. - == - compares two expressions for equality. 
- != - compares two expressions for an non-equality. - Also string literals (alphanumeric with dots) can be used as an expressions, for example "name() == locator.connections.accepted". Priorities can be specified using braces as in usual math expressions. The grammar for this query language is: \b expr ::= term ((AND | OR) term)* term ::= factor ((EQ | NE) factor)* factor ::= func | literal | number | LPAREN expr RPAREN func ::= literal LPAREN expr (,expr)* RPAREN literal ::= alphanum | . number ::= <floating point number> An example of the query, which returns all meters (for all services) and the number of accepted connections for the Locator service: "contains(type(), meter) || name() == locator.connections.accepted". ### Response: def metrics(ty, query, query_type, **kwargs): """ Outputs runtime metrics collected from cocaine-runtime and its services. This command shows runtime metrics collected from cocaine-runtime and its services during their lifetime. There are four kind of metrics available: gauges, counters, meters and timers. \b - Gauges - an instantaneous measurement of a value. - Counters - just a gauge for an atomic integer instance. - Meters - measures the rate of events over time (e.g., "requests per second"). In addition to the mean rate, meters also track 1-, 5-, and 15-minute moving averages. - Timers - measures both the rate that a particular piece of code is called and the distribution of its duration. Every metric in has a unique name, which is just a dotted-name string like "connections.count" or "node.queue.size". An output type can be configured using --type option. The default one results in plain formatting where there is only one depth level. As an alternative you can expanded the JSON tree by specifying --type=json option. The depth of the result tree depends on metric name which is split by dot symbol. The result output will be probably too large without any customization. 
To reduce this output there are custom filters, which can be specified using --query option. Technically it's a special metrics query language (MQL) which supports the following operations and functions: \b - contains(<expr>, <expr>) - checks whether the result of second expression contains in the result of first expression. These expressions must resolve in strings. An output type of this function is bool. - name() - resolves in metric name. - type() - resolves in metric type (counter, meter, etc.). - tag(<expr>) - extracts custom metric tag and results in string. - && - combines several expressions in one, which applies when all of them apply. - || - combines several expressions in one, which applies when any of them apply. - == - compares two expressions for equality. - != - compares two expressions for an non-equality. - Also string literals (alphanumeric with dots) can be used as an expressions, for example "name() == locator.connections.accepted". Priorities can be specified using braces as in usual math expressions. The grammar for this query language is: \b expr ::= term ((AND | OR) term)* term ::= factor ((EQ | NE) factor)* factor ::= func | literal | number | LPAREN expr RPAREN func ::= literal LPAREN expr (,expr)* RPAREN literal ::= alphanum | . number ::= <floating point number> An example of the query, which returns all meters (for all services) and the number of accepted connections for the Locator service: "contains(type(), meter) || name() == locator.connections.accepted". """ ctx = Context(**kwargs) ctx.execute_action('metrics', **{ 'metrics': ctx.repo.create_secure_service('metrics'), 'ty': ty, 'query': query, 'query_type': query_type, })
def releases(self, **kwargs):
    """Get the release date and certification information by country for
    a specific movie id.

    Args:
        append_to_response: (optional) Comma separated, any movie method.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    response = self._GET(self._get_id_path('releases'), kwargs)
    # Mirror the response payload onto this object's attributes.
    self._set_attrs_to_values(response)
    return response
Get the release date and certification information by country for a specific movie id. Args: append_to_response: (optional) Comma separated, any movie method. Returns: A dict representation of the JSON returned from the API.
Below is the the instruction that describes the task: ### Input: Get the release date and certification information by country for a specific movie id. Args: append_to_response: (optional) Comma separated, any movie method. Returns: A dict representation of the JSON returned from the API. ### Response: def releases(self, **kwargs): """ Get the release date and certification information by country for a specific movie id. Args: append_to_response: (optional) Comma separated, any movie method. Returns: A dict representation of the JSON returned from the API. """ path = self._get_id_path('releases') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
def do_jump(self, arg):
    """j(ump) lineno

    Set the next line that will be executed.  Only available in the
    bottom-most frame.  This lets you jump back and execute code again,
    or jump forward to skip code that you don't want to run.

    It should be noted that not all jumps are allowed -- for instance it
    is not possible to jump into the middle of a for loop or out of a
    finally clause.
    """
    if self.curindex + 1 != len(self.stack):
        self.error('You can only jump within the bottom frame')
        return
    try:
        lineno = int(arg)
    except ValueError:
        self.error("The 'jump' command requires a line number")
        return
    try:
        # Do the jump, fix up our copy of the stack, and display the
        # new position.
        self.curframe.f_lineno = lineno
        frame = self.stack[self.curindex][0]
        self.stack[self.curindex] = frame, lineno
        self.print_stack_entry(self.stack[self.curindex])
    except ValueError as exc:
        self.error('Jump failed: %s' % exc)
j(ump) lineno Set the next line that will be executed. Only available in the bottom-most frame. This lets you jump back and execute code again, or jump forward to skip code that you don't want to run. It should be noted that not all jumps are allowed -- for instance it is not possible to jump into the middle of a for loop or out of a finally clause.
Below is the the instruction that describes the task: ### Input: j(ump) lineno Set the next line that will be executed. Only available in the bottom-most frame. This lets you jump back and execute code again, or jump forward to skip code that you don't want to run. It should be noted that not all jumps are allowed -- for instance it is not possible to jump into the middle of a for loop or out of a finally clause. ### Response: def do_jump(self, arg): """j(ump) lineno Set the next line that will be executed. Only available in the bottom-most frame. This lets you jump back and execute code again, or jump forward to skip code that you don't want to run. It should be noted that not all jumps are allowed -- for instance it is not possible to jump into the middle of a for loop or out of a finally clause. """ if self.curindex + 1 != len(self.stack): self.error('You can only jump within the bottom frame') return try: arg = int(arg) except ValueError: self.error("The 'jump' command requires a line number") else: try: # Do the jump, fix up our copy of the stack, and display the # new position self.curframe.f_lineno = arg self.stack[self.curindex] = self.stack[self.curindex][0], arg self.print_stack_entry(self.stack[self.curindex]) except ValueError as e: self.error('Jump failed: %s' % e)
def register_success(self, upgrade):
    """Register a successful upgrade."""
    # Record the upgrade name with the current timestamp and persist it.
    record = Upgrade(upgrade=upgrade.name, applied=datetime.now())
    db.session.add(record)
    db.session.commit()
Register a successful upgrade.
Below is the the instruction that describes the task: ### Input: Register a successful upgrade. ### Response: def register_success(self, upgrade): """Register a successful upgrade.""" u = Upgrade(upgrade=upgrade.name, applied=datetime.now()) db.session.add(u) db.session.commit()
def set_selection_strategy(self, strategy='spectral-oasis', nsel=1, neig=None):
    """
    Defines the column selection strategy

    Parameters
    ----------
    strategy : str
        One of the following strategies to select new columns:
        random : randomly choose from non-selected columns
        oasis : maximal approximation error in the diagonal of :math:`A`
        spectral-oasis : selects the nsel columns that are most distanced
            in the oASIS-error-scaled dominant eigenspace
    nsel : int
        number of columns to be selected in each round
    neig : int or None, optional, default None
        Number of eigenvalues to be optimized by the selection process.
        If None, use the whole available eigenspace
    """
    # Delegate to the module-level factory, which binds the chosen
    # strategy to this oASIS instance.
    # NOTE(review): presumably the factory validates the strategy name
    # and raises on unknown values -- confirm against its definition.
    self._selection_strategy = selection_strategy(self, strategy, nsel, neig)
Defines the column selection strategy Parameters ---------- strategy : str One of the following strategies to select new columns: random : randomly choose from non-selected columns oasis : maximal approximation error in the diagonal of :math:`A` spectral-oasis : selects the nsel columns that are most distanced in the oASIS-error-scaled dominant eigenspace nsel : int number of columns to be selected in each round neig : int or None, optional, default None Number of eigenvalues to be optimized by the selection process. If None, use the whole available eigenspace
Below is the the instruction that describes the task: ### Input: Defines the column selection strategy Parameters ---------- strategy : str One of the following strategies to select new columns: random : randomly choose from non-selected columns oasis : maximal approximation error in the diagonal of :math:`A` spectral-oasis : selects the nsel columns that are most distanced in the oASIS-error-scaled dominant eigenspace nsel : int number of columns to be selected in each round neig : int or None, optional, default None Number of eigenvalues to be optimized by the selection process. If None, use the whole available eigenspace ### Response: def set_selection_strategy(self, strategy='spectral-oasis', nsel=1, neig=None): """ Defines the column selection strategy Parameters ---------- strategy : str One of the following strategies to select new columns: random : randomly choose from non-selected columns oasis : maximal approximation error in the diagonal of :math:`A` spectral-oasis : selects the nsel columns that are most distanced in the oASIS-error-scaled dominant eigenspace nsel : int number of columns to be selected in each round neig : int or None, optional, default None Number of eigenvalues to be optimized by the selection process. If None, use the whole available eigenspace """ self._selection_strategy = selection_strategy(self, strategy, nsel, neig)
def main(): """cm. Usage: cm [-q] help cm [-v] [-b] [--file=SCRIPT] [-i] [COMMAND ...] Arguments: COMMAND A command to be executed Options: --file=SCRIPT -f SCRIPT Executes the script -i After start keep the shell interactive, otherwise quit [default: False] -b surpress the printing of the banner [default: False] """ echo = False try: arguments = docopt(main.__doc__, help=True) # fixing the help parameter parsing if arguments['help']: arguments['COMMAND'] = ['help'] arguments['help'] = 'False' script_file = arguments['--file'] interactive = arguments['-i'] echo = arguments['-v'] if echo: pprint(arguments) except: script_file = None interactive = False arguments = {'-b': True, 'COMMAND': [' '.join(sys.argv[1:])]} plugins = [] plugins.append(dict(get_plugins_from_dir("sys", "cmd3"))) # plugins.append(dict(get_plugins_from_dir("~/.cloudmesh", "cmd3local"))) # if not os.path.exists(path_expand( "~/.cloudmesh/cmd3.yaml")): # from cmd3.plugins.shell_core import create_cmd3_yaml_file # create_cmd3_yaml_file() create_cmd3_yaml_file(force=False, verbose=False) filename = path_expand("~/.cloudmesh/cmd3.yaml") try: module_config = ConfigDict(filename=filename) modules = module_config["cmd3"]["modules"] properties = module_config["cmd3"]["properties"] except: modules = ['cloudmesh_cmd3.plugins'] for module_name in modules: #print ("INSTALL", module_name) try: plugins.append(dict(get_plugins_from_module(module_name))) except: # print "WARNING: could not find", module_name pass # sys.exit() # plugins.append(dict(get_plugins_from_dir("~/.cloudmesh", "cmd3local"))) # plugins.append(dict(get_plugins_from_dir (".", "dot"))) for plugin in plugins: sys.path.append(os.path.expanduser(plugin['dir'])) sys.path.append("../..") sys.path.append(".") sys.path.append("..") for plugin in plugins: plugin['class'] += ".plugins" # pprint(plugins) # pprint(sys.path) # sys.exit() name = "CmCli" # # not yet quite what i want, but falling back to a flatt array # (cmd, plugin_objects) = 
DynamicCmd(name, plugins) cmd.set_verbose(echo) cmd.activate() cmd.set_verbose(echo) cmd.set_debug(properties["debug"]) if arguments['-b']: cmd.set_banner("") if script_file is not None: cmd.do_exec(script_file) if len(arguments['COMMAND']) > 0: try: user_cmd = " ".join(arguments['COMMAND']) if echo: print(">", user_cmd) cmd.onecmd(user_cmd) except Exception, e: Console.error("") Console.error("ERROR: executing command '{0}'".format(user_cmd)) Console.error("") print (70 * "=") print(e) print (70 * "=") print(traceback.format_exc()) if interactive: cmd.cmdloop() elif not script_file or interactive: cmd.cmdloop()
cm. Usage: cm [-q] help cm [-v] [-b] [--file=SCRIPT] [-i] [COMMAND ...] Arguments: COMMAND A command to be executed Options: --file=SCRIPT -f SCRIPT Executes the script -i After start keep the shell interactive, otherwise quit [default: False] -b surpress the printing of the banner [default: False]
Below is the the instruction that describes the task: ### Input: cm. Usage: cm [-q] help cm [-v] [-b] [--file=SCRIPT] [-i] [COMMAND ...] Arguments: COMMAND A command to be executed Options: --file=SCRIPT -f SCRIPT Executes the script -i After start keep the shell interactive, otherwise quit [default: False] -b surpress the printing of the banner [default: False] ### Response: def main(): """cm. Usage: cm [-q] help cm [-v] [-b] [--file=SCRIPT] [-i] [COMMAND ...] Arguments: COMMAND A command to be executed Options: --file=SCRIPT -f SCRIPT Executes the script -i After start keep the shell interactive, otherwise quit [default: False] -b surpress the printing of the banner [default: False] """ echo = False try: arguments = docopt(main.__doc__, help=True) # fixing the help parameter parsing if arguments['help']: arguments['COMMAND'] = ['help'] arguments['help'] = 'False' script_file = arguments['--file'] interactive = arguments['-i'] echo = arguments['-v'] if echo: pprint(arguments) except: script_file = None interactive = False arguments = {'-b': True, 'COMMAND': [' '.join(sys.argv[1:])]} plugins = [] plugins.append(dict(get_plugins_from_dir("sys", "cmd3"))) # plugins.append(dict(get_plugins_from_dir("~/.cloudmesh", "cmd3local"))) # if not os.path.exists(path_expand( "~/.cloudmesh/cmd3.yaml")): # from cmd3.plugins.shell_core import create_cmd3_yaml_file # create_cmd3_yaml_file() create_cmd3_yaml_file(force=False, verbose=False) filename = path_expand("~/.cloudmesh/cmd3.yaml") try: module_config = ConfigDict(filename=filename) modules = module_config["cmd3"]["modules"] properties = module_config["cmd3"]["properties"] except: modules = ['cloudmesh_cmd3.plugins'] for module_name in modules: #print ("INSTALL", module_name) try: plugins.append(dict(get_plugins_from_module(module_name))) except: # print "WARNING: could not find", module_name pass # sys.exit() # plugins.append(dict(get_plugins_from_dir("~/.cloudmesh", "cmd3local"))) # plugins.append(dict(get_plugins_from_dir 
(".", "dot"))) for plugin in plugins: sys.path.append(os.path.expanduser(plugin['dir'])) sys.path.append("../..") sys.path.append(".") sys.path.append("..") for plugin in plugins: plugin['class'] += ".plugins" # pprint(plugins) # pprint(sys.path) # sys.exit() name = "CmCli" # # not yet quite what i want, but falling back to a flatt array # (cmd, plugin_objects) = DynamicCmd(name, plugins) cmd.set_verbose(echo) cmd.activate() cmd.set_verbose(echo) cmd.set_debug(properties["debug"]) if arguments['-b']: cmd.set_banner("") if script_file is not None: cmd.do_exec(script_file) if len(arguments['COMMAND']) > 0: try: user_cmd = " ".join(arguments['COMMAND']) if echo: print(">", user_cmd) cmd.onecmd(user_cmd) except Exception, e: Console.error("") Console.error("ERROR: executing command '{0}'".format(user_cmd)) Console.error("") print (70 * "=") print(e) print (70 * "=") print(traceback.format_exc()) if interactive: cmd.cmdloop() elif not script_file or interactive: cmd.cmdloop()
def py_to_go_cookie(py_cookie): '''Convert a python cookie to the JSON-marshalable Go-style cookie form.''' # TODO (perhaps): # HttpOnly # Creation # LastAccess # Updated # not done properly: CanonicalHost. go_cookie = { 'Name': py_cookie.name, 'Value': py_cookie.value, 'Domain': py_cookie.domain, 'HostOnly': not py_cookie.domain_specified, 'Persistent': not py_cookie.discard, 'Secure': py_cookie.secure, 'CanonicalHost': py_cookie.domain, } if py_cookie.path_specified: go_cookie['Path'] = py_cookie.path if py_cookie.expires is not None: unix_time = datetime.datetime.fromtimestamp(py_cookie.expires) # Note: fromtimestamp bizarrely produces a time without # a time zone, so we need to use accept_naive. go_cookie['Expires'] = pyrfc3339.generate(unix_time, accept_naive=True) return go_cookie
Convert a python cookie to the JSON-marshalable Go-style cookie form.
Below is the the instruction that describes the task: ### Input: Convert a python cookie to the JSON-marshalable Go-style cookie form. ### Response: def py_to_go_cookie(py_cookie): '''Convert a python cookie to the JSON-marshalable Go-style cookie form.''' # TODO (perhaps): # HttpOnly # Creation # LastAccess # Updated # not done properly: CanonicalHost. go_cookie = { 'Name': py_cookie.name, 'Value': py_cookie.value, 'Domain': py_cookie.domain, 'HostOnly': not py_cookie.domain_specified, 'Persistent': not py_cookie.discard, 'Secure': py_cookie.secure, 'CanonicalHost': py_cookie.domain, } if py_cookie.path_specified: go_cookie['Path'] = py_cookie.path if py_cookie.expires is not None: unix_time = datetime.datetime.fromtimestamp(py_cookie.expires) # Note: fromtimestamp bizarrely produces a time without # a time zone, so we need to use accept_naive. go_cookie['Expires'] = pyrfc3339.generate(unix_time, accept_naive=True) return go_cookie
def modsplit(s): """Split importable""" if ':' in s: c = s.split(':') if len(c) != 2: raise ValueError("Syntax error: {s}") return c[0], c[1] else: c = s.split('.') if len(c) < 2: raise ValueError("Syntax error: {s}") return '.'.join(c[:-1]), c[-1]
Split importable
Below is the the instruction that describes the task: ### Input: Split importable ### Response: def modsplit(s): """Split importable""" if ':' in s: c = s.split(':') if len(c) != 2: raise ValueError("Syntax error: {s}") return c[0], c[1] else: c = s.split('.') if len(c) < 2: raise ValueError("Syntax error: {s}") return '.'.join(c[:-1]), c[-1]
def _values_of_same_type(self, val1, val2): """Checks if two values agree in type. Raises a TypeError if both values are not supported by the parameter. Returns false if only one of the two values is supported by the parameter. Example usage: >>>param._values_of_same_type(42,43) True >>>param._values_of_same_type(42,'43') False :raises: TypeError """ if self.f_supports(val1) != self.f_supports(val2): return False if not self.f_supports(val1) and not self.f_supports(val2): raise TypeError('I do not support the types of both inputs (`%s` and `%s`),' ' therefore I cannot judge whether the two are of same type.' % str(type(val1)), str(type(val2))) if not type(val1) is type(val2): return False # Numpy arrays must agree in data type and shape if type(val1) is np.array: if not val1.dtype is val2.dtype: return False if not np.shape(val1) == np.shape(val2): return False # For tuples we now from earlier checks that the data is homogeneous. # Thus, only the type of the first item and the length must agree. if type(val1) is tuple: return (type(val1[0]) is type(val2[0])) and (len(val1) == len(val2)) return True
Checks if two values agree in type. Raises a TypeError if both values are not supported by the parameter. Returns false if only one of the two values is supported by the parameter. Example usage: >>>param._values_of_same_type(42,43) True >>>param._values_of_same_type(42,'43') False :raises: TypeError
Below is the the instruction that describes the task: ### Input: Checks if two values agree in type. Raises a TypeError if both values are not supported by the parameter. Returns false if only one of the two values is supported by the parameter. Example usage: >>>param._values_of_same_type(42,43) True >>>param._values_of_same_type(42,'43') False :raises: TypeError ### Response: def _values_of_same_type(self, val1, val2): """Checks if two values agree in type. Raises a TypeError if both values are not supported by the parameter. Returns false if only one of the two values is supported by the parameter. Example usage: >>>param._values_of_same_type(42,43) True >>>param._values_of_same_type(42,'43') False :raises: TypeError """ if self.f_supports(val1) != self.f_supports(val2): return False if not self.f_supports(val1) and not self.f_supports(val2): raise TypeError('I do not support the types of both inputs (`%s` and `%s`),' ' therefore I cannot judge whether the two are of same type.' % str(type(val1)), str(type(val2))) if not type(val1) is type(val2): return False # Numpy arrays must agree in data type and shape if type(val1) is np.array: if not val1.dtype is val2.dtype: return False if not np.shape(val1) == np.shape(val2): return False # For tuples we now from earlier checks that the data is homogeneous. # Thus, only the type of the first item and the length must agree. if type(val1) is tuple: return (type(val1[0]) is type(val2[0])) and (len(val1) == len(val2)) return True
def json_dump_hook(cfg, text: bool=False): """ Dumps all the data into a JSON file. """ data = cfg.config.dump() if not text: json.dump(data, cfg.fd) else: return json.dumps(data)
Dumps all the data into a JSON file.
Below is the the instruction that describes the task: ### Input: Dumps all the data into a JSON file. ### Response: def json_dump_hook(cfg, text: bool=False): """ Dumps all the data into a JSON file. """ data = cfg.config.dump() if not text: json.dump(data, cfg.fd) else: return json.dumps(data)
def Reload(self): """Loads all events added since the last call to `Reload`. If `Reload` was never called, loads all events in the file. Returns: The `EventAccumulator`. """ with self._generator_mutex: for event in self._generator.Load(): self._ProcessEvent(event) return self
Loads all events added since the last call to `Reload`. If `Reload` was never called, loads all events in the file. Returns: The `EventAccumulator`.
Below is the the instruction that describes the task: ### Input: Loads all events added since the last call to `Reload`. If `Reload` was never called, loads all events in the file. Returns: The `EventAccumulator`. ### Response: def Reload(self): """Loads all events added since the last call to `Reload`. If `Reload` was never called, loads all events in the file. Returns: The `EventAccumulator`. """ with self._generator_mutex: for event in self._generator.Load(): self._ProcessEvent(event) return self
def ExamineEvent(self, mediator, event): """Analyzes an event. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. event (EventObject): event to examine. """ # This event requires an URL attribute. url = getattr(event, 'url', None) if not url: return # TODO: refactor this the source should be used in formatting only. # Check if we are dealing with a web history event. source, _ = formatters_manager.FormattersManager.GetSourceStrings(event) if source != 'WEBHIST': return for engine, url_expression, method_name in self._URL_FILTERS: callback_method = getattr(self, method_name, None) if not callback_method: logger.warning('Missing method: {0:s}'.format(callback_method)) continue match = url_expression.search(url) if not match: continue search_query = callback_method(url) if not search_query: logger.warning('Missing search query for URL: {0:s}'.format(url)) continue search_query = self._DecodeURL(search_query) if not search_query: continue event_tag = self._CreateEventTag( event, self._EVENT_TAG_COMMENT, self._EVENT_TAG_LABELS) mediator.ProduceEventTag(event_tag) self._counter['{0:s}:{1:s}'.format(engine, search_query)] += 1 # Add the timeline format for each search term. timestamp = getattr(event, 'timestamp', 0) source = getattr(event, 'parser', 'N/A') source = getattr(event, 'plugin', source) self._search_term_timeline.append( SEARCH_OBJECT(timestamp, source, engine, search_query))
Analyzes an event. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. event (EventObject): event to examine.
Below is the the instruction that describes the task: ### Input: Analyzes an event. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. event (EventObject): event to examine. ### Response: def ExamineEvent(self, mediator, event): """Analyzes an event. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. event (EventObject): event to examine. """ # This event requires an URL attribute. url = getattr(event, 'url', None) if not url: return # TODO: refactor this the source should be used in formatting only. # Check if we are dealing with a web history event. source, _ = formatters_manager.FormattersManager.GetSourceStrings(event) if source != 'WEBHIST': return for engine, url_expression, method_name in self._URL_FILTERS: callback_method = getattr(self, method_name, None) if not callback_method: logger.warning('Missing method: {0:s}'.format(callback_method)) continue match = url_expression.search(url) if not match: continue search_query = callback_method(url) if not search_query: logger.warning('Missing search query for URL: {0:s}'.format(url)) continue search_query = self._DecodeURL(search_query) if not search_query: continue event_tag = self._CreateEventTag( event, self._EVENT_TAG_COMMENT, self._EVENT_TAG_LABELS) mediator.ProduceEventTag(event_tag) self._counter['{0:s}:{1:s}'.format(engine, search_query)] += 1 # Add the timeline format for each search term. timestamp = getattr(event, 'timestamp', 0) source = getattr(event, 'parser', 'N/A') source = getattr(event, 'plugin', source) self._search_term_timeline.append( SEARCH_OBJECT(timestamp, source, engine, search_query))
def filter_icc(icc, mask=None, thr=2, zscore=True, mode="+"): """ Threshold then mask an IC correlation map. Parameters ---------- icc: img-like The 'raw' ICC map. mask: img-like If not None. Will apply this masks in the end of the process. thr: float The threshold value. zscore: bool If True will calculate the z-score of the ICC before thresholding. mode: str Choices: '+' for positive threshold, '+-' for positive and negative threshold and '-' for negative threshold. Returns ------- icc_filt: nibabel.NiftiImage Thresholded and masked ICC. """ if zscore: icc_filt = thr_img(icc_img_to_zscore(icc), thr=thr, mode=mode) else: icc_filt = thr_img(icc, thr=thr, mode=mode) if mask is not None: icc_filt = apply_mask(icc_filt, mask) return icc_filt
Threshold then mask an IC correlation map. Parameters ---------- icc: img-like The 'raw' ICC map. mask: img-like If not None. Will apply this masks in the end of the process. thr: float The threshold value. zscore: bool If True will calculate the z-score of the ICC before thresholding. mode: str Choices: '+' for positive threshold, '+-' for positive and negative threshold and '-' for negative threshold. Returns ------- icc_filt: nibabel.NiftiImage Thresholded and masked ICC.
Below is the the instruction that describes the task: ### Input: Threshold then mask an IC correlation map. Parameters ---------- icc: img-like The 'raw' ICC map. mask: img-like If not None. Will apply this masks in the end of the process. thr: float The threshold value. zscore: bool If True will calculate the z-score of the ICC before thresholding. mode: str Choices: '+' for positive threshold, '+-' for positive and negative threshold and '-' for negative threshold. Returns ------- icc_filt: nibabel.NiftiImage Thresholded and masked ICC. ### Response: def filter_icc(icc, mask=None, thr=2, zscore=True, mode="+"): """ Threshold then mask an IC correlation map. Parameters ---------- icc: img-like The 'raw' ICC map. mask: img-like If not None. Will apply this masks in the end of the process. thr: float The threshold value. zscore: bool If True will calculate the z-score of the ICC before thresholding. mode: str Choices: '+' for positive threshold, '+-' for positive and negative threshold and '-' for negative threshold. Returns ------- icc_filt: nibabel.NiftiImage Thresholded and masked ICC. """ if zscore: icc_filt = thr_img(icc_img_to_zscore(icc), thr=thr, mode=mode) else: icc_filt = thr_img(icc, thr=thr, mode=mode) if mask is not None: icc_filt = apply_mask(icc_filt, mask) return icc_filt
def generate_base_grid(self, vtk_filename=None): """ Run first step of algorithm. Next step is split_voxels :param vtk_filename: :return: """ nd, ed, ed_dir = self.gen_grid_fcn(self.data.shape, self.voxelsize) self.add_nodes(nd) self.add_edges(ed, ed_dir, edge_low_or_high=0) if vtk_filename is not None: self.write_vtk(vtk_filename)
Run first step of algorithm. Next step is split_voxels :param vtk_filename: :return:
Below is the the instruction that describes the task: ### Input: Run first step of algorithm. Next step is split_voxels :param vtk_filename: :return: ### Response: def generate_base_grid(self, vtk_filename=None): """ Run first step of algorithm. Next step is split_voxels :param vtk_filename: :return: """ nd, ed, ed_dir = self.gen_grid_fcn(self.data.shape, self.voxelsize) self.add_nodes(nd) self.add_edges(ed, ed_dir, edge_low_or_high=0) if vtk_filename is not None: self.write_vtk(vtk_filename)
def _check_args(self, source): '''Validate the argument section. Args may be either a dict or a list (to allow multiple positional args). ''' path = [source] args = self.parsed_yaml.get('args', {}) self._assert_struct_type(args, 'args', (dict, list), path) path.append('args') if isinstance(args, dict): for argn, argattrs in args.items(): self._check_one_arg(path, argn, argattrs) else: # must be list - already asserted struct type for argdict in args: self._assert_command_dict(argdict, '[list-item]', path) argn, argattrs = list(argdict.items())[0] # safe - length asserted on previous line self._check_one_arg(path, argn, argattrs)
Validate the argument section. Args may be either a dict or a list (to allow multiple positional args).
Below is the the instruction that describes the task: ### Input: Validate the argument section. Args may be either a dict or a list (to allow multiple positional args). ### Response: def _check_args(self, source): '''Validate the argument section. Args may be either a dict or a list (to allow multiple positional args). ''' path = [source] args = self.parsed_yaml.get('args', {}) self._assert_struct_type(args, 'args', (dict, list), path) path.append('args') if isinstance(args, dict): for argn, argattrs in args.items(): self._check_one_arg(path, argn, argattrs) else: # must be list - already asserted struct type for argdict in args: self._assert_command_dict(argdict, '[list-item]', path) argn, argattrs = list(argdict.items())[0] # safe - length asserted on previous line self._check_one_arg(path, argn, argattrs)
def parse_targets(name=None, pkgs=None, sources=None, saltenv='base', normalize=True, **kwargs): ''' Parses the input to pkg.install and returns back the package(s) to be installed. Returns a list of packages, as well as a string noting whether the packages are to come from a repository or a binary package. CLI Example: .. code-block:: bash salt '*' pkg_resource.parse_targets ''' if '__env__' in kwargs: # "env" is not supported; Use "saltenv". kwargs.pop('__env__') if __grains__['os'] == 'MacOS' and sources: log.warning('Parameter "sources" ignored on MacOS hosts.') version = kwargs.get('version') if pkgs and sources: log.error('Only one of "pkgs" and "sources" can be used.') return None, None elif 'advisory_ids' in kwargs: if pkgs: log.error('Cannot use "advisory_ids" and "pkgs" at the same time') return None, None elif kwargs['advisory_ids']: return kwargs['advisory_ids'], 'advisory' else: return [name], 'advisory' elif pkgs: if version is not None: log.warning('\'version\' argument will be ignored for multiple ' 'package targets') pkgs = _repack_pkgs(pkgs, normalize=normalize) if not pkgs: return None, None else: return pkgs, 'repository' elif sources and __grains__['os'] != 'MacOS': if version is not None: log.warning('\'version\' argument will be ignored for multiple ' 'package targets') sources = pack_sources(sources, normalize=normalize) if not sources: return None, None srcinfo = [] for pkg_name, pkg_src in six.iteritems(sources): if __salt__['config.valid_fileproto'](pkg_src): # Cache package from remote source (salt master, HTTP, FTP) and # append the cached path. srcinfo.append(__salt__['cp.cache_file'](pkg_src, saltenv)) else: # Package file local to the minion, just append the path to the # package file. 
if not os.path.isabs(pkg_src): raise SaltInvocationError( 'Path {0} for package {1} is either not absolute or ' 'an invalid protocol'.format(pkg_src, pkg_name) ) srcinfo.append(pkg_src) return srcinfo, 'file' elif name: if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) packed = dict([(_normalize_name(x), version) for x in name.split(',')]) else: packed = dict([(x, version) for x in name.split(',')]) return packed, 'repository' else: log.error('No package sources provided') return None, None
Parses the input to pkg.install and returns back the package(s) to be installed. Returns a list of packages, as well as a string noting whether the packages are to come from a repository or a binary package. CLI Example: .. code-block:: bash salt '*' pkg_resource.parse_targets
Below is the the instruction that describes the task: ### Input: Parses the input to pkg.install and returns back the package(s) to be installed. Returns a list of packages, as well as a string noting whether the packages are to come from a repository or a binary package. CLI Example: .. code-block:: bash salt '*' pkg_resource.parse_targets ### Response: def parse_targets(name=None, pkgs=None, sources=None, saltenv='base', normalize=True, **kwargs): ''' Parses the input to pkg.install and returns back the package(s) to be installed. Returns a list of packages, as well as a string noting whether the packages are to come from a repository or a binary package. CLI Example: .. code-block:: bash salt '*' pkg_resource.parse_targets ''' if '__env__' in kwargs: # "env" is not supported; Use "saltenv". kwargs.pop('__env__') if __grains__['os'] == 'MacOS' and sources: log.warning('Parameter "sources" ignored on MacOS hosts.') version = kwargs.get('version') if pkgs and sources: log.error('Only one of "pkgs" and "sources" can be used.') return None, None elif 'advisory_ids' in kwargs: if pkgs: log.error('Cannot use "advisory_ids" and "pkgs" at the same time') return None, None elif kwargs['advisory_ids']: return kwargs['advisory_ids'], 'advisory' else: return [name], 'advisory' elif pkgs: if version is not None: log.warning('\'version\' argument will be ignored for multiple ' 'package targets') pkgs = _repack_pkgs(pkgs, normalize=normalize) if not pkgs: return None, None else: return pkgs, 'repository' elif sources and __grains__['os'] != 'MacOS': if version is not None: log.warning('\'version\' argument will be ignored for multiple ' 'package targets') sources = pack_sources(sources, normalize=normalize) if not sources: return None, None srcinfo = [] for pkg_name, pkg_src in six.iteritems(sources): if __salt__['config.valid_fileproto'](pkg_src): # Cache package from remote source (salt master, HTTP, FTP) and # append the cached path. 
srcinfo.append(__salt__['cp.cache_file'](pkg_src, saltenv)) else: # Package file local to the minion, just append the path to the # package file. if not os.path.isabs(pkg_src): raise SaltInvocationError( 'Path {0} for package {1} is either not absolute or ' 'an invalid protocol'.format(pkg_src, pkg_name) ) srcinfo.append(pkg_src) return srcinfo, 'file' elif name: if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) packed = dict([(_normalize_name(x), version) for x in name.split(',')]) else: packed = dict([(x, version) for x in name.split(',')]) return packed, 'repository' else: log.error('No package sources provided') return None, None
def members_entries(self, all_are_optional: Optional[bool] = False) -> List[Tuple[str, str]]: """ Generate a list quoted raw name, signature type entries for this pairdef, recursively traversing reference types :param all_are_optional: If true, all types are forced optional :return: raw name/ signature type for all elements in this pair """ if self._type_reference: rval: List[Tuple[str, str]] = [] for n, t in self._context.reference(self._type_reference).members_entries(all_are_optional): rval.append((n, self._ebnf.signature_cardinality(t, all_are_optional).format(name=n))) return rval else: sig = self._ebnf.signature_cardinality(self._typ.reference_type(), all_are_optional) return [(name, sig.format(name=name)) for name in self._names]
Generate a list quoted raw name, signature type entries for this pairdef, recursively traversing reference types :param all_are_optional: If true, all types are forced optional :return: raw name/ signature type for all elements in this pair
Below is the the instruction that describes the task: ### Input: Generate a list quoted raw name, signature type entries for this pairdef, recursively traversing reference types :param all_are_optional: If true, all types are forced optional :return: raw name/ signature type for all elements in this pair ### Response: def members_entries(self, all_are_optional: Optional[bool] = False) -> List[Tuple[str, str]]: """ Generate a list quoted raw name, signature type entries for this pairdef, recursively traversing reference types :param all_are_optional: If true, all types are forced optional :return: raw name/ signature type for all elements in this pair """ if self._type_reference: rval: List[Tuple[str, str]] = [] for n, t in self._context.reference(self._type_reference).members_entries(all_are_optional): rval.append((n, self._ebnf.signature_cardinality(t, all_are_optional).format(name=n))) return rval else: sig = self._ebnf.signature_cardinality(self._typ.reference_type(), all_are_optional) return [(name, sig.format(name=name)) for name in self._names]
def __send_smtp_email(self, recipients, subject, html_body, text_body): """Send an email using SMTP Args: recipients (`list` of `str`): List of recipient email addresses subject (str): Subject of the email html_body (str): HTML body of the email text_body (str): Text body of the email Returns: `None` """ smtp = smtplib.SMTP( dbconfig.get('smtp_server', NS_EMAIL, 'localhost'), dbconfig.get('smtp_port', NS_EMAIL, 25) ) source_arn = dbconfig.get('source_arn', NS_EMAIL) return_arn = dbconfig.get('return_path_arn', NS_EMAIL) from_arn = dbconfig.get('from_arn', NS_EMAIL) msg = MIMEMultipart('alternative') # Set SES options if needed if source_arn and from_arn and return_arn: msg['X-SES-SOURCE-ARN'] = source_arn msg['X-SES-FROM-ARN'] = from_arn msg['X-SES-RETURN-PATH-ARN'] = return_arn msg['Subject'] = subject msg['To'] = ','.join(recipients) msg['From'] = self.sender # Check body types to avoid exceptions if html_body: html_part = MIMEText(html_body, 'html') msg.attach(html_part) if text_body: text_part = MIMEText(text_body, 'plain') msg.attach(text_part) # TLS if needed if dbconfig.get('smtp_tls', NS_EMAIL, False): smtp.starttls() # Login if needed username = dbconfig.get('smtp_username', NS_EMAIL) password = dbconfig.get('smtp_password', NS_EMAIL) if username and password: smtp.login(username, password) smtp.sendmail(self.sender, recipients, msg.as_string()) smtp.quit()
Send an email using SMTP Args: recipients (`list` of `str`): List of recipient email addresses subject (str): Subject of the email html_body (str): HTML body of the email text_body (str): Text body of the email Returns: `None`
Below is the the instruction that describes the task: ### Input: Send an email using SMTP Args: recipients (`list` of `str`): List of recipient email addresses subject (str): Subject of the email html_body (str): HTML body of the email text_body (str): Text body of the email Returns: `None` ### Response: def __send_smtp_email(self, recipients, subject, html_body, text_body): """Send an email using SMTP Args: recipients (`list` of `str`): List of recipient email addresses subject (str): Subject of the email html_body (str): HTML body of the email text_body (str): Text body of the email Returns: `None` """ smtp = smtplib.SMTP( dbconfig.get('smtp_server', NS_EMAIL, 'localhost'), dbconfig.get('smtp_port', NS_EMAIL, 25) ) source_arn = dbconfig.get('source_arn', NS_EMAIL) return_arn = dbconfig.get('return_path_arn', NS_EMAIL) from_arn = dbconfig.get('from_arn', NS_EMAIL) msg = MIMEMultipart('alternative') # Set SES options if needed if source_arn and from_arn and return_arn: msg['X-SES-SOURCE-ARN'] = source_arn msg['X-SES-FROM-ARN'] = from_arn msg['X-SES-RETURN-PATH-ARN'] = return_arn msg['Subject'] = subject msg['To'] = ','.join(recipients) msg['From'] = self.sender # Check body types to avoid exceptions if html_body: html_part = MIMEText(html_body, 'html') msg.attach(html_part) if text_body: text_part = MIMEText(text_body, 'plain') msg.attach(text_part) # TLS if needed if dbconfig.get('smtp_tls', NS_EMAIL, False): smtp.starttls() # Login if needed username = dbconfig.get('smtp_username', NS_EMAIL) password = dbconfig.get('smtp_password', NS_EMAIL) if username and password: smtp.login(username, password) smtp.sendmail(self.sender, recipients, msg.as_string()) smtp.quit()
def create(cls, path, encoding='utf-8'): """Create a new bare repository""" cmd = [GIT, 'init', '--quiet', '--bare', path] subprocess.check_call(cmd) return cls(path, encoding)
Create a new bare repository
Below is the the instruction that describes the task: ### Input: Create a new bare repository ### Response: def create(cls, path, encoding='utf-8'): """Create a new bare repository""" cmd = [GIT, 'init', '--quiet', '--bare', path] subprocess.check_call(cmd) return cls(path, encoding)
def get(self): """Handle get request.""" try: app_user = InitUser() if (app_user.client_id and app_user.client_secret and app_user.adwords_manager_cid and app_user.developer_token and app_user.refresh_token): self.redirect('/showAccounts') else: self.redirect('/showCredentials') except Exception, e: logging.debug(str(e))
Handle get request.
Below is the the instruction that describes the task: ### Input: Handle get request. ### Response: def get(self): """Handle get request.""" try: app_user = InitUser() if (app_user.client_id and app_user.client_secret and app_user.adwords_manager_cid and app_user.developer_token and app_user.refresh_token): self.redirect('/showAccounts') else: self.redirect('/showCredentials') except Exception, e: logging.debug(str(e))
def error_page( participant=None, error_text=None, compensate=True, error_type="default", request_data="", ): """Render HTML for error page.""" config = _config() if error_text is None: error_text = """There has been an error and so you are unable to continue, sorry!""" if participant is not None: hit_id = (participant.hit_id,) assignment_id = (participant.assignment_id,) worker_id = participant.worker_id participant_id = participant.id else: hit_id = request.form.get("hit_id", "") assignment_id = request.form.get("assignment_id", "") worker_id = request.form.get("worker_id", "") participant_id = request.form.get("participant_id", None) if participant_id: try: participant_id = int(participant_id) except (ValueError, TypeError): participant_id = None return make_response( render_template( "error.html", error_text=error_text, compensate=compensate, contact_address=config.get("contact_email_on_error"), error_type=error_type, hit_id=hit_id, assignment_id=assignment_id, worker_id=worker_id, request_data=request_data, participant_id=participant_id, ), 500, )
Render HTML for error page.
Below is the the instruction that describes the task: ### Input: Render HTML for error page. ### Response: def error_page( participant=None, error_text=None, compensate=True, error_type="default", request_data="", ): """Render HTML for error page.""" config = _config() if error_text is None: error_text = """There has been an error and so you are unable to continue, sorry!""" if participant is not None: hit_id = (participant.hit_id,) assignment_id = (participant.assignment_id,) worker_id = participant.worker_id participant_id = participant.id else: hit_id = request.form.get("hit_id", "") assignment_id = request.form.get("assignment_id", "") worker_id = request.form.get("worker_id", "") participant_id = request.form.get("participant_id", None) if participant_id: try: participant_id = int(participant_id) except (ValueError, TypeError): participant_id = None return make_response( render_template( "error.html", error_text=error_text, compensate=compensate, contact_address=config.get("contact_email_on_error"), error_type=error_type, hit_id=hit_id, assignment_id=assignment_id, worker_id=worker_id, request_data=request_data, participant_id=participant_id, ), 500, )
def rosenbrock(x, y, a=1, b=100): """Bigger is better; global optimum at x=a, y=a**2""" return -1 * ((a - x)**2 + b * (y - x**2)**2)
Bigger is better; global optimum at x=a, y=a**2
Below is the the instruction that describes the task: ### Input: Bigger is better; global optimum at x=a, y=a**2 ### Response: def rosenbrock(x, y, a=1, b=100): """Bigger is better; global optimum at x=a, y=a**2""" return -1 * ((a - x)**2 + b * (y - x**2)**2)
def _register(self, assignment): # type: (Assignment) -> None """ Registers an Assignment in _positive or _negative. """ name = assignment.dependency.name old_positive = self._positive.get(name) if old_positive is not None: self._positive[name] = old_positive.intersect(assignment) return ref = assignment.dependency.name negative_by_ref = self._negative.get(name) old_negative = None if negative_by_ref is None else negative_by_ref.get(ref) if old_negative is None: term = assignment else: term = assignment.intersect(old_negative) if term.is_positive(): if name in self._negative: del self._negative[name] self._positive[name] = term else: if name not in self._negative: self._negative[name] = {} self._negative[name][ref] = term
Registers an Assignment in _positive or _negative.
Below is the the instruction that describes the task: ### Input: Registers an Assignment in _positive or _negative. ### Response: def _register(self, assignment): # type: (Assignment) -> None """ Registers an Assignment in _positive or _negative. """ name = assignment.dependency.name old_positive = self._positive.get(name) if old_positive is not None: self._positive[name] = old_positive.intersect(assignment) return ref = assignment.dependency.name negative_by_ref = self._negative.get(name) old_negative = None if negative_by_ref is None else negative_by_ref.get(ref) if old_negative is None: term = assignment else: term = assignment.intersect(old_negative) if term.is_positive(): if name in self._negative: del self._negative[name] self._positive[name] = term else: if name not in self._negative: self._negative[name] = {} self._negative[name][ref] = term
def add_element(self, element): """ Element can be href or type :py:class:`smc.base.model.Element` :: >>> from smc.elements.other import Category >>> category = Category('foo') >>> category.add_element(Host('kali')) :param str,Element element: element to add to tag :raises: ModificationFailed: failed adding element :return: None """ element = element_resolver(element) self.make_request( ModificationFailed, method='create', resource='category_add_element', json={'value': element})
Element can be href or type :py:class:`smc.base.model.Element` :: >>> from smc.elements.other import Category >>> category = Category('foo') >>> category.add_element(Host('kali')) :param str,Element element: element to add to tag :raises: ModificationFailed: failed adding element :return: None
Below is the the instruction that describes the task: ### Input: Element can be href or type :py:class:`smc.base.model.Element` :: >>> from smc.elements.other import Category >>> category = Category('foo') >>> category.add_element(Host('kali')) :param str,Element element: element to add to tag :raises: ModificationFailed: failed adding element :return: None ### Response: def add_element(self, element): """ Element can be href or type :py:class:`smc.base.model.Element` :: >>> from smc.elements.other import Category >>> category = Category('foo') >>> category.add_element(Host('kali')) :param str,Element element: element to add to tag :raises: ModificationFailed: failed adding element :return: None """ element = element_resolver(element) self.make_request( ModificationFailed, method='create', resource='category_add_element', json={'value': element})
def correct_scanpy(adatas, **kwargs): """Batch correct a list of `scanpy.api.AnnData`. Parameters ---------- adatas : `list` of `scanpy.api.AnnData` Data sets to integrate and/or correct. kwargs : `dict` See documentation for the `correct()` method for a full list of parameters to use for batch correction. Returns ------- corrected By default (`return_dimred=False`), returns a list of `scanpy.api.AnnData` with batch corrected values in the `.X` field. corrected, integrated When `return_dimred=False`, returns a two-tuple containing a list of `np.ndarray` with integrated low-dimensional embeddings and a list of `scanpy.api.AnnData` with batch corrected values in the `.X` field. """ if 'return_dimred' in kwargs and kwargs['return_dimred']: datasets_dimred, datasets, genes = correct( [adata.X for adata in adatas], [adata.var_names.values for adata in adatas], **kwargs ) else: datasets, genes = correct( [adata.X for adata in adatas], [adata.var_names.values for adata in adatas], **kwargs ) new_adatas = [] for i, adata in enumerate(adatas): adata.X = datasets[i] new_adatas.append(adata) if 'return_dimred' in kwargs and kwargs['return_dimred']: return datasets_dimred, new_adatas else: return new_adatas
Batch correct a list of `scanpy.api.AnnData`. Parameters ---------- adatas : `list` of `scanpy.api.AnnData` Data sets to integrate and/or correct. kwargs : `dict` See documentation for the `correct()` method for a full list of parameters to use for batch correction. Returns ------- corrected By default (`return_dimred=False`), returns a list of `scanpy.api.AnnData` with batch corrected values in the `.X` field. corrected, integrated When `return_dimred=False`, returns a two-tuple containing a list of `np.ndarray` with integrated low-dimensional embeddings and a list of `scanpy.api.AnnData` with batch corrected values in the `.X` field.
Below is the the instruction that describes the task: ### Input: Batch correct a list of `scanpy.api.AnnData`. Parameters ---------- adatas : `list` of `scanpy.api.AnnData` Data sets to integrate and/or correct. kwargs : `dict` See documentation for the `correct()` method for a full list of parameters to use for batch correction. Returns ------- corrected By default (`return_dimred=False`), returns a list of `scanpy.api.AnnData` with batch corrected values in the `.X` field. corrected, integrated When `return_dimred=False`, returns a two-tuple containing a list of `np.ndarray` with integrated low-dimensional embeddings and a list of `scanpy.api.AnnData` with batch corrected values in the `.X` field. ### Response: def correct_scanpy(adatas, **kwargs): """Batch correct a list of `scanpy.api.AnnData`. Parameters ---------- adatas : `list` of `scanpy.api.AnnData` Data sets to integrate and/or correct. kwargs : `dict` See documentation for the `correct()` method for a full list of parameters to use for batch correction. Returns ------- corrected By default (`return_dimred=False`), returns a list of `scanpy.api.AnnData` with batch corrected values in the `.X` field. corrected, integrated When `return_dimred=False`, returns a two-tuple containing a list of `np.ndarray` with integrated low-dimensional embeddings and a list of `scanpy.api.AnnData` with batch corrected values in the `.X` field. """ if 'return_dimred' in kwargs and kwargs['return_dimred']: datasets_dimred, datasets, genes = correct( [adata.X for adata in adatas], [adata.var_names.values for adata in adatas], **kwargs ) else: datasets, genes = correct( [adata.X for adata in adatas], [adata.var_names.values for adata in adatas], **kwargs ) new_adatas = [] for i, adata in enumerate(adatas): adata.X = datasets[i] new_adatas.append(adata) if 'return_dimred' in kwargs and kwargs['return_dimred']: return datasets_dimred, new_adatas else: return new_adatas
def list_known_codes(s, unique=True, rgb_mode=False): """ Find and print all known escape codes in a string, using get_known_codes. """ total = 0 for codedesc in get_known_codes(s, unique=unique, rgb_mode=rgb_mode): total += 1 print(codedesc) plural = 'code' if total == 1 else 'codes' codetype = ' unique' if unique else '' print('\nFound {}{} escape {}.'.format(total, codetype, plural)) return 0 if total > 0 else 1
Find and print all known escape codes in a string, using get_known_codes.
Below is the the instruction that describes the task: ### Input: Find and print all known escape codes in a string, using get_known_codes. ### Response: def list_known_codes(s, unique=True, rgb_mode=False): """ Find and print all known escape codes in a string, using get_known_codes. """ total = 0 for codedesc in get_known_codes(s, unique=unique, rgb_mode=rgb_mode): total += 1 print(codedesc) plural = 'code' if total == 1 else 'codes' codetype = ' unique' if unique else '' print('\nFound {}{} escape {}.'.format(total, codetype, plural)) return 0 if total > 0 else 1
def G(self, ID, lat, lon): """ Creates a generic entry for an object. """ # Equatorial coordinates eqM = utils.eqCoords(lon, lat) eqZ = eqM if lat != 0: eqZ = utils.eqCoords(lon, 0) return { 'id': ID, 'lat': lat, 'lon': lon, 'ra': eqM[0], 'decl': eqM[1], 'raZ': eqZ[0], 'declZ': eqZ[1], }
Creates a generic entry for an object.
Below is the the instruction that describes the task: ### Input: Creates a generic entry for an object. ### Response: def G(self, ID, lat, lon): """ Creates a generic entry for an object. """ # Equatorial coordinates eqM = utils.eqCoords(lon, lat) eqZ = eqM if lat != 0: eqZ = utils.eqCoords(lon, 0) return { 'id': ID, 'lat': lat, 'lon': lon, 'ra': eqM[0], 'decl': eqM[1], 'raZ': eqZ[0], 'declZ': eqZ[1], }
def event(name): ''' Chekcs for a specific event match and returns result True if the match happens USAGE: .. code-block:: yaml salt/foo/*/bar: check.event run_remote_ex: local.cmd: - tgt: '*' - func: test.ping - require: - check: salt/foo/*/bar ''' ret = {'name': name, 'changes': {}, 'comment': '', 'result': False} for event in __events__: if salt.utils.stringutils.expr_match(event['tag'], name): ret['result'] = True return ret
Chekcs for a specific event match and returns result True if the match happens USAGE: .. code-block:: yaml salt/foo/*/bar: check.event run_remote_ex: local.cmd: - tgt: '*' - func: test.ping - require: - check: salt/foo/*/bar
Below is the the instruction that describes the task: ### Input: Chekcs for a specific event match and returns result True if the match happens USAGE: .. code-block:: yaml salt/foo/*/bar: check.event run_remote_ex: local.cmd: - tgt: '*' - func: test.ping - require: - check: salt/foo/*/bar ### Response: def event(name): ''' Chekcs for a specific event match and returns result True if the match happens USAGE: .. code-block:: yaml salt/foo/*/bar: check.event run_remote_ex: local.cmd: - tgt: '*' - func: test.ping - require: - check: salt/foo/*/bar ''' ret = {'name': name, 'changes': {}, 'comment': '', 'result': False} for event in __events__: if salt.utils.stringutils.expr_match(event['tag'], name): ret['result'] = True return ret
def from_config(cls, cp, section, variable_args): """Returns a distribution based on a configuration file. The parameters for the distribution are retrieved from the section titled "[`section`-`variable_args`]" in the config file. Parameters ---------- cp : pycbc.workflow.WorkflowConfigParser A parsed configuration file that contains the distribution options. section : str Name of the section in the configuration file. variable_args : str The names of the parameters for this distribution, separated by `prior.VARARGS_DELIM`. These must appear in the "tag" part of the section header. Returns ------- Uniform A distribution instance from the pycbc.inference.prior module. """ return super(UniformPowerLaw, cls).from_config(cp, section, variable_args, bounds_required=True)
Returns a distribution based on a configuration file. The parameters for the distribution are retrieved from the section titled "[`section`-`variable_args`]" in the config file. Parameters ---------- cp : pycbc.workflow.WorkflowConfigParser A parsed configuration file that contains the distribution options. section : str Name of the section in the configuration file. variable_args : str The names of the parameters for this distribution, separated by `prior.VARARGS_DELIM`. These must appear in the "tag" part of the section header. Returns ------- Uniform A distribution instance from the pycbc.inference.prior module.
Below is the the instruction that describes the task: ### Input: Returns a distribution based on a configuration file. The parameters for the distribution are retrieved from the section titled "[`section`-`variable_args`]" in the config file. Parameters ---------- cp : pycbc.workflow.WorkflowConfigParser A parsed configuration file that contains the distribution options. section : str Name of the section in the configuration file. variable_args : str The names of the parameters for this distribution, separated by `prior.VARARGS_DELIM`. These must appear in the "tag" part of the section header. Returns ------- Uniform A distribution instance from the pycbc.inference.prior module. ### Response: def from_config(cls, cp, section, variable_args): """Returns a distribution based on a configuration file. The parameters for the distribution are retrieved from the section titled "[`section`-`variable_args`]" in the config file. Parameters ---------- cp : pycbc.workflow.WorkflowConfigParser A parsed configuration file that contains the distribution options. section : str Name of the section in the configuration file. variable_args : str The names of the parameters for this distribution, separated by `prior.VARARGS_DELIM`. These must appear in the "tag" part of the section header. Returns ------- Uniform A distribution instance from the pycbc.inference.prior module. """ return super(UniformPowerLaw, cls).from_config(cp, section, variable_args, bounds_required=True)
def DescargarConstancia(self, nro_doc, filename="constancia.pdf"): "Llama a la API para descargar una constancia de inscripcion (PDF)" if not self.client: self.Conectar() self.response = self.client("sr-padron", "v1", "constancia", str(nro_doc)) if self.response.startswith("{"): result = json.loads(self.response) assert not result["success"] self.Excepcion = result['error']['mensaje'] return False else: with open(filename, "wb") as f: f.write(self.response) return True
Llama a la API para descargar una constancia de inscripcion (PDF)
Below is the the instruction that describes the task: ### Input: Llama a la API para descargar una constancia de inscripcion (PDF) ### Response: def DescargarConstancia(self, nro_doc, filename="constancia.pdf"): "Llama a la API para descargar una constancia de inscripcion (PDF)" if not self.client: self.Conectar() self.response = self.client("sr-padron", "v1", "constancia", str(nro_doc)) if self.response.startswith("{"): result = json.loads(self.response) assert not result["success"] self.Excepcion = result['error']['mensaje'] return False else: with open(filename, "wb") as f: f.write(self.response) return True
def create_italic(self, tag): """ See if span tag has italic style and wrap with em tag. """ style = tag.get('style') if style and 'font-style:italic' in style: tag.wrap(self.soup.new_tag('em'))
See if span tag has italic style and wrap with em tag.
Below is the the instruction that describes the task: ### Input: See if span tag has italic style and wrap with em tag. ### Response: def create_italic(self, tag): """ See if span tag has italic style and wrap with em tag. """ style = tag.get('style') if style and 'font-style:italic' in style: tag.wrap(self.soup.new_tag('em'))
def get_list_url_filtered_by_field_value(view, model, name, reverse=False): """Get the URL if a filter of model[name] value was appended. This allows programatically adding filters. This is used in the specialized case of filtering deeper into a list by a field's value. For instance, since there can be multiple assignments in a list of handins. The assignment column can have a URL generated by get_filter_url to filter the handins to show only ones for that assignment. Parameters ---------- view : View instance model : document (model instance, not the class itself) name : field name reverse : bool Whether to *remove* an applied filter from url Returns ------- string : URL of current list args + filtering on field value """ view_args = view._get_list_extra_args() def create_filter_arg(field_name, value): i, flt = next( ( v for k, v in view._filter_args.items() if k == '{}_equals'.format(field_name) ), None, ) return (i, flt.name, value) new_filter = create_filter_arg(name, model[name]) filters = view_args.filters if new_filter in view_args.filters: # Filter already applied if not reverse: return None else: # Remove filter filters.remove(new_filter) if not reverse: # Add Filter filters.append(new_filter) # Example of an activated filter: (u'view_args.filters', [(7, u'Path', u'course')]) return view._get_list_url( view_args.clone(filters=filters, page=0) # Reset page to 0 )
Get the URL if a filter of model[name] value was appended. This allows programatically adding filters. This is used in the specialized case of filtering deeper into a list by a field's value. For instance, since there can be multiple assignments in a list of handins. The assignment column can have a URL generated by get_filter_url to filter the handins to show only ones for that assignment. Parameters ---------- view : View instance model : document (model instance, not the class itself) name : field name reverse : bool Whether to *remove* an applied filter from url Returns ------- string : URL of current list args + filtering on field value
Below is the the instruction that describes the task: ### Input: Get the URL if a filter of model[name] value was appended. This allows programatically adding filters. This is used in the specialized case of filtering deeper into a list by a field's value. For instance, since there can be multiple assignments in a list of handins. The assignment column can have a URL generated by get_filter_url to filter the handins to show only ones for that assignment. Parameters ---------- view : View instance model : document (model instance, not the class itself) name : field name reverse : bool Whether to *remove* an applied filter from url Returns ------- string : URL of current list args + filtering on field value ### Response: def get_list_url_filtered_by_field_value(view, model, name, reverse=False): """Get the URL if a filter of model[name] value was appended. This allows programatically adding filters. This is used in the specialized case of filtering deeper into a list by a field's value. For instance, since there can be multiple assignments in a list of handins. The assignment column can have a URL generated by get_filter_url to filter the handins to show only ones for that assignment. 
Parameters ---------- view : View instance model : document (model instance, not the class itself) name : field name reverse : bool Whether to *remove* an applied filter from url Returns ------- string : URL of current list args + filtering on field value """ view_args = view._get_list_extra_args() def create_filter_arg(field_name, value): i, flt = next( ( v for k, v in view._filter_args.items() if k == '{}_equals'.format(field_name) ), None, ) return (i, flt.name, value) new_filter = create_filter_arg(name, model[name]) filters = view_args.filters if new_filter in view_args.filters: # Filter already applied if not reverse: return None else: # Remove filter filters.remove(new_filter) if not reverse: # Add Filter filters.append(new_filter) # Example of an activated filter: (u'view_args.filters', [(7, u'Path', u'course')]) return view._get_list_url( view_args.clone(filters=filters, page=0) # Reset page to 0 )
def pydevd(context): """ Start a pydev settrace """ global pdevd_not_available if pdevd_not_available: return '' try: import pydevd except ImportError: pdevd_not_available = True return '' render = lambda s: template.Template(s).render(context) availables = get_variables(context) for var in availables: locals()[var] = context[var] #catch the case where no client is listening try: pydevd.settrace() except socket.error: pdevd_not_available = True return ''
Start a pydev settrace
Below is the the instruction that describes the task: ### Input: Start a pydev settrace ### Response: def pydevd(context): """ Start a pydev settrace """ global pdevd_not_available if pdevd_not_available: return '' try: import pydevd except ImportError: pdevd_not_available = True return '' render = lambda s: template.Template(s).render(context) availables = get_variables(context) for var in availables: locals()[var] = context[var] #catch the case where no client is listening try: pydevd.settrace() except socket.error: pdevd_not_available = True return ''
def send_request(req=None, method=None, requires_response=True): """Call function req and then send its results via ZMQ.""" if req is None: return functools.partial(send_request, method=method, requires_response=requires_response) @functools.wraps(req) def wrapper(self, *args, **kwargs): params = req(self, *args, **kwargs) _id = self.send(method, params, requires_response) return _id wrapper._sends = method return wrapper
Call function req and then send its results via ZMQ.
Below is the the instruction that describes the task: ### Input: Call function req and then send its results via ZMQ. ### Response: def send_request(req=None, method=None, requires_response=True): """Call function req and then send its results via ZMQ.""" if req is None: return functools.partial(send_request, method=method, requires_response=requires_response) @functools.wraps(req) def wrapper(self, *args, **kwargs): params = req(self, *args, **kwargs) _id = self.send(method, params, requires_response) return _id wrapper._sends = method return wrapper
def to_positions(self): """ Converts fractional coordinates of trajectory into positions """ if self.coords_are_displacement: cumulative_displacements = np.cumsum(self.frac_coords, axis=0) positions = self.base_positions + cumulative_displacements self.frac_coords = positions self.coords_are_displacement = False return
Converts fractional coordinates of trajectory into positions
Below is the the instruction that describes the task: ### Input: Converts fractional coordinates of trajectory into positions ### Response: def to_positions(self): """ Converts fractional coordinates of trajectory into positions """ if self.coords_are_displacement: cumulative_displacements = np.cumsum(self.frac_coords, axis=0) positions = self.base_positions + cumulative_displacements self.frac_coords = positions self.coords_are_displacement = False return
def setup_new_conf(self): # pylint: disable=too-many-locals """ Setup a new configuration received from a Master arbiter. TODO: perharps we should not accept the configuration or raise an error if we do not find our own configuration data in the data. Thus this should never happen... :return: None """ # Execute the base class treatment... super(Arbiter, self).setup_new_conf() with self.conf_lock: logger.info("I received a new configuration from my master") # Get the new configuration self.cur_conf = self.new_conf # self_conf is our own configuration from the alignak environment # Arbiters do not have this property in the received configuration because # they already loaded a configuration on daemon load self_conf = self.cur_conf.get('self_conf', None) if not self_conf: self_conf = self.conf # whole_conf contains the full configuration load by my master whole_conf = self.cur_conf['whole_conf'] logger.debug("Received a new configuration, containing:") for key in self.cur_conf: logger.debug("- %s: %s", key, self.cur_conf[key]) logger.debug("satellite self configuration part: %s", self_conf) # Update Alignak name self.alignak_name = self.cur_conf['alignak_name'] logger.info("My Alignak instance: %s", self.alignak_name) # This to indicate that the new configuration got managed... self.new_conf = {} # Get the whole monitored objects configuration t00 = time.time() try: received_conf_part = unserialize(whole_conf) except AlignakClassLookupException as exp: # pragma: no cover, simple protection # This to indicate that the new configuration is not managed... self.new_conf = { "_status": "Cannot un-serialize configuration received from arbiter" } logger.error(self.new_conf['_status']) logger.error("Back trace of the error:\n%s", traceback.format_exc()) return except Exception as exp: # pylint: disable=broad-except # This to indicate that the new configuration is not managed... 
self.new_conf = { "_status": "Cannot un-serialize configuration received from arbiter" } logger.error(self.new_conf['_status']) logger.error(self.new_conf) self.exit_on_exception(exp, self.new_conf) logger.info("Monitored configuration %s received at %d. Un-serialized in %d secs", received_conf_part, t00, time.time() - t00) # Now we create our arbiters and schedulers links my_satellites = getattr(self, 'arbiters', {}) received_satellites = self.cur_conf['arbiters'] for link_uuid in received_satellites: rs_conf = received_satellites[link_uuid] logger.debug("- received %s - %s: %s", rs_conf['instance_id'], rs_conf['type'], rs_conf['name']) # Must look if we already had a configuration and save our broks already_got = rs_conf['instance_id'] in my_satellites broks = [] actions = {} wait_homerun = {} external_commands = {} running_id = 0 if already_got: logger.warning("I already got: %s", rs_conf['instance_id']) # Save some information running_id = my_satellites[link_uuid].running_id (broks, actions, wait_homerun, external_commands) = \ my_satellites[link_uuid].get_and_clear_context() # Delete the former link del my_satellites[link_uuid] # My new satellite link... new_link = SatelliteLink.get_a_satellite_link('arbiter', rs_conf) my_satellites[new_link.uuid] = new_link logger.info("I got a new arbiter satellite: %s", new_link) new_link.running_id = running_id new_link.external_commands = external_commands new_link.broks = broks new_link.wait_homerun = wait_homerun new_link.actions = actions # # replacing satellite address and port by those defined in satellite_map # if new_link.name in self_conf.satellite_map: # overriding = self_conf.satellite_map[new_link.name] # # satellite = dict(satellite) # make a copy # # new_link.update(self_conf.get('satellite_map', {})[new_link.name]) # logger.warning("Do not override the configuration for: %s, with: %s. 
" # "Please check whether this is necessary!", # new_link.name, overriding) # for arbiter_link in received_conf_part.arbiters: # logger.info("I have arbiter links in my configuration: %s", arbiter_link.name) # if arbiter_link.name != self.name and not arbiter_link.spare: # # Arbiter is not me! # logger.info("I found my master arbiter in the configuration: %s", # arbiter_link.name) # continue # # logger.info("I found myself in the received configuration: %s", arbiter_link.name) # self.link_to_myself = arbiter_link # # We received a configuration s we are not a master ! # self.is_master = False # self.link_to_myself.spare = True # # Set myself as alive ;) # self.link_to_myself.set_alive() # Now I have a configuration! self.have_conf = True
Setup a new configuration received from a Master arbiter. TODO: perharps we should not accept the configuration or raise an error if we do not find our own configuration data in the data. Thus this should never happen... :return: None
Below is the the instruction that describes the task: ### Input: Setup a new configuration received from a Master arbiter. TODO: perharps we should not accept the configuration or raise an error if we do not find our own configuration data in the data. Thus this should never happen... :return: None ### Response: def setup_new_conf(self): # pylint: disable=too-many-locals """ Setup a new configuration received from a Master arbiter. TODO: perharps we should not accept the configuration or raise an error if we do not find our own configuration data in the data. Thus this should never happen... :return: None """ # Execute the base class treatment... super(Arbiter, self).setup_new_conf() with self.conf_lock: logger.info("I received a new configuration from my master") # Get the new configuration self.cur_conf = self.new_conf # self_conf is our own configuration from the alignak environment # Arbiters do not have this property in the received configuration because # they already loaded a configuration on daemon load self_conf = self.cur_conf.get('self_conf', None) if not self_conf: self_conf = self.conf # whole_conf contains the full configuration load by my master whole_conf = self.cur_conf['whole_conf'] logger.debug("Received a new configuration, containing:") for key in self.cur_conf: logger.debug("- %s: %s", key, self.cur_conf[key]) logger.debug("satellite self configuration part: %s", self_conf) # Update Alignak name self.alignak_name = self.cur_conf['alignak_name'] logger.info("My Alignak instance: %s", self.alignak_name) # This to indicate that the new configuration got managed... self.new_conf = {} # Get the whole monitored objects configuration t00 = time.time() try: received_conf_part = unserialize(whole_conf) except AlignakClassLookupException as exp: # pragma: no cover, simple protection # This to indicate that the new configuration is not managed... 
self.new_conf = { "_status": "Cannot un-serialize configuration received from arbiter" } logger.error(self.new_conf['_status']) logger.error("Back trace of the error:\n%s", traceback.format_exc()) return except Exception as exp: # pylint: disable=broad-except # This to indicate that the new configuration is not managed... self.new_conf = { "_status": "Cannot un-serialize configuration received from arbiter" } logger.error(self.new_conf['_status']) logger.error(self.new_conf) self.exit_on_exception(exp, self.new_conf) logger.info("Monitored configuration %s received at %d. Un-serialized in %d secs", received_conf_part, t00, time.time() - t00) # Now we create our arbiters and schedulers links my_satellites = getattr(self, 'arbiters', {}) received_satellites = self.cur_conf['arbiters'] for link_uuid in received_satellites: rs_conf = received_satellites[link_uuid] logger.debug("- received %s - %s: %s", rs_conf['instance_id'], rs_conf['type'], rs_conf['name']) # Must look if we already had a configuration and save our broks already_got = rs_conf['instance_id'] in my_satellites broks = [] actions = {} wait_homerun = {} external_commands = {} running_id = 0 if already_got: logger.warning("I already got: %s", rs_conf['instance_id']) # Save some information running_id = my_satellites[link_uuid].running_id (broks, actions, wait_homerun, external_commands) = \ my_satellites[link_uuid].get_and_clear_context() # Delete the former link del my_satellites[link_uuid] # My new satellite link... 
new_link = SatelliteLink.get_a_satellite_link('arbiter', rs_conf) my_satellites[new_link.uuid] = new_link logger.info("I got a new arbiter satellite: %s", new_link) new_link.running_id = running_id new_link.external_commands = external_commands new_link.broks = broks new_link.wait_homerun = wait_homerun new_link.actions = actions # # replacing satellite address and port by those defined in satellite_map # if new_link.name in self_conf.satellite_map: # overriding = self_conf.satellite_map[new_link.name] # # satellite = dict(satellite) # make a copy # # new_link.update(self_conf.get('satellite_map', {})[new_link.name]) # logger.warning("Do not override the configuration for: %s, with: %s. " # "Please check whether this is necessary!", # new_link.name, overriding) # for arbiter_link in received_conf_part.arbiters: # logger.info("I have arbiter links in my configuration: %s", arbiter_link.name) # if arbiter_link.name != self.name and not arbiter_link.spare: # # Arbiter is not me! # logger.info("I found my master arbiter in the configuration: %s", # arbiter_link.name) # continue # # logger.info("I found myself in the received configuration: %s", arbiter_link.name) # self.link_to_myself = arbiter_link # # We received a configuration s we are not a master ! # self.is_master = False # self.link_to_myself.spare = True # # Set myself as alive ;) # self.link_to_myself.set_alive() # Now I have a configuration! self.have_conf = True
def lzprob(z):
    """
    Return the area under the standard normal curve to the left of z.

    Thus, for z < 0 the value is the lower 1-tail probability; for z > 0,
    1.0 - lzprob(z) is the upper 1-tail probability; and for any z,
    2.0 * (1.0 - lzprob(abs(z))) is the 2-tail probability.

    Adapted from z.c in Gary Perlman's |Stat.

    Usage:   lzprob(z)
    """
    Z_MAX = 6.0  # maximum meaningful z-value; beyond this the tail mass is ~0
    if z == 0.0:
        body = 0.0
    else:
        half = 0.5 * math.fabs(z)
        if half >= (Z_MAX * 0.5):
            # |z| >= 6: the two-sided central mass is effectively 1.
            body = 1.0
        elif half < 1.0:
            # Polynomial approximation for |z| < 2 (Horner form in z^2/4).
            w = half * half
            body = (((((((((0.000124818987 * w
                - 0.001075204047) * w + 0.005198775019) * w
                - 0.019198292004) * w + 0.059054035642) * w
                - 0.151968751364) * w + 0.319152932694) * w
                - 0.531923007300) * w + 0.797884560593) * half * 2.0)
        else:
            # Polynomial approximation for 2 <= |z| < 6 (Horner form in |z|/2 - 2).
            half = half - 2.0
            body = ((((((((((((((-0.000045255659 * half
                + 0.000152529290) * half - 0.000019538132) * half
                - 0.000676904986) * half + 0.001390604284) * half
                - 0.000794620820) * half - 0.002034254874) * half
                + 0.006549791214) * half - 0.010557625006) * half
                + 0.011630447319) * half - 0.009279453341) * half
                + 0.005353579108) * half - 0.002141268741) * half
                + 0.000535310849) * half + 0.999936657524)
    # 'body' is the central (two-sided) mass for |z|; fold it into a
    # one-sided lower-tail probability depending on the sign of z.
    return ((body + 1.0) * 0.5) if z > 0.0 else ((1.0 - body) * 0.5)
Returns the area under the normal curve 'to the left of' the given z value. Thus, for z<0, zprob(z) = 1-tail probability for z>0, 1.0-zprob(z) = 1-tail probability for any z, 2.0*(1.0-zprob(abs(z))) = 2-tail probability Adapted from z.c in Gary Perlman's |Stat. Usage: lzprob(z)
Below is the the instruction that describes the task: ### Input: Returns the area under the normal curve 'to the left of' the given z value. Thus, for z<0, zprob(z) = 1-tail probability for z>0, 1.0-zprob(z) = 1-tail probability for any z, 2.0*(1.0-zprob(abs(z))) = 2-tail probability Adapted from z.c in Gary Perlman's |Stat. Usage: lzprob(z) ### Response: def lzprob(z): """ Returns the area under the normal curve 'to the left of' the given z value. Thus, for z<0, zprob(z) = 1-tail probability for z>0, 1.0-zprob(z) = 1-tail probability for any z, 2.0*(1.0-zprob(abs(z))) = 2-tail probability Adapted from z.c in Gary Perlman's |Stat. Usage: lzprob(z) """ Z_MAX = 6.0 # maximum meaningful z-value if z == 0.0: x = 0.0 else: y = 0.5 * math.fabs(z) if y >= (Z_MAX*0.5): x = 1.0 elif (y < 1.0): w = y*y x = ((((((((0.000124818987 * w -0.001075204047) * w +0.005198775019) * w -0.019198292004) * w +0.059054035642) * w -0.151968751364) * w +0.319152932694) * w -0.531923007300) * w +0.797884560593) * y * 2.0 else: y = y - 2.0 x = (((((((((((((-0.000045255659 * y +0.000152529290) * y -0.000019538132) * y -0.000676904986) * y +0.001390604284) * y -0.000794620820) * y -0.002034254874) * y +0.006549791214) * y -0.010557625006) * y +0.011630447319) * y -0.009279453341) * y +0.005353579108) * y -0.002141268741) * y +0.000535310849) * y +0.999936657524 if z > 0.0: prob = ((x+1.0)*0.5) else: prob = ((1.0-x)*0.5) return prob
def get_z_variables(nc):
    '''
    Returns a list of all variables matching definitions for Z

    :param netcdf4.dataset nc: an open netcdf dataset object
    '''
    z_variables = []
    # A vertical (Z) coordinate is recognised by any of: pressure-compatible
    # units, positive='up'/'down', axis='Z', a vertical standard_name, or a
    # dimensionless-vertical-coordinate standard_name.
    candidates = get_coordinate_variables(nc) + get_auxiliary_coordinate_variables(nc)
    for name in candidates:
        if name in z_variables:
            continue
        var = nc.variables[name]
        units = getattr(var, 'units', None)
        positive = getattr(var, 'positive', None)
        standard_name = getattr(var, 'standard_name', None)
        axis = getattr(var, 'axis', None)
        # Units-based detection only applies when units exist; otherwise fall
        # back to the 'positive' attribute.
        # NOTE(review): `basestring` is Python 2 only — presumably a compat
        # alias is in scope for Python 3; confirm.
        if units is not None:
            if units_convertible(units, 'bar'):
                z_variables.append(name)
        elif isinstance(positive, basestring) and positive.lower() in ('up', 'down'):
            z_variables.append(name)
        # Attribute-based detection for anything not already matched.
        if name not in z_variables and (
                axis == 'Z'
                or standard_name in ('depth', 'height', 'altitude')
                or standard_name in DIMENSIONLESS_VERTICAL_COORDINATES):
            z_variables.append(name)
    return z_variables
Returns a list of all variables matching definitions for Z :param netcdf4.dataset nc: an open netcdf dataset object
Below is the the instruction that describes the task: ### Input: Returns a list of all variables matching definitions for Z :param netcdf4.dataset nc: an open netcdf dataset object ### Response: def get_z_variables(nc): ''' Returns a list of all variables matching definitions for Z :param netcdf4.dataset nc: an open netcdf dataset object ''' z_variables = [] # Vertical coordinates will be identifiable by units of pressure or the # presence of the positive attribute with a value of up/down # optionally, the vertical type may be indicated by providing the # standard_name attribute or axis='Z' total_coords = get_coordinate_variables(nc) + get_auxiliary_coordinate_variables(nc) for coord_name in total_coords: if coord_name in z_variables: continue coord_var = nc.variables[coord_name] units = getattr(coord_var, 'units', None) positive = getattr(coord_var, 'positive', None) standard_name = getattr(coord_var, 'standard_name', None) axis = getattr(coord_var, 'axis', None) # If there are no units, we can't identify it as a vertical coordinate # by checking pressure or positive if units is not None: if units_convertible(units, 'bar'): z_variables.append(coord_name) elif isinstance(positive, basestring): if positive.lower() in ['up', 'down']: z_variables.append(coord_name) # if axis='Z' we're good if coord_name not in z_variables and axis == 'Z': z_variables.append(coord_name) if coord_name not in z_variables and standard_name in ('depth', 'height', 'altitude'): z_variables.append(coord_name) if coord_name not in z_variables and standard_name in DIMENSIONLESS_VERTICAL_COORDINATES: z_variables.append(coord_name) return z_variables
def notify_change(self, change):
    """Push a changed trait to the front-end, then run registered observers.

    The state is sent to the front-end before user callbacks fire, and only
    when the trait is a synced key whose new value did not just arrive from
    the front-end itself (to avoid echo loops).
    """
    name = change['name']
    should_sync = (
        self.comm is not None
        and self.comm.kernel is not None
        and name in self.keys
        and self._should_send_property(name, getattr(self, name))
    )
    if should_sync:
        self.send_state(key=name)
    super(Widget, self).notify_change(change)
Called when a property has changed.
Below is the the instruction that describes the task: ### Input: Called when a property has changed. ### Response: def notify_change(self, change): """Called when a property has changed.""" # Send the state to the frontend before the user-registered callbacks # are called. name = change['name'] if self.comm is not None and self.comm.kernel is not None: # Make sure this isn't information that the front-end just sent us. if name in self.keys and self._should_send_property(name, getattr(self, name)): # Send new state to front-end self.send_state(key=name) super(Widget, self).notify_change(change)
def bcdc_package_show(package):
    """Query DataBC Catalogue API about given package

    Returns the ``result`` payload of the ``package_show`` endpoint.
    Raises ValueError on any non-200 response.
    """
    r = requests.get(bcdata.BCDC_API_URL + "package_show",
                     params={"id": package})
    if r.status_code != 200:
        raise ValueError("{d} is not present in DataBC API list".format(d=package))
    return r.json()["result"]
Query DataBC Catalogue API about given package
Below is the the instruction that describes the task: ### Input: Query DataBC Catalogue API about given package ### Response: def bcdc_package_show(package): """Query DataBC Catalogue API about given package """ params = {"id": package} r = requests.get(bcdata.BCDC_API_URL + "package_show", params=params) if r.status_code != 200: raise ValueError("{d} is not present in DataBC API list".format(d=package)) return r.json()["result"]
def get_thermal_conductivity(self, output='eigs', doping_levels=True,
                             k_el=True, relaxation_time=1e-14):
    """
    Gives the electronic part of the thermal conductivity in either a
    full 3x3 tensor form, as 3 eigenvalues, or as the average value
    (trace/3.0). If doping_levels=True, the results are given at
    different p and n doping levels (given by self.doping), otherwise it
    is given as a series of electron chemical potential values.

    Args:
        output (string): the type of output. 'tensor' give the full 3x3
            tensor, 'eigs' its 3 eigenvalues and 'average' the average
            of the three eigenvalues
        doping_levels (boolean): True for the results to be given at
            different doping levels, False for results at different
            electron chemical potentials
        k_el (boolean): True for k_0-PF*T, False for k_0
        relaxation_time (float): constant relaxation time in secs

    Returns:
        If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. The
        'p' links to thermal conductivity at p-type doping and 'n' to
        the thermal conductivity at n-type doping. Otherwise, returns a
        {temp:[]} dictionary. The result contains either the sorted
        three eigenvalues of the symmetric conductivity tensor
        (format='eigs') or a full tensor (3x3 array) (output='tensor')
        or as an average (output='average').
        The result includes a given constant relaxation time; units are
        W/mK.
    """
    result = None
    result_doping = None
    if doping_levels:
        # One list (indexed by doping concentration) per (doping type,
        # temperature) pair, mirroring the layout of self._kappa_doping.
        result_doping = {doping: {t: [] for t in
                                  self._seebeck_doping[doping]}
                         for doping in self._seebeck_doping}
        for doping in result_doping:
            for t in result_doping[doping]:
                for i in range(len(self.doping[doping])):
                    if k_el:
                        # Power-factor tensor sigma . S . S; subtracting
                        # PF*T removes the Seebeck (Peltier) contribution
                        # from the raw k_0 tensor.
                        pf_tensor = np.dot(self._cond_doping[doping][t][i],
                                           np.dot(
                                               self._seebeck_doping[doping][
                                                   t][i],
                                               self._seebeck_doping[doping][
                                                   t][i]))
                        result_doping[doping][t].append((
                            self._kappa_doping[doping][t][
                                i] - pf_tensor * t))
                    else:
                        # Raw k_0 tensor, Seebeck contribution included.
                        result_doping[doping][t].append((
                            self._kappa_doping[doping][t][i]))
    else:
        # Chemical-potential mode: one list per temperature, indexed by
        # the mu grid (self.mu_steps).
        result = {t: [] for t in self._seebeck}
        for t in result:
            for i in range(len(self.mu_steps)):
                if k_el:
                    pf_tensor = np.dot(self._cond[t][i],
                                       np.dot(self._seebeck[t][i],
                                              self._seebeck[t][i]))
                    result[t].append((self._kappa[t][i] - pf_tensor * t))
                else:
                    result[t].append((self._kappa[t][i]))
    # _format_to_output applies the eigs/tensor/average reduction and
    # multiplies by the constant relaxation time.
    return BoltztrapAnalyzer._format_to_output(result, result_doping,
                                               output, doping_levels,
                                               multi=relaxation_time)
Gives the electronic part of the thermal conductivity in either a full 3x3 tensor form, as 3 eigenvalues, or as the average value (trace/3.0) If doping_levels=True, the results are given at different p and n doping levels (given by self.doping), otherwise it is given as a series of electron chemical potential values Args: output (string): the type of output. 'tensor' give the full 3x3 tensor, 'eigs' its 3 eigenvalues and 'average' the average of the three eigenvalues doping_levels (boolean): True for the results to be given at different doping levels, False for results at different electron chemical potentials k_el (boolean): True for k_0-PF*T, False for k_0 relaxation_time (float): constant relaxation time in secs Returns: If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. The 'p' links to thermal conductivity at p-type doping and 'n' to the thermal conductivity at n-type doping. Otherwise, returns a {temp:[]} dictionary. The result contains either the sorted three eigenvalues of the symmetric conductivity tensor (format='eigs') or a full tensor (3x3 array) ( output='tensor') or as an average (output='average'). The result includes a given constant relaxation time units are W/mK
Below is the the instruction that describes the task: ### Input: Gives the electronic part of the thermal conductivity in either a full 3x3 tensor form, as 3 eigenvalues, or as the average value (trace/3.0) If doping_levels=True, the results are given at different p and n doping levels (given by self.doping), otherwise it is given as a series of electron chemical potential values Args: output (string): the type of output. 'tensor' give the full 3x3 tensor, 'eigs' its 3 eigenvalues and 'average' the average of the three eigenvalues doping_levels (boolean): True for the results to be given at different doping levels, False for results at different electron chemical potentials k_el (boolean): True for k_0-PF*T, False for k_0 relaxation_time (float): constant relaxation time in secs Returns: If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. The 'p' links to thermal conductivity at p-type doping and 'n' to the thermal conductivity at n-type doping. Otherwise, returns a {temp:[]} dictionary. The result contains either the sorted three eigenvalues of the symmetric conductivity tensor (format='eigs') or a full tensor (3x3 array) ( output='tensor') or as an average (output='average'). The result includes a given constant relaxation time units are W/mK ### Response: def get_thermal_conductivity(self, output='eigs', doping_levels=True, k_el=True, relaxation_time=1e-14): """ Gives the electronic part of the thermal conductivity in either a full 3x3 tensor form, as 3 eigenvalues, or as the average value (trace/3.0) If doping_levels=True, the results are given at different p and n doping levels (given by self.doping), otherwise it is given as a series of electron chemical potential values Args: output (string): the type of output. 
'tensor' give the full 3x3 tensor, 'eigs' its 3 eigenvalues and 'average' the average of the three eigenvalues doping_levels (boolean): True for the results to be given at different doping levels, False for results at different electron chemical potentials k_el (boolean): True for k_0-PF*T, False for k_0 relaxation_time (float): constant relaxation time in secs Returns: If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. The 'p' links to thermal conductivity at p-type doping and 'n' to the thermal conductivity at n-type doping. Otherwise, returns a {temp:[]} dictionary. The result contains either the sorted three eigenvalues of the symmetric conductivity tensor (format='eigs') or a full tensor (3x3 array) ( output='tensor') or as an average (output='average'). The result includes a given constant relaxation time units are W/mK """ result = None result_doping = None if doping_levels: result_doping = {doping: {t: [] for t in self._seebeck_doping[doping]} for doping in self._seebeck_doping} for doping in result_doping: for t in result_doping[doping]: for i in range(len(self.doping[doping])): if k_el: pf_tensor = np.dot(self._cond_doping[doping][t][i], np.dot( self._seebeck_doping[doping][ t][i], self._seebeck_doping[doping][ t][i])) result_doping[doping][t].append(( self._kappa_doping[doping][t][ i] - pf_tensor * t)) else: result_doping[doping][t].append(( self._kappa_doping[doping][t][i])) else: result = {t: [] for t in self._seebeck} for t in result: for i in range(len(self.mu_steps)): if k_el: pf_tensor = np.dot(self._cond[t][i], np.dot(self._seebeck[t][i], self._seebeck[t][i])) result[t].append((self._kappa[t][i] - pf_tensor * t)) else: result[t].append((self._kappa[t][i])) return BoltztrapAnalyzer._format_to_output(result, result_doping, output, doping_levels, multi=relaxation_time)
def visit_ArraySelect(self, expression, *operands):
    """ ArraySelect (ArrayStore((ArrayStore(x0,v0) ...),xn, vn), x0)
            -> v0

    Simplify a select over a chain of stores: walk past every store
    whose (constant) index provably differs from the constant select
    index; if we land on a store at exactly that index, the stored
    value is the answer.
    """
    arr, index = operands
    if isinstance(arr, ArrayVariable):
        # Base array with no stores to look through: nothing to simplify.
        return
    if isinstance(index, BitVecConstant):
        ival = index.value
        # props are slow and using them in tight loops should be avoided, esp when they offer no additional validation
        # arr._operands[1] = arr.index, arr._operands[0] = arr.array
        # Skip stores at other constant indices; stop at a symbolic or
        # matching index (a symbolic store index could alias ival).
        while isinstance(arr, ArrayStore) and isinstance(arr._operands[1], BitVecConstant) and arr._operands[1]._value != ival:
            arr = arr._operands[0]  # arr.array
    if isinstance(index, BitVecConstant) and isinstance(arr, ArrayStore) and isinstance(arr.index, BitVecConstant) and arr.index.value == index.value:
        # Exact hit: the select resolves to the stored value.
        return arr.value
    else:
        # No exact hit, but if any stores were skipped, re-select over the
        # shortened chain; returning None (implicitly) keeps the original.
        if arr is not expression.array:
            return arr.select(index)
ArraySelect (ArrayStore((ArrayStore(x0,v0) ...),xn, vn), x0) -> v0
Below is the the instruction that describes the task: ### Input: ArraySelect (ArrayStore((ArrayStore(x0,v0) ...),xn, vn), x0) -> v0 ### Response: def visit_ArraySelect(self, expression, *operands): """ ArraySelect (ArrayStore((ArrayStore(x0,v0) ...),xn, vn), x0) -> v0 """ arr, index = operands if isinstance(arr, ArrayVariable): return if isinstance(index, BitVecConstant): ival = index.value # props are slow and using them in tight loops should be avoided, esp when they offer no additional validation # arr._operands[1] = arr.index, arr._operands[0] = arr.array while isinstance(arr, ArrayStore) and isinstance(arr._operands[1], BitVecConstant) and arr._operands[1]._value != ival: arr = arr._operands[0] # arr.array if isinstance(index, BitVecConstant) and isinstance(arr, ArrayStore) and isinstance(arr.index, BitVecConstant) and arr.index.value == index.value: return arr.value else: if arr is not expression.array: return arr.select(index)
def calibrate_cameras(self):
    """Calibrate cameras based on found chessboard corners.

    Runs stereo calibration, rectification and undistortion-map
    computation over the accumulated object/image point pairs, and
    returns a populated ``StereoCalibration`` object.
    """
    # Termination criteria for the iterative calibration optimizer.
    criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS,
                100, 1e-5)
    # Constrain the model: fixed aspect ratio, no tangential distortion,
    # identical focal length for both cameras.
    flags = (cv2.CALIB_FIX_ASPECT_RATIO + cv2.CALIB_ZERO_TANGENT_DIST +
             cv2.CALIB_SAME_FOCAL_LENGTH)
    calib = StereoCalibration()
    # [1:] drops the returned RMS reprojection error; only the matrices
    # are stored.
    (calib.cam_mats["left"], calib.dist_coefs["left"],
     calib.cam_mats["right"], calib.dist_coefs["right"],
     calib.rot_mat, calib.trans_vec, calib.e_mat,
     calib.f_mat) = cv2.stereoCalibrate(self.object_points,
                                        self.image_points["left"],
                                        self.image_points["right"],
                                        self.image_size,
                                        calib.cam_mats["left"],
                                        calib.dist_coefs["left"],
                                        calib.cam_mats["right"],
                                        calib.dist_coefs["right"],
                                        calib.rot_mat,
                                        calib.trans_vec,
                                        calib.e_mat,
                                        calib.f_mat,
                                        criteria=criteria,
                                        flags=flags)[1:]
    # Compute rectification transforms and projection matrices so that
    # epipolar lines become horizontal.
    (calib.rect_trans["left"], calib.rect_trans["right"],
     calib.proj_mats["left"], calib.proj_mats["right"],
     calib.disp_to_depth_mat, calib.valid_boxes["left"],
     calib.valid_boxes["right"]) = cv2.stereoRectify(calib.cam_mats["left"],
                                                     calib.dist_coefs["left"],
                                                     calib.cam_mats["right"],
                                                     calib.dist_coefs["right"],
                                                     self.image_size,
                                                     calib.rot_mat,
                                                     calib.trans_vec,
                                                     flags=0)
    # Per-camera pixel remapping tables for undistortion + rectification.
    for side in ("left", "right"):
        (calib.undistortion_map[side],
         calib.rectification_map[side]) = cv2.initUndistortRectifyMap(
            calib.cam_mats[side],
            calib.dist_coefs[side],
            calib.rect_trans[side],
            calib.proj_mats[side],
            self.image_size,
            cv2.CV_32FC1)
    # This is replaced because my results were always bad. Estimates are
    # taken from the OpenCV samples.
    # NOTE(review): this overwrites the Q matrix stereoRectify just
    # produced with a hand-built estimate (focal length ~= 0.8 * width).
    width, height = self.image_size
    focal_length = 0.8 * width
    calib.disp_to_depth_mat = np.float32([[1, 0, 0, -0.5 * width],
                                          [0, -1, 0, 0.5 * height],
                                          [0, 0, 0, -focal_length],
                                          [0, 0, 1, 0]])
    return calib
Calibrate cameras based on found chessboard corners.
Below is the the instruction that describes the task: ### Input: Calibrate cameras based on found chessboard corners. ### Response: def calibrate_cameras(self): """Calibrate cameras based on found chessboard corners.""" criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 100, 1e-5) flags = (cv2.CALIB_FIX_ASPECT_RATIO + cv2.CALIB_ZERO_TANGENT_DIST + cv2.CALIB_SAME_FOCAL_LENGTH) calib = StereoCalibration() (calib.cam_mats["left"], calib.dist_coefs["left"], calib.cam_mats["right"], calib.dist_coefs["right"], calib.rot_mat, calib.trans_vec, calib.e_mat, calib.f_mat) = cv2.stereoCalibrate(self.object_points, self.image_points["left"], self.image_points["right"], self.image_size, calib.cam_mats["left"], calib.dist_coefs["left"], calib.cam_mats["right"], calib.dist_coefs["right"], calib.rot_mat, calib.trans_vec, calib.e_mat, calib.f_mat, criteria=criteria, flags=flags)[1:] (calib.rect_trans["left"], calib.rect_trans["right"], calib.proj_mats["left"], calib.proj_mats["right"], calib.disp_to_depth_mat, calib.valid_boxes["left"], calib.valid_boxes["right"]) = cv2.stereoRectify(calib.cam_mats["left"], calib.dist_coefs["left"], calib.cam_mats["right"], calib.dist_coefs["right"], self.image_size, calib.rot_mat, calib.trans_vec, flags=0) for side in ("left", "right"): (calib.undistortion_map[side], calib.rectification_map[side]) = cv2.initUndistortRectifyMap( calib.cam_mats[side], calib.dist_coefs[side], calib.rect_trans[side], calib.proj_mats[side], self.image_size, cv2.CV_32FC1) # This is replaced because my results were always bad. Estimates are # taken from the OpenCV samples. width, height = self.image_size focal_length = 0.8 * width calib.disp_to_depth_mat = np.float32([[1, 0, 0, -0.5 * width], [0, -1, 0, 0.5 * height], [0, 0, 0, -focal_length], [0, 0, 1, 0]]) return calib
def get(self, selector, index=0, default=None):
    """Return the element at *index* from the CSSSelector results.

    Falls back to *default* when the selector matches nothing or *index*
    is out of range.
    """
    matches = self(selector)
    if not matches:
        return default
    try:
        return matches[index]
    except IndexError:
        return default
Get first element from CSSSelector
Below is the the instruction that describes the task: ### Input: Get first element from CSSSelector ### Response: def get(self, selector, index=0, default=None): """ Get first element from CSSSelector """ elements = self(selector) if elements: try: return elements[index] except (IndexError): pass return default
def get_groups_with_perms(obj, attach_perms=False):
    """Return queryset of all ``Group`` objects with *any* object permissions for
    the given ``obj``.

    When ``attach_perms`` is True, instead returns a dict mapping each
    such ``Group`` to the list of permission codenames it holds on
    ``obj``.
    """
    ctype = get_content_type(obj)
    group_model = get_group_obj_perms_model(obj)
    if not attach_perms:
        # It's much easier without attached perms so we do it first if that is the case
        group_rel_name = group_model.group.field.related_query_name()
        if group_model.objects.is_generic():
            # Generic FK storage: match on content type + stringified pk.
            group_filters = {
                '%s__content_type' % group_rel_name: ctype,
                '%s__object_pk' % group_rel_name: obj.pk,
            }
        else:
            # Direct FK storage: match on the object itself.
            group_filters = {'%s__content_object' % group_rel_name: obj}
        return Group.objects.filter(**group_filters).distinct()
    else:
        group_perms_mapping = defaultdict(list)
        # Recurse (attach_perms=False) to get the groups, then collect
        # each group's permission codenames in one prefetched query.
        groups_with_perms = get_groups_with_perms(obj)
        queryset = group_model.objects.filter(group__in=groups_with_perms).prefetch_related('group', 'permission')
        if group_model is GroupObjectPermission:
            queryset = queryset.filter(object_pk=obj.pk, content_type=ctype)
        else:
            queryset = queryset.filter(content_object_id=obj.pk)
        for group_perm in queryset:
            group_perms_mapping[group_perm.group].append(group_perm.permission.codename)
        return dict(group_perms_mapping)
Return queryset of all ``Group`` objects with *any* object permissions for the given ``obj``.
Below is the the instruction that describes the task: ### Input: Return queryset of all ``Group`` objects with *any* object permissions for the given ``obj``. ### Response: def get_groups_with_perms(obj, attach_perms=False): """Return queryset of all ``Group`` objects with *any* object permissions for the given ``obj``.""" ctype = get_content_type(obj) group_model = get_group_obj_perms_model(obj) if not attach_perms: # It's much easier without attached perms so we do it first if that is the case group_rel_name = group_model.group.field.related_query_name() if group_model.objects.is_generic(): group_filters = { '%s__content_type' % group_rel_name: ctype, '%s__object_pk' % group_rel_name: obj.pk, } else: group_filters = {'%s__content_object' % group_rel_name: obj} return Group.objects.filter(**group_filters).distinct() else: group_perms_mapping = defaultdict(list) groups_with_perms = get_groups_with_perms(obj) queryset = group_model.objects.filter(group__in=groups_with_perms).prefetch_related('group', 'permission') if group_model is GroupObjectPermission: queryset = queryset.filter(object_pk=obj.pk, content_type=ctype) else: queryset = queryset.filter(content_object_id=obj.pk) for group_perm in queryset: group_perms_mapping[group_perm.group].append(group_perm.permission.codename) return dict(group_perms_mapping)
def update(self, data):
    """Hash data.

    Buffers incoming bytes and feeds complete ``block_size``-sized
    chunks to ``_add_block``; any trailing partial chunk is kept in
    ``self._buf`` for the next call.
    """
    mv = memoryview(data)
    block = self.block_size
    pending = self._buf
    if pending:
        # Top up the leftover partial block from the previous call.
        missing = block - len(pending)
        if len(mv) < missing:
            # Still not a full block: just extend the buffer and stop.
            self._buf = pending + mv.tobytes()
            return
        self._add_block(pending + mv[:missing].tobytes())
        mv = mv[missing:]
    # Consume every remaining complete block without copying the data.
    while len(mv) >= block:
        self._add_block(mv[:block])
        mv = mv[block:]
    self._buf = mv.tobytes()
Hash data.
Below is the the instruction that describes the task: ### Input: Hash data. ### Response: def update(self, data): """Hash data. """ view = memoryview(data) bs = self.block_size if self._buf: need = bs - len(self._buf) if len(view) < need: self._buf += view.tobytes() return self._add_block(self._buf + view[:need].tobytes()) view = view[need:] while len(view) >= bs: self._add_block(view[:bs]) view = view[bs:] self._buf = view.tobytes()
def check_solver_status(status, raise_error=False):
    """Perform standard checks on a solver's status.

    Silently accepts OPTIMAL; warns (or raises, if *raise_error*) on a
    status that still produced primal values; raises OptimizationError
    otherwise.
    """
    if status == OPTIMAL:
        return
    if status in has_primals and not raise_error:
        warn("solver status is '{}'".format(status), UserWarning)
        return
    if status is None:
        raise OptimizationError(
            "model was not optimized yet or solver context switched")
    raise OptimizationError("solver status is '{}'".format(status))
Perform standard checks on a solver's status.
Below is the the instruction that describes the task: ### Input: Perform standard checks on a solver's status. ### Response: def check_solver_status(status, raise_error=False): """Perform standard checks on a solver's status.""" if status == OPTIMAL: return elif (status in has_primals) and not raise_error: warn("solver status is '{}'".format(status), UserWarning) elif status is None: raise OptimizationError( "model was not optimized yet or solver context switched") else: raise OptimizationError("solver status is '{}'".format(status))
def get_nearest_site(self, coords, site, r=None):
    """
    Given coords and a site, find the closest site to coords.

    Args:
        coords (3x1 array): cartesian coords of center of sphere
        site: site to find closest to coords
        r: radius of sphere. Defaults to diagonal of unit cell

    Returns:
        Closest site and distance.
    """
    site_index = self.index(site)
    if r is None:
        # Default search radius: length of the lattice body diagonal, so
        # the sphere is guaranteed to contain at least one image.
        r = np.linalg.norm(np.sum(self.lattice.matrix, axis=0))
    neighbors = self.get_sites_in_sphere(coords, r, include_index=True)
    # Keep only periodic images of the requested site, then pick the
    # one nearest to coords.
    images = [n for n in neighbors if n[2] == site_index]
    images.sort(key=lambda n: n[1])
    # (PeriodicSite, distance) of the closest image.
    return images[0][0:2]
Given coords and a site, find closest site to coords.

        Args:
            coords (3x1 array): cartesian coords of center of sphere
            site: site to find closest to coords
            r: radius of sphere. Defaults to diagonal of unit cell

        Returns:
            Closest site and distance.
Below is the the instruction that describes the task: ### Input: Given coords and a site, find closet site to coords. Args: coords (3x1 array): cartesian coords of center of sphere site: site to find closest to coords r: radius of sphere. Defaults to diagonal of unit cell Returns: Closest site and distance. ### Response: def get_nearest_site(self, coords, site, r=None): """ Given coords and a site, find closet site to coords. Args: coords (3x1 array): cartesian coords of center of sphere site: site to find closest to coords r: radius of sphere. Defaults to diagonal of unit cell Returns: Closest site and distance. """ index = self.index(site) if r is None: r = np.linalg.norm(np.sum(self.lattice.matrix, axis=0)) ns = self.get_sites_in_sphere(coords, r, include_index=True) # Get sites with identical index to site ns = [n for n in ns if n[2] == index] # Sort by distance to coords ns.sort(key=lambda x: x[1]) # Return PeriodicSite and distance of closest image return ns[0][0:2]
def registry_key(self, key_name, value_name, value_type, **kwargs):
    """Create a Registry Key indicator and add it to this Batch.

    Args:
        key_name (str): The key_name value for this Indicator.
        value_name (str): The value_name value for this Indicator.
        value_type (str): The value_type value for this Indicator.
        confidence (str, kwargs): The threat confidence for this Indicator.
        date_added (str, kwargs): The date timestamp the Indicator was created.
        last_modified (str, kwargs): The date timestamp the Indicator was last
            modified.
        rating (str, kwargs): The threat rating for this Indicator.
        xid (str, kwargs): The external id for this Indicator.

    Returns:
        obj: An instance of Registry Key.
    """
    return self._indicator(
        RegistryKey(key_name, value_name, value_type, **kwargs))
Add Registry Key data to Batch object. Args: key_name (str): The key_name value for this Indicator. value_name (str): The value_name value for this Indicator. value_type (str): The value_type value for this Indicator. confidence (str, kwargs): The threat confidence for this Indicator. date_added (str, kwargs): The date timestamp the Indicator was created. last_modified (str, kwargs): The date timestamp the Indicator was last modified. rating (str, kwargs): The threat rating for this Indicator. xid (str, kwargs): The external id for this Indicator. Returns: obj: An instance of Registry Key.
Below is the the instruction that describes the task: ### Input: Add Registry Key data to Batch object. Args: key_name (str): The key_name value for this Indicator. value_name (str): The value_name value for this Indicator. value_type (str): The value_type value for this Indicator. confidence (str, kwargs): The threat confidence for this Indicator. date_added (str, kwargs): The date timestamp the Indicator was created. last_modified (str, kwargs): The date timestamp the Indicator was last modified. rating (str, kwargs): The threat rating for this Indicator. xid (str, kwargs): The external id for this Indicator. Returns: obj: An instance of Registry Key. ### Response: def registry_key(self, key_name, value_name, value_type, **kwargs): """Add Registry Key data to Batch object. Args: key_name (str): The key_name value for this Indicator. value_name (str): The value_name value for this Indicator. value_type (str): The value_type value for this Indicator. confidence (str, kwargs): The threat confidence for this Indicator. date_added (str, kwargs): The date timestamp the Indicator was created. last_modified (str, kwargs): The date timestamp the Indicator was last modified. rating (str, kwargs): The threat rating for this Indicator. xid (str, kwargs): The external id for this Indicator. Returns: obj: An instance of Registry Key. """ indicator_obj = RegistryKey(key_name, value_name, value_type, **kwargs) return self._indicator(indicator_obj)
def set_residual(self, pores=[], throats=[], overwrite=False):
    r"""
    Specify locations of any residual invader.

    The given pores and throats are marked as invaded from the very
    start of the simulation.

    Parameters
    ----------
    pores : array_like
        Pore locations to be filled with invader at the beginning of
        the simulation.
    throats : array_like
        Throat locations to be filled with invader at the beginning of
        the simulation.
    overwrite : boolean
        If ``True``, all existing residual locations are cleared before
        the supplied locations are added.  If ``False``, the supplied
        locations are added to any already existing ones.
    """
    # NOTE: mutable default arguments kept for interface compatibility;
    # they are never mutated here.
    for element, locations in (('pore', pores), ('throat', throats)):
        inds = self._parse_indices(locations)
        label = element + '.residual'
        if overwrite:
            self[label] = False
        self[label][inds] = True
r""" Specify locations of any residual invader. These locations are set to invaded at the start of the simulation. Parameters ---------- pores : array_like The pores locations that are to be filled with invader at the beginning of the simulation. throats : array_like The throat locations that are to be filled with invader at the beginning of the simulation. overwrite : boolean If ``True`` then all existing inlet locations will be removed and then the supplied locations will be added. If ``False``, then supplied locations are added to any already existing locations.
Below is the the instruction that describes the task: ### Input: r""" Specify locations of any residual invader. These locations are set to invaded at the start of the simulation. Parameters ---------- pores : array_like The pores locations that are to be filled with invader at the beginning of the simulation. throats : array_like The throat locations that are to be filled with invader at the beginning of the simulation. overwrite : boolean If ``True`` then all existing inlet locations will be removed and then the supplied locations will be added. If ``False``, then supplied locations are added to any already existing locations. ### Response: def set_residual(self, pores=[], throats=[], overwrite=False): r""" Specify locations of any residual invader. These locations are set to invaded at the start of the simulation. Parameters ---------- pores : array_like The pores locations that are to be filled with invader at the beginning of the simulation. throats : array_like The throat locations that are to be filled with invader at the beginning of the simulation. overwrite : boolean If ``True`` then all existing inlet locations will be removed and then the supplied locations will be added. If ``False``, then supplied locations are added to any already existing locations. """ Ps = self._parse_indices(pores) if overwrite: self['pore.residual'] = False self['pore.residual'][Ps] = True Ts = self._parse_indices(throats) if overwrite: self['throat.residual'] = False self['throat.residual'][Ts] = True
def tree_search(problem, frontier): """Search through the successors of a problem to find a goal. The argument frontier should be an empty queue. Don't worry about repeated paths to a state. [Fig. 3.7]""" frontier.append(Node(problem.initial)) while frontier: node = frontier.pop() if problem.goal_test(node.state): return node frontier.extend(node.expand(problem)) return None
Search through the successors of a problem to find a goal. The argument frontier should be an empty queue. Don't worry about repeated paths to a state. [Fig. 3.7]
Below is the the instruction that describes the task: ### Input: Search through the successors of a problem to find a goal. The argument frontier should be an empty queue. Don't worry about repeated paths to a state. [Fig. 3.7] ### Response: def tree_search(problem, frontier): """Search through the successors of a problem to find a goal. The argument frontier should be an empty queue. Don't worry about repeated paths to a state. [Fig. 3.7]""" frontier.append(Node(problem.initial)) while frontier: node = frontier.pop() if problem.goal_test(node.state): return node frontier.extend(node.expand(problem)) return None
def do_hdr(self, line, hdrs_usr): """Initialize self.h2i.""" # If there is no header hint, consider the first line the header. if self.hdr_ex is None: self._init_hdr(line, hdrs_usr) return True # If there is a header hint, examine each beginning line until header hint is found. elif self.hdr_ex in line: self._init_hdr(line, hdrs_usr) return True return False
Initialize self.h2i.
Below is the the instruction that describes the task: ### Input: Initialize self.h2i. ### Response: def do_hdr(self, line, hdrs_usr): """Initialize self.h2i.""" # If there is no header hint, consider the first line the header. if self.hdr_ex is None: self._init_hdr(line, hdrs_usr) return True # If there is a header hint, examine each beginning line until header hint is found. elif self.hdr_ex in line: self._init_hdr(line, hdrs_usr) return True return False
def format_record(record): """Serialise LogRecord instance""" record = dict( (key, getattr(record, key, None)) for key in ( "threadName", "name", "thread", "created", "process", "processName", "args", "module", "filename", "levelno", "exc_text", "pathname", "lineno", "msg", "exc_info", "funcName", "relativeCreated", "levelname", "msecs") ) # Humanise output and conform to Exceptions record["message"] = str(record.pop("msg")) if os.getenv("PYBLISH_SAFE"): schema.validate(record, "record") return record
Serialise LogRecord instance
Below is the the instruction that describes the task: ### Input: Serialise LogRecord instance ### Response: def format_record(record): """Serialise LogRecord instance""" record = dict( (key, getattr(record, key, None)) for key in ( "threadName", "name", "thread", "created", "process", "processName", "args", "module", "filename", "levelno", "exc_text", "pathname", "lineno", "msg", "exc_info", "funcName", "relativeCreated", "levelname", "msecs") ) # Humanise output and conform to Exceptions record["message"] = str(record.pop("msg")) if os.getenv("PYBLISH_SAFE"): schema.validate(record, "record") return record
def _validate(claims, validate_claims, expiry_seconds): """ Validate expiry related claims. If validate_claims is False, do nothing. Otherwise, validate the exp and nbf claims if they are present, and validate the iat claim if expiry_seconds is provided. """ if not validate_claims: return now = time() # TODO: implement support for clock skew # The exp (expiration time) claim identifies the expiration time on or # after which the JWT MUST NOT be accepted for processing. The # processing of the exp claim requires that the current date/time MUST # be before the expiration date/time listed in the exp claim. try: expiration_time = claims[CLAIM_EXPIRATION_TIME] except KeyError: pass else: _check_expiration_time(now, expiration_time) # The iat (issued at) claim identifies the time at which the JWT was # issued. This claim can be used to determine the age of the JWT. # If expiry_seconds is provided, and the iat claims is present, # determine the age of the token and check if it has expired. try: issued_at = claims[CLAIM_ISSUED_AT] except KeyError: pass else: if expiry_seconds is not None: _check_expiration_time(now, issued_at + expiry_seconds) # The nbf (not before) claim identifies the time before which the JWT # MUST NOT be accepted for processing. The processing of the nbf claim # requires that the current date/time MUST be after or equal to the # not-before date/time listed in the nbf claim. try: not_before = claims[CLAIM_NOT_BEFORE] except KeyError: pass else: _check_not_before(now, not_before)
Validate expiry related claims. If validate_claims is False, do nothing. Otherwise, validate the exp and nbf claims if they are present, and validate the iat claim if expiry_seconds is provided.
Below is the the instruction that describes the task: ### Input: Validate expiry related claims. If validate_claims is False, do nothing. Otherwise, validate the exp and nbf claims if they are present, and validate the iat claim if expiry_seconds is provided. ### Response: def _validate(claims, validate_claims, expiry_seconds): """ Validate expiry related claims. If validate_claims is False, do nothing. Otherwise, validate the exp and nbf claims if they are present, and validate the iat claim if expiry_seconds is provided. """ if not validate_claims: return now = time() # TODO: implement support for clock skew # The exp (expiration time) claim identifies the expiration time on or # after which the JWT MUST NOT be accepted for processing. The # processing of the exp claim requires that the current date/time MUST # be before the expiration date/time listed in the exp claim. try: expiration_time = claims[CLAIM_EXPIRATION_TIME] except KeyError: pass else: _check_expiration_time(now, expiration_time) # The iat (issued at) claim identifies the time at which the JWT was # issued. This claim can be used to determine the age of the JWT. # If expiry_seconds is provided, and the iat claims is present, # determine the age of the token and check if it has expired. try: issued_at = claims[CLAIM_ISSUED_AT] except KeyError: pass else: if expiry_seconds is not None: _check_expiration_time(now, issued_at + expiry_seconds) # The nbf (not before) claim identifies the time before which the JWT # MUST NOT be accepted for processing. The processing of the nbf claim # requires that the current date/time MUST be after or equal to the # not-before date/time listed in the nbf claim. try: not_before = claims[CLAIM_NOT_BEFORE] except KeyError: pass else: _check_not_before(now, not_before)
def sigmalos(Pot,R,dens=None,surfdens=None,beta=0.,sigma_r=None): """ NAME: sigmalos PURPOSE: Compute the line-of-sight velocity dispersion using the spherical Jeans equation INPUT: Pot - potential or list of potentials (evaluated at R=r/sqrt(2),z=r/sqrt(2), sphericity not checked) R - Galactocentric projected radius (can be Quantity) dens= (None) tracer density profile (function of r); if None, the density is assumed to be that corresponding to the potential surfdens= (None) tracer surface density profile (value at R or function of R); if None, the surface density is assumed to be that corresponding to the density beta= (0.) anisotropy; can be a constant or a function of r sigma_r= (None) if given, the solution of the spherical Jeans equation sigma_r(r) (used instead of solving the Jeans equation as part of this routine) OUTPUT: sigma_los(R) HISTORY: 2018-08-27 - Written - Bovy (UofT) """ Pot= flatten_pot(Pot) if dens is None: densPot= True dens= lambda r: evaluateDensities(Pot,r*_INVSQRTTWO,r*_INVSQRTTWO, use_physical=False) else: densPot= False if callable(surfdens): called_surfdens= surfdens(R) elif surfdens is None: if densPot: called_surfdens= evaluateSurfaceDensities(Pot,R,numpy.inf, use_physical=False) if not densPot or numpy.isnan(called_surfdens): called_surfdens=\ 2.*integrate.quad(lambda x: dens(numpy.sqrt(R**2.+x**2.)), 0.,numpy.inf)[0] else: called_surfdens= surfdens if callable(beta): call_beta= beta else: call_beta= lambda x: beta if sigma_r is None: call_sigma_r= lambda r: sigmar(Pot,r,dens=dens,beta=beta) elif not callable(sigma_r): call_sigma_r= lambda x: sigma_r else: call_sigma_r= sigma_r return numpy.sqrt(2.*integrate.quad(\ lambda x: (1.-call_beta(x)*R**2./x**2.)*x*dens(x)\ *call_sigma_r(x)**2./numpy.sqrt(x**2.-R**2.),R,numpy.inf)[0]\ /called_surfdens)
NAME: sigmalos PURPOSE: Compute the line-of-sight velocity dispersion using the spherical Jeans equation INPUT: Pot - potential or list of potentials (evaluated at R=r/sqrt(2),z=r/sqrt(2), sphericity not checked) R - Galactocentric projected radius (can be Quantity) dens= (None) tracer density profile (function of r); if None, the density is assumed to be that corresponding to the potential surfdens= (None) tracer surface density profile (value at R or function of R); if None, the surface density is assumed to be that corresponding to the density beta= (0.) anisotropy; can be a constant or a function of r sigma_r= (None) if given, the solution of the spherical Jeans equation sigma_r(r) (used instead of solving the Jeans equation as part of this routine) OUTPUT: sigma_los(R) HISTORY: 2018-08-27 - Written - Bovy (UofT)
Below is the the instruction that describes the task: ### Input: NAME: sigmalos PURPOSE: Compute the line-of-sight velocity dispersion using the spherical Jeans equation INPUT: Pot - potential or list of potentials (evaluated at R=r/sqrt(2),z=r/sqrt(2), sphericity not checked) R - Galactocentric projected radius (can be Quantity) dens= (None) tracer density profile (function of r); if None, the density is assumed to be that corresponding to the potential surfdens= (None) tracer surface density profile (value at R or function of R); if None, the surface density is assumed to be that corresponding to the density beta= (0.) anisotropy; can be a constant or a function of r sigma_r= (None) if given, the solution of the spherical Jeans equation sigma_r(r) (used instead of solving the Jeans equation as part of this routine) OUTPUT: sigma_los(R) HISTORY: 2018-08-27 - Written - Bovy (UofT) ### Response: def sigmalos(Pot,R,dens=None,surfdens=None,beta=0.,sigma_r=None): """ NAME: sigmalos PURPOSE: Compute the line-of-sight velocity dispersion using the spherical Jeans equation INPUT: Pot - potential or list of potentials (evaluated at R=r/sqrt(2),z=r/sqrt(2), sphericity not checked) R - Galactocentric projected radius (can be Quantity) dens= (None) tracer density profile (function of r); if None, the density is assumed to be that corresponding to the potential surfdens= (None) tracer surface density profile (value at R or function of R); if None, the surface density is assumed to be that corresponding to the density beta= (0.) 
anisotropy; can be a constant or a function of r sigma_r= (None) if given, the solution of the spherical Jeans equation sigma_r(r) (used instead of solving the Jeans equation as part of this routine) OUTPUT: sigma_los(R) HISTORY: 2018-08-27 - Written - Bovy (UofT) """ Pot= flatten_pot(Pot) if dens is None: densPot= True dens= lambda r: evaluateDensities(Pot,r*_INVSQRTTWO,r*_INVSQRTTWO, use_physical=False) else: densPot= False if callable(surfdens): called_surfdens= surfdens(R) elif surfdens is None: if densPot: called_surfdens= evaluateSurfaceDensities(Pot,R,numpy.inf, use_physical=False) if not densPot or numpy.isnan(called_surfdens): called_surfdens=\ 2.*integrate.quad(lambda x: dens(numpy.sqrt(R**2.+x**2.)), 0.,numpy.inf)[0] else: called_surfdens= surfdens if callable(beta): call_beta= beta else: call_beta= lambda x: beta if sigma_r is None: call_sigma_r= lambda r: sigmar(Pot,r,dens=dens,beta=beta) elif not callable(sigma_r): call_sigma_r= lambda x: sigma_r else: call_sigma_r= sigma_r return numpy.sqrt(2.*integrate.quad(\ lambda x: (1.-call_beta(x)*R**2./x**2.)*x*dens(x)\ *call_sigma_r(x)**2./numpy.sqrt(x**2.-R**2.),R,numpy.inf)[0]\ /called_surfdens)
def logspace(self,bins=None,units=None,conversion_function=convert_time,resolution=None,end_at_end=True): """ bins overwrites resolution """ if type(bins) in [list, np.ndarray]: return bins min = conversion_function(self.min,from_units=self.units,to_units=units) max = conversion_function(self.max,from_units=self.units,to_units=units) if units is None: units = self.units if resolution is None: resolution = 1.0 if bins is None: bins = self.len(resolution=resolution,units=units,conversion_function=conversion_function)# + 1 if units != '1' and end_at_end: # continuous variable behaviour: # we end with the last valid value at the outer edge return np.logspace(np.log10(min),np.log10(max),bins+1)[:-1] # discrete variable behaviour: # we end with the last valid value as its own bin return np.logspace(np.log10(min),np.log10(max),bins)
bins overwrites resolution
Below is the the instruction that describes the task: ### Input: bins overwrites resolution ### Response: def logspace(self,bins=None,units=None,conversion_function=convert_time,resolution=None,end_at_end=True): """ bins overwrites resolution """ if type(bins) in [list, np.ndarray]: return bins min = conversion_function(self.min,from_units=self.units,to_units=units) max = conversion_function(self.max,from_units=self.units,to_units=units) if units is None: units = self.units if resolution is None: resolution = 1.0 if bins is None: bins = self.len(resolution=resolution,units=units,conversion_function=conversion_function)# + 1 if units != '1' and end_at_end: # continuous variable behaviour: # we end with the last valid value at the outer edge return np.logspace(np.log10(min),np.log10(max),bins+1)[:-1] # discrete variable behaviour: # we end with the last valid value as its own bin return np.logspace(np.log10(min),np.log10(max),bins)
def run(self, dag): """Expand all op nodes to the given basis. Args: dag(DAGCircuit): input dag Raises: QiskitError: if unable to unroll given the basis due to undefined decomposition rules (such as a bad basis) or excessive recursion. Returns: DAGCircuit: output unrolled dag """ # Walk through the DAG and expand each non-basis node for node in dag.op_nodes(): basic_insts = ['measure', 'reset', 'barrier', 'snapshot'] if node.name in basic_insts: # TODO: this is legacy behavior.Basis_insts should be removed that these # instructions should be part of the device-reported basis. Currently, no # backend reports "measure", for example. continue if node.name in self.basis: # If already a base, ignore. continue # TODO: allow choosing other possible decompositions rule = node.op.definition if not rule: raise QiskitError("Cannot unroll the circuit to the given basis, %s. " "No rule to expand instruction %s." % (str(self.basis), node.op.name)) # hacky way to build a dag on the same register as the rule is defined # TODO: need anonymous rules to address wires by index decomposition = DAGCircuit() decomposition.add_qreg(rule[0][1][0][0]) for inst in rule: decomposition.apply_operation_back(*inst) unrolled_dag = self.run(decomposition) # recursively unroll ops dag.substitute_node_with_dag(node, unrolled_dag) return dag
Expand all op nodes to the given basis. Args: dag(DAGCircuit): input dag Raises: QiskitError: if unable to unroll given the basis due to undefined decomposition rules (such as a bad basis) or excessive recursion. Returns: DAGCircuit: output unrolled dag
Below is the the instruction that describes the task: ### Input: Expand all op nodes to the given basis. Args: dag(DAGCircuit): input dag Raises: QiskitError: if unable to unroll given the basis due to undefined decomposition rules (such as a bad basis) or excessive recursion. Returns: DAGCircuit: output unrolled dag ### Response: def run(self, dag): """Expand all op nodes to the given basis. Args: dag(DAGCircuit): input dag Raises: QiskitError: if unable to unroll given the basis due to undefined decomposition rules (such as a bad basis) or excessive recursion. Returns: DAGCircuit: output unrolled dag """ # Walk through the DAG and expand each non-basis node for node in dag.op_nodes(): basic_insts = ['measure', 'reset', 'barrier', 'snapshot'] if node.name in basic_insts: # TODO: this is legacy behavior.Basis_insts should be removed that these # instructions should be part of the device-reported basis. Currently, no # backend reports "measure", for example. continue if node.name in self.basis: # If already a base, ignore. continue # TODO: allow choosing other possible decompositions rule = node.op.definition if not rule: raise QiskitError("Cannot unroll the circuit to the given basis, %s. " "No rule to expand instruction %s." % (str(self.basis), node.op.name)) # hacky way to build a dag on the same register as the rule is defined # TODO: need anonymous rules to address wires by index decomposition = DAGCircuit() decomposition.add_qreg(rule[0][1][0][0]) for inst in rule: decomposition.apply_operation_back(*inst) unrolled_dag = self.run(decomposition) # recursively unroll ops dag.substitute_node_with_dag(node, unrolled_dag) return dag
def results(self): """A list of namedtuples in the form (eid doi pii pubmed_id title subtype creator afid affilname affiliation_city affiliation_country author_count author_names author_ids author_afids coverDate coverDisplayDate publicationName issn source_id eIssn aggregationType volume issueIdentifier article_number pageRange description authkeywords citedby_count openaccess fund_acr fund_no fund_sponsor). Field definitions correspond to https://dev.elsevier.com/guides/ScopusSearchViews.htm, except for afid, affilname, affiliation_city, affiliation_country, author_count, author_names, author_ids and author_afids: These information are joined on ";". In case an author has multiple affiliations, they are joined on "-" (e.g. Author1Aff;Author2Aff1-Author2Aff2). Notes ----- The list of authors and the list of affiliations per author are deduplicated. """ out = [] fields = 'eid doi pii pubmed_id title subtype creator afid affilname '\ 'affiliation_city affiliation_country author_count '\ 'author_names author_ids author_afids coverDate '\ 'coverDisplayDate publicationName issn source_id eIssn '\ 'aggregationType volume issueIdentifier article_number '\ 'pageRange description authkeywords citedby_count '\ 'openaccess fund_acr fund_no fund_sponsor' doc = namedtuple('Document', fields) for item in self._json: info = {} # Parse affiliations try: info["affilname"] = _join(item['affiliation'], 'affilname') info["afid"] = _join(item['affiliation'], 'afid') info["aff_city"] = _join(item['affiliation'], 'affiliation-city') info["aff_country"] = _join(item['affiliation'], 'affiliation-country') except KeyError: pass # Parse authors try: # Deduplicate list of authors authors = _deduplicate(item['author']) # Extract information surnames = _replace_none([d['surname'] for d in authors]) firstnames = _replace_none([d['given-name'] for d in authors]) info["auth_names"] = ";".join([", ".join([t[0], t[1]]) for t in zip(surnames, firstnames)]) info["auth_ids"] = ";".join([d['authid'] 
for d in authors]) affs = [] for auth in authors: aff = listify(_deduplicate(auth.get('afid', []))) affs.append('-'.join([d['$'] for d in aff])) info["auth_afid"] = (';'.join(affs) or None) except KeyError: pass date = item.get('prism:coverDate') if isinstance(date, list): date = date[0].get('$') new = doc(article_number=item.get('article-number'), title=item.get('dc:title'), fund_sponsor=item.get('fund-sponsor'), subtype=item.get('subtype'), issn=item.get('prism:issn'), creator=item.get('dc:creator'), affilname=info.get("affilname"), author_names=info.get("auth_names"), doi=item.get('prism:doi'), coverDate=date, volume=item.get('prism:volume'), coverDisplayDate=item.get('prism:coverDisplayDate'), publicationName=item.get('prism:publicationName'), source_id=item.get('source-id'), author_ids=info.get("auth_ids"), aggregationType=item.get('prism:aggregationType'), issueIdentifier=item.get('prism:issueIdentifier'), pageRange=item.get('prism:pageRange'), author_afids=info.get("auth_afid"), fund_no=item.get('fund-no'), affiliation_country=info.get("aff_country"), citedby_count=item.get('citedby-count'), openaccess=item.get('openaccess'), eIssn=item.get('prism:eIssn'), author_count=item.get('author-count', {}).get('$'), affiliation_city=info.get("aff_city"), afid=info.get("afid"), description=item.get('dc:description'), pii=item.get('pii'), authkeywords=item.get('authkeywords'), eid=item['eid'], fund_acr=item.get('fund-acr'), pubmed_id=item.get('pubmed-id')) out.append(new) return out or None
A list of namedtuples in the form (eid doi pii pubmed_id title subtype creator afid affilname affiliation_city affiliation_country author_count author_names author_ids author_afids coverDate coverDisplayDate publicationName issn source_id eIssn aggregationType volume issueIdentifier article_number pageRange description authkeywords citedby_count openaccess fund_acr fund_no fund_sponsor). Field definitions correspond to https://dev.elsevier.com/guides/ScopusSearchViews.htm, except for afid, affilname, affiliation_city, affiliation_country, author_count, author_names, author_ids and author_afids: These information are joined on ";". In case an author has multiple affiliations, they are joined on "-" (e.g. Author1Aff;Author2Aff1-Author2Aff2). Notes ----- The list of authors and the list of affiliations per author are deduplicated.
Below is the the instruction that describes the task: ### Input: A list of namedtuples in the form (eid doi pii pubmed_id title subtype creator afid affilname affiliation_city affiliation_country author_count author_names author_ids author_afids coverDate coverDisplayDate publicationName issn source_id eIssn aggregationType volume issueIdentifier article_number pageRange description authkeywords citedby_count openaccess fund_acr fund_no fund_sponsor). Field definitions correspond to https://dev.elsevier.com/guides/ScopusSearchViews.htm, except for afid, affilname, affiliation_city, affiliation_country, author_count, author_names, author_ids and author_afids: These information are joined on ";". In case an author has multiple affiliations, they are joined on "-" (e.g. Author1Aff;Author2Aff1-Author2Aff2). Notes ----- The list of authors and the list of affiliations per author are deduplicated. ### Response: def results(self): """A list of namedtuples in the form (eid doi pii pubmed_id title subtype creator afid affilname affiliation_city affiliation_country author_count author_names author_ids author_afids coverDate coverDisplayDate publicationName issn source_id eIssn aggregationType volume issueIdentifier article_number pageRange description authkeywords citedby_count openaccess fund_acr fund_no fund_sponsor). Field definitions correspond to https://dev.elsevier.com/guides/ScopusSearchViews.htm, except for afid, affilname, affiliation_city, affiliation_country, author_count, author_names, author_ids and author_afids: These information are joined on ";". In case an author has multiple affiliations, they are joined on "-" (e.g. Author1Aff;Author2Aff1-Author2Aff2). Notes ----- The list of authors and the list of affiliations per author are deduplicated. 
""" out = [] fields = 'eid doi pii pubmed_id title subtype creator afid affilname '\ 'affiliation_city affiliation_country author_count '\ 'author_names author_ids author_afids coverDate '\ 'coverDisplayDate publicationName issn source_id eIssn '\ 'aggregationType volume issueIdentifier article_number '\ 'pageRange description authkeywords citedby_count '\ 'openaccess fund_acr fund_no fund_sponsor' doc = namedtuple('Document', fields) for item in self._json: info = {} # Parse affiliations try: info["affilname"] = _join(item['affiliation'], 'affilname') info["afid"] = _join(item['affiliation'], 'afid') info["aff_city"] = _join(item['affiliation'], 'affiliation-city') info["aff_country"] = _join(item['affiliation'], 'affiliation-country') except KeyError: pass # Parse authors try: # Deduplicate list of authors authors = _deduplicate(item['author']) # Extract information surnames = _replace_none([d['surname'] for d in authors]) firstnames = _replace_none([d['given-name'] for d in authors]) info["auth_names"] = ";".join([", ".join([t[0], t[1]]) for t in zip(surnames, firstnames)]) info["auth_ids"] = ";".join([d['authid'] for d in authors]) affs = [] for auth in authors: aff = listify(_deduplicate(auth.get('afid', []))) affs.append('-'.join([d['$'] for d in aff])) info["auth_afid"] = (';'.join(affs) or None) except KeyError: pass date = item.get('prism:coverDate') if isinstance(date, list): date = date[0].get('$') new = doc(article_number=item.get('article-number'), title=item.get('dc:title'), fund_sponsor=item.get('fund-sponsor'), subtype=item.get('subtype'), issn=item.get('prism:issn'), creator=item.get('dc:creator'), affilname=info.get("affilname"), author_names=info.get("auth_names"), doi=item.get('prism:doi'), coverDate=date, volume=item.get('prism:volume'), coverDisplayDate=item.get('prism:coverDisplayDate'), publicationName=item.get('prism:publicationName'), source_id=item.get('source-id'), author_ids=info.get("auth_ids"), 
aggregationType=item.get('prism:aggregationType'), issueIdentifier=item.get('prism:issueIdentifier'), pageRange=item.get('prism:pageRange'), author_afids=info.get("auth_afid"), fund_no=item.get('fund-no'), affiliation_country=info.get("aff_country"), citedby_count=item.get('citedby-count'), openaccess=item.get('openaccess'), eIssn=item.get('prism:eIssn'), author_count=item.get('author-count', {}).get('$'), affiliation_city=info.get("aff_city"), afid=info.get("afid"), description=item.get('dc:description'), pii=item.get('pii'), authkeywords=item.get('authkeywords'), eid=item['eid'], fund_acr=item.get('fund-acr'), pubmed_id=item.get('pubmed-id')) out.append(new) return out or None
def remove_server(self, server_id): """ Remove a registered WBEM server from the subscription manager. This also unregisters listeners from that server and removes all owned indication subscriptions, owned indication filters and owned listener destinations. Parameters: server_id (:term:`string`): The server ID of the WBEM server, returned by :meth:`~pywbem.WBEMSubscriptionManager.add_server`. Raises: Exceptions raised by :class:`~pywbem.WBEMConnection`. """ # Validate server_id server = self._get_server(server_id) # Delete any instances we recorded to be cleaned up if server_id in self._owned_subscriptions: inst_list = self._owned_subscriptions[server_id] # We iterate backwards because we change the list for i in six.moves.range(len(inst_list) - 1, -1, -1): inst = inst_list[i] server.conn.DeleteInstance(inst.path) del inst_list[i] del self._owned_subscriptions[server_id] if server_id in self._owned_filters: inst_list = self._owned_filters[server_id] # We iterate backwards because we change the list for i in six.moves.range(len(inst_list) - 1, -1, -1): inst = inst_list[i] server.conn.DeleteInstance(inst.path) del inst_list[i] del self._owned_filters[server_id] if server_id in self._owned_destinations: inst_list = self._owned_destinations[server_id] # We iterate backwards because we change the list for i in six.moves.range(len(inst_list) - 1, -1, -1): inst = inst_list[i] server.conn.DeleteInstance(inst.path) del inst_list[i] del self._owned_destinations[server_id] # Remove server from this listener del self._servers[server_id]
Remove a registered WBEM server from the subscription manager. This also unregisters listeners from that server and removes all owned indication subscriptions, owned indication filters and owned listener destinations. Parameters: server_id (:term:`string`): The server ID of the WBEM server, returned by :meth:`~pywbem.WBEMSubscriptionManager.add_server`. Raises: Exceptions raised by :class:`~pywbem.WBEMConnection`.
Below is the the instruction that describes the task: ### Input: Remove a registered WBEM server from the subscription manager. This also unregisters listeners from that server and removes all owned indication subscriptions, owned indication filters and owned listener destinations. Parameters: server_id (:term:`string`): The server ID of the WBEM server, returned by :meth:`~pywbem.WBEMSubscriptionManager.add_server`. Raises: Exceptions raised by :class:`~pywbem.WBEMConnection`. ### Response: def remove_server(self, server_id): """ Remove a registered WBEM server from the subscription manager. This also unregisters listeners from that server and removes all owned indication subscriptions, owned indication filters and owned listener destinations. Parameters: server_id (:term:`string`): The server ID of the WBEM server, returned by :meth:`~pywbem.WBEMSubscriptionManager.add_server`. Raises: Exceptions raised by :class:`~pywbem.WBEMConnection`. """ # Validate server_id server = self._get_server(server_id) # Delete any instances we recorded to be cleaned up if server_id in self._owned_subscriptions: inst_list = self._owned_subscriptions[server_id] # We iterate backwards because we change the list for i in six.moves.range(len(inst_list) - 1, -1, -1): inst = inst_list[i] server.conn.DeleteInstance(inst.path) del inst_list[i] del self._owned_subscriptions[server_id] if server_id in self._owned_filters: inst_list = self._owned_filters[server_id] # We iterate backwards because we change the list for i in six.moves.range(len(inst_list) - 1, -1, -1): inst = inst_list[i] server.conn.DeleteInstance(inst.path) del inst_list[i] del self._owned_filters[server_id] if server_id in self._owned_destinations: inst_list = self._owned_destinations[server_id] # We iterate backwards because we change the list for i in six.moves.range(len(inst_list) - 1, -1, -1): inst = inst_list[i] server.conn.DeleteInstance(inst.path) del inst_list[i] del self._owned_destinations[server_id] # Remove server 
from this listener del self._servers[server_id]
def _parse_raw_members( self, leaderboard_name, members, members_only=False, **options): ''' Parse the raw leaders data as returned from a given leader board query. Do associative lookups with the member to rank, score and potentially sort the results. @param leaderboard_name [String] Name of the leaderboard. @param members [List] A list of members as returned from a sorted set range query @param members_only [bool] Set True to return the members as is, Default is False. @param options [Hash] Options to be used when retrieving the page from the named leaderboard. @return a list of members. ''' if members_only: return [{self.MEMBER_KEY: m} for m in members] if members: return self.ranked_in_list_in(leaderboard_name, members, **options) else: return []
Parse the raw leaders data as returned from a given leader board query. Do associative lookups with the member to rank, score and potentially sort the results. @param leaderboard_name [String] Name of the leaderboard. @param members [List] A list of members as returned from a sorted set range query @param members_only [bool] Set True to return the members as is, Default is False. @param options [Hash] Options to be used when retrieving the page from the named leaderboard. @return a list of members.
Below is the the instruction that describes the task: ### Input: Parse the raw leaders data as returned from a given leader board query. Do associative lookups with the member to rank, score and potentially sort the results. @param leaderboard_name [String] Name of the leaderboard. @param members [List] A list of members as returned from a sorted set range query @param members_only [bool] Set True to return the members as is, Default is False. @param options [Hash] Options to be used when retrieving the page from the named leaderboard. @return a list of members. ### Response: def _parse_raw_members( self, leaderboard_name, members, members_only=False, **options): ''' Parse the raw leaders data as returned from a given leader board query. Do associative lookups with the member to rank, score and potentially sort the results. @param leaderboard_name [String] Name of the leaderboard. @param members [List] A list of members as returned from a sorted set range query @param members_only [bool] Set True to return the members as is, Default is False. @param options [Hash] Options to be used when retrieving the page from the named leaderboard. @return a list of members. ''' if members_only: return [{self.MEMBER_KEY: m} for m in members] if members: return self.ranked_in_list_in(leaderboard_name, members, **options) else: return []
def _PrintVSSStoreIdentifiersOverview( self, volume_system, volume_identifiers): """Prints an overview of VSS store identifiers. Args: volume_system (dfvfs.VShadowVolumeSystem): volume system. volume_identifiers (list[str]): allowed volume identifiers. Raises: SourceScannerError: if a volume cannot be resolved from the volume identifier. """ header = 'The following Volume Shadow Snapshots (VSS) were found:\n' self._output_writer.Write(header) column_names = ['Identifier', 'Creation Time'] table_view = views.CLITabularTableView(column_names=column_names) for volume_identifier in volume_identifiers: volume = volume_system.GetVolumeByIdentifier(volume_identifier) if not volume: raise errors.SourceScannerError( 'Volume missing for identifier: {0:s}.'.format( volume_identifier)) volume_attribute = volume.GetAttribute('creation_time') filetime = dfdatetime_filetime.Filetime(timestamp=volume_attribute.value) creation_time = filetime.CopyToDateTimeString() if volume.HasExternalData(): creation_time = '{0:s}\tWARNING: data stored outside volume'.format( creation_time) table_view.AddRow([volume.identifier, creation_time]) self._output_writer.Write('\n') table_view.Write(self._output_writer) self._output_writer.Write('\n')
Prints an overview of VSS store identifiers. Args: volume_system (dfvfs.VShadowVolumeSystem): volume system. volume_identifiers (list[str]): allowed volume identifiers. Raises: SourceScannerError: if a volume cannot be resolved from the volume identifier.
Below is the the instruction that describes the task: ### Input: Prints an overview of VSS store identifiers. Args: volume_system (dfvfs.VShadowVolumeSystem): volume system. volume_identifiers (list[str]): allowed volume identifiers. Raises: SourceScannerError: if a volume cannot be resolved from the volume identifier. ### Response: def _PrintVSSStoreIdentifiersOverview( self, volume_system, volume_identifiers): """Prints an overview of VSS store identifiers. Args: volume_system (dfvfs.VShadowVolumeSystem): volume system. volume_identifiers (list[str]): allowed volume identifiers. Raises: SourceScannerError: if a volume cannot be resolved from the volume identifier. """ header = 'The following Volume Shadow Snapshots (VSS) were found:\n' self._output_writer.Write(header) column_names = ['Identifier', 'Creation Time'] table_view = views.CLITabularTableView(column_names=column_names) for volume_identifier in volume_identifiers: volume = volume_system.GetVolumeByIdentifier(volume_identifier) if not volume: raise errors.SourceScannerError( 'Volume missing for identifier: {0:s}.'.format( volume_identifier)) volume_attribute = volume.GetAttribute('creation_time') filetime = dfdatetime_filetime.Filetime(timestamp=volume_attribute.value) creation_time = filetime.CopyToDateTimeString() if volume.HasExternalData(): creation_time = '{0:s}\tWARNING: data stored outside volume'.format( creation_time) table_view.AddRow([volume.identifier, creation_time]) self._output_writer.Write('\n') table_view.Write(self._output_writer) self._output_writer.Write('\n')
def isolate_and_get(src_container, src_resources, local_dst_dir, **kwargs): """ Uses :func:`copy_resources` to copy resources from a container, but afterwards generates a compressed tarball and downloads it. :param src_container: Container name or id. :type src_container: unicode :param src_resources: Resources, as (file or directory) names to copy. :type src_resources: iterable :param local_dst_dir: Local directory to store the compressed tarball in. Can also be a file name; the default file name is ``container_<container name>.tar.gz``. :type local_dst_dir: unicode :param kwargs: Additional kwargs for :func:`copy_resources`. """ with temp_dir() as remote_tmp: copy_path = posixpath.join(remote_tmp, 'copy_tmp') archive_path = posixpath.join(remote_tmp, 'container_{0}.tar.gz'.format(src_container)) copy_resources(src_container, src_resources, copy_path, **kwargs) with cd(copy_path): sudo(targz(archive_path, '*')) get(archive_path, local_dst_dir)
Uses :func:`copy_resources` to copy resources from a container, but afterwards generates a compressed tarball and downloads it. :param src_container: Container name or id. :type src_container: unicode :param src_resources: Resources, as (file or directory) names to copy. :type src_resources: iterable :param local_dst_dir: Local directory to store the compressed tarball in. Can also be a file name; the default file name is ``container_<container name>.tar.gz``. :type local_dst_dir: unicode :param kwargs: Additional kwargs for :func:`copy_resources`.
Below is the the instruction that describes the task: ### Input: Uses :func:`copy_resources` to copy resources from a container, but afterwards generates a compressed tarball and downloads it. :param src_container: Container name or id. :type src_container: unicode :param src_resources: Resources, as (file or directory) names to copy. :type src_resources: iterable :param local_dst_dir: Local directory to store the compressed tarball in. Can also be a file name; the default file name is ``container_<container name>.tar.gz``. :type local_dst_dir: unicode :param kwargs: Additional kwargs for :func:`copy_resources`. ### Response: def isolate_and_get(src_container, src_resources, local_dst_dir, **kwargs): """ Uses :func:`copy_resources` to copy resources from a container, but afterwards generates a compressed tarball and downloads it. :param src_container: Container name or id. :type src_container: unicode :param src_resources: Resources, as (file or directory) names to copy. :type src_resources: iterable :param local_dst_dir: Local directory to store the compressed tarball in. Can also be a file name; the default file name is ``container_<container name>.tar.gz``. :type local_dst_dir: unicode :param kwargs: Additional kwargs for :func:`copy_resources`. """ with temp_dir() as remote_tmp: copy_path = posixpath.join(remote_tmp, 'copy_tmp') archive_path = posixpath.join(remote_tmp, 'container_{0}.tar.gz'.format(src_container)) copy_resources(src_container, src_resources, copy_path, **kwargs) with cd(copy_path): sudo(targz(archive_path, '*')) get(archive_path, local_dst_dir)
def is_cloned(cls): """ Let us know if we are currently in the cloned version of PyFunceble which implicitly mean that we are in developement mode. """ if not PyFunceble.path.isdir(".git"): # The git directory does not exist. # We return False, the current version is not the cloned version. return False # We list the list of file which can be found only in a cloned version. list_of_file = [ ".coveragerc", ".coveralls.yml", ".gitignore", ".PyFunceble_production.yaml", ".travis.yml", "CODE_OF_CONDUCT.md", "CONTRIBUTING.md", "dir_structure_production.json", "MANIFEST.in", "README.rst", "requirements.txt", "setup.py", "version.yaml", ] # We list the list of directory which can be found only in a cloned # version. list_of_dir = ["docs", "PyFunceble", "tests"] for file in list_of_file: # We loop through the list of file. if not PyFunceble.path.isfile(file): # The file does not exist in the current directory. # We return False, the current version is not the cloned version. return False # All required files exist in the current directory. for directory in list_of_dir: # We loop through the list of directory. if not PyFunceble.path.isdir(directory): # The directory does not exist in the current directory. # We return False, the current version is not the cloned version. return False # All required directories exist in the current directory. # We return True, the current version is a cloned version. return True
Let us know if we are currently in the cloned version of PyFunceble which implicitly mean that we are in developement mode.
Below is the the instruction that describes the task: ### Input: Let us know if we are currently in the cloned version of PyFunceble which implicitly mean that we are in developement mode. ### Response: def is_cloned(cls): """ Let us know if we are currently in the cloned version of PyFunceble which implicitly mean that we are in developement mode. """ if not PyFunceble.path.isdir(".git"): # The git directory does not exist. # We return False, the current version is not the cloned version. return False # We list the list of file which can be found only in a cloned version. list_of_file = [ ".coveragerc", ".coveralls.yml", ".gitignore", ".PyFunceble_production.yaml", ".travis.yml", "CODE_OF_CONDUCT.md", "CONTRIBUTING.md", "dir_structure_production.json", "MANIFEST.in", "README.rst", "requirements.txt", "setup.py", "version.yaml", ] # We list the list of directory which can be found only in a cloned # version. list_of_dir = ["docs", "PyFunceble", "tests"] for file in list_of_file: # We loop through the list of file. if not PyFunceble.path.isfile(file): # The file does not exist in the current directory. # We return False, the current version is not the cloned version. return False # All required files exist in the current directory. for directory in list_of_dir: # We loop through the list of directory. if not PyFunceble.path.isdir(directory): # The directory does not exist in the current directory. # We return False, the current version is not the cloned version. return False # All required directories exist in the current directory. # We return True, the current version is a cloned version. return True
def longest_utterances_per_container(self): """ Return a tuple/list containing the length of the longest utterance of ever container. """ lengths = [] for cnt in self.containers: longest_in_container = 0 for utt_idx in self.utt_ids: utt_length = cnt._file[utt_idx].shape[0] longest_in_container = max(utt_length, longest_in_container) lengths.append(longest_in_container) return lengths
Return a tuple/list containing the length of the longest utterance of ever container.
Below is the the instruction that describes the task: ### Input: Return a tuple/list containing the length of the longest utterance of ever container. ### Response: def longest_utterances_per_container(self): """ Return a tuple/list containing the length of the longest utterance of ever container. """ lengths = [] for cnt in self.containers: longest_in_container = 0 for utt_idx in self.utt_ids: utt_length = cnt._file[utt_idx].shape[0] longest_in_container = max(utt_length, longest_in_container) lengths.append(longest_in_container) return lengths
def ones_matrix_band_part(rows, cols, num_lower, num_upper, out_shape=None): """Matrix band part of ones. Args: rows: int determining number of rows in output cols: int num_lower: int, maximum distance backward. Negative values indicate unlimited. num_upper: int, maximum distance forward. Negative values indicate unlimited. out_shape: shape to reshape output by. Returns: Tensor of size rows * cols reshaped into shape out_shape. """ if all([isinstance(el, int) for el in [rows, cols, num_lower, num_upper]]): # Needed info is constant, so we construct in numpy if num_lower < 0: num_lower = rows - 1 if num_upper < 0: num_upper = cols - 1 lower_mask = np.tri(cols, rows, num_lower).T upper_mask = np.tri(rows, cols, num_upper) band = np.ones((rows, cols)) * lower_mask * upper_mask if out_shape: band = band.reshape(out_shape) band = tf.constant(band, tf.float32) else: band = tf.matrix_band_part( tf.ones([rows, cols]), tf.cast(num_lower, tf.int64), tf.cast(num_upper, tf.int64)) if out_shape: band = tf.reshape(band, out_shape) return band
Matrix band part of ones. Args: rows: int determining number of rows in output cols: int num_lower: int, maximum distance backward. Negative values indicate unlimited. num_upper: int, maximum distance forward. Negative values indicate unlimited. out_shape: shape to reshape output by. Returns: Tensor of size rows * cols reshaped into shape out_shape.
Below is the the instruction that describes the task: ### Input: Matrix band part of ones. Args: rows: int determining number of rows in output cols: int num_lower: int, maximum distance backward. Negative values indicate unlimited. num_upper: int, maximum distance forward. Negative values indicate unlimited. out_shape: shape to reshape output by. Returns: Tensor of size rows * cols reshaped into shape out_shape. ### Response: def ones_matrix_band_part(rows, cols, num_lower, num_upper, out_shape=None): """Matrix band part of ones. Args: rows: int determining number of rows in output cols: int num_lower: int, maximum distance backward. Negative values indicate unlimited. num_upper: int, maximum distance forward. Negative values indicate unlimited. out_shape: shape to reshape output by. Returns: Tensor of size rows * cols reshaped into shape out_shape. """ if all([isinstance(el, int) for el in [rows, cols, num_lower, num_upper]]): # Needed info is constant, so we construct in numpy if num_lower < 0: num_lower = rows - 1 if num_upper < 0: num_upper = cols - 1 lower_mask = np.tri(cols, rows, num_lower).T upper_mask = np.tri(rows, cols, num_upper) band = np.ones((rows, cols)) * lower_mask * upper_mask if out_shape: band = band.reshape(out_shape) band = tf.constant(band, tf.float32) else: band = tf.matrix_band_part( tf.ones([rows, cols]), tf.cast(num_lower, tf.int64), tf.cast(num_upper, tf.int64)) if out_shape: band = tf.reshape(band, out_shape) return band
def load(self, configuration): """ Load a YAML configuration file. :param configuration: Configuration filename or YAML string """ try: self.config = yaml.load(open(configuration, "rb")) except IOError: try: self.config = yaml.load(configuration) except ParserError, e: raise ParserError('Error parsing config: %s' % e) # put customer data into self.customer if isinstance(self.config, dict): self.customer = self.config.get('customer', {}) self.instances_dict = self.config.get('instances', {}) self.web2py_dir = self.config.get('web2py', None) self.api_type = self.config.get('api_type', 'jsonrpc') self.valid = True else: self.customer = {} self.instances_dict = {} self.web2py_dir = None self.valid = False
Load a YAML configuration file. :param configuration: Configuration filename or YAML string
Below is the the instruction that describes the task: ### Input: Load a YAML configuration file. :param configuration: Configuration filename or YAML string ### Response: def load(self, configuration): """ Load a YAML configuration file. :param configuration: Configuration filename or YAML string """ try: self.config = yaml.load(open(configuration, "rb")) except IOError: try: self.config = yaml.load(configuration) except ParserError, e: raise ParserError('Error parsing config: %s' % e) # put customer data into self.customer if isinstance(self.config, dict): self.customer = self.config.get('customer', {}) self.instances_dict = self.config.get('instances', {}) self.web2py_dir = self.config.get('web2py', None) self.api_type = self.config.get('api_type', 'jsonrpc') self.valid = True else: self.customer = {} self.instances_dict = {} self.web2py_dir = None self.valid = False
def patch_namespaced_service(self, name, namespace, body, **kwargs): # noqa: E501 """patch_namespaced_service # noqa: E501 partially update the specified Service # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_service(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Service (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param UNKNOWN_BASE_TYPE body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Service If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_service_with_http_info(name, namespace, body, **kwargs) # noqa: E501 else: (data) = self.patch_namespaced_service_with_http_info(name, namespace, body, **kwargs) # noqa: E501 return data
patch_namespaced_service # noqa: E501 partially update the specified Service # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_service(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Service (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param UNKNOWN_BASE_TYPE body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Service If the method is called asynchronously, returns the request thread.
Below is the the instruction that describes the task: ### Input: patch_namespaced_service # noqa: E501 partially update the specified Service # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_service(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Service (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param UNKNOWN_BASE_TYPE body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Service If the method is called asynchronously, returns the request thread. ### Response: def patch_namespaced_service(self, name, namespace, body, **kwargs): # noqa: E501 """patch_namespaced_service # noqa: E501 partially update the specified Service # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_service(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Service (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param UNKNOWN_BASE_TYPE body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed :return: V1Service If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_service_with_http_info(name, namespace, body, **kwargs) # noqa: E501 else: (data) = self.patch_namespaced_service_with_http_info(name, namespace, body, **kwargs) # noqa: E501 return data
def get_name_from_name_hash128( self, name ): """ Get the name from a name hash """ cur = self.db.cursor() name = namedb_get_name_from_name_hash128( cur, name, self.lastblock ) return name
Get the name from a name hash
Below is the the instruction that describes the task: ### Input: Get the name from a name hash ### Response: def get_name_from_name_hash128( self, name ): """ Get the name from a name hash """ cur = self.db.cursor() name = namedb_get_name_from_name_hash128( cur, name, self.lastblock ) return name
def _parse_rmw_row_response(row_response): """Parses the response to a ``ReadModifyWriteRow`` request. :type row_response: :class:`.data_v2_pb2.Row` :param row_response: The response row (with only modified cells) from a ``ReadModifyWriteRow`` request. :rtype: dict :returns: The new contents of all modified cells. Returned as a dictionary of column families, each of which holds a dictionary of columns. Each column contains a list of cells modified. Each cell is represented with a two-tuple with the value (in bytes) and the timestamp for the cell. For example: .. code:: python { u'col-fam-id': { b'col-name1': [ (b'cell-val', datetime.datetime(...)), (b'cell-val-newer', datetime.datetime(...)), ], b'col-name2': [ (b'altcol-cell-val', datetime.datetime(...)), ], }, u'col-fam-id2': { b'col-name3-but-other-fam': [ (b'foo', datetime.datetime(...)), ], }, } """ result = {} for column_family in row_response.row.families: column_family_id, curr_family = _parse_family_pb(column_family) result[column_family_id] = curr_family return result
Parses the response to a ``ReadModifyWriteRow`` request. :type row_response: :class:`.data_v2_pb2.Row` :param row_response: The response row (with only modified cells) from a ``ReadModifyWriteRow`` request. :rtype: dict :returns: The new contents of all modified cells. Returned as a dictionary of column families, each of which holds a dictionary of columns. Each column contains a list of cells modified. Each cell is represented with a two-tuple with the value (in bytes) and the timestamp for the cell. For example: .. code:: python { u'col-fam-id': { b'col-name1': [ (b'cell-val', datetime.datetime(...)), (b'cell-val-newer', datetime.datetime(...)), ], b'col-name2': [ (b'altcol-cell-val', datetime.datetime(...)), ], }, u'col-fam-id2': { b'col-name3-but-other-fam': [ (b'foo', datetime.datetime(...)), ], }, }
Below is the the instruction that describes the task: ### Input: Parses the response to a ``ReadModifyWriteRow`` request. :type row_response: :class:`.data_v2_pb2.Row` :param row_response: The response row (with only modified cells) from a ``ReadModifyWriteRow`` request. :rtype: dict :returns: The new contents of all modified cells. Returned as a dictionary of column families, each of which holds a dictionary of columns. Each column contains a list of cells modified. Each cell is represented with a two-tuple with the value (in bytes) and the timestamp for the cell. For example: .. code:: python { u'col-fam-id': { b'col-name1': [ (b'cell-val', datetime.datetime(...)), (b'cell-val-newer', datetime.datetime(...)), ], b'col-name2': [ (b'altcol-cell-val', datetime.datetime(...)), ], }, u'col-fam-id2': { b'col-name3-but-other-fam': [ (b'foo', datetime.datetime(...)), ], }, } ### Response: def _parse_rmw_row_response(row_response): """Parses the response to a ``ReadModifyWriteRow`` request. :type row_response: :class:`.data_v2_pb2.Row` :param row_response: The response row (with only modified cells) from a ``ReadModifyWriteRow`` request. :rtype: dict :returns: The new contents of all modified cells. Returned as a dictionary of column families, each of which holds a dictionary of columns. Each column contains a list of cells modified. Each cell is represented with a two-tuple with the value (in bytes) and the timestamp for the cell. For example: .. code:: python { u'col-fam-id': { b'col-name1': [ (b'cell-val', datetime.datetime(...)), (b'cell-val-newer', datetime.datetime(...)), ], b'col-name2': [ (b'altcol-cell-val', datetime.datetime(...)), ], }, u'col-fam-id2': { b'col-name3-but-other-fam': [ (b'foo', datetime.datetime(...)), ], }, } """ result = {} for column_family in row_response.row.families: column_family_id, curr_family = _parse_family_pb(column_family) result[column_family_id] = curr_family return result
def _set(self): """Called internally by Client to indicate this request has finished""" self.__event.set() if self._complete_func: self.__run_completion_func(self._complete_func, self.id_)
Called internally by Client to indicate this request has finished
Below is the the instruction that describes the task: ### Input: Called internally by Client to indicate this request has finished ### Response: def _set(self): """Called internally by Client to indicate this request has finished""" self.__event.set() if self._complete_func: self.__run_completion_func(self._complete_func, self.id_)
def parallel_graph_evaluation(data, adj_matrix, nb_runs=16, nb_jobs=None, **kwargs): """Parallelize the various runs of CGNN to evaluate a graph.""" nb_jobs = SETTINGS.get_default(nb_jobs=nb_jobs) if nb_runs == 1: return graph_evaluation(data, adj_matrix, **kwargs) else: output = Parallel(n_jobs=nb_jobs)(delayed(graph_evaluation)(data, adj_matrix, idx=run, gpu_id=run % SETTINGS.GPU, **kwargs) for run in range(nb_runs)) return np.mean(output)
Parallelize the various runs of CGNN to evaluate a graph.
Below is the the instruction that describes the task: ### Input: Parallelize the various runs of CGNN to evaluate a graph. ### Response: def parallel_graph_evaluation(data, adj_matrix, nb_runs=16, nb_jobs=None, **kwargs): """Parallelize the various runs of CGNN to evaluate a graph.""" nb_jobs = SETTINGS.get_default(nb_jobs=nb_jobs) if nb_runs == 1: return graph_evaluation(data, adj_matrix, **kwargs) else: output = Parallel(n_jobs=nb_jobs)(delayed(graph_evaluation)(data, adj_matrix, idx=run, gpu_id=run % SETTINGS.GPU, **kwargs) for run in range(nb_runs)) return np.mean(output)
def writeGlyph(self, name, unicodes=None, location=None, masters=None, note=None, mute=False, ): """ Add a new glyph to the current instance. * name: the glyph name. Required. * unicodes: unicode values for this glyph if it needs to be different from the unicode values associated with this glyph name in the masters. * location: a design space location for this glyph if it needs to be different from the instance location. * masters: a list of masters and locations for this glyph if they need to be different from the masters specified for this instance. * note: a note for this glyph * mute: if this glyph is muted. None of the other attributes matter if this one is true. """ if self.currentInstance is None: return glyphElement = ET.Element('glyph') if mute: glyphElement.attrib['mute'] = "1" if unicodes is not None: glyphElement.attrib['unicode'] = " ".join([hex(u) for u in unicodes]) if location is not None: locationElement = self._makeLocationElement(location) glyphElement.append(locationElement) if name is not None: glyphElement.attrib['name'] = name if note is not None: noteElement = ET.Element('note') noteElement.text = note glyphElement.append(noteElement) if masters is not None: mastersElement = ET.Element("masters") for glyphName, masterName, location in masters: masterElement = ET.Element("master") if glyphName is not None: masterElement.attrib['glyphname'] = glyphName masterElement.attrib['source'] = masterName if location is not None: locationElement = self._makeLocationElement(location) masterElement.append(locationElement) mastersElement.append(masterElement) glyphElement.append(mastersElement) if self.currentInstance.findall('.glyphs') == []: glyphsElement = ET.Element('glyphs') self.currentInstance.append(glyphsElement) else: glyphsElement = self.currentInstance.findall('.glyphs')[0] glyphsElement.append(glyphElement)
Add a new glyph to the current instance. * name: the glyph name. Required. * unicodes: unicode values for this glyph if it needs to be different from the unicode values associated with this glyph name in the masters. * location: a design space location for this glyph if it needs to be different from the instance location. * masters: a list of masters and locations for this glyph if they need to be different from the masters specified for this instance. * note: a note for this glyph * mute: if this glyph is muted. None of the other attributes matter if this one is true.
Below is the the instruction that describes the task: ### Input: Add a new glyph to the current instance. * name: the glyph name. Required. * unicodes: unicode values for this glyph if it needs to be different from the unicode values associated with this glyph name in the masters. * location: a design space location for this glyph if it needs to be different from the instance location. * masters: a list of masters and locations for this glyph if they need to be different from the masters specified for this instance. * note: a note for this glyph * mute: if this glyph is muted. None of the other attributes matter if this one is true. ### Response: def writeGlyph(self, name, unicodes=None, location=None, masters=None, note=None, mute=False, ): """ Add a new glyph to the current instance. * name: the glyph name. Required. * unicodes: unicode values for this glyph if it needs to be different from the unicode values associated with this glyph name in the masters. * location: a design space location for this glyph if it needs to be different from the instance location. * masters: a list of masters and locations for this glyph if they need to be different from the masters specified for this instance. * note: a note for this glyph * mute: if this glyph is muted. None of the other attributes matter if this one is true. 
""" if self.currentInstance is None: return glyphElement = ET.Element('glyph') if mute: glyphElement.attrib['mute'] = "1" if unicodes is not None: glyphElement.attrib['unicode'] = " ".join([hex(u) for u in unicodes]) if location is not None: locationElement = self._makeLocationElement(location) glyphElement.append(locationElement) if name is not None: glyphElement.attrib['name'] = name if note is not None: noteElement = ET.Element('note') noteElement.text = note glyphElement.append(noteElement) if masters is not None: mastersElement = ET.Element("masters") for glyphName, masterName, location in masters: masterElement = ET.Element("master") if glyphName is not None: masterElement.attrib['glyphname'] = glyphName masterElement.attrib['source'] = masterName if location is not None: locationElement = self._makeLocationElement(location) masterElement.append(locationElement) mastersElement.append(masterElement) glyphElement.append(mastersElement) if self.currentInstance.findall('.glyphs') == []: glyphsElement = ET.Element('glyphs') self.currentInstance.append(glyphsElement) else: glyphsElement = self.currentInstance.findall('.glyphs')[0] glyphsElement.append(glyphElement)
def _find_conflicts_between_sub_selection_sets( context, # type: ValidationContext cached_fields_and_fragment_names, # type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]] compared_fragments, # type: PairSet are_mutually_exclusive, # type: bool parent_type1, # type: Union[GraphQLInterfaceType, GraphQLObjectType, None] selection_set1, # type: SelectionSet parent_type2, # type: Union[GraphQLInterfaceType, GraphQLObjectType, None] selection_set2, # type: SelectionSet ): # type: (...) -> List[Tuple[Tuple[str, str], List[Node], List[Node]]] """Find all conflicts found between two selection sets. Includes those found via spreading in fragments. Called when determining if conflicts exist between the sub-fields of two overlapping fields. """ conflicts = [] # type: List[Tuple[Tuple[str, str], List[Node], List[Node]]] field_map1, fragment_names1 = _get_fields_and_fragments_names( context, cached_fields_and_fragment_names, parent_type1, selection_set1 ) field_map2, fragment_names2 = _get_fields_and_fragments_names( context, cached_fields_and_fragment_names, parent_type2, selection_set2 ) # (H) First, collect all conflicts between these two collections of field. _collect_conflicts_between( context, conflicts, cached_fields_and_fragment_names, compared_fragments, are_mutually_exclusive, field_map1, field_map2, ) # (I) Then collect conflicts between the first collection of fields and # those referenced by each fragment name associated with the second. for fragment_name2 in fragment_names2: _collect_conflicts_between_fields_and_fragment( context, conflicts, cached_fields_and_fragment_names, compared_fragments, are_mutually_exclusive, field_map1, fragment_name2, ) # (I) Then collect conflicts between the second collection of fields and # those referenced by each fragment name associated with the first. 
for fragment_name1 in fragment_names1: _collect_conflicts_between_fields_and_fragment( context, conflicts, cached_fields_and_fragment_names, compared_fragments, are_mutually_exclusive, field_map2, fragment_name1, ) # (J) Also collect conflicts between any fragment names by the first and # fragment names by the second. This compares each item in the first set of # names to each item in the second set of names. for fragment_name1 in fragment_names1: for fragment_name2 in fragment_names2: _collect_conflicts_between_fragments( context, conflicts, cached_fields_and_fragment_names, compared_fragments, are_mutually_exclusive, fragment_name1, fragment_name2, ) return conflicts
Find all conflicts found between two selection sets. Includes those found via spreading in fragments. Called when determining if conflicts exist between the sub-fields of two overlapping fields.
Below is the the instruction that describes the task: ### Input: Find all conflicts found between two selection sets. Includes those found via spreading in fragments. Called when determining if conflicts exist between the sub-fields of two overlapping fields. ### Response: def _find_conflicts_between_sub_selection_sets( context, # type: ValidationContext cached_fields_and_fragment_names, # type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]] compared_fragments, # type: PairSet are_mutually_exclusive, # type: bool parent_type1, # type: Union[GraphQLInterfaceType, GraphQLObjectType, None] selection_set1, # type: SelectionSet parent_type2, # type: Union[GraphQLInterfaceType, GraphQLObjectType, None] selection_set2, # type: SelectionSet ): # type: (...) -> List[Tuple[Tuple[str, str], List[Node], List[Node]]] """Find all conflicts found between two selection sets. Includes those found via spreading in fragments. Called when determining if conflicts exist between the sub-fields of two overlapping fields. """ conflicts = [] # type: List[Tuple[Tuple[str, str], List[Node], List[Node]]] field_map1, fragment_names1 = _get_fields_and_fragments_names( context, cached_fields_and_fragment_names, parent_type1, selection_set1 ) field_map2, fragment_names2 = _get_fields_and_fragments_names( context, cached_fields_and_fragment_names, parent_type2, selection_set2 ) # (H) First, collect all conflicts between these two collections of field. _collect_conflicts_between( context, conflicts, cached_fields_and_fragment_names, compared_fragments, are_mutually_exclusive, field_map1, field_map2, ) # (I) Then collect conflicts between the first collection of fields and # those referenced by each fragment name associated with the second. 
for fragment_name2 in fragment_names2: _collect_conflicts_between_fields_and_fragment( context, conflicts, cached_fields_and_fragment_names, compared_fragments, are_mutually_exclusive, field_map1, fragment_name2, ) # (I) Then collect conflicts between the second collection of fields and # those referenced by each fragment name associated with the first. for fragment_name1 in fragment_names1: _collect_conflicts_between_fields_and_fragment( context, conflicts, cached_fields_and_fragment_names, compared_fragments, are_mutually_exclusive, field_map2, fragment_name1, ) # (J) Also collect conflicts between any fragment names by the first and # fragment names by the second. This compares each item in the first set of # names to each item in the second set of names. for fragment_name1 in fragment_names1: for fragment_name2 in fragment_names2: _collect_conflicts_between_fragments( context, conflicts, cached_fields_and_fragment_names, compared_fragments, are_mutually_exclusive, fragment_name1, fragment_name2, ) return conflicts
def build_event_connections(self, component, runnable, structure): """ Adds event connections to a runnable component based on the structure specifications in the component model. @param component: Component model containing structure specifications. @type component: lems.model.component.FatComponent @param runnable: Runnable component to which structure is to be added. @type runnable: lems.sim.runnable.Runnable @param structure: The structure object to be used to add structure code in the runnable component. @type structure: lems.model.structure.Structure """ if self.debug: print("\n++++++++ Calling build_event_connections of %s with runnable %s, parent %s"%(component.id, runnable.id, runnable.parent)) # Process event connections for ec in structure.event_connections: if self.debug: print(ec.toxml()) source = runnable.parent.resolve_path(ec.from_) target = runnable.parent.resolve_path(ec.to) if ec.receiver: receiver_template = self.build_runnable(ec.receiver, target) #receiver = copy.deepcopy(receiver_template) receiver = receiver_template.copy() receiver.id = "{0}__{1}__".format(component.id, receiver_template.id) if ec.receiver_container: target.add_attachment(receiver, ec.receiver_container) target.add_child(receiver_template.id, receiver) target = receiver else: source = runnable.resolve_path(ec.from_) target = runnable.resolve_path(ec.to) source_port = ec.source_port target_port = ec.target_port if not source_port: if len(source.event_out_ports) == 1: source_port = source.event_out_ports[0] else: raise SimBuildError(("No source event port " "uniquely identifiable" " in '{0}'").format(source.id)) if not target_port: if len(target.event_in_ports) == 1: target_port = target.event_in_ports[0] else: raise SimBuildError(("No destination event port " "uniquely identifiable " "in '{0}'").format(target)) if self.debug: print("register_event_out_callback\n Source: %s, %s (port: %s) \n -> %s, %s (port: %s)"%(source, id(source), source_port, target, id(target), 
target_port)) source.register_event_out_callback(\ source_port, lambda: target.inc_event_in(target_port))
Adds event connections to a runnable component based on the structure specifications in the component model. @param component: Component model containing structure specifications. @type component: lems.model.component.FatComponent @param runnable: Runnable component to which structure is to be added. @type runnable: lems.sim.runnable.Runnable @param structure: The structure object to be used to add structure code in the runnable component. @type structure: lems.model.structure.Structure
Below is the the instruction that describes the task: ### Input: Adds event connections to a runnable component based on the structure specifications in the component model. @param component: Component model containing structure specifications. @type component: lems.model.component.FatComponent @param runnable: Runnable component to which structure is to be added. @type runnable: lems.sim.runnable.Runnable @param structure: The structure object to be used to add structure code in the runnable component. @type structure: lems.model.structure.Structure ### Response: def build_event_connections(self, component, runnable, structure): """ Adds event connections to a runnable component based on the structure specifications in the component model. @param component: Component model containing structure specifications. @type component: lems.model.component.FatComponent @param runnable: Runnable component to which structure is to be added. @type runnable: lems.sim.runnable.Runnable @param structure: The structure object to be used to add structure code in the runnable component. 
@type structure: lems.model.structure.Structure """ if self.debug: print("\n++++++++ Calling build_event_connections of %s with runnable %s, parent %s"%(component.id, runnable.id, runnable.parent)) # Process event connections for ec in structure.event_connections: if self.debug: print(ec.toxml()) source = runnable.parent.resolve_path(ec.from_) target = runnable.parent.resolve_path(ec.to) if ec.receiver: receiver_template = self.build_runnable(ec.receiver, target) #receiver = copy.deepcopy(receiver_template) receiver = receiver_template.copy() receiver.id = "{0}__{1}__".format(component.id, receiver_template.id) if ec.receiver_container: target.add_attachment(receiver, ec.receiver_container) target.add_child(receiver_template.id, receiver) target = receiver else: source = runnable.resolve_path(ec.from_) target = runnable.resolve_path(ec.to) source_port = ec.source_port target_port = ec.target_port if not source_port: if len(source.event_out_ports) == 1: source_port = source.event_out_ports[0] else: raise SimBuildError(("No source event port " "uniquely identifiable" " in '{0}'").format(source.id)) if not target_port: if len(target.event_in_ports) == 1: target_port = target.event_in_ports[0] else: raise SimBuildError(("No destination event port " "uniquely identifiable " "in '{0}'").format(target)) if self.debug: print("register_event_out_callback\n Source: %s, %s (port: %s) \n -> %s, %s (port: %s)"%(source, id(source), source_port, target, id(target), target_port)) source.register_event_out_callback(\ source_port, lambda: target.inc_event_in(target_port))
def get_report(report=None): """Returns details of a specific report """ if not report: report = list_reports()[-1:][0] report_path = _get_reports_path(report) report_dict = {"report": report} for filename in os.listdir(report_path): with open(os.path.join(report_path, filename), "r") as f: report_dict[filename] = f.read() return report_dict
Returns details of a specific report
Below is the the instruction that describes the task: ### Input: Returns details of a specific report ### Response: def get_report(report=None): """Returns details of a specific report """ if not report: report = list_reports()[-1:][0] report_path = _get_reports_path(report) report_dict = {"report": report} for filename in os.listdir(report_path): with open(os.path.join(report_path, filename), "r") as f: report_dict[filename] = f.read() return report_dict
def get_mod_log(self, subreddit, mod=None, action=None, *args, **kwargs): """Return a get_content generator for moderation log items. :param subreddit: Either a Subreddit object or the name of the subreddit to return the modlog for. :param mod: If given, only return the actions made by this moderator. Both a moderator name or Redditor object can be used here. :param action: If given, only return entries for the specified action. The additional parameters are passed directly into :meth:`.get_content`. Note: the `url` parameter cannot be altered. """ params = kwargs.setdefault('params', {}) if mod is not None: params['mod'] = six.text_type(mod) if action is not None: params['type'] = six.text_type(action) url = self.config['modlog'].format(subreddit=six.text_type(subreddit)) return self.get_content(url, *args, **kwargs)
Return a get_content generator for moderation log items. :param subreddit: Either a Subreddit object or the name of the subreddit to return the modlog for. :param mod: If given, only return the actions made by this moderator. Both a moderator name or Redditor object can be used here. :param action: If given, only return entries for the specified action. The additional parameters are passed directly into :meth:`.get_content`. Note: the `url` parameter cannot be altered.
Below is the the instruction that describes the task: ### Input: Return a get_content generator for moderation log items. :param subreddit: Either a Subreddit object or the name of the subreddit to return the modlog for. :param mod: If given, only return the actions made by this moderator. Both a moderator name or Redditor object can be used here. :param action: If given, only return entries for the specified action. The additional parameters are passed directly into :meth:`.get_content`. Note: the `url` parameter cannot be altered. ### Response: def get_mod_log(self, subreddit, mod=None, action=None, *args, **kwargs): """Return a get_content generator for moderation log items. :param subreddit: Either a Subreddit object or the name of the subreddit to return the modlog for. :param mod: If given, only return the actions made by this moderator. Both a moderator name or Redditor object can be used here. :param action: If given, only return entries for the specified action. The additional parameters are passed directly into :meth:`.get_content`. Note: the `url` parameter cannot be altered. """ params = kwargs.setdefault('params', {}) if mod is not None: params['mod'] = six.text_type(mod) if action is not None: params['type'] = six.text_type(action) url = self.config['modlog'].format(subreddit=six.text_type(subreddit)) return self.get_content(url, *args, **kwargs)
def reduce(self, func, dim=None, keep_attrs=None, **kwargs): """Reduce the items in this group by applying `func` along the pre-defined resampling dimension. Parameters ---------- func : function Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of collapsing an np.ndarray over an integer valued axis. dim : str or sequence of str, optional Dimension(s) over which to apply `func`. keep_attrs : bool, optional If True, the datasets's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : Array Array with summarized data and the indicated dimension(s) removed. """ if dim == DEFAULT_DIMS: dim = None return super(DatasetResample, self).reduce( func, dim, keep_attrs, **kwargs)
Reduce the items in this group by applying `func` along the pre-defined resampling dimension. Parameters ---------- func : function Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of collapsing an np.ndarray over an integer valued axis. dim : str or sequence of str, optional Dimension(s) over which to apply `func`. keep_attrs : bool, optional If True, the datasets's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : Array Array with summarized data and the indicated dimension(s) removed.
Below is the the instruction that describes the task: ### Input: Reduce the items in this group by applying `func` along the pre-defined resampling dimension. Parameters ---------- func : function Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of collapsing an np.ndarray over an integer valued axis. dim : str or sequence of str, optional Dimension(s) over which to apply `func`. keep_attrs : bool, optional If True, the datasets's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : Array Array with summarized data and the indicated dimension(s) removed. ### Response: def reduce(self, func, dim=None, keep_attrs=None, **kwargs): """Reduce the items in this group by applying `func` along the pre-defined resampling dimension. Parameters ---------- func : function Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of collapsing an np.ndarray over an integer valued axis. dim : str or sequence of str, optional Dimension(s) over which to apply `func`. keep_attrs : bool, optional If True, the datasets's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : Array Array with summarized data and the indicated dimension(s) removed. """ if dim == DEFAULT_DIMS: dim = None return super(DatasetResample, self).reduce( func, dim, keep_attrs, **kwargs)
def detect_client_auth_request(server_handshake_bytes): """ Determines if a CertificateRequest message is sent from the server asking the client for a certificate :param server_handshake_bytes: A byte string of the handshake data received from the server :return: A boolean - if a client certificate request was found """ for record_type, _, record_data in parse_tls_records(server_handshake_bytes): if record_type != b'\x16': continue for message_type, message_data in parse_handshake_messages(record_data): if message_type == b'\x0d': return True return False
Determines if a CertificateRequest message is sent from the server asking the client for a certificate :param server_handshake_bytes: A byte string of the handshake data received from the server :return: A boolean - if a client certificate request was found
Below is the the instruction that describes the task: ### Input: Determines if a CertificateRequest message is sent from the server asking the client for a certificate :param server_handshake_bytes: A byte string of the handshake data received from the server :return: A boolean - if a client certificate request was found ### Response: def detect_client_auth_request(server_handshake_bytes): """ Determines if a CertificateRequest message is sent from the server asking the client for a certificate :param server_handshake_bytes: A byte string of the handshake data received from the server :return: A boolean - if a client certificate request was found """ for record_type, _, record_data in parse_tls_records(server_handshake_bytes): if record_type != b'\x16': continue for message_type, message_data in parse_handshake_messages(record_data): if message_type == b'\x0d': return True return False
def element_attribs_to_dict(self, element): """ Convert the ``.attrib`` attributes of an etree element into a dict, leaving out the xml:id attribute. Each key will be prepended by graph's namespace. """ return {self.ns+':'+key: val for (key, val) in element.attrib.items() if key != add_ns('id')}
Convert the ``.attrib`` attributes of an etree element into a dict, leaving out the xml:id attribute. Each key will be prepended by graph's namespace.
Below is the the instruction that describes the task: ### Input: Convert the ``.attrib`` attributes of an etree element into a dict, leaving out the xml:id attribute. Each key will be prepended by graph's namespace. ### Response: def element_attribs_to_dict(self, element): """ Convert the ``.attrib`` attributes of an etree element into a dict, leaving out the xml:id attribute. Each key will be prepended by graph's namespace. """ return {self.ns+':'+key: val for (key, val) in element.attrib.items() if key != add_ns('id')}
def report_changes(self, content): """ 1) Write changes in file, 2) Commit changes in git 3.1) If something changed, return tuple(True, changes) 3.2) If nothing changed, return tuple(False, None) If style is "verbose", return changes in human-friendly format, else use unified diff """ self.write(content) if self.commit(): return True, self.reporter.report() else: return False, None
1) Write changes in file, 2) Commit changes in git 3.1) If something changed, return tuple(True, changes) 3.2) If nothing changed, return tuple(False, None) If style is "verbose", return changes in human-friendly format, else use unified diff
Below is the the instruction that describes the task: ### Input: 1) Write changes in file, 2) Commit changes in git 3.1) If something changed, return tuple(True, changes) 3.2) If nothing changed, return tuple(False, None) If style is "verbose", return changes in human-friendly format, else use unified diff ### Response: def report_changes(self, content): """ 1) Write changes in file, 2) Commit changes in git 3.1) If something changed, return tuple(True, changes) 3.2) If nothing changed, return tuple(False, None) If style is "verbose", return changes in human-friendly format, else use unified diff """ self.write(content) if self.commit(): return True, self.reporter.report() else: return False, None
def maxchord(A, ve = None): """ Maximal chordal subgraph of sparsity graph. Returns a lower triangular sparse matrix which is the projection of :math:`A` on a maximal chordal subgraph and a perfect elimination order :math:`p`. Only the lower triangular part of :math:`A` is accessed. The optional argument `ve` is the index of the last vertex to be eliminated (the default value is `n-1`). If :math:`A` is chordal, then the matrix returned is equal to :math:`A`. :param A: :py:class:`spmatrix` :param ve: integer between 0 and `A.size[0]`-1 (optional) .. seealso:: P. M. Dearing, D. R. Shier, D. D. Warner, `Maximal chordal subgraphs <http://dx.doi.org/10.1016/0166-218X(88)90075-3>`_, Discrete Applied Mathematics, 20:3, 1988, pp. 181-190. """ n = A.size[0] assert A.size[1] == n, "A must be a square matrix" assert type(A) is spmatrix, "A must be a sparse matrix" if ve is None: ve = n-1 else: assert type(ve) is int and 0<=ve<n,\ "ve must be an integer between 0 and A.size[0]-1" As = symmetrize(A) cp,ri,val = As.CCS # permutation vector p = matrix(0,(n,1)) # weight array w = matrix(0,(n,1)) max_w = 0 S = [list(range(ve))+list(range(ve+1,n))+[ve]] + [[] for i in range(n-1)] C = [set() for i in range(n)] E = [[] for i in range(n)] # edge list V = [[] for i in range(n)] # num. values for i in range(n-1,-1,-1): # select next node to number while True: if len(S[max_w]) > 0: v = S[max_w].pop() if w[v] >= 0: break else: max_w -= 1 p[i] = v w[v] = -1 # set w[v] = -1 to mark that node v has been numbered # loop over unnumbered neighbors of node v for ii in range(cp[v],cp[v+1]): u = ri[ii] d = val[ii] if w[u] >= 0: if C[u].issubset(C[v]): C[u].update([v]) w[u] += 1 S[w[u]].append(u) # bump up u to S[w[u]] max_w = max(max_w,w[u]) # update max deg. E[min(u,v)].append(max(u,v)) V[min(u,v)].append(d) elif u == v: E[u].append(u) V[u].append(d) # build adjacency matrix of reordered max. 
chordal subgraph Am = spmatrix([d for d in chain.from_iterable(V)],[i for i in chain.from_iterable(E)],\ [i for i in chain.from_iterable([len(Ej)*[j] for j,Ej in enumerate(E)])],(n,n)) return Am,p
Maximal chordal subgraph of sparsity graph. Returns a lower triangular sparse matrix which is the projection of :math:`A` on a maximal chordal subgraph and a perfect elimination order :math:`p`. Only the lower triangular part of :math:`A` is accessed. The optional argument `ve` is the index of the last vertex to be eliminated (the default value is `n-1`). If :math:`A` is chordal, then the matrix returned is equal to :math:`A`. :param A: :py:class:`spmatrix` :param ve: integer between 0 and `A.size[0]`-1 (optional) .. seealso:: P. M. Dearing, D. R. Shier, D. D. Warner, `Maximal chordal subgraphs <http://dx.doi.org/10.1016/0166-218X(88)90075-3>`_, Discrete Applied Mathematics, 20:3, 1988, pp. 181-190.
Below is the the instruction that describes the task: ### Input: Maximal chordal subgraph of sparsity graph. Returns a lower triangular sparse matrix which is the projection of :math:`A` on a maximal chordal subgraph and a perfect elimination order :math:`p`. Only the lower triangular part of :math:`A` is accessed. The optional argument `ve` is the index of the last vertex to be eliminated (the default value is `n-1`). If :math:`A` is chordal, then the matrix returned is equal to :math:`A`. :param A: :py:class:`spmatrix` :param ve: integer between 0 and `A.size[0]`-1 (optional) .. seealso:: P. M. Dearing, D. R. Shier, D. D. Warner, `Maximal chordal subgraphs <http://dx.doi.org/10.1016/0166-218X(88)90075-3>`_, Discrete Applied Mathematics, 20:3, 1988, pp. 181-190. ### Response: def maxchord(A, ve = None): """ Maximal chordal subgraph of sparsity graph. Returns a lower triangular sparse matrix which is the projection of :math:`A` on a maximal chordal subgraph and a perfect elimination order :math:`p`. Only the lower triangular part of :math:`A` is accessed. The optional argument `ve` is the index of the last vertex to be eliminated (the default value is `n-1`). If :math:`A` is chordal, then the matrix returned is equal to :math:`A`. :param A: :py:class:`spmatrix` :param ve: integer between 0 and `A.size[0]`-1 (optional) .. seealso:: P. M. Dearing, D. R. Shier, D. D. Warner, `Maximal chordal subgraphs <http://dx.doi.org/10.1016/0166-218X(88)90075-3>`_, Discrete Applied Mathematics, 20:3, 1988, pp. 181-190. 
""" n = A.size[0] assert A.size[1] == n, "A must be a square matrix" assert type(A) is spmatrix, "A must be a sparse matrix" if ve is None: ve = n-1 else: assert type(ve) is int and 0<=ve<n,\ "ve must be an integer between 0 and A.size[0]-1" As = symmetrize(A) cp,ri,val = As.CCS # permutation vector p = matrix(0,(n,1)) # weight array w = matrix(0,(n,1)) max_w = 0 S = [list(range(ve))+list(range(ve+1,n))+[ve]] + [[] for i in range(n-1)] C = [set() for i in range(n)] E = [[] for i in range(n)] # edge list V = [[] for i in range(n)] # num. values for i in range(n-1,-1,-1): # select next node to number while True: if len(S[max_w]) > 0: v = S[max_w].pop() if w[v] >= 0: break else: max_w -= 1 p[i] = v w[v] = -1 # set w[v] = -1 to mark that node v has been numbered # loop over unnumbered neighbors of node v for ii in range(cp[v],cp[v+1]): u = ri[ii] d = val[ii] if w[u] >= 0: if C[u].issubset(C[v]): C[u].update([v]) w[u] += 1 S[w[u]].append(u) # bump up u to S[w[u]] max_w = max(max_w,w[u]) # update max deg. E[min(u,v)].append(max(u,v)) V[min(u,v)].append(d) elif u == v: E[u].append(u) V[u].append(d) # build adjacency matrix of reordered max. chordal subgraph Am = spmatrix([d for d in chain.from_iterable(V)],[i for i in chain.from_iterable(E)],\ [i for i in chain.from_iterable([len(Ej)*[j] for j,Ej in enumerate(E)])],(n,n)) return Am,p
def monitor_session_span_command_src_tengigabitethernet(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") monitor = ET.SubElement(config, "monitor", xmlns="urn:brocade.com:mgmt:brocade-span") session = ET.SubElement(monitor, "session") session_number_key = ET.SubElement(session, "session-number") session_number_key.text = kwargs.pop('session_number') span_command = ET.SubElement(session, "span-command") src_tengigabitethernet = ET.SubElement(span_command, "src-tengigabitethernet") src_tengigabitethernet.text = kwargs.pop('src_tengigabitethernet') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
Below is the the instruction that describes the task: ### Input: Auto Generated Code ### Response: def monitor_session_span_command_src_tengigabitethernet(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") monitor = ET.SubElement(config, "monitor", xmlns="urn:brocade.com:mgmt:brocade-span") session = ET.SubElement(monitor, "session") session_number_key = ET.SubElement(session, "session-number") session_number_key.text = kwargs.pop('session_number') span_command = ET.SubElement(session, "span-command") src_tengigabitethernet = ET.SubElement(span_command, "src-tengigabitethernet") src_tengigabitethernet.text = kwargs.pop('src_tengigabitethernet') callback = kwargs.pop('callback', self._callback) return callback(config)
def start_semester_view(request): """ Initiates a semester"s worth of workshift, with the option to copy workshift types from the previous semester. """ page_name = "Start Semester" year, season = utils.get_year_season() start_date, end_date = utils.get_semester_start_end(year, season) semester_form = SemesterForm( data=request.POST or None, initial={ "year": year, "season": season, "start_date": start_date.strftime(date_formats[0]), "end_date": end_date.strftime(date_formats[0]), }, prefix="semester", ) pool_forms = [] try: prev_semester = Semester.objects.latest("end_date") except Semester.DoesNotExist: pass else: pools = WorkshiftPool.objects.filter( semester=prev_semester, is_primary=False, ) for pool in pools: form = StartPoolForm( data=request.POST or None, initial={ "title": pool.title, "hours": pool.hours, }, prefix="pool-{}".format(pool.pk), ) pool_forms.append(form) if semester_form.is_valid() and all(i.is_valid() for i in pool_forms): # And save this semester semester = semester_form.save() for pool_form in pool_forms: pool_form.save(semester=semester) return HttpResponseRedirect(wurl("workshift:manage", sem_url=semester.sem_url)) return render_to_response("start_semester.html", { "page_name": page_name, "semester_form": semester_form, "pool_forms": pool_forms, }, context_instance=RequestContext(request))
Initiates a semester"s worth of workshift, with the option to copy workshift types from the previous semester.
Below is the the instruction that describes the task: ### Input: Initiates a semester"s worth of workshift, with the option to copy workshift types from the previous semester. ### Response: def start_semester_view(request): """ Initiates a semester"s worth of workshift, with the option to copy workshift types from the previous semester. """ page_name = "Start Semester" year, season = utils.get_year_season() start_date, end_date = utils.get_semester_start_end(year, season) semester_form = SemesterForm( data=request.POST or None, initial={ "year": year, "season": season, "start_date": start_date.strftime(date_formats[0]), "end_date": end_date.strftime(date_formats[0]), }, prefix="semester", ) pool_forms = [] try: prev_semester = Semester.objects.latest("end_date") except Semester.DoesNotExist: pass else: pools = WorkshiftPool.objects.filter( semester=prev_semester, is_primary=False, ) for pool in pools: form = StartPoolForm( data=request.POST or None, initial={ "title": pool.title, "hours": pool.hours, }, prefix="pool-{}".format(pool.pk), ) pool_forms.append(form) if semester_form.is_valid() and all(i.is_valid() for i in pool_forms): # And save this semester semester = semester_form.save() for pool_form in pool_forms: pool_form.save(semester=semester) return HttpResponseRedirect(wurl("workshift:manage", sem_url=semester.sem_url)) return render_to_response("start_semester.html", { "page_name": page_name, "semester_form": semester_form, "pool_forms": pool_forms, }, context_instance=RequestContext(request))
def get_opts(opts): """ Validate options and apply defaults for options not supplied. :param opts: dictionary mapping str->str. :return: dictionary mapping str->Opt. All possible keys are present. """ defaults = { 'board': None, 'terrain': Opt.random, 'numbers': Opt.preset, 'ports': Opt.preset, 'pieces': Opt.preset, 'players': Opt.preset, } _opts = defaults.copy() if opts is None: opts = dict() try: for key, val in opts.copy().items(): if key == 'board': # board is a string, not a regular opt, and gets special handling # in _read_tiles_from_string continue opts[key] = Opt(val) _opts.update(opts) except Exception: raise ValueError('Invalid options={}'.format(opts)) logging.debug('used defaults=\n{}\n on opts=\n{}\nreturned total opts=\n{}'.format( pprint.pformat(defaults), pprint.pformat(opts), pprint.pformat(_opts))) return _opts
Validate options and apply defaults for options not supplied. :param opts: dictionary mapping str->str. :return: dictionary mapping str->Opt. All possible keys are present.
Below is the the instruction that describes the task: ### Input: Validate options and apply defaults for options not supplied. :param opts: dictionary mapping str->str. :return: dictionary mapping str->Opt. All possible keys are present. ### Response: def get_opts(opts): """ Validate options and apply defaults for options not supplied. :param opts: dictionary mapping str->str. :return: dictionary mapping str->Opt. All possible keys are present. """ defaults = { 'board': None, 'terrain': Opt.random, 'numbers': Opt.preset, 'ports': Opt.preset, 'pieces': Opt.preset, 'players': Opt.preset, } _opts = defaults.copy() if opts is None: opts = dict() try: for key, val in opts.copy().items(): if key == 'board': # board is a string, not a regular opt, and gets special handling # in _read_tiles_from_string continue opts[key] = Opt(val) _opts.update(opts) except Exception: raise ValueError('Invalid options={}'.format(opts)) logging.debug('used defaults=\n{}\n on opts=\n{}\nreturned total opts=\n{}'.format( pprint.pformat(defaults), pprint.pformat(opts), pprint.pformat(_opts))) return _opts
def getOutputElementCount(self, name): """ Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`. """ if name == 'categoriesOut': return self.maxCategoryCount elif name == 'categoryProbabilitiesOut': return self.maxCategoryCount elif name == 'bestPrototypeIndices': return self._bestPrototypeIndexCount if self._bestPrototypeIndexCount else 0 else: raise Exception('Unknown output: ' + name)
Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`.
Below is the the instruction that describes the task: ### Input: Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`. ### Response: def getOutputElementCount(self, name): """ Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`. """ if name == 'categoriesOut': return self.maxCategoryCount elif name == 'categoryProbabilitiesOut': return self.maxCategoryCount elif name == 'bestPrototypeIndices': return self._bestPrototypeIndexCount if self._bestPrototypeIndexCount else 0 else: raise Exception('Unknown output: ' + name)
def setHolidayDates(self, cmd_dict=None, password="00000000"): """ Serial call to set holiday list. If a buffer dictionary is not supplied, the method will use the class object buffer populated with assignHolidayDate. Args: cmd_dict (dict): Optional dictionary of holidays. password (str): Optional password. Returns: bool: True on completion. """ result = False self.setContext("setHolidayDates") if not cmd_dict: cmd_dict = self.m_holiday_date_params try: if not self.request(False): self.writeCmdMsg("Bad read CRC on setting") else: if not self.serialCmdPwdAuth(password): self.writeCmdMsg("Password failure") else: req_table = "" req_table += binascii.hexlify(str(cmd_dict["Holiday_1_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_1_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_2_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_2_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_3_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_3_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_4_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_4_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_5_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_5_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_6_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_6_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_7_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_7_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_8_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_8_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_9_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_9_Day"]).zfill(2)) req_table += 
binascii.hexlify(str(cmd_dict["Holiday_10_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_10_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_11_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_11_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_12_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_12_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_13_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_13_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_14_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_14_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_15_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_15_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_16_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_16_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_17_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_17_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_18_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_18_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_19_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_19_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_20_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_20_Day"]).zfill(2)) req_str = "015731023030423028" + req_table + "2903" req_str += self.calc_crc16(req_str[2:].decode("hex")) self.m_serial_port.write(req_str.decode("hex")) if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06": self.writeCmdMsg("Success(setHolidayDates: 06 returned.") result = True self.serialPostEnd() except: ekm_log(traceback.format_exc(sys.exc_info())) self.setContext("") return 
result
Serial call to set holiday list. If a buffer dictionary is not supplied, the method will use the class object buffer populated with assignHolidayDate. Args: cmd_dict (dict): Optional dictionary of holidays. password (str): Optional password. Returns: bool: True on completion.
Below is the the instruction that describes the task: ### Input: Serial call to set holiday list. If a buffer dictionary is not supplied, the method will use the class object buffer populated with assignHolidayDate. Args: cmd_dict (dict): Optional dictionary of holidays. password (str): Optional password. Returns: bool: True on completion. ### Response: def setHolidayDates(self, cmd_dict=None, password="00000000"): """ Serial call to set holiday list. If a buffer dictionary is not supplied, the method will use the class object buffer populated with assignHolidayDate. Args: cmd_dict (dict): Optional dictionary of holidays. password (str): Optional password. Returns: bool: True on completion. """ result = False self.setContext("setHolidayDates") if not cmd_dict: cmd_dict = self.m_holiday_date_params try: if not self.request(False): self.writeCmdMsg("Bad read CRC on setting") else: if not self.serialCmdPwdAuth(password): self.writeCmdMsg("Password failure") else: req_table = "" req_table += binascii.hexlify(str(cmd_dict["Holiday_1_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_1_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_2_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_2_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_3_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_3_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_4_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_4_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_5_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_5_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_6_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_6_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_7_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_7_Day"]).zfill(2)) 
req_table += binascii.hexlify(str(cmd_dict["Holiday_8_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_8_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_9_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_9_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_10_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_10_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_11_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_11_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_12_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_12_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_13_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_13_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_14_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_14_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_15_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_15_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_16_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_16_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_17_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_17_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_18_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_18_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_19_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_19_Day"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_20_Month"]).zfill(2)) req_table += binascii.hexlify(str(cmd_dict["Holiday_20_Day"]).zfill(2)) req_str = "015731023030423028" + req_table + "2903" req_str += self.calc_crc16(req_str[2:].decode("hex")) 
self.m_serial_port.write(req_str.decode("hex")) if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06": self.writeCmdMsg("Success(setHolidayDates: 06 returned.") result = True self.serialPostEnd() except: ekm_log(traceback.format_exc(sys.exc_info())) self.setContext("") return result
def from_dict(cls, d): """ Reconstructs the SimplestChemenvStrategy object from a dict representation of the SimplestChemenvStrategy object created using the as_dict method. :param d: dict representation of the SimplestChemenvStrategy object :return: StructureEnvironments object """ return cls(distance_cutoff=d["distance_cutoff"], angle_cutoff=d["angle_cutoff"], additional_condition=d["additional_condition"], continuous_symmetry_measure_cutoff=d["continuous_symmetry_measure_cutoff"], symmetry_measure_type=d["symmetry_measure_type"])
Reconstructs the SimplestChemenvStrategy object from a dict representation of the SimplestChemenvStrategy object created using the as_dict method. :param d: dict representation of the SimplestChemenvStrategy object :return: StructureEnvironments object
Below is the the instruction that describes the task: ### Input: Reconstructs the SimplestChemenvStrategy object from a dict representation of the SimplestChemenvStrategy object created using the as_dict method. :param d: dict representation of the SimplestChemenvStrategy object :return: StructureEnvironments object ### Response: def from_dict(cls, d): """ Reconstructs the SimplestChemenvStrategy object from a dict representation of the SimplestChemenvStrategy object created using the as_dict method. :param d: dict representation of the SimplestChemenvStrategy object :return: StructureEnvironments object """ return cls(distance_cutoff=d["distance_cutoff"], angle_cutoff=d["angle_cutoff"], additional_condition=d["additional_condition"], continuous_symmetry_measure_cutoff=d["continuous_symmetry_measure_cutoff"], symmetry_measure_type=d["symmetry_measure_type"])
def tiles(self): """ Iterate over tile images of this layer This is an optimised generator function that returns (tile_x, tile_y, tile_image) tuples, :rtype: Generator :return: (x, y, image) tuples """ images = self.parent.images for x, y, gid in [i for i in self.iter_data() if i[2]]: yield x, y, images[gid]
Iterate over tile images of this layer This is an optimised generator function that returns (tile_x, tile_y, tile_image) tuples, :rtype: Generator :return: (x, y, image) tuples
Below is the the instruction that describes the task: ### Input: Iterate over tile images of this layer This is an optimised generator function that returns (tile_x, tile_y, tile_image) tuples, :rtype: Generator :return: (x, y, image) tuples ### Response: def tiles(self): """ Iterate over tile images of this layer This is an optimised generator function that returns (tile_x, tile_y, tile_image) tuples, :rtype: Generator :return: (x, y, image) tuples """ images = self.parent.images for x, y, gid in [i for i in self.iter_data() if i[2]]: yield x, y, images[gid]
def setup(self): """Get default configuration.""" self.allow = self.config['allow'] self.halt = self.config['halt'] self.skip = self.config['skip']
Get default configuration.
Below is the the instruction that describes the task: ### Input: Get default configuration. ### Response: def setup(self): """Get default configuration.""" self.allow = self.config['allow'] self.halt = self.config['halt'] self.skip = self.config['skip']
def remove_all_locks(self): """Removes all locks and ensures their content is written to disk.""" locks = list(self._locks.items()) locks.sort(key=lambda l: l[1].get_last_access()) for l in locks: self._remove_lock(l[0])
Removes all locks and ensures their content is written to disk.
Below is the the instruction that describes the task: ### Input: Removes all locks and ensures their content is written to disk. ### Response: def remove_all_locks(self): """Removes all locks and ensures their content is written to disk.""" locks = list(self._locks.items()) locks.sort(key=lambda l: l[1].get_last_access()) for l in locks: self._remove_lock(l[0])
def show_in_view(self, sourceview, matches, targetname=None): """ Show search result in ncurses view. """ append = self.options.append_view or self.options.alter_view == 'append' remove = self.options.alter_view == 'remove' action_name = ', appending to' if append else ', removing from' if remove else ' into' targetname = config.engine.show(matches, targetname or self.options.to_view or "rtcontrol", append=append, disjoin=remove) msg = "Filtered %d out of %d torrents using [ %s ]" % ( len(matches), sourceview.size(), sourceview.matcher) self.LOG.info("%s%s rTorrent view %r." % (msg, action_name, targetname)) config.engine.log(msg)
Show search result in ncurses view.
Below is the the instruction that describes the task: ### Input: Show search result in ncurses view. ### Response: def show_in_view(self, sourceview, matches, targetname=None): """ Show search result in ncurses view. """ append = self.options.append_view or self.options.alter_view == 'append' remove = self.options.alter_view == 'remove' action_name = ', appending to' if append else ', removing from' if remove else ' into' targetname = config.engine.show(matches, targetname or self.options.to_view or "rtcontrol", append=append, disjoin=remove) msg = "Filtered %d out of %d torrents using [ %s ]" % ( len(matches), sourceview.size(), sourceview.matcher) self.LOG.info("%s%s rTorrent view %r." % (msg, action_name, targetname)) config.engine.log(msg)