body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
56a0c97df9633746bffcd591ba27a59c87916a8652620a430570ff364dc0c92d
def get_loaded_plugins() -> Set[Plugin]: '\n :说明:\n\n 获取当前已导入的所有插件。\n\n :返回:\n\n - ``Set[Plugin]``\n ' return set(plugins.values())
:说明: 获取当前已导入的所有插件。 :返回: - ``Set[Plugin]``
nonebot/plugin/__init__.py
get_loaded_plugins
SK-415/nonebot2
1,757
python
def get_loaded_plugins() -> Set[Plugin]: '\n :说明:\n\n 获取当前已导入的所有插件。\n\n :返回:\n\n - ``Set[Plugin]``\n ' return set(plugins.values())
def get_loaded_plugins() -> Set[Plugin]: '\n :说明:\n\n 获取当前已导入的所有插件。\n\n :返回:\n\n - ``Set[Plugin]``\n ' return set(plugins.values())<|docstring|>:说明: 获取当前已导入的所有插件。 :返回: - ``Set[Plugin]``<|endoftext|>
384fce58d23c7fdbd7ceeb0187ae279a00929d0109e71b4bad8e6d526a2066c5
def require(name: str) -> Optional[Export]: '\n :说明:\n\n 获取一个插件的导出内容\n\n :参数:\n\n * ``name: str``: 插件名,与 ``load_plugin`` 参数一致。如果为 ``load_plugins`` 导入的插件,则为文件(夹)名。\n\n :返回:\n\n - ``Optional[Export]``\n ' plugin = (get_plugin(name) or load_plugin(name)) return (plugin.export if plugin else None)
:说明: 获取一个插件的导出内容 :参数: * ``name: str``: 插件名,与 ``load_plugin`` 参数一致。如果为 ``load_plugins`` 导入的插件,则为文件(夹)名。 :返回: - ``Optional[Export]``
nonebot/plugin/__init__.py
require
SK-415/nonebot2
1,757
python
def require(name: str) -> Optional[Export]: '\n :说明:\n\n 获取一个插件的导出内容\n\n :参数:\n\n * ``name: str``: 插件名,与 ``load_plugin`` 参数一致。如果为 ``load_plugins`` 导入的插件,则为文件(夹)名。\n\n :返回:\n\n - ``Optional[Export]``\n ' plugin = (get_plugin(name) or load_plugin(name)) return (plugin.export if plugin else None)
def require(name: str) -> Optional[Export]: '\n :说明:\n\n 获取一个插件的导出内容\n\n :参数:\n\n * ``name: str``: 插件名,与 ``load_plugin`` 参数一致。如果为 ``load_plugins`` 导入的插件,则为文件(夹)名。\n\n :返回:\n\n - ``Optional[Export]``\n ' plugin = (get_plugin(name) or load_plugin(name)) return (plugin.export if plugin else None)<|docstring|>:说明: 获取一个插件的导出内容 :参数: * ``name: str``: 插件名,与 ``load_plugin`` 参数一致。如果为 ``load_plugins`` 导入的插件,则为文件(夹)名。 :返回: - ``Optional[Export]``<|endoftext|>
022e034a79dcfd9ed3bb3a71e4dbbbe08574ef11bd41d12d0d5ec484c6bcdfff
@property def export(self) -> Export: '\n - **类型**: ``Export``\n - **说明**: 插件内定义的导出内容\n ' return getattr(self.module, '__export__', Export())
- **类型**: ``Export`` - **说明**: 插件内定义的导出内容
nonebot/plugin/__init__.py
export
SK-415/nonebot2
1,757
python
@property def export(self) -> Export: '\n - **类型**: ``Export``\n - **说明**: 插件内定义的导出内容\n ' return getattr(self.module, '__export__', Export())
@property def export(self) -> Export: '\n - **类型**: ``Export``\n - **说明**: 插件内定义的导出内容\n ' return getattr(self.module, '__export__', Export())<|docstring|>- **类型**: ``Export`` - **说明**: 插件内定义的导出内容<|endoftext|>
ce652caa00280b5eee0a89b56dcdfedc07771300338f97029e2aff460d419999
@property def matcher(self) -> Set[Type[Matcher]]: '\n - **类型**: ``Set[Type[Matcher]]``\n - **说明**: 插件内定义的 ``Matcher``\n ' return _plugin_matchers.get(self.name, set())
- **类型**: ``Set[Type[Matcher]]`` - **说明**: 插件内定义的 ``Matcher``
nonebot/plugin/__init__.py
matcher
SK-415/nonebot2
1,757
python
@property def matcher(self) -> Set[Type[Matcher]]: '\n - **类型**: ``Set[Type[Matcher]]``\n - **说明**: 插件内定义的 ``Matcher``\n ' return _plugin_matchers.get(self.name, set())
@property def matcher(self) -> Set[Type[Matcher]]: '\n - **类型**: ``Set[Type[Matcher]]``\n - **说明**: 插件内定义的 ``Matcher``\n ' return _plugin_matchers.get(self.name, set())<|docstring|>- **类型**: ``Set[Type[Matcher]]`` - **说明**: 插件内定义的 ``Matcher``<|endoftext|>
51d1d4117a066b6aa72f4a8b80340d4e1646d7467b791ebb772879101d073660
def __init__(self, cmd: Union[(str, Tuple[(str, ...)])], **kwargs): '\n :参数:\n\n * ``cmd: Union[str, Tuple[str, ...]]``: 命令前缀\n * ``**kwargs``: 其他传递给 ``on_command`` 的参数默认值,参考 `on_command <#on-command-cmd-rule-none-aliases-none-kwargs>`_\n ' self.basecmd: Tuple[(str, ...)] = ((cmd,) if isinstance(cmd, str) else cmd) '\n - **类型**: ``Tuple[str, ...]``\n - **说明**: 命令前缀\n ' if ('aliases' in kwargs): del kwargs['aliases'] self.base_kwargs: Dict[(str, Any)] = kwargs '\n - **类型**: ``Dict[str, Any]``\n - **说明**: 其他传递给 ``on_command`` 的参数默认值\n '
:参数: * ``cmd: Union[str, Tuple[str, ...]]``: 命令前缀 * ``**kwargs``: 其他传递给 ``on_command`` 的参数默认值,参考 `on_command <#on-command-cmd-rule-none-aliases-none-kwargs>`_
nonebot/plugin/__init__.py
__init__
SK-415/nonebot2
1,757
python
def __init__(self, cmd: Union[(str, Tuple[(str, ...)])], **kwargs): '\n :参数:\n\n * ``cmd: Union[str, Tuple[str, ...]]``: 命令前缀\n * ``**kwargs``: 其他传递给 ``on_command`` 的参数默认值,参考 `on_command <#on-command-cmd-rule-none-aliases-none-kwargs>`_\n ' self.basecmd: Tuple[(str, ...)] = ((cmd,) if isinstance(cmd, str) else cmd) '\n - **类型**: ``Tuple[str, ...]``\n - **说明**: 命令前缀\n ' if ('aliases' in kwargs): del kwargs['aliases'] self.base_kwargs: Dict[(str, Any)] = kwargs '\n - **类型**: ``Dict[str, Any]``\n - **说明**: 其他传递给 ``on_command`` 的参数默认值\n '
def __init__(self, cmd: Union[(str, Tuple[(str, ...)])], **kwargs): '\n :参数:\n\n * ``cmd: Union[str, Tuple[str, ...]]``: 命令前缀\n * ``**kwargs``: 其他传递给 ``on_command`` 的参数默认值,参考 `on_command <#on-command-cmd-rule-none-aliases-none-kwargs>`_\n ' self.basecmd: Tuple[(str, ...)] = ((cmd,) if isinstance(cmd, str) else cmd) '\n - **类型**: ``Tuple[str, ...]``\n - **说明**: 命令前缀\n ' if ('aliases' in kwargs): del kwargs['aliases'] self.base_kwargs: Dict[(str, Any)] = kwargs '\n - **类型**: ``Dict[str, Any]``\n - **说明**: 其他传递给 ``on_command`` 的参数默认值\n '<|docstring|>:参数: * ``cmd: Union[str, Tuple[str, ...]]``: 命令前缀 * ``**kwargs``: 其他传递给 ``on_command`` 的参数默认值,参考 `on_command <#on-command-cmd-rule-none-aliases-none-kwargs>`_<|endoftext|>
af620a0f71f3850eb31a73ad47dd8ca4f3cd2156f96681e45c37f630cd123c51
def command(self, cmd: Union[(str, Tuple[(str, ...)])], **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个新的命令。\n\n :参数:\n\n * ``cmd: Union[str, Tuple[str, ...]]``: 命令前缀\n * ``**kwargs``: 其他传递给 ``on_command`` 的参数,将会覆盖命令组默认值\n\n :返回:\n\n - ``Type[Matcher]``\n ' sub_cmd = ((cmd,) if isinstance(cmd, str) else cmd) cmd = (self.basecmd + sub_cmd) final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) return on_command(cmd, **final_kwargs)
:说明: 注册一个新的命令。 :参数: * ``cmd: Union[str, Tuple[str, ...]]``: 命令前缀 * ``**kwargs``: 其他传递给 ``on_command`` 的参数,将会覆盖命令组默认值 :返回: - ``Type[Matcher]``
nonebot/plugin/__init__.py
command
SK-415/nonebot2
1,757
python
def command(self, cmd: Union[(str, Tuple[(str, ...)])], **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个新的命令。\n\n :参数:\n\n * ``cmd: Union[str, Tuple[str, ...]]``: 命令前缀\n * ``**kwargs``: 其他传递给 ``on_command`` 的参数,将会覆盖命令组默认值\n\n :返回:\n\n - ``Type[Matcher]``\n ' sub_cmd = ((cmd,) if isinstance(cmd, str) else cmd) cmd = (self.basecmd + sub_cmd) final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) return on_command(cmd, **final_kwargs)
def command(self, cmd: Union[(str, Tuple[(str, ...)])], **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个新的命令。\n\n :参数:\n\n * ``cmd: Union[str, Tuple[str, ...]]``: 命令前缀\n * ``**kwargs``: 其他传递给 ``on_command`` 的参数,将会覆盖命令组默认值\n\n :返回:\n\n - ``Type[Matcher]``\n ' sub_cmd = ((cmd,) if isinstance(cmd, str) else cmd) cmd = (self.basecmd + sub_cmd) final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) return on_command(cmd, **final_kwargs)<|docstring|>:说明: 注册一个新的命令。 :参数: * ``cmd: Union[str, Tuple[str, ...]]``: 命令前缀 * ``**kwargs``: 其他传递给 ``on_command`` 的参数,将会覆盖命令组默认值 :返回: - ``Type[Matcher]``<|endoftext|>
14063a37bd2d739e80ab531c4540f9d2eeac231ac13521132f6467d9d790d8f7
def shell_command(self, cmd: Union[(str, Tuple[(str, ...)])], **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个新的命令。\n\n :参数:\n\n * ``cmd: Union[str, Tuple[str, ...]]``: 命令前缀\n * ``**kwargs``: 其他传递给 ``on_shell_command`` 的参数,将会覆盖命令组默认值\n\n :返回:\n\n - ``Type[Matcher]``\n ' sub_cmd = ((cmd,) if isinstance(cmd, str) else cmd) cmd = (self.basecmd + sub_cmd) final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) return on_shell_command(cmd, **final_kwargs)
:说明: 注册一个新的命令。 :参数: * ``cmd: Union[str, Tuple[str, ...]]``: 命令前缀 * ``**kwargs``: 其他传递给 ``on_shell_command`` 的参数,将会覆盖命令组默认值 :返回: - ``Type[Matcher]``
nonebot/plugin/__init__.py
shell_command
SK-415/nonebot2
1,757
python
def shell_command(self, cmd: Union[(str, Tuple[(str, ...)])], **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个新的命令。\n\n :参数:\n\n * ``cmd: Union[str, Tuple[str, ...]]``: 命令前缀\n * ``**kwargs``: 其他传递给 ``on_shell_command`` 的参数,将会覆盖命令组默认值\n\n :返回:\n\n - ``Type[Matcher]``\n ' sub_cmd = ((cmd,) if isinstance(cmd, str) else cmd) cmd = (self.basecmd + sub_cmd) final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) return on_shell_command(cmd, **final_kwargs)
def shell_command(self, cmd: Union[(str, Tuple[(str, ...)])], **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个新的命令。\n\n :参数:\n\n * ``cmd: Union[str, Tuple[str, ...]]``: 命令前缀\n * ``**kwargs``: 其他传递给 ``on_shell_command`` 的参数,将会覆盖命令组默认值\n\n :返回:\n\n - ``Type[Matcher]``\n ' sub_cmd = ((cmd,) if isinstance(cmd, str) else cmd) cmd = (self.basecmd + sub_cmd) final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) return on_shell_command(cmd, **final_kwargs)<|docstring|>:说明: 注册一个新的命令。 :参数: * ``cmd: Union[str, Tuple[str, ...]]``: 命令前缀 * ``**kwargs``: 其他传递给 ``on_shell_command`` 的参数,将会覆盖命令组默认值 :返回: - ``Type[Matcher]``<|endoftext|>
4d2e69a2823b9ec78c296c6d63cf884183f7a7e59052b312d94881594dcc1430
def __init__(self, **kwargs): '\n :说明:\n\n 创建一个事件响应器组合,参数为默认值,与 ``on`` 一致\n ' self.matchers: List[Type[Matcher]] = [] '\n :类型: ``List[Type[Matcher]]``\n :说明: 组内事件响应器列表\n ' self.base_kwargs: Dict[(str, Any)] = kwargs '\n - **类型**: ``Dict[str, Any]``\n - **说明**: 其他传递给 ``on`` 的参数默认值\n '
:说明: 创建一个事件响应器组合,参数为默认值,与 ``on`` 一致
nonebot/plugin/__init__.py
__init__
SK-415/nonebot2
1,757
python
def __init__(self, **kwargs): '\n :说明:\n\n 创建一个事件响应器组合,参数为默认值,与 ``on`` 一致\n ' self.matchers: List[Type[Matcher]] = [] '\n :类型: ``List[Type[Matcher]]``\n :说明: 组内事件响应器列表\n ' self.base_kwargs: Dict[(str, Any)] = kwargs '\n - **类型**: ``Dict[str, Any]``\n - **说明**: 其他传递给 ``on`` 的参数默认值\n '
def __init__(self, **kwargs): '\n :说明:\n\n 创建一个事件响应器组合,参数为默认值,与 ``on`` 一致\n ' self.matchers: List[Type[Matcher]] = [] '\n :类型: ``List[Type[Matcher]]``\n :说明: 组内事件响应器列表\n ' self.base_kwargs: Dict[(str, Any)] = kwargs '\n - **类型**: ``Dict[str, Any]``\n - **说明**: 其他传递给 ``on`` 的参数默认值\n '<|docstring|>:说明: 创建一个事件响应器组合,参数为默认值,与 ``on`` 一致<|endoftext|>
53e7069f71f8b1aa872e061617514347acf9cfd805122a7c23aae4be4baf9078
def on(self, **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个基础事件响应器,可自定义类型。\n\n :参数:\n\n * ``type: str``: 事件响应器类型\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``permission: Optional[Permission]``: 事件响应权限\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) matcher = on(**final_kwargs) self.matchers.append(matcher) return matcher
:说明: 注册一个基础事件响应器,可自定义类型。 :参数: * ``type: str``: 事件响应器类型 * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则 * ``permission: Optional[Permission]``: 事件响应权限 * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表 * ``temp: bool``: 是否为临时事件响应器(仅执行一次) * ``priority: int``: 事件响应器优先级 * ``block: bool``: 是否阻止事件向更低优先级传递 * ``state: Optional[T_State]``: 默认 state * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数 :返回: - ``Type[Matcher]``
nonebot/plugin/__init__.py
on
SK-415/nonebot2
1,757
python
def on(self, **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个基础事件响应器,可自定义类型。\n\n :参数:\n\n * ``type: str``: 事件响应器类型\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``permission: Optional[Permission]``: 事件响应权限\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) matcher = on(**final_kwargs) self.matchers.append(matcher) return matcher
def on(self, **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个基础事件响应器,可自定义类型。\n\n :参数:\n\n * ``type: str``: 事件响应器类型\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``permission: Optional[Permission]``: 事件响应权限\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) matcher = on(**final_kwargs) self.matchers.append(matcher) return matcher<|docstring|>:说明: 注册一个基础事件响应器,可自定义类型。 :参数: * ``type: str``: 事件响应器类型 * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则 * ``permission: Optional[Permission]``: 事件响应权限 * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表 * ``temp: bool``: 是否为临时事件响应器(仅执行一次) * ``priority: int``: 事件响应器优先级 * ``block: bool``: 是否阻止事件向更低优先级传递 * ``state: Optional[T_State]``: 默认 state * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数 :返回: - ``Type[Matcher]``<|endoftext|>
7a4ece578bff4e6e8d6c0a77cf545d58f3ec21f84720f3bff4549365fd07a4ab
def on_metaevent(self, **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个元事件响应器。\n\n :参数:\n\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) final_kwargs.pop('permission', None) matcher = on_metaevent(**final_kwargs) self.matchers.append(matcher) return matcher
:说明: 注册一个元事件响应器。 :参数: * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则 * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表 * ``temp: bool``: 是否为临时事件响应器(仅执行一次) * ``priority: int``: 事件响应器优先级 * ``block: bool``: 是否阻止事件向更低优先级传递 * ``state: Optional[T_State]``: 默认 state * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数 :返回: - ``Type[Matcher]``
nonebot/plugin/__init__.py
on_metaevent
SK-415/nonebot2
1,757
python
def on_metaevent(self, **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个元事件响应器。\n\n :参数:\n\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) final_kwargs.pop('permission', None) matcher = on_metaevent(**final_kwargs) self.matchers.append(matcher) return matcher
def on_metaevent(self, **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个元事件响应器。\n\n :参数:\n\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) final_kwargs.pop('permission', None) matcher = on_metaevent(**final_kwargs) self.matchers.append(matcher) return matcher<|docstring|>:说明: 注册一个元事件响应器。 :参数: * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则 * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表 * ``temp: bool``: 是否为临时事件响应器(仅执行一次) * ``priority: int``: 事件响应器优先级 * ``block: bool``: 是否阻止事件向更低优先级传递 * ``state: Optional[T_State]``: 默认 state * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数 :返回: - ``Type[Matcher]``<|endoftext|>
9ee4fbac9a30ba229d7637d2eb35af1c247959c614155e97528d9ba1bb0b1941
def on_message(self, **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个消息事件响应器。\n\n :参数:\n\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``permission: Optional[Permission]``: 事件响应权限\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_message(**final_kwargs) self.matchers.append(matcher) return matcher
:说明: 注册一个消息事件响应器。 :参数: * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则 * ``permission: Optional[Permission]``: 事件响应权限 * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表 * ``temp: bool``: 是否为临时事件响应器(仅执行一次) * ``priority: int``: 事件响应器优先级 * ``block: bool``: 是否阻止事件向更低优先级传递 * ``state: Optional[T_State]``: 默认 state * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数 :返回: - ``Type[Matcher]``
nonebot/plugin/__init__.py
on_message
SK-415/nonebot2
1,757
python
def on_message(self, **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个消息事件响应器。\n\n :参数:\n\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``permission: Optional[Permission]``: 事件响应权限\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_message(**final_kwargs) self.matchers.append(matcher) return matcher
def on_message(self, **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个消息事件响应器。\n\n :参数:\n\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``permission: Optional[Permission]``: 事件响应权限\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_message(**final_kwargs) self.matchers.append(matcher) return matcher<|docstring|>:说明: 注册一个消息事件响应器。 :参数: * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则 * ``permission: Optional[Permission]``: 事件响应权限 * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表 * ``temp: bool``: 是否为临时事件响应器(仅执行一次) * ``priority: int``: 事件响应器优先级 * ``block: bool``: 是否阻止事件向更低优先级传递 * ``state: Optional[T_State]``: 默认 state * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数 :返回: - ``Type[Matcher]``<|endoftext|>
1cb41e8cf2850ae45ae91bef4aeaed42b4033b0610f36896508094b91d7cc04a
def on_notice(self, **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个通知事件响应器。\n\n :参数:\n\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_notice(**final_kwargs) self.matchers.append(matcher) return matcher
:说明: 注册一个通知事件响应器。 :参数: * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则 * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表 * ``temp: bool``: 是否为临时事件响应器(仅执行一次) * ``priority: int``: 事件响应器优先级 * ``block: bool``: 是否阻止事件向更低优先级传递 * ``state: Optional[T_State]``: 默认 state * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数 :返回: - ``Type[Matcher]``
nonebot/plugin/__init__.py
on_notice
SK-415/nonebot2
1,757
python
def on_notice(self, **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个通知事件响应器。\n\n :参数:\n\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_notice(**final_kwargs) self.matchers.append(matcher) return matcher
def on_notice(self, **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个通知事件响应器。\n\n :参数:\n\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_notice(**final_kwargs) self.matchers.append(matcher) return matcher<|docstring|>:说明: 注册一个通知事件响应器。 :参数: * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则 * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表 * ``temp: bool``: 是否为临时事件响应器(仅执行一次) * ``priority: int``: 事件响应器优先级 * ``block: bool``: 是否阻止事件向更低优先级传递 * ``state: Optional[T_State]``: 默认 state * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数 :返回: - ``Type[Matcher]``<|endoftext|>
47964eda0692099479485a818e89fa1e679706e8aec480f5d8d2061c5442dc21
def on_request(self, **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个请求事件响应器。\n\n :参数:\n\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_request(**final_kwargs) self.matchers.append(matcher) return matcher
:说明: 注册一个请求事件响应器。 :参数: * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则 * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表 * ``temp: bool``: 是否为临时事件响应器(仅执行一次) * ``priority: int``: 事件响应器优先级 * ``block: bool``: 是否阻止事件向更低优先级传递 * ``state: Optional[T_State]``: 默认 state * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数 :返回: - ``Type[Matcher]``
nonebot/plugin/__init__.py
on_request
SK-415/nonebot2
1,757
python
def on_request(self, **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个请求事件响应器。\n\n :参数:\n\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_request(**final_kwargs) self.matchers.append(matcher) return matcher
def on_request(self, **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个请求事件响应器。\n\n :参数:\n\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_request(**final_kwargs) self.matchers.append(matcher) return matcher<|docstring|>:说明: 注册一个请求事件响应器。 :参数: * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则 * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表 * ``temp: bool``: 是否为临时事件响应器(仅执行一次) * ``priority: int``: 事件响应器优先级 * ``block: bool``: 是否阻止事件向更低优先级传递 * ``state: Optional[T_State]``: 默认 state * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数 :返回: - ``Type[Matcher]``<|endoftext|>
a12f86a4a42d99bee9ae52e5485e471ff809a7546acf1cec3b515fa0b62c9172
def on_startswith(self, msg: Union[(str, Tuple[(str, ...)])], **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个消息事件响应器,并且当消息的**文本部分**以指定内容开头时响应。\n\n :参数:\n\n * ``msg: Union[str, Tuple[str, ...]]``: 指定消息开头内容\n * ``ignorecase: bool``: 是否忽略大小写\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``permission: Optional[Permission]``: 事件响应权限\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_startswith(msg, **final_kwargs) self.matchers.append(matcher) return matcher
:说明: 注册一个消息事件响应器,并且当消息的**文本部分**以指定内容开头时响应。 :参数: * ``msg: Union[str, Tuple[str, ...]]``: 指定消息开头内容 * ``ignorecase: bool``: 是否忽略大小写 * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则 * ``permission: Optional[Permission]``: 事件响应权限 * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表 * ``temp: bool``: 是否为临时事件响应器(仅执行一次) * ``priority: int``: 事件响应器优先级 * ``block: bool``: 是否阻止事件向更低优先级传递 * ``state: Optional[T_State]``: 默认 state * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数 :返回: - ``Type[Matcher]``
nonebot/plugin/__init__.py
on_startswith
SK-415/nonebot2
1,757
python
def on_startswith(self, msg: Union[(str, Tuple[(str, ...)])], **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个消息事件响应器,并且当消息的**文本部分**以指定内容开头时响应。\n\n :参数:\n\n * ``msg: Union[str, Tuple[str, ...]]``: 指定消息开头内容\n * ``ignorecase: bool``: 是否忽略大小写\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``permission: Optional[Permission]``: 事件响应权限\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_startswith(msg, **final_kwargs) self.matchers.append(matcher) return matcher
def on_startswith(self, msg: Union[(str, Tuple[(str, ...)])], **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个消息事件响应器,并且当消息的**文本部分**以指定内容开头时响应。\n\n :参数:\n\n * ``msg: Union[str, Tuple[str, ...]]``: 指定消息开头内容\n * ``ignorecase: bool``: 是否忽略大小写\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``permission: Optional[Permission]``: 事件响应权限\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_startswith(msg, **final_kwargs) self.matchers.append(matcher) return matcher<|docstring|>:说明: 注册一个消息事件响应器,并且当消息的**文本部分**以指定内容开头时响应。 :参数: * ``msg: Union[str, Tuple[str, ...]]``: 指定消息开头内容 * ``ignorecase: bool``: 是否忽略大小写 * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则 * ``permission: Optional[Permission]``: 事件响应权限 * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表 * ``temp: bool``: 是否为临时事件响应器(仅执行一次) * ``priority: int``: 事件响应器优先级 * ``block: bool``: 是否阻止事件向更低优先级传递 * ``state: Optional[T_State]``: 默认 state * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数 :返回: - ``Type[Matcher]``<|endoftext|>
ad46f561473c046b295576fe9db33feeca8269513973d859b79269026e2fbc57
def on_endswith(self, msg: Union[(str, Tuple[(str, ...)])], **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个消息事件响应器,并且当消息的**文本部分**以指定内容结尾时响应。\n\n :参数:\n\n * ``msg: Union[str, Tuple[str, ...]]``: 指定消息结尾内容\n * ``ignorecase: bool``: 是否忽略大小写\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``permission: Optional[Permission]``: 事件响应权限\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_endswith(msg, **final_kwargs) self.matchers.append(matcher) return matcher
:说明: 注册一个消息事件响应器,并且当消息的**文本部分**以指定内容结尾时响应。 :参数: * ``msg: Union[str, Tuple[str, ...]]``: 指定消息结尾内容 * ``ignorecase: bool``: 是否忽略大小写 * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则 * ``permission: Optional[Permission]``: 事件响应权限 * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表 * ``temp: bool``: 是否为临时事件响应器(仅执行一次) * ``priority: int``: 事件响应器优先级 * ``block: bool``: 是否阻止事件向更低优先级传递 * ``state: Optional[T_State]``: 默认 state * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数 :返回: - ``Type[Matcher]``
nonebot/plugin/__init__.py
on_endswith
SK-415/nonebot2
1,757
python
def on_endswith(self, msg: Union[(str, Tuple[(str, ...)])], **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个消息事件响应器,并且当消息的**文本部分**以指定内容结尾时响应。\n\n :参数:\n\n * ``msg: Union[str, Tuple[str, ...]]``: 指定消息结尾内容\n * ``ignorecase: bool``: 是否忽略大小写\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``permission: Optional[Permission]``: 事件响应权限\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_endswith(msg, **final_kwargs) self.matchers.append(matcher) return matcher
def on_endswith(self, msg: Union[(str, Tuple[(str, ...)])], **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个消息事件响应器,并且当消息的**文本部分**以指定内容结尾时响应。\n\n :参数:\n\n * ``msg: Union[str, Tuple[str, ...]]``: 指定消息结尾内容\n * ``ignorecase: bool``: 是否忽略大小写\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``permission: Optional[Permission]``: 事件响应权限\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_endswith(msg, **final_kwargs) self.matchers.append(matcher) return matcher<|docstring|>:说明: 注册一个消息事件响应器,并且当消息的**文本部分**以指定内容结尾时响应。 :参数: * ``msg: Union[str, Tuple[str, ...]]``: 指定消息结尾内容 * ``ignorecase: bool``: 是否忽略大小写 * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则 * ``permission: Optional[Permission]``: 事件响应权限 * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表 * ``temp: bool``: 是否为临时事件响应器(仅执行一次) * ``priority: int``: 事件响应器优先级 * ``block: bool``: 是否阻止事件向更低优先级传递 * ``state: Optional[T_State]``: 默认 state * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数 :返回: - ``Type[Matcher]``<|endoftext|>
a716562ba2ce7402f5e5e681e32ede2cec0516be1f0b0c953433b090a9c903a3
def on_keyword(self, keywords: Set[str], **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个消息事件响应器,并且当消息纯文本部分包含关键词时响应。\n\n :参数:\n\n * ``keywords: Set[str]``: 关键词列表\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``permission: Optional[Permission]``: 事件响应权限\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_keyword(keywords, **final_kwargs) self.matchers.append(matcher) return matcher
:说明: 注册一个消息事件响应器,并且当消息纯文本部分包含关键词时响应。 :参数: * ``keywords: Set[str]``: 关键词列表 * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则 * ``permission: Optional[Permission]``: 事件响应权限 * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表 * ``temp: bool``: 是否为临时事件响应器(仅执行一次) * ``priority: int``: 事件响应器优先级 * ``block: bool``: 是否阻止事件向更低优先级传递 * ``state: Optional[T_State]``: 默认 state * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数 :返回: - ``Type[Matcher]``
nonebot/plugin/__init__.py
on_keyword
SK-415/nonebot2
1,757
python
def on_keyword(self, keywords: Set[str], **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个消息事件响应器,并且当消息纯文本部分包含关键词时响应。\n\n :参数:\n\n * ``keywords: Set[str]``: 关键词列表\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``permission: Optional[Permission]``: 事件响应权限\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_keyword(keywords, **final_kwargs) self.matchers.append(matcher) return matcher
def on_keyword(self, keywords: Set[str], **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个消息事件响应器,并且当消息纯文本部分包含关键词时响应。\n\n :参数:\n\n * ``keywords: Set[str]``: 关键词列表\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``permission: Optional[Permission]``: 事件响应权限\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_keyword(keywords, **final_kwargs) self.matchers.append(matcher) return matcher<|docstring|>:说明: 注册一个消息事件响应器,并且当消息纯文本部分包含关键词时响应。 :参数: * ``keywords: Set[str]``: 关键词列表 * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则 * ``permission: Optional[Permission]``: 事件响应权限 * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表 * ``temp: bool``: 是否为临时事件响应器(仅执行一次) * ``priority: int``: 事件响应器优先级 * ``block: bool``: 是否阻止事件向更低优先级传递 * ``state: Optional[T_State]``: 默认 state * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数 :返回: - ``Type[Matcher]``<|endoftext|>
ec492e04ecdc7c38402b718d1ce1fc095e2669064fb2f3b7fb6551ada8065b3d
def on_command(self, cmd: Union[(str, Tuple[(str, ...)])], aliases: Optional[Set[Union[(str, Tuple[(str, ...)])]]]=None, **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个消息事件响应器,并且当消息以指定命令开头时响应。\n\n 命令匹配规则参考: `命令形式匹配 <rule.html#command-command>`_\n\n :参数:\n\n * ``cmd: Union[str, Tuple[str, ...]]``: 指定命令内容\n * ``aliases: Optional[Set[Union[str, Tuple[str, ...]]]]``: 命令别名\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``permission: Optional[Permission]``: 事件响应权限\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_command(cmd, aliases=aliases, **final_kwargs) self.matchers.append(matcher) return matcher
:说明: 注册一个消息事件响应器,并且当消息以指定命令开头时响应。 命令匹配规则参考: `命令形式匹配 <rule.html#command-command>`_ :参数: * ``cmd: Union[str, Tuple[str, ...]]``: 指定命令内容 * ``aliases: Optional[Set[Union[str, Tuple[str, ...]]]]``: 命令别名 * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则 * ``permission: Optional[Permission]``: 事件响应权限 * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表 * ``temp: bool``: 是否为临时事件响应器(仅执行一次) * ``priority: int``: 事件响应器优先级 * ``block: bool``: 是否阻止事件向更低优先级传递 * ``state: Optional[T_State]``: 默认 state * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数 :返回: - ``Type[Matcher]``
nonebot/plugin/__init__.py
on_command
SK-415/nonebot2
1,757
python
def on_command(self, cmd: Union[(str, Tuple[(str, ...)])], aliases: Optional[Set[Union[(str, Tuple[(str, ...)])]]]=None, **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个消息事件响应器,并且当消息以指定命令开头时响应。\n\n 命令匹配规则参考: `命令形式匹配 <rule.html#command-command>`_\n\n :参数:\n\n * ``cmd: Union[str, Tuple[str, ...]]``: 指定命令内容\n * ``aliases: Optional[Set[Union[str, Tuple[str, ...]]]]``: 命令别名\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``permission: Optional[Permission]``: 事件响应权限\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_command(cmd, aliases=aliases, **final_kwargs) self.matchers.append(matcher) return matcher
def on_command(self, cmd: Union[(str, Tuple[(str, ...)])], aliases: Optional[Set[Union[(str, Tuple[(str, ...)])]]]=None, **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个消息事件响应器,并且当消息以指定命令开头时响应。\n\n 命令匹配规则参考: `命令形式匹配 <rule.html#command-command>`_\n\n :参数:\n\n * ``cmd: Union[str, Tuple[str, ...]]``: 指定命令内容\n * ``aliases: Optional[Set[Union[str, Tuple[str, ...]]]]``: 命令别名\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``permission: Optional[Permission]``: 事件响应权限\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_command(cmd, aliases=aliases, **final_kwargs) self.matchers.append(matcher) return matcher<|docstring|>:说明: 注册一个消息事件响应器,并且当消息以指定命令开头时响应。 命令匹配规则参考: `命令形式匹配 <rule.html#command-command>`_ :参数: * ``cmd: Union[str, Tuple[str, ...]]``: 指定命令内容 * ``aliases: Optional[Set[Union[str, Tuple[str, ...]]]]``: 命令别名 * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则 * ``permission: Optional[Permission]``: 事件响应权限 * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表 * ``temp: bool``: 是否为临时事件响应器(仅执行一次) * ``priority: int``: 事件响应器优先级 * ``block: bool``: 是否阻止事件向更低优先级传递 * ``state: Optional[T_State]``: 默认 state * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数 :返回: - ``Type[Matcher]``<|endoftext|>
1b552dfb5158c6b87aa30032ba7b66db8942cc15f9f61bf51a92367f3f87d0f7
def on_shell_command(self, cmd: Union[(str, Tuple[(str, ...)])], aliases: Optional[Set[Union[(str, Tuple[(str, ...)])]]]=None, parser: Optional[ArgumentParser]=None, **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个支持 ``shell_like`` 解析参数的命令消息事件响应器。\n\n 与普通的 ``on_command`` 不同的是,在添加 ``parser`` 参数时, 响应器会自动处理消息。\n\n 并将用户输入的原始参数列表保存在 ``state["argv"]``, ``parser`` 处理的参数保存在 ``state["args"]`` 中\n\n :参数:\n\n * ``cmd: Union[str, Tuple[str, ...]]``: 指定命令内容\n * ``aliases: Optional[Set[Union[str, Tuple[str, ...]]]]``: 命令别名\n * ``parser: Optional[ArgumentParser]``: ``nonebot.rule.ArgumentParser`` 对象\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``permission: Optional[Permission]``: 事件响应权限\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_shell_command(cmd, aliases=aliases, parser=parser, **final_kwargs) self.matchers.append(matcher) return matcher
:说明: 注册一个支持 ``shell_like`` 解析参数的命令消息事件响应器。 与普通的 ``on_command`` 不同的是,在添加 ``parser`` 参数时, 响应器会自动处理消息。 并将用户输入的原始参数列表保存在 ``state["argv"]``, ``parser`` 处理的参数保存在 ``state["args"]`` 中 :参数: * ``cmd: Union[str, Tuple[str, ...]]``: 指定命令内容 * ``aliases: Optional[Set[Union[str, Tuple[str, ...]]]]``: 命令别名 * ``parser: Optional[ArgumentParser]``: ``nonebot.rule.ArgumentParser`` 对象 * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则 * ``permission: Optional[Permission]``: 事件响应权限 * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表 * ``temp: bool``: 是否为临时事件响应器(仅执行一次) * ``priority: int``: 事件响应器优先级 * ``block: bool``: 是否阻止事件向更低优先级传递 * ``state: Optional[T_State]``: 默认 state * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数 :返回: - ``Type[Matcher]``
nonebot/plugin/__init__.py
on_shell_command
SK-415/nonebot2
1,757
python
def on_shell_command(self, cmd: Union[(str, Tuple[(str, ...)])], aliases: Optional[Set[Union[(str, Tuple[(str, ...)])]]]=None, parser: Optional[ArgumentParser]=None, **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个支持 ``shell_like`` 解析参数的命令消息事件响应器。\n\n 与普通的 ``on_command`` 不同的是,在添加 ``parser`` 参数时, 响应器会自动处理消息。\n\n 并将用户输入的原始参数列表保存在 ``state["argv"]``, ``parser`` 处理的参数保存在 ``state["args"]`` 中\n\n :参数:\n\n * ``cmd: Union[str, Tuple[str, ...]]``: 指定命令内容\n * ``aliases: Optional[Set[Union[str, Tuple[str, ...]]]]``: 命令别名\n * ``parser: Optional[ArgumentParser]``: ``nonebot.rule.ArgumentParser`` 对象\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``permission: Optional[Permission]``: 事件响应权限\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_shell_command(cmd, aliases=aliases, parser=parser, **final_kwargs) self.matchers.append(matcher) return matcher
def on_shell_command(self, cmd: Union[(str, Tuple[(str, ...)])], aliases: Optional[Set[Union[(str, Tuple[(str, ...)])]]]=None, parser: Optional[ArgumentParser]=None, **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个支持 ``shell_like`` 解析参数的命令消息事件响应器。\n\n 与普通的 ``on_command`` 不同的是,在添加 ``parser`` 参数时, 响应器会自动处理消息。\n\n 并将用户输入的原始参数列表保存在 ``state["argv"]``, ``parser`` 处理的参数保存在 ``state["args"]`` 中\n\n :参数:\n\n * ``cmd: Union[str, Tuple[str, ...]]``: 指定命令内容\n * ``aliases: Optional[Set[Union[str, Tuple[str, ...]]]]``: 命令别名\n * ``parser: Optional[ArgumentParser]``: ``nonebot.rule.ArgumentParser`` 对象\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``permission: Optional[Permission]``: 事件响应权限\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_shell_command(cmd, aliases=aliases, parser=parser, **final_kwargs) self.matchers.append(matcher) return matcher<|docstring|>:说明: 注册一个支持 ``shell_like`` 解析参数的命令消息事件响应器。 与普通的 ``on_command`` 不同的是,在添加 ``parser`` 参数时, 响应器会自动处理消息。 并将用户输入的原始参数列表保存在 ``state["argv"]``, ``parser`` 处理的参数保存在 ``state["args"]`` 中 :参数: * ``cmd: Union[str, Tuple[str, ...]]``: 指定命令内容 * ``aliases: Optional[Set[Union[str, Tuple[str, ...]]]]``: 命令别名 * ``parser: Optional[ArgumentParser]``: ``nonebot.rule.ArgumentParser`` 对象 * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则 * ``permission: Optional[Permission]``: 事件响应权限 * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表 * ``temp: bool``: 是否为临时事件响应器(仅执行一次) * ``priority: int``: 事件响应器优先级 * ``block: bool``: 是否阻止事件向更低优先级传递 * ``state: Optional[T_State]``: 默认 state * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数 :返回: 
- ``Type[Matcher]``<|endoftext|>
b57984492b10abe67195d58cc69330bf7ebfb4e79baf01fa043bc87b7cbbe69a
def on_regex(self, pattern: str, flags: Union[(int, re.RegexFlag)]=0, **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个消息事件响应器,并且当消息匹配正则表达式时响应。\n\n 命令匹配规则参考: `正则匹配 <rule.html#regex-regex-flags-0>`_\n\n :参数:\n\n * ``pattern: str``: 正则表达式\n * ``flags: Union[int, re.RegexFlag]``: 正则匹配标志\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``permission: Optional[Permission]``: 事件响应权限\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_regex(pattern, flags=flags, **final_kwargs) self.matchers.append(matcher) return matcher
:说明: 注册一个消息事件响应器,并且当消息匹配正则表达式时响应。 命令匹配规则参考: `正则匹配 <rule.html#regex-regex-flags-0>`_ :参数: * ``pattern: str``: 正则表达式 * ``flags: Union[int, re.RegexFlag]``: 正则匹配标志 * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则 * ``permission: Optional[Permission]``: 事件响应权限 * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表 * ``temp: bool``: 是否为临时事件响应器(仅执行一次) * ``priority: int``: 事件响应器优先级 * ``block: bool``: 是否阻止事件向更低优先级传递 * ``state: Optional[T_State]``: 默认 state * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数 :返回: - ``Type[Matcher]``
nonebot/plugin/__init__.py
on_regex
SK-415/nonebot2
1,757
python
def on_regex(self, pattern: str, flags: Union[(int, re.RegexFlag)]=0, **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个消息事件响应器,并且当消息匹配正则表达式时响应。\n\n 命令匹配规则参考: `正则匹配 <rule.html#regex-regex-flags-0>`_\n\n :参数:\n\n * ``pattern: str``: 正则表达式\n * ``flags: Union[int, re.RegexFlag]``: 正则匹配标志\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``permission: Optional[Permission]``: 事件响应权限\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_regex(pattern, flags=flags, **final_kwargs) self.matchers.append(matcher) return matcher
def on_regex(self, pattern: str, flags: Union[(int, re.RegexFlag)]=0, **kwargs) -> Type[Matcher]: '\n :说明:\n\n 注册一个消息事件响应器,并且当消息匹配正则表达式时响应。\n\n 命令匹配规则参考: `正则匹配 <rule.html#regex-regex-flags-0>`_\n\n :参数:\n\n * ``pattern: str``: 正则表达式\n * ``flags: Union[int, re.RegexFlag]``: 正则匹配标志\n * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则\n * ``permission: Optional[Permission]``: 事件响应权限\n * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表\n * ``temp: bool``: 是否为临时事件响应器(仅执行一次)\n * ``priority: int``: 事件响应器优先级\n * ``block: bool``: 是否阻止事件向更低优先级传递\n * ``state: Optional[T_State]``: 默认 state\n * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数\n\n :返回:\n\n - ``Type[Matcher]``\n ' final_kwargs = self.base_kwargs.copy() final_kwargs.update(kwargs) final_kwargs.pop('type', None) matcher = on_regex(pattern, flags=flags, **final_kwargs) self.matchers.append(matcher) return matcher<|docstring|>:说明: 注册一个消息事件响应器,并且当消息匹配正则表达式时响应。 命令匹配规则参考: `正则匹配 <rule.html#regex-regex-flags-0>`_ :参数: * ``pattern: str``: 正则表达式 * ``flags: Union[int, re.RegexFlag]``: 正则匹配标志 * ``rule: Optional[Union[Rule, T_RuleChecker]]``: 事件响应规则 * ``permission: Optional[Permission]``: 事件响应权限 * ``handlers: Optional[List[Union[T_Handler, Handler]]]``: 事件处理函数列表 * ``temp: bool``: 是否为临时事件响应器(仅执行一次) * ``priority: int``: 事件响应器优先级 * ``block: bool``: 是否阻止事件向更低优先级传递 * ``state: Optional[T_State]``: 默认 state * ``state_factory: Optional[T_StateFactory]``: 默认 state 的工厂函数 :返回: - ``Type[Matcher]``<|endoftext|>
dcb4d18dc9635ac99353c3e0eb70d69238b37e796b9cc051d61b5667c691bfdc
def bark_alpha(fs): '\n Returns the alpha parameter corresponding to a Bark scale.\n ' return ((0.8517 * np.sqrt(np.arctan(((0.06583 * fs) / 1000.0)))) - 0.1916)
Returns the alpha parameter corresponding to a Bark scale.
lib/sigproc/freqwarp.py
bark_alpha
qingyundou/tacotron_qdou
2
python
def bark_alpha(fs): '\n \n ' return ((0.8517 * np.sqrt(np.arctan(((0.06583 * fs) / 1000.0)))) - 0.1916)
def bark_alpha(fs): '\n \n ' return ((0.8517 * np.sqrt(np.arctan(((0.06583 * fs) / 1000.0)))) - 0.1916)<|docstring|>Returns the alpha parameter corresponding to a Bark scale.<|endoftext|>
b4937b5b45334b82bc8050f504069b78157a53330b7480d1c94c1fd0b6099a2a
def erb_alpha(fs): '\n Returns the alpha parameter corresponding to an ERB scale.\n ' return ((0.5941 * np.sqrt(np.arctan(((0.1418 * fs) / 1000.0)))) + 0.03237)
Returns the alpha parameter corresponding to an ERB scale.
lib/sigproc/freqwarp.py
erb_alpha
qingyundou/tacotron_qdou
2
python
def erb_alpha(fs): '\n \n ' return ((0.5941 * np.sqrt(np.arctan(((0.1418 * fs) / 1000.0)))) + 0.03237)
def erb_alpha(fs): '\n \n ' return ((0.5941 * np.sqrt(np.arctan(((0.1418 * fs) / 1000.0)))) + 0.03237)<|docstring|>Returns the alpha parameter corresponding to an ERB scale.<|endoftext|>
116cee98c22ce4218515b77c4f7580b7c687778257d3a93e3ad5786e6b841a89
def loghspec2fwcep(C, fs, order=(- 1)): '\n From https://github.com/covarep/covarep/blob/master/envelope/hspec2fwcep.m\n ' if (len(C.shape) > 1): FWCEP = np.zeros((C.shape[0], (1 + order))) for n in range(C.shape[0]): FWCEP[(n, :)] = loghspec2fwcep(C[(n, :)], fs, order) return FWCEP dftlen = ((len(C) - 1) * 2) if (order == (- 1)): order = int((dftlen / 2)) freqlin = ((fs * np.arange((int((dftlen / 2)) + 1))) / dftlen) freqmel = (((0.5 * fs) * sp.lin2mel(freqlin)) / sp.lin2mel((0.5 * fs))) env = np.interp(freqlin, freqmel, C) fwcep = np.fft.irfft(env) fwcep = fwcep[0:(1 + order)] fwcep[1:] *= 2 if 0: import matplotlib.pyplot as plt plt.ion() Cmel = fwcep2loghspec(fwcep, fs, dftlen) plt.plot(C, 'k') plt.plot(env, 'r') plt.plot(Cmel, 'b') from IPython.core.debugger import Pdb Pdb().set_trace() return fwcep
From https://github.com/covarep/covarep/blob/master/envelope/hspec2fwcep.m
lib/sigproc/freqwarp.py
loghspec2fwcep
qingyundou/tacotron_qdou
2
python
def loghspec2fwcep(C, fs, order=(- 1)): '\n \n ' if (len(C.shape) > 1): FWCEP = np.zeros((C.shape[0], (1 + order))) for n in range(C.shape[0]): FWCEP[(n, :)] = loghspec2fwcep(C[(n, :)], fs, order) return FWCEP dftlen = ((len(C) - 1) * 2) if (order == (- 1)): order = int((dftlen / 2)) freqlin = ((fs * np.arange((int((dftlen / 2)) + 1))) / dftlen) freqmel = (((0.5 * fs) * sp.lin2mel(freqlin)) / sp.lin2mel((0.5 * fs))) env = np.interp(freqlin, freqmel, C) fwcep = np.fft.irfft(env) fwcep = fwcep[0:(1 + order)] fwcep[1:] *= 2 if 0: import matplotlib.pyplot as plt plt.ion() Cmel = fwcep2loghspec(fwcep, fs, dftlen) plt.plot(C, 'k') plt.plot(env, 'r') plt.plot(Cmel, 'b') from IPython.core.debugger import Pdb Pdb().set_trace() return fwcep
def loghspec2fwcep(C, fs, order=(- 1)): '\n \n ' if (len(C.shape) > 1): FWCEP = np.zeros((C.shape[0], (1 + order))) for n in range(C.shape[0]): FWCEP[(n, :)] = loghspec2fwcep(C[(n, :)], fs, order) return FWCEP dftlen = ((len(C) - 1) * 2) if (order == (- 1)): order = int((dftlen / 2)) freqlin = ((fs * np.arange((int((dftlen / 2)) + 1))) / dftlen) freqmel = (((0.5 * fs) * sp.lin2mel(freqlin)) / sp.lin2mel((0.5 * fs))) env = np.interp(freqlin, freqmel, C) fwcep = np.fft.irfft(env) fwcep = fwcep[0:(1 + order)] fwcep[1:] *= 2 if 0: import matplotlib.pyplot as plt plt.ion() Cmel = fwcep2loghspec(fwcep, fs, dftlen) plt.plot(C, 'k') plt.plot(env, 'r') plt.plot(Cmel, 'b') from IPython.core.debugger import Pdb Pdb().set_trace() return fwcep<|docstring|>From https://github.com/covarep/covarep/blob/master/envelope/hspec2fwcep.m<|endoftext|>
d4b898cd5d58528e16081c937665dfc71042060fcfb4c2641b32bd26c7c96f03
def fwcep2loghspec(fwcep, fs, dftlen): '\n From https://github.com/covarep/covarep/blob/master/envelope/fwcep2hspec.m\n ' if (len(fwcep.shape) > 1): C = np.zeros((fwcep.shape[0], (int((dftlen / 2)) + 1))) for n in range(C.shape[0]): C[(n, :)] = fwcep2loghspec(fwcep[(n, :)], fs, dftlen) return C freqlin = ((fs * np.arange((int((dftlen / 2)) + 1))) / dftlen) freqmel = (((0.5 * fs) * sp.lin2mel(freqlin)) / sp.lin2mel((0.5 * fs))) Cwrapl = np.real(np.fft.rfft(fwcep, dftlen)) C = np.interp(freqmel, freqlin, Cwrapl) return C
From https://github.com/covarep/covarep/blob/master/envelope/fwcep2hspec.m
lib/sigproc/freqwarp.py
fwcep2loghspec
qingyundou/tacotron_qdou
2
python
def fwcep2loghspec(fwcep, fs, dftlen): '\n \n ' if (len(fwcep.shape) > 1): C = np.zeros((fwcep.shape[0], (int((dftlen / 2)) + 1))) for n in range(C.shape[0]): C[(n, :)] = fwcep2loghspec(fwcep[(n, :)], fs, dftlen) return C freqlin = ((fs * np.arange((int((dftlen / 2)) + 1))) / dftlen) freqmel = (((0.5 * fs) * sp.lin2mel(freqlin)) / sp.lin2mel((0.5 * fs))) Cwrapl = np.real(np.fft.rfft(fwcep, dftlen)) C = np.interp(freqmel, freqlin, Cwrapl) return C
def fwcep2loghspec(fwcep, fs, dftlen): '\n \n ' if (len(fwcep.shape) > 1): C = np.zeros((fwcep.shape[0], (int((dftlen / 2)) + 1))) for n in range(C.shape[0]): C[(n, :)] = fwcep2loghspec(fwcep[(n, :)], fs, dftlen) return C freqlin = ((fs * np.arange((int((dftlen / 2)) + 1))) / dftlen) freqmel = (((0.5 * fs) * sp.lin2mel(freqlin)) / sp.lin2mel((0.5 * fs))) Cwrapl = np.real(np.fft.rfft(fwcep, dftlen)) C = np.interp(freqmel, freqlin, Cwrapl) return C<|docstring|>From https://github.com/covarep/covarep/blob/master/envelope/fwcep2hspec.m<|endoftext|>
8505052e03ed5363441f3229530d199b5cbde50a62265cd62502bf594f3f2ee5
def linbnd2fwbnd(X, fs, dftlen, nbbnds): '\n Split the spectral data X into mel-bands\n ' freqlin = ((fs * np.arange((int((dftlen / 2)) + 1))) / dftlen) freqmel = (((0.5 * fs) * sp.lin2mel(freqlin)) / sp.lin2mel((0.5 * fs))) Z = np.zeros((X.shape[0], nbbnds)) for t in np.arange(X.shape[0]): Y = np.interp(freqlin, freqmel, X[(t, :)]) for k in np.arange(nbbnds): ids = int(((float(k) / nbbnds) * (int((dftlen / 2)) + 1))) ide = int(((float((k + 1)) / nbbnds) * (int((dftlen / 2)) + 1))) Z[(t, k)] = np.mean(Y[ids:ide]) return Z
Split the spectral data X into mel-bands
lib/sigproc/freqwarp.py
linbnd2fwbnd
qingyundou/tacotron_qdou
2
python
def linbnd2fwbnd(X, fs, dftlen, nbbnds): '\n \n ' freqlin = ((fs * np.arange((int((dftlen / 2)) + 1))) / dftlen) freqmel = (((0.5 * fs) * sp.lin2mel(freqlin)) / sp.lin2mel((0.5 * fs))) Z = np.zeros((X.shape[0], nbbnds)) for t in np.arange(X.shape[0]): Y = np.interp(freqlin, freqmel, X[(t, :)]) for k in np.arange(nbbnds): ids = int(((float(k) / nbbnds) * (int((dftlen / 2)) + 1))) ide = int(((float((k + 1)) / nbbnds) * (int((dftlen / 2)) + 1))) Z[(t, k)] = np.mean(Y[ids:ide]) return Z
def linbnd2fwbnd(X, fs, dftlen, nbbnds): '\n \n ' freqlin = ((fs * np.arange((int((dftlen / 2)) + 1))) / dftlen) freqmel = (((0.5 * fs) * sp.lin2mel(freqlin)) / sp.lin2mel((0.5 * fs))) Z = np.zeros((X.shape[0], nbbnds)) for t in np.arange(X.shape[0]): Y = np.interp(freqlin, freqmel, X[(t, :)]) for k in np.arange(nbbnds): ids = int(((float(k) / nbbnds) * (int((dftlen / 2)) + 1))) ide = int(((float((k + 1)) / nbbnds) * (int((dftlen / 2)) + 1))) Z[(t, k)] = np.mean(Y[ids:ide]) return Z<|docstring|>Split the spectral data X into mel-bands<|endoftext|>
49c6166583a55d8ac794f040af94ac3cee810a76ecce2791d733259d729cf63e
def freq2fwspecidx(freq, fs, nbbnds, dftlen=4096): '\n Retrieve the closest index to a frequency in frequency-warped spectrum.\n ' FF = ((fs * np.arange((int((dftlen / 2)) + 1))) / float(dftlen)) fwFF = linbnd2fwbnd(FF[(np.newaxis, :)], fs, dftlen, nbbnds) return np.min(np.where((fwFF > freq))[1])
Retrieve the closest index to a frequency in frequency-warped spectrum.
lib/sigproc/freqwarp.py
freq2fwspecidx
qingyundou/tacotron_qdou
2
python
def freq2fwspecidx(freq, fs, nbbnds, dftlen=4096): '\n \n ' FF = ((fs * np.arange((int((dftlen / 2)) + 1))) / float(dftlen)) fwFF = linbnd2fwbnd(FF[(np.newaxis, :)], fs, dftlen, nbbnds) return np.min(np.where((fwFF > freq))[1])
def freq2fwspecidx(freq, fs, nbbnds, dftlen=4096): '\n \n ' FF = ((fs * np.arange((int((dftlen / 2)) + 1))) / float(dftlen)) fwFF = linbnd2fwbnd(FF[(np.newaxis, :)], fs, dftlen, nbbnds) return np.min(np.where((fwFF > freq))[1])<|docstring|>Retrieve the closest index to a frequency in frequency-warped spectrum.<|endoftext|>
3cc0417d87130a184a2440968dccae83259e225dbba7f09df02314f8ac7c0bff
def fwbnd2linbnd(Z, fs, dftlen, smooth=False): '\n Reconstruct spectral data from mel-bands\n ' nbbnds = Z.shape[1] freqlin = ((fs * np.arange((int((dftlen / 2)) + 1))) / dftlen) freqmel = (((0.5 * fs) * sp.lin2mel(freqlin)) / sp.lin2mel((0.5 * fs))) X = np.zeros((Z.shape[0], (int((dftlen / 2)) + 1))) for t in np.arange(X.shape[0]): for k in np.arange(nbbnds): ids = int(((float(k) / nbbnds) * (int((dftlen / 2)) + 1))) ide = int(((float((k + 1)) / nbbnds) * (int((dftlen / 2)) + 1))) X[(t, ids:ide)] = Z[(t, k)] if smooth: rcc = np.fft.irfft(X[(t, :)]) rcc = rcc[:(int((dftlen / 2)) + 1)] rcc[1:] *= 2 rcc = rcc[:nbbnds] X[(t, :)] = np.real(np.fft.rfft(rcc, dftlen)) X[(t, :)] = np.interp(freqmel, freqlin, X[(t, :)]) return X
Reconstruct spectral data from mel-bands
lib/sigproc/freqwarp.py
fwbnd2linbnd
qingyundou/tacotron_qdou
2
python
def fwbnd2linbnd(Z, fs, dftlen, smooth=False): '\n \n ' nbbnds = Z.shape[1] freqlin = ((fs * np.arange((int((dftlen / 2)) + 1))) / dftlen) freqmel = (((0.5 * fs) * sp.lin2mel(freqlin)) / sp.lin2mel((0.5 * fs))) X = np.zeros((Z.shape[0], (int((dftlen / 2)) + 1))) for t in np.arange(X.shape[0]): for k in np.arange(nbbnds): ids = int(((float(k) / nbbnds) * (int((dftlen / 2)) + 1))) ide = int(((float((k + 1)) / nbbnds) * (int((dftlen / 2)) + 1))) X[(t, ids:ide)] = Z[(t, k)] if smooth: rcc = np.fft.irfft(X[(t, :)]) rcc = rcc[:(int((dftlen / 2)) + 1)] rcc[1:] *= 2 rcc = rcc[:nbbnds] X[(t, :)] = np.real(np.fft.rfft(rcc, dftlen)) X[(t, :)] = np.interp(freqmel, freqlin, X[(t, :)]) return X
def fwbnd2linbnd(Z, fs, dftlen, smooth=False): '\n \n ' nbbnds = Z.shape[1] freqlin = ((fs * np.arange((int((dftlen / 2)) + 1))) / dftlen) freqmel = (((0.5 * fs) * sp.lin2mel(freqlin)) / sp.lin2mel((0.5 * fs))) X = np.zeros((Z.shape[0], (int((dftlen / 2)) + 1))) for t in np.arange(X.shape[0]): for k in np.arange(nbbnds): ids = int(((float(k) / nbbnds) * (int((dftlen / 2)) + 1))) ide = int(((float((k + 1)) / nbbnds) * (int((dftlen / 2)) + 1))) X[(t, ids:ide)] = Z[(t, k)] if smooth: rcc = np.fft.irfft(X[(t, :)]) rcc = rcc[:(int((dftlen / 2)) + 1)] rcc[1:] *= 2 rcc = rcc[:nbbnds] X[(t, :)] = np.real(np.fft.rfft(rcc, dftlen)) X[(t, :)] = np.interp(freqmel, freqlin, X[(t, :)]) return X<|docstring|>Reconstruct spectral data from mel-bands<|endoftext|>
63a11ee9449ad8741586bc5071fa6cdf97201786f66a9b6777116984dbebf6ec
def inf_generator(iterable): 'Allows training with DataLoaders in a single infinite loop:\n\t\tfor i, (x, y) in enumerate(inf_generator(train_loader)):\n\t' iterator = iterable.__iter__() while True: try: (yield iterator.__next__()) except StopIteration: iterator = iterable.__iter__()
Allows training with DataLoaders in a single infinite loop: for i, (x, y) in enumerate(inf_generator(train_loader)):
lib/utils.py
inf_generator
JurijsNazarovs/panel_me_ode
6
python
def inf_generator(iterable): 'Allows training with DataLoaders in a single infinite loop:\n\t\tfor i, (x, y) in enumerate(inf_generator(train_loader)):\n\t' iterator = iterable.__iter__() while True: try: (yield iterator.__next__()) except StopIteration: iterator = iterable.__iter__()
def inf_generator(iterable): 'Allows training with DataLoaders in a single infinite loop:\n\t\tfor i, (x, y) in enumerate(inf_generator(train_loader)):\n\t' iterator = iterable.__iter__() while True: try: (yield iterator.__next__()) except StopIteration: iterator = iterable.__iter__()<|docstring|>Allows training with DataLoaders in a single infinite loop: for i, (x, y) in enumerate(inf_generator(train_loader)):<|endoftext|>
8f88fb5b9b6c731cbe6c9c8a3f2636546f0c6620db89c57c99d9474dbe095868
@torch.jit.script_method def tokenize(self, input: str) -> List[Tuple[(str, int, int)]]: '\n Process a single line of raw inputs into tokens, it supports\n two input formats:\n 1) a single text\n 2) a token\n\n Returns a list of tokens with start and end indices in original input.\n ' raise NotImplementedError
Process a single line of raw inputs into tokens, it supports two input formats: 1) a single text 2) a token Returns a list of tokens with start and end indices in original input.
pytext/torchscript/tokenizer/tokenizer.py
tokenize
z-a-f/pytext
6,199
python
@torch.jit.script_method def tokenize(self, input: str) -> List[Tuple[(str, int, int)]]: '\n Process a single line of raw inputs into tokens, it supports\n two input formats:\n 1) a single text\n 2) a token\n\n Returns a list of tokens with start and end indices in original input.\n ' raise NotImplementedError
@torch.jit.script_method def tokenize(self, input: str) -> List[Tuple[(str, int, int)]]: '\n Process a single line of raw inputs into tokens, it supports\n two input formats:\n 1) a single text\n 2) a token\n\n Returns a list of tokens with start and end indices in original input.\n ' raise NotImplementedError<|docstring|>Process a single line of raw inputs into tokens, it supports two input formats: 1) a single text 2) a token Returns a list of tokens with start and end indices in original input.<|endoftext|>
a47f5570dbc066d12452832dd8c7dd7b35aa82969d3c520bed401df444fe6628
@torch.jit.script_method def tokenize(self, raw_token: str) -> List[Tuple[(str, int, int)]]: "\n This tokenizers splits a raw_token into its constituent words by splitting\n the raw_token on space. This function handle multiple spaces between\n words too.\n Note:\n torch scripting doesn't support try-except and since re.finditer uses\n try in its implemetation regex based tokenization is not supported.\n " tokenize_input = (raw_token.lower() if self.lowercase else raw_token) tokens = tokenize_input.split() torchify_tokens = torch.jit.annotate(List[Tuple[(str, int, int)]], []) (start, end) = (0, 0) for token in tokens: start = tokenize_input.find(token, end, (- 1)) end = (start + len(token)) torchify_tokens.append((token.strip(), start, end)) start = (end + 1) return torchify_tokens
This tokenizers splits a raw_token into its constituent words by splitting the raw_token on space. This function handle multiple spaces between words too. Note: torch scripting doesn't support try-except and since re.finditer uses try in its implemetation regex based tokenization is not supported.
pytext/torchscript/tokenizer/tokenizer.py
tokenize
z-a-f/pytext
6,199
python
@torch.jit.script_method def tokenize(self, raw_token: str) -> List[Tuple[(str, int, int)]]: "\n This tokenizers splits a raw_token into its constituent words by splitting\n the raw_token on space. This function handle multiple spaces between\n words too.\n Note:\n torch scripting doesn't support try-except and since re.finditer uses\n try in its implemetation regex based tokenization is not supported.\n " tokenize_input = (raw_token.lower() if self.lowercase else raw_token) tokens = tokenize_input.split() torchify_tokens = torch.jit.annotate(List[Tuple[(str, int, int)]], []) (start, end) = (0, 0) for token in tokens: start = tokenize_input.find(token, end, (- 1)) end = (start + len(token)) torchify_tokens.append((token.strip(), start, end)) start = (end + 1) return torchify_tokens
@torch.jit.script_method def tokenize(self, raw_token: str) -> List[Tuple[(str, int, int)]]: "\n This tokenizers splits a raw_token into its constituent words by splitting\n the raw_token on space. This function handle multiple spaces between\n words too.\n Note:\n torch scripting doesn't support try-except and since re.finditer uses\n try in its implemetation regex based tokenization is not supported.\n " tokenize_input = (raw_token.lower() if self.lowercase else raw_token) tokens = tokenize_input.split() torchify_tokens = torch.jit.annotate(List[Tuple[(str, int, int)]], []) (start, end) = (0, 0) for token in tokens: start = tokenize_input.find(token, end, (- 1)) end = (start + len(token)) torchify_tokens.append((token.strip(), start, end)) start = (end + 1) return torchify_tokens<|docstring|>This tokenizers splits a raw_token into its constituent words by splitting the raw_token on space. This function handle multiple spaces between words too. Note: torch scripting doesn't support try-except and since re.finditer uses try in its implemetation regex based tokenization is not supported.<|endoftext|>
913f107537524f9cfe4d247d659146b3451c1c07fdac43b9f0c84b2143805cab
def run(self): 'Run command.' onnx_script = os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tools/mypy-onnx.py')) returncode = subprocess.call([sys.executable, onnx_script]) sys.exit(returncode)
Run command.
setup.py
run
yihonglyu/onnx
12,820
python
def run(self): onnx_script = os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tools/mypy-onnx.py')) returncode = subprocess.call([sys.executable, onnx_script]) sys.exit(returncode)
def run(self): onnx_script = os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tools/mypy-onnx.py')) returncode = subprocess.call([sys.executable, onnx_script]) sys.exit(returncode)<|docstring|>Run command.<|endoftext|>
28e635c98a3156e38f25f9f9fc6f2d5cdb3e22f7787561d86e0fe22f908eea0a
def test_abc_abstract(self): '\n One cannot instantiate `TradeStrategy` itself.\n ' with pytest.raises(TypeError): TradeStrategy()
One cannot instantiate `TradeStrategy` itself.
tests/strategy/test_abc.py
test_abc_abstract
shishaboy/epymetheus
0
python
def test_abc_abstract(self): '\n \n ' with pytest.raises(TypeError): TradeStrategy()
def test_abc_abstract(self): '\n \n ' with pytest.raises(TypeError): TradeStrategy()<|docstring|>One cannot instantiate `TradeStrategy` itself.<|endoftext|>
53d4a2b528e00493adfe6f84eb56f15ef511b13b661228b55d8ff539ffcc5353
def test_abc_nologic(self): '\n One cannot instantiate strategy without logic.\n ' with pytest.raises(TypeError): StrategyWithoutLogic()
One cannot instantiate strategy without logic.
tests/strategy/test_abc.py
test_abc_nologic
shishaboy/epymetheus
0
python
def test_abc_nologic(self): '\n \n ' with pytest.raises(TypeError): StrategyWithoutLogic()
def test_abc_nologic(self): '\n \n ' with pytest.raises(TypeError): StrategyWithoutLogic()<|docstring|>One cannot instantiate strategy without logic.<|endoftext|>
afc83dcc852fc44d96a6b43f926936127a3a2c5f3cc218c9744689f82f22d6ab
def load_data(self, data_dir=None, use_apple2orange=False, use_summer2winter_yosemite=False, use_horse2zebra=False, use_monet2photo=False, use_cezanne2photo=False, use_ukiyoe2photo=False, use_vangogh2photo=False, use_maps=False, use_cityscapes=False, use_facades=False, use_iphone2dslr_flower=False, batch_size=32): 'Load data to train the model\n\n Args:\n data_dir (str, optional): string representing the directory to load data from. Defaults to ``None``\n use_apple2orange (bool, optional): use the apple2orange dataset to train the model. Defaults to ``False``\n use_summer2winter_yosemite (bool, optional): use the summer2winter_yosemite dataset to train the model. Defaults to ``False``\n use_horse2zebra (bool, optional): use the horse2zebra dataset to train the model. Defaults to ``False``\n use_monet2photo (bool, optional): use the monet2photo dataset to train the model. Defaults to ``False``\n use_cezanne2photo (bool, optional): use the cezanne2photo dataset to train the model. Defaults to ``False``\n use_ukiyoe2photo (bool, optional): use the ukiyoe2photo dataset to train the model. Defaults to ``False``\n use_vangogh2photo (bool, optional): use the vangogh2photo dataset to train the model. Defaults to ``False``\n use_maps (bool, optional): use the maps dataset to train the model. Defaults to ``False``\n use_cityscapes (bool, optional): use the cityscapes dataset to train the model. Defaults to ``False``\n use_facades (bool, optional): use the facades dataset to train the model. Defaults to ``False``\n use_iphone2dslr_flower (bool, optional): use the iphone2dslr_flower dataset to train the model. Defaults to ``False``\n batch_size (int, optional): mini batch size for training the model. 
Defaults to ``32``\n\n Return:\n four tensorflow dataset objects representing trainA, trainB, testA, testB \n ' if use_apple2orange: data_obj = cyclegan_dataloader(dataset_name='apple2orange') elif use_summer2winter_yosemite: data_obj = cyclegan_dataloader(dataset_name='summer2winter_yosemite') elif use_horse2zebra: data_obj = cyclegan_dataloader(dataset_name='horse2zebra') elif use_monet2photo: data_obj = cyclegan_dataloader(dataset_name='monet2photo') elif use_cezanne2photo: data_obj = cyclegan_dataloader(dataset_name='cezanne2photo') elif use_ukiyoe2photo: data_obj = cyclegan_dataloader(dataset_name='ukiyoe2photo') elif use_vangogh2photo: data_obj = cyclegan_dataloader(dataset_name='vangogh2photo') elif use_maps: data_obj = cyclegan_dataloader(dataset_name='maps') elif use_cityscapes: data_obj = cyclegan_dataloader(dataset_name='cityscapes') elif use_facades: data_obj = cyclegan_dataloader(dataset_name='facades') elif use_iphone2dslr_flower: data_obj = cyclegan_dataloader(dataset_name='iphone2dslr_flower') else: data_obj = cyclegan_dataloader(datadir=data_dir) (trainA, trainB, testA, testB) = data_obj.load_dataset() for data in trainA.take(1): self.img_size = data.shape self.channels = data.shape[(- 1)] trainA = trainA.shuffle(100000).batch(batch_size) trainB = trainB.shuffle(100000).batch(batch_size) testA = testA.shuffle(100000).batch(batch_size) testB = testB.shuffle(100000).batch(batch_size) return (trainA, trainB, testA, testB)
Load data to train the model Args: data_dir (str, optional): string representing the directory to load data from. Defaults to ``None`` use_apple2orange (bool, optional): use the apple2orange dataset to train the model. Defaults to ``False`` use_summer2winter_yosemite (bool, optional): use the summer2winter_yosemite dataset to train the model. Defaults to ``False`` use_horse2zebra (bool, optional): use the horse2zebra dataset to train the model. Defaults to ``False`` use_monet2photo (bool, optional): use the monet2photo dataset to train the model. Defaults to ``False`` use_cezanne2photo (bool, optional): use the cezanne2photo dataset to train the model. Defaults to ``False`` use_ukiyoe2photo (bool, optional): use the ukiyoe2photo dataset to train the model. Defaults to ``False`` use_vangogh2photo (bool, optional): use the vangogh2photo dataset to train the model. Defaults to ``False`` use_maps (bool, optional): use the maps dataset to train the model. Defaults to ``False`` use_cityscapes (bool, optional): use the cityscapes dataset to train the model. Defaults to ``False`` use_facades (bool, optional): use the facades dataset to train the model. Defaults to ``False`` use_iphone2dslr_flower (bool, optional): use the iphone2dslr_flower dataset to train the model. Defaults to ``False`` batch_size (int, optional): mini batch size for training the model. Defaults to ``32`` Return: four tensorflow dataset objects representing trainA, trainB, testA, testB
simplegan/gan/cyclegan.py
load_data
grohith327/EasyGAN
23
python
def load_data(self, data_dir=None, use_apple2orange=False, use_summer2winter_yosemite=False, use_horse2zebra=False, use_monet2photo=False, use_cezanne2photo=False, use_ukiyoe2photo=False, use_vangogh2photo=False, use_maps=False, use_cityscapes=False, use_facades=False, use_iphone2dslr_flower=False, batch_size=32): 'Load data to train the model\n\n Args:\n data_dir (str, optional): string representing the directory to load data from. Defaults to ``None``\n use_apple2orange (bool, optional): use the apple2orange dataset to train the model. Defaults to ``False``\n use_summer2winter_yosemite (bool, optional): use the summer2winter_yosemite dataset to train the model. Defaults to ``False``\n use_horse2zebra (bool, optional): use the horse2zebra dataset to train the model. Defaults to ``False``\n use_monet2photo (bool, optional): use the monet2photo dataset to train the model. Defaults to ``False``\n use_cezanne2photo (bool, optional): use the cezanne2photo dataset to train the model. Defaults to ``False``\n use_ukiyoe2photo (bool, optional): use the ukiyoe2photo dataset to train the model. Defaults to ``False``\n use_vangogh2photo (bool, optional): use the vangogh2photo dataset to train the model. Defaults to ``False``\n use_maps (bool, optional): use the maps dataset to train the model. Defaults to ``False``\n use_cityscapes (bool, optional): use the cityscapes dataset to train the model. Defaults to ``False``\n use_facades (bool, optional): use the facades dataset to train the model. Defaults to ``False``\n use_iphone2dslr_flower (bool, optional): use the iphone2dslr_flower dataset to train the model. Defaults to ``False``\n batch_size (int, optional): mini batch size for training the model. 
Defaults to ``32``\n\n Return:\n four tensorflow dataset objects representing trainA, trainB, testA, testB \n ' if use_apple2orange: data_obj = cyclegan_dataloader(dataset_name='apple2orange') elif use_summer2winter_yosemite: data_obj = cyclegan_dataloader(dataset_name='summer2winter_yosemite') elif use_horse2zebra: data_obj = cyclegan_dataloader(dataset_name='horse2zebra') elif use_monet2photo: data_obj = cyclegan_dataloader(dataset_name='monet2photo') elif use_cezanne2photo: data_obj = cyclegan_dataloader(dataset_name='cezanne2photo') elif use_ukiyoe2photo: data_obj = cyclegan_dataloader(dataset_name='ukiyoe2photo') elif use_vangogh2photo: data_obj = cyclegan_dataloader(dataset_name='vangogh2photo') elif use_maps: data_obj = cyclegan_dataloader(dataset_name='maps') elif use_cityscapes: data_obj = cyclegan_dataloader(dataset_name='cityscapes') elif use_facades: data_obj = cyclegan_dataloader(dataset_name='facades') elif use_iphone2dslr_flower: data_obj = cyclegan_dataloader(dataset_name='iphone2dslr_flower') else: data_obj = cyclegan_dataloader(datadir=data_dir) (trainA, trainB, testA, testB) = data_obj.load_dataset() for data in trainA.take(1): self.img_size = data.shape self.channels = data.shape[(- 1)] trainA = trainA.shuffle(100000).batch(batch_size) trainB = trainB.shuffle(100000).batch(batch_size) testA = testA.shuffle(100000).batch(batch_size) testB = testB.shuffle(100000).batch(batch_size) return (trainA, trainB, testA, testB)
def load_data(self, data_dir=None, use_apple2orange=False, use_summer2winter_yosemite=False, use_horse2zebra=False, use_monet2photo=False, use_cezanne2photo=False, use_ukiyoe2photo=False, use_vangogh2photo=False, use_maps=False, use_cityscapes=False, use_facades=False, use_iphone2dslr_flower=False, batch_size=32): 'Load data to train the model\n\n Args:\n data_dir (str, optional): string representing the directory to load data from. Defaults to ``None``\n use_apple2orange (bool, optional): use the apple2orange dataset to train the model. Defaults to ``False``\n use_summer2winter_yosemite (bool, optional): use the summer2winter_yosemite dataset to train the model. Defaults to ``False``\n use_horse2zebra (bool, optional): use the horse2zebra dataset to train the model. Defaults to ``False``\n use_monet2photo (bool, optional): use the monet2photo dataset to train the model. Defaults to ``False``\n use_cezanne2photo (bool, optional): use the cezanne2photo dataset to train the model. Defaults to ``False``\n use_ukiyoe2photo (bool, optional): use the ukiyoe2photo dataset to train the model. Defaults to ``False``\n use_vangogh2photo (bool, optional): use the vangogh2photo dataset to train the model. Defaults to ``False``\n use_maps (bool, optional): use the maps dataset to train the model. Defaults to ``False``\n use_cityscapes (bool, optional): use the cityscapes dataset to train the model. Defaults to ``False``\n use_facades (bool, optional): use the facades dataset to train the model. Defaults to ``False``\n use_iphone2dslr_flower (bool, optional): use the iphone2dslr_flower dataset to train the model. Defaults to ``False``\n batch_size (int, optional): mini batch size for training the model. 
Defaults to ``32``\n\n Return:\n four tensorflow dataset objects representing trainA, trainB, testA, testB \n ' if use_apple2orange: data_obj = cyclegan_dataloader(dataset_name='apple2orange') elif use_summer2winter_yosemite: data_obj = cyclegan_dataloader(dataset_name='summer2winter_yosemite') elif use_horse2zebra: data_obj = cyclegan_dataloader(dataset_name='horse2zebra') elif use_monet2photo: data_obj = cyclegan_dataloader(dataset_name='monet2photo') elif use_cezanne2photo: data_obj = cyclegan_dataloader(dataset_name='cezanne2photo') elif use_ukiyoe2photo: data_obj = cyclegan_dataloader(dataset_name='ukiyoe2photo') elif use_vangogh2photo: data_obj = cyclegan_dataloader(dataset_name='vangogh2photo') elif use_maps: data_obj = cyclegan_dataloader(dataset_name='maps') elif use_cityscapes: data_obj = cyclegan_dataloader(dataset_name='cityscapes') elif use_facades: data_obj = cyclegan_dataloader(dataset_name='facades') elif use_iphone2dslr_flower: data_obj = cyclegan_dataloader(dataset_name='iphone2dslr_flower') else: data_obj = cyclegan_dataloader(datadir=data_dir) (trainA, trainB, testA, testB) = data_obj.load_dataset() for data in trainA.take(1): self.img_size = data.shape self.channels = data.shape[(- 1)] trainA = trainA.shuffle(100000).batch(batch_size) trainB = trainB.shuffle(100000).batch(batch_size) testA = testA.shuffle(100000).batch(batch_size) testB = testB.shuffle(100000).batch(batch_size) return (trainA, trainB, testA, testB)<|docstring|>Load data to train the model Args: data_dir (str, optional): string representing the directory to load data from. Defaults to ``None`` use_apple2orange (bool, optional): use the apple2orange dataset to train the model. Defaults to ``False`` use_summer2winter_yosemite (bool, optional): use the summer2winter_yosemite dataset to train the model. Defaults to ``False`` use_horse2zebra (bool, optional): use the horse2zebra dataset to train the model. 
Defaults to ``False`` use_monet2photo (bool, optional): use the monet2photo dataset to train the model. Defaults to ``False`` use_cezanne2photo (bool, optional): use the cezanne2photo dataset to train the model. Defaults to ``False`` use_ukiyoe2photo (bool, optional): use the ukiyoe2photo dataset to train the model. Defaults to ``False`` use_vangogh2photo (bool, optional): use the vangogh2photo dataset to train the model. Defaults to ``False`` use_maps (bool, optional): use the maps dataset to train the model. Defaults to ``False`` use_cityscapes (bool, optional): use the cityscapes dataset to train the model. Defaults to ``False`` use_facades (bool, optional): use the facades dataset to train the model. Defaults to ``False`` use_iphone2dslr_flower (bool, optional): use the iphone2dslr_flower dataset to train the model. Defaults to ``False`` batch_size (int, optional): mini batch size for training the model. Defaults to ``32`` Return: four tensorflow dataset objects representing trainA, trainB, testA, testB<|endoftext|>
ebe9d30a3628ff54fc5234951793cbecd51755d766f7f3486e8e6b9f80c9a19b
def get_sample(self, data=None, n_samples=1, save_dir=None): 'View sample of the data\n\n Args:\n data (tf.data object): dataset to load samples from\n n_samples (int, optional): number of samples to load. Defaults to ``1``\n save_dir (str, optional): directory to save the sample images. Defaults to ``None``\n\n Return:\n ``None`` if save_dir is ``not None``, otherwise returns numpy array of samples with shape (n_samples, img_shape)\n ' assert (data is not None), 'Data not provided' sample_images = [] data.unbatch() for img in data.take(n_samples): img = img.numpy() sample_images.append(img[0]) sample_images = np.array(sample_images) if (save_dir is None): return sample_images assert os.path.exists(save_dir), 'Directory does not exist' for (i, sample) in enumerate(sample_images): imageio.imwrite(os.path.join(save_dir, (('sample_' + str(i)) + '.jpg')), sample)
View sample of the data Args: data (tf.data object): dataset to load samples from n_samples (int, optional): number of samples to load. Defaults to ``1`` save_dir (str, optional): directory to save the sample images. Defaults to ``None`` Return: ``None`` if save_dir is ``not None``, otherwise returns numpy array of samples with shape (n_samples, img_shape)
simplegan/gan/cyclegan.py
get_sample
grohith327/EasyGAN
23
python
def get_sample(self, data=None, n_samples=1, save_dir=None): 'View sample of the data\n\n Args:\n data (tf.data object): dataset to load samples from\n n_samples (int, optional): number of samples to load. Defaults to ``1``\n save_dir (str, optional): directory to save the sample images. Defaults to ``None``\n\n Return:\n ``None`` if save_dir is ``not None``, otherwise returns numpy array of samples with shape (n_samples, img_shape)\n ' assert (data is not None), 'Data not provided' sample_images = [] data.unbatch() for img in data.take(n_samples): img = img.numpy() sample_images.append(img[0]) sample_images = np.array(sample_images) if (save_dir is None): return sample_images assert os.path.exists(save_dir), 'Directory does not exist' for (i, sample) in enumerate(sample_images): imageio.imwrite(os.path.join(save_dir, (('sample_' + str(i)) + '.jpg')), sample)
def get_sample(self, data=None, n_samples=1, save_dir=None): 'View sample of the data\n\n Args:\n data (tf.data object): dataset to load samples from\n n_samples (int, optional): number of samples to load. Defaults to ``1``\n save_dir (str, optional): directory to save the sample images. Defaults to ``None``\n\n Return:\n ``None`` if save_dir is ``not None``, otherwise returns numpy array of samples with shape (n_samples, img_shape)\n ' assert (data is not None), 'Data not provided' sample_images = [] data.unbatch() for img in data.take(n_samples): img = img.numpy() sample_images.append(img[0]) sample_images = np.array(sample_images) if (save_dir is None): return sample_images assert os.path.exists(save_dir), 'Directory does not exist' for (i, sample) in enumerate(sample_images): imageio.imwrite(os.path.join(save_dir, (('sample_' + str(i)) + '.jpg')), sample)<|docstring|>View sample of the data Args: data (tf.data object): dataset to load samples from n_samples (int, optional): number of samples to load. Defaults to ``1`` save_dir (str, optional): directory to save the sample images. Defaults to ``None`` Return: ``None`` if save_dir is ``not None``, otherwise returns numpy array of samples with shape (n_samples, img_shape)<|endoftext|>
9c5e49e0f9beb3d7850ee8bcb5807bcb1eb8212f9a45576bc539dec0cd74e107
def discriminator(self): 'Discriminator module for CycleGAN. Use it as a regular TensorFlow 2.0 Keras Model.\n\n Return:\n A tf.keras model \n ' kernel_initializer = self.config['kernel_initializer'] kernel_size = self.config['kernel_size'] disc_channels = self.config['disc_channels'] inputs = Input(shape=self.img_size) x = inputs down_stack = [] for (i, channel) in enumerate(disc_channels[:(- 1)]): if (i == 0): down_stack.append(self._downsample(channel, kernel_size=kernel_size, kernel_initializer=kernel_initializer, batchnorm=False)) else: down_stack.append(self._downsample(channel, kernel_size=kernel_size, kernel_initializer=kernel_initializer)) down_stack.append(ZeroPadding2D()) down_stack.append(Conv2D(disc_channels[(- 1)], kernel_size=kernel_size, strides=1, kernel_initializer=kernel_initializer, use_bias=False)) down_stack.append(BatchNormalization()) down_stack.append(LeakyReLU()) down_stack.append(ZeroPadding2D()) last = Conv2D(1, kernel_size=kernel_size, strides=1, kernel_initializer=kernel_initializer) for down in down_stack: x = down(x) out = last(x) model = Model(inputs=inputs, outputs=out) return model
Discriminator module for CycleGAN. Use it as a regular TensorFlow 2.0 Keras Model. Return: A tf.keras model
simplegan/gan/cyclegan.py
discriminator
grohith327/EasyGAN
23
python
def discriminator(self): 'Discriminator module for CycleGAN. Use it as a regular TensorFlow 2.0 Keras Model.\n\n Return:\n A tf.keras model \n ' kernel_initializer = self.config['kernel_initializer'] kernel_size = self.config['kernel_size'] disc_channels = self.config['disc_channels'] inputs = Input(shape=self.img_size) x = inputs down_stack = [] for (i, channel) in enumerate(disc_channels[:(- 1)]): if (i == 0): down_stack.append(self._downsample(channel, kernel_size=kernel_size, kernel_initializer=kernel_initializer, batchnorm=False)) else: down_stack.append(self._downsample(channel, kernel_size=kernel_size, kernel_initializer=kernel_initializer)) down_stack.append(ZeroPadding2D()) down_stack.append(Conv2D(disc_channels[(- 1)], kernel_size=kernel_size, strides=1, kernel_initializer=kernel_initializer, use_bias=False)) down_stack.append(BatchNormalization()) down_stack.append(LeakyReLU()) down_stack.append(ZeroPadding2D()) last = Conv2D(1, kernel_size=kernel_size, strides=1, kernel_initializer=kernel_initializer) for down in down_stack: x = down(x) out = last(x) model = Model(inputs=inputs, outputs=out) return model
def discriminator(self): 'Discriminator module for CycleGAN. Use it as a regular TensorFlow 2.0 Keras Model.\n\n Return:\n A tf.keras model \n ' kernel_initializer = self.config['kernel_initializer'] kernel_size = self.config['kernel_size'] disc_channels = self.config['disc_channels'] inputs = Input(shape=self.img_size) x = inputs down_stack = [] for (i, channel) in enumerate(disc_channels[:(- 1)]): if (i == 0): down_stack.append(self._downsample(channel, kernel_size=kernel_size, kernel_initializer=kernel_initializer, batchnorm=False)) else: down_stack.append(self._downsample(channel, kernel_size=kernel_size, kernel_initializer=kernel_initializer)) down_stack.append(ZeroPadding2D()) down_stack.append(Conv2D(disc_channels[(- 1)], kernel_size=kernel_size, strides=1, kernel_initializer=kernel_initializer, use_bias=False)) down_stack.append(BatchNormalization()) down_stack.append(LeakyReLU()) down_stack.append(ZeroPadding2D()) last = Conv2D(1, kernel_size=kernel_size, strides=1, kernel_initializer=kernel_initializer) for down in down_stack: x = down(x) out = last(x) model = Model(inputs=inputs, outputs=out) return model<|docstring|>Discriminator module for CycleGAN. Use it as a regular TensorFlow 2.0 Keras Model. Return: A tf.keras model<|endoftext|>
7d3a0d79afc8b4de951a016a5a3ad5417a75b8490db6b5d6d76a32d537f51435
def __load_model(self): '\n Call build model to initialize the two generators and discriminators\n\n Note: Forward and backward GANs have the same architecture\n ' (self.gen_model_g, self.gen_model_f) = (self.generator(), self.generator()) (self.disc_model_x, self.disc_model_y) = (self.discriminator(), self.discriminator()) if (self.config['gen_g_path'] is not None): self.gen_model_g.load_weights(self.config['gen_g_path']) print('Generator-G checkpoint restored') if (self.config['gen_f_path'] is not None): self.gen_model_f.load_weights(self.config['gen_f_path']) print('Generator-F checkpoint restored') if (self.config['disc_x_path'] is not None): self.disc_model_x.load_weights(self.config['disc_x_path']) print('Discriminator-X checkpoint restored') if (self.config['disc_y_path'] is not None): self.disc_model_y.load_weights(self.config['disc_y_path']) print('Discriminator-Y checkpoint restored')
Call build model to initialize the two generators and discriminators Note: Forward and backward GANs have the same architecture
simplegan/gan/cyclegan.py
__load_model
grohith327/EasyGAN
23
python
def __load_model(self): '\n Call build model to initialize the two generators and discriminators\n\n Note: Forward and backward GANs have the same architecture\n ' (self.gen_model_g, self.gen_model_f) = (self.generator(), self.generator()) (self.disc_model_x, self.disc_model_y) = (self.discriminator(), self.discriminator()) if (self.config['gen_g_path'] is not None): self.gen_model_g.load_weights(self.config['gen_g_path']) print('Generator-G checkpoint restored') if (self.config['gen_f_path'] is not None): self.gen_model_f.load_weights(self.config['gen_f_path']) print('Generator-F checkpoint restored') if (self.config['disc_x_path'] is not None): self.disc_model_x.load_weights(self.config['disc_x_path']) print('Discriminator-X checkpoint restored') if (self.config['disc_y_path'] is not None): self.disc_model_y.load_weights(self.config['disc_y_path']) print('Discriminator-Y checkpoint restored')
def __load_model(self): '\n Call build model to initialize the two generators and discriminators\n\n Note: Forward and backward GANs have the same architecture\n ' (self.gen_model_g, self.gen_model_f) = (self.generator(), self.generator()) (self.disc_model_x, self.disc_model_y) = (self.discriminator(), self.discriminator()) if (self.config['gen_g_path'] is not None): self.gen_model_g.load_weights(self.config['gen_g_path']) print('Generator-G checkpoint restored') if (self.config['gen_f_path'] is not None): self.gen_model_f.load_weights(self.config['gen_f_path']) print('Generator-F checkpoint restored') if (self.config['disc_x_path'] is not None): self.disc_model_x.load_weights(self.config['disc_x_path']) print('Discriminator-X checkpoint restored') if (self.config['disc_y_path'] is not None): self.disc_model_y.load_weights(self.config['disc_y_path']) print('Discriminator-Y checkpoint restored')<|docstring|>Call build model to initialize the two generators and discriminators Note: Forward and backward GANs have the same architecture<|endoftext|>
b16628b74a9a0813785109f0256105374155cedcd606a28a5afe1a084d2963b4
def fit(self, trainA=None, trainB=None, testA=None, testB=None, epochs=150,
        gen_g_optimizer='Adam', gen_f_optimizer='Adam', disc_x_optimizer='Adam',
        disc_y_optimizer='Adam', verbose=1, gen_g_learning_rate=0.0002,
        gen_f_learning_rate=0.0002, disc_x_learning_rate=0.0002,
        disc_y_learning_rate=0.0002, beta_1=0.5, tensorboard=False,
        save_model=None, LAMBDA=100, save_img_per_epoch=30):
    """Function to train the model.

    Args:
        trainA (tf.data object): training data A
        trainB (tf.data object): training data B
        testA (tf.data object): testing data A
        testB (tf.data object): testing data B
        epochs (int, optional): number of epochs to train the model. Defaults to ``150``
        gen_g_optimizer (str, optional): optimizer used to train generator `G`. Defaults to ``Adam``
        gen_f_optimizer (str, optional): optimizer used to train generator `F`. Defaults to ``Adam``
        disc_x_optimizer (str, optional): optimizer used to train discriminator `X`. Defaults to ``Adam``
        disc_y_optimizer (str, optional): optimizer used to train discriminator `Y`. Defaults to ``Adam``
        verbose (int, optional): 1 - prints training outputs, 0 - no outputs. Defaults to ``1``
        gen_g_learning_rate (float, optional): learning rate of the generator `G` optimizer. Defaults to ``2e-4``
        gen_f_learning_rate (float, optional): learning rate of the generator `F` optimizer. Defaults to ``2e-4``
        disc_x_learning_rate (float, optional): learning rate of the discriminator `X` optimizer. Defaults to ``2e-4``
        disc_y_learning_rate (float, optional): learning rate of the discriminator `Y` optimizer. Defaults to ``2e-4``
        beta_1 (float, optional): decay rate of the first moment. Set if ``Adam`` optimizer is used. Defaults to ``0.5``
        tensorboard (bool, optional): if true, writes loss values to ``logs/gradient_tape`` directory
            which aids visualization. Defaults to ``False``
        save_model (str, optional): Directory to save the trained model. Defaults to ``None``
        LAMBDA (int, optional): used to calculate generator loss. Defaults to ``100``
        save_img_per_epoch (int, optional): frequency of saving images during training. Defaults to ``30``
    """
    assert (trainA is not None), 'Initialize training data A through trainA parameter'
    assert (trainB is not None), 'Initialize training data B through trainB parameter'
    assert (testA is not None), 'Initialize testing data A through testA parameter'
    assert (testB is not None), 'Initialize testing data B through testB parameter'
    self.LAMBDA = LAMBDA
    # Build (and optionally restore) the two generators and two discriminators.
    self.__load_model()
    # Resolve each optimizer name string to a tf.keras optimizer instance;
    # beta_1 is only passed when the Adam optimizer is requested.
    kwargs = {}
    kwargs['learning_rate'] = gen_g_learning_rate
    if (gen_g_optimizer == 'Adam'):
        kwargs['beta_1'] = beta_1
    gen_g_optimizer = getattr(tf.keras.optimizers, gen_g_optimizer)(**kwargs)
    kwargs = {}
    kwargs['learning_rate'] = gen_f_learning_rate
    if (gen_f_optimizer == 'Adam'):
        kwargs['beta_1'] = beta_1
    gen_f_optimizer = getattr(tf.keras.optimizers, gen_f_optimizer)(**kwargs)
    kwargs = {}
    kwargs['learning_rate'] = disc_x_learning_rate
    if (disc_x_optimizer == 'Adam'):
        kwargs['beta_1'] = beta_1
    disc_x_optimizer = getattr(tf.keras.optimizers, disc_x_optimizer)(**kwargs)
    kwargs = {}
    kwargs['learning_rate'] = disc_y_learning_rate
    if (disc_y_optimizer == 'Adam'):
        kwargs['beta_1'] = beta_1
    disc_y_optimizer = getattr(tf.keras.optimizers, disc_y_optimizer)(**kwargs)
    if tensorboard:
        # One summary writer per run, keyed by a timestamp.
        current_time = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
        train_log_dir = (('logs/gradient_tape/' + current_time) + '/train')
        train_summary_writer = tf.summary.create_file_writer(train_log_dir)
    steps = 0
    curr_dir = os.getcwd()
    # Directory for intermediate sample images; an existing directory is fine.
    try:
        os.mkdir(os.path.join(curr_dir, 'cyclegan_samples'))
    except OSError:
        pass
    self.save_img_dir = os.path.join(curr_dir, 'cyclegan_samples')
    # Running means of the four losses, reset at the start of every epoch.
    generator_g_loss = tf.keras.metrics.Mean()
    discriminator_x_loss = tf.keras.metrics.Mean()
    generator_f_loss = tf.keras.metrics.Mean()
    discriminator_y_loss = tf.keras.metrics.Mean()
    # Batch count per epoch for the progress bar; fall back to materializing
    # the dataset when tf.data cannot report its cardinality.
    try:
        total = tf.data.experimental.cardinality(trainA).numpy()
    except:
        total = 0
    total = (total if (total > 0) else len(list(trainA)))
    for epoch in range(epochs):
        generator_g_loss.reset_states()
        generator_f_loss.reset_states()
        discriminator_x_loss.reset_states()
        discriminator_y_loss.reset_states()
        pbar = tqdm(total=total, desc=('Epoch - ' + str((epoch + 1))))
        for (image_x, image_y) in tf.data.Dataset.zip((trainA, trainB)):
            # persistent=True: the same tape serves four gradient() calls below.
            with tf.GradientTape(persistent=True) as tape:
                # Forward cycle X -> Y -> X and backward cycle Y -> X -> Y.
                fake_y = self.gen_model_g(image_x, training=True)
                cycled_x = self.gen_model_f(fake_y, training=True)
                fake_x = self.gen_model_f(image_y, training=True)
                cycled_y = self.gen_model_g(fake_x, training=True)
                # Same-domain mappings used for the identity loss.
                same_x = self.gen_model_f(image_x, training=True)
                same_y = self.gen_model_g(image_y, training=True)
                disc_real_x = self.disc_model_x(image_x, training=True)
                disc_real_y = self.disc_model_y(image_y, training=True)
                disc_fake_x = self.disc_model_x(fake_x, training=True)
                disc_fake_y = self.disc_model_y(fake_y, training=True)
                gen_g_loss = gan_generator_loss(disc_fake_y)
                gen_f_loss = gan_generator_loss(disc_fake_x)
                # The cycle-consistency term is shared by both generator losses.
                total_cycle_loss = (cycle_loss(image_x, cycled_x, self.LAMBDA) + cycle_loss(image_y, cycled_y, self.LAMBDA))
                total_gen_g_loss = ((gen_g_loss + total_cycle_loss) + identity_loss(image_y, same_y, self.LAMBDA))
                total_gen_f_loss = ((gen_f_loss + total_cycle_loss) + identity_loss(image_x, same_x, self.LAMBDA))
                disc_x_loss = gan_discriminator_loss(disc_real_x, disc_fake_x)
                disc_y_loss = gan_discriminator_loss(disc_real_y, disc_fake_y)
            generator_g_gradients = tape.gradient(total_gen_g_loss, self.gen_model_g.trainable_variables)
            generator_f_gradients = tape.gradient(total_gen_f_loss, self.gen_model_f.trainable_variables)
            discriminator_x_gradients = tape.gradient(disc_x_loss, self.disc_model_x.trainable_variables)
            discriminator_y_gradients = tape.gradient(disc_y_loss, self.disc_model_y.trainable_variables)
            gen_g_optimizer.apply_gradients(zip(generator_g_gradients, self.gen_model_g.trainable_variables))
            gen_f_optimizer.apply_gradients(zip(generator_f_gradients, self.gen_model_f.trainable_variables))
            disc_x_optimizer.apply_gradients(zip(discriminator_x_gradients, self.disc_model_x.trainable_variables))
            disc_y_optimizer.apply_gradients(zip(discriminator_y_gradients, self.disc_model_y.trainable_variables))
            # Fold the per-batch losses into the epoch-level running means.
            generator_g_loss(total_gen_g_loss)
            generator_f_loss(total_gen_f_loss)
            discriminator_x_loss(disc_x_loss)
            discriminator_y_loss(disc_y_loss)
            steps += 1
            pbar.update(1)
            pbar.set_postfix(disc_x_loss=discriminator_x_loss.result().numpy(), disc_y_loss=discriminator_y_loss.result().numpy(), gen_g_loss=generator_g_loss.result().numpy(), gen_f_loss=generator_f_loss.result().numpy())
            if tensorboard:
                # Log raw per-batch losses against the global step counter.
                with train_summary_writer.as_default():
                    tf.summary.scalar('Generator_G_loss', total_gen_g_loss.numpy(), step=steps)
                    tf.summary.scalar('Generator_F_loss', total_gen_f_loss.numpy(), step=steps)
                    tf.summary.scalar('Discriminator_X_loss', disc_x_loss.numpy(), step=steps)
                    tf.summary.scalar('Discriminator_Y_loss', disc_y_loss.numpy(), step=steps)
        if ((epoch % save_img_per_epoch) == 0):
            # Periodically translate one test batch and save the result images.
            for image in testA.take(1):
                self._save_samples(self.gen_model_g, image, str(epoch))
        if (verbose == 1):
            print('Epoch:', (epoch + 1), 'Generator_G_loss:', generator_g_loss.result().numpy(), 'Generator_F_loss:', generator_f_loss.result().numpy(), 'Discriminator_X_loss:', discriminator_x_loss.result().numpy(), 'Discriminator_Y_loss:', discriminator_y_loss.result().numpy())
    if (save_model is not None):
        assert isinstance(save_model, str), 'Not a valid directory'
        # Account for a missing trailing slash before writing the checkpoints.
        if (save_model[(- 1)] != '/'):
            self.gen_model_g.save_weights((save_model + '/generator_g_checkpoint'))
            self.gen_model_f.save_weights((save_model + '/generator_f_checkpoint'))
            self.disc_model_x.save_weights((save_model + '/discrimnator_x_checkpoint'))
            self.disc_model_y.save_weights((save_model + '/discrimnator_y_checkpoint'))
        else:
            self.gen_model_g.save_weights((save_model + 'generator_g_checkpoint'))
            self.gen_model_f.save_weights((save_model + 'generator_f_checkpoint'))
            self.disc_model_x.save_weights((save_model + 'discrimnator_x_checkpoint'))
            self.disc_model_y.save_weights((save_model + 'discrimnator_y_checkpoint'))
Function to train the model Args: trainA (tf.data object): training data A trainB (tf.data object): training data B testA (tf.data object): testing data A testB (tf.data object): testing data B epochs (int, optional): number of epochs to train the model. Defaults to ``150`` gen_g_optimizer (str, optional): optimizer used to train generator `G`. Defaults to ``Adam`` gen_f_optimizer (str, optional): optimizer used to train generator `F`. Defaults to ``Adam`` disc_x_optimizer (str, optional): optimizer used to train discriminator `X`. Defaults to ``Adam`` disc_y_optimizer (str, optional): optimizer used to train discriminator `Y`. Defaults to ``Adam`` verbose (int, optional): 1 - prints training outputs, 0 - no outputs. Defaults to ``1`` gen_g_learning_rate (float, optional): learning rate of the generator `G` optimizer. Defaults to ``2e-4`` gen_f_learning_rate (float, optional): learning rate of the generator `F` optimizer. Defaults to ``2e-4`` disc_x_learning_rate (float, optional): learning rate of the discriminator `X` optimizer. Defaults to ``2e-4`` disc_y_learning_rate (float, optional): learning rate of the discriminator `Y` optimizer. Defaults to ``2e-4`` beta_1 (float, optional): decay rate of the first moment. Set if ``Adam`` optimizer is used. Defaults to ``0.5`` tensorboard (bool, optional): if true, writes loss values to ``logs/gradient_tape`` directory which aids visualization. Defaults to ``False`` save_model (str, optional): Directory to save the trained model. Defaults to ``None`` LAMBDA (int, optional): used to calculate generator loss. Defaults to ``100`` save_img_per_epoch (int, optional): frequency of saving images during training. Defaults to ``30``
simplegan/gan/cyclegan.py
fit
grohith327/EasyGAN
23
python
def fit(self, trainA=None, trainB=None, testA=None, testB=None, epochs=150,
        gen_g_optimizer='Adam', gen_f_optimizer='Adam', disc_x_optimizer='Adam',
        disc_y_optimizer='Adam', verbose=1, gen_g_learning_rate=0.0002,
        gen_f_learning_rate=0.0002, disc_x_learning_rate=0.0002,
        disc_y_learning_rate=0.0002, beta_1=0.5, tensorboard=False,
        save_model=None, LAMBDA=100, save_img_per_epoch=30):
    """Function to train the model.

    Args:
        trainA (tf.data object): training data A
        trainB (tf.data object): training data B
        testA (tf.data object): testing data A
        testB (tf.data object): testing data B
        epochs (int, optional): number of epochs to train the model. Defaults to ``150``
        gen_g_optimizer (str, optional): optimizer used to train generator `G`. Defaults to ``Adam``
        gen_f_optimizer (str, optional): optimizer used to train generator `F`. Defaults to ``Adam``
        disc_x_optimizer (str, optional): optimizer used to train discriminator `X`. Defaults to ``Adam``
        disc_y_optimizer (str, optional): optimizer used to train discriminator `Y`. Defaults to ``Adam``
        verbose (int, optional): 1 - prints training outputs, 0 - no outputs. Defaults to ``1``
        gen_g_learning_rate (float, optional): learning rate of the generator `G` optimizer. Defaults to ``2e-4``
        gen_f_learning_rate (float, optional): learning rate of the generator `F` optimizer. Defaults to ``2e-4``
        disc_x_learning_rate (float, optional): learning rate of the discriminator `X` optimizer. Defaults to ``2e-4``
        disc_y_learning_rate (float, optional): learning rate of the discriminator `Y` optimizer. Defaults to ``2e-4``
        beta_1 (float, optional): decay rate of the first moment. Set if ``Adam`` optimizer is used. Defaults to ``0.5``
        tensorboard (bool, optional): if true, writes loss values to ``logs/gradient_tape`` directory
            which aids visualization. Defaults to ``False``
        save_model (str, optional): Directory to save the trained model. Defaults to ``None``
        LAMBDA (int, optional): used to calculate generator loss. Defaults to ``100``
        save_img_per_epoch (int, optional): frequency of saving images during training. Defaults to ``30``
    """
    assert (trainA is not None), 'Initialize training data A through trainA parameter'
    assert (trainB is not None), 'Initialize training data B through trainB parameter'
    assert (testA is not None), 'Initialize testing data A through testA parameter'
    assert (testB is not None), 'Initialize testing data B through testB parameter'
    self.LAMBDA = LAMBDA
    # Build (and optionally restore) the two generators and two discriminators.
    self.__load_model()
    # Resolve each optimizer name string to a tf.keras optimizer instance;
    # beta_1 is only passed when the Adam optimizer is requested.
    kwargs = {}
    kwargs['learning_rate'] = gen_g_learning_rate
    if (gen_g_optimizer == 'Adam'):
        kwargs['beta_1'] = beta_1
    gen_g_optimizer = getattr(tf.keras.optimizers, gen_g_optimizer)(**kwargs)
    kwargs = {}
    kwargs['learning_rate'] = gen_f_learning_rate
    if (gen_f_optimizer == 'Adam'):
        kwargs['beta_1'] = beta_1
    gen_f_optimizer = getattr(tf.keras.optimizers, gen_f_optimizer)(**kwargs)
    kwargs = {}
    kwargs['learning_rate'] = disc_x_learning_rate
    if (disc_x_optimizer == 'Adam'):
        kwargs['beta_1'] = beta_1
    disc_x_optimizer = getattr(tf.keras.optimizers, disc_x_optimizer)(**kwargs)
    kwargs = {}
    kwargs['learning_rate'] = disc_y_learning_rate
    if (disc_y_optimizer == 'Adam'):
        kwargs['beta_1'] = beta_1
    disc_y_optimizer = getattr(tf.keras.optimizers, disc_y_optimizer)(**kwargs)
    if tensorboard:
        # One summary writer per run, keyed by a timestamp.
        current_time = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
        train_log_dir = (('logs/gradient_tape/' + current_time) + '/train')
        train_summary_writer = tf.summary.create_file_writer(train_log_dir)
    steps = 0
    curr_dir = os.getcwd()
    # Directory for intermediate sample images; an existing directory is fine.
    try:
        os.mkdir(os.path.join(curr_dir, 'cyclegan_samples'))
    except OSError:
        pass
    self.save_img_dir = os.path.join(curr_dir, 'cyclegan_samples')
    # Running means of the four losses, reset at the start of every epoch.
    generator_g_loss = tf.keras.metrics.Mean()
    discriminator_x_loss = tf.keras.metrics.Mean()
    generator_f_loss = tf.keras.metrics.Mean()
    discriminator_y_loss = tf.keras.metrics.Mean()
    # Batch count per epoch for the progress bar; fall back to materializing
    # the dataset when tf.data cannot report its cardinality.
    try:
        total = tf.data.experimental.cardinality(trainA).numpy()
    except:
        total = 0
    total = (total if (total > 0) else len(list(trainA)))
    for epoch in range(epochs):
        generator_g_loss.reset_states()
        generator_f_loss.reset_states()
        discriminator_x_loss.reset_states()
        discriminator_y_loss.reset_states()
        pbar = tqdm(total=total, desc=('Epoch - ' + str((epoch + 1))))
        for (image_x, image_y) in tf.data.Dataset.zip((trainA, trainB)):
            # persistent=True: the same tape serves four gradient() calls below.
            with tf.GradientTape(persistent=True) as tape:
                # Forward cycle X -> Y -> X and backward cycle Y -> X -> Y.
                fake_y = self.gen_model_g(image_x, training=True)
                cycled_x = self.gen_model_f(fake_y, training=True)
                fake_x = self.gen_model_f(image_y, training=True)
                cycled_y = self.gen_model_g(fake_x, training=True)
                # Same-domain mappings used for the identity loss.
                same_x = self.gen_model_f(image_x, training=True)
                same_y = self.gen_model_g(image_y, training=True)
                disc_real_x = self.disc_model_x(image_x, training=True)
                disc_real_y = self.disc_model_y(image_y, training=True)
                disc_fake_x = self.disc_model_x(fake_x, training=True)
                disc_fake_y = self.disc_model_y(fake_y, training=True)
                gen_g_loss = gan_generator_loss(disc_fake_y)
                gen_f_loss = gan_generator_loss(disc_fake_x)
                # The cycle-consistency term is shared by both generator losses.
                total_cycle_loss = (cycle_loss(image_x, cycled_x, self.LAMBDA) + cycle_loss(image_y, cycled_y, self.LAMBDA))
                total_gen_g_loss = ((gen_g_loss + total_cycle_loss) + identity_loss(image_y, same_y, self.LAMBDA))
                total_gen_f_loss = ((gen_f_loss + total_cycle_loss) + identity_loss(image_x, same_x, self.LAMBDA))
                disc_x_loss = gan_discriminator_loss(disc_real_x, disc_fake_x)
                disc_y_loss = gan_discriminator_loss(disc_real_y, disc_fake_y)
            generator_g_gradients = tape.gradient(total_gen_g_loss, self.gen_model_g.trainable_variables)
            generator_f_gradients = tape.gradient(total_gen_f_loss, self.gen_model_f.trainable_variables)
            discriminator_x_gradients = tape.gradient(disc_x_loss, self.disc_model_x.trainable_variables)
            discriminator_y_gradients = tape.gradient(disc_y_loss, self.disc_model_y.trainable_variables)
            gen_g_optimizer.apply_gradients(zip(generator_g_gradients, self.gen_model_g.trainable_variables))
            gen_f_optimizer.apply_gradients(zip(generator_f_gradients, self.gen_model_f.trainable_variables))
            disc_x_optimizer.apply_gradients(zip(discriminator_x_gradients, self.disc_model_x.trainable_variables))
            disc_y_optimizer.apply_gradients(zip(discriminator_y_gradients, self.disc_model_y.trainable_variables))
            # Fold the per-batch losses into the epoch-level running means.
            generator_g_loss(total_gen_g_loss)
            generator_f_loss(total_gen_f_loss)
            discriminator_x_loss(disc_x_loss)
            discriminator_y_loss(disc_y_loss)
            steps += 1
            pbar.update(1)
            pbar.set_postfix(disc_x_loss=discriminator_x_loss.result().numpy(), disc_y_loss=discriminator_y_loss.result().numpy(), gen_g_loss=generator_g_loss.result().numpy(), gen_f_loss=generator_f_loss.result().numpy())
            if tensorboard:
                # Log raw per-batch losses against the global step counter.
                with train_summary_writer.as_default():
                    tf.summary.scalar('Generator_G_loss', total_gen_g_loss.numpy(), step=steps)
                    tf.summary.scalar('Generator_F_loss', total_gen_f_loss.numpy(), step=steps)
                    tf.summary.scalar('Discriminator_X_loss', disc_x_loss.numpy(), step=steps)
                    tf.summary.scalar('Discriminator_Y_loss', disc_y_loss.numpy(), step=steps)
        if ((epoch % save_img_per_epoch) == 0):
            # Periodically translate one test batch and save the result images.
            for image in testA.take(1):
                self._save_samples(self.gen_model_g, image, str(epoch))
        if (verbose == 1):
            print('Epoch:', (epoch + 1), 'Generator_G_loss:', generator_g_loss.result().numpy(), 'Generator_F_loss:', generator_f_loss.result().numpy(), 'Discriminator_X_loss:', discriminator_x_loss.result().numpy(), 'Discriminator_Y_loss:', discriminator_y_loss.result().numpy())
    if (save_model is not None):
        assert isinstance(save_model, str), 'Not a valid directory'
        # Account for a missing trailing slash before writing the checkpoints.
        if (save_model[(- 1)] != '/'):
            self.gen_model_g.save_weights((save_model + '/generator_g_checkpoint'))
            self.gen_model_f.save_weights((save_model + '/generator_f_checkpoint'))
            self.disc_model_x.save_weights((save_model + '/discrimnator_x_checkpoint'))
            self.disc_model_y.save_weights((save_model + '/discrimnator_y_checkpoint'))
        else:
            self.gen_model_g.save_weights((save_model + 'generator_g_checkpoint'))
            self.gen_model_f.save_weights((save_model + 'generator_f_checkpoint'))
            self.disc_model_x.save_weights((save_model + 'discrimnator_x_checkpoint'))
            self.disc_model_y.save_weights((save_model + 'discrimnator_y_checkpoint'))
3a28295b2579eadebbfc4e5eaed8528088b45718a17b106ca9efb2e965702e62
def generate_samples(self, test_ds=None, save_dir=None): 'Generate samples using the trained model\n\n Args:\n test_ds (tf.data object): test data object used to generate samples`\n save_dir (str, optional): directory to save the generated images. Defaults to ``None``\n\n Return:\n returns ``None`` if save_dir is ``not None``, otherwise returns a numpy array with generated samples\n ' assert (test_ds is not None), 'Enter input test dataset' if (self.gen_model_g is None): self.__load_model() generated_samples = [] for image in test_ds: gen_image = self.gen_model_g(image, training=False).numpy() generated_samples.append(gen_image[0]) generated_samples = np.array(generated_samples) if (save_dir is None): return generated_samples assert os.path.exists(save_dir), 'Directory does not exist' for (i, sample) in enumerate(generated_samples): imageio.imwrite(os.path.join(save_dir, (('sample_' + str(i)) + '.jpg')), sample)
Generate samples using the trained model Args: test_ds (tf.data object): test data object used to generate samples save_dir (str, optional): directory to save the generated images. Defaults to ``None`` Returns: ``None`` if ``save_dir`` is not ``None``, otherwise a numpy array with generated samples
simplegan/gan/cyclegan.py
generate_samples
grohith327/EasyGAN
23
python
def generate_samples(self, test_ds=None, save_dir=None):
    """Generate samples using the trained model.

    Args:
        test_ds (tf.data object): test data object used to generate samples
        save_dir (str, optional): directory to save the generated images. Defaults to ``None``

    Return:
        returns ``None`` if ``save_dir`` is not ``None``, otherwise a numpy array with generated samples
    """
    assert (test_ds is not None), 'Enter input test dataset'
    # Build/restore the networks if fit() has not been called yet.
    if (self.gen_model_g is None):
        self.__load_model()
    generated_samples = []
    for image in test_ds:
        # Translate the batch with generator G; keep the first image of the batch.
        gen_image = self.gen_model_g(image, training=False).numpy()
        generated_samples.append(gen_image[0])
    generated_samples = np.array(generated_samples)
    if (save_dir is None):
        return generated_samples
    assert os.path.exists(save_dir), 'Directory does not exist'
    # Write each generated sample to save_dir as sample_<i>.jpg.
    for (i, sample) in enumerate(generated_samples):
        imageio.imwrite(os.path.join(save_dir, (('sample_' + str(i)) + '.jpg')), sample)
e44a108d6e66f6957e63883a8df751d1d40320cd9faaee1728ccb8cdc1b83f1c
def simMetaData(self, dataKey): 'Produces a dictionary of metadata for the given datakey. Meta data consists of:\n - dataKey (string)\n - includesInitial (bool)\n - numComponents (int)\n - numFrames (int)\n - nodeData (bool): True if node data\n - elmtData (bool): True if element data\n - grainData (bool): True if grain data\n - shape (tuple): shape of data array\n - notVector (bool): True if the data components do not represent a vector\n\n Args:\n dataKey (string): Sim datakey\n\n Returns:\n dict: Metadata\n ' includesInitial = (self.simData[dataKey].shape[(- 1)] == (self.numFrames + 1)) numComponents = (self.simData[dataKey].shape[(- 2)] if (len(self.simData[dataKey].shape) == 3) else 1) nodeData = (self.simData[dataKey].shape[0] == self.numNodes) elmtData = (self.simData[dataKey].shape[0] == self.numElmts) grainData = (self.simData[dataKey].shape[0] == self.numGrains) notVector = (dataKey in self.simDataKeysNotVector) metaData = {'dataKey': dataKey, 'includesInitial': includesInitial, 'numComponents': numComponents, 'numFrames': self.simData[dataKey].shape[(- 1)], 'nodeData': nodeData, 'elmtData': elmtData, 'grainData': grainData, 'shape': self.simData[dataKey].shape, 'notVector': notVector} return metaData
Produces a dictionary of metadata for the given datakey. Meta data consists of: - dataKey (string) - includesInitial (bool) - numComponents (int) - numFrames (int) - nodeData (bool): True if node data - elmtData (bool): True if element data - grainData (bool): True if grain data - shape (tuple): shape of data array - notVector (bool): True if the data components do not represent a vector Args: dataKey (string): Sim datakey Returns: dict: Metadata
fepx.py
simMetaData
MechMicroMan/DefDAP-allan
0
python
def simMetaData(self, dataKey): 'Produces a dictionary of metadata for the given datakey. Meta data consists of:\n - dataKey (string)\n - includesInitial (bool)\n - numComponents (int)\n - numFrames (int)\n - nodeData (bool): True if node data\n - elmtData (bool): True if element data\n - grainData (bool): True if grain data\n - shape (tuple): shape of data array\n - notVector (bool): True if the data components do not represent a vector\n\n Args:\n dataKey (string): Sim datakey\n\n Returns:\n dict: Metadata\n ' includesInitial = (self.simData[dataKey].shape[(- 1)] == (self.numFrames + 1)) numComponents = (self.simData[dataKey].shape[(- 2)] if (len(self.simData[dataKey].shape) == 3) else 1) nodeData = (self.simData[dataKey].shape[0] == self.numNodes) elmtData = (self.simData[dataKey].shape[0] == self.numElmts) grainData = (self.simData[dataKey].shape[0] == self.numGrains) notVector = (dataKey in self.simDataKeysNotVector) metaData = {'dataKey': dataKey, 'includesInitial': includesInitial, 'numComponents': numComponents, 'numFrames': self.simData[dataKey].shape[(- 1)], 'nodeData': nodeData, 'elmtData': elmtData, 'grainData': grainData, 'shape': self.simData[dataKey].shape, 'notVector': notVector} return metaData
def simMetaData(self, dataKey): 'Produces a dictionary of metadata for the given datakey. Meta data consists of:\n - dataKey (string)\n - includesInitial (bool)\n - numComponents (int)\n - numFrames (int)\n - nodeData (bool): True if node data\n - elmtData (bool): True if element data\n - grainData (bool): True if grain data\n - shape (tuple): shape of data array\n - notVector (bool): True if the data components do not represent a vector\n\n Args:\n dataKey (string): Sim datakey\n\n Returns:\n dict: Metadata\n ' includesInitial = (self.simData[dataKey].shape[(- 1)] == (self.numFrames + 1)) numComponents = (self.simData[dataKey].shape[(- 2)] if (len(self.simData[dataKey].shape) == 3) else 1) nodeData = (self.simData[dataKey].shape[0] == self.numNodes) elmtData = (self.simData[dataKey].shape[0] == self.numElmts) grainData = (self.simData[dataKey].shape[0] == self.numGrains) notVector = (dataKey in self.simDataKeysNotVector) metaData = {'dataKey': dataKey, 'includesInitial': includesInitial, 'numComponents': numComponents, 'numFrames': self.simData[dataKey].shape[(- 1)], 'nodeData': nodeData, 'elmtData': elmtData, 'grainData': grainData, 'shape': self.simData[dataKey].shape, 'notVector': notVector} return metaData<|docstring|>Produces a dictionary of metadata for the given datakey. Meta data consists of: - dataKey (string) - includesInitial (bool) - numComponents (int) - numFrames (int) - nodeData (bool): True if node data - elmtData (bool): True if element data - grainData (bool): True if grain data - shape (tuple): shape of data array - notVector (bool): True if the data components do not represent a vector Args: dataKey (string): Sim datakey Returns: dict: Metadata<|endoftext|>
43a3556e00ce836cf10afe2d46784d4ae12d80ce95f7c65b74a2241106fa7b3a
def constructVtkMesh(self): 'Create VTK mesh using initial (undeformaed) node positions\n\n Returns:\n vtkUnstructuredGrid: VTK mesh\n ' CON_ORDER = [0, 2, 4, 9, 1, 3, 5, 6, 7, 8] ELMT_TYPE = 24 points = vtk.vtkPoints() for coord in self.nodePos: points.InsertNextPoint(coord) uGrid = vtk.vtkUnstructuredGrid() uGrid.SetPoints(points) con = vtk.vtkIdList() for elmtCon in self.elmtCon: con.Reset() for pointID in elmtCon[CON_ORDER]: con.InsertNextId(pointID) uGrid.InsertNextCell(ELMT_TYPE, con) return uGrid
Create VTK mesh using initial (undeformaed) node positions Returns: vtkUnstructuredGrid: VTK mesh
fepx.py
constructVtkMesh
MechMicroMan/DefDAP-allan
0
python
def constructVtkMesh(self): 'Create VTK mesh using initial (undeformaed) node positions\n\n Returns:\n vtkUnstructuredGrid: VTK mesh\n ' CON_ORDER = [0, 2, 4, 9, 1, 3, 5, 6, 7, 8] ELMT_TYPE = 24 points = vtk.vtkPoints() for coord in self.nodePos: points.InsertNextPoint(coord) uGrid = vtk.vtkUnstructuredGrid() uGrid.SetPoints(points) con = vtk.vtkIdList() for elmtCon in self.elmtCon: con.Reset() for pointID in elmtCon[CON_ORDER]: con.InsertNextId(pointID) uGrid.InsertNextCell(ELMT_TYPE, con) return uGrid
def constructVtkMesh(self): 'Create VTK mesh using initial (undeformaed) node positions\n\n Returns:\n vtkUnstructuredGrid: VTK mesh\n ' CON_ORDER = [0, 2, 4, 9, 1, 3, 5, 6, 7, 8] ELMT_TYPE = 24 points = vtk.vtkPoints() for coord in self.nodePos: points.InsertNextPoint(coord) uGrid = vtk.vtkUnstructuredGrid() uGrid.SetPoints(points) con = vtk.vtkIdList() for elmtCon in self.elmtCon: con.Reset() for pointID in elmtCon[CON_ORDER]: con.InsertNextId(pointID) uGrid.InsertNextCell(ELMT_TYPE, con) return uGrid<|docstring|>Create VTK mesh using initial (undeformaed) node positions Returns: vtkUnstructuredGrid: VTK mesh<|endoftext|>
9a145c840c6f8b675acfd8439bd648fc2589953ba890f9f23505bdb4c279a7a8
def calcGradient(self, inDataKey, outDataKey): 'Calculate gradient of simulation data wrt initial coordinates\n\n Args:\n inDataKey (string): Sim data key to caluclate gradient of\n outDataKey (string): Sim data key to store result\n ' self._validateSimDataKey(inDataKey, fieldType='node') simMetaData = self.simMetaData(inDataKey) inDataShape = simMetaData['shape'] if (simMetaData['numComponents'] == 1): gradient = np.empty((inDataShape[0], 3, inDataShape[1])) else: gradient = np.empty((inDataShape[0], (3 * inDataShape[1]), inDataShape[2])) uGrid = self.constructVtkMesh() numFrames = inDataShape[(- 1)] for i in range(numFrames): vtkData = vnp.numpy_to_vtk(np.ascontiguousarray(self.simData[inDataKey][(..., i)])) vtkData.SetName(inDataKey) uGrid.GetPointData().AddArray(vtkData) gradFilter = vtk.vtkGradientFilter() gradFilter.SetInputDataObject(uGrid) gradFilter.SetInputScalars(vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, inDataKey) gradFilter.Update() gradient[(:, :, i)] = vnp.vtk_to_numpy(gradFilter.GetOutput().GetPointData().GetArray('Gradients')) gradFilter = None uGrid.GetPointData().RemoveArray(inDataKey) vtkData = None self.createSimData(outDataKey, gradient)
Calculate gradient of simulation data wrt initial coordinates Args: inDataKey (string): Sim data key to caluclate gradient of outDataKey (string): Sim data key to store result
fepx.py
calcGradient
MechMicroMan/DefDAP-allan
0
python
def calcGradient(self, inDataKey, outDataKey): 'Calculate gradient of simulation data wrt initial coordinates\n\n Args:\n inDataKey (string): Sim data key to caluclate gradient of\n outDataKey (string): Sim data key to store result\n ' self._validateSimDataKey(inDataKey, fieldType='node') simMetaData = self.simMetaData(inDataKey) inDataShape = simMetaData['shape'] if (simMetaData['numComponents'] == 1): gradient = np.empty((inDataShape[0], 3, inDataShape[1])) else: gradient = np.empty((inDataShape[0], (3 * inDataShape[1]), inDataShape[2])) uGrid = self.constructVtkMesh() numFrames = inDataShape[(- 1)] for i in range(numFrames): vtkData = vnp.numpy_to_vtk(np.ascontiguousarray(self.simData[inDataKey][(..., i)])) vtkData.SetName(inDataKey) uGrid.GetPointData().AddArray(vtkData) gradFilter = vtk.vtkGradientFilter() gradFilter.SetInputDataObject(uGrid) gradFilter.SetInputScalars(vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, inDataKey) gradFilter.Update() gradient[(:, :, i)] = vnp.vtk_to_numpy(gradFilter.GetOutput().GetPointData().GetArray('Gradients')) gradFilter = None uGrid.GetPointData().RemoveArray(inDataKey) vtkData = None self.createSimData(outDataKey, gradient)
def calcGradient(self, inDataKey, outDataKey): 'Calculate gradient of simulation data wrt initial coordinates\n\n Args:\n inDataKey (string): Sim data key to caluclate gradient of\n outDataKey (string): Sim data key to store result\n ' self._validateSimDataKey(inDataKey, fieldType='node') simMetaData = self.simMetaData(inDataKey) inDataShape = simMetaData['shape'] if (simMetaData['numComponents'] == 1): gradient = np.empty((inDataShape[0], 3, inDataShape[1])) else: gradient = np.empty((inDataShape[0], (3 * inDataShape[1]), inDataShape[2])) uGrid = self.constructVtkMesh() numFrames = inDataShape[(- 1)] for i in range(numFrames): vtkData = vnp.numpy_to_vtk(np.ascontiguousarray(self.simData[inDataKey][(..., i)])) vtkData.SetName(inDataKey) uGrid.GetPointData().AddArray(vtkData) gradFilter = vtk.vtkGradientFilter() gradFilter.SetInputDataObject(uGrid) gradFilter.SetInputScalars(vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, inDataKey) gradFilter.Update() gradient[(:, :, i)] = vnp.vtk_to_numpy(gradFilter.GetOutput().GetPointData().GetArray('Gradients')) gradFilter = None uGrid.GetPointData().RemoveArray(inDataKey) vtkData = None self.createSimData(outDataKey, gradient)<|docstring|>Calculate gradient of simulation data wrt initial coordinates Args: inDataKey (string): Sim data key to caluclate gradient of outDataKey (string): Sim data key to store result<|endoftext|>
d91ce2454fc0c8d7b84078f7c85ca49c36121086740ec3299b315d060ceb71dc
def nodeToElmtData(self, inDataKey, outDataKey): 'Convert node data to element data using VTK framework\n\n Args:\n inDataKey (string): Sim data key to convert\n outDataKey (string): Sim data key to store result\n ' self._validateSimDataKey(inDataKey, fieldType='node') simMetaData = self.simMetaData(inDataKey) inDataShape = simMetaData['shape'] elmtData = np.empty(((self.numElmts,) + inDataShape[1:])) uGrid = self.constructVtkMesh() numFrames = inDataShape[(- 1)] for i in range(numFrames): vtkData = vnp.numpy_to_vtk(np.ascontiguousarray(self.simData[inDataKey][(..., i)])) vtkData.SetName(inDataKey) uGrid.GetPointData().AddArray(vtkData) conversionFilter = vtk.vtkPointDataToCellData() conversionFilter.SetInputDataObject(uGrid) conversionFilter.Update() elmtData[(..., i)] = vnp.vtk_to_numpy(conversionFilter.GetOutput().GetCellData().GetArray(inDataKey)) conversionFilter = None uGrid.GetPointData().RemoveArray(inDataKey) uGrid.GetCellData().RemoveArray(inDataKey) vtkData = None self.createSimData(outDataKey, elmtData)
Convert node data to element data using VTK framework Args: inDataKey (string): Sim data key to convert outDataKey (string): Sim data key to store result
fepx.py
nodeToElmtData
MechMicroMan/DefDAP-allan
0
python
def nodeToElmtData(self, inDataKey, outDataKey): 'Convert node data to element data using VTK framework\n\n Args:\n inDataKey (string): Sim data key to convert\n outDataKey (string): Sim data key to store result\n ' self._validateSimDataKey(inDataKey, fieldType='node') simMetaData = self.simMetaData(inDataKey) inDataShape = simMetaData['shape'] elmtData = np.empty(((self.numElmts,) + inDataShape[1:])) uGrid = self.constructVtkMesh() numFrames = inDataShape[(- 1)] for i in range(numFrames): vtkData = vnp.numpy_to_vtk(np.ascontiguousarray(self.simData[inDataKey][(..., i)])) vtkData.SetName(inDataKey) uGrid.GetPointData().AddArray(vtkData) conversionFilter = vtk.vtkPointDataToCellData() conversionFilter.SetInputDataObject(uGrid) conversionFilter.Update() elmtData[(..., i)] = vnp.vtk_to_numpy(conversionFilter.GetOutput().GetCellData().GetArray(inDataKey)) conversionFilter = None uGrid.GetPointData().RemoveArray(inDataKey) uGrid.GetCellData().RemoveArray(inDataKey) vtkData = None self.createSimData(outDataKey, elmtData)
def nodeToElmtData(self, inDataKey, outDataKey): 'Convert node data to element data using VTK framework\n\n Args:\n inDataKey (string): Sim data key to convert\n outDataKey (string): Sim data key to store result\n ' self._validateSimDataKey(inDataKey, fieldType='node') simMetaData = self.simMetaData(inDataKey) inDataShape = simMetaData['shape'] elmtData = np.empty(((self.numElmts,) + inDataShape[1:])) uGrid = self.constructVtkMesh() numFrames = inDataShape[(- 1)] for i in range(numFrames): vtkData = vnp.numpy_to_vtk(np.ascontiguousarray(self.simData[inDataKey][(..., i)])) vtkData.SetName(inDataKey) uGrid.GetPointData().AddArray(vtkData) conversionFilter = vtk.vtkPointDataToCellData() conversionFilter.SetInputDataObject(uGrid) conversionFilter.Update() elmtData[(..., i)] = vnp.vtk_to_numpy(conversionFilter.GetOutput().GetCellData().GetArray(inDataKey)) conversionFilter = None uGrid.GetPointData().RemoveArray(inDataKey) uGrid.GetCellData().RemoveArray(inDataKey) vtkData = None self.createSimData(outDataKey, elmtData)<|docstring|>Convert node data to element data using VTK framework Args: inDataKey (string): Sim data key to convert outDataKey (string): Sim data key to store result<|endoftext|>
ade003bcb46244f1b313479b03ba4eaf3b2e3e7242fb84a9db962fd4d2785ad4
def calcGrainAverage(self, inDataKey, outDataKey=None): 'Calculate grain avergae of elemnet data.\n\n Args:\n inDataKey (str): Data key of input data\n outDataKey (None, optional): Data key to save data to. If none given then the data is returned from the function\n\n Returns:\n Array: Grain average data or nothing if outDataKey specified.\n ' self._validateSimDataKey(inDataKey, fieldType='element') inMetaData = self.simMetaData(inDataKey) outDataShape = ((self.numGrains,) + inMetaData['shape'][1:]) grainData = np.empty(outDataShape) for i in range(self.numGrains): grainData[i] = self.simData[inDataKey][((self.elmtGrain == (i + 1)), ...)].mean(axis=0) if (outDataKey is None): return grainData else: self.createSimData(outDataKey, grainData, isNotVector=inMetaData['notVector']) return
Calculate grain avergae of elemnet data. Args: inDataKey (str): Data key of input data outDataKey (None, optional): Data key to save data to. If none given then the data is returned from the function Returns: Array: Grain average data or nothing if outDataKey specified.
fepx.py
calcGrainAverage
MechMicroMan/DefDAP-allan
0
python
def calcGrainAverage(self, inDataKey, outDataKey=None): 'Calculate grain avergae of elemnet data.\n\n Args:\n inDataKey (str): Data key of input data\n outDataKey (None, optional): Data key to save data to. If none given then the data is returned from the function\n\n Returns:\n Array: Grain average data or nothing if outDataKey specified.\n ' self._validateSimDataKey(inDataKey, fieldType='element') inMetaData = self.simMetaData(inDataKey) outDataShape = ((self.numGrains,) + inMetaData['shape'][1:]) grainData = np.empty(outDataShape) for i in range(self.numGrains): grainData[i] = self.simData[inDataKey][((self.elmtGrain == (i + 1)), ...)].mean(axis=0) if (outDataKey is None): return grainData else: self.createSimData(outDataKey, grainData, isNotVector=inMetaData['notVector']) return
def calcGrainAverage(self, inDataKey, outDataKey=None): 'Calculate grain avergae of elemnet data.\n\n Args:\n inDataKey (str): Data key of input data\n outDataKey (None, optional): Data key to save data to. If none given then the data is returned from the function\n\n Returns:\n Array: Grain average data or nothing if outDataKey specified.\n ' self._validateSimDataKey(inDataKey, fieldType='element') inMetaData = self.simMetaData(inDataKey) outDataShape = ((self.numGrains,) + inMetaData['shape'][1:]) grainData = np.empty(outDataShape) for i in range(self.numGrains): grainData[i] = self.simData[inDataKey][((self.elmtGrain == (i + 1)), ...)].mean(axis=0) if (outDataKey is None): return grainData else: self.createSimData(outDataKey, grainData, isNotVector=inMetaData['notVector']) return<|docstring|>Calculate grain avergae of elemnet data. Args: inDataKey (str): Data key of input data outDataKey (None, optional): Data key to save data to. If none given then the data is returned from the function Returns: Array: Grain average data or nothing if outDataKey specified.<|endoftext|>
035fb3b6cab5c366c8dc58f38c2b6d43c1cc76b66da548785fedca73685816d8
def writeVTU(self, fileName, frameNums, outputs, times=None, useInitialNodePos=True): 'Write data out to VTK compatible files. Files are output to the data directory.\n\n Args:\n fileName (string): Base name of output files.\n frameNums (lst(int)): The simlation frames to output. Either an integer for a single\n frame, a list of ints for many or -1 for all. 0 is intial and 1 is first sim output\n outputs (list(string)): The properties to be output. Current options are misOri, gammadot,\n backstress and elStats.\n times (list(float), optional): The time at each frame (only used for labeling)\n useInitialNodePos (bool, optional): If false uses updateded node positions from each frame. Default: True.\n ' CON_ORDER = [0, 2, 4, 9, 1, 3, 5, 6, 7, 8] ELMT_TYPE = 24 FILE_POSTFIX = '' cell_types = np.empty(self.numElmts, dtype='uint8') cell_types[:] = ELMT_TYPE offsets = np.arange(start=10, stop=(10 * (self.numElmts + 1)), step=10, dtype=int) if useInitialNodePos: x = self.nodePos[(:, 0)] y = self.nodePos[(:, 1)] z = self.nodePos[(:, 2)] frameNums = self._validateFrameNums(frameNums) if (times is None): times = frameNums includeElStats = False simMetaDatas = [] for dataKey in outputs: if (dataKey in self.simData): simMetaData = self.simMetaData(dataKey) if simMetaData['nodeData']: simMetaDatas.insert(0, simMetaData) else: simMetaDatas.append(simMetaData) elif (dataKey == 'elStats'): if (self.elStats is not None): includeElStats = True else: print('Element stats have not been loaded') elif (dataKey in self.simDataKeys): print('{:} data is not loaded.'.format(dataKey)) else: print('"{:}" is not a valid output data key.'.format(dataKey)) fileNameFull = '{:s}{:s}{:s}'.format(self.dataDir, fileName, FILE_POSTFIX) print(fileNameFull) vtgFile = pyevtk.vtk.VtkGroup(fileNameFull) for frameNum in frameNums: fileNameFull = '{:s}{:s}.{:d}.vtu'.format(fileName, FILE_POSTFIX, frameNum) vtgFile.addFile(fileNameFull, times[frameNum], relToCWD=False) fileNameFull = 
'{:s}{:s}{:s}.{:d}'.format(self.dataDir, fileName, FILE_POSTFIX, frameNum) vtuFile = pyevtk.vtk.VtkFile(fileNameFull, pyevtk.vtk.VtkUnstructuredGrid) vtuFile.openGrid() vtuFile.openPiece(ncells=self.numElmts, npoints=self.numNodes) if (not useInitialNodePos): x = self.simData['nodePos'][(:, 0, frameNum)] y = self.simData['nodePos'][(:, 1, frameNum)] z = self.simData['nodePos'][(:, 2, frameNum)] vtuFile.openElement('Points') vtuFile.addData('points', (x, y, z)) vtuFile.closeElement('Points') vtuFile.openElement('Cells') vtuFile.addHeader('connectivity', self.elmtCon.dtype.name, self.elmtCon.size, 1) vtuFile.addData('offsets', offsets) vtuFile.addData('types', cell_types) vtuFile.closeElement('Cells') vtuFile.openElement('PointData') for simMetaData in simMetaDatas: if ((simMetaData['includesInitial'] or (frameNum > 0)) and simMetaData['nodeData']): dataKey = simMetaData['dataKey'] trueFrameNum = (frameNum if simMetaData['includesInitial'] else (frameNum - 1)) if (simMetaData['numComponents'] == 1): vtuFile.addHeader(dataKey, self.simData[dataKey].dtype.name, self.simData[dataKey][(:, trueFrameNum)].size, 1) elif ((simMetaData['numComponents'] == 3) and (dataKey not in self.simDataKeysNotVector)): vtuFile.addHeader(dataKey, self.simData[dataKey].dtype.name, self.simData[dataKey][(:, 0, trueFrameNum)].size, 3) else: for i in range(simMetaData['numComponents']): vtuFile.addHeader('{:s} {:d}'.format(dataKey, (i + 1)), self.simData[dataKey].dtype.name, self.simData[dataKey][(:, i, trueFrameNum)].size, 1) vtuFile.closeElement('PointData') vtuFile.openElement('CellData') for simMetaData in simMetaDatas: if ((simMetaData['includesInitial'] or (frameNum > 0)) and simMetaData['elmtData']): dataKey = simMetaData['dataKey'] trueFrameNum = (frameNum if simMetaData['includesInitial'] else (frameNum - 1)) if (simMetaData['numComponents'] == 1): vtuFile.addHeader(dataKey, self.simData[dataKey].dtype.name, self.simData[dataKey][(:, trueFrameNum)].size, 1) elif 
((simMetaData['numComponents'] == 3) and (dataKey not in self.simDataKeysNotVector)): vtuFile.addHeader(dataKey, self.simData[dataKey].dtype.name, self.simData[dataKey][(:, 0, trueFrameNum)].size, 3) else: for i in range(simMetaData['numComponents']): vtuFile.addHeader('{:s} {:d}'.format(dataKey, (i + 1)), self.simData[dataKey].dtype.name, self.simData[dataKey][(:, i, trueFrameNum)].size, 1) if (includeElStats and (frameNum == 0)): for i in range(self.elStats.shape[1]): vtuFile.addHeader('Element stat - {:}'.format(self.meshElStatNames[i]), self.elStats.dtype.name, self.elStats[(:, i)].size, 1) vtuFile.closeElement('CellData') vtuFile.closePiece() vtuFile.closeGrid() vtuFile.appendData((x, y, z)) vtuFile.appendData(self.elmtCon[(:, CON_ORDER)].flatten()).appendData(offsets).appendData(cell_types) for simMetaData in simMetaDatas: if (simMetaData['includesInitial'] or (frameNum > 0)): dataKey = simMetaData['dataKey'] trueFrameNum = (frameNum if simMetaData['includesInitial'] else (frameNum - 1)) if (simMetaData['numComponents'] == 1): vtuFile.appendData(self.simData[dataKey][(:, trueFrameNum)]) elif ((simMetaData['numComponents'] == 3) and (dataKey not in self.simDataKeysNotVector)): vtuFile.appendData((self.simData[dataKey][(:, 0, trueFrameNum)], self.simData[dataKey][(:, 1, trueFrameNum)], self.simData[dataKey][(:, 2, trueFrameNum)])) else: for i in range(simMetaData['numComponents']): vtuFile.appendData(self.simData[dataKey][(:, i, trueFrameNum)]) if (includeElStats and (frameNum == 0)): for i in range(self.elStats.shape[1]): vtuFile.appendData(self.elStats[(:, i)]) vtuFile.save() vtgFile.save()
Write data out to VTK compatible files. Files are output to the data directory. Args: fileName (string): Base name of output files. frameNums (lst(int)): The simlation frames to output. Either an integer for a single frame, a list of ints for many or -1 for all. 0 is intial and 1 is first sim output outputs (list(string)): The properties to be output. Current options are misOri, gammadot, backstress and elStats. times (list(float), optional): The time at each frame (only used for labeling) useInitialNodePos (bool, optional): If false uses updateded node positions from each frame. Default: True.
fepx.py
writeVTU
MechMicroMan/DefDAP-allan
0
python
def writeVTU(self, fileName, frameNums, outputs, times=None, useInitialNodePos=True): 'Write data out to VTK compatible files. Files are output to the data directory.\n\n Args:\n fileName (string): Base name of output files.\n frameNums (lst(int)): The simlation frames to output. Either an integer for a single\n frame, a list of ints for many or -1 for all. 0 is intial and 1 is first sim output\n outputs (list(string)): The properties to be output. Current options are misOri, gammadot,\n backstress and elStats.\n times (list(float), optional): The time at each frame (only used for labeling)\n useInitialNodePos (bool, optional): If false uses updateded node positions from each frame. Default: True.\n ' CON_ORDER = [0, 2, 4, 9, 1, 3, 5, 6, 7, 8] ELMT_TYPE = 24 FILE_POSTFIX = cell_types = np.empty(self.numElmts, dtype='uint8') cell_types[:] = ELMT_TYPE offsets = np.arange(start=10, stop=(10 * (self.numElmts + 1)), step=10, dtype=int) if useInitialNodePos: x = self.nodePos[(:, 0)] y = self.nodePos[(:, 1)] z = self.nodePos[(:, 2)] frameNums = self._validateFrameNums(frameNums) if (times is None): times = frameNums includeElStats = False simMetaDatas = [] for dataKey in outputs: if (dataKey in self.simData): simMetaData = self.simMetaData(dataKey) if simMetaData['nodeData']: simMetaDatas.insert(0, simMetaData) else: simMetaDatas.append(simMetaData) elif (dataKey == 'elStats'): if (self.elStats is not None): includeElStats = True else: print('Element stats have not been loaded') elif (dataKey in self.simDataKeys): print('{:} data is not loaded.'.format(dataKey)) else: print('"{:}" is not a valid output data key.'.format(dataKey)) fileNameFull = '{:s}{:s}{:s}'.format(self.dataDir, fileName, FILE_POSTFIX) print(fileNameFull) vtgFile = pyevtk.vtk.VtkGroup(fileNameFull) for frameNum in frameNums: fileNameFull = '{:s}{:s}.{:d}.vtu'.format(fileName, FILE_POSTFIX, frameNum) vtgFile.addFile(fileNameFull, times[frameNum], relToCWD=False) fileNameFull = 
'{:s}{:s}{:s}.{:d}'.format(self.dataDir, fileName, FILE_POSTFIX, frameNum) vtuFile = pyevtk.vtk.VtkFile(fileNameFull, pyevtk.vtk.VtkUnstructuredGrid) vtuFile.openGrid() vtuFile.openPiece(ncells=self.numElmts, npoints=self.numNodes) if (not useInitialNodePos): x = self.simData['nodePos'][(:, 0, frameNum)] y = self.simData['nodePos'][(:, 1, frameNum)] z = self.simData['nodePos'][(:, 2, frameNum)] vtuFile.openElement('Points') vtuFile.addData('points', (x, y, z)) vtuFile.closeElement('Points') vtuFile.openElement('Cells') vtuFile.addHeader('connectivity', self.elmtCon.dtype.name, self.elmtCon.size, 1) vtuFile.addData('offsets', offsets) vtuFile.addData('types', cell_types) vtuFile.closeElement('Cells') vtuFile.openElement('PointData') for simMetaData in simMetaDatas: if ((simMetaData['includesInitial'] or (frameNum > 0)) and simMetaData['nodeData']): dataKey = simMetaData['dataKey'] trueFrameNum = (frameNum if simMetaData['includesInitial'] else (frameNum - 1)) if (simMetaData['numComponents'] == 1): vtuFile.addHeader(dataKey, self.simData[dataKey].dtype.name, self.simData[dataKey][(:, trueFrameNum)].size, 1) elif ((simMetaData['numComponents'] == 3) and (dataKey not in self.simDataKeysNotVector)): vtuFile.addHeader(dataKey, self.simData[dataKey].dtype.name, self.simData[dataKey][(:, 0, trueFrameNum)].size, 3) else: for i in range(simMetaData['numComponents']): vtuFile.addHeader('{:s} {:d}'.format(dataKey, (i + 1)), self.simData[dataKey].dtype.name, self.simData[dataKey][(:, i, trueFrameNum)].size, 1) vtuFile.closeElement('PointData') vtuFile.openElement('CellData') for simMetaData in simMetaDatas: if ((simMetaData['includesInitial'] or (frameNum > 0)) and simMetaData['elmtData']): dataKey = simMetaData['dataKey'] trueFrameNum = (frameNum if simMetaData['includesInitial'] else (frameNum - 1)) if (simMetaData['numComponents'] == 1): vtuFile.addHeader(dataKey, self.simData[dataKey].dtype.name, self.simData[dataKey][(:, trueFrameNum)].size, 1) elif 
((simMetaData['numComponents'] == 3) and (dataKey not in self.simDataKeysNotVector)): vtuFile.addHeader(dataKey, self.simData[dataKey].dtype.name, self.simData[dataKey][(:, 0, trueFrameNum)].size, 3) else: for i in range(simMetaData['numComponents']): vtuFile.addHeader('{:s} {:d}'.format(dataKey, (i + 1)), self.simData[dataKey].dtype.name, self.simData[dataKey][(:, i, trueFrameNum)].size, 1) if (includeElStats and (frameNum == 0)): for i in range(self.elStats.shape[1]): vtuFile.addHeader('Element stat - {:}'.format(self.meshElStatNames[i]), self.elStats.dtype.name, self.elStats[(:, i)].size, 1) vtuFile.closeElement('CellData') vtuFile.closePiece() vtuFile.closeGrid() vtuFile.appendData((x, y, z)) vtuFile.appendData(self.elmtCon[(:, CON_ORDER)].flatten()).appendData(offsets).appendData(cell_types) for simMetaData in simMetaDatas: if (simMetaData['includesInitial'] or (frameNum > 0)): dataKey = simMetaData['dataKey'] trueFrameNum = (frameNum if simMetaData['includesInitial'] else (frameNum - 1)) if (simMetaData['numComponents'] == 1): vtuFile.appendData(self.simData[dataKey][(:, trueFrameNum)]) elif ((simMetaData['numComponents'] == 3) and (dataKey not in self.simDataKeysNotVector)): vtuFile.appendData((self.simData[dataKey][(:, 0, trueFrameNum)], self.simData[dataKey][(:, 1, trueFrameNum)], self.simData[dataKey][(:, 2, trueFrameNum)])) else: for i in range(simMetaData['numComponents']): vtuFile.appendData(self.simData[dataKey][(:, i, trueFrameNum)]) if (includeElStats and (frameNum == 0)): for i in range(self.elStats.shape[1]): vtuFile.appendData(self.elStats[(:, i)]) vtuFile.save() vtgFile.save()
def writeVTU(self, fileName, frameNums, outputs, times=None, useInitialNodePos=True): 'Write data out to VTK compatible files. Files are output to the data directory.\n\n Args:\n fileName (string): Base name of output files.\n frameNums (lst(int)): The simlation frames to output. Either an integer for a single\n frame, a list of ints for many or -1 for all. 0 is intial and 1 is first sim output\n outputs (list(string)): The properties to be output. Current options are misOri, gammadot,\n backstress and elStats.\n times (list(float), optional): The time at each frame (only used for labeling)\n useInitialNodePos (bool, optional): If false uses updateded node positions from each frame. Default: True.\n ' CON_ORDER = [0, 2, 4, 9, 1, 3, 5, 6, 7, 8] ELMT_TYPE = 24 FILE_POSTFIX = cell_types = np.empty(self.numElmts, dtype='uint8') cell_types[:] = ELMT_TYPE offsets = np.arange(start=10, stop=(10 * (self.numElmts + 1)), step=10, dtype=int) if useInitialNodePos: x = self.nodePos[(:, 0)] y = self.nodePos[(:, 1)] z = self.nodePos[(:, 2)] frameNums = self._validateFrameNums(frameNums) if (times is None): times = frameNums includeElStats = False simMetaDatas = [] for dataKey in outputs: if (dataKey in self.simData): simMetaData = self.simMetaData(dataKey) if simMetaData['nodeData']: simMetaDatas.insert(0, simMetaData) else: simMetaDatas.append(simMetaData) elif (dataKey == 'elStats'): if (self.elStats is not None): includeElStats = True else: print('Element stats have not been loaded') elif (dataKey in self.simDataKeys): print('{:} data is not loaded.'.format(dataKey)) else: print('"{:}" is not a valid output data key.'.format(dataKey)) fileNameFull = '{:s}{:s}{:s}'.format(self.dataDir, fileName, FILE_POSTFIX) print(fileNameFull) vtgFile = pyevtk.vtk.VtkGroup(fileNameFull) for frameNum in frameNums: fileNameFull = '{:s}{:s}.{:d}.vtu'.format(fileName, FILE_POSTFIX, frameNum) vtgFile.addFile(fileNameFull, times[frameNum], relToCWD=False) fileNameFull = 
'{:s}{:s}{:s}.{:d}'.format(self.dataDir, fileName, FILE_POSTFIX, frameNum) vtuFile = pyevtk.vtk.VtkFile(fileNameFull, pyevtk.vtk.VtkUnstructuredGrid) vtuFile.openGrid() vtuFile.openPiece(ncells=self.numElmts, npoints=self.numNodes) if (not useInitialNodePos): x = self.simData['nodePos'][(:, 0, frameNum)] y = self.simData['nodePos'][(:, 1, frameNum)] z = self.simData['nodePos'][(:, 2, frameNum)] vtuFile.openElement('Points') vtuFile.addData('points', (x, y, z)) vtuFile.closeElement('Points') vtuFile.openElement('Cells') vtuFile.addHeader('connectivity', self.elmtCon.dtype.name, self.elmtCon.size, 1) vtuFile.addData('offsets', offsets) vtuFile.addData('types', cell_types) vtuFile.closeElement('Cells') vtuFile.openElement('PointData') for simMetaData in simMetaDatas: if ((simMetaData['includesInitial'] or (frameNum > 0)) and simMetaData['nodeData']): dataKey = simMetaData['dataKey'] trueFrameNum = (frameNum if simMetaData['includesInitial'] else (frameNum - 1)) if (simMetaData['numComponents'] == 1): vtuFile.addHeader(dataKey, self.simData[dataKey].dtype.name, self.simData[dataKey][(:, trueFrameNum)].size, 1) elif ((simMetaData['numComponents'] == 3) and (dataKey not in self.simDataKeysNotVector)): vtuFile.addHeader(dataKey, self.simData[dataKey].dtype.name, self.simData[dataKey][(:, 0, trueFrameNum)].size, 3) else: for i in range(simMetaData['numComponents']): vtuFile.addHeader('{:s} {:d}'.format(dataKey, (i + 1)), self.simData[dataKey].dtype.name, self.simData[dataKey][(:, i, trueFrameNum)].size, 1) vtuFile.closeElement('PointData') vtuFile.openElement('CellData') for simMetaData in simMetaDatas: if ((simMetaData['includesInitial'] or (frameNum > 0)) and simMetaData['elmtData']): dataKey = simMetaData['dataKey'] trueFrameNum = (frameNum if simMetaData['includesInitial'] else (frameNum - 1)) if (simMetaData['numComponents'] == 1): vtuFile.addHeader(dataKey, self.simData[dataKey].dtype.name, self.simData[dataKey][(:, trueFrameNum)].size, 1) elif 
((simMetaData['numComponents'] == 3) and (dataKey not in self.simDataKeysNotVector)): vtuFile.addHeader(dataKey, self.simData[dataKey].dtype.name, self.simData[dataKey][(:, 0, trueFrameNum)].size, 3) else: for i in range(simMetaData['numComponents']): vtuFile.addHeader('{:s} {:d}'.format(dataKey, (i + 1)), self.simData[dataKey].dtype.name, self.simData[dataKey][(:, i, trueFrameNum)].size, 1) if (includeElStats and (frameNum == 0)): for i in range(self.elStats.shape[1]): vtuFile.addHeader('Element stat - {:}'.format(self.meshElStatNames[i]), self.elStats.dtype.name, self.elStats[(:, i)].size, 1) vtuFile.closeElement('CellData') vtuFile.closePiece() vtuFile.closeGrid() vtuFile.appendData((x, y, z)) vtuFile.appendData(self.elmtCon[(:, CON_ORDER)].flatten()).appendData(offsets).appendData(cell_types) for simMetaData in simMetaDatas: if (simMetaData['includesInitial'] or (frameNum > 0)): dataKey = simMetaData['dataKey'] trueFrameNum = (frameNum if simMetaData['includesInitial'] else (frameNum - 1)) if (simMetaData['numComponents'] == 1): vtuFile.appendData(self.simData[dataKey][(:, trueFrameNum)]) elif ((simMetaData['numComponents'] == 3) and (dataKey not in self.simDataKeysNotVector)): vtuFile.appendData((self.simData[dataKey][(:, 0, trueFrameNum)], self.simData[dataKey][(:, 1, trueFrameNum)], self.simData[dataKey][(:, 2, trueFrameNum)])) else: for i in range(simMetaData['numComponents']): vtuFile.appendData(self.simData[dataKey][(:, i, trueFrameNum)]) if (includeElStats and (frameNum == 0)): for i in range(self.elStats.shape[1]): vtuFile.appendData(self.elStats[(:, i)]) vtuFile.save() vtgFile.save()<|docstring|>Write data out to VTK compatible files. Files are output to the data directory. Args: fileName (string): Base name of output files. frameNums (lst(int)): The simlation frames to output. Either an integer for a single frame, a list of ints for many or -1 for all. 0 is intial and 1 is first sim output outputs (list(string)): The properties to be output. 
Current options are misOri, gammadot, backstress and elStats. times (list(float), optional): The time at each frame (only used for labeling) useInitialNodePos (bool, optional): If false uses updateded node positions from each frame. Default: True.<|endoftext|>
ee83ee3389efffe280c61c9be3595852c89fef6ae633acb47f77cd85df84ec02
@staticmethod def combineFiles(baseDir, inDirs, outDir): 'Combine output files from multiple simulations. If a simulation was run in smaller parts\n\n Args:\n baseDir (string): Base directory of whole simulation. No trailing slash\n inDirs (List(string)): List of simulation directory names to\n combine (baseDir/inDir). No trailing slash\n outDir (string): Directory to output combined files to (baseDir/outDir)\n ' fileNames = [] for fileName in os.listdir('{:s}/{:s}'.format(baseDir, inDirs[0])): if (fnmatch.fnmatch(fileName, 'post.*') and (fileName != 'post.conv') and (fileName != 'post.stats') and (not (fnmatch.fnmatch(fileName, 'post.debug.*') or fnmatch.fnmatch(fileName, 'post.log.*') or fnmatch.fnmatch(fileName, 'post.restart.*')))): fileNames.append(fileName) if (not os.path.isdir('{:s}/{:s}'.format(baseDir, outDir))): os.mkdir('{:s}/{:s}'.format(baseDir, outDir)) for fileName in fileNames: outFile = open('{:s}/{:s}/{:s}'.format(baseDir, outDir, fileName), 'w') for inDir in inDirs: inFile = open('{:s}/{:s}/{:s}'.format(baseDir, inDir, fileName), 'r') for line in inFile: outFile.write(line) inFile.close() outFile.close()
Combine output files from multiple simulations. If a simulation was run in smaller parts Args: baseDir (string): Base directory of whole simulation. No trailing slash inDirs (List(string)): List of simulation directory names to combine (baseDir/inDir). No trailing slash outDir (string): Directory to output combined files to (baseDir/outDir)
fepx.py
combineFiles
MechMicroMan/DefDAP-allan
0
python
@staticmethod def combineFiles(baseDir, inDirs, outDir): 'Combine output files from multiple simulations. If a simulation was run in smaller parts\n\n Args:\n baseDir (string): Base directory of whole simulation. No trailing slash\n inDirs (List(string)): List of simulation directory names to\n combine (baseDir/inDir). No trailing slash\n outDir (string): Directory to output combined files to (baseDir/outDir)\n ' fileNames = [] for fileName in os.listdir('{:s}/{:s}'.format(baseDir, inDirs[0])): if (fnmatch.fnmatch(fileName, 'post.*') and (fileName != 'post.conv') and (fileName != 'post.stats') and (not (fnmatch.fnmatch(fileName, 'post.debug.*') or fnmatch.fnmatch(fileName, 'post.log.*') or fnmatch.fnmatch(fileName, 'post.restart.*')))): fileNames.append(fileName) if (not os.path.isdir('{:s}/{:s}'.format(baseDir, outDir))): os.mkdir('{:s}/{:s}'.format(baseDir, outDir)) for fileName in fileNames: outFile = open('{:s}/{:s}/{:s}'.format(baseDir, outDir, fileName), 'w') for inDir in inDirs: inFile = open('{:s}/{:s}/{:s}'.format(baseDir, inDir, fileName), 'r') for line in inFile: outFile.write(line) inFile.close() outFile.close()
@staticmethod def combineFiles(baseDir, inDirs, outDir): 'Combine output files from multiple simulations. If a simulation was run in smaller parts\n\n Args:\n baseDir (string): Base directory of whole simulation. No trailing slash\n inDirs (List(string)): List of simulation directory names to\n combine (baseDir/inDir). No trailing slash\n outDir (string): Directory to output combined files to (baseDir/outDir)\n ' fileNames = [] for fileName in os.listdir('{:s}/{:s}'.format(baseDir, inDirs[0])): if (fnmatch.fnmatch(fileName, 'post.*') and (fileName != 'post.conv') and (fileName != 'post.stats') and (not (fnmatch.fnmatch(fileName, 'post.debug.*') or fnmatch.fnmatch(fileName, 'post.log.*') or fnmatch.fnmatch(fileName, 'post.restart.*')))): fileNames.append(fileName) if (not os.path.isdir('{:s}/{:s}'.format(baseDir, outDir))): os.mkdir('{:s}/{:s}'.format(baseDir, outDir)) for fileName in fileNames: outFile = open('{:s}/{:s}/{:s}'.format(baseDir, outDir, fileName), 'w') for inDir in inDirs: inFile = open('{:s}/{:s}/{:s}'.format(baseDir, inDir, fileName), 'r') for line in inFile: outFile.write(line) inFile.close() outFile.close()<|docstring|>Combine output files from multiple simulations. If a simulation was run in smaller parts Args: baseDir (string): Base directory of whole simulation. No trailing slash inDirs (List(string)): List of simulation directory names to combine (baseDir/inDir). No trailing slash outDir (string): Directory to output combined files to (baseDir/outDir)<|endoftext|>
5d670f745e29c0129732a92bd464e34e1296092676ff3ce29417a4e70721511b
@property def elmtGrain(self): 'Returns an array of grain IDs for elements in the surface (note grain IDs are 1 based)\n ' return self.mesh.elmtGrain[self.elmtIDs]
Returns an array of grain IDs for elements in the surface (note grain IDs are 1 based)
fepx.py
elmtGrain
MechMicroMan/DefDAP-allan
0
python
@property def elmtGrain(self): '\n ' return self.mesh.elmtGrain[self.elmtIDs]
@property def elmtGrain(self): '\n ' return self.mesh.elmtGrain[self.elmtIDs]<|docstring|>Returns an array of grain IDs for elements in the surface (note grain IDs are 1 based)<|endoftext|>
90fbd8e1bd023e7a03c5ce281f04bfb2bec5bd59b535be04fe3f8c54cfefcbd3
@property def grainIDs(self): 'Returns an array of grain IDs included in the surface\n ' return np.unique(self.elmtGrain)
Returns an array of grain IDs included in the surface
fepx.py
grainIDs
MechMicroMan/DefDAP-allan
0
python
@property def grainIDs(self): '\n ' return np.unique(self.elmtGrain)
@property def grainIDs(self): '\n ' return np.unique(self.elmtGrain)<|docstring|>Returns an array of grain IDs included in the surface<|endoftext|>
fe8efef3779253baa1622033c906eaed90b552cba0cfb38b18022a68b48c5b12
@property def elmtGrainLayer(self): 'Returns an array of grain IDs for elements in the surface (note grain IDs are 1 based)\n ' return self.mesh.elmtGrain[self.elmtIDsLayer]
Returns an array of grain IDs for elements in the surface (note grain IDs are 1 based)
fepx.py
elmtGrainLayer
MechMicroMan/DefDAP-allan
0
python
@property def elmtGrainLayer(self): '\n ' return self.mesh.elmtGrain[self.elmtIDsLayer]
@property def elmtGrainLayer(self): '\n ' return self.mesh.elmtGrain[self.elmtIDsLayer]<|docstring|>Returns an array of grain IDs for elements in the surface (note grain IDs are 1 based)<|endoftext|>
bce3be24ae236e9f08a05ec0c69b05cc0c2dfcadf10a446b718d37120bbc68fe
@property def grainIDsLayer(self): 'Returns an array of grain IDs included in the surface\n ' return np.unique(self.elmtGrainLayer)
Returns an array of grain IDs included in the surface
fepx.py
grainIDsLayer
MechMicroMan/DefDAP-allan
0
python
@property def grainIDsLayer(self): '\n ' return np.unique(self.elmtGrainLayer)
@property def grainIDsLayer(self): '\n ' return np.unique(self.elmtGrainLayer)<|docstring|>Returns an array of grain IDs included in the surface<|endoftext|>
3c602cf35775e43ca261e10b893b528d473e5464a5c24652595f94567109d935
def uniform_weights(x, x_mask): 'Return uniform weights over non-masked x (a sequence of vectors).\n\n Args:\n x: batch * len * hdim\n x_mask: batch * len (1 for padding, 0 for true)\n Output:\n x_avg: batch * hdim\n ' alpha = torch.ones(x.size(0), x.size(1)) if x.data.is_cuda: alpha = alpha.cuda() alpha = (alpha * x_mask.eq(0).float()) alpha = (alpha / alpha.sum(1).expand(alpha.size())) return alpha
Return uniform weights over non-masked x (a sequence of vectors). Args: x: batch * len * hdim x_mask: batch * len (1 for padding, 0 for true) Output: x_avg: batch * hdim
drqa/reader/layers.py
uniform_weights
litian6363/DrQA
4,500
python
def uniform_weights(x, x_mask): 'Return uniform weights over non-masked x (a sequence of vectors).\n\n Args:\n x: batch * len * hdim\n x_mask: batch * len (1 for padding, 0 for true)\n Output:\n x_avg: batch * hdim\n ' alpha = torch.ones(x.size(0), x.size(1)) if x.data.is_cuda: alpha = alpha.cuda() alpha = (alpha * x_mask.eq(0).float()) alpha = (alpha / alpha.sum(1).expand(alpha.size())) return alpha
def uniform_weights(x, x_mask): 'Return uniform weights over non-masked x (a sequence of vectors).\n\n Args:\n x: batch * len * hdim\n x_mask: batch * len (1 for padding, 0 for true)\n Output:\n x_avg: batch * hdim\n ' alpha = torch.ones(x.size(0), x.size(1)) if x.data.is_cuda: alpha = alpha.cuda() alpha = (alpha * x_mask.eq(0).float()) alpha = (alpha / alpha.sum(1).expand(alpha.size())) return alpha<|docstring|>Return uniform weights over non-masked x (a sequence of vectors). Args: x: batch * len * hdim x_mask: batch * len (1 for padding, 0 for true) Output: x_avg: batch * hdim<|endoftext|>
c623ab4873d68c938d03feb384eb8266117a7249aaece3796a793369d6666b69
def weighted_avg(x, weights): 'Return a weighted average of x (a sequence of vectors).\n\n Args:\n x: batch * len * hdim\n weights: batch * len, sum(dim = 1) = 1\n Output:\n x_avg: batch * hdim\n ' return weights.unsqueeze(1).bmm(x).squeeze(1)
Return a weighted average of x (a sequence of vectors). Args: x: batch * len * hdim weights: batch * len, sum(dim = 1) = 1 Output: x_avg: batch * hdim
drqa/reader/layers.py
weighted_avg
litian6363/DrQA
4,500
python
def weighted_avg(x, weights): 'Return a weighted average of x (a sequence of vectors).\n\n Args:\n x: batch * len * hdim\n weights: batch * len, sum(dim = 1) = 1\n Output:\n x_avg: batch * hdim\n ' return weights.unsqueeze(1).bmm(x).squeeze(1)
def weighted_avg(x, weights): 'Return a weighted average of x (a sequence of vectors).\n\n Args:\n x: batch * len * hdim\n weights: batch * len, sum(dim = 1) = 1\n Output:\n x_avg: batch * hdim\n ' return weights.unsqueeze(1).bmm(x).squeeze(1)<|docstring|>Return a weighted average of x (a sequence of vectors). Args: x: batch * len * hdim weights: batch * len, sum(dim = 1) = 1 Output: x_avg: batch * hdim<|endoftext|>
12376e4cff8416cc11c5746cb2c051c2f9e574afc7f782e866560cec8dac0291
def forward(self, x, x_mask): 'Encode either padded or non-padded sequences.\n\n Can choose to either handle or ignore variable length sequences.\n Always handle padding in eval.\n\n Args:\n x: batch * len * hdim\n x_mask: batch * len (1 for padding, 0 for true)\n Output:\n x_encoded: batch * len * hdim_encoded\n ' if (x_mask.data.sum() == 0): output = self._forward_unpadded(x, x_mask) elif (self.padding or (not self.training)): output = self._forward_padded(x, x_mask) else: output = self._forward_unpadded(x, x_mask) return output.contiguous()
Encode either padded or non-padded sequences. Can choose to either handle or ignore variable length sequences. Always handle padding in eval. Args: x: batch * len * hdim x_mask: batch * len (1 for padding, 0 for true) Output: x_encoded: batch * len * hdim_encoded
drqa/reader/layers.py
forward
litian6363/DrQA
4,500
python
def forward(self, x, x_mask): 'Encode either padded or non-padded sequences.\n\n Can choose to either handle or ignore variable length sequences.\n Always handle padding in eval.\n\n Args:\n x: batch * len * hdim\n x_mask: batch * len (1 for padding, 0 for true)\n Output:\n x_encoded: batch * len * hdim_encoded\n ' if (x_mask.data.sum() == 0): output = self._forward_unpadded(x, x_mask) elif (self.padding or (not self.training)): output = self._forward_padded(x, x_mask) else: output = self._forward_unpadded(x, x_mask) return output.contiguous()
def forward(self, x, x_mask): 'Encode either padded or non-padded sequences.\n\n Can choose to either handle or ignore variable length sequences.\n Always handle padding in eval.\n\n Args:\n x: batch * len * hdim\n x_mask: batch * len (1 for padding, 0 for true)\n Output:\n x_encoded: batch * len * hdim_encoded\n ' if (x_mask.data.sum() == 0): output = self._forward_unpadded(x, x_mask) elif (self.padding or (not self.training)): output = self._forward_padded(x, x_mask) else: output = self._forward_unpadded(x, x_mask) return output.contiguous()<|docstring|>Encode either padded or non-padded sequences. Can choose to either handle or ignore variable length sequences. Always handle padding in eval. Args: x: batch * len * hdim x_mask: batch * len (1 for padding, 0 for true) Output: x_encoded: batch * len * hdim_encoded<|endoftext|>
13cb7ae33767ad694210226c2c78358812cda998bf8faa63ad06841f92189672
def _forward_unpadded(self, x, x_mask): 'Faster encoding that ignores any padding.' x = x.transpose(0, 1) outputs = [x] for i in range(self.num_layers): rnn_input = outputs[(- 1)] if (self.dropout_rate > 0): rnn_input = F.dropout(rnn_input, p=self.dropout_rate, training=self.training) rnn_output = self.rnns[i](rnn_input)[0] outputs.append(rnn_output) if self.concat_layers: output = torch.cat(outputs[1:], 2) else: output = outputs[(- 1)] output = output.transpose(0, 1) if (self.dropout_output and (self.dropout_rate > 0)): output = F.dropout(output, p=self.dropout_rate, training=self.training) return output
Faster encoding that ignores any padding.
drqa/reader/layers.py
_forward_unpadded
litian6363/DrQA
4,500
python
def _forward_unpadded(self, x, x_mask): x = x.transpose(0, 1) outputs = [x] for i in range(self.num_layers): rnn_input = outputs[(- 1)] if (self.dropout_rate > 0): rnn_input = F.dropout(rnn_input, p=self.dropout_rate, training=self.training) rnn_output = self.rnns[i](rnn_input)[0] outputs.append(rnn_output) if self.concat_layers: output = torch.cat(outputs[1:], 2) else: output = outputs[(- 1)] output = output.transpose(0, 1) if (self.dropout_output and (self.dropout_rate > 0)): output = F.dropout(output, p=self.dropout_rate, training=self.training) return output
def _forward_unpadded(self, x, x_mask): x = x.transpose(0, 1) outputs = [x] for i in range(self.num_layers): rnn_input = outputs[(- 1)] if (self.dropout_rate > 0): rnn_input = F.dropout(rnn_input, p=self.dropout_rate, training=self.training) rnn_output = self.rnns[i](rnn_input)[0] outputs.append(rnn_output) if self.concat_layers: output = torch.cat(outputs[1:], 2) else: output = outputs[(- 1)] output = output.transpose(0, 1) if (self.dropout_output and (self.dropout_rate > 0)): output = F.dropout(output, p=self.dropout_rate, training=self.training) return output<|docstring|>Faster encoding that ignores any padding.<|endoftext|>
7371674c83287ce2dac3cc76c823e8f86f5198c07c05e29363520cc25f3a65f2
def _forward_padded(self, x, x_mask): 'Slower (significantly), but more precise, encoding that handles\n padding.\n ' lengths = x_mask.data.eq(0).long().sum(1).squeeze() (_, idx_sort) = torch.sort(lengths, dim=0, descending=True) (_, idx_unsort) = torch.sort(idx_sort, dim=0) lengths = list(lengths[idx_sort]) x = x.index_select(0, idx_sort) x = x.transpose(0, 1) rnn_input = nn.utils.rnn.pack_padded_sequence(x, lengths) outputs = [rnn_input] for i in range(self.num_layers): rnn_input = outputs[(- 1)] if (self.dropout_rate > 0): dropout_input = F.dropout(rnn_input.data, p=self.dropout_rate, training=self.training) rnn_input = nn.utils.rnn.PackedSequence(dropout_input, rnn_input.batch_sizes) outputs.append(self.rnns[i](rnn_input)[0]) for (i, o) in enumerate(outputs[1:], 1): outputs[i] = nn.utils.rnn.pad_packed_sequence(o)[0] if self.concat_layers: output = torch.cat(outputs[1:], 2) else: output = outputs[(- 1)] output = output.transpose(0, 1) output = output.index_select(0, idx_unsort) if (output.size(1) != x_mask.size(1)): padding = torch.zeros(output.size(0), (x_mask.size(1) - output.size(1)), output.size(2)).type(output.data.type()) output = torch.cat([output, padding], 1) if (self.dropout_output and (self.dropout_rate > 0)): output = F.dropout(output, p=self.dropout_rate, training=self.training) return output
Slower (significantly), but more precise, encoding that handles padding.
drqa/reader/layers.py
_forward_padded
litian6363/DrQA
4,500
python
def _forward_padded(self, x, x_mask): 'Slower (significantly), but more precise, encoding that handles\n padding.\n ' lengths = x_mask.data.eq(0).long().sum(1).squeeze() (_, idx_sort) = torch.sort(lengths, dim=0, descending=True) (_, idx_unsort) = torch.sort(idx_sort, dim=0) lengths = list(lengths[idx_sort]) x = x.index_select(0, idx_sort) x = x.transpose(0, 1) rnn_input = nn.utils.rnn.pack_padded_sequence(x, lengths) outputs = [rnn_input] for i in range(self.num_layers): rnn_input = outputs[(- 1)] if (self.dropout_rate > 0): dropout_input = F.dropout(rnn_input.data, p=self.dropout_rate, training=self.training) rnn_input = nn.utils.rnn.PackedSequence(dropout_input, rnn_input.batch_sizes) outputs.append(self.rnns[i](rnn_input)[0]) for (i, o) in enumerate(outputs[1:], 1): outputs[i] = nn.utils.rnn.pad_packed_sequence(o)[0] if self.concat_layers: output = torch.cat(outputs[1:], 2) else: output = outputs[(- 1)] output = output.transpose(0, 1) output = output.index_select(0, idx_unsort) if (output.size(1) != x_mask.size(1)): padding = torch.zeros(output.size(0), (x_mask.size(1) - output.size(1)), output.size(2)).type(output.data.type()) output = torch.cat([output, padding], 1) if (self.dropout_output and (self.dropout_rate > 0)): output = F.dropout(output, p=self.dropout_rate, training=self.training) return output
def _forward_padded(self, x, x_mask): 'Slower (significantly), but more precise, encoding that handles\n padding.\n ' lengths = x_mask.data.eq(0).long().sum(1).squeeze() (_, idx_sort) = torch.sort(lengths, dim=0, descending=True) (_, idx_unsort) = torch.sort(idx_sort, dim=0) lengths = list(lengths[idx_sort]) x = x.index_select(0, idx_sort) x = x.transpose(0, 1) rnn_input = nn.utils.rnn.pack_padded_sequence(x, lengths) outputs = [rnn_input] for i in range(self.num_layers): rnn_input = outputs[(- 1)] if (self.dropout_rate > 0): dropout_input = F.dropout(rnn_input.data, p=self.dropout_rate, training=self.training) rnn_input = nn.utils.rnn.PackedSequence(dropout_input, rnn_input.batch_sizes) outputs.append(self.rnns[i](rnn_input)[0]) for (i, o) in enumerate(outputs[1:], 1): outputs[i] = nn.utils.rnn.pad_packed_sequence(o)[0] if self.concat_layers: output = torch.cat(outputs[1:], 2) else: output = outputs[(- 1)] output = output.transpose(0, 1) output = output.index_select(0, idx_unsort) if (output.size(1) != x_mask.size(1)): padding = torch.zeros(output.size(0), (x_mask.size(1) - output.size(1)), output.size(2)).type(output.data.type()) output = torch.cat([output, padding], 1) if (self.dropout_output and (self.dropout_rate > 0)): output = F.dropout(output, p=self.dropout_rate, training=self.training) return output<|docstring|>Slower (significantly), but more precise, encoding that handles padding.<|endoftext|>
a65110fdce0f4929d753158013b0421f4478b0388cefdd79dfb6ca8ea9325632
def forward(self, x, y, y_mask): '\n Args:\n x: batch * len1 * hdim\n y: batch * len2 * hdim\n y_mask: batch * len2 (1 for padding, 0 for true)\n Output:\n matched_seq: batch * len1 * hdim\n ' if self.linear: x_proj = self.linear(x.view((- 1), x.size(2))).view(x.size()) x_proj = F.relu(x_proj) y_proj = self.linear(y.view((- 1), y.size(2))).view(y.size()) y_proj = F.relu(y_proj) else: x_proj = x y_proj = y scores = x_proj.bmm(y_proj.transpose(2, 1)) y_mask = y_mask.unsqueeze(1).expand(scores.size()) scores.data.masked_fill_(y_mask.data, (- float('inf'))) alpha_flat = F.softmax(scores.view((- 1), y.size(1)), dim=(- 1)) alpha = alpha_flat.view((- 1), x.size(1), y.size(1)) matched_seq = alpha.bmm(y) return matched_seq
Args: x: batch * len1 * hdim y: batch * len2 * hdim y_mask: batch * len2 (1 for padding, 0 for true) Output: matched_seq: batch * len1 * hdim
drqa/reader/layers.py
forward
litian6363/DrQA
4,500
python
def forward(self, x, y, y_mask): '\n Args:\n x: batch * len1 * hdim\n y: batch * len2 * hdim\n y_mask: batch * len2 (1 for padding, 0 for true)\n Output:\n matched_seq: batch * len1 * hdim\n ' if self.linear: x_proj = self.linear(x.view((- 1), x.size(2))).view(x.size()) x_proj = F.relu(x_proj) y_proj = self.linear(y.view((- 1), y.size(2))).view(y.size()) y_proj = F.relu(y_proj) else: x_proj = x y_proj = y scores = x_proj.bmm(y_proj.transpose(2, 1)) y_mask = y_mask.unsqueeze(1).expand(scores.size()) scores.data.masked_fill_(y_mask.data, (- float('inf'))) alpha_flat = F.softmax(scores.view((- 1), y.size(1)), dim=(- 1)) alpha = alpha_flat.view((- 1), x.size(1), y.size(1)) matched_seq = alpha.bmm(y) return matched_seq
def forward(self, x, y, y_mask): '\n Args:\n x: batch * len1 * hdim\n y: batch * len2 * hdim\n y_mask: batch * len2 (1 for padding, 0 for true)\n Output:\n matched_seq: batch * len1 * hdim\n ' if self.linear: x_proj = self.linear(x.view((- 1), x.size(2))).view(x.size()) x_proj = F.relu(x_proj) y_proj = self.linear(y.view((- 1), y.size(2))).view(y.size()) y_proj = F.relu(y_proj) else: x_proj = x y_proj = y scores = x_proj.bmm(y_proj.transpose(2, 1)) y_mask = y_mask.unsqueeze(1).expand(scores.size()) scores.data.masked_fill_(y_mask.data, (- float('inf'))) alpha_flat = F.softmax(scores.view((- 1), y.size(1)), dim=(- 1)) alpha = alpha_flat.view((- 1), x.size(1), y.size(1)) matched_seq = alpha.bmm(y) return matched_seq<|docstring|>Args: x: batch * len1 * hdim y: batch * len2 * hdim y_mask: batch * len2 (1 for padding, 0 for true) Output: matched_seq: batch * len1 * hdim<|endoftext|>
02cb33613cf4e1b4b5c1b2e35d7d7c58a3e60432d7f1957f5d272e47ef3fa78f
def forward(self, x, y, x_mask): '\n Args:\n x: batch * len * hdim1\n y: batch * hdim2\n x_mask: batch * len (1 for padding, 0 for true)\n Output:\n alpha = batch * len\n ' Wy = (self.linear(y) if (self.linear is not None) else y) xWy = x.bmm(Wy.unsqueeze(2)).squeeze(2) xWy.data.masked_fill_(x_mask.data, (- float('inf'))) if self.normalize: if self.training: alpha = F.log_softmax(xWy, dim=(- 1)) else: alpha = F.softmax(xWy, dim=(- 1)) else: alpha = xWy.exp() return alpha
Args: x: batch * len * hdim1 y: batch * hdim2 x_mask: batch * len (1 for padding, 0 for true) Output: alpha = batch * len
drqa/reader/layers.py
forward
litian6363/DrQA
4,500
python
def forward(self, x, y, x_mask): '\n Args:\n x: batch * len * hdim1\n y: batch * hdim2\n x_mask: batch * len (1 for padding, 0 for true)\n Output:\n alpha = batch * len\n ' Wy = (self.linear(y) if (self.linear is not None) else y) xWy = x.bmm(Wy.unsqueeze(2)).squeeze(2) xWy.data.masked_fill_(x_mask.data, (- float('inf'))) if self.normalize: if self.training: alpha = F.log_softmax(xWy, dim=(- 1)) else: alpha = F.softmax(xWy, dim=(- 1)) else: alpha = xWy.exp() return alpha
def forward(self, x, y, x_mask): '\n Args:\n x: batch * len * hdim1\n y: batch * hdim2\n x_mask: batch * len (1 for padding, 0 for true)\n Output:\n alpha = batch * len\n ' Wy = (self.linear(y) if (self.linear is not None) else y) xWy = x.bmm(Wy.unsqueeze(2)).squeeze(2) xWy.data.masked_fill_(x_mask.data, (- float('inf'))) if self.normalize: if self.training: alpha = F.log_softmax(xWy, dim=(- 1)) else: alpha = F.softmax(xWy, dim=(- 1)) else: alpha = xWy.exp() return alpha<|docstring|>Args: x: batch * len * hdim1 y: batch * hdim2 x_mask: batch * len (1 for padding, 0 for true) Output: alpha = batch * len<|endoftext|>
339e1d7b8fc198b37895c32d327e9ff604f3c061cb586ad2ef18d9015cccd8c6
def forward(self, x, x_mask): '\n Args:\n x: batch * len * hdim\n x_mask: batch * len (1 for padding, 0 for true)\n Output:\n alpha: batch * len\n ' x_flat = x.view((- 1), x.size((- 1))) scores = self.linear(x_flat).view(x.size(0), x.size(1)) scores.data.masked_fill_(x_mask.data, (- float('inf'))) alpha = F.softmax(scores, dim=(- 1)) return alpha
Args: x: batch * len * hdim x_mask: batch * len (1 for padding, 0 for true) Output: alpha: batch * len
drqa/reader/layers.py
forward
litian6363/DrQA
4,500
python
def forward(self, x, x_mask): '\n Args:\n x: batch * len * hdim\n x_mask: batch * len (1 for padding, 0 for true)\n Output:\n alpha: batch * len\n ' x_flat = x.view((- 1), x.size((- 1))) scores = self.linear(x_flat).view(x.size(0), x.size(1)) scores.data.masked_fill_(x_mask.data, (- float('inf'))) alpha = F.softmax(scores, dim=(- 1)) return alpha
def forward(self, x, x_mask): '\n Args:\n x: batch * len * hdim\n x_mask: batch * len (1 for padding, 0 for true)\n Output:\n alpha: batch * len\n ' x_flat = x.view((- 1), x.size((- 1))) scores = self.linear(x_flat).view(x.size(0), x.size(1)) scores.data.masked_fill_(x_mask.data, (- float('inf'))) alpha = F.softmax(scores, dim=(- 1)) return alpha<|docstring|>Args: x: batch * len * hdim x_mask: batch * len (1 for padding, 0 for true) Output: alpha: batch * len<|endoftext|>
c8d8253b60ecc53db340219a67a5026ccde6627abf446eb47adf1d2f4eb12f57
def arity(function): '\n Return the arity of a function\n\n :param function: function\n :type function: ``function``\n\n :return: arity of the function\n :rtype: ``int``\n ' return len(inspect.getargspec(function).args)
Return the arity of a function :param function: function :type function: ``function`` :return: arity of the function :rtype: ``int``
testplan/common/utils/callable.py
arity
apretori-tic/testplan
0
python
def arity(function): '\n Return the arity of a function\n\n :param function: function\n :type function: ``function``\n\n :return: arity of the function\n :rtype: ``int``\n ' return len(inspect.getargspec(function).args)
def arity(function): '\n Return the arity of a function\n\n :param function: function\n :type function: ``function``\n\n :return: arity of the function\n :rtype: ``int``\n ' return len(inspect.getargspec(function).args)<|docstring|>Return the arity of a function :param function: function :type function: ``function`` :return: arity of the function :rtype: ``int``<|endoftext|>
6e72f345a7cd1218c95b5cf0fd936baacbccedea43f0e59067c448ecae07dbc0
def getargspec(callable_): '\n Return an Argspec for any callable object\n\n :param callable_: a callable object\n :type callable_: ``callable``\n\n :return: argspec for the callable\n :rtype: ``inspect.ArgSpec``\n ' if callable(callable_): if (inspect.ismethod(callable_) or inspect.isfunction(callable_)): return inspect.getargspec(callable_) else: return inspect.getargspec(callable_.__call__) else: raise ValueError('{} is not callable'.format(callable_))
Return an Argspec for any callable object :param callable_: a callable object :type callable_: ``callable`` :return: argspec for the callable :rtype: ``inspect.ArgSpec``
testplan/common/utils/callable.py
getargspec
apretori-tic/testplan
0
python
def getargspec(callable_): '\n Return an Argspec for any callable object\n\n :param callable_: a callable object\n :type callable_: ``callable``\n\n :return: argspec for the callable\n :rtype: ``inspect.ArgSpec``\n ' if callable(callable_): if (inspect.ismethod(callable_) or inspect.isfunction(callable_)): return inspect.getargspec(callable_) else: return inspect.getargspec(callable_.__call__) else: raise ValueError('{} is not callable'.format(callable_))
def getargspec(callable_): '\n Return an Argspec for any callable object\n\n :param callable_: a callable object\n :type callable_: ``callable``\n\n :return: argspec for the callable\n :rtype: ``inspect.ArgSpec``\n ' if callable(callable_): if (inspect.ismethod(callable_) or inspect.isfunction(callable_)): return inspect.getargspec(callable_) else: return inspect.getargspec(callable_.__call__) else: raise ValueError('{} is not callable'.format(callable_))<|docstring|>Return an Argspec for any callable object :param callable_: a callable object :type callable_: ``callable`` :return: argspec for the callable :rtype: ``inspect.ArgSpec``<|endoftext|>
f6c72358fdc01eea6af401d89b1335fdfa2be61e6ccd664a8737d05ff2c8794c
def update_wrapper(wrapper, wrapped, assigned=WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES): '\n Update a wrapper function to look like the wrapped function.\n\n :param wrapper: Function to be updated.\n :type wrapper: ``func``\n :param wrapped: Original function.\n :type wrapped: ``func``\n :param assigned: Tuple naming the attributes assigned directly from the\n wrapped function to the wrapper function (defaults to\n functools.WRAPPER_ASSIGNMENTS)\n :type assigned: ``tuple``\n :param updated: tuple naming the attributes of the wrapper that are updated\n with the corresponding attribute from the wrapped function\n (defaults to functools.WRAPPER_UPDATES)\n :type updated: ``tuple``\n :return: Wrapper function.\n :rtype: ``func``\n ' for attr in assigned: try: value = getattr(wrapped, attr) except AttributeError: pass else: setattr(wrapper, attr, value) for attr in updated: getattr(wrapper, attr).update(getattr(wrapped, attr, {})) wrapper.__wrapped__ = wrapped return wrapper
Update a wrapper function to look like the wrapped function. :param wrapper: Function to be updated. :type wrapper: ``func`` :param wrapped: Original function. :type wrapped: ``func`` :param assigned: Tuple naming the attributes assigned directly from the wrapped function to the wrapper function (defaults to functools.WRAPPER_ASSIGNMENTS) :type assigned: ``tuple`` :param updated: tuple naming the attributes of the wrapper that are updated with the corresponding attribute from the wrapped function (defaults to functools.WRAPPER_UPDATES) :type updated: ``tuple`` :return: Wrapper function. :rtype: ``func``
testplan/common/utils/callable.py
update_wrapper
apretori-tic/testplan
0
python
def update_wrapper(wrapper, wrapped, assigned=WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES): '\n Update a wrapper function to look like the wrapped function.\n\n :param wrapper: Function to be updated.\n :type wrapper: ``func``\n :param wrapped: Original function.\n :type wrapped: ``func``\n :param assigned: Tuple naming the attributes assigned directly from the\n wrapped function to the wrapper function (defaults to\n functools.WRAPPER_ASSIGNMENTS)\n :type assigned: ``tuple``\n :param updated: tuple naming the attributes of the wrapper that are updated\n with the corresponding attribute from the wrapped function\n (defaults to functools.WRAPPER_UPDATES)\n :type updated: ``tuple``\n :return: Wrapper function.\n :rtype: ``func``\n ' for attr in assigned: try: value = getattr(wrapped, attr) except AttributeError: pass else: setattr(wrapper, attr, value) for attr in updated: getattr(wrapper, attr).update(getattr(wrapped, attr, {})) wrapper.__wrapped__ = wrapped return wrapper
def update_wrapper(wrapper, wrapped, assigned=WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES): '\n Update a wrapper function to look like the wrapped function.\n\n :param wrapper: Function to be updated.\n :type wrapper: ``func``\n :param wrapped: Original function.\n :type wrapped: ``func``\n :param assigned: Tuple naming the attributes assigned directly from the\n wrapped function to the wrapper function (defaults to\n functools.WRAPPER_ASSIGNMENTS)\n :type assigned: ``tuple``\n :param updated: tuple naming the attributes of the wrapper that are updated\n with the corresponding attribute from the wrapped function\n (defaults to functools.WRAPPER_UPDATES)\n :type updated: ``tuple``\n :return: Wrapper function.\n :rtype: ``func``\n ' for attr in assigned: try: value = getattr(wrapped, attr) except AttributeError: pass else: setattr(wrapper, attr, value) for attr in updated: getattr(wrapper, attr).update(getattr(wrapped, attr, {})) wrapper.__wrapped__ = wrapped return wrapper<|docstring|>Update a wrapper function to look like the wrapped function. :param wrapper: Function to be updated. :type wrapper: ``func`` :param wrapped: Original function. :type wrapped: ``func`` :param assigned: Tuple naming the attributes assigned directly from the wrapped function to the wrapper function (defaults to functools.WRAPPER_ASSIGNMENTS) :type assigned: ``tuple`` :param updated: tuple naming the attributes of the wrapper that are updated with the corresponding attribute from the wrapped function (defaults to functools.WRAPPER_UPDATES) :type updated: ``tuple`` :return: Wrapper function. :rtype: ``func``<|endoftext|>
8f9ff76b15674f396ca30a365b7f7fde2a49d2cc2d47c14efc0f60e23ae1a07f
def wraps(wrapped, assigned=WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES): '\n Custom wraps function that uses the backported ``update_wrapper``.\n\n Also sets ``wrapper_of`` attribute for code highlighting, for methods that\n are decorated for the first time.\n ' def _inner(wrapper): wrapper = update_wrapper(wrapper=wrapper, wrapped=wrapped, assigned=assigned, updated=updated) if (not hasattr(wrapped, 'wrapper_of')): wrapper.wrapper_of = wrapped return wrapper return _inner
Custom wraps function that uses the backported ``update_wrapper``. Also sets ``wrapper_of`` attribute for code highlighting, for methods that are decorated for the first time.
testplan/common/utils/callable.py
wraps
apretori-tic/testplan
0
python
def wraps(wrapped, assigned=WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES): '\n Custom wraps function that uses the backported ``update_wrapper``.\n\n Also sets ``wrapper_of`` attribute for code highlighting, for methods that\n are decorated for the first time.\n ' def _inner(wrapper): wrapper = update_wrapper(wrapper=wrapper, wrapped=wrapped, assigned=assigned, updated=updated) if (not hasattr(wrapped, 'wrapper_of')): wrapper.wrapper_of = wrapped return wrapper return _inner
def wraps(wrapped, assigned=WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES): '\n Custom wraps function that uses the backported ``update_wrapper``.\n\n Also sets ``wrapper_of`` attribute for code highlighting, for methods that\n are decorated for the first time.\n ' def _inner(wrapper): wrapper = update_wrapper(wrapper=wrapper, wrapped=wrapped, assigned=assigned, updated=updated) if (not hasattr(wrapped, 'wrapper_of')): wrapper.wrapper_of = wrapped return wrapper return _inner<|docstring|>Custom wraps function that uses the backported ``update_wrapper``. Also sets ``wrapper_of`` attribute for code highlighting, for methods that are decorated for the first time.<|endoftext|>
32e0f7bcbf17ef84c75cbf0ec61df110c4b931314c7692261e09bf151e134eb0
@staticmethod def phase_from_date(date: datetime.date, time_of_day: Optional[TimeOfDay]) -> Optional[int]: '\n Get the phase index value for a given date and time of day (AM/PM).\n\n :param date: the date of the price phase.\n :param time_of_day: the time of day for the price phase.\n\n :returns: either the phase index or ``None`` if this is a sunday.\n ' if (date.weekday() == 6): return None elif (time_of_day is None): raise ValueError('all days but sunday require time of day') return ((date.weekday() * 2) + time_of_day.value)
Get the phase index value for a given date and time of day (AM/PM). :param date: the date of the price phase. :param time_of_day: the time of day for the price phase. :returns: either the phase index or ``None`` if this is a sunday.
stalkbroker/models/_ticker.py
phase_from_date
peake100/stalkbroker-py
0
python
@staticmethod def phase_from_date(date: datetime.date, time_of_day: Optional[TimeOfDay]) -> Optional[int]: '\n Get the phase index value for a given date and time of day (AM/PM).\n\n :param date: the date of the price phase.\n :param time_of_day: the time of day for the price phase.\n\n :returns: either the phase index or ``None`` if this is a sunday.\n ' if (date.weekday() == 6): return None elif (time_of_day is None): raise ValueError('all days but sunday require time of day') return ((date.weekday() * 2) + time_of_day.value)
@staticmethod def phase_from_date(date: datetime.date, time_of_day: Optional[TimeOfDay]) -> Optional[int]: '\n Get the phase index value for a given date and time of day (AM/PM).\n\n :param date: the date of the price phase.\n :param time_of_day: the time of day for the price phase.\n\n :returns: either the phase index or ``None`` if this is a sunday.\n ' if (date.weekday() == 6): return None elif (time_of_day is None): raise ValueError('all days but sunday require time of day') return ((date.weekday() * 2) + time_of_day.value)<|docstring|>Get the phase index value for a given date and time of day (AM/PM). :param date: the date of the price phase. :param time_of_day: the time of day for the price phase. :returns: either the phase index or ``None`` if this is a sunday.<|endoftext|>
04c1ed5cf7f4daaa1d9bda724b6691729adad7f24c039ae18c1fdff8066d82d5
@staticmethod def phase_from_datetime(dt: datetime.datetime) -> Optional[int]: '\n Get the phase index value for a given datetime.\n\n :param dt: the date of the price phase.\n\n :returns: either the phase index or ``None`` if this is a sunday.\n ' if (dt.hour < 12): time_of_day: TimeOfDay = TimeOfDay.AM else: time_of_day = TimeOfDay.PM return Ticker.phase_from_date(date=dt.date(), time_of_day=time_of_day)
Get the phase index value for a given datetime. :param dt: the date of the price phase. :returns: either the phase index or ``None`` if this is a sunday.
stalkbroker/models/_ticker.py
phase_from_datetime
peake100/stalkbroker-py
0
python
@staticmethod def phase_from_datetime(dt: datetime.datetime) -> Optional[int]: '\n Get the phase index value for a given datetime.\n\n :param dt: the date of the price phase.\n\n :returns: either the phase index or ``None`` if this is a sunday.\n ' if (dt.hour < 12): time_of_day: TimeOfDay = TimeOfDay.AM else: time_of_day = TimeOfDay.PM return Ticker.phase_from_date(date=dt.date(), time_of_day=time_of_day)
@staticmethod def phase_from_datetime(dt: datetime.datetime) -> Optional[int]: '\n Get the phase index value for a given datetime.\n\n :param dt: the date of the price phase.\n\n :returns: either the phase index or ``None`` if this is a sunday.\n ' if (dt.hour < 12): time_of_day: TimeOfDay = TimeOfDay.AM else: time_of_day = TimeOfDay.PM return Ticker.phase_from_date(date=dt.date(), time_of_day=time_of_day)<|docstring|>Get the phase index value for a given datetime. :param dt: the date of the price phase. :returns: either the phase index or ``None`` if this is a sunday.<|endoftext|>
e5073c6e9fcc6c62bc982304990ad3671cafafadc008694e1ec29cfc33fe7641
@staticmethod def phase_name(phase: int) -> str: '\n Return the name to use in reports for a given price phase.\n\n :param phase: the index of the price phase. Use ``-1`` for sunday.\n\n :returns: phase name.\n ' if (phase == (- 1)): return "Daisey's Deal" day = (phase // 2) day_str = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday'}[day] period_str = TimeOfDay.from_phase_index(phase).name return ((day_str + ' ') + period_str)
Return the name to use in reports for a given price phase. :param phase: the index of the price phase. Use ``-1`` for sunday. :returns: phase name.
stalkbroker/models/_ticker.py
phase_name
peake100/stalkbroker-py
0
python
@staticmethod def phase_name(phase: int) -> str: '\n Return the name to use in reports for a given price phase.\n\n :param phase: the index of the price phase. Use ``-1`` for sunday.\n\n :returns: phase name.\n ' if (phase == (- 1)): return "Daisey's Deal" day = (phase // 2) day_str = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday'}[day] period_str = TimeOfDay.from_phase_index(phase).name return ((day_str + ' ') + period_str)
@staticmethod def phase_name(phase: int) -> str: '\n Return the name to use in reports for a given price phase.\n\n :param phase: the index of the price phase. Use ``-1`` for sunday.\n\n :returns: phase name.\n ' if (phase == (- 1)): return "Daisey's Deal" day = (phase // 2) day_str = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday'}[day] period_str = TimeOfDay.from_phase_index(phase).name return ((day_str + ' ') + period_str)<|docstring|>Return the name to use in reports for a given price phase. :param phase: the index of the price phase. Use ``-1`` for sunday. :returns: phase name.<|endoftext|>
9ecda71066be83c0121543de60ad0d0e5af6fbce36774c2d4efabac95dcacc0d
def for_date(self, date: datetime.date, time_of_day: Optional[TimeOfDay]) -> PhaseInfo: '\n Return the phase info for a given date described by this ticker.\n\n :param date: the date of the desired phase info.\n :param time_of_day: the time of day (AM/PM of the desired phase info).\n Can be ``None`` if this is a sunday.\n ' phase = self.phase_from_date(date, time_of_day) if (phase is None): return PhaseInfo(price=self.purchase_price, name=self.phase_name((- 1)), date=date, time_of_day=None) return self[phase]
Return the phase info for a given date described by this ticker. :param date: the date of the desired phase info. :param time_of_day: the time of day (AM/PM of the desired phase info). Can be ``None`` if this is a sunday.
stalkbroker/models/_ticker.py
for_date
peake100/stalkbroker-py
0
python
def for_date(self, date: datetime.date, time_of_day: Optional[TimeOfDay]) -> PhaseInfo: '\n Return the phase info for a given date described by this ticker.\n\n :param date: the date of the desired phase info.\n :param time_of_day: the time of day (AM/PM of the desired phase info).\n Can be ``None`` if this is a sunday.\n ' phase = self.phase_from_date(date, time_of_day) if (phase is None): return PhaseInfo(price=self.purchase_price, name=self.phase_name((- 1)), date=date, time_of_day=None) return self[phase]
def for_date(self, date: datetime.date, time_of_day: Optional[TimeOfDay]) -> PhaseInfo: '\n Return the phase info for a given date described by this ticker.\n\n :param date: the date of the desired phase info.\n :param time_of_day: the time of day (AM/PM of the desired phase info).\n Can be ``None`` if this is a sunday.\n ' phase = self.phase_from_date(date, time_of_day) if (phase is None): return PhaseInfo(price=self.purchase_price, name=self.phase_name((- 1)), date=date, time_of_day=None) return self[phase]<|docstring|>Return the phase info for a given date described by this ticker. :param date: the date of the desired phase info. :param time_of_day: the time of day (AM/PM of the desired phase info). Can be ``None`` if this is a sunday.<|endoftext|>
818f71acd6311ef7272b48cb97e66fb88d33e3a059135707665ffc6baa9b8a4e
def set_price(self, price: int, date: datetime.date, time_of_day: Optional[TimeOfDay]) -> None: '\n Set the price for a phase in the week described by the date and time of day.\n\n :param price: Turnip price to set.\n :param date: The date this price occurred on.\n :param time_of_day: The time of day this price occurred on. Can be ``None`` if\n this is a sunday\n ' phase = self.phase_from_date(date, time_of_day) if (phase is None): self.purchase_price = price else: self.phases[phase] = price
Set the price for a phase in the week described by the date and time of day. :param price: Turnip price to set. :param date: The date this price occurred on. :param time_of_day: The time of day this price occurred on. Can be ``None`` if this is a sunday
stalkbroker/models/_ticker.py
set_price
peake100/stalkbroker-py
0
python
def set_price(self, price: int, date: datetime.date, time_of_day: Optional[TimeOfDay]) -> None: '\n Set the price for a phase in the week described by the date and time of day.\n\n :param price: Turnip price to set.\n :param date: The date this price occurred on.\n :param time_of_day: The time of day this price occurred on. Can be ``None`` if\n this is a sunday\n ' phase = self.phase_from_date(date, time_of_day) if (phase is None): self.purchase_price = price else: self.phases[phase] = price
def set_price(self, price: int, date: datetime.date, time_of_day: Optional[TimeOfDay]) -> None: '\n Set the price for a phase in the week described by the date and time of day.\n\n :param price: Turnip price to set.\n :param date: The date this price occurred on.\n :param time_of_day: The time of day this price occurred on. Can be ``None`` if\n this is a sunday\n ' phase = self.phase_from_date(date, time_of_day) if (phase is None): self.purchase_price = price else: self.phases[phase] = price<|docstring|>Set the price for a phase in the week described by the date and time of day. :param price: Turnip price to set. :param date: The date this price occurred on. :param time_of_day: The time of day this price occurred on. Can be ``None`` if this is a sunday<|endoftext|>
ac80fdce54e477529c2c185d7a728d3260c828e0844d72c1e939051a405fc2d5
async def _validate_input(data: dict[(str, Any)]) -> tuple[(str, str)]: 'Validate the user input allows us to connect.' bond = Bond(data[CONF_HOST], data[CONF_ACCESS_TOKEN]) try: hub = BondHub(bond) (await hub.setup(max_devices=1)) except ClientConnectionError as error: raise InputValidationError('cannot_connect') from error except ClientResponseError as error: if (error.status == HTTP_UNAUTHORIZED): raise InputValidationError('invalid_auth') from error raise InputValidationError('unknown') from error except Exception as error: _LOGGER.exception('Unexpected exception') raise InputValidationError('unknown') from error if (not hub.bond_id): raise InputValidationError('old_firmware') return (hub.bond_id, hub.name)
Validate the user input allows us to connect.
homeassistant/components/bond/config_flow.py
_validate_input
xonestonex/core
1
python
async def _validate_input(data: dict[(str, Any)]) -> tuple[(str, str)]: bond = Bond(data[CONF_HOST], data[CONF_ACCESS_TOKEN]) try: hub = BondHub(bond) (await hub.setup(max_devices=1)) except ClientConnectionError as error: raise InputValidationError('cannot_connect') from error except ClientResponseError as error: if (error.status == HTTP_UNAUTHORIZED): raise InputValidationError('invalid_auth') from error raise InputValidationError('unknown') from error except Exception as error: _LOGGER.exception('Unexpected exception') raise InputValidationError('unknown') from error if (not hub.bond_id): raise InputValidationError('old_firmware') return (hub.bond_id, hub.name)
async def _validate_input(data: dict[(str, Any)]) -> tuple[(str, str)]: bond = Bond(data[CONF_HOST], data[CONF_ACCESS_TOKEN]) try: hub = BondHub(bond) (await hub.setup(max_devices=1)) except ClientConnectionError as error: raise InputValidationError('cannot_connect') from error except ClientResponseError as error: if (error.status == HTTP_UNAUTHORIZED): raise InputValidationError('invalid_auth') from error raise InputValidationError('unknown') from error except Exception as error: _LOGGER.exception('Unexpected exception') raise InputValidationError('unknown') from error if (not hub.bond_id): raise InputValidationError('old_firmware') return (hub.bond_id, hub.name)<|docstring|>Validate the user input allows us to connect.<|endoftext|>
b23bbea8734f8b5718a52859d0cd34e1ad58f164c33f6e369101c6f98f20b705
def __init__(self) -> None: 'Initialize config flow.' self._discovered: dict[(str, str)] = {}
Initialize config flow.
homeassistant/components/bond/config_flow.py
__init__
xonestonex/core
1
python
def __init__(self) -> None: self._discovered: dict[(str, str)] = {}
def __init__(self) -> None: self._discovered: dict[(str, str)] = {}<|docstring|>Initialize config flow.<|endoftext|>
a91dcf3ffc4d6d211ee3d860a1e6f06ab4e85a065e4db1ca6087715659d352f9
async def _async_try_automatic_configure(self) -> None: 'Try to auto configure the device.\n\n Failure is acceptable here since the device may have been\n online longer then the allowed setup period, and we will\n instead ask them to manually enter the token.\n ' bond = Bond(self._discovered[CONF_HOST], '') try: response = (await bond.token()) except ClientConnectionError: return token = response.get('token') if (token is None): return self._discovered[CONF_ACCESS_TOKEN] = token (_, hub_name) = (await _validate_input(self._discovered)) self._discovered[CONF_NAME] = hub_name
Try to auto configure the device. Failure is acceptable here since the device may have been online longer then the allowed setup period, and we will instead ask them to manually enter the token.
homeassistant/components/bond/config_flow.py
_async_try_automatic_configure
xonestonex/core
1
python
async def _async_try_automatic_configure(self) -> None: 'Try to auto configure the device.\n\n Failure is acceptable here since the device may have been\n online longer then the allowed setup period, and we will\n instead ask them to manually enter the token.\n ' bond = Bond(self._discovered[CONF_HOST], ) try: response = (await bond.token()) except ClientConnectionError: return token = response.get('token') if (token is None): return self._discovered[CONF_ACCESS_TOKEN] = token (_, hub_name) = (await _validate_input(self._discovered)) self._discovered[CONF_NAME] = hub_name
async def _async_try_automatic_configure(self) -> None: 'Try to auto configure the device.\n\n Failure is acceptable here since the device may have been\n online longer then the allowed setup period, and we will\n instead ask them to manually enter the token.\n ' bond = Bond(self._discovered[CONF_HOST], ) try: response = (await bond.token()) except ClientConnectionError: return token = response.get('token') if (token is None): return self._discovered[CONF_ACCESS_TOKEN] = token (_, hub_name) = (await _validate_input(self._discovered)) self._discovered[CONF_NAME] = hub_name<|docstring|>Try to auto configure the device. Failure is acceptable here since the device may have been online longer then the allowed setup period, and we will instead ask them to manually enter the token.<|endoftext|>
84e8f8302ea353550cbb6ef7b249bf4c4277702abd5a8ec912a6140e1ee54da2
async def async_step_zeroconf(self, discovery_info: DiscoveryInfoType) -> dict[(str, Any)]: 'Handle a flow initialized by zeroconf discovery.' name: str = discovery_info[CONF_NAME] host: str = discovery_info[CONF_HOST] bond_id = name.partition('.')[0] (await self.async_set_unique_id(bond_id)) self._abort_if_unique_id_configured({CONF_HOST: host}) self._discovered = {CONF_HOST: host, CONF_NAME: bond_id} (await self._async_try_automatic_configure()) self.context.update({'title_placeholders': {CONF_HOST: self._discovered[CONF_HOST], CONF_NAME: self._discovered[CONF_NAME]}}) return (await self.async_step_confirm())
Handle a flow initialized by zeroconf discovery.
homeassistant/components/bond/config_flow.py
async_step_zeroconf
xonestonex/core
1
python
async def async_step_zeroconf(self, discovery_info: DiscoveryInfoType) -> dict[(str, Any)]: name: str = discovery_info[CONF_NAME] host: str = discovery_info[CONF_HOST] bond_id = name.partition('.')[0] (await self.async_set_unique_id(bond_id)) self._abort_if_unique_id_configured({CONF_HOST: host}) self._discovered = {CONF_HOST: host, CONF_NAME: bond_id} (await self._async_try_automatic_configure()) self.context.update({'title_placeholders': {CONF_HOST: self._discovered[CONF_HOST], CONF_NAME: self._discovered[CONF_NAME]}}) return (await self.async_step_confirm())
async def async_step_zeroconf(self, discovery_info: DiscoveryInfoType) -> dict[(str, Any)]: name: str = discovery_info[CONF_NAME] host: str = discovery_info[CONF_HOST] bond_id = name.partition('.')[0] (await self.async_set_unique_id(bond_id)) self._abort_if_unique_id_configured({CONF_HOST: host}) self._discovered = {CONF_HOST: host, CONF_NAME: bond_id} (await self._async_try_automatic_configure()) self.context.update({'title_placeholders': {CONF_HOST: self._discovered[CONF_HOST], CONF_NAME: self._discovered[CONF_NAME]}}) return (await self.async_step_confirm())<|docstring|>Handle a flow initialized by zeroconf discovery.<|endoftext|>
70e90043db9f72c5429d5db6cf2ec101508d6226a8fd9eee6325f14ff67317a1
async def async_step_confirm(self, user_input: (dict[(str, Any)] | None)=None) -> dict[(str, Any)]: 'Handle confirmation flow for discovered bond hub.' errors = {} if (user_input is not None): if (CONF_ACCESS_TOKEN in self._discovered): return self.async_create_entry(title=self._discovered[CONF_NAME], data={CONF_ACCESS_TOKEN: self._discovered[CONF_ACCESS_TOKEN], CONF_HOST: self._discovered[CONF_HOST]}) data = {CONF_ACCESS_TOKEN: user_input[CONF_ACCESS_TOKEN], CONF_HOST: self._discovered[CONF_HOST]} try: (_, hub_name) = (await _validate_input(data)) except InputValidationError as error: errors['base'] = error.base else: return self.async_create_entry(title=hub_name, data=data) if (CONF_ACCESS_TOKEN in self._discovered): data_schema = TOKEN_SCHEMA else: data_schema = DISCOVERY_SCHEMA return self.async_show_form(step_id='confirm', data_schema=data_schema, errors=errors, description_placeholders=self._discovered)
Handle confirmation flow for discovered bond hub.
homeassistant/components/bond/config_flow.py
async_step_confirm
xonestonex/core
1
python
async def async_step_confirm(self, user_input: (dict[(str, Any)] | None)=None) -> dict[(str, Any)]: errors = {} if (user_input is not None): if (CONF_ACCESS_TOKEN in self._discovered): return self.async_create_entry(title=self._discovered[CONF_NAME], data={CONF_ACCESS_TOKEN: self._discovered[CONF_ACCESS_TOKEN], CONF_HOST: self._discovered[CONF_HOST]}) data = {CONF_ACCESS_TOKEN: user_input[CONF_ACCESS_TOKEN], CONF_HOST: self._discovered[CONF_HOST]} try: (_, hub_name) = (await _validate_input(data)) except InputValidationError as error: errors['base'] = error.base else: return self.async_create_entry(title=hub_name, data=data) if (CONF_ACCESS_TOKEN in self._discovered): data_schema = TOKEN_SCHEMA else: data_schema = DISCOVERY_SCHEMA return self.async_show_form(step_id='confirm', data_schema=data_schema, errors=errors, description_placeholders=self._discovered)
async def async_step_confirm(self, user_input: (dict[(str, Any)] | None)=None) -> dict[(str, Any)]: errors = {} if (user_input is not None): if (CONF_ACCESS_TOKEN in self._discovered): return self.async_create_entry(title=self._discovered[CONF_NAME], data={CONF_ACCESS_TOKEN: self._discovered[CONF_ACCESS_TOKEN], CONF_HOST: self._discovered[CONF_HOST]}) data = {CONF_ACCESS_TOKEN: user_input[CONF_ACCESS_TOKEN], CONF_HOST: self._discovered[CONF_HOST]} try: (_, hub_name) = (await _validate_input(data)) except InputValidationError as error: errors['base'] = error.base else: return self.async_create_entry(title=hub_name, data=data) if (CONF_ACCESS_TOKEN in self._discovered): data_schema = TOKEN_SCHEMA else: data_schema = DISCOVERY_SCHEMA return self.async_show_form(step_id='confirm', data_schema=data_schema, errors=errors, description_placeholders=self._discovered)<|docstring|>Handle confirmation flow for discovered bond hub.<|endoftext|>
c27d66c7d62c0781d030df0859da684d89e839a266ce23a1a884025353bb6d61
async def async_step_user(self, user_input: (dict[(str, Any)] | None)=None) -> dict[(str, Any)]: 'Handle a flow initialized by the user.' errors = {} if (user_input is not None): try: (bond_id, hub_name) = (await _validate_input(user_input)) except InputValidationError as error: errors['base'] = error.base else: (await self.async_set_unique_id(bond_id)) self._abort_if_unique_id_configured() return self.async_create_entry(title=hub_name, data=user_input) return self.async_show_form(step_id='user', data_schema=USER_SCHEMA, errors=errors)
Handle a flow initialized by the user.
homeassistant/components/bond/config_flow.py
async_step_user
xonestonex/core
1
python
async def async_step_user(self, user_input: (dict[(str, Any)] | None)=None) -> dict[(str, Any)]: errors = {} if (user_input is not None): try: (bond_id, hub_name) = (await _validate_input(user_input)) except InputValidationError as error: errors['base'] = error.base else: (await self.async_set_unique_id(bond_id)) self._abort_if_unique_id_configured() return self.async_create_entry(title=hub_name, data=user_input) return self.async_show_form(step_id='user', data_schema=USER_SCHEMA, errors=errors)
async def async_step_user(self, user_input: (dict[(str, Any)] | None)=None) -> dict[(str, Any)]: errors = {} if (user_input is not None): try: (bond_id, hub_name) = (await _validate_input(user_input)) except InputValidationError as error: errors['base'] = error.base else: (await self.async_set_unique_id(bond_id)) self._abort_if_unique_id_configured() return self.async_create_entry(title=hub_name, data=user_input) return self.async_show_form(step_id='user', data_schema=USER_SCHEMA, errors=errors)<|docstring|>Handle a flow initialized by the user.<|endoftext|>
eafb7e9533312c22fb986e74494c32179da60e5e8444063e1069f015a5b985e4
def __init__(self, base: str): 'Initialize with error base.' super().__init__() self.base = base
Initialize with error base.
homeassistant/components/bond/config_flow.py
__init__
xonestonex/core
1
python
def __init__(self, base: str): super().__init__() self.base = base
def __init__(self, base: str): super().__init__() self.base = base<|docstring|>Initialize with error base.<|endoftext|>
e60003f9a30c5e059f87f05b16f076d31cfec9bd2962aad742215fda170b10e4
def __init__(self, key, nxt=None): 'Initialize the SLLNode.\n\n Args:\n key (T): the key of this node.\n value(V): the payload of this node.\n nxt (Optional[Node]): the next node of this node.\n ' self._key: T = key self._next: Optional[SLLNode] = nxt
Initialize the SLLNode. Args: key (T): the key of this node. value(V): the payload of this node. nxt (Optional[Node]): the next node of this node.
ads/fundamentals/_nodes.py
__init__
Aminul-Momin/Algorithms_and_Data_Structures
0
python
def __init__(self, key, nxt=None): 'Initialize the SLLNode.\n\n Args:\n key (T): the key of this node.\n value(V): the payload of this node.\n nxt (Optional[Node]): the next node of this node.\n ' self._key: T = key self._next: Optional[SLLNode] = nxt
def __init__(self, key, nxt=None): 'Initialize the SLLNode.\n\n Args:\n key (T): the key of this node.\n value(V): the payload of this node.\n nxt (Optional[Node]): the next node of this node.\n ' self._key: T = key self._next: Optional[SLLNode] = nxt<|docstring|>Initialize the SLLNode. Args: key (T): the key of this node. value(V): the payload of this node. nxt (Optional[Node]): the next node of this node.<|endoftext|>
aa0c10ab8a7e99950d5c0266fd759efff479f87176ed7782e0ab55a7139bb30f
def __init__(self, key, nxt=None, prev=None): 'Initialize the DLLNode.\n\n Args:\n key (T): the key of this node.\n value(V): the payload of this node.\n nxt (Optional[Node]): the next node of this node.\n prev (Optional[Node]): the previous node of this node.\n ' super(DLLNode, self).__init__(key, nxt) self._prev: Optional[SLLNode] = prev
Initialize the DLLNode. Args: key (T): the key of this node. value(V): the payload of this node. nxt (Optional[Node]): the next node of this node. prev (Optional[Node]): the previous node of this node.
ads/fundamentals/_nodes.py
__init__
Aminul-Momin/Algorithms_and_Data_Structures
0
python
def __init__(self, key, nxt=None, prev=None): 'Initialize the DLLNode.\n\n Args:\n key (T): the key of this node.\n value(V): the payload of this node.\n nxt (Optional[Node]): the next node of this node.\n prev (Optional[Node]): the previous node of this node.\n ' super(DLLNode, self).__init__(key, nxt) self._prev: Optional[SLLNode] = prev
def __init__(self, key, nxt=None, prev=None): 'Initialize the DLLNode.\n\n Args:\n key (T): the key of this node.\n value(V): the payload of this node.\n nxt (Optional[Node]): the next node of this node.\n prev (Optional[Node]): the previous node of this node.\n ' super(DLLNode, self).__init__(key, nxt) self._prev: Optional[SLLNode] = prev<|docstring|>Initialize the DLLNode. Args: key (T): the key of this node. value(V): the payload of this node. nxt (Optional[Node]): the next node of this node. prev (Optional[Node]): the previous node of this node.<|endoftext|>
304925143c9ae2f9320eb7db4dca1a37fa29beb56eb3dabada651c7a05aeee77
def _plot_foil(foil, N_sections=21, N_points=50, geometry='airfoils', flatten=False, ax=None): 'Plot a FoilGeometry in 3D.' if (ax is None): (fig, ax) = _create_3d_axes() ax.set_proj_type('ortho') independent_plot = True else: independent_plot = False sa = (1 - np.cos(np.linspace((np.pi / 2), 0, N_points))) for s in np.linspace((- 1), 1, N_sections): if (geometry == 'airfoils'): coords = foil.surface_xyz(s, sa, 'lower', flatten=flatten).T ax.plot(coords[0], coords[1], coords[2], c='r', zorder=0.9, lw=0.25) coords = foil.surface_xyz(s, sa, 'upper', flatten=flatten).T ax.plot(coords[0], coords[1], coords[2], c='b', lw=0.25) elif (geometry == 'chords'): coords = foil.surface_xyz(s, sa, surface='chord').T ax.plot(coords[0], coords[1], coords[2], c='k', lw=0.5) s = np.linspace((- 1), 1, N_sections) LE = foil.surface_xyz(s, 0, surface='chord', flatten=flatten).T c4 = foil.surface_xyz(s, 0.25, surface='chord', flatten=flatten).T TE = foil.surface_xyz(s, 1, surface='chord', flatten=flatten).T ax.plot(LE[0], LE[1], LE[2], 'k--', lw=0.8) ax.plot(c4[0], c4[1], c4[2], 'g--', lw=0.8) ax.plot(TE[0], TE[1], TE[2], 'k--', lw=0.8) gsim.extras.plots._set_axes_equal(ax) xlim = ax.get_xlim3d() zlim = ax.get_zlim3d() z = 0.75 vertices = np.vstack((LE[0:2].T, TE[0:2].T[::(- 1)])) poly = PolyCollection([vertices], facecolors=['k'], alpha=0.25) ax.add_collection3d(poly, zs=[z], zdir='z') ax.plot(c4[0], c4[1], z, 'g--', lw=0.8) xyz = foil.surface_xyz(s, foil._layout.r_x(s), surface='chord').T ax.plot(xyz[0], xyz[1], z, 'r--', lw=0.8, label='reference lines') x = np.full(*c4[1].shape, (- 1.25)) ax.plot(x, c4[1], c4[2], 'g--', lw=0.8, label='quarter-chord') xyz = foil.surface_xyz(s, foil._layout.r_yz(s), surface='chord').T ax.plot(x, xyz[1], xyz[2], 'r--', lw=0.8)
Plot a FoilGeometry in 3D.
source/figures/paraglider/geometry/canopy/examples/generate_canopy_examples.py
_plot_foil
pfheatwole/thesis
0
python
def _plot_foil(foil, N_sections=21, N_points=50, geometry='airfoils', flatten=False, ax=None): if (ax is None): (fig, ax) = _create_3d_axes() ax.set_proj_type('ortho') independent_plot = True else: independent_plot = False sa = (1 - np.cos(np.linspace((np.pi / 2), 0, N_points))) for s in np.linspace((- 1), 1, N_sections): if (geometry == 'airfoils'): coords = foil.surface_xyz(s, sa, 'lower', flatten=flatten).T ax.plot(coords[0], coords[1], coords[2], c='r', zorder=0.9, lw=0.25) coords = foil.surface_xyz(s, sa, 'upper', flatten=flatten).T ax.plot(coords[0], coords[1], coords[2], c='b', lw=0.25) elif (geometry == 'chords'): coords = foil.surface_xyz(s, sa, surface='chord').T ax.plot(coords[0], coords[1], coords[2], c='k', lw=0.5) s = np.linspace((- 1), 1, N_sections) LE = foil.surface_xyz(s, 0, surface='chord', flatten=flatten).T c4 = foil.surface_xyz(s, 0.25, surface='chord', flatten=flatten).T TE = foil.surface_xyz(s, 1, surface='chord', flatten=flatten).T ax.plot(LE[0], LE[1], LE[2], 'k--', lw=0.8) ax.plot(c4[0], c4[1], c4[2], 'g--', lw=0.8) ax.plot(TE[0], TE[1], TE[2], 'k--', lw=0.8) gsim.extras.plots._set_axes_equal(ax) xlim = ax.get_xlim3d() zlim = ax.get_zlim3d() z = 0.75 vertices = np.vstack((LE[0:2].T, TE[0:2].T[::(- 1)])) poly = PolyCollection([vertices], facecolors=['k'], alpha=0.25) ax.add_collection3d(poly, zs=[z], zdir='z') ax.plot(c4[0], c4[1], z, 'g--', lw=0.8) xyz = foil.surface_xyz(s, foil._layout.r_x(s), surface='chord').T ax.plot(xyz[0], xyz[1], z, 'r--', lw=0.8, label='reference lines') x = np.full(*c4[1].shape, (- 1.25)) ax.plot(x, c4[1], c4[2], 'g--', lw=0.8, label='quarter-chord') xyz = foil.surface_xyz(s, foil._layout.r_yz(s), surface='chord').T ax.plot(x, xyz[1], xyz[2], 'r--', lw=0.8)
def _plot_foil(foil, N_sections=21, N_points=50, geometry='airfoils', flatten=False, ax=None): if (ax is None): (fig, ax) = _create_3d_axes() ax.set_proj_type('ortho') independent_plot = True else: independent_plot = False sa = (1 - np.cos(np.linspace((np.pi / 2), 0, N_points))) for s in np.linspace((- 1), 1, N_sections): if (geometry == 'airfoils'): coords = foil.surface_xyz(s, sa, 'lower', flatten=flatten).T ax.plot(coords[0], coords[1], coords[2], c='r', zorder=0.9, lw=0.25) coords = foil.surface_xyz(s, sa, 'upper', flatten=flatten).T ax.plot(coords[0], coords[1], coords[2], c='b', lw=0.25) elif (geometry == 'chords'): coords = foil.surface_xyz(s, sa, surface='chord').T ax.plot(coords[0], coords[1], coords[2], c='k', lw=0.5) s = np.linspace((- 1), 1, N_sections) LE = foil.surface_xyz(s, 0, surface='chord', flatten=flatten).T c4 = foil.surface_xyz(s, 0.25, surface='chord', flatten=flatten).T TE = foil.surface_xyz(s, 1, surface='chord', flatten=flatten).T ax.plot(LE[0], LE[1], LE[2], 'k--', lw=0.8) ax.plot(c4[0], c4[1], c4[2], 'g--', lw=0.8) ax.plot(TE[0], TE[1], TE[2], 'k--', lw=0.8) gsim.extras.plots._set_axes_equal(ax) xlim = ax.get_xlim3d() zlim = ax.get_zlim3d() z = 0.75 vertices = np.vstack((LE[0:2].T, TE[0:2].T[::(- 1)])) poly = PolyCollection([vertices], facecolors=['k'], alpha=0.25) ax.add_collection3d(poly, zs=[z], zdir='z') ax.plot(c4[0], c4[1], z, 'g--', lw=0.8) xyz = foil.surface_xyz(s, foil._layout.r_x(s), surface='chord').T ax.plot(xyz[0], xyz[1], z, 'r--', lw=0.8, label='reference lines') x = np.full(*c4[1].shape, (- 1.25)) ax.plot(x, c4[1], c4[2], 'g--', lw=0.8, label='quarter-chord') xyz = foil.surface_xyz(s, foil._layout.r_yz(s), surface='chord').T ax.plot(x, xyz[1], xyz[2], 'r--', lw=0.8)<|docstring|>Plot a FoilGeometry in 3D.<|endoftext|>
9ea0e1b01e55c5da45bae6b9a5b73de638d813205de36eb196b5e8fd2517d4c4
def __init__(self, username, password, **kwargs): "\n\n :param username: Login username\n :param password: Login password\n :param kwargs: See below\n\n :Keyword Arguments:\n - **auto_patch**: Patch the api objects to match the public API. Default: False\n - **drop_incompat_key**: Remove api object keys that is not in the public API. Default: False\n - **timeout**: Timeout interval in seconds. Default: 15\n - **api_url**: Override the default api url base\n - **cookie**: Saved cookie string from a previous session\n - **settings**: A dict of settings from a previous session\n - **on_login**: Callback after successful login\n - **proxy**: Specify a proxy ex: 'http://127.0.0.1:8888' (ALPHA)\n :return:\n " self.username = username self.password = password self.auto_patch = kwargs.pop('auto_patch', False) self.drop_incompat_keys = kwargs.pop('drop_incompat_keys', False) self.api_url = (kwargs.pop('api_url', None) or self.API_URL) self.timeout = kwargs.pop('timeout', 15) self.on_login = kwargs.pop('on_login', None) self.logger = logger user_settings = (kwargs.pop('settings', None) or {}) self.uuid = (kwargs.pop('guid', None) or kwargs.pop('uuid', None) or user_settings.get('uuid') or self.generate_uuid(False)) self.device_id = (kwargs.pop('device_id', None) or user_settings.get('device_id') or self.generate_deviceid()) self.signature_key = (kwargs.pop('signature_key', None) or user_settings.get('signature_key') or self.IG_SIG_KEY) self.key_version = (kwargs.pop('key_version', None) or user_settings.get('key_version') or self.SIG_KEY_VERSION) self.ig_capabilities = (kwargs.pop('ig_capabilities', None) or user_settings.get('ig_capabilities') or self.IG_CAPABILITIES) self.application_id = (kwargs.pop('application_id', None) or user_settings.get('application_id') or self.APPLICATION_ID) custom_ua = (kwargs.pop('user_agent', '') or user_settings.get('user_agent')) if custom_ua: self.user_agent = custom_ua else: self.app_version = (kwargs.pop('app_version', None) or 
user_settings.get('app_version') or Constants.APP_VERSION) self.android_release = (kwargs.pop('android_release', None) or user_settings.get('android_release') or Constants.ANDROID_RELEASE) self.android_version = int((kwargs.pop('android_version', None) or user_settings.get('android_version') or Constants.ANDROID_VERSION)) self.phone_manufacturer = (kwargs.pop('phone_manufacturer', None) or user_settings.get('phone_manufacturer') or Constants.PHONE_MANUFACTURER) self.phone_device = (kwargs.pop('phone_device', None) or user_settings.get('phone_device') or Constants.PHONE_DEVICE) self.phone_model = (kwargs.pop('phone_model', None) or user_settings.get('phone_model') or Constants.PHONE_MODEL) self.phone_dpi = (kwargs.pop('phone_dpi', None) or user_settings.get('phone_dpi') or Constants.PHONE_DPI) self.phone_resolution = (kwargs.pop('phone_resolution', None) or user_settings.get('phone_resolution') or Constants.PHONE_RESOLUTION) self.phone_chipset = (kwargs.pop('phone_chipset', None) or user_settings.get('phone_chipset') or Constants.PHONE_CHIPSET) cookie_string = (kwargs.pop('cookie', None) or user_settings.get('cookie')) cookie_jar = ClientCookieJar(cookie_string=cookie_string) if (cookie_string and cookie_jar.expires_earliest and (int(time.time()) >= cookie_jar.expires_earliest)): raise ClientCookieExpiredError('Oldest cookie expired at {0!s}'.format(cookie_jar.expires_earliest)) cookie_handler = compat_urllib_request.HTTPCookieProcessor(cookie_jar) proxy_handler = None proxy = kwargs.pop('proxy', None) if proxy: warnings.warn('Proxy support is alpha.', UserWarning) parsed_url = compat_urllib_parse_urlparse(proxy) if (parsed_url.netloc and parsed_url.scheme): proxy_address = '{0!s}://{1!s}'.format(parsed_url.scheme, parsed_url.netloc) proxy_handler = compat_urllib_request.ProxyHandler({'https': proxy_address}) else: raise ValueError('Invalid proxy argument: {0!s}'.format(proxy)) handlers = [] if proxy_handler: handlers.append(proxy_handler) custom_ssl_context = 
kwargs.pop('custom_ssl_context', None) try: httpshandler = compat_urllib_request.HTTPSHandler(context=custom_ssl_context) except TypeError: httpshandler = compat_urllib_request.HTTPSHandler() handlers.extend([compat_urllib_request.HTTPHandler(), httpshandler, cookie_handler]) opener = compat_urllib_request.build_opener(*handlers) opener.cookie_jar = cookie_jar self.opener = opener self.ad_id = (kwargs.pop('ad_id', None) or user_settings.get('ad_id') or self.generate_adid()) if (not cookie_string): if ((not self.username) or (not self.password)): raise ClientLoginRequiredError('login_required', code=400) self.login() self.logger.debug('USERAGENT: {0!s}'.format(self.user_agent)) super(Client, self).__init__()
:param username: Login username :param password: Login password :param kwargs: See below :Keyword Arguments: - **auto_patch**: Patch the api objects to match the public API. Default: False - **drop_incompat_key**: Remove api object keys that is not in the public API. Default: False - **timeout**: Timeout interval in seconds. Default: 15 - **api_url**: Override the default api url base - **cookie**: Saved cookie string from a previous session - **settings**: A dict of settings from a previous session - **on_login**: Callback after successful login - **proxy**: Specify a proxy ex: 'http://127.0.0.1:8888' (ALPHA) :return:
client.py
__init__
tomLamprecht/PepperNaoInstagram
1
python
def __init__(self, username, password, **kwargs): "\n\n :param username: Login username\n :param password: Login password\n :param kwargs: See below\n\n :Keyword Arguments:\n - **auto_patch**: Patch the api objects to match the public API. Default: False\n - **drop_incompat_key**: Remove api object keys that is not in the public API. Default: False\n - **timeout**: Timeout interval in seconds. Default: 15\n - **api_url**: Override the default api url base\n - **cookie**: Saved cookie string from a previous session\n - **settings**: A dict of settings from a previous session\n - **on_login**: Callback after successful login\n - **proxy**: Specify a proxy ex: 'http://127.0.0.1:8888' (ALPHA)\n :return:\n " self.username = username self.password = password self.auto_patch = kwargs.pop('auto_patch', False) self.drop_incompat_keys = kwargs.pop('drop_incompat_keys', False) self.api_url = (kwargs.pop('api_url', None) or self.API_URL) self.timeout = kwargs.pop('timeout', 15) self.on_login = kwargs.pop('on_login', None) self.logger = logger user_settings = (kwargs.pop('settings', None) or {}) self.uuid = (kwargs.pop('guid', None) or kwargs.pop('uuid', None) or user_settings.get('uuid') or self.generate_uuid(False)) self.device_id = (kwargs.pop('device_id', None) or user_settings.get('device_id') or self.generate_deviceid()) self.signature_key = (kwargs.pop('signature_key', None) or user_settings.get('signature_key') or self.IG_SIG_KEY) self.key_version = (kwargs.pop('key_version', None) or user_settings.get('key_version') or self.SIG_KEY_VERSION) self.ig_capabilities = (kwargs.pop('ig_capabilities', None) or user_settings.get('ig_capabilities') or self.IG_CAPABILITIES) self.application_id = (kwargs.pop('application_id', None) or user_settings.get('application_id') or self.APPLICATION_ID) custom_ua = (kwargs.pop('user_agent', ) or user_settings.get('user_agent')) if custom_ua: self.user_agent = custom_ua else: self.app_version = (kwargs.pop('app_version', None) or 
user_settings.get('app_version') or Constants.APP_VERSION) self.android_release = (kwargs.pop('android_release', None) or user_settings.get('android_release') or Constants.ANDROID_RELEASE) self.android_version = int((kwargs.pop('android_version', None) or user_settings.get('android_version') or Constants.ANDROID_VERSION)) self.phone_manufacturer = (kwargs.pop('phone_manufacturer', None) or user_settings.get('phone_manufacturer') or Constants.PHONE_MANUFACTURER) self.phone_device = (kwargs.pop('phone_device', None) or user_settings.get('phone_device') or Constants.PHONE_DEVICE) self.phone_model = (kwargs.pop('phone_model', None) or user_settings.get('phone_model') or Constants.PHONE_MODEL) self.phone_dpi = (kwargs.pop('phone_dpi', None) or user_settings.get('phone_dpi') or Constants.PHONE_DPI) self.phone_resolution = (kwargs.pop('phone_resolution', None) or user_settings.get('phone_resolution') or Constants.PHONE_RESOLUTION) self.phone_chipset = (kwargs.pop('phone_chipset', None) or user_settings.get('phone_chipset') or Constants.PHONE_CHIPSET) cookie_string = (kwargs.pop('cookie', None) or user_settings.get('cookie')) cookie_jar = ClientCookieJar(cookie_string=cookie_string) if (cookie_string and cookie_jar.expires_earliest and (int(time.time()) >= cookie_jar.expires_earliest)): raise ClientCookieExpiredError('Oldest cookie expired at {0!s}'.format(cookie_jar.expires_earliest)) cookie_handler = compat_urllib_request.HTTPCookieProcessor(cookie_jar) proxy_handler = None proxy = kwargs.pop('proxy', None) if proxy: warnings.warn('Proxy support is alpha.', UserWarning) parsed_url = compat_urllib_parse_urlparse(proxy) if (parsed_url.netloc and parsed_url.scheme): proxy_address = '{0!s}://{1!s}'.format(parsed_url.scheme, parsed_url.netloc) proxy_handler = compat_urllib_request.ProxyHandler({'https': proxy_address}) else: raise ValueError('Invalid proxy argument: {0!s}'.format(proxy)) handlers = [] if proxy_handler: handlers.append(proxy_handler) custom_ssl_context = 
kwargs.pop('custom_ssl_context', None) try: httpshandler = compat_urllib_request.HTTPSHandler(context=custom_ssl_context) except TypeError: httpshandler = compat_urllib_request.HTTPSHandler() handlers.extend([compat_urllib_request.HTTPHandler(), httpshandler, cookie_handler]) opener = compat_urllib_request.build_opener(*handlers) opener.cookie_jar = cookie_jar self.opener = opener self.ad_id = (kwargs.pop('ad_id', None) or user_settings.get('ad_id') or self.generate_adid()) if (not cookie_string): if ((not self.username) or (not self.password)): raise ClientLoginRequiredError('login_required', code=400) self.login() self.logger.debug('USERAGENT: {0!s}'.format(self.user_agent)) super(Client, self).__init__()
def __init__(self, username, password, **kwargs): "\n\n :param username: Login username\n :param password: Login password\n :param kwargs: See below\n\n :Keyword Arguments:\n - **auto_patch**: Patch the api objects to match the public API. Default: False\n - **drop_incompat_key**: Remove api object keys that is not in the public API. Default: False\n - **timeout**: Timeout interval in seconds. Default: 15\n - **api_url**: Override the default api url base\n - **cookie**: Saved cookie string from a previous session\n - **settings**: A dict of settings from a previous session\n - **on_login**: Callback after successful login\n - **proxy**: Specify a proxy ex: 'http://127.0.0.1:8888' (ALPHA)\n :return:\n " self.username = username self.password = password self.auto_patch = kwargs.pop('auto_patch', False) self.drop_incompat_keys = kwargs.pop('drop_incompat_keys', False) self.api_url = (kwargs.pop('api_url', None) or self.API_URL) self.timeout = kwargs.pop('timeout', 15) self.on_login = kwargs.pop('on_login', None) self.logger = logger user_settings = (kwargs.pop('settings', None) or {}) self.uuid = (kwargs.pop('guid', None) or kwargs.pop('uuid', None) or user_settings.get('uuid') or self.generate_uuid(False)) self.device_id = (kwargs.pop('device_id', None) or user_settings.get('device_id') or self.generate_deviceid()) self.signature_key = (kwargs.pop('signature_key', None) or user_settings.get('signature_key') or self.IG_SIG_KEY) self.key_version = (kwargs.pop('key_version', None) or user_settings.get('key_version') or self.SIG_KEY_VERSION) self.ig_capabilities = (kwargs.pop('ig_capabilities', None) or user_settings.get('ig_capabilities') or self.IG_CAPABILITIES) self.application_id = (kwargs.pop('application_id', None) or user_settings.get('application_id') or self.APPLICATION_ID) custom_ua = (kwargs.pop('user_agent', ) or user_settings.get('user_agent')) if custom_ua: self.user_agent = custom_ua else: self.app_version = (kwargs.pop('app_version', None) or 
user_settings.get('app_version') or Constants.APP_VERSION) self.android_release = (kwargs.pop('android_release', None) or user_settings.get('android_release') or Constants.ANDROID_RELEASE) self.android_version = int((kwargs.pop('android_version', None) or user_settings.get('android_version') or Constants.ANDROID_VERSION)) self.phone_manufacturer = (kwargs.pop('phone_manufacturer', None) or user_settings.get('phone_manufacturer') or Constants.PHONE_MANUFACTURER) self.phone_device = (kwargs.pop('phone_device', None) or user_settings.get('phone_device') or Constants.PHONE_DEVICE) self.phone_model = (kwargs.pop('phone_model', None) or user_settings.get('phone_model') or Constants.PHONE_MODEL) self.phone_dpi = (kwargs.pop('phone_dpi', None) or user_settings.get('phone_dpi') or Constants.PHONE_DPI) self.phone_resolution = (kwargs.pop('phone_resolution', None) or user_settings.get('phone_resolution') or Constants.PHONE_RESOLUTION) self.phone_chipset = (kwargs.pop('phone_chipset', None) or user_settings.get('phone_chipset') or Constants.PHONE_CHIPSET) cookie_string = (kwargs.pop('cookie', None) or user_settings.get('cookie')) cookie_jar = ClientCookieJar(cookie_string=cookie_string) if (cookie_string and cookie_jar.expires_earliest and (int(time.time()) >= cookie_jar.expires_earliest)): raise ClientCookieExpiredError('Oldest cookie expired at {0!s}'.format(cookie_jar.expires_earliest)) cookie_handler = compat_urllib_request.HTTPCookieProcessor(cookie_jar) proxy_handler = None proxy = kwargs.pop('proxy', None) if proxy: warnings.warn('Proxy support is alpha.', UserWarning) parsed_url = compat_urllib_parse_urlparse(proxy) if (parsed_url.netloc and parsed_url.scheme): proxy_address = '{0!s}://{1!s}'.format(parsed_url.scheme, parsed_url.netloc) proxy_handler = compat_urllib_request.ProxyHandler({'https': proxy_address}) else: raise ValueError('Invalid proxy argument: {0!s}'.format(proxy)) handlers = [] if proxy_handler: handlers.append(proxy_handler) custom_ssl_context = 
kwargs.pop('custom_ssl_context', None) try: httpshandler = compat_urllib_request.HTTPSHandler(context=custom_ssl_context) except TypeError: httpshandler = compat_urllib_request.HTTPSHandler() handlers.extend([compat_urllib_request.HTTPHandler(), httpshandler, cookie_handler]) opener = compat_urllib_request.build_opener(*handlers) opener.cookie_jar = cookie_jar self.opener = opener self.ad_id = (kwargs.pop('ad_id', None) or user_settings.get('ad_id') or self.generate_adid()) if (not cookie_string): if ((not self.username) or (not self.password)): raise ClientLoginRequiredError('login_required', code=400) self.login() self.logger.debug('USERAGENT: {0!s}'.format(self.user_agent)) super(Client, self).__init__()<|docstring|>:param username: Login username :param password: Login password :param kwargs: See below :Keyword Arguments: - **auto_patch**: Patch the api objects to match the public API. Default: False - **drop_incompat_key**: Remove api object keys that is not in the public API. Default: False - **timeout**: Timeout interval in seconds. Default: 15 - **api_url**: Override the default api url base - **cookie**: Saved cookie string from a previous session - **settings**: A dict of settings from a previous session - **on_login**: Callback after successful login - **proxy**: Specify a proxy ex: 'http://127.0.0.1:8888' (ALPHA) :return:<|endoftext|>
2039222bdf7c9887ff9f9ab339bd2dcd47e666fc966709bd1fc88c00637ee9bb
@property def settings(self): 'Helper property that extracts the settings that you should cache\n in addition to username and password.' return {'uuid': self.uuid, 'device_id': self.device_id, 'ad_id': self.ad_id, 'cookie': self.cookie_jar.dump(), 'created_ts': int(time.time())}
Helper property that extracts the settings that you should cache in addition to username and password.
client.py
settings
tomLamprecht/PepperNaoInstagram
1
python
@property def settings(self): 'Helper property that extracts the settings that you should cache\n in addition to username and password.' return {'uuid': self.uuid, 'device_id': self.device_id, 'ad_id': self.ad_id, 'cookie': self.cookie_jar.dump(), 'created_ts': int(time.time())}
@property def settings(self): 'Helper property that extracts the settings that you should cache\n in addition to username and password.' return {'uuid': self.uuid, 'device_id': self.device_id, 'ad_id': self.ad_id, 'cookie': self.cookie_jar.dump(), 'created_ts': int(time.time())}<|docstring|>Helper property that extracts the settings that you should cache in addition to username and password.<|endoftext|>
83817bd62452b0a46da2ec05f66c1937c87d41e8f915be9106e1d39564da1325
@property def user_agent(self): 'Returns the useragent string that the client is currently using.' return (Constants.USER_AGENT_FORMAT % {'app_version': self.app_version, 'android_version': self.android_version, 'android_release': self.android_release, 'brand': self.phone_manufacturer, 'device': self.phone_device, 'model': self.phone_model, 'dpi': self.phone_dpi, 'resolution': self.phone_resolution, 'chipset': self.phone_chipset})
Returns the useragent string that the client is currently using.
client.py
user_agent
tomLamprecht/PepperNaoInstagram
1
python
@property def user_agent(self): return (Constants.USER_AGENT_FORMAT % {'app_version': self.app_version, 'android_version': self.android_version, 'android_release': self.android_release, 'brand': self.phone_manufacturer, 'device': self.phone_device, 'model': self.phone_model, 'dpi': self.phone_dpi, 'resolution': self.phone_resolution, 'chipset': self.phone_chipset})
@property def user_agent(self): return (Constants.USER_AGENT_FORMAT % {'app_version': self.app_version, 'android_version': self.android_version, 'android_release': self.android_release, 'brand': self.phone_manufacturer, 'device': self.phone_device, 'model': self.phone_model, 'dpi': self.phone_dpi, 'resolution': self.phone_resolution, 'chipset': self.phone_chipset})<|docstring|>Returns the useragent string that the client is currently using.<|endoftext|>
e30ec1cf5487a942059600990f9611d3cf4e0bea72e8be63324b3496a8e14a8b
@user_agent.setter def user_agent(self, value): 'Override the useragent string with your own' mobj = re.search(Constants.USER_AGENT_EXPRESSION, value) if (not mobj): raise ValueError('User-agent specified does not fit format required: {0!s}'.format(Constants.USER_AGENT_EXPRESSION)) self.app_version = mobj.group('app_version') self.android_release = mobj.group('android_release') self.android_version = int(mobj.group('android_version')) self.phone_manufacturer = mobj.group('manufacturer') self.phone_device = mobj.group('device') self.phone_model = mobj.group('model') self.phone_dpi = mobj.group('dpi') self.phone_resolution = mobj.group('resolution') self.phone_chipset = mobj.group('chipset')
Override the useragent string with your own
client.py
user_agent
tomLamprecht/PepperNaoInstagram
1
python
@user_agent.setter def user_agent(self, value): mobj = re.search(Constants.USER_AGENT_EXPRESSION, value) if (not mobj): raise ValueError('User-agent specified does not fit format required: {0!s}'.format(Constants.USER_AGENT_EXPRESSION)) self.app_version = mobj.group('app_version') self.android_release = mobj.group('android_release') self.android_version = int(mobj.group('android_version')) self.phone_manufacturer = mobj.group('manufacturer') self.phone_device = mobj.group('device') self.phone_model = mobj.group('model') self.phone_dpi = mobj.group('dpi') self.phone_resolution = mobj.group('resolution') self.phone_chipset = mobj.group('chipset')
@user_agent.setter def user_agent(self, value): mobj = re.search(Constants.USER_AGENT_EXPRESSION, value) if (not mobj): raise ValueError('User-agent specified does not fit format required: {0!s}'.format(Constants.USER_AGENT_EXPRESSION)) self.app_version = mobj.group('app_version') self.android_release = mobj.group('android_release') self.android_version = int(mobj.group('android_version')) self.phone_manufacturer = mobj.group('manufacturer') self.phone_device = mobj.group('device') self.phone_model = mobj.group('model') self.phone_dpi = mobj.group('dpi') self.phone_resolution = mobj.group('resolution') self.phone_chipset = mobj.group('chipset')<|docstring|>Override the useragent string with your own<|endoftext|>
384aceffa46d75903fb6c6c0eb2854865f4717ea2f4f66a8a877f82d75ec6599
@staticmethod def generate_useragent(**kwargs): '\n Helper method to generate a useragent string based on device parameters\n\n :param kwargs:\n - **app_version**\n - **android_version**\n - **android_release**\n - **brand**\n - **device**\n - **model**\n - **dpi**\n - **resolution**\n - **chipset**\n :return: A compatible user agent string\n ' return (Constants.USER_AGENT_FORMAT % {'app_version': (kwargs.pop('app_version', None) or Constants.APP_VERSION), 'android_version': int((kwargs.pop('android_version', None) or Constants.ANDROID_VERSION)), 'android_release': (kwargs.pop('android_release', None) or Constants.ANDROID_RELEASE), 'brand': (kwargs.pop('phone_manufacturer', None) or Constants.PHONE_MANUFACTURER), 'device': (kwargs.pop('phone_device', None) or Constants.PHONE_DEVICE), 'model': (kwargs.pop('phone_model', None) or Constants.PHONE_MODEL), 'dpi': (kwargs.pop('phone_dpi', None) or Constants.PHONE_DPI), 'resolution': (kwargs.pop('phone_resolution', None) or Constants.PHONE_RESOLUTION), 'chipset': (kwargs.pop('phone_chipset', None) or Constants.PHONE_CHIPSET)})
Helper method to generate a useragent string based on device parameters :param kwargs: - **app_version** - **android_version** - **android_release** - **brand** - **device** - **model** - **dpi** - **resolution** - **chipset** :return: A compatible user agent string
client.py
generate_useragent
tomLamprecht/PepperNaoInstagram
1
python
@staticmethod def generate_useragent(**kwargs): '\n Helper method to generate a useragent string based on device parameters\n\n :param kwargs:\n - **app_version**\n - **android_version**\n - **android_release**\n - **brand**\n - **device**\n - **model**\n - **dpi**\n - **resolution**\n - **chipset**\n :return: A compatible user agent string\n ' return (Constants.USER_AGENT_FORMAT % {'app_version': (kwargs.pop('app_version', None) or Constants.APP_VERSION), 'android_version': int((kwargs.pop('android_version', None) or Constants.ANDROID_VERSION)), 'android_release': (kwargs.pop('android_release', None) or Constants.ANDROID_RELEASE), 'brand': (kwargs.pop('phone_manufacturer', None) or Constants.PHONE_MANUFACTURER), 'device': (kwargs.pop('phone_device', None) or Constants.PHONE_DEVICE), 'model': (kwargs.pop('phone_model', None) or Constants.PHONE_MODEL), 'dpi': (kwargs.pop('phone_dpi', None) or Constants.PHONE_DPI), 'resolution': (kwargs.pop('phone_resolution', None) or Constants.PHONE_RESOLUTION), 'chipset': (kwargs.pop('phone_chipset', None) or Constants.PHONE_CHIPSET)})
@staticmethod def generate_useragent(**kwargs): '\n Helper method to generate a useragent string based on device parameters\n\n :param kwargs:\n - **app_version**\n - **android_version**\n - **android_release**\n - **brand**\n - **device**\n - **model**\n - **dpi**\n - **resolution**\n - **chipset**\n :return: A compatible user agent string\n ' return (Constants.USER_AGENT_FORMAT % {'app_version': (kwargs.pop('app_version', None) or Constants.APP_VERSION), 'android_version': int((kwargs.pop('android_version', None) or Constants.ANDROID_VERSION)), 'android_release': (kwargs.pop('android_release', None) or Constants.ANDROID_RELEASE), 'brand': (kwargs.pop('phone_manufacturer', None) or Constants.PHONE_MANUFACTURER), 'device': (kwargs.pop('phone_device', None) or Constants.PHONE_DEVICE), 'model': (kwargs.pop('phone_model', None) or Constants.PHONE_MODEL), 'dpi': (kwargs.pop('phone_dpi', None) or Constants.PHONE_DPI), 'resolution': (kwargs.pop('phone_resolution', None) or Constants.PHONE_RESOLUTION), 'chipset': (kwargs.pop('phone_chipset', None) or Constants.PHONE_CHIPSET)})<|docstring|>Helper method to generate a useragent string based on device parameters :param kwargs: - **app_version** - **android_version** - **android_release** - **brand** - **device** - **model** - **dpi** - **resolution** - **chipset** :return: A compatible user agent string<|endoftext|>
fb8f3004382d5d44eff7ee0916817bded8491320c4974f561fe2b6a478e5cf2b
@staticmethod def validate_useragent(value): '\n Helper method to validate a useragent string for format correctness\n\n :param value:\n :return:\n ' mobj = re.search(Constants.USER_AGENT_EXPRESSION, value) if (not mobj): raise ValueError('User-agent specified does not fit format required: {0!s}'.format(Constants.USER_AGENT_EXPRESSION)) parse_params = {'app_version': mobj.group('app_version'), 'android_version': int(mobj.group('android_version')), 'android_release': mobj.group('android_release'), 'brand': mobj.group('manufacturer'), 'device': mobj.group('device'), 'model': mobj.group('model'), 'dpi': mobj.group('dpi'), 'resolution': mobj.group('resolution'), 'chipset': mobj.group('chipset')} return {'user_agent': (Constants.USER_AGENT_FORMAT % parse_params), 'parsed_params': parse_params}
Helper method to validate a useragent string for format correctness :param value: :return:
client.py
validate_useragent
tomLamprecht/PepperNaoInstagram
1
python
@staticmethod def validate_useragent(value): '\n Helper method to validate a useragent string for format correctness\n\n :param value:\n :return:\n ' mobj = re.search(Constants.USER_AGENT_EXPRESSION, value) if (not mobj): raise ValueError('User-agent specified does not fit format required: {0!s}'.format(Constants.USER_AGENT_EXPRESSION)) parse_params = {'app_version': mobj.group('app_version'), 'android_version': int(mobj.group('android_version')), 'android_release': mobj.group('android_release'), 'brand': mobj.group('manufacturer'), 'device': mobj.group('device'), 'model': mobj.group('model'), 'dpi': mobj.group('dpi'), 'resolution': mobj.group('resolution'), 'chipset': mobj.group('chipset')} return {'user_agent': (Constants.USER_AGENT_FORMAT % parse_params), 'parsed_params': parse_params}
@staticmethod def validate_useragent(value): '\n Helper method to validate a useragent string for format correctness\n\n :param value:\n :return:\n ' mobj = re.search(Constants.USER_AGENT_EXPRESSION, value) if (not mobj): raise ValueError('User-agent specified does not fit format required: {0!s}'.format(Constants.USER_AGENT_EXPRESSION)) parse_params = {'app_version': mobj.group('app_version'), 'android_version': int(mobj.group('android_version')), 'android_release': mobj.group('android_release'), 'brand': mobj.group('manufacturer'), 'device': mobj.group('device'), 'model': mobj.group('model'), 'dpi': mobj.group('dpi'), 'resolution': mobj.group('resolution'), 'chipset': mobj.group('chipset')} return {'user_agent': (Constants.USER_AGENT_FORMAT % parse_params), 'parsed_params': parse_params}<|docstring|>Helper method to validate a useragent string for format correctness :param value: :return:<|endoftext|>
426ba99f077e924968b623cdf39b08330da0f1cfef7b0e0053e436a44319e83d
@property
def csrftoken(self):
    """CSRF token currently held in the client's cookie jar."""
    token_value = self.get_cookie_value('csrftoken')
    return token_value
The client's current csrf token
client.py
csrftoken
tomLamprecht/PepperNaoInstagram
1
python
@property def csrftoken(self): return self.get_cookie_value('csrftoken')
@property def csrftoken(self): return self.get_cookie_value('csrftoken')<|docstring|>The client's current csrf token<|endoftext|>
0931e8ce8b8af6fa3ba561e6a8037dfa52c4ab108682da5a47b08472e0c35bb2
@property
def token(self):
    """Alias kept for compatibility. Same value as :meth:`csrftoken`."""
    return self.csrftoken
For compatibility. Equivalent to :meth:`csrftoken`
client.py
token
tomLamprecht/PepperNaoInstagram
1
python
@property def token(self): return self.csrftoken
@property def token(self): return self.csrftoken<|docstring|>For compatibility. Equivalent to :meth:`csrftoken`<|endoftext|>
cedbc26aee56cf51b4c4068041247f08d4f1769c6d9c34160001ff9e22052b7f
@property
def authenticated_user_id(self):
    """User id of the account currently logged in (from the cookie jar)."""
    user_id = self.get_cookie_value('ds_user_id')
    return user_id
The current authenticated user id
client.py
authenticated_user_id
tomLamprecht/PepperNaoInstagram
1
python
@property def authenticated_user_id(self): return self.get_cookie_value('ds_user_id')
@property def authenticated_user_id(self): return self.get_cookie_value('ds_user_id')<|docstring|>The current authenticated user id<|endoftext|>
3349772fe7640ba7f820cf47c7f0f30eeb98d4ed138c510483844ba63d3ab527
@property
def authenticated_user_name(self):
    """User name of the account currently logged in (from the cookie jar)."""
    user_name = self.get_cookie_value('ds_user')
    return user_name
The current authenticated user name
client.py
authenticated_user_name
tomLamprecht/PepperNaoInstagram
1
python
@property def authenticated_user_name(self): return self.get_cookie_value('ds_user')
@property def authenticated_user_name(self): return self.get_cookie_value('ds_user')<|docstring|>The current authenticated user name<|endoftext|>
a0b64c86a867b7ef0a6b0d87bdc2c8d6b4c986dcb73be9f33a87f40f815e8004
@property
def phone_id(self):
    """Phone ID derived deterministically from the device id."""
    seed_value = self.device_id
    return self.generate_uuid(return_hex=False, seed=seed_value)
Current phone ID. For use in certain functions.
client.py
phone_id
tomLamprecht/PepperNaoInstagram
1
python
@property def phone_id(self): return self.generate_uuid(return_hex=False, seed=self.device_id)
@property def phone_id(self): return self.generate_uuid(return_hex=False, seed=self.device_id)<|docstring|>Current phone ID. For use in certain functions.<|endoftext|>
e959dcbba9957b29e07d3aa2be4b172f488a661146391face8e11a6e70948f75
@property
def timezone_offset(self):
    """Local UTC offset in whole seconds, for use in certain endpoints."""
    delta = datetime.now() - datetime.utcnow()
    return int(round(delta.total_seconds()))
Timezone offset in seconds. For use in certain functions.
client.py
timezone_offset
tomLamprecht/PepperNaoInstagram
1
python
@property def timezone_offset(self): return int(round((datetime.now() - datetime.utcnow()).total_seconds()))
@property def timezone_offset(self): return int(round((datetime.now() - datetime.utcnow()).total_seconds()))<|docstring|>Timezone offset in seconds. For use in certain functions.<|endoftext|>
3a8563cc19095655bb6ec52c70134c76e05856ef67205feb1b46dced2e75fb03
@property
def cookie_jar(self):
    """Cookie jar instance used by the underlying url opener."""
    jar = self.opener.cookie_jar
    return jar
The client's cookiejar instance.
client.py
cookie_jar
tomLamprecht/PepperNaoInstagram
1
python
@property def cookie_jar(self): return self.opener.cookie_jar
@property def cookie_jar(self): return self.opener.cookie_jar<|docstring|>The client's cookiejar instance.<|endoftext|>
677d7a3ae079474098a8de057db1d90f0535961714ed93f5e2c6b5b3fa4de2e9
@property
def radio_type(self):
    """Fixed radio/connection type reported to certain endpoints."""
    connection = 'wifi-none'
    return connection
For use in certain endpoints
client.py
radio_type
tomLamprecht/PepperNaoInstagram
1
python
@property def radio_type(self): return 'wifi-none'
@property def radio_type(self): return 'wifi-none'<|docstring|>For use in certain endpoints<|endoftext|>
cec2b1319c75645e6fa7214057affe8fb94055e81eb93662f7461d0ff979225f
def _generate_signature(self, data): '\n Generates the signature for a data string\n\n :param data: content to be signed\n :return:\n ' return hmac.new(self.signature_key.encode('utf-8'), data.encode('utf-8'), digestmod=hashlib.sha256).hexdigest()
Generates the signature for a data string :param data: content to be signed :return:
client.py
_generate_signature
tomLamprecht/PepperNaoInstagram
1
python
def _generate_signature(self, data): '\n Generates the signature for a data string\n\n :param data: content to be signed\n :return:\n ' return hmac.new(self.signature_key.encode('utf-8'), data.encode('utf-8'), digestmod=hashlib.sha256).hexdigest()
def _generate_signature(self, data): '\n Generates the signature for a data string\n\n :param data: content to be signed\n :return:\n ' return hmac.new(self.signature_key.encode('utf-8'), data.encode('utf-8'), digestmod=hashlib.sha256).hexdigest()<|docstring|>Generates the signature for a data string :param data: content to be signed :return:<|endoftext|>
88a8e8845d5a49b71cbe5c498942d9a41783903f1932b54c7aa4ad54effa9a4f
@classmethod
def generate_uuid(cls, return_hex=False, seed=None):
    """
    Create a UUID, deterministic when a seed is supplied.

    :param return_hex: Return in hex format
    :param seed: Seed value to generate a consistent uuid
    :return:
    """
    if seed:
        # Deterministic path: md5 of the seed gives a stable 128-bit value.
        digest = hashlib.md5(seed.encode('utf-8'))
        generated = uuid.UUID(digest.hexdigest())
    else:
        generated = uuid.uuid1()
    return generated.hex if return_hex else str(generated)
Generate uuid :param return_hex: Return in hex format :param seed: Seed value to generate a consistent uuid :return:
client.py
generate_uuid
tomLamprecht/PepperNaoInstagram
1
python
@classmethod def generate_uuid(cls, return_hex=False, seed=None): '\n Generate uuid\n\n :param return_hex: Return in hex format\n :param seed: Seed value to generate a consistent uuid\n :return:\n ' if seed: m = hashlib.md5() m.update(seed.encode('utf-8')) new_uuid = uuid.UUID(m.hexdigest()) else: new_uuid = uuid.uuid1() if return_hex: return new_uuid.hex return str(new_uuid)
@classmethod def generate_uuid(cls, return_hex=False, seed=None): '\n Generate uuid\n\n :param return_hex: Return in hex format\n :param seed: Seed value to generate a consistent uuid\n :return:\n ' if seed: m = hashlib.md5() m.update(seed.encode('utf-8')) new_uuid = uuid.UUID(m.hexdigest()) else: new_uuid = uuid.uuid1() if return_hex: return new_uuid.hex return str(new_uuid)<|docstring|>Generate uuid :param return_hex: Return in hex format :param seed: Seed value to generate a consistent uuid :return:<|endoftext|>
faad4f6890213df3f2a9672997078c324adf1b0cdaf0ebaa75fe249e2c22d0e7
@classmethod
def generate_deviceid(cls, seed=None):
    """
    Build an android device ID, deterministic when a seed is supplied.

    :param seed: Seed value to generate a consistent device ID
    :return:
    """
    uuid_hex = cls.generate_uuid(True, seed)
    return 'android-{0!s}'.format(uuid_hex[:16])
Generate an android device ID :param seed: Seed value to generate a consistent device ID :return:
client.py
generate_deviceid
tomLamprecht/PepperNaoInstagram
1
python
@classmethod def generate_deviceid(cls, seed=None): '\n Generate an android device ID\n\n :param seed: Seed value to generate a consistent device ID\n :return:\n ' return 'android-{0!s}'.format(cls.generate_uuid(True, seed)[:16])
@classmethod def generate_deviceid(cls, seed=None): '\n Generate an android device ID\n\n :param seed: Seed value to generate a consistent device ID\n :return:\n ' return 'android-{0!s}'.format(cls.generate_uuid(True, seed)[:16])<|docstring|>Generate an android device ID :param seed: Seed value to generate a consistent device ID :return:<|endoftext|>
6d616cad124dea7e97ed7c8d3aa9d37acc958d74247ce67e6d8f73a3bf5e5920
def generate_adid(self, seed=None):
    """
    Derive an Advertising ID from the login username; the name is
    hashed first because the Google Ad ID is a personally identifying
    but resettable ID.

    :return:
    """
    source = seed or self.authenticated_user_name or self.username
    if source:
        source = hashlib.sha256(source.encode('utf-8')).hexdigest()
    return self.generate_uuid(False, source)
Generate an Advertising ID based on the login username since the Google Ad ID is a personally identifying but resettable ID. :return:
client.py
generate_adid
tomLamprecht/PepperNaoInstagram
1
python
def generate_adid(self, seed=None): '\n Generate an Advertising ID based on the login username since\n the Google Ad ID is a personally identifying but resettable ID.\n\n :return:\n ' modified_seed = (seed or self.authenticated_user_name or self.username) if modified_seed: sha2 = hashlib.sha256() sha2.update(modified_seed.encode('utf-8')) modified_seed = sha2.hexdigest() return self.generate_uuid(False, modified_seed)
def generate_adid(self, seed=None): '\n Generate an Advertising ID based on the login username since\n the Google Ad ID is a personally identifying but resettable ID.\n\n :return:\n ' modified_seed = (seed or self.authenticated_user_name or self.username) if modified_seed: sha2 = hashlib.sha256() sha2.update(modified_seed.encode('utf-8')) modified_seed = sha2.hexdigest() return self.generate_uuid(False, modified_seed)<|docstring|>Generate an Advertising ID based on the login username since the Google Ad ID is a personally identifying but resettable ID. :return:<|endoftext|>
d06e1f7e1946c1216beeff5a97b3992a7a5886b88b9ac968c3d9e5235d3ba59e
@staticmethod def _read_response(response): '\n Extract the response body from a http response.\n\n :param response:\n :return:\n ' if (response.info().get('Content-Encoding') == 'gzip'): buf = BytesIO(response.read()) res = gzip.GzipFile(fileobj=buf).read().decode('utf8') else: res = response.read().decode('utf8') return res
Extract the response body from a http response. :param response: :return:
client.py
_read_response
tomLamprecht/PepperNaoInstagram
1
python
@staticmethod def _read_response(response): '\n Extract the response body from a http response.\n\n :param response:\n :return:\n ' if (response.info().get('Content-Encoding') == 'gzip'): buf = BytesIO(response.read()) res = gzip.GzipFile(fileobj=buf).read().decode('utf8') else: res = response.read().decode('utf8') return res
@staticmethod def _read_response(response): '\n Extract the response body from a http response.\n\n :param response:\n :return:\n ' if (response.info().get('Content-Encoding') == 'gzip'): buf = BytesIO(response.read()) res = gzip.GzipFile(fileobj=buf).read().decode('utf8') else: res = response.read().decode('utf8') return res<|docstring|>Extract the response body from a http response. :param response: :return:<|endoftext|>
ad83b27e9e0f037d9bd503794f7d4044982dc9eeaf147f2912075123bf8b9a28
def _call_api(self, endpoint, params=None, query=None, return_response=False, unsigned=False, version='v1'):
    """
    Calls the private api.

    :param endpoint: endpoint path that should end with '/', example 'discover/explore/'
    :param params: POST parameters
    :param query: GET url query parameters
    :param return_response: return the response instead of the parsed json object
    :param unsigned: use post params as-is without signing
    :param version: for the versioned api base url. Default 'v1'.
    :return:
    """
    url = '{0}{1}'.format(self.api_url.format(version=version), endpoint)
    if query:
        # Append the query string, choosing the separator based on whether
        # the endpoint path already carries one.
        url += (('?' if ('?' not in endpoint) else '&') + compat_urllib_parse.urlencode(query))
    headers = self.default_headers
    data = None
    if (params or (params == '')):
        # An empty-string params value still forces a POST with an empty body.
        headers['Content-type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
        if (params == ''):
            data = ''.encode('utf-8')
        else:
            if (not unsigned):
                # Sign the compact JSON serialization of params with the client's key.
                json_params = json.dumps(params, separators=(',', ':'))
                hash_sig = self._generate_signature(json_params)
                post_params = {'ig_sig_key_version': self.key_version, 'signed_body': ((hash_sig + '.') + json_params)}
            else:
                post_params = params
            data = compat_urllib_parse.urlencode(post_params).encode('utf-8')
    req = compat_urllib_request.Request(url, data, headers=headers)
    try:
        self.logger.debug('REQUEST: {0!s} {1!s}'.format(url, req.get_method()))
        self.logger.debug('DATA: {0!s}'.format(data))
        response = self.opener.open(req, timeout=self.timeout)
    except compat_urllib_error.HTTPError as e:
        error_msg = e.reason
        error_response = self._read_response(e)
        self.logger.debug('RESPONSE: {0:d} {1!s}'.format(e.code, error_response))
        try:
            # Map well-known API error payloads onto specific client exceptions.
            error_obj = json.loads(error_response)
            if (error_obj.get('message') == 'login_required'):
                raise ClientLoginRequiredError(error_obj.get('message'), code=e.code, error_response=json.dumps(error_obj))
            elif (e.code == ClientErrorCodes.TOO_MANY_REQUESTS):
                raise ClientThrottledError(error_obj.get('message'), code=e.code, error_response=json.dumps(error_obj))
            elif error_obj.get('message'):
                error_msg = '{0!s}: {1!s}'.format(e.reason, error_obj['message'])
        except (ClientLoginError, ClientLoginRequiredError, ClientThrottledError):
            # Re-raise the specific client exceptions raised just above.
            raise
        except ValueError as ve:
            # Error body was not JSON; fall through to the generic ClientError.
            self.logger.warn('Error parsing error response: {}'.format(str(ve)))
        raise ClientError(error_msg, e.code, error_response)
    except (SSLError, timeout, SocketError, compat_urllib_error.URLError, compat_http_client.HTTPException, ConnectionError) as connection_error:
        # Transport-level failures are normalized into ClientConnectionError.
        raise ClientConnectionError('{} {}'.format(connection_error.__class__.__name__, str(connection_error)))
    if return_response:
        return response
    response_content = self._read_response(response)
    try:
        self.logger.debug('RESPONSE: {0:d} {1!s}'.format(response.code, response_content))
    except Exception as e:
        # Logging must never break the request flow.
        pass
    json_response = json.loads(response_content)
    if (json_response.get('message', '') == 'login_required'):
        raise ClientLoginRequiredError(json_response.get('message'), code=response.code, error_response=json.dumps(json_response))
    # NOTE(review): responses carrying a 'provider_url' (oembed-style) have no
    # 'status' field, so they bypass the status check — presumably intentional.
    if ((not json_response.get('provider_url')) and (json_response.get('status', '') != 'ok')):
        raise ClientError(json_response.get('message', 'Unknown error'), code=response.code, error_response=json.dumps(json_response))
    return json_response
Calls the private api. :param endpoint: endpoint path that should end with '/', example 'discover/explore/' :param params: POST parameters :param query: GET url query parameters :param return_response: return the response instead of the parsed json object :param unsigned: use post params as-is without signing :param version: for the versioned api base url. Default 'v1'. :return:
client.py
_call_api
tomLamprecht/PepperNaoInstagram
1
python
def _call_api(self, endpoint, params=None, query=None, return_response=False, unsigned=False, version='v1'): "\n Calls the private api.\n\n :param endpoint: endpoint path that should end with '/', example 'discover/explore/'\n :param params: POST parameters\n :param query: GET url query parameters\n :param return_response: return the response instead of the parsed json object\n :param unsigned: use post params as-is without signing\n :param version: for the versioned api base url. Default 'v1'.\n :return:\n " url = '{0}{1}'.format(self.api_url.format(version=version), endpoint) if query: url += (('?' if ('?' not in endpoint) else '&') + compat_urllib_parse.urlencode(query)) headers = self.default_headers data = None if (params or (params == )): headers['Content-type'] = 'application/x-www-form-urlencoded; charset=UTF-8' if (params == ): data = .encode('utf-8') else: if (not unsigned): json_params = json.dumps(params, separators=(',', ':')) hash_sig = self._generate_signature(json_params) post_params = {'ig_sig_key_version': self.key_version, 'signed_body': ((hash_sig + '.') + json_params)} else: post_params = params data = compat_urllib_parse.urlencode(post_params).encode('utf-8') req = compat_urllib_request.Request(url, data, headers=headers) try: self.logger.debug('REQUEST: {0!s} {1!s}'.format(url, req.get_method())) self.logger.debug('DATA: {0!s}'.format(data)) response = self.opener.open(req, timeout=self.timeout) except compat_urllib_error.HTTPError as e: error_msg = e.reason error_response = self._read_response(e) self.logger.debug('RESPONSE: {0:d} {1!s}'.format(e.code, error_response)) try: error_obj = json.loads(error_response) if (error_obj.get('message') == 'login_required'): raise ClientLoginRequiredError(error_obj.get('message'), code=e.code, error_response=json.dumps(error_obj)) elif (e.code == ClientErrorCodes.TOO_MANY_REQUESTS): raise ClientThrottledError(error_obj.get('message'), code=e.code, error_response=json.dumps(error_obj)) elif 
error_obj.get('message'): error_msg = '{0!s}: {1!s}'.format(e.reason, error_obj['message']) except (ClientLoginError, ClientLoginRequiredError, ClientThrottledError): raise except ValueError as ve: self.logger.warn('Error parsing error response: {}'.format(str(ve))) raise ClientError(error_msg, e.code, error_response) except (SSLError, timeout, SocketError, compat_urllib_error.URLError, compat_http_client.HTTPException, ConnectionError) as connection_error: raise ClientConnectionError('{} {}'.format(connection_error.__class__.__name__, str(connection_error))) if return_response: return response response_content = self._read_response(response) try: self.logger.debug('RESPONSE: {0:d} {1!s}'.format(response.code, response_content)) except Exception as e: pass json_response = json.loads(response_content) if (json_response.get('message', ) == 'login_required'): raise ClientLoginRequiredError(json_response.get('message'), code=response.code, error_response=json.dumps(json_response)) if ((not json_response.get('provider_url')) and (json_response.get('status', ) != 'ok')): raise ClientError(json_response.get('message', 'Unknown error'), code=response.code, error_response=json.dumps(json_response)) return json_response
def _call_api(self, endpoint, params=None, query=None, return_response=False, unsigned=False, version='v1'): "\n Calls the private api.\n\n :param endpoint: endpoint path that should end with '/', example 'discover/explore/'\n :param params: POST parameters\n :param query: GET url query parameters\n :param return_response: return the response instead of the parsed json object\n :param unsigned: use post params as-is without signing\n :param version: for the versioned api base url. Default 'v1'.\n :return:\n " url = '{0}{1}'.format(self.api_url.format(version=version), endpoint) if query: url += (('?' if ('?' not in endpoint) else '&') + compat_urllib_parse.urlencode(query)) headers = self.default_headers data = None if (params or (params == )): headers['Content-type'] = 'application/x-www-form-urlencoded; charset=UTF-8' if (params == ): data = .encode('utf-8') else: if (not unsigned): json_params = json.dumps(params, separators=(',', ':')) hash_sig = self._generate_signature(json_params) post_params = {'ig_sig_key_version': self.key_version, 'signed_body': ((hash_sig + '.') + json_params)} else: post_params = params data = compat_urllib_parse.urlencode(post_params).encode('utf-8') req = compat_urllib_request.Request(url, data, headers=headers) try: self.logger.debug('REQUEST: {0!s} {1!s}'.format(url, req.get_method())) self.logger.debug('DATA: {0!s}'.format(data)) response = self.opener.open(req, timeout=self.timeout) except compat_urllib_error.HTTPError as e: error_msg = e.reason error_response = self._read_response(e) self.logger.debug('RESPONSE: {0:d} {1!s}'.format(e.code, error_response)) try: error_obj = json.loads(error_response) if (error_obj.get('message') == 'login_required'): raise ClientLoginRequiredError(error_obj.get('message'), code=e.code, error_response=json.dumps(error_obj)) elif (e.code == ClientErrorCodes.TOO_MANY_REQUESTS): raise ClientThrottledError(error_obj.get('message'), code=e.code, error_response=json.dumps(error_obj)) elif 
error_obj.get('message'): error_msg = '{0!s}: {1!s}'.format(e.reason, error_obj['message']) except (ClientLoginError, ClientLoginRequiredError, ClientThrottledError): raise except ValueError as ve: self.logger.warn('Error parsing error response: {}'.format(str(ve))) raise ClientError(error_msg, e.code, error_response) except (SSLError, timeout, SocketError, compat_urllib_error.URLError, compat_http_client.HTTPException, ConnectionError) as connection_error: raise ClientConnectionError('{} {}'.format(connection_error.__class__.__name__, str(connection_error))) if return_response: return response response_content = self._read_response(response) try: self.logger.debug('RESPONSE: {0:d} {1!s}'.format(response.code, response_content)) except Exception as e: pass json_response = json.loads(response_content) if (json_response.get('message', ) == 'login_required'): raise ClientLoginRequiredError(json_response.get('message'), code=response.code, error_response=json.dumps(json_response)) if ((not json_response.get('provider_url')) and (json_response.get('status', ) != 'ok')): raise ClientError(json_response.get('message', 'Unknown error'), code=response.code, error_response=json.dumps(json_response)) return json_response<|docstring|>Calls the private api. :param endpoint: endpoint path that should end with '/', example 'discover/explore/' :param params: POST parameters :param query: GET url query parameters :param return_response: return the response instead of the parsed json object :param unsigned: use post params as-is without signing :param version: for the versioned api base url. Default 'v1'. :return:<|endoftext|>
6d813bea4a77434703d8cb6c9f6a1c76503cf2f6fcfecb7616986ca7bf71c01c
def make_derived_maps_for_site(site_name: str, output_root: Path) -> None:
    """Generate derived maps for `site_name`.

    These maps will be put in the directory `<output_root>/<site_name>`.

    :param site_name: Name of the study site, used as the output sub-directory.
    :param output_root: Root directory containing one sub-directory per site.
    """
    initial_dir = os.getcwd()
    try:
        os.chdir(output_root / site_name)
    except FileNotFoundError:
        logging.error(f'Could not find directory for output. Check output directory for {site_name} exists.')
        # Bug fix: previously execution fell through after the failed chdir and
        # ran derive_all (and the downstream steps) in the wrong directory.
        return
    try:
        # demproc writes its outputs into the current working directory.
        demproc.derive_all('dem.tif', site_name)
    except Exception:
        logging.error(f'Could not generate DEM-derived maps for {site_name}')
    finally:
        os.chdir(initial_dir)
    _rename_derived_maps_for_site(site_name, output_root)
    _make_soil_maps_for_site(site_name, output_root)
Generate derived maps for `site_name`. These maps will be put in the directory `<output_root>/<site_name>`.
dem-derived/make_derived_layers.py
make_derived_maps_for_site
lanecodes/agrosuccess-data
0
python
def make_derived_maps_for_site(site_name: str, output_root: Path) -> None: 'Generate derived maps for `site_name`.\n\n These maps will be put in the directory `<output_root>/<site_name>`.\n ' initial_dir = os.getcwd() try: os.chdir((output_root / site_name)) except FileNotFoundError: logging.error(f'Could not find directory for output. Check output directory for {site_name} exists.') try: demproc.derive_all('dem.tif', site_name) except Exception: logging.error(f'Could not generate DEM-derived maps for {site_name}') finally: os.chdir(initial_dir) _rename_derived_maps_for_site(site_name, output_root) _make_soil_maps_for_site(site_name, output_root)
def make_derived_maps_for_site(site_name: str, output_root: Path) -> None: 'Generate derived maps for `site_name`.\n\n These maps will be put in the directory `<output_root>/<site_name>`.\n ' initial_dir = os.getcwd() try: os.chdir((output_root / site_name)) except FileNotFoundError: logging.error(f'Could not find directory for output. Check output directory for {site_name} exists.') try: demproc.derive_all('dem.tif', site_name) except Exception: logging.error(f'Could not generate DEM-derived maps for {site_name}') finally: os.chdir(initial_dir) _rename_derived_maps_for_site(site_name, output_root) _make_soil_maps_for_site(site_name, output_root)<|docstring|>Generate derived maps for `site_name`. These maps will be put in the directory `<output_root>/<site_name>`.<|endoftext|>
ce5b96767775b857108c709eb5f085d5ffb63aef2070edae1c779de0b676f5d1
def _make_soil_maps_for_site(site_name: str, output_root: Path) -> None:
    """Generate uniform soil maps for named site.

    Uses hydrologically correct DEM for site as a template to ensure resulting
    soil map has the correct dimensions and geographical projection.

    :param site_name: Name of the study site, used as the output sub-directory.
    :param output_root: Root directory containing one sub-directory per site.
    """
    initial_dir = os.getcwd()
    os.chdir(output_root / site_name)
    try:
        # The generator reads the template DEM from the current directory.
        generator = UniformSoilMapGenerator('hydrocorrect_dem.tif')
        for soil_type in 'ABCD':
            generator.to_geotiff(f'uniform_soil_map_{soil_type}.tif', soil_type)
    finally:
        # Bug fix: restore the working directory even if map generation fails;
        # previously an exception left the process in the site directory.
        os.chdir(initial_dir)
Generate uniform soil maps for named site. Uses hydrologically correct DEM for site as a template to ensure resulting soil map has the correct dimensions and geographical projection.
dem-derived/make_derived_layers.py
_make_soil_maps_for_site
lanecodes/agrosuccess-data
0
python
def _make_soil_maps_for_site(site_name: str, output_root: Path) -> None: 'Generate uniform soil maps for named site.\n\n Uses hydrologically correct DEM for site as a template to ensure resulting\n soil map has the correct dimensions and geographical projection.\n ' initial_dir = os.getcwd() output_dir = (output_root / site_name) os.chdir(output_dir) generator = UniformSoilMapGenerator('hydrocorrect_dem.tif') for soil_type in list('ABCD'): generator.to_geotiff(f'uniform_soil_map_{soil_type}.tif', soil_type) os.chdir(initial_dir)
def _make_soil_maps_for_site(site_name: str, output_root: Path) -> None: 'Generate uniform soil maps for named site.\n\n Uses hydrologically correct DEM for site as a template to ensure resulting\n soil map has the correct dimensions and geographical projection.\n ' initial_dir = os.getcwd() output_dir = (output_root / site_name) os.chdir(output_dir) generator = UniformSoilMapGenerator('hydrocorrect_dem.tif') for soil_type in list('ABCD'): generator.to_geotiff(f'uniform_soil_map_{soil_type}.tif', soil_type) os.chdir(initial_dir)<|docstring|>Generate uniform soil maps for named site. Uses hydrologically correct DEM for site as a template to ensure resulting soil map has the correct dimensions and geographical projection.<|endoftext|>
3995bcdd8abf26ed5e74e497cfa809e7a31290f383d989f871139b420a4fc0c6
def select_polynomial_degree(n_samples: int=100, noise: float=5):
    """
    Simulate data from a polynomial model and use cross-validation to select the best fitting degree

    Parameters
    ----------
    n_samples: int, default=100
        Number of samples to generate

    noise: float, default = 5
        Noise level to simulate in responses
    """
    # True underlying polynomial: y = (x + 3)(x + 2)(x + 1)(x - 1)(x - 2)
    f = (lambda x: (((((x + 3) * (x + 2)) * (x + 1)) * (x - 1)) * (x - 2)))
    X = np.linspace((- 1.2), 2.0, n_samples)
    y_true = np.vectorize(f)(X)
    # 2/3 train / 1/3 test split of the noiseless responses.
    (train_X, test_X, train_y, test_y) = train_test_split(X, y_true, train_size=(2 / 3))
    # Gaussian noise added independently to the train and test responses.
    train_y_noised = (train_y + np.random.normal(0, noise, train_y.size))
    test_y_noised = (test_y + np.random.normal(0, noise, test_y.size))
    # Scatter the noiseless train/test points.
    fig = go.Figure()
    fig.add_traces([go.Scatter(x=train_X, y=train_y, mode='markers', marker=dict(color='blue', symbol=class_symbols[0], line=dict(color='black', width=1)), name='Train set'), go.Scatter(x=test_X, y=test_y, mode='markers', marker=dict(color='red', symbol=class_symbols[0], line=dict(color='black', width=1)), name='Test set')])
    fig.update_layout(title=f'<b>Noiseless data split into train and test sets.<br><sup> Sampled uniformly from y = (x + 3)(x + 2)(x + 1)(x - 1)(x - 2)</sup><b>', title_x=0.5, title_font_size=20, width=800, height=600, xaxis_title='x', yaxis_title='f(x)')
    # Only shown for the default noise level, to avoid duplicate figures
    # when this function is called with several noise settings.
    if ((noise == 5) and plot):
        fig.show()
    # Column vector expected by the fitting estimator.
    train_X = train_X.reshape((- 1), 1)
    training_errors = []
    validation_errors = []
    cv = 5
    degrees = list(range(0, 11))
    # 5-fold cross-validation over candidate polynomial degrees 0..10.
    for k in degrees:
        estimator = PolynomialFitting(k=k)
        (train_error, validation_error) = cross_validate(estimator, train_X, train_y_noised, mean_square_error, cv=cv)
        training_errors.append(train_error)
        validation_errors.append(validation_error)
    # Plot train vs validation MSE as a function of the degree.
    fig = go.Figure()
    fig.add_traces([go.Scatter(x=degrees, y=training_errors, mode='lines+markers', line=dict(color='blue', width=2), name='Training Error'), go.Scatter(x=degrees, y=validation_errors, mode='lines+markers', line=dict(color='red', width=2), name='Validation Error')])
    fig.update_layout(title=f'<b>MSE on train set and validation set with 5-Fold CV<br><sup>{n_samples} Samples, Noise: {noise}</sup>', title_x=0.5, title_font_size=20, width=800, height=600, xaxis_title='Polynomial Degree', yaxis_title='Mean Squared Error')
    if plot:
        fig.show()
    # Refit with the degree minimizing validation error and report test MSE.
    k_star = int(np.argmin(validation_errors))
    estimator = PolynomialFitting(k=k_star)
    estimator.fit(train_X, train_y_noised)
    test_error = mean_square_error(estimator.predict(test_X), test_y_noised).__round__(2)
    print(f'Best Results: noise={noise}, fitted degree k={k_star}, validation error={np.min(validation_errors).__round__(2)} with test error: {test_error}')
Simulate data from a polynomial model and use cross-validation to select the best fitting degree Parameters ---------- n_samples: int, default=100 Number of samples to generate noise: float, default = 5 Noise level to simulate in responses
exercises/perform_model_selection.py
select_polynomial_degree
AlonViz/IML.HUJI
0
python
def select_polynomial_degree(n_samples: int=100, noise: float=5): '\n\tSimulate data from a polynomial model and use cross-validation to select the best fitting degree\n\n\tParameters\n\t----------\n\tn_samples: int, default=100\n\t\tNumber of samples to generate\n\n\tnoise: float, default = 5\n\t\tNoise level to simulate in responses\n\t' f = (lambda x: (((((x + 3) * (x + 2)) * (x + 1)) * (x - 1)) * (x - 2))) X = np.linspace((- 1.2), 2.0, n_samples) y_true = np.vectorize(f)(X) (train_X, test_X, train_y, test_y) = train_test_split(X, y_true, train_size=(2 / 3)) train_y_noised = (train_y + np.random.normal(0, noise, train_y.size)) test_y_noised = (test_y + np.random.normal(0, noise, test_y.size)) fig = go.Figure() fig.add_traces([go.Scatter(x=train_X, y=train_y, mode='markers', marker=dict(color='blue', symbol=class_symbols[0], line=dict(color='black', width=1)), name='Train set'), go.Scatter(x=test_X, y=test_y, mode='markers', marker=dict(color='red', symbol=class_symbols[0], line=dict(color='black', width=1)), name='Test set')]) fig.update_layout(title=f'<b>Noiseless data split into train and test sets.<br><sup> Sampled uniformly from y = (x + 3)(x + 2)(x + 1)(x - 1)(x - 2)</sup><b>', title_x=0.5, title_font_size=20, width=800, height=600, xaxis_title='x', yaxis_title='f(x)') if ((noise == 5) and plot): fig.show() train_X = train_X.reshape((- 1), 1) training_errors = [] validation_errors = [] cv = 5 degrees = list(range(0, 11)) for k in degrees: estimator = PolynomialFitting(k=k) (train_error, validation_error) = cross_validate(estimator, train_X, train_y_noised, mean_square_error, cv=cv) training_errors.append(train_error) validation_errors.append(validation_error) fig = go.Figure() fig.add_traces([go.Scatter(x=degrees, y=training_errors, mode='lines+markers', line=dict(color='blue', width=2), name='Training Error'), go.Scatter(x=degrees, y=validation_errors, mode='lines+markers', line=dict(color='red', width=2), name='Validation Error')]) 
fig.update_layout(title=f'<b>MSE on train set and validation set with 5-Fold CV<br><sup>{n_samples} Samples, Noise: {noise}</sup>', title_x=0.5, title_font_size=20, width=800, height=600, xaxis_title='Polynomial Degree', yaxis_title='Mean Squared Error') if plot: fig.show() k_star = int(np.argmin(validation_errors)) estimator = PolynomialFitting(k=k_star) estimator.fit(train_X, train_y_noised) test_error = mean_square_error(estimator.predict(test_X), test_y_noised).__round__(2) print(f'Best Results: noise={noise}, fitted degree k={k_star}, validation error={np.min(validation_errors).__round__(2)} with test error: {test_error}')
def select_polynomial_degree(n_samples: int=100, noise: float=5): '\n\tSimulate data from a polynomial model and use cross-validation to select the best fitting degree\n\n\tParameters\n\t----------\n\tn_samples: int, default=100\n\t\tNumber of samples to generate\n\n\tnoise: float, default = 5\n\t\tNoise level to simulate in responses\n\t' f = (lambda x: (((((x + 3) * (x + 2)) * (x + 1)) * (x - 1)) * (x - 2))) X = np.linspace((- 1.2), 2.0, n_samples) y_true = np.vectorize(f)(X) (train_X, test_X, train_y, test_y) = train_test_split(X, y_true, train_size=(2 / 3)) train_y_noised = (train_y + np.random.normal(0, noise, train_y.size)) test_y_noised = (test_y + np.random.normal(0, noise, test_y.size)) fig = go.Figure() fig.add_traces([go.Scatter(x=train_X, y=train_y, mode='markers', marker=dict(color='blue', symbol=class_symbols[0], line=dict(color='black', width=1)), name='Train set'), go.Scatter(x=test_X, y=test_y, mode='markers', marker=dict(color='red', symbol=class_symbols[0], line=dict(color='black', width=1)), name='Test set')]) fig.update_layout(title=f'<b>Noiseless data split into train and test sets.<br><sup> Sampled uniformly from y = (x + 3)(x + 2)(x + 1)(x - 1)(x - 2)</sup><b>', title_x=0.5, title_font_size=20, width=800, height=600, xaxis_title='x', yaxis_title='f(x)') if ((noise == 5) and plot): fig.show() train_X = train_X.reshape((- 1), 1) training_errors = [] validation_errors = [] cv = 5 degrees = list(range(0, 11)) for k in degrees: estimator = PolynomialFitting(k=k) (train_error, validation_error) = cross_validate(estimator, train_X, train_y_noised, mean_square_error, cv=cv) training_errors.append(train_error) validation_errors.append(validation_error) fig = go.Figure() fig.add_traces([go.Scatter(x=degrees, y=training_errors, mode='lines+markers', line=dict(color='blue', width=2), name='Training Error'), go.Scatter(x=degrees, y=validation_errors, mode='lines+markers', line=dict(color='red', width=2), name='Validation Error')]) 
fig.update_layout(title=f'<b>MSE on train set and validation set with 5-Fold CV<br><sup>{n_samples} Samples, Noise: {noise}</sup>', title_x=0.5, title_font_size=20, width=800, height=600, xaxis_title='Polynomial Degree', yaxis_title='Mean Squared Error') if plot: fig.show() k_star = int(np.argmin(validation_errors)) estimator = PolynomialFitting(k=k_star) estimator.fit(train_X, train_y_noised) test_error = mean_square_error(estimator.predict(test_X), test_y_noised).__round__(2) print(f'Best Results: noise={noise}, fitted degree k={k_star}, validation error={np.min(validation_errors).__round__(2)} with test error: {test_error}')<|docstring|>Simulate data from a polynomial model and use cross-validation to select the best fitting degree Parameters ---------- n_samples: int, default=100 Number of samples to generate noise: float, default = 5 Noise level to simulate in responses<|endoftext|>
d111a0049a50b2b992b62f1b9db14b1be77765d110cae6f47ebf7ff1dd9a5806
def select_regularization_parameter(n_samples: int = 50, n_evaluations: int = 500):
    """
    Using sklearn's diabetes dataset use cross-validation to select the best fitting regularization parameter
    values for Ridge and Lasso regressions

    Parameters
    ----------
    n_samples: int, default=50
        Number of samples to generate

    n_evaluations: int, default = 500
        Number of regularization parameter values to evaluate for each of the algorithms
    """
    # Load the diabetes dataset and keep only n_samples for training.
    data, target = load_diabetes(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(data, target, train_size=n_samples)

    # 5-fold CV over n_evaluations candidate regularization values per model.
    learners = [('Lasso', Lasso, 'alpha'), ('Ridge', RidgeRegression, 'lam')]
    learners_best_params = []
    reg_values = np.linspace(0.02, 1, n_evaluations)
    for name, learner, param_name in learners:
        training_errors = []
        validation_errors = []
        # FIX: dropped the unused enumerate() index from the original loop.
        for reg_term in reg_values:
            estimator = learner(**{param_name: reg_term})
            train_error, validation_error = cross_validate(
                estimator, X_train, y_train, mean_square_error, 5)
            training_errors.append(train_error)
            validation_errors.append(validation_error)
        # Remember the value minimizing the validation error for this model.
        learners_best_params.append(reg_values[np.argmin(validation_errors)])

        # Plot train vs. validation MSE as a function of the regularization term.
        fig = go.Figure()
        fig.add_traces([
            go.Scatter(x=reg_values, y=training_errors, mode='lines',
                       line=dict(color='blue', width=2), name='Training Error'),
            go.Scatter(x=reg_values, y=validation_errors, mode='lines',
                       line=dict(color='red', width=2), name='Validation Error')])
        fig.update_layout(
            title=f'<b>{name} model losses, as a function of regularization param.</sup>',
            title_x=0.5, title_font_size=20, width=800, height=600,
            xaxis_title='Regularization term', yaxis_title='MSE')
        if plot:
            fig.show()

    # FIX: 'spec' instead of 'learner' — the original reused the outer loop's
    # 'learner' name for a (name, class, param) tuple here, which shadowed it.
    for i, spec in enumerate(learners):
        print(f'the best param for {spec[0]} is {learners_best_params[i]}')

    # Refit each model with its best parameter (plus a plain least-squares
    # baseline, which takes no regularization parameter) and report test error.
    learners.append(('Linear Regression', LinearRegression, None))
    for i, (name, learner, param) in enumerate(learners):
        estimator = learner(**{param: learners_best_params[i]}) if param is not None else learner()
        estimator.fit(X_train, y_train)
        error = mean_square_error(estimator.predict(X_test), y_test)
        print(f'estimator {name} achieved {error} error on test set')
Using sklearn's diabetes dataset use cross-validation to select the best fitting regularization parameter values for Ridge and Lasso regressions Parameters ---------- n_samples: int, default=50 Number of samples to generate n_evaluations: int, default = 500 Number of regularization parameter values to evaluate for each of the algorithms
exercises/perform_model_selection.py
select_regularization_parameter
AlonViz/IML.HUJI
0
python
def select_regularization_parameter(n_samples: int = 50, n_evaluations: int = 500):
    """Tune Ridge and Lasso regularization on the diabetes data via 5-fold CV.

    Evaluates ``n_evaluations`` candidate regularization values for each model,
    plots the resulting train/validation MSE curves, then refits each model
    with its best parameter (and a plain least-squares baseline) and prints
    the test-set errors.

    Parameters
    ----------
    n_samples: int, default=50
        Number of samples to generate

    n_evaluations: int, default = 500
        Number of regularization parameter values to evaluate for each of the algorithms
    """
    features, responses = load_diabetes(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(features, responses, train_size=n_samples)

    reg_values = np.linspace(0.02, 1, n_evaluations)
    learners = [('Lasso', Lasso, 'alpha'), ('Ridge', RidgeRegression, 'lam')]
    learners_best_params = []
    for name, learner, param_name in learners:
        # Score every candidate value, collecting (train, validation) pairs.
        cv_scores = [cross_validate(learner(**{param_name: lam}), X_train,
                                    y_train, mean_square_error, 5)
                     for lam in reg_values]
        training_errors = [pair[0] for pair in cv_scores]
        validation_errors = [pair[1] for pair in cv_scores]
        learners_best_params.append(reg_values[np.argmin(validation_errors)])

        curve_traces = [
            go.Scatter(x=reg_values, y=training_errors, mode='lines',
                       name='Training Error', line=dict(color='blue', width=2)),
            go.Scatter(x=reg_values, y=validation_errors, mode='lines',
                       name='Validation Error', line=dict(color='red', width=2))]
        fig = go.Figure()
        fig.add_traces(curve_traces)
        fig.update_layout(
            title=f'<b>{name} model losses, as a function of regularization param.</sup>',
            title_x=0.5, title_font_size=20, width=800, height=600,
            xaxis_title='Regularization term', yaxis_title='MSE')
        if plot:
            fig.show()

    for i, learner in enumerate(learners):
        print(f'the best param for {learner[0]} is {learners_best_params[i]}')

    # Compare the tuned models against an unregularized least-squares fit.
    learners.append(('Linear Regression', LinearRegression, None))
    for i, (name, learner, param) in enumerate(learners):
        estimator = learner() if param is None else learner(**{param: learners_best_params[i]})
        estimator.fit(X_train, y_train)
        error = mean_square_error(estimator.predict(X_test), y_test)
        print(f'estimator {name} achieved {error} error on test set')
def select_regularization_parameter(n_samples: int=50, n_evaluations: int=500): "\n\tUsing sklearn's diabetes dataset use cross-validation to select the best fitting regularization parameter\n\tvalues for Ridge and Lasso regressions\n\n\tParameters\n\t----------\n\tn_samples: int, default=50\n\t\tNumber of samples to generate\n\n\tn_evaluations: int, default = 500\n\t\tNumber of regularization parameter values to evaluate for each of the algorithms\n\t" (data, target) = load_diabetes(return_X_y=True) (X_train, X_test, y_train, y_test) = train_test_split(data, target, train_size=n_samples) learners = [('Lasso', Lasso, 'alpha'), ('Ridge', RidgeRegression, 'lam')] learners_best_params = [] reg_values = np.linspace(0.02, 1, n_evaluations) for (name, learner, param_name) in learners: training_errors = [] validation_errors = [] for (i, reg_term) in enumerate(reg_values): estimator = learner(**{param_name: reg_term}) (train_error, validation_error) = cross_validate(estimator, X_train, y_train, mean_square_error, 5) training_errors.append(train_error) validation_errors.append(validation_error) learners_best_params.append(reg_values[np.argmin(validation_errors)]) fig = go.Figure() fig.add_traces([go.Scatter(x=reg_values, y=training_errors, mode='lines', line=dict(color='blue', width=2), name='Training Error'), go.Scatter(x=reg_values, y=validation_errors, mode='lines', line=dict(color='red', width=2), name='Validation Error')]) fig.update_layout(title=f'<b>{name} model losses, as a function of regularization param.</sup>', title_x=0.5, title_font_size=20, width=800, height=600, xaxis_title='Regularization term', yaxis_title='MSE') if plot: fig.show() for (i, learner) in enumerate(learners): print(f'the best param for {learner[0]} is {learners_best_params[i]}') learners.append(('Linear Regression', LinearRegression, None)) for (i, (name, learner, param)) in enumerate(learners): estimator = (learner(**{param: learners_best_params[i]}) if (param is not None) else learner()) 
estimator.fit(X_train, y_train) error = mean_square_error(estimator.predict(X_test), y_test) print(f'estimator {name} achieved {error} error on test set')<|docstring|>Using sklearn's diabetes dataset use cross-validation to select the best fitting regularization parameter values for Ridge and Lasso regressions Parameters ---------- n_samples: int, default=50 Number of samples to generate n_evaluations: int, default = 500 Number of regularization parameter values to evaluate for each of the algorithms<|endoftext|>