body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
625efc26145369688f95433134a44a21d49ba05797bb900fb9906efb77b71067
def ignored_docstring(): 'a => b'
a => b
tests/data/docstring.py
ignored_docstring
Austin-HTTPS/black
1
python
def ignored_docstring():
def ignored_docstring(): <|docstring|>a => b<|endoftext|>
971f13a20b64ae0ec6f32ac64a61a530b9e626f93c1f3bad302afde030e07d58
def single_line_docstring_with_whitespace(): 'This should be stripped'
This should be stripped
tests/data/docstring.py
single_line_docstring_with_whitespace
Austin-HTTPS/black
1
python
def single_line_docstring_with_whitespace():
def single_line_docstring_with_whitespace(): <|docstring|>This should be stripped<|endoftext|>
a9ff55578d76d58ad07da070d9eaaefafb44b02127aab0e388b9cb5112fb3c06
def docstring_with_inline_tabs_and_space_indentation(): 'hey\n\n tab\tseparated\tvalue\n tab at start of line and then a tab\tseparated\tvalue\n multiple tabs at the beginning\tand\tinline\n mixed tabs and spaces at beginning. next line has mixed tabs and spaces only.\n\n line ends with some tabs\n '
hey tab separated value tab at start of line and then a tab separated value multiple tabs at the beginning and inline mixed tabs and spaces at beginning. next line has mixed tabs and spaces only. line ends with some tabs
tests/data/docstring.py
docstring_with_inline_tabs_and_space_indentation
Austin-HTTPS/black
1
python
def docstring_with_inline_tabs_and_space_indentation(): 'hey\n\n tab\tseparated\tvalue\n tab at start of line and then a tab\tseparated\tvalue\n multiple tabs at the beginning\tand\tinline\n mixed tabs and spaces at beginning. next line has mixed tabs and spaces only.\n\n line ends with some tabs\n '
def docstring_with_inline_tabs_and_space_indentation(): 'hey\n\n tab\tseparated\tvalue\n tab at start of line and then a tab\tseparated\tvalue\n multiple tabs at the beginning\tand\tinline\n mixed tabs and spaces at beginning. next line has mixed tabs and spaces only.\n\n line ends with some tabs\n '<|docstring|>hey tab separated value tab at start of line and then a tab separated value multiple tabs at the beginning and inline mixed tabs and spaces at beginning. next line has mixed tabs and spaces only. line ends with some tabs<|endoftext|>
ff5f3b77fa40c12af548d5761ef3cc49fb78615db0b9d29e01a8ec7debd75127
def docstring_with_inline_tabs_and_tab_indentation(): 'hey\n\n tab\tseparated\tvalue\n tab at start of line and then a tab\tseparated\tvalue\n multiple tabs at the beginning\tand\tinline\n mixed tabs and spaces at beginning. next line has mixed tabs and spaces only.\n\n line ends with some tabs\n ' pass
hey tab separated value tab at start of line and then a tab separated value multiple tabs at the beginning and inline mixed tabs and spaces at beginning. next line has mixed tabs and spaces only. line ends with some tabs
tests/data/docstring.py
docstring_with_inline_tabs_and_tab_indentation
Austin-HTTPS/black
1
python
def docstring_with_inline_tabs_and_tab_indentation(): 'hey\n\n tab\tseparated\tvalue\n tab at start of line and then a tab\tseparated\tvalue\n multiple tabs at the beginning\tand\tinline\n mixed tabs and spaces at beginning. next line has mixed tabs and spaces only.\n\n line ends with some tabs\n ' pass
def docstring_with_inline_tabs_and_tab_indentation(): 'hey\n\n tab\tseparated\tvalue\n tab at start of line and then a tab\tseparated\tvalue\n multiple tabs at the beginning\tand\tinline\n mixed tabs and spaces at beginning. next line has mixed tabs and spaces only.\n\n line ends with some tabs\n ' pass<|docstring|>hey tab separated value tab at start of line and then a tab separated value multiple tabs at the beginning and inline mixed tabs and spaces at beginning. next line has mixed tabs and spaces only. line ends with some tabs<|endoftext|>
26d488c900d3f33747754c971b7b48b4e0d9cf4a47df197570d9b35c91fa6b2a
def method(self): 'Multiline\n method docstring\n ' pass
Multiline method docstring
tests/data/docstring.py
method
Austin-HTTPS/black
1
python
def method(self): 'Multiline\n method docstring\n ' pass
def method(self): 'Multiline\n method docstring\n ' pass<|docstring|>Multiline method docstring<|endoftext|>
4d1340a1d69219a21513c26e843d6636253c03faf90b4431965f7f7f2e9f34d1
def method(self): 'Multiline\n method docstring\n ' pass
Multiline method docstring
tests/data/docstring.py
method
Austin-HTTPS/black
1
python
def method(self): 'Multiline\n method docstring\n ' pass
def method(self): 'Multiline\n method docstring\n ' pass<|docstring|>Multiline method docstring<|endoftext|>
b3bc563d90680badc78d4c8de2b996bf626714e2708a1b04a7399daa6a8ee71b
def test_smoke(): '\n lib and ffi can be imported and looks OK.\n ' from _argon2_cffi_bindings import ffi, lib assert repr(ffi).startswith('<_cffi_backend.FFI object at') assert repr(lib).startswith('<Lib object for') assert (19 == lib.ARGON2_VERSION_NUMBER) assert (42 == lib.argon2_encodedlen(1, 2, 3, 4, 5, lib.Argon2_id))
lib and ffi can be imported and looks OK.
tests/test_smoke.py
test_smoke
hynek/argon2-cffi-bindings
5
python
def test_smoke(): '\n \n ' from _argon2_cffi_bindings import ffi, lib assert repr(ffi).startswith('<_cffi_backend.FFI object at') assert repr(lib).startswith('<Lib object for') assert (19 == lib.ARGON2_VERSION_NUMBER) assert (42 == lib.argon2_encodedlen(1, 2, 3, 4, 5, lib.Argon2_id))
def test_smoke(): '\n \n ' from _argon2_cffi_bindings import ffi, lib assert repr(ffi).startswith('<_cffi_backend.FFI object at') assert repr(lib).startswith('<Lib object for') assert (19 == lib.ARGON2_VERSION_NUMBER) assert (42 == lib.argon2_encodedlen(1, 2, 3, 4, 5, lib.Argon2_id))<|docstring|>lib and ffi can be imported and looks OK.<|endoftext|>
dd8ea8a197563697d89f98b4da6a983a4ae414fc870248b1df353bd219e80ea9
def read_version(): 'Read the version number from the VERSION file' version_file = 'VERSION' with zipfile.ZipFile(sys.argv[0]) as zf: try: with zf.open(version_file) as f: version = f.read() version = version.decode('ascii') version = version.strip() except KeyError: version = 'UNKNOWN (this is a non github build)' return version
Read the version number from the VERSION file
src/__main__.py
read_version
8cylinder/boss
0
python
def read_version(): version_file = 'VERSION' with zipfile.ZipFile(sys.argv[0]) as zf: try: with zf.open(version_file) as f: version = f.read() version = version.decode('ascii') version = version.strip() except KeyError: version = 'UNKNOWN (this is a non github build)' return version
def read_version(): version_file = 'VERSION' with zipfile.ZipFile(sys.argv[0]) as zf: try: with zf.open(version_file) as f: version = f.read() version = version.decode('ascii') version = version.strip() except KeyError: version = 'UNKNOWN (this is a non github build)' return version<|docstring|>Read the version number from the VERSION file<|endoftext|>
4e4edb3ed5f831a611c4183ef11beea15cd64c7d54f0581cb8a82ce7a713dff5
@click.group(no_args_is_help=True, context_settings=CONTEXT_SETTINGS) @click.version_option(version=__version__) def boss(): "👔 Install various applications and miscellany to set up a dev server.\n\n This can be run standalone or as a Vagrant provider. When run as\n a vagrant provider its recommended that is be run unprivileged.\n This will run as the default user and the script will use sudo\n when necessary (this assumes the default user can use sudo). This\n means that any subsequent uses as the default user will be able to\n update the '$HOME/boss-installed-modules' file. Also if the\n bashrc module is installed during provisioning, then the correct\n home dir will be setup.\n\n \x08\n eg:\n config.vm.provision :shell,\n path: 'boss',\n args: 'install server.local ...'\n\n Its recommended to set up Apt-Cacher NG on the host machine. Once\n that's done adding `aptproxy` to the list of modules will configure\n this server to make use of it."
👔 Install various applications and miscellany to set up a dev server. This can be run standalone or as a Vagrant provider. When run as a vagrant provider its recommended that is be run unprivileged. This will run as the default user and the script will use sudo when necessary (this assumes the default user can use sudo). This means that any subsequent uses as the default user will be able to update the '$HOME/boss-installed-modules' file. Also if the bashrc module is installed during provisioning, then the correct home dir will be setup.  eg: config.vm.provision :shell, path: 'boss', args: 'install server.local ...' Its recommended to set up Apt-Cacher NG on the host machine. Once that's done adding `aptproxy` to the list of modules will configure this server to make use of it.
src/__main__.py
boss
8cylinder/boss
0
python
@click.group(no_args_is_help=True, context_settings=CONTEXT_SETTINGS) @click.version_option(version=__version__) def boss(): "👔 Install various applications and miscellany to set up a dev server.\n\n This can be run standalone or as a Vagrant provider. When run as\n a vagrant provider its recommended that is be run unprivileged.\n This will run as the default user and the script will use sudo\n when necessary (this assumes the default user can use sudo). This\n means that any subsequent uses as the default user will be able to\n update the '$HOME/boss-installed-modules' file. Also if the\n bashrc module is installed during provisioning, then the correct\n home dir will be setup.\n\n \x08\n eg:\n config.vm.provision :shell,\n path: 'boss',\n args: 'install server.local ...'\n\n Its recommended to set up Apt-Cacher NG on the host machine. Once\n that's done adding `aptproxy` to the list of modules will configure\n this server to make use of it."
@click.group(no_args_is_help=True, context_settings=CONTEXT_SETTINGS) @click.version_option(version=__version__) def boss(): "👔 Install various applications and miscellany to set up a dev server.\n\n This can be run standalone or as a Vagrant provider. When run as\n a vagrant provider its recommended that is be run unprivileged.\n This will run as the default user and the script will use sudo\n when necessary (this assumes the default user can use sudo). This\n means that any subsequent uses as the default user will be able to\n update the '$HOME/boss-installed-modules' file. Also if the\n bashrc module is installed during provisioning, then the correct\n home dir will be setup.\n\n \x08\n eg:\n config.vm.provision :shell,\n path: 'boss',\n args: 'install server.local ...'\n\n Its recommended to set up Apt-Cacher NG on the host machine. Once\n that's done adding `aptproxy` to the list of modules will configure\n this server to make use of it."<|docstring|>👔 Install various applications and miscellany to set up a dev server. This can be run standalone or as a Vagrant provider. When run as a vagrant provider its recommended that is be run unprivileged. This will run as the default user and the script will use sudo when necessary (this assumes the default user can use sudo). This means that any subsequent uses as the default user will be able to update the '$HOME/boss-installed-modules' file. Also if the bashrc module is installed during provisioning, then the correct home dir will be setup.  eg: config.vm.provision :shell, path: 'boss', args: 'install server.local ...' Its recommended to set up Apt-Cacher NG on the host machine. Once that's done adding `aptproxy` to the list of modules will configure this server to make use of it.<|endoftext|>
66c4ee62da9e6c1575ce900e3b6205642d4b133b6022ff3335384b793bcc488c
@boss.command() @click.argument('servername', type=SERVER) @click.argument('modules', nargs=(- 1), required=True) @click.option('-d', '--dry-run', is_flag=True, help='Only print the commands that would be used') @click.option('-o', '--no-required', is_flag=True, help="Don't install the required modules") @click.option('-O', '--no-dependencies', is_flag=True, help="Don't install dependent modules") @click.option('--generate-script', is_flag=True, help='Output suitable for a bash script instead of running them') @click.option('-n', '--new-user-and-pass', type=USER_PASS, metavar='USERNAME,USERPASS', help="a new unix user's name and password (seperated by a comma), they will be added to the www-data group") @click.option('-S', '--sql-file', type=click.Path(exists=True, dir_okay=False), metavar='SQLFILE', help='sql file to be run during install') @click.option('-N', '--db-name', metavar='DB-NAME', required=deps('mysql', 'lamp', 'craft3'), help='the name the schema to create') @click.option('-P', '--db-root-pass', default='password', metavar='PASSWORD', required=deps('mysql', 'lamp', 'craft3', 'phpmyadmin'), help='password for mysql root user, required for the mysql module') @click.option('-A', '--new-db-user-and-pass', type=USER_PASS, metavar='USERNAME,PASSWORD', help="a new db user's new username and password (seperated by a comma)") @click.option('-u', '--new-system-user-and-pass', type=USER_PASS, metavar='USERNAME,PASSWORD', required=deps('newuser'), help="a new system user's new username and password (seperated by a comma)") @click.option('-s', '--site-name-and-root', type=SITE_DOCROOT, metavar='SITENAME,DOCUMENTROOT[:...]', required=deps('virtualhost'), help='SITENAME, DOCUMENTROOT and CREATEDIR seperated by a comma (doc root will be put in /var/www).\n CREATEDIR is an optional y/n that indicates if to create the dir or not (default:n).\n Multiple sites can be specified by seperating them with a ":", eg: -s site1,root1,y:site2,root2') @click.option('-c', 
'--craft-credentials', type=USER_EMAIL_PASS, metavar='USERNAME,EMAIL,PASSWORD', help='Craft admin credentials. If not set, only system requirements for Craft will be installed') @click.option('-i', '--host-ip', type=IP_ADDRESS, required=deps('aptproxy'), help='Host ip to be used in aptproxy config') @click.option('--netdata-user-pass', type=USER_PASS, metavar='USERNAME,USERPASS', help="a new user's name and password (seperated by a comma)") def install(**args): 'Install any modules available from `boss list`' Args = namedtuple('Args', sorted(args)) args = Args(**args) available_mods = mods wanted_mods = [i.lower() for i in args.modules] required_mods = ['first', 'done'] if args.no_required: wanted_apps = [i for i in available_mods if (i.__name__.lower() in wanted_mods)] else: wanted_apps = [i for i in available_mods if ((i.__name__.lower() in wanted_mods) or (i.__name__.lower() in required_mods))] mapping_keys = [i.__name__.lower() for i in available_mods] invalid_modules = [i for i in wanted_mods if (i not in mapping_keys)] if invalid_modules: util.error('module(s) "{invalid}" does not exist.\nValid modules are:\n{valid}'.format(valid=', '.join(mapping_keys), invalid=', '.join(invalid_modules))) if (not args.no_dependencies): install_reqs = [] for app in wanted_apps: install_reqs += app.provides provided = set(install_reqs) required = set(app.requires) if len((required - provided)): util.error('Requirements not met for {}: {}.'.format(app.__name__.lower(), ', '.join(app.requires))) if args.generate_script: script_header = ('#!/usr/bin/env bash', '', '# Boss command used to generate this script', '# {}'.format(' '.join(sys.argv)), '', 'set -x') click.echo('\n'.join(script_header)) for App in wanted_apps: module_name = App.title util.title(module_name, script=args.generate_script) try: app = App(dry_run=args.dry_run, args=args) app.pre_install() app.install() app.post_install() app.log(module_name) except subprocess.CalledProcessError as e: util.error(e) except 
DependencyError as e: util.error(e) except PlatformError as e: util.error(e) except SecurityError as e: util.error(e) except FileNotFoundError as e: util.error(e.args[0])
Install any modules available from `boss list`
src/__main__.py
install
8cylinder/boss
0
python
@boss.command() @click.argument('servername', type=SERVER) @click.argument('modules', nargs=(- 1), required=True) @click.option('-d', '--dry-run', is_flag=True, help='Only print the commands that would be used') @click.option('-o', '--no-required', is_flag=True, help="Don't install the required modules") @click.option('-O', '--no-dependencies', is_flag=True, help="Don't install dependent modules") @click.option('--generate-script', is_flag=True, help='Output suitable for a bash script instead of running them') @click.option('-n', '--new-user-and-pass', type=USER_PASS, metavar='USERNAME,USERPASS', help="a new unix user's name and password (seperated by a comma), they will be added to the www-data group") @click.option('-S', '--sql-file', type=click.Path(exists=True, dir_okay=False), metavar='SQLFILE', help='sql file to be run during install') @click.option('-N', '--db-name', metavar='DB-NAME', required=deps('mysql', 'lamp', 'craft3'), help='the name the schema to create') @click.option('-P', '--db-root-pass', default='password', metavar='PASSWORD', required=deps('mysql', 'lamp', 'craft3', 'phpmyadmin'), help='password for mysql root user, required for the mysql module') @click.option('-A', '--new-db-user-and-pass', type=USER_PASS, metavar='USERNAME,PASSWORD', help="a new db user's new username and password (seperated by a comma)") @click.option('-u', '--new-system-user-and-pass', type=USER_PASS, metavar='USERNAME,PASSWORD', required=deps('newuser'), help="a new system user's new username and password (seperated by a comma)") @click.option('-s', '--site-name-and-root', type=SITE_DOCROOT, metavar='SITENAME,DOCUMENTROOT[:...]', required=deps('virtualhost'), help='SITENAME, DOCUMENTROOT and CREATEDIR seperated by a comma (doc root will be put in /var/www).\n CREATEDIR is an optional y/n that indicates if to create the dir or not (default:n).\n Multiple sites can be specified by seperating them with a ":", eg: -s site1,root1,y:site2,root2') @click.option('-c', 
'--craft-credentials', type=USER_EMAIL_PASS, metavar='USERNAME,EMAIL,PASSWORD', help='Craft admin credentials. If not set, only system requirements for Craft will be installed') @click.option('-i', '--host-ip', type=IP_ADDRESS, required=deps('aptproxy'), help='Host ip to be used in aptproxy config') @click.option('--netdata-user-pass', type=USER_PASS, metavar='USERNAME,USERPASS', help="a new user's name and password (seperated by a comma)") def install(**args): Args = namedtuple('Args', sorted(args)) args = Args(**args) available_mods = mods wanted_mods = [i.lower() for i in args.modules] required_mods = ['first', 'done'] if args.no_required: wanted_apps = [i for i in available_mods if (i.__name__.lower() in wanted_mods)] else: wanted_apps = [i for i in available_mods if ((i.__name__.lower() in wanted_mods) or (i.__name__.lower() in required_mods))] mapping_keys = [i.__name__.lower() for i in available_mods] invalid_modules = [i for i in wanted_mods if (i not in mapping_keys)] if invalid_modules: util.error('module(s) "{invalid}" does not exist.\nValid modules are:\n{valid}'.format(valid=', '.join(mapping_keys), invalid=', '.join(invalid_modules))) if (not args.no_dependencies): install_reqs = [] for app in wanted_apps: install_reqs += app.provides provided = set(install_reqs) required = set(app.requires) if len((required - provided)): util.error('Requirements not met for {}: {}.'.format(app.__name__.lower(), ', '.join(app.requires))) if args.generate_script: script_header = ('#!/usr/bin/env bash', , '# Boss command used to generate this script', '# {}'.format(' '.join(sys.argv)), , 'set -x') click.echo('\n'.join(script_header)) for App in wanted_apps: module_name = App.title util.title(module_name, script=args.generate_script) try: app = App(dry_run=args.dry_run, args=args) app.pre_install() app.install() app.post_install() app.log(module_name) except subprocess.CalledProcessError as e: util.error(e) except DependencyError as e: util.error(e) except PlatformError 
as e: util.error(e) except SecurityError as e: util.error(e) except FileNotFoundError as e: util.error(e.args[0])
@boss.command() @click.argument('servername', type=SERVER) @click.argument('modules', nargs=(- 1), required=True) @click.option('-d', '--dry-run', is_flag=True, help='Only print the commands that would be used') @click.option('-o', '--no-required', is_flag=True, help="Don't install the required modules") @click.option('-O', '--no-dependencies', is_flag=True, help="Don't install dependent modules") @click.option('--generate-script', is_flag=True, help='Output suitable for a bash script instead of running them') @click.option('-n', '--new-user-and-pass', type=USER_PASS, metavar='USERNAME,USERPASS', help="a new unix user's name and password (seperated by a comma), they will be added to the www-data group") @click.option('-S', '--sql-file', type=click.Path(exists=True, dir_okay=False), metavar='SQLFILE', help='sql file to be run during install') @click.option('-N', '--db-name', metavar='DB-NAME', required=deps('mysql', 'lamp', 'craft3'), help='the name the schema to create') @click.option('-P', '--db-root-pass', default='password', metavar='PASSWORD', required=deps('mysql', 'lamp', 'craft3', 'phpmyadmin'), help='password for mysql root user, required for the mysql module') @click.option('-A', '--new-db-user-and-pass', type=USER_PASS, metavar='USERNAME,PASSWORD', help="a new db user's new username and password (seperated by a comma)") @click.option('-u', '--new-system-user-and-pass', type=USER_PASS, metavar='USERNAME,PASSWORD', required=deps('newuser'), help="a new system user's new username and password (seperated by a comma)") @click.option('-s', '--site-name-and-root', type=SITE_DOCROOT, metavar='SITENAME,DOCUMENTROOT[:...]', required=deps('virtualhost'), help='SITENAME, DOCUMENTROOT and CREATEDIR seperated by a comma (doc root will be put in /var/www).\n CREATEDIR is an optional y/n that indicates if to create the dir or not (default:n).\n Multiple sites can be specified by seperating them with a ":", eg: -s site1,root1,y:site2,root2') @click.option('-c', 
'--craft-credentials', type=USER_EMAIL_PASS, metavar='USERNAME,EMAIL,PASSWORD', help='Craft admin credentials. If not set, only system requirements for Craft will be installed') @click.option('-i', '--host-ip', type=IP_ADDRESS, required=deps('aptproxy'), help='Host ip to be used in aptproxy config') @click.option('--netdata-user-pass', type=USER_PASS, metavar='USERNAME,USERPASS', help="a new user's name and password (seperated by a comma)") def install(**args): Args = namedtuple('Args', sorted(args)) args = Args(**args) available_mods = mods wanted_mods = [i.lower() for i in args.modules] required_mods = ['first', 'done'] if args.no_required: wanted_apps = [i for i in available_mods if (i.__name__.lower() in wanted_mods)] else: wanted_apps = [i for i in available_mods if ((i.__name__.lower() in wanted_mods) or (i.__name__.lower() in required_mods))] mapping_keys = [i.__name__.lower() for i in available_mods] invalid_modules = [i for i in wanted_mods if (i not in mapping_keys)] if invalid_modules: util.error('module(s) "{invalid}" does not exist.\nValid modules are:\n{valid}'.format(valid=', '.join(mapping_keys), invalid=', '.join(invalid_modules))) if (not args.no_dependencies): install_reqs = [] for app in wanted_apps: install_reqs += app.provides provided = set(install_reqs) required = set(app.requires) if len((required - provided)): util.error('Requirements not met for {}: {}.'.format(app.__name__.lower(), ', '.join(app.requires))) if args.generate_script: script_header = ('#!/usr/bin/env bash', , '# Boss command used to generate this script', '# {}'.format(' '.join(sys.argv)), , 'set -x') click.echo('\n'.join(script_header)) for App in wanted_apps: module_name = App.title util.title(module_name, script=args.generate_script) try: app = App(dry_run=args.dry_run, args=args) app.pre_install() app.install() app.post_install() app.log(module_name) except subprocess.CalledProcessError as e: util.error(e) except DependencyError as e: util.error(e) except PlatformError 
as e: util.error(e) except SecurityError as e: util.error(e) except FileNotFoundError as e: util.error(e.args[0])<|docstring|>Install any modules available from `boss list`<|endoftext|>
7f972414c61111ec94f4d6833f58a6e9e1e27ea67fbe88f93c128c39590f419d
@boss.command() def list(): 'List available modules' installed_file = os.path.expanduser('~/boss-installed-modules') installed = [] if os.path.exists(installed_file): with open(installed_file) as f: installed = f.readlines() installed = [i.lower().strip() for i in installed] for mod in mods: name = mod.__name__ module = mod state = (' ✓ ' if (name in installed) else ' ') state = click.style(state, fg='green') description = (module.__doc__ if module.__doc__ else '') if description: description = description.splitlines()[0] click.echo(((state + click.style(name.ljust(13), bold=True)) + description)) sys.stdout.flush()
List available modules
src/__main__.py
list
8cylinder/boss
0
python
@boss.command() def list(): installed_file = os.path.expanduser('~/boss-installed-modules') installed = [] if os.path.exists(installed_file): with open(installed_file) as f: installed = f.readlines() installed = [i.lower().strip() for i in installed] for mod in mods: name = mod.__name__ module = mod state = (' ✓ ' if (name in installed) else ' ') state = click.style(state, fg='green') description = (module.__doc__ if module.__doc__ else ) if description: description = description.splitlines()[0] click.echo(((state + click.style(name.ljust(13), bold=True)) + description)) sys.stdout.flush()
@boss.command() def list(): installed_file = os.path.expanduser('~/boss-installed-modules') installed = [] if os.path.exists(installed_file): with open(installed_file) as f: installed = f.readlines() installed = [i.lower().strip() for i in installed] for mod in mods: name = mod.__name__ module = mod state = (' ✓ ' if (name in installed) else ' ') state = click.style(state, fg='green') description = (module.__doc__ if module.__doc__ else ) if description: description = description.splitlines()[0] click.echo(((state + click.style(name.ljust(13), bold=True)) + description)) sys.stdout.flush()<|docstring|>List available modules<|endoftext|>
582a7a02c6c59bd1ce7038baa4fd8a89fde3198265fe251a0379d22905f83c47
@boss.command() def help(): 'Show help for each module' content = [] w = textwrap.TextWrapper(initial_indent='', subsequent_indent=' ', break_on_hyphens=False) for app in mods: content.append('') title = '{} ({})'.format(app.title, app.__name__.lower()) under = ('-' * len(title)) content.append(click.style(title, fg='yellow', bold=False, underline=True)) if app.__doc__: lines = app.__doc__.splitlines() lines = [i.strip() for i in lines] content.append('\n'.join(lines).strip()) else: content.append(click.style('(No documentation)', dim=True)) if app.requires: content.append('') cont_title = click.style('Required modules:', fg='blue') content.append('{} {}'.format(cont_title, ', '.join(app.requires))) content.append('\n') click.echo_via_pager('\n'.join(content))
Show help for each module
src/__main__.py
help
8cylinder/boss
0
python
@boss.command() def help(): content = [] w = textwrap.TextWrapper(initial_indent=, subsequent_indent=' ', break_on_hyphens=False) for app in mods: content.append() title = '{} ({})'.format(app.title, app.__name__.lower()) under = ('-' * len(title)) content.append(click.style(title, fg='yellow', bold=False, underline=True)) if app.__doc__: lines = app.__doc__.splitlines() lines = [i.strip() for i in lines] content.append('\n'.join(lines).strip()) else: content.append(click.style('(No documentation)', dim=True)) if app.requires: content.append() cont_title = click.style('Required modules:', fg='blue') content.append('{} {}'.format(cont_title, ', '.join(app.requires))) content.append('\n') click.echo_via_pager('\n'.join(content))
@boss.command() def help(): content = [] w = textwrap.TextWrapper(initial_indent=, subsequent_indent=' ', break_on_hyphens=False) for app in mods: content.append() title = '{} ({})'.format(app.title, app.__name__.lower()) under = ('-' * len(title)) content.append(click.style(title, fg='yellow', bold=False, underline=True)) if app.__doc__: lines = app.__doc__.splitlines() lines = [i.strip() for i in lines] content.append('\n'.join(lines).strip()) else: content.append(click.style('(No documentation)', dim=True)) if app.requires: content.append() cont_title = click.style('Required modules:', fg='blue') content.append('{} {}'.format(cont_title, ', '.join(app.requires))) content.append('\n') click.echo_via_pager('\n'.join(content))<|docstring|>Show help for each module<|endoftext|>
387a778fc62868d33067ab83a3df7fcfcf0ea13fc15ffa4edcf6d849da63853d
def __init__(self, df=None, temps=None, key_pressures=None, key_uptakes=None, model=None, compname=None, temp_units='C'): "\n :param df: pd.DataFrame or list[pd.DataFrame]\n Pure-component isotherm data as a pandas dataframe - must be uptake in mmol/g and pressure in bar or\n equivalent. If datasets at different temperatures are required for fitting, the user must specify\n them in the same dataframe. A list of dataframes may be passed for the dual-site Langmuir isotherm\n model procedure where parameter results across different components are utilised. Must be inputted\n in the same order as compname (when passing as a list).\n\n :param temps: list[float]\n List of temperatures corresponding to each dataset within the dataframe for results formatting and\n for calculating heats of adsorption/ binding energies. Must be inputted in the same order as\n key_pressures and key_uptakes.\n\n :param key_pressures: list[str]\n List of unique column key(s) which correspond to each dataset's pressure values within the\n dataframe. Can input any number of keys corresponding to any number of datasets in the dataframe.\n If multiple dataframes are specified, make sure keys are identical across each dataframe for each\n temperature. Must be inputted in the same order as key_uptakes and temps.\n\n :param key_uptakes: list[str]\n List of unique column key(s) which correspond to each dataset's uptake values within the\n dataframe. Can input any number of keys corresponding to any number of datasets in the dataframe.\n If multiple dataframes are specified, make sure keys are identical across each dataframe for each\n temperature. Must be inputted in the same order as key_pressures and temps.\n\n :param model: str\n Model to be fit to dataset(s).\n\n :param compname: str or list[str], optional\n Name of pure component(s) for results formatting. If None is passed, self.compname is instantiated\n as anarbitrary letter or a list of arbitrary letters corresponding to each component. 
Must be\n inputted in the same order as compname (when passing as a list).\n\n :param temp_units: str, Optional\n Units of temperature input (temps). Default is degrees C. Can accept Kelvin, 'K'.\n\n " if (df is None): raise ParameterError('Input Pandas Dataframe with pure-component isotherm data for fitting') if (temps is None): raise ParameterError('Input temperature corresponding to each pure-component isotherm dataset within the dataframe for fitting') if (key_pressures is None): raise ParameterError("Input list of unique column key(s) which correspond to each dataset's pressure values within the Dataframe") if (key_uptakes is None): raise ParameterError("Input list of unique column key(s) which correspond to each dataset's uptake values within the Dataframe") if (model.lower() is None): raise ParameterError('Enter a model as a parameter') if (model.lower() not in _MODELS): raise ParameterError(('Enter a valid model - List of supported models:\n ' + str(_MODELS))) len_check = [len(key_uptakes), len(key_pressures), len(temps)] if (len(temps) != (sum(len_check) / len(len_check))): raise ParameterError('Lengths of key_uptakes, key_pressures or temps do not match. 
Check that the length of each list is the same, corresponding to each dataset') if ((type(df) is list) and (model.lower() != 'dsl')): raise ParameterError('Enter one dataframe, not a list of dataframes') if ((compname is None) and (type(compname) is not list)): self.compname = 'A' logger.info('No component name passed - giving component an arbitrary name.') elif ((compname is None) and (type(df) is list)): letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K'] self.compname = [letters[i] for i in range(len(compname))] logger.info('No component names passed - giving components arbitrary names.') del letters logger.info('Checks successfully passed') self.df = df self.temps = temps self.temp_units = temp_units if (self.temp_units == 'K'): self.temps = temps else: self.temps = [(t + 273) for t in temps] self.compname = compname self.key_pressures = key_pressures self.key_uptakes = key_uptakes self.model = model.lower() self.input_model = model self.x = [] self.y = [] self.params = [] self.df_result = None self.emod_input = {} self.henry_params = [] self.rel_pres = False
:param df: pd.DataFrame or list[pd.DataFrame] Pure-component isotherm data as a pandas dataframe - must be uptake in mmol/g and pressure in bar or equivalent. If datasets at different temperatures are required for fitting, the user must specify them in the same dataframe. A list of dataframes may be passed for the dual-site Langmuir isotherm model procedure where parameter results across different components are utilised. Must be inputted in the same order as compname (when passing as a list). :param temps: list[float] List of temperatures corresponding to each dataset within the dataframe for results formatting and for calculating heats of adsorption/ binding energies. Must be inputted in the same order as key_pressures and key_uptakes. :param key_pressures: list[str] List of unique column key(s) which correspond to each dataset's pressure values within the dataframe. Can input any number of keys corresponding to any number of datasets in the dataframe. If multiple dataframes are specified, make sure keys are identical across each dataframe for each temperature. Must be inputted in the same order as key_uptakes and temps. :param key_uptakes: list[str] List of unique column key(s) which correspond to each dataset's uptake values within the dataframe. Can input any number of keys corresponding to any number of datasets in the dataframe. If multiple dataframes are specified, make sure keys are identical across each dataframe for each temperature. Must be inputted in the same order as key_pressures and temps. :param model: str Model to be fit to dataset(s). :param compname: str or list[str], optional Name of pure component(s) for results formatting. If None is passed, self.compname is instantiated as anarbitrary letter or a list of arbitrary letters corresponding to each component. Must be inputted in the same order as compname (when passing as a list). :param temp_units: str, Optional Units of temperature input (temps). Default is degrees C. Can accept Kelvin, 'K'.
src/pyIsoFit/core/fitting.py
__init__
dominikpantak/pyIsoFit
5
python
def __init__(self, df=None, temps=None, key_pressures=None, key_uptakes=None, model=None, compname=None, temp_units='C'): "\n :param df: pd.DataFrame or list[pd.DataFrame]\n Pure-component isotherm data as a pandas dataframe - must be uptake in mmol/g and pressure in bar or\n equivalent. If datasets at different temperatures are required for fitting, the user must specify\n them in the same dataframe. A list of dataframes may be passed for the dual-site Langmuir isotherm\n model procedure where parameter results across different components are utilised. Must be inputted\n in the same order as compname (when passing as a list).\n\n :param temps: list[float]\n List of temperatures corresponding to each dataset within the dataframe for results formatting and\n for calculating heats of adsorption/ binding energies. Must be inputted in the same order as\n key_pressures and key_uptakes.\n\n :param key_pressures: list[str]\n List of unique column key(s) which correspond to each dataset's pressure values within the\n dataframe. Can input any number of keys corresponding to any number of datasets in the dataframe.\n If multiple dataframes are specified, make sure keys are identical across each dataframe for each\n temperature. Must be inputted in the same order as key_uptakes and temps.\n\n :param key_uptakes: list[str]\n List of unique column key(s) which correspond to each dataset's uptake values within the\n dataframe. Can input any number of keys corresponding to any number of datasets in the dataframe.\n If multiple dataframes are specified, make sure keys are identical across each dataframe for each\n temperature. Must be inputted in the same order as key_pressures and temps.\n\n :param model: str\n Model to be fit to dataset(s).\n\n :param compname: str or list[str], optional\n Name of pure component(s) for results formatting. If None is passed, self.compname is instantiated\n as anarbitrary letter or a list of arbitrary letters corresponding to each component. 
Must be\n inputted in the same order as compname (when passing as a list).\n\n :param temp_units: str, Optional\n Units of temperature input (temps). Default is degrees C. Can accept Kelvin, 'K'.\n\n " if (df is None): raise ParameterError('Input Pandas Dataframe with pure-component isotherm data for fitting') if (temps is None): raise ParameterError('Input temperature corresponding to each pure-component isotherm dataset within the dataframe for fitting') if (key_pressures is None): raise ParameterError("Input list of unique column key(s) which correspond to each dataset's pressure values within the Dataframe") if (key_uptakes is None): raise ParameterError("Input list of unique column key(s) which correspond to each dataset's uptake values within the Dataframe") if (model.lower() is None): raise ParameterError('Enter a model as a parameter') if (model.lower() not in _MODELS): raise ParameterError(('Enter a valid model - List of supported models:\n ' + str(_MODELS))) len_check = [len(key_uptakes), len(key_pressures), len(temps)] if (len(temps) != (sum(len_check) / len(len_check))): raise ParameterError('Lengths of key_uptakes, key_pressures or temps do not match. 
Check that the length of each list is the same, corresponding to each dataset') if ((type(df) is list) and (model.lower() != 'dsl')): raise ParameterError('Enter one dataframe, not a list of dataframes') if ((compname is None) and (type(compname) is not list)): self.compname = 'A' logger.info('No component name passed - giving component an arbitrary name.') elif ((compname is None) and (type(df) is list)): letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K'] self.compname = [letters[i] for i in range(len(compname))] logger.info('No component names passed - giving components arbitrary names.') del letters logger.info('Checks successfully passed') self.df = df self.temps = temps self.temp_units = temp_units if (self.temp_units == 'K'): self.temps = temps else: self.temps = [(t + 273) for t in temps] self.compname = compname self.key_pressures = key_pressures self.key_uptakes = key_uptakes self.model = model.lower() self.input_model = model self.x = [] self.y = [] self.params = [] self.df_result = None self.emod_input = {} self.henry_params = [] self.rel_pres = False
def __init__(self, df=None, temps=None, key_pressures=None, key_uptakes=None, model=None, compname=None, temp_units='C'): "\n :param df: pd.DataFrame or list[pd.DataFrame]\n Pure-component isotherm data as a pandas dataframe - must be uptake in mmol/g and pressure in bar or\n equivalent. If datasets at different temperatures are required for fitting, the user must specify\n them in the same dataframe. A list of dataframes may be passed for the dual-site Langmuir isotherm\n model procedure where parameter results across different components are utilised. Must be inputted\n in the same order as compname (when passing as a list).\n\n :param temps: list[float]\n List of temperatures corresponding to each dataset within the dataframe for results formatting and\n for calculating heats of adsorption/ binding energies. Must be inputted in the same order as\n key_pressures and key_uptakes.\n\n :param key_pressures: list[str]\n List of unique column key(s) which correspond to each dataset's pressure values within the\n dataframe. Can input any number of keys corresponding to any number of datasets in the dataframe.\n If multiple dataframes are specified, make sure keys are identical across each dataframe for each\n temperature. Must be inputted in the same order as key_uptakes and temps.\n\n :param key_uptakes: list[str]\n List of unique column key(s) which correspond to each dataset's uptake values within the\n dataframe. Can input any number of keys corresponding to any number of datasets in the dataframe.\n If multiple dataframes are specified, make sure keys are identical across each dataframe for each\n temperature. Must be inputted in the same order as key_pressures and temps.\n\n :param model: str\n Model to be fit to dataset(s).\n\n :param compname: str or list[str], optional\n Name of pure component(s) for results formatting. If None is passed, self.compname is instantiated\n as anarbitrary letter or a list of arbitrary letters corresponding to each component. 
Must be\n inputted in the same order as compname (when passing as a list).\n\n :param temp_units: str, Optional\n Units of temperature input (temps). Default is degrees C. Can accept Kelvin, 'K'.\n\n " if (df is None): raise ParameterError('Input Pandas Dataframe with pure-component isotherm data for fitting') if (temps is None): raise ParameterError('Input temperature corresponding to each pure-component isotherm dataset within the dataframe for fitting') if (key_pressures is None): raise ParameterError("Input list of unique column key(s) which correspond to each dataset's pressure values within the Dataframe") if (key_uptakes is None): raise ParameterError("Input list of unique column key(s) which correspond to each dataset's uptake values within the Dataframe") if (model.lower() is None): raise ParameterError('Enter a model as a parameter') if (model.lower() not in _MODELS): raise ParameterError(('Enter a valid model - List of supported models:\n ' + str(_MODELS))) len_check = [len(key_uptakes), len(key_pressures), len(temps)] if (len(temps) != (sum(len_check) / len(len_check))): raise ParameterError('Lengths of key_uptakes, key_pressures or temps do not match. 
Check that the length of each list is the same, corresponding to each dataset') if ((type(df) is list) and (model.lower() != 'dsl')): raise ParameterError('Enter one dataframe, not a list of dataframes') if ((compname is None) and (type(compname) is not list)): self.compname = 'A' logger.info('No component name passed - giving component an arbitrary name.') elif ((compname is None) and (type(df) is list)): letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K'] self.compname = [letters[i] for i in range(len(compname))] logger.info('No component names passed - giving components arbitrary names.') del letters logger.info('Checks successfully passed') self.df = df self.temps = temps self.temp_units = temp_units if (self.temp_units == 'K'): self.temps = temps else: self.temps = [(t + 273) for t in temps] self.compname = compname self.key_pressures = key_pressures self.key_uptakes = key_uptakes self.model = model.lower() self.input_model = model self.x = [] self.y = [] self.params = [] self.df_result = None self.emod_input = {} self.henry_params = [] self.rel_pres = False<|docstring|>:param df: pd.DataFrame or list[pd.DataFrame] Pure-component isotherm data as a pandas dataframe - must be uptake in mmol/g and pressure in bar or equivalent. If datasets at different temperatures are required for fitting, the user must specify them in the same dataframe. A list of dataframes may be passed for the dual-site Langmuir isotherm model procedure where parameter results across different components are utilised. Must be inputted in the same order as compname (when passing as a list). :param temps: list[float] List of temperatures corresponding to each dataset within the dataframe for results formatting and for calculating heats of adsorption/ binding energies. Must be inputted in the same order as key_pressures and key_uptakes. :param key_pressures: list[str] List of unique column key(s) which correspond to each dataset's pressure values within the dataframe. 
Can input any number of keys corresponding to any number of datasets in the dataframe. If multiple dataframes are specified, make sure keys are identical across each dataframe for each temperature. Must be inputted in the same order as key_uptakes and temps. :param key_uptakes: list[str] List of unique column key(s) which correspond to each dataset's uptake values within the dataframe. Can input any number of keys corresponding to any number of datasets in the dataframe. If multiple dataframes are specified, make sure keys are identical across each dataframe for each temperature. Must be inputted in the same order as key_pressures and temps. :param model: str Model to be fit to dataset(s). :param compname: str or list[str], optional Name of pure component(s) for results formatting. If None is passed, self.compname is instantiated as anarbitrary letter or a list of arbitrary letters corresponding to each component. Must be inputted in the same order as compname (when passing as a list). :param temp_units: str, Optional Units of temperature input (temps). Default is degrees C. Can accept Kelvin, 'K'.<|endoftext|>
6ac45837557b29984dbc3de538d33078a0cbcd88790804e2d858102a9776e56d
def info_params(self): '\n Prints information about the model to be fit\n (WIP)\n\n ' print(f'Parameters for the {self.model} model:') print(_MODEL_PARAM_LISTS[self.model])
Prints information about the model to be fit (WIP)
src/pyIsoFit/core/fitting.py
info_params
dominikpantak/pyIsoFit
5
python
def info_params(self): '\n Prints information about the model to be fit\n (WIP)\n\n ' print(f'Parameters for the {self.model} model:') print(_MODEL_PARAM_LISTS[self.model])
def info_params(self): '\n Prints information about the model to be fit\n (WIP)\n\n ' print(f'Parameters for the {self.model} model:') print(_MODEL_PARAM_LISTS[self.model])<|docstring|>Prints information about the model to be fit (WIP)<|endoftext|>
b000c9ce902803fa41ba779ae2cad377b90181a50dd0c7d0bff39caaa1b7571b
def fit(self, cond=False, meth='leastsq', show_hen=False, hen_tol=0.999, rel_pres=False, henry_off=False, guess=None, cust_bounds=None, fit_report=False, weights=None, dsl_comp_a=None): "\n Plotting method for the FitIsotherm class.\n Fits model to data using Non-Linear Least-Squares Minimization.\n This method is a generic fitting method for all models included in this package using the lmfit\n Parameters and Models class.\n\n Parameters\n ----------\n\n :param cond : bool\n Input whether to add standardised fitting constraints to fitting procedure. These are different\n for each fitting. Currently only works for Langmuir, Langmuir td, DSL, BDDT. Default is False\n\n :param meth : str\n Input the fitting algorithm which lmfit uses to fit curves. Default is 'leastsq' however lmfit includes\n many fitting algorithms which can be inputted (https://lmfit.github.io/lmfit-py/fitting.html).\n\n :param show_hen : bool\n Input whether to show the henry regime of the datasets approximated by the package. This is False by\n default.\n\n :param hen_tol : float or list[float]\n The henry region approximation function calculates the henry region by finding a line with the highest\n R squared value in the low pressure region of the dataset. This is done with a default R squared\n tolerance value (set to 0.999).\n\n For example, if a float is inputted (a different henry tolerance) this will be the henry tolerance value\n used by the function. i.e if 0.98 is inputted the henry regime will be across a large pressure range\n due to the low tolerance for the R squared value of the henry model fitting.\n\n This function also supports inputting the henry regimes manually. For this, input each henry regime for\n each dataset as a list i.e [1.2, 2.1, ... ]\n\n :param rel_pres : bool\n Input whether to fit the x axis data to relative pressure instead of absolute. 
Default is False\n\n :param henry_off : bool\n Input whether to turn off the henry regime fitting constraint when using the standardised fitting\n constraint to langmuir or dsl - this is usually done when fitting experimental data which has a messy\n low pressure region. Default is False.\n\n :param guess : dict\n Input custom guess values to override the default guess values. This must be inputted as a dictionary\n with the keys corresponding to the parameter string and the value corresponding to the list of guess\n values corresponding to each dataset.\n i.e for Langmuir: guess = {'q': [5, 5, 6], 'b':[100, 1000, 2000]}\n\n :param cust_bounds : dict\n Input custom bounds for the fitting. These are hard constraints and lmfit will fit only within these\n minimum and maximum values. Input these as a dictionary with the keys corresponding to the parameter\n string and the value corresponding to the list of tuples which include bounds for each dataset in the\n format (min, max).\n i.e for Langmuir: cust_bounds = {'q': [(4,6), (4, None), (5,10)], ... ect.}\n\n :param fit_report : bool\n Display a fitting report generated by lmfit for each dataset. Default is False\n\n :param weights : list[list[float]]\n Weights for fitting\n\n :param dsl_comp_a : str\n Manually input which component is the most adsorbed component (compA) for the dsl constrained\n fitting procedure.\n\n :return Returns a dictionary of fitting results\n\n\n Note:\n ---------\n Because the dsl constrained fitting procedure fits a list of dataframes, the generic fitting method is not\n used when 'dsl' is inputted with the fitting condition as true and the method returns the result from the\n dsl_fit function. This is because the dsl_fit function carries out its' own initial guess calculations and\n henry regime estimations. 
The user may interact with this model in the same way as with the rest, however guess\n must be inputted as a list of dictionaries (just as with the list of DataFrames and component names).\n Custom bounds cannot yet be inputted into this model as this is a WIP.\n " if ((self.model == 'dsl') and (cond is True)): logger.info('DSL Fitting procedure commenced') if (type(self.df) is not list): self.df = [self.df] if (type(self.compname) is not list): self.compname = [self.compname] if ((type(guess) is not list) and (guess is not None)): guess = [guess] if (type(hen_tol) is not list): hen_tol = [hen_tol for _ in self.compname] try: dsl_result = dsl_fit(self.df, self.key_pressures, self.key_uptakes, self.temps, self.compname, meth, guess, hen_tol, show_hen, henry_off, dsl_comp_a) except ValueError: logger.critical(_dsl_error_msg) return None (df_dict, results_dict, df_res_dict, params_dict) = dsl_result self.params = results_dict for comp in self.compname: (x_i, y_i) = df_dict[comp] self.x.append(x_i) self.y.append(y_i) self.df_result = df_res_dict self.emod_input = params_dict return df_res_dict logger.info('Generic fitting procedure commenced') if ((self.model not in _does_something) and (cond is not False)): logger.warning(f'''WARNING You have set cond={cond} but cond for the model '{self.model}' does nothing. 
''') if (self.model == 'henry'): show_hen = True henry_params = henry_approx(self.df, self.key_pressures, self.key_uptakes, show_hen, hen_tol, self.compname, henry_off) henry_constants = henry_params[0] if (self.model == 'henry'): logger.info('Henry model fitting only chosen') self.henry_params = henry_params self.df_result = henry_params[1] return None if (guess is None): guess = get_guess_params(self.model, self.df, self.key_uptakes, self.key_pressures) logger.info('Guess values successfully obtained') else: for (param, guess_val) in guess.items(): if (param not in _MODEL_PARAM_LISTS[self.model]): raise ParameterError(('%s is not a valid parameter in the %s model.' % (param, self.model))) guess[param] = guess_val logger.info('Guess values overridden with custom guess values') if ('mdr' in self.model): logger.info('MDR chosen so relative pressure toggle force set') rel_pres = True self.rel_pres = rel_pres (self.x, self.y) = get_xy(self.df, self.key_pressures, self.key_uptakes, self.model, rel_pres) logger.info('x and y parameters successfully obtained') if (weights is None): logger.info('No weights inputted - setting weights to x') weights = self.x if ((self.model == 'bddt 2n') or (self.model == 'bddt 2n-1') or (self.model == 'bddt')): self.model = 'bddt' (self.params, values_dict) = generic_fit(self.model, weights, self.y, guess, self.temps, cond, meth, cust_bounds, fit_report, henry_constants, henry_off) logger.info('Generic fit completed successfully') (final_results_dict, c_list) = get_sorted_results(values_dict, self.model, self.temps) logger.info('Results sorted successfully') se = [mse(self.x[i], self.y[i], _MODEL_FUNCTIONS[self.model], c_list[i]) for i in range(len(self.x))] logger.info('Mean squared error calculated successfully') final_results_dict['MSE'] = se df_result = pd.DataFrame.from_dict(final_results_dict) pd.set_option('display.max_columns', None) print(f''' ---- Component {self.compname} fitting results -----''') display(df_result) 
self.df_result = df_result if (len(self.temps) >= 3): heat_calc(self.model, self.temps, final_results_dict, self.x)
Plotting method for the FitIsotherm class. Fits model to data using Non-Linear Least-Squares Minimization. This method is a generic fitting method for all models included in this package using the lmfit Parameters and Models class. Parameters ---------- :param cond : bool Input whether to add standardised fitting constraints to fitting procedure. These are different for each fitting. Currently only works for Langmuir, Langmuir td, DSL, BDDT. Default is False :param meth : str Input the fitting algorithm which lmfit uses to fit curves. Default is 'leastsq' however lmfit includes many fitting algorithms which can be inputted (https://lmfit.github.io/lmfit-py/fitting.html). :param show_hen : bool Input whether to show the henry regime of the datasets approximated by the package. This is False by default. :param hen_tol : float or list[float] The henry region approximation function calculates the henry region by finding a line with the highest R squared value in the low pressure region of the dataset. This is done with a default R squared tolerance value (set to 0.999). For example, if a float is inputted (a different henry tolerance) this will be the henry tolerance value used by the function. i.e if 0.98 is inputted the henry regime will be across a large pressure range due to the low tolerance for the R squared value of the henry model fitting. This function also supports inputting the henry regimes manually. For this, input each henry regime for each dataset as a list i.e [1.2, 2.1, ... ] :param rel_pres : bool Input whether to fit the x axis data to relative pressure instead of absolute. Default is False :param henry_off : bool Input whether to turn off the henry regime fitting constraint when using the standardised fitting constraint to langmuir or dsl - this is usually done when fitting experimental data which has a messy low pressure region. Default is False. :param guess : dict Input custom guess values to override the default guess values. 
This must be inputted as a dictionary with the keys corresponding to the parameter string and the value corresponding to the list of guess values corresponding to each dataset. i.e for Langmuir: guess = {'q': [5, 5, 6], 'b':[100, 1000, 2000]} :param cust_bounds : dict Input custom bounds for the fitting. These are hard constraints and lmfit will fit only within these minimum and maximum values. Input these as a dictionary with the keys corresponding to the parameter string and the value corresponding to the list of tuples which include bounds for each dataset in the format (min, max). i.e for Langmuir: cust_bounds = {'q': [(4,6), (4, None), (5,10)], ... ect.} :param fit_report : bool Display a fitting report generated by lmfit for each dataset. Default is False :param weights : list[list[float]] Weights for fitting :param dsl_comp_a : str Manually input which component is the most adsorbed component (compA) for the dsl constrained fitting procedure. :return Returns a dictionary of fitting results Note: --------- Because the dsl constrained fitting procedure fits a list of dataframes, the generic fitting method is not used when 'dsl' is inputted with the fitting condition as true and the method returns the result from the dsl_fit function. This is because the dsl_fit function carries out its' own initial guess calculations and henry regime estimations. The user may interact with this model in the same way as with the rest, however guess must be inputted as a list of dictionaries (just as with the list of DataFrames and component names). Custom bounds cannot yet be inputted into this model as this is a WIP.
src/pyIsoFit/core/fitting.py
fit
dominikpantak/pyIsoFit
5
python
def fit(self, cond=False, meth='leastsq', show_hen=False, hen_tol=0.999, rel_pres=False, henry_off=False, guess=None, cust_bounds=None, fit_report=False, weights=None, dsl_comp_a=None): "\n Plotting method for the FitIsotherm class.\n Fits model to data using Non-Linear Least-Squares Minimization.\n This method is a generic fitting method for all models included in this package using the lmfit\n Parameters and Models class.\n\n Parameters\n ----------\n\n :param cond : bool\n Input whether to add standardised fitting constraints to fitting procedure. These are different\n for each fitting. Currently only works for Langmuir, Langmuir td, DSL, BDDT. Default is False\n\n :param meth : str\n Input the fitting algorithm which lmfit uses to fit curves. Default is 'leastsq' however lmfit includes\n many fitting algorithms which can be inputted (https://lmfit.github.io/lmfit-py/fitting.html).\n\n :param show_hen : bool\n Input whether to show the henry regime of the datasets approximated by the package. This is False by\n default.\n\n :param hen_tol : float or list[float]\n The henry region approximation function calculates the henry region by finding a line with the highest\n R squared value in the low pressure region of the dataset. This is done with a default R squared\n tolerance value (set to 0.999).\n\n For example, if a float is inputted (a different henry tolerance) this will be the henry tolerance value\n used by the function. i.e if 0.98 is inputted the henry regime will be across a large pressure range\n due to the low tolerance for the R squared value of the henry model fitting.\n\n This function also supports inputting the henry regimes manually. For this, input each henry regime for\n each dataset as a list i.e [1.2, 2.1, ... ]\n\n :param rel_pres : bool\n Input whether to fit the x axis data to relative pressure instead of absolute. 
Default is False\n\n :param henry_off : bool\n Input whether to turn off the henry regime fitting constraint when using the standardised fitting\n constraint to langmuir or dsl - this is usually done when fitting experimental data which has a messy\n low pressure region. Default is False.\n\n :param guess : dict\n Input custom guess values to override the default guess values. This must be inputted as a dictionary\n with the keys corresponding to the parameter string and the value corresponding to the list of guess\n values corresponding to each dataset.\n i.e for Langmuir: guess = {'q': [5, 5, 6], 'b':[100, 1000, 2000]}\n\n :param cust_bounds : dict\n Input custom bounds for the fitting. These are hard constraints and lmfit will fit only within these\n minimum and maximum values. Input these as a dictionary with the keys corresponding to the parameter\n string and the value corresponding to the list of tuples which include bounds for each dataset in the\n format (min, max).\n i.e for Langmuir: cust_bounds = {'q': [(4,6), (4, None), (5,10)], ... ect.}\n\n :param fit_report : bool\n Display a fitting report generated by lmfit for each dataset. Default is False\n\n :param weights : list[list[float]]\n Weights for fitting\n\n :param dsl_comp_a : str\n Manually input which component is the most adsorbed component (compA) for the dsl constrained\n fitting procedure.\n\n :return Returns a dictionary of fitting results\n\n\n Note:\n ---------\n Because the dsl constrained fitting procedure fits a list of dataframes, the generic fitting method is not\n used when 'dsl' is inputted with the fitting condition as true and the method returns the result from the\n dsl_fit function. This is because the dsl_fit function carries out its' own initial guess calculations and\n henry regime estimations. 
The user may interact with this model in the same way as with the rest, however guess\n must be inputted as a list of dictionaries (just as with the list of DataFrames and component names).\n Custom bounds cannot yet be inputted into this model as this is a WIP.\n " if ((self.model == 'dsl') and (cond is True)): logger.info('DSL Fitting procedure commenced') if (type(self.df) is not list): self.df = [self.df] if (type(self.compname) is not list): self.compname = [self.compname] if ((type(guess) is not list) and (guess is not None)): guess = [guess] if (type(hen_tol) is not list): hen_tol = [hen_tol for _ in self.compname] try: dsl_result = dsl_fit(self.df, self.key_pressures, self.key_uptakes, self.temps, self.compname, meth, guess, hen_tol, show_hen, henry_off, dsl_comp_a) except ValueError: logger.critical(_dsl_error_msg) return None (df_dict, results_dict, df_res_dict, params_dict) = dsl_result self.params = results_dict for comp in self.compname: (x_i, y_i) = df_dict[comp] self.x.append(x_i) self.y.append(y_i) self.df_result = df_res_dict self.emod_input = params_dict return df_res_dict logger.info('Generic fitting procedure commenced') if ((self.model not in _does_something) and (cond is not False)): logger.warning(f'WARNING You have set cond={cond} but cond for the model '{self.model}' does nothing. 
') if (self.model == 'henry'): show_hen = True henry_params = henry_approx(self.df, self.key_pressures, self.key_uptakes, show_hen, hen_tol, self.compname, henry_off) henry_constants = henry_params[0] if (self.model == 'henry'): logger.info('Henry model fitting only chosen') self.henry_params = henry_params self.df_result = henry_params[1] return None if (guess is None): guess = get_guess_params(self.model, self.df, self.key_uptakes, self.key_pressures) logger.info('Guess values successfully obtained') else: for (param, guess_val) in guess.items(): if (param not in _MODEL_PARAM_LISTS[self.model]): raise ParameterError(('%s is not a valid parameter in the %s model.' % (param, self.model))) guess[param] = guess_val logger.info('Guess values overridden with custom guess values') if ('mdr' in self.model): logger.info('MDR chosen so relative pressure toggle force set') rel_pres = True self.rel_pres = rel_pres (self.x, self.y) = get_xy(self.df, self.key_pressures, self.key_uptakes, self.model, rel_pres) logger.info('x and y parameters successfully obtained') if (weights is None): logger.info('No weights inputted - setting weights to x') weights = self.x if ((self.model == 'bddt 2n') or (self.model == 'bddt 2n-1') or (self.model == 'bddt')): self.model = 'bddt' (self.params, values_dict) = generic_fit(self.model, weights, self.y, guess, self.temps, cond, meth, cust_bounds, fit_report, henry_constants, henry_off) logger.info('Generic fit completed successfully') (final_results_dict, c_list) = get_sorted_results(values_dict, self.model, self.temps) logger.info('Results sorted successfully') se = [mse(self.x[i], self.y[i], _MODEL_FUNCTIONS[self.model], c_list[i]) for i in range(len(self.x))] logger.info('Mean squared error calculated successfully') final_results_dict['MSE'] = se df_result = pd.DataFrame.from_dict(final_results_dict) pd.set_option('display.max_columns', None) print(f' ---- Component {self.compname} fitting results -----') display(df_result) self.df_result = 
df_result if (len(self.temps) >= 3): heat_calc(self.model, self.temps, final_results_dict, self.x)
def fit(self, cond=False, meth='leastsq', show_hen=False, hen_tol=0.999, rel_pres=False, henry_off=False, guess=None, cust_bounds=None, fit_report=False, weights=None, dsl_comp_a=None): "\n Plotting method for the FitIsotherm class.\n Fits model to data using Non-Linear Least-Squares Minimization.\n This method is a generic fitting method for all models included in this package using the lmfit\n Parameters and Models class.\n\n Parameters\n ----------\n\n :param cond : bool\n Input whether to add standardised fitting constraints to fitting procedure. These are different\n for each fitting. Currently only works for Langmuir, Langmuir td, DSL, BDDT. Default is False\n\n :param meth : str\n Input the fitting algorithm which lmfit uses to fit curves. Default is 'leastsq' however lmfit includes\n many fitting algorithms which can be inputted (https://lmfit.github.io/lmfit-py/fitting.html).\n\n :param show_hen : bool\n Input whether to show the henry regime of the datasets approximated by the package. This is False by\n default.\n\n :param hen_tol : float or list[float]\n The henry region approximation function calculates the henry region by finding a line with the highest\n R squared value in the low pressure region of the dataset. This is done with a default R squared\n tolerance value (set to 0.999).\n\n For example, if a float is inputted (a different henry tolerance) this will be the henry tolerance value\n used by the function. i.e if 0.98 is inputted the henry regime will be across a large pressure range\n due to the low tolerance for the R squared value of the henry model fitting.\n\n This function also supports inputting the henry regimes manually. For this, input each henry regime for\n each dataset as a list i.e [1.2, 2.1, ... ]\n\n :param rel_pres : bool\n Input whether to fit the x axis data to relative pressure instead of absolute. 
Default is False\n\n :param henry_off : bool\n Input whether to turn off the henry regime fitting constraint when using the standardised fitting\n constraint to langmuir or dsl - this is usually done when fitting experimental data which has a messy\n low pressure region. Default is False.\n\n :param guess : dict\n Input custom guess values to override the default guess values. This must be inputted as a dictionary\n with the keys corresponding to the parameter string and the value corresponding to the list of guess\n values corresponding to each dataset.\n i.e for Langmuir: guess = {'q': [5, 5, 6], 'b':[100, 1000, 2000]}\n\n :param cust_bounds : dict\n Input custom bounds for the fitting. These are hard constraints and lmfit will fit only within these\n minimum and maximum values. Input these as a dictionary with the keys corresponding to the parameter\n string and the value corresponding to the list of tuples which include bounds for each dataset in the\n format (min, max).\n i.e for Langmuir: cust_bounds = {'q': [(4,6), (4, None), (5,10)], ... ect.}\n\n :param fit_report : bool\n Display a fitting report generated by lmfit for each dataset. Default is False\n\n :param weights : list[list[float]]\n Weights for fitting\n\n :param dsl_comp_a : str\n Manually input which component is the most adsorbed component (compA) for the dsl constrained\n fitting procedure.\n\n :return Returns a dictionary of fitting results\n\n\n Note:\n ---------\n Because the dsl constrained fitting procedure fits a list of dataframes, the generic fitting method is not\n used when 'dsl' is inputted with the fitting condition as true and the method returns the result from the\n dsl_fit function. This is because the dsl_fit function carries out its' own initial guess calculations and\n henry regime estimations. 
The user may interact with this model in the same way as with the rest, however guess\n must be inputted as a list of dictionaries (just as with the list of DataFrames and component names).\n Custom bounds cannot yet be inputted into this model as this is a WIP.\n " if ((self.model == 'dsl') and (cond is True)): logger.info('DSL Fitting procedure commenced') if (type(self.df) is not list): self.df = [self.df] if (type(self.compname) is not list): self.compname = [self.compname] if ((type(guess) is not list) and (guess is not None)): guess = [guess] if (type(hen_tol) is not list): hen_tol = [hen_tol for _ in self.compname] try: dsl_result = dsl_fit(self.df, self.key_pressures, self.key_uptakes, self.temps, self.compname, meth, guess, hen_tol, show_hen, henry_off, dsl_comp_a) except ValueError: logger.critical(_dsl_error_msg) return None (df_dict, results_dict, df_res_dict, params_dict) = dsl_result self.params = results_dict for comp in self.compname: (x_i, y_i) = df_dict[comp] self.x.append(x_i) self.y.append(y_i) self.df_result = df_res_dict self.emod_input = params_dict return df_res_dict logger.info('Generic fitting procedure commenced') if ((self.model not in _does_something) and (cond is not False)): logger.warning(f'WARNING You have set cond={cond} but cond for the model '{self.model}' does nothing. 
') if (self.model == 'henry'): show_hen = True henry_params = henry_approx(self.df, self.key_pressures, self.key_uptakes, show_hen, hen_tol, self.compname, henry_off) henry_constants = henry_params[0] if (self.model == 'henry'): logger.info('Henry model fitting only chosen') self.henry_params = henry_params self.df_result = henry_params[1] return None if (guess is None): guess = get_guess_params(self.model, self.df, self.key_uptakes, self.key_pressures) logger.info('Guess values successfully obtained') else: for (param, guess_val) in guess.items(): if (param not in _MODEL_PARAM_LISTS[self.model]): raise ParameterError(('%s is not a valid parameter in the %s model.' % (param, self.model))) guess[param] = guess_val logger.info('Guess values overridden with custom guess values') if ('mdr' in self.model): logger.info('MDR chosen so relative pressure toggle force set') rel_pres = True self.rel_pres = rel_pres (self.x, self.y) = get_xy(self.df, self.key_pressures, self.key_uptakes, self.model, rel_pres) logger.info('x and y parameters successfully obtained') if (weights is None): logger.info('No weights inputted - setting weights to x') weights = self.x if ((self.model == 'bddt 2n') or (self.model == 'bddt 2n-1') or (self.model == 'bddt')): self.model = 'bddt' (self.params, values_dict) = generic_fit(self.model, weights, self.y, guess, self.temps, cond, meth, cust_bounds, fit_report, henry_constants, henry_off) logger.info('Generic fit completed successfully') (final_results_dict, c_list) = get_sorted_results(values_dict, self.model, self.temps) logger.info('Results sorted successfully') se = [mse(self.x[i], self.y[i], _MODEL_FUNCTIONS[self.model], c_list[i]) for i in range(len(self.x))] logger.info('Mean squared error calculated successfully') final_results_dict['MSE'] = se df_result = pd.DataFrame.from_dict(final_results_dict) pd.set_option('display.max_columns', None) print(f' ---- Component {self.compname} fitting results -----') display(df_result) self.df_result = 
df_result if (len(self.temps) >= 3): heat_calc(self.model, self.temps, final_results_dict, self.x)<|docstring|>Plotting method for the FitIsotherm class. Fits model to data using Non-Linear Least-Squares Minimization. This method is a generic fitting method for all models included in this package using the lmfit Parameters and Models class. Parameters ---------- :param cond : bool Input whether to add standardised fitting constraints to fitting procedure. These are different for each fitting. Currently only works for Langmuir, Langmuir td, DSL, BDDT. Default is False :param meth : str Input the fitting algorithm which lmfit uses to fit curves. Default is 'leastsq' however lmfit includes many fitting algorithms which can be inputted (https://lmfit.github.io/lmfit-py/fitting.html). :param show_hen : bool Input whether to show the henry regime of the datasets approximated by the package. This is False by default. :param hen_tol : float or list[float] The henry region approximation function calculates the henry region by finding a line with the highest R squared value in the low pressure region of the dataset. This is done with a default R squared tolerance value (set to 0.999). For example, if a float is inputted (a different henry tolerance) this will be the henry tolerance value used by the function. i.e if 0.98 is inputted the henry regime will be across a large pressure range due to the low tolerance for the R squared value of the henry model fitting. This function also supports inputting the henry regimes manually. For this, input each henry regime for each dataset as a list i.e [1.2, 2.1, ... ] :param rel_pres : bool Input whether to fit the x axis data to relative pressure instead of absolute. Default is False :param henry_off : bool Input whether to turn off the henry regime fitting constraint when using the standardised fitting constraint to langmuir or dsl - this is usually done when fitting experimental data which has a messy low pressure region. 
Default is False. :param guess : dict Input custom guess values to override the default guess values. This must be inputted as a dictionary with the keys corresponding to the parameter string and the value corresponding to the list of guess values corresponding to each dataset. i.e for Langmuir: guess = {'q': [5, 5, 6], 'b':[100, 1000, 2000]} :param cust_bounds : dict Input custom bounds for the fitting. These are hard constraints and lmfit will fit only within these minimum and maximum values. Input these as a dictionary with the keys corresponding to the parameter string and the value corresponding to the list of tuples which include bounds for each dataset in the format (min, max). i.e for Langmuir: cust_bounds = {'q': [(4,6), (4, None), (5,10)], ... ect.} :param fit_report : bool Display a fitting report generated by lmfit for each dataset. Default is False :param weights : list[list[float]] Weights for fitting :param dsl_comp_a : str Manually input which component is the most adsorbed component (compA) for the dsl constrained fitting procedure. :return Returns a dictionary of fitting results Note: --------- Because the dsl constrained fitting procedure fits a list of dataframes, the generic fitting method is not used when 'dsl' is inputted with the fitting condition as true and the method returns the result from the dsl_fit function. This is because the dsl_fit function carries out its' own initial guess calculations and henry regime estimations. The user may interact with this model in the same way as with the rest, however guess must be inputted as a list of dictionaries (just as with the list of DataFrames and component names). Custom bounds cannot yet be inputted into this model as this is a WIP.<|endoftext|>
72cf45dd80fcf69d89b96a0715efe71d94d6a09a63eadb59276d6ac82c3ec84b
def plot(self, logplot=(False, False)): '\n Plotting method for the FitIsotherm class.\n There are three plotting procedures:\n - For more than one component\n - For henry plots (This requires plotting on individual subplots)\n - The generic plotting procedure for any other model with one component (most models use this)\n\n :param logplot: tuple(bool)\n Whether to have an x and y log axis. Default is off for both x and y axis i.e (False, False) in the order\n (x, y)\n\n ' np.linspace(0, 10, 301) fit_label = '{temps} K Fit' data_label = 'Data at {temps} K' if (type(self.df) is list): for i in range(len(self.df)): plot_settings(logplot) comp_x_params = self.params[self.compname[i]] plt.title(self.compname[i]) for j in range(len(self.key_pressures)): plt.plot(self.x[i][j], comp_x_params[j].best_fit, '-', color=colours[j], label=fit_label.format(temps=self.temps[j])) plt.plot(self.x[i][j], self.y[i][j], 'ko', color='0.75', label=data_label.format(temps=self.temps[j])) elif (self.model == 'henry'): henry_const = self.henry_params[0] xy_dict = self.henry_params[2] x_hen = xy_dict['x'] y_hen = xy_dict['y'] plot_settings(logplot) lenx = len(self.x) for i in range(len(x_hen)): y_henfit = henry(x_hen[i], henry_const[i]) subplot_size = get_subplot_size(lenx, i) plt.subplot(subplot_size[0], subplot_size[1], subplot_size[2]) plt.subplots_adjust(wspace=0.3, hspace=0.3) plt.title((('Henry regime at ' + str(self.temps[i])) + ' K')) plt.plot(x_hen[i], y_henfit, '-', color=colours[i], label=fit_label.format(temps=self.temps[i])) plt.plot(x_hen[i], y_hen[i], 'ko', color='0.75', label=data_label.format(temps=self.temps[i])) plt.legend() else: plot_settings(logplot, self.input_model, self.rel_pres) for i in range(len(self.key_pressures)): plt.plot(self.x[i], self.params[i].best_fit, '-', color=colours[i], label=fit_label.format(temps=self.temps[i])) plt.plot(self.x[i], self.y[i], 'ko', color='0.75', label=data_label.format(temps=self.temps[i])) plt.legend() plt.show()
Plotting method for the FitIsotherm class. There are three plotting procedures: - For more than one component - For henry plots (This requires plotting on individual subplots) - The generic plotting procedure for any other model with one component (most models use this) :param logplot: tuple(bool) Whether to have an x and y log axis. Default is off for both x and y axis i.e (False, False) in the order (x, y)
src/pyIsoFit/core/fitting.py
plot
dominikpantak/pyIsoFit
5
python
def plot(self, logplot=(False, False)): '\n Plotting method for the FitIsotherm class.\n There are three plotting procedures:\n - For more than one component\n - For henry plots (This requires plotting on individual subplots)\n - The generic plotting procedure for any other model with one component (most models use this)\n\n :param logplot: tuple(bool)\n Whether to have an x and y log axis. Default is off for both x and y axis i.e (False, False) in the order\n (x, y)\n\n ' np.linspace(0, 10, 301) fit_label = '{temps} K Fit' data_label = 'Data at {temps} K' if (type(self.df) is list): for i in range(len(self.df)): plot_settings(logplot) comp_x_params = self.params[self.compname[i]] plt.title(self.compname[i]) for j in range(len(self.key_pressures)): plt.plot(self.x[i][j], comp_x_params[j].best_fit, '-', color=colours[j], label=fit_label.format(temps=self.temps[j])) plt.plot(self.x[i][j], self.y[i][j], 'ko', color='0.75', label=data_label.format(temps=self.temps[j])) elif (self.model == 'henry'): henry_const = self.henry_params[0] xy_dict = self.henry_params[2] x_hen = xy_dict['x'] y_hen = xy_dict['y'] plot_settings(logplot) lenx = len(self.x) for i in range(len(x_hen)): y_henfit = henry(x_hen[i], henry_const[i]) subplot_size = get_subplot_size(lenx, i) plt.subplot(subplot_size[0], subplot_size[1], subplot_size[2]) plt.subplots_adjust(wspace=0.3, hspace=0.3) plt.title((('Henry regime at ' + str(self.temps[i])) + ' K')) plt.plot(x_hen[i], y_henfit, '-', color=colours[i], label=fit_label.format(temps=self.temps[i])) plt.plot(x_hen[i], y_hen[i], 'ko', color='0.75', label=data_label.format(temps=self.temps[i])) plt.legend() else: plot_settings(logplot, self.input_model, self.rel_pres) for i in range(len(self.key_pressures)): plt.plot(self.x[i], self.params[i].best_fit, '-', color=colours[i], label=fit_label.format(temps=self.temps[i])) plt.plot(self.x[i], self.y[i], 'ko', color='0.75', label=data_label.format(temps=self.temps[i])) plt.legend() plt.show()
def plot(self, logplot=(False, False)): '\n Plotting method for the FitIsotherm class.\n There are three plotting procedures:\n - For more than one component\n - For henry plots (This requires plotting on individual subplots)\n - The generic plotting procedure for any other model with one component (most models use this)\n\n :param logplot: tuple(bool)\n Whether to have an x and y log axis. Default is off for both x and y axis i.e (False, False) in the order\n (x, y)\n\n ' np.linspace(0, 10, 301) fit_label = '{temps} K Fit' data_label = 'Data at {temps} K' if (type(self.df) is list): for i in range(len(self.df)): plot_settings(logplot) comp_x_params = self.params[self.compname[i]] plt.title(self.compname[i]) for j in range(len(self.key_pressures)): plt.plot(self.x[i][j], comp_x_params[j].best_fit, '-', color=colours[j], label=fit_label.format(temps=self.temps[j])) plt.plot(self.x[i][j], self.y[i][j], 'ko', color='0.75', label=data_label.format(temps=self.temps[j])) elif (self.model == 'henry'): henry_const = self.henry_params[0] xy_dict = self.henry_params[2] x_hen = xy_dict['x'] y_hen = xy_dict['y'] plot_settings(logplot) lenx = len(self.x) for i in range(len(x_hen)): y_henfit = henry(x_hen[i], henry_const[i]) subplot_size = get_subplot_size(lenx, i) plt.subplot(subplot_size[0], subplot_size[1], subplot_size[2]) plt.subplots_adjust(wspace=0.3, hspace=0.3) plt.title((('Henry regime at ' + str(self.temps[i])) + ' K')) plt.plot(x_hen[i], y_henfit, '-', color=colours[i], label=fit_label.format(temps=self.temps[i])) plt.plot(x_hen[i], y_hen[i], 'ko', color='0.75', label=data_label.format(temps=self.temps[i])) plt.legend() else: plot_settings(logplot, self.input_model, self.rel_pres) for i in range(len(self.key_pressures)): plt.plot(self.x[i], self.params[i].best_fit, '-', color=colours[i], label=fit_label.format(temps=self.temps[i])) plt.plot(self.x[i], self.y[i], 'ko', color='0.75', label=data_label.format(temps=self.temps[i])) plt.legend() 
plt.show()<|docstring|>Plotting method for the FitIsotherm class. There are three plotting procedures: - For more than one component - For henry plots (This requires plotting on individual subplots) - The generic plotting procedure for any other model with one component (most models use this) :param logplot: tuple(bool) Whether to have an x and y log axis. Default is off for both x and y axis i.e (False, False) in the order (x, y)<|endoftext|>
4ae277885bba38e836b97acabbe70fbcffe156a191817555f79981d89dc49dce
def save(self, directory=None, filestring=None, filetype='.csv'): '\n Saves the model fitting result and henry region fitting result dataframes to directory as a .csv or .json\n file.\n\n :param directory:\n Full destination directory must be inputted for the user to save a file\n\n :param filestring: list[str] or str\n This is a list of strings corresponding to the file names, first position is fit result, second is\n henry result. Inputting this as a string for the fit result only will also work.\n\n :param filetype: str\n .csv or .json for saving\n\n ' if (directory is None): raise SaveError('\n\nPlease enter full directory for saving file separated by double dashes i.e C:\\Users\\User\\pyIsoFit-master\\fittingresults\\') if (filestring is None): filestring = 'fit_result' if (type(self.df_result) is dict): for comp in self.df_result: save_func(directory, filestring, filetype, self.df_result[comp], comp) else: save_func(directory, filestring, filetype, self.df_result)
Saves the model fitting result and henry region fitting result dataframes to directory as a .csv or .json file. :param directory: Full destination directory must be inputted for the user to save a file :param filestring: list[str] or str This is a list of strings corresponding to the file names, first position is fit result, second is henry result. Inputting this as a string for the fit result only will also work. :param filetype: str .csv or .json for saving
src/pyIsoFit/core/fitting.py
save
dominikpantak/pyIsoFit
5
python
def save(self, directory=None, filestring=None, filetype='.csv'): '\n Saves the model fitting result and henry region fitting result dataframes to directory as a .csv or .json\n file.\n\n :param directory:\n Full destination directory must be inputted for the user to save a file\n\n :param filestring: list[str] or str\n This is a list of strings corresponding to the file names, first position is fit result, second is\n henry result. Inputting this as a string for the fit result only will also work.\n\n :param filetype: str\n .csv or .json for saving\n\n ' if (directory is None): raise SaveError('\n\nPlease enter full directory for saving file separated by double dashes i.e C:\\Users\\User\\pyIsoFit-master\\fittingresults\\') if (filestring is None): filestring = 'fit_result' if (type(self.df_result) is dict): for comp in self.df_result: save_func(directory, filestring, filetype, self.df_result[comp], comp) else: save_func(directory, filestring, filetype, self.df_result)
def save(self, directory=None, filestring=None, filetype='.csv'): '\n Saves the model fitting result and henry region fitting result dataframes to directory as a .csv or .json\n file.\n\n :param directory:\n Full destination directory must be inputted for the user to save a file\n\n :param filestring: list[str] or str\n This is a list of strings corresponding to the file names, first position is fit result, second is\n henry result. Inputting this as a string for the fit result only will also work.\n\n :param filetype: str\n .csv or .json for saving\n\n ' if (directory is None): raise SaveError('\n\nPlease enter full directory for saving file separated by double dashes i.e C:\\Users\\User\\pyIsoFit-master\\fittingresults\\') if (filestring is None): filestring = 'fit_result' if (type(self.df_result) is dict): for comp in self.df_result: save_func(directory, filestring, filetype, self.df_result[comp], comp) else: save_func(directory, filestring, filetype, self.df_result)<|docstring|>Saves the model fitting result and henry region fitting result dataframes to directory as a .csv or .json file. :param directory: Full destination directory must be inputted for the user to save a file :param filestring: list[str] or str This is a list of strings corresponding to the file names, first position is fit result, second is henry result. Inputting this as a string for the fit result only will also work. :param filetype: str .csv or .json for saving<|endoftext|>
2dca7b70edf2e55f4ac8725ab13283b889a60cf020bc372b26d00dd62eb97f19
def plot_emod(self, yfracs, ext_model='extended dsl', logplot=(False, False)): '\n Predicts co-adsorption isotherm data and plots it.\n\n :param yfracs: list[float]\n List of component mole fractions within the gas mixture\n\n :param ext_model: str\n Extended model for the method to predict co-adsorption with. Currently extended DSL is the only\n model included\n\n :param logplot: bool\n Whether to have an x and y log axis.\n\n :return:\n Returns a dictionary of co-adsorption uptakes for each component\n ' if (len(self.compname) < 2): raise ParameterError('Enter 2 components or more to use extended models') if (self.model != 'dsl'): raise ParameterError('This isotherm model is not supported for extended models. Currently supported\n models are:\n - DSL ') q_dict = ext_dsl(self.emod_input, self.temps, self.x, self.compname, yfracs) for i in range(len(self.compname)): plot_settings(logplot) q = q_dict[self.compname[i]] plt.title(f'Co-adsorption isotherm for component {self.compname[i]} at mol frac of {yfracs[i]}') for j in range(len(self.temps)): plt.plot(self.x[i][j], q[j], '--', color=colours[j], label='{temps} K Fit'.format(temps=self.temps[j])) plt.legend() plt.show() return q_dict
Predicts co-adsorption isotherm data and plots it. :param yfracs: list[float] List of component mole fractions within the gas mixture :param ext_model: str Extended model for the method to predict co-adsorption with. Currently extended DSL is the only model included :param logplot: bool Whether to have an x and y log axis. :return: Returns a dictionary of co-adsorption uptakes for each component
src/pyIsoFit/core/fitting.py
plot_emod
dominikpantak/pyIsoFit
5
python
def plot_emod(self, yfracs, ext_model='extended dsl', logplot=(False, False)): '\n Predicts co-adsorption isotherm data and plots it.\n\n :param yfracs: list[float]\n List of component mole fractions within the gas mixture\n\n :param ext_model: str\n Extended model for the method to predict co-adsorption with. Currently extended DSL is the only\n model included\n\n :param logplot: bool\n Whether to have an x and y log axis.\n\n :return:\n Returns a dictionary of co-adsorption uptakes for each component\n ' if (len(self.compname) < 2): raise ParameterError('Enter 2 components or more to use extended models') if (self.model != 'dsl'): raise ParameterError('This isotherm model is not supported for extended models. Currently supported\n models are:\n - DSL ') q_dict = ext_dsl(self.emod_input, self.temps, self.x, self.compname, yfracs) for i in range(len(self.compname)): plot_settings(logplot) q = q_dict[self.compname[i]] plt.title(f'Co-adsorption isotherm for component {self.compname[i]} at mol frac of {yfracs[i]}') for j in range(len(self.temps)): plt.plot(self.x[i][j], q[j], '--', color=colours[j], label='{temps} K Fit'.format(temps=self.temps[j])) plt.legend() plt.show() return q_dict
def plot_emod(self, yfracs, ext_model='extended dsl', logplot=(False, False)): '\n Predicts co-adsorption isotherm data and plots it.\n\n :param yfracs: list[float]\n List of component mole fractions within the gas mixture\n\n :param ext_model: str\n Extended model for the method to predict co-adsorption with. Currently extended DSL is the only\n model included\n\n :param logplot: bool\n Whether to have an x and y log axis.\n\n :return:\n Returns a dictionary of co-adsorption uptakes for each component\n ' if (len(self.compname) < 2): raise ParameterError('Enter 2 components or more to use extended models') if (self.model != 'dsl'): raise ParameterError('This isotherm model is not supported for extended models. Currently supported\n models are:\n - DSL ') q_dict = ext_dsl(self.emod_input, self.temps, self.x, self.compname, yfracs) for i in range(len(self.compname)): plot_settings(logplot) q = q_dict[self.compname[i]] plt.title(f'Co-adsorption isotherm for component {self.compname[i]} at mol frac of {yfracs[i]}') for j in range(len(self.temps)): plt.plot(self.x[i][j], q[j], '--', color=colours[j], label='{temps} K Fit'.format(temps=self.temps[j])) plt.legend() plt.show() return q_dict<|docstring|>Predicts co-adsorption isotherm data and plots it. :param yfracs: list[float] List of component mole fractions within the gas mixture :param ext_model: str Extended model for the method to predict co-adsorption with. Currently extended DSL is the only model included :param logplot: bool Whether to have an x and y log axis. :return: Returns a dictionary of co-adsorption uptakes for each component<|endoftext|>
0c6ce90f9607add97d44cd13934ab3623417e8541e3760088f1a0aab0b47685a
def get_HNF_diagonals(n): 'Finds the diagonals of the HNF that reach the target n value.\n \n Args:\n n (int): The target determinant for the HNF.\n \n Retruns:\n diags (list of lists): The allowed values of the determinant.\n ' diags = [] for i in range(1, (n + 1)): if (not ((n % i) == 0)): continue else: q = (n / i) for j in range(1, (q + 1)): if (not ((q % j) == 0)): continue else: diags.append([i, j, (q / j)]) return diags
Finds the diagonals of the HNF that reach the target n value. Args: n (int): The target determinant for the HNF. Retruns: diags (list of lists): The allowed values of the determinant.
support/brute_force/general_approach.py
get_HNF_diagonals
glwhart/autoGR
17
python
def get_HNF_diagonals(n): 'Finds the diagonals of the HNF that reach the target n value.\n \n Args:\n n (int): The target determinant for the HNF.\n \n Retruns:\n diags (list of lists): The allowed values of the determinant.\n ' diags = [] for i in range(1, (n + 1)): if (not ((n % i) == 0)): continue else: q = (n / i) for j in range(1, (q + 1)): if (not ((q % j) == 0)): continue else: diags.append([i, j, (q / j)]) return diags
def get_HNF_diagonals(n): 'Finds the diagonals of the HNF that reach the target n value.\n \n Args:\n n (int): The target determinant for the HNF.\n \n Retruns:\n diags (list of lists): The allowed values of the determinant.\n ' diags = [] for i in range(1, (n + 1)): if (not ((n % i) == 0)): continue else: q = (n / i) for j in range(1, (q + 1)): if (not ((q % j) == 0)): continue else: diags.append([i, j, (q / j)]) return diags<|docstring|>Finds the diagonals of the HNF that reach the target n value. Args: n (int): The target determinant for the HNF. Retruns: diags (list of lists): The allowed values of the determinant.<|endoftext|>
6b2163971fbfccf3b0b1751fdb978d74ea4c9df5803c54ab7567868f18b0186a
def forms_group(gens, pg): 'Tests if the given generators forms a group.\n \n Args:\n gens (list of list): The generators to check.\n pg (list of list): The group the generators form.\n \n Returns:\n corret_gens (bool): True if the generators form the group.\n ' correct_gens = False group = [] for i in gens: for j in gens: test = np.matmul(i, j) in_group = False for k in group: if np.allclose(test, k): in_group = True if (not in_group): group.append(test) growing = True while growing: nfound = 0 for i in gens: for j in group: test = np.matmul(i, j) in_group = False for k in group: if np.allclose(test, k): in_group = True if (not in_group): group.append(test) nfound += 1 if (nfound == 0): growing = False if (not (len(pg) == len(group))): correct_gens = False else: for i in pg: in_group = False for k in group: if np.allclose(i, k): correct_gens = True break if (correct_gens == False): break return correct_gens
Tests if the given generators forms a group. Args: gens (list of list): The generators to check. pg (list of list): The group the generators form. Returns: corret_gens (bool): True if the generators form the group.
support/brute_force/general_approach.py
forms_group
glwhart/autoGR
17
python
def forms_group(gens, pg): 'Tests if the given generators forms a group.\n \n Args:\n gens (list of list): The generators to check.\n pg (list of list): The group the generators form.\n \n Returns:\n corret_gens (bool): True if the generators form the group.\n ' correct_gens = False group = [] for i in gens: for j in gens: test = np.matmul(i, j) in_group = False for k in group: if np.allclose(test, k): in_group = True if (not in_group): group.append(test) growing = True while growing: nfound = 0 for i in gens: for j in group: test = np.matmul(i, j) in_group = False for k in group: if np.allclose(test, k): in_group = True if (not in_group): group.append(test) nfound += 1 if (nfound == 0): growing = False if (not (len(pg) == len(group))): correct_gens = False else: for i in pg: in_group = False for k in group: if np.allclose(i, k): correct_gens = True break if (correct_gens == False): break return correct_gens
def forms_group(gens, pg): 'Tests if the given generators forms a group.\n \n Args:\n gens (list of list): The generators to check.\n pg (list of list): The group the generators form.\n \n Returns:\n corret_gens (bool): True if the generators form the group.\n ' correct_gens = False group = [] for i in gens: for j in gens: test = np.matmul(i, j) in_group = False for k in group: if np.allclose(test, k): in_group = True if (not in_group): group.append(test) growing = True while growing: nfound = 0 for i in gens: for j in group: test = np.matmul(i, j) in_group = False for k in group: if np.allclose(test, k): in_group = True if (not in_group): group.append(test) nfound += 1 if (nfound == 0): growing = False if (not (len(pg) == len(group))): correct_gens = False else: for i in pg: in_group = False for k in group: if np.allclose(i, k): correct_gens = True break if (correct_gens == False): break return correct_gens<|docstring|>Tests if the given generators forms a group. Args: gens (list of list): The generators to check. pg (list of list): The group the generators form. Returns: corret_gens (bool): True if the generators form the group.<|endoftext|>
3b2955590f58943fdfc4fb6e746aaf77280cb1c97d235dd38ce562b5d6a3b18b
def find_gens_of_pg(pg): 'This subroutine finds the generators of the point group.\n \n Args:\n pg (list of list): A list of the matrix form of the point group.\n \n Returns:\n gens (list of list): Those operations that will generate the \n remainder of the group.\n ' from itertools import combinations n_gens = 1 found_gens = False while (not found_gens): possible_gens = list(combinations(range(len(pg)), r=n_gens)) for test in possible_gens: test_gens = [] for i in test: test_gens.append(pg[i]) if forms_group(test_gens, pg): gens = test_gens found_gens = True break n_gens += 1 return gens
This subroutine finds the generators of the point group. Args: pg (list of list): A list of the matrix form of the point group. Returns: gens (list of list): Those operations that will generate the remainder of the group.
support/brute_force/general_approach.py
find_gens_of_pg
glwhart/autoGR
17
python
def find_gens_of_pg(pg): 'This subroutine finds the generators of the point group.\n \n Args:\n pg (list of list): A list of the matrix form of the point group.\n \n Returns:\n gens (list of list): Those operations that will generate the \n remainder of the group.\n ' from itertools import combinations n_gens = 1 found_gens = False while (not found_gens): possible_gens = list(combinations(range(len(pg)), r=n_gens)) for test in possible_gens: test_gens = [] for i in test: test_gens.append(pg[i]) if forms_group(test_gens, pg): gens = test_gens found_gens = True break n_gens += 1 return gens
def find_gens_of_pg(pg): 'This subroutine finds the generators of the point group.\n \n Args:\n pg (list of list): A list of the matrix form of the point group.\n \n Returns:\n gens (list of list): Those operations that will generate the \n remainder of the group.\n ' from itertools import combinations n_gens = 1 found_gens = False while (not found_gens): possible_gens = list(combinations(range(len(pg)), r=n_gens)) for test in possible_gens: test_gens = [] for i in test: test_gens.append(pg[i]) if forms_group(test_gens, pg): gens = test_gens found_gens = True break n_gens += 1 return gens<|docstring|>This subroutine finds the generators of the point group. Args: pg (list of list): A list of the matrix form of the point group. Returns: gens (list of list): Those operations that will generate the remainder of the group.<|endoftext|>
6e12e8ef9b94b3166968760e32de74efc66b7284dc3ab04f7888ac3fbd647edf
def div_HNF(lat, n): 'Finds the HNFs that preserve the symmetry of the lattice.\n \n Args:\n lat (numpy.ndarray): The vectors (as rows) of the parent lattice.\n n (int): The volume factor for the supercell.\n \n Returns:\n HNFs (list of lists): The HNFs the preserve the symmetry.\n ' from phenum.symmetry import _get_lattice_pointGroup diags = get_HNF_diagonals(n) pg = _get_lattice_pointGroup(lat) gens = find_gens_of_pg(pg) lat = np.transpose(lat) lat_gens = [] for g in gens: temp = np.matmul(np.linalg.inv(lat), np.matmul(g, lat)) lat_gens.append(np.transpose(temp)) x11 = [] x12 = [] x13 = [] x21 = [] x22 = [] x23 = [] x31 = [] x32 = [] x33 = [] for g in lat_gens: x11.append(g[0][0]) x12.append(g[0][1]) x13.append(g[0][2]) x21.append(g[1][0]) x22.append(g[1][1]) x23.append(g[1][2]) x31.append(g[2][0]) x32.append(g[2][1]) x33.append(g[2][2]) x11 = np.array(x11) x12 = np.array(x12) x13 = np.array(x13) x21 = np.array(x21) x22 = np.array(x22) x23 = np.array(x23) x31 = np.array(x31) x32 = np.array(x32) x33 = np.array(x33) count = 0 HNFs = [] for diag in diags: print('diag', diag) a = diag[0] c = diag[1] f = diag[2] if np.allclose(((x13 * f) % a), 0): d = None e = None b = None if (np.allclose(x13, 0) and (not np.allclose(x12, 0))): if (not np.allclose(((x12 * c) % a), 0)): continue b = 0 al1 = ((b * x12) / a) al2 = ((c * x12) / a) al3 = ((f * x13) / a) tHNFs = cdivs(a, b, c, d, e, f, al1, al2, al3, x11, x21, x22, x23, x31, x32, x33) for t in tHNFs: HNFs.append(t) count += 1 elif (np.allclose(x12, 0) and (not np.allclose(x13, 0))): vals = [] N = 0 xt = x13[np.nonzero(x13)] val = np.unique(((N * a) / xt)) while any((abs(val) < f)): for v in val: if (v < f): vals.append(v) N += 1 val = np.unique(((N * a) / xt)) for d in vals: for e in vals: al1 = ((d * x13) / a) al2 = ((e * x13) / a) al3 = ((f * x13) / a) tHNFs = cdivs(a, b, c, d, e, f, al1, al2, al3, x11, x21, x22, x23, x31, x32, x33) for t in tHNFs: HNFs.append(t) count += 1 else: for e in range(f): if np.allclose((((c * 
x12) + (e * x13)) % a), 0): for b in range(c): for d in range(f): if np.allclose((((b * x12) + (d * x13)) % a), 0): al1 = (((b * x12) + (d * x13)) / a) al2 = (((c * x12) + (e * x13)) / a) al3 = ((f * x13) / a) tHNFs = cdivs(a, b, c, d, e, f, al1, al2, al3, x11, x21, x22, x23, x31, x32, x33) for t in tHNFs: HNFs.append(t) count += 1 else: continue else: continue else: continue return HNFs
Finds the HNFs that preserve the symmetry of the lattice. Args: lat (numpy.ndarray): The vectors (as rows) of the parent lattice. n (int): The volume factor for the supercell. Returns: HNFs (list of lists): The HNFs the preserve the symmetry.
support/brute_force/general_approach.py
div_HNF
glwhart/autoGR
17
python
def div_HNF(lat, n): 'Finds the HNFs that preserve the symmetry of the lattice.\n \n Args:\n lat (numpy.ndarray): The vectors (as rows) of the parent lattice.\n n (int): The volume factor for the supercell.\n \n Returns:\n HNFs (list of lists): The HNFs the preserve the symmetry.\n ' from phenum.symmetry import _get_lattice_pointGroup diags = get_HNF_diagonals(n) pg = _get_lattice_pointGroup(lat) gens = find_gens_of_pg(pg) lat = np.transpose(lat) lat_gens = [] for g in gens: temp = np.matmul(np.linalg.inv(lat), np.matmul(g, lat)) lat_gens.append(np.transpose(temp)) x11 = [] x12 = [] x13 = [] x21 = [] x22 = [] x23 = [] x31 = [] x32 = [] x33 = [] for g in lat_gens: x11.append(g[0][0]) x12.append(g[0][1]) x13.append(g[0][2]) x21.append(g[1][0]) x22.append(g[1][1]) x23.append(g[1][2]) x31.append(g[2][0]) x32.append(g[2][1]) x33.append(g[2][2]) x11 = np.array(x11) x12 = np.array(x12) x13 = np.array(x13) x21 = np.array(x21) x22 = np.array(x22) x23 = np.array(x23) x31 = np.array(x31) x32 = np.array(x32) x33 = np.array(x33) count = 0 HNFs = [] for diag in diags: print('diag', diag) a = diag[0] c = diag[1] f = diag[2] if np.allclose(((x13 * f) % a), 0): d = None e = None b = None if (np.allclose(x13, 0) and (not np.allclose(x12, 0))): if (not np.allclose(((x12 * c) % a), 0)): continue b = 0 al1 = ((b * x12) / a) al2 = ((c * x12) / a) al3 = ((f * x13) / a) tHNFs = cdivs(a, b, c, d, e, f, al1, al2, al3, x11, x21, x22, x23, x31, x32, x33) for t in tHNFs: HNFs.append(t) count += 1 elif (np.allclose(x12, 0) and (not np.allclose(x13, 0))): vals = [] N = 0 xt = x13[np.nonzero(x13)] val = np.unique(((N * a) / xt)) while any((abs(val) < f)): for v in val: if (v < f): vals.append(v) N += 1 val = np.unique(((N * a) / xt)) for d in vals: for e in vals: al1 = ((d * x13) / a) al2 = ((e * x13) / a) al3 = ((f * x13) / a) tHNFs = cdivs(a, b, c, d, e, f, al1, al2, al3, x11, x21, x22, x23, x31, x32, x33) for t in tHNFs: HNFs.append(t) count += 1 else: for e in range(f): if np.allclose((((c * 
x12) + (e * x13)) % a), 0): for b in range(c): for d in range(f): if np.allclose((((b * x12) + (d * x13)) % a), 0): al1 = (((b * x12) + (d * x13)) / a) al2 = (((c * x12) + (e * x13)) / a) al3 = ((f * x13) / a) tHNFs = cdivs(a, b, c, d, e, f, al1, al2, al3, x11, x21, x22, x23, x31, x32, x33) for t in tHNFs: HNFs.append(t) count += 1 else: continue else: continue else: continue return HNFs
def div_HNF(lat, n): 'Finds the HNFs that preserve the symmetry of the lattice.\n \n Args:\n lat (numpy.ndarray): The vectors (as rows) of the parent lattice.\n n (int): The volume factor for the supercell.\n \n Returns:\n HNFs (list of lists): The HNFs the preserve the symmetry.\n ' from phenum.symmetry import _get_lattice_pointGroup diags = get_HNF_diagonals(n) pg = _get_lattice_pointGroup(lat) gens = find_gens_of_pg(pg) lat = np.transpose(lat) lat_gens = [] for g in gens: temp = np.matmul(np.linalg.inv(lat), np.matmul(g, lat)) lat_gens.append(np.transpose(temp)) x11 = [] x12 = [] x13 = [] x21 = [] x22 = [] x23 = [] x31 = [] x32 = [] x33 = [] for g in lat_gens: x11.append(g[0][0]) x12.append(g[0][1]) x13.append(g[0][2]) x21.append(g[1][0]) x22.append(g[1][1]) x23.append(g[1][2]) x31.append(g[2][0]) x32.append(g[2][1]) x33.append(g[2][2]) x11 = np.array(x11) x12 = np.array(x12) x13 = np.array(x13) x21 = np.array(x21) x22 = np.array(x22) x23 = np.array(x23) x31 = np.array(x31) x32 = np.array(x32) x33 = np.array(x33) count = 0 HNFs = [] for diag in diags: print('diag', diag) a = diag[0] c = diag[1] f = diag[2] if np.allclose(((x13 * f) % a), 0): d = None e = None b = None if (np.allclose(x13, 0) and (not np.allclose(x12, 0))): if (not np.allclose(((x12 * c) % a), 0)): continue b = 0 al1 = ((b * x12) / a) al2 = ((c * x12) / a) al3 = ((f * x13) / a) tHNFs = cdivs(a, b, c, d, e, f, al1, al2, al3, x11, x21, x22, x23, x31, x32, x33) for t in tHNFs: HNFs.append(t) count += 1 elif (np.allclose(x12, 0) and (not np.allclose(x13, 0))): vals = [] N = 0 xt = x13[np.nonzero(x13)] val = np.unique(((N * a) / xt)) while any((abs(val) < f)): for v in val: if (v < f): vals.append(v) N += 1 val = np.unique(((N * a) / xt)) for d in vals: for e in vals: al1 = ((d * x13) / a) al2 = ((e * x13) / a) al3 = ((f * x13) / a) tHNFs = cdivs(a, b, c, d, e, f, al1, al2, al3, x11, x21, x22, x23, x31, x32, x33) for t in tHNFs: HNFs.append(t) count += 1 else: for e in range(f): if np.allclose((((c * 
x12) + (e * x13)) % a), 0): for b in range(c): for d in range(f): if np.allclose((((b * x12) + (d * x13)) % a), 0): al1 = (((b * x12) + (d * x13)) / a) al2 = (((c * x12) + (e * x13)) / a) al3 = ((f * x13) / a) tHNFs = cdivs(a, b, c, d, e, f, al1, al2, al3, x11, x21, x22, x23, x31, x32, x33) for t in tHNFs: HNFs.append(t) count += 1 else: continue else: continue else: continue return HNFs<|docstring|>Finds the HNFs that preserve the symmetry of the lattice. Args: lat (numpy.ndarray): The vectors (as rows) of the parent lattice. n (int): The volume factor for the supercell. Returns: HNFs (list of lists): The HNFs the preserve the symmetry.<|endoftext|>
767bff398e874106f1109e6c2210c5dda2bf67e20c4db89023dc76515ee5cd24
def fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33): 'Finds the f divides conditions for the symmetry preserving HNFs.\n \n Args:\n a (int): a from the HNF.\n b (int): b from the HNF.\n c (int): c from the HNF.\n d (int): d from the HNF.\n e (int): e from the HNF.\n f (int): f from the HNF.\n al1 (numpy.array): array of alpha1 values from write up.\n al2 (numpy.array): array of alpha2 values from write up.\n be1 (numpy.array): array of beta1 values from write up.\n be2 (numpy.array): array of beta2 values from write up.\n x11 (numpy.array): array of pg values for x(1,1) spot.\n x22 (numpy.array): array of pg values for x(2,2) spot.\n x31 (numpy.array): array of pg values for x(3,1) spot.\n x32 (numpy.array): array of pg values for x(3,2) spot.\n x33 (numpy.array): array of pg values for x(3,3) spot.\n \n Returns:\n HNFs (list of lists): The symmetry preserving HNFs.\n ' HNFs = [] if ((b == None) and (d == None) and (e == None)): xvar1 = ((x33 - x22) - be2) xvar2 = ((x33 - x11) - al1) for b in range(c): for e in range(f): if (not np.allclose(xvar2, 0)): N = min(np.round(((((a * x31) + (b * x32)) - (be1 * e)) / f))) xt = xvar2[np.nonzero(xvar2)] val = np.unique(np.reshape(np.outer(((((N * f) - (a * x31)) - (b * x32)) + (be1 * e)), (1 / xt)), (len(xt) * len(x32)))) while any((abs(val) < f)): for v in val: if ((v < f) and (v >= 0) and np.allclose((v % 1), 0)): d = v f1 = ((((a * x31) + (b * x32)) + (d * var2)) - (be1 * e)) f2 = (((c * x32) - (d * al2)) + (e * ((x33 - x33) - be2))) if (np.allclose((f1 % f), 0) and np.allclose((f2 % f), 0)): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) N += 1 val = np.unique(np.reshape(np.outer(((((N * f) - (a * x31)) - (b * x32)) + (be1 * e)), (1 / xt)), (len(xt) * len(x32)))) elif (not np.allclose(al2, 0)): N = max(np.round((((c * x32) + (e * var1)) / f))) at = al2[np.nonzero(al2)] val = np.unique(np.reshape(np.outer(((((- N) * f) + (c * x32)) + (e * var1)), (1 / at)), (len(x32) * len(at)))) while 
any((abs(val) < f)): for v in val: if ((v < f) and (v >= 0) and np.allclose((v % 1), 0)): d = v f1 = ((((a * x31) + (b * x32)) + (d * var2)) - (be1 * e)) f2 = (((c * x32) - (d * al2)) + (e * ((x33 - x33) - be2))) if (np.allclose((f1 % f), 0) and np.allclose((f2 % f), 0)): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) N -= 1 val = np.unique(np.reshape(np.outer(((((- N) * f) + (c * x32)) + (e * var1)), (1 / at)), (len(x32) * len(at)))) else: for d in range(f): f1 = ((((a * x31) + (b * x32)) + (d * var2)) - (be1 * e)) f2 = (((c * x32) - (d * al2)) + (e * ((x33 - x33) - be2))) if (np.allclose((f1 % f), 0) and np.allclose((f2 % f), 0)): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) elif (b == None): f2 = (((c * x32) - (d * al2)) + (e * ((x33 - x22) - be2))) if np.allclose((f2 % f), 0): if (not np.allclose(x32, 0)): N = min(np.round(((a * x31) + ((d * ((x33 - x11) - al1)) / f)))) xt = x32[np.nonzero(x32)] val = np.unique(np.reshape(np.outer((((N * f) - (a * x31)) - (d * ((x33 - x11) - al1))), (1 / xt)), (len(x33) * len(xt)))) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v f1 = ((((a * x32) + (b * x32)) + (e * be1)) + (d * ((x33 - x11) - al1))) if np.allclose((f1 % f), 0): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) N += 1 val = np.unique(np.reshape(np.outer((((N * f) - (a * x31)) - (d * ((x33 - x11) - al1))), (1 / xt)), (len(x33) * len(xt)))) else: for b in range(c): f1 = ((((a * x32) + (b * x32)) + (e * be1)) + (d * ((x33 - x11) - al1))) if np.allclose((f1 % f), 0): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) elif ((d == None) and (e == None)): for e in range(f): if (not np.allclose(xvar2, 0)): N = min(np.round(((((a * x31) + (b * x32)) - (be1 * e)) / f))) xt = xvar2[np.nonzero(xvar2)] val = np.unique(np.reshape(np.outer(((((N * f) - (a * x31)) - (b * x32)) + (be1 * e)), (1 / xt)), (len(xt) * len(x32)))) while any((abs(val) < f)): for v in val: if ((v < f) and (v >= 0) 
and np.allclose((v % 1), 0)): d = v f1 = ((((a * x31) + (b * x32)) + (d * var2)) - (be1 * e)) f2 = (((c * x32) - (d * al2)) + (e * ((x33 - x33) - be2))) if (np.allclose((f1 % f), 0) and np.allclose((f2 % f), 0)): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) N += 1 val = np.unique(np.reshape(np.outer(((((N * f) - (a * x31)) - (b * x32)) + (be1 * e)), (1 / xt)), (len(xt) * len(x32)))) elif (not np.allclose(al2, 0)): N = max(np.round((((c * x32) + (e * var1)) / f))) at = al2[np.nonzero(al2)] val = np.unique(np.reshape(np.outer(((((- N) * f) + (c * x32)) + (e * var1)), (1 / at)), (len(x32) * len(at)))) while any((abs(val) < f)): for v in val: if ((v < f) and (v >= 0) and np.allclose((v % 1), 0)): d = v f1 = ((((a * x31) + (b * x32)) + (d * var2)) - (be1 * e)) f2 = (((c * x32) - (d * al2)) + (e * ((x33 - x33) - be2))) if (np.allclose((f1 % f), 0) and np.allclose((f2 % f), 0)): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) N -= 1 val = np.unique(np.reshape(np.outer(((((- N) * f) + (c * x32)) + (e * var1)), (1 / at)), (len(x32) * len(at)))) else: for d in range(f): f1 = ((((a * x31) + (b * x32)) + (d * var2)) - (be1 * e)) f2 = (((c * x32) - (d * al2)) + (e * ((x33 - x33) - be2))) if (np.allclose((f1 % f), 0) and np.allclose((f2 % f), 0)): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) elif ((e == None) or (d == None) or (b == None)): print('*****************ERROR IN fdivs**************') else: f2 = (((c * x32) - (d * al2)) + (e * ((x33 - x22) - be2))) f1 = ((((a * x31) + (b * x32)) + (e * be1)) + (d * ((x33 - x11) - al1))) if (np.allclose((f1 % f), 0) and np.allclose((f2 % f), 0)): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) return HNFs
Finds the f divides conditions for the symmetry preserving HNFs. Args: a (int): a from the HNF. b (int): b from the HNF. c (int): c from the HNF. d (int): d from the HNF. e (int): e from the HNF. f (int): f from the HNF. al1 (numpy.array): array of alpha1 values from write up. al2 (numpy.array): array of alpha2 values from write up. be1 (numpy.array): array of beta1 values from write up. be2 (numpy.array): array of beta2 values from write up. x11 (numpy.array): array of pg values for x(1,1) spot. x22 (numpy.array): array of pg values for x(2,2) spot. x31 (numpy.array): array of pg values for x(3,1) spot. x32 (numpy.array): array of pg values for x(3,2) spot. x33 (numpy.array): array of pg values for x(3,3) spot. Returns: HNFs (list of lists): The symmetry preserving HNFs.
support/brute_force/general_approach.py
fdivs
glwhart/autoGR
17
python
def fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33): 'Finds the f divides conditions for the symmetry preserving HNFs.\n \n Args:\n a (int): a from the HNF.\n b (int): b from the HNF.\n c (int): c from the HNF.\n d (int): d from the HNF.\n e (int): e from the HNF.\n f (int): f from the HNF.\n al1 (numpy.array): array of alpha1 values from write up.\n al2 (numpy.array): array of alpha2 values from write up.\n be1 (numpy.array): array of beta1 values from write up.\n be2 (numpy.array): array of beta2 values from write up.\n x11 (numpy.array): array of pg values for x(1,1) spot.\n x22 (numpy.array): array of pg values for x(2,2) spot.\n x31 (numpy.array): array of pg values for x(3,1) spot.\n x32 (numpy.array): array of pg values for x(3,2) spot.\n x33 (numpy.array): array of pg values for x(3,3) spot.\n \n Returns:\n HNFs (list of lists): The symmetry preserving HNFs.\n ' HNFs = [] if ((b == None) and (d == None) and (e == None)): xvar1 = ((x33 - x22) - be2) xvar2 = ((x33 - x11) - al1) for b in range(c): for e in range(f): if (not np.allclose(xvar2, 0)): N = min(np.round(((((a * x31) + (b * x32)) - (be1 * e)) / f))) xt = xvar2[np.nonzero(xvar2)] val = np.unique(np.reshape(np.outer(((((N * f) - (a * x31)) - (b * x32)) + (be1 * e)), (1 / xt)), (len(xt) * len(x32)))) while any((abs(val) < f)): for v in val: if ((v < f) and (v >= 0) and np.allclose((v % 1), 0)): d = v f1 = ((((a * x31) + (b * x32)) + (d * var2)) - (be1 * e)) f2 = (((c * x32) - (d * al2)) + (e * ((x33 - x33) - be2))) if (np.allclose((f1 % f), 0) and np.allclose((f2 % f), 0)): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) N += 1 val = np.unique(np.reshape(np.outer(((((N * f) - (a * x31)) - (b * x32)) + (be1 * e)), (1 / xt)), (len(xt) * len(x32)))) elif (not np.allclose(al2, 0)): N = max(np.round((((c * x32) + (e * var1)) / f))) at = al2[np.nonzero(al2)] val = np.unique(np.reshape(np.outer(((((- N) * f) + (c * x32)) + (e * var1)), (1 / at)), (len(x32) * len(at)))) while 
any((abs(val) < f)): for v in val: if ((v < f) and (v >= 0) and np.allclose((v % 1), 0)): d = v f1 = ((((a * x31) + (b * x32)) + (d * var2)) - (be1 * e)) f2 = (((c * x32) - (d * al2)) + (e * ((x33 - x33) - be2))) if (np.allclose((f1 % f), 0) and np.allclose((f2 % f), 0)): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) N -= 1 val = np.unique(np.reshape(np.outer(((((- N) * f) + (c * x32)) + (e * var1)), (1 / at)), (len(x32) * len(at)))) else: for d in range(f): f1 = ((((a * x31) + (b * x32)) + (d * var2)) - (be1 * e)) f2 = (((c * x32) - (d * al2)) + (e * ((x33 - x33) - be2))) if (np.allclose((f1 % f), 0) and np.allclose((f2 % f), 0)): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) elif (b == None): f2 = (((c * x32) - (d * al2)) + (e * ((x33 - x22) - be2))) if np.allclose((f2 % f), 0): if (not np.allclose(x32, 0)): N = min(np.round(((a * x31) + ((d * ((x33 - x11) - al1)) / f)))) xt = x32[np.nonzero(x32)] val = np.unique(np.reshape(np.outer((((N * f) - (a * x31)) - (d * ((x33 - x11) - al1))), (1 / xt)), (len(x33) * len(xt)))) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v f1 = ((((a * x32) + (b * x32)) + (e * be1)) + (d * ((x33 - x11) - al1))) if np.allclose((f1 % f), 0): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) N += 1 val = np.unique(np.reshape(np.outer((((N * f) - (a * x31)) - (d * ((x33 - x11) - al1))), (1 / xt)), (len(x33) * len(xt)))) else: for b in range(c): f1 = ((((a * x32) + (b * x32)) + (e * be1)) + (d * ((x33 - x11) - al1))) if np.allclose((f1 % f), 0): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) elif ((d == None) and (e == None)): for e in range(f): if (not np.allclose(xvar2, 0)): N = min(np.round(((((a * x31) + (b * x32)) - (be1 * e)) / f))) xt = xvar2[np.nonzero(xvar2)] val = np.unique(np.reshape(np.outer(((((N * f) - (a * x31)) - (b * x32)) + (be1 * e)), (1 / xt)), (len(xt) * len(x32)))) while any((abs(val) < f)): for v in val: if ((v < f) and (v >= 0) 
and np.allclose((v % 1), 0)): d = v f1 = ((((a * x31) + (b * x32)) + (d * var2)) - (be1 * e)) f2 = (((c * x32) - (d * al2)) + (e * ((x33 - x33) - be2))) if (np.allclose((f1 % f), 0) and np.allclose((f2 % f), 0)): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) N += 1 val = np.unique(np.reshape(np.outer(((((N * f) - (a * x31)) - (b * x32)) + (be1 * e)), (1 / xt)), (len(xt) * len(x32)))) elif (not np.allclose(al2, 0)): N = max(np.round((((c * x32) + (e * var1)) / f))) at = al2[np.nonzero(al2)] val = np.unique(np.reshape(np.outer(((((- N) * f) + (c * x32)) + (e * var1)), (1 / at)), (len(x32) * len(at)))) while any((abs(val) < f)): for v in val: if ((v < f) and (v >= 0) and np.allclose((v % 1), 0)): d = v f1 = ((((a * x31) + (b * x32)) + (d * var2)) - (be1 * e)) f2 = (((c * x32) - (d * al2)) + (e * ((x33 - x33) - be2))) if (np.allclose((f1 % f), 0) and np.allclose((f2 % f), 0)): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) N -= 1 val = np.unique(np.reshape(np.outer(((((- N) * f) + (c * x32)) + (e * var1)), (1 / at)), (len(x32) * len(at)))) else: for d in range(f): f1 = ((((a * x31) + (b * x32)) + (d * var2)) - (be1 * e)) f2 = (((c * x32) - (d * al2)) + (e * ((x33 - x33) - be2))) if (np.allclose((f1 % f), 0) and np.allclose((f2 % f), 0)): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) elif ((e == None) or (d == None) or (b == None)): print('*****************ERROR IN fdivs**************') else: f2 = (((c * x32) - (d * al2)) + (e * ((x33 - x22) - be2))) f1 = ((((a * x31) + (b * x32)) + (e * be1)) + (d * ((x33 - x11) - al1))) if (np.allclose((f1 % f), 0) and np.allclose((f2 % f), 0)): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) return HNFs
def fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33): 'Finds the f divides conditions for the symmetry preserving HNFs.\n \n Args:\n a (int): a from the HNF.\n b (int): b from the HNF.\n c (int): c from the HNF.\n d (int): d from the HNF.\n e (int): e from the HNF.\n f (int): f from the HNF.\n al1 (numpy.array): array of alpha1 values from write up.\n al2 (numpy.array): array of alpha2 values from write up.\n be1 (numpy.array): array of beta1 values from write up.\n be2 (numpy.array): array of beta2 values from write up.\n x11 (numpy.array): array of pg values for x(1,1) spot.\n x22 (numpy.array): array of pg values for x(2,2) spot.\n x31 (numpy.array): array of pg values for x(3,1) spot.\n x32 (numpy.array): array of pg values for x(3,2) spot.\n x33 (numpy.array): array of pg values for x(3,3) spot.\n \n Returns:\n HNFs (list of lists): The symmetry preserving HNFs.\n ' HNFs = [] if ((b == None) and (d == None) and (e == None)): xvar1 = ((x33 - x22) - be2) xvar2 = ((x33 - x11) - al1) for b in range(c): for e in range(f): if (not np.allclose(xvar2, 0)): N = min(np.round(((((a * x31) + (b * x32)) - (be1 * e)) / f))) xt = xvar2[np.nonzero(xvar2)] val = np.unique(np.reshape(np.outer(((((N * f) - (a * x31)) - (b * x32)) + (be1 * e)), (1 / xt)), (len(xt) * len(x32)))) while any((abs(val) < f)): for v in val: if ((v < f) and (v >= 0) and np.allclose((v % 1), 0)): d = v f1 = ((((a * x31) + (b * x32)) + (d * var2)) - (be1 * e)) f2 = (((c * x32) - (d * al2)) + (e * ((x33 - x33) - be2))) if (np.allclose((f1 % f), 0) and np.allclose((f2 % f), 0)): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) N += 1 val = np.unique(np.reshape(np.outer(((((N * f) - (a * x31)) - (b * x32)) + (be1 * e)), (1 / xt)), (len(xt) * len(x32)))) elif (not np.allclose(al2, 0)): N = max(np.round((((c * x32) + (e * var1)) / f))) at = al2[np.nonzero(al2)] val = np.unique(np.reshape(np.outer(((((- N) * f) + (c * x32)) + (e * var1)), (1 / at)), (len(x32) * len(at)))) while 
any((abs(val) < f)): for v in val: if ((v < f) and (v >= 0) and np.allclose((v % 1), 0)): d = v f1 = ((((a * x31) + (b * x32)) + (d * var2)) - (be1 * e)) f2 = (((c * x32) - (d * al2)) + (e * ((x33 - x33) - be2))) if (np.allclose((f1 % f), 0) and np.allclose((f2 % f), 0)): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) N -= 1 val = np.unique(np.reshape(np.outer(((((- N) * f) + (c * x32)) + (e * var1)), (1 / at)), (len(x32) * len(at)))) else: for d in range(f): f1 = ((((a * x31) + (b * x32)) + (d * var2)) - (be1 * e)) f2 = (((c * x32) - (d * al2)) + (e * ((x33 - x33) - be2))) if (np.allclose((f1 % f), 0) and np.allclose((f2 % f), 0)): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) elif (b == None): f2 = (((c * x32) - (d * al2)) + (e * ((x33 - x22) - be2))) if np.allclose((f2 % f), 0): if (not np.allclose(x32, 0)): N = min(np.round(((a * x31) + ((d * ((x33 - x11) - al1)) / f)))) xt = x32[np.nonzero(x32)] val = np.unique(np.reshape(np.outer((((N * f) - (a * x31)) - (d * ((x33 - x11) - al1))), (1 / xt)), (len(x33) * len(xt)))) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v f1 = ((((a * x32) + (b * x32)) + (e * be1)) + (d * ((x33 - x11) - al1))) if np.allclose((f1 % f), 0): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) N += 1 val = np.unique(np.reshape(np.outer((((N * f) - (a * x31)) - (d * ((x33 - x11) - al1))), (1 / xt)), (len(x33) * len(xt)))) else: for b in range(c): f1 = ((((a * x32) + (b * x32)) + (e * be1)) + (d * ((x33 - x11) - al1))) if np.allclose((f1 % f), 0): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) elif ((d == None) and (e == None)): for e in range(f): if (not np.allclose(xvar2, 0)): N = min(np.round(((((a * x31) + (b * x32)) - (be1 * e)) / f))) xt = xvar2[np.nonzero(xvar2)] val = np.unique(np.reshape(np.outer(((((N * f) - (a * x31)) - (b * x32)) + (be1 * e)), (1 / xt)), (len(xt) * len(x32)))) while any((abs(val) < f)): for v in val: if ((v < f) and (v >= 0) 
and np.allclose((v % 1), 0)): d = v f1 = ((((a * x31) + (b * x32)) + (d * var2)) - (be1 * e)) f2 = (((c * x32) - (d * al2)) + (e * ((x33 - x33) - be2))) if (np.allclose((f1 % f), 0) and np.allclose((f2 % f), 0)): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) N += 1 val = np.unique(np.reshape(np.outer(((((N * f) - (a * x31)) - (b * x32)) + (be1 * e)), (1 / xt)), (len(xt) * len(x32)))) elif (not np.allclose(al2, 0)): N = max(np.round((((c * x32) + (e * var1)) / f))) at = al2[np.nonzero(al2)] val = np.unique(np.reshape(np.outer(((((- N) * f) + (c * x32)) + (e * var1)), (1 / at)), (len(x32) * len(at)))) while any((abs(val) < f)): for v in val: if ((v < f) and (v >= 0) and np.allclose((v % 1), 0)): d = v f1 = ((((a * x31) + (b * x32)) + (d * var2)) - (be1 * e)) f2 = (((c * x32) - (d * al2)) + (e * ((x33 - x33) - be2))) if (np.allclose((f1 % f), 0) and np.allclose((f2 % f), 0)): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) N -= 1 val = np.unique(np.reshape(np.outer(((((- N) * f) + (c * x32)) + (e * var1)), (1 / at)), (len(x32) * len(at)))) else: for d in range(f): f1 = ((((a * x31) + (b * x32)) + (d * var2)) - (be1 * e)) f2 = (((c * x32) - (d * al2)) + (e * ((x33 - x33) - be2))) if (np.allclose((f1 % f), 0) and np.allclose((f2 % f), 0)): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) elif ((e == None) or (d == None) or (b == None)): print('*****************ERROR IN fdivs**************') else: f2 = (((c * x32) - (d * al2)) + (e * ((x33 - x22) - be2))) f1 = ((((a * x31) + (b * x32)) + (e * be1)) + (d * ((x33 - x11) - al1))) if (np.allclose((f1 % f), 0) and np.allclose((f2 % f), 0)): HNF = [[a, 0, 0], [b, c, 0], [d, e, f]] HNFs.append(HNF) return HNFs<|docstring|>Finds the f divides conditions for the symmetry preserving HNFs. Args: a (int): a from the HNF. b (int): b from the HNF. c (int): c from the HNF. d (int): d from the HNF. e (int): e from the HNF. f (int): f from the HNF. al1 (numpy.array): array of alpha1 values from write up. 
al2 (numpy.array): array of alpha2 values from write up. be1 (numpy.array): array of beta1 values from write up. be2 (numpy.array): array of beta2 values from write up. x11 (numpy.array): array of pg values for x(1,1) spot. x22 (numpy.array): array of pg values for x(2,2) spot. x31 (numpy.array): array of pg values for x(3,1) spot. x32 (numpy.array): array of pg values for x(3,2) spot. x33 (numpy.array): array of pg values for x(3,3) spot. Returns: HNFs (list of lists): The symmetry preserving HNFs.<|endoftext|>
96bee678e06c1c22cbda81d98e778abaadae7a6cf2262c767a55215ba07b482e
def cdivs(a, b, c, d, e, f, al1, al2, al3, x11, x21, x22, x23, x31, x32, x33): 'Finds the c divides conditions for the symmetry preserving HNFs.\n \n Args:\n a (int): a from the HNF.\n b (int): b from the HNF.\n c (int): c from the HNF.\n d (int): d from the HNF.\n e (int): e from the HNF.\n f (int): f from the HNF.\n al1 (numpy.array): array of alpha1 values from write up.\n al2 (numpy.array): array of alpha2 values from write up.\n al3 (numpy.array): array of alpha3 values from write up.\n x11 (numpy.array): array of pg values for x(1,1) spot.\n x21 (numpy.array): array of pg values for x(2,1) spot.\n x22 (numpy.array): array of pg values for x(2,2) spot.\n x23 (numpy.array): array of pg values for x(2,3) spot.\n x31 (numpy.array): array of pg values for x(3,1) spot.\n x32 (numpy.array): array of pg values for x(3,2) spot.\n x33 (numpy.array): array of pg values for x(3,3) spot.\n \n Returns:\n HNFs (list of lists): The symmetry preserving HNFs.\n ' HNFs = [] if np.allclose(x23, 0): if (b == None): if (not np.allclose(al3, 0)): N = 0 at = al3[np.nonzero(al3)] val = np.unique(((N * c) / at)) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose(((v % 1) == 0))): b = v c1 = ((a * x21) + (b * ((x22 - al1) - x11))) c2 = ((- b) * al2) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0)): be1 = (c1 / c) be2 = (c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N += 1 val = np.unique(((N * c) / at)) elif (not np.allclose(al2, 0)): N = 0 at = al2[np.nonzero(al2)] val = np.unique(((N * c) / at)) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v c1 = ((a * x21) + (b * ((x22 - al1) - x11))) c3 = ((- b) * al3) if (np.allclose((c1 % c), 0) and np.allclose((c3 % c), 0)): be1 = (c1 / c) be2 = (((- b) * al2) / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N += 1 val 
= np.unique(((N * c) / at)) elif (not np.allclose(((x22 - x11) - al1), 0)): N = 0 xt = ((x22 - x11) - al1) xt = xt[np.nonzero(xt)] val = np.unique(np.reshape(np.outer(((N * c) - (a * x21)), (1 / xt)), (len(x21) * len(xt)))) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v c2 = ((- b) * al2) c3 = ((- b) * al3) if (np.allclose((c2 % c), 0) and np.allclose((c3 % c), 0)): be1 = (((a * x21) + (b * ((x22 - x11) - al1))) / c) be2 = (((- b) * al2) / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in HNFs: HNFs.append(t) N += 1 xt = ((x22 - x11) - al1) xt = xt[np.nonzero(xt)] val = np.unique(np.reshape(np.outer(((N * c) - (a * x21)), (1 / xt)), (len(x21) * len(xt)))) else: c1 = (a * x21) c2 = 0 c3 = 0 if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0) and np.allclose((c3 % c), 0)): tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in HNFs: HNFs.append(t) else: c1 = ((a * x21) + (b * ((x22 - al1) - x11))) c2 = ((- b) * al2) c3 = ((- b) * a13) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0) and np.allclose((c3 % c), 0)): tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in HNFs: HNFs.append(t) elif np.allclose(al3, 0): if np.allclose(((f * x23) % c), 0): if ((b == None) and (e == None) and (d == None)): if (np.allclose(al3, 0) and np.allclose(al2, 0) and np.allclose(al3, 0)): N = 0 xt = x23[np.nonzero(x23)] val = np.unique(((N * c) / xt)) while any((abs(val) < f)): for v in val: if ((v < f) and (v >= 0) and np.allclose((v % 1), 0)): e = v for b in range(c): N2 = 0 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer((((N2 * c) - (a * x21)) - (b * (x22 - x11))), (1 / xt)), (len(x22) * len(xt)))) while any((abs(val2) < f)): for v2 in val2: if ((v2 < f) and (v2 >= 0) and np.allclose((v2 % 1), 0)): d = v2 be1 = ((((a * x21) + (b * (x22 - x11))) + (d * x23)) / c) be2 = ((e * x23) / c) tHNFs = 
fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.appned(t) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer((((N2 * c) - (a * x21)) - (b * (x22 - x11))), (1 / xt)), (len(x22) * len(xt)))) N += 1 val = np.unique(((N * c) / xt)) elif (not np.allclose(al3, 0)): N = max(np.round(((f * x23) / c))) at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer((((- N) * c) + (f * x23)), (1 / at)), (len(x23) * len(al3)))) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v N2 = min(np.round((((- b) * al2) / c))) xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(xt) * len(al2)))) while any((abs(val2) < f)): for v2 in val2: if ((v2 < f) and (v2 >= 0) and np.allclose((v2 % 1), 0)): e = v2 N3 = min(np.round((((a * x21) + (b * ((x22 - x11) - al1))) / c))) xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(xt) * len(x22)))) while any((abs(val2) < f)): for v3 in val3: if ((v3 < f) and (v3 >= 0) and np.allclose((v3 % 1), 0)): d = v3 be1 = ((((a * x21) + (b * ((x22 - x11) - al1))) + (d * x23)) / c) be2 = (((e * x32) - (b * al2)) / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N3 += 1 xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(xt) * len(x22)))) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(x22) * len(xt)))) N -= 1 at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer((((- N) * c) + (f * x23)), (1 / at)), (len(x23) * len(at)))) else: for b in range(c): N2 = min(np.round((((- b) * al2) / c))) xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(xt) * len(al2)))) while 
any((abs(val2) < f)): for v2 in val2: if ((v2 < f) and (v2 >= 0) and np.allclose((v2 % 1), 0)): e = v2 N3 = min(np.round((((a * x21) + (b * ((x22 - x11) - al1))) / c))) xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(x22) * len(xt)))) while any((abs(val2) < f)): for v3 in val3: if ((v3 < f) and (v3 >= 0) and np.allclose((v3 % 1), 0)): d = v3 be1 = ((((a * x21) + (b * ((x22 - x11) - al1))) + (d * x23)) / c) be2 = (((e * x32) - (b * al2)) / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N3 += 1 xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(xt) * len(x22)))) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(al2) * len(xt)))) elif (b == None): if (not np.allclose(al3, 0)): N = max(np.round(((f * x23) / c))) at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer((((- N) * c) + (f * x23)), (1 / at)), (len(x23) * len(at)))) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v c1 = (((a * x21) + (b * ((x22 - x11) - al1))) + (d * x23)) c2 = (((- b) * al2) + (e * x23)) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0)): be1 = (c1 / c) be2 = (c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N -= 1 at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer((((- N) * c) + (f * x23)), (1 / at)), (len(x23) * len(at)))) elif (not np.allclose(al2, 0)): N = max(np.round(((e * x23) / c))) at = al2[np.nonzero(al2)] val = np.unique(np.reshape(np.outer((((- N) * c) + (e * x23)), (1 / at)), (len(x23) * len(at)))) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v c1 = (((a * x21) + (b * ((x22 - x11) - al1))) + (d * 
x23)) c2 = (((- b) * al2) + (e * x23)) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0)): be1 = (c1 / c) be2 = (c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N -= 1 at = al2[np.nonzero(al2)] val = np.unique(np.reshape(np.outer((((- N) * c) + (e * x23)), (1 / at)), (len(x23) * len(at)))) elif (not np.allclose(((x22 - x11) - al1), 0)): N = min(np.round((((a * x21) - (d * x23)) / c))) xt = ((x22 - x11) - al1) xt = xt[np.nonzero(xt)] val = np.unique(np.reshape(np.outer(((N * c) - ((a * x21sd) * x23)), (1 / xt)), (len(x23) * len(xt)))) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v c1 = (((a * x21) + (b * ((x22 - x11) - al1))) + (d * x23)) c2 = (((- b) * al2) + (e * x23)) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0)): be1 = (c1 / c) be2 = (c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N += 1 xt = ((x22 - x11) - al1) xt = xt[np.nonzero(xt)] val = np.unique(np.reshape(np.outer(((N * c) - ((a * x21sd) * x23)), (1 / xt)), (len(x23) * len(xt)))) else: c1 = ((a * x21) + (d * x23)) c2 = (e * x23) c3 = (f * x23) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0) and np.allclose((c3 % c), 0)): tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) elif ((d == None) and (e == None)): N2 = min(np.round((((- b) * al2) / c))) xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(xt) * len(al2)))) while any((abs(val2) < f)): for v2 in val2: if ((v2 < f) and (v2 >= 0) and np.allclose((v2 % 1), 0)): e = v2 N3 = min(np.round((((a * x21) + (b * ((x22 - x11) - al1))) / c))) xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(x22) * len(xt)))) while any((abs(val3) < f)): for v3 in val3: if 
((v3 < f) and (v3 >= 0) and np.allclose((v3 % 1), 0)): d = v3 be1 = ((((a * x21) + (b * ((x22 - x11) - al1))) + (d * x23)) / c) be2 = (((e * x32) - (b * al2)) / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N3 += 1 xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(x22) * len(xt)))) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(xt) * len(al2)))) else: c1 = (((a * x21) + (b * ((x22 - al1) - x11))) + (d * x23)) c2 = (((- b) * al2) + (e * x23)) c3 = (((- b) * al3) + (f * x23)) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0) and np.allclose((c3 % c), 0)): be1 = (c1 / c) be2 = (c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) elif ((b == None) and (d == None) and (e == None)): N = max(np.round(((f * x23) / c))) at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer((((- N) * c) + (f * x23)), (1 / at)), (len(x23) * len(at)))) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v N2 = min(np.round((((- b) * al2) / c))) xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(xt) * len(al2)))) while any((abs(val2) < f)): for v2 in val2: if ((v2 < f) and (v2 >= 0) and np.allclose((v2 % 1), 0)): e = v2 N3 = min(np.round((((a * x21) + (b * ((x22 - x11) - al1))) / c))) xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(x22) * len(xt)))) while any((abs(val3) < f)): for v3 in val3: if ((v3 < f) and (v3 >= 0) and np.allclose((v3 % 1), 0)): d = v3 c1 = (((a * x21) + (b * ((x22 - x11) - al1))) + (d * x23)) c2 = (((- b) * al2) + (e * x23)) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0)): be1 = (c1 / c) be2 = 
(c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N3 += 1 xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(x22) * len(xt)))) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(xt) * len(al2)))) N -= 1 at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer((((- N) * c) + (f * x23)), (1 / at)), (len(x23) * len(at)))) elif (b == None): N = max(np.round(((f * x23) / c))) at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer((((- N) * c) + (f * x23)), (1 / at)), (len(x23) * len(at)))) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v c1 = (((a * x21) + (b * ((x22 - x11) - al1))) + (d * x23)) c2 = (((- b) * al2) + (e * x23)) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0)): be1 = (c1 / c) be2 = (c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N -= 1 at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer((((- N) * c) + (f * x23)), (1 / at)), (len(x23) * len(at)))) elif ((d == None) and (e == None)): N2 = min(np.round((((- b) * al2) / c))) xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(xt) * len(al2)))) while any((abs(val2) < f)): for v2 in val2: if ((v2 < f) and (v2 >= 0) and np.allclose((v2 % 1), 0)): e = v2 N3 = min(np.round((((a * x21) + (b * ((x22 - x11) - al1))) / c))) xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(x22) * len(xt)))) while any((abs(val3) < f)): for v3 in val3: if ((v3 < f) and (v3 >= 0) and np.allclose((v3 % 1), 0)): d = v3 c1 = (((a * x21) + (b * ((x22 - x11) - al1))) + (d * x23)) c2 = (((- b) * al2) + (e * x23)) if (np.allclose((c1 % c), 0) and 
np.allclose((c2 % c), 0)): be1 = (c1 / c) be2 = (c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N3 += 1 xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(x22) * len(xt)))) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(xt) * len(al2)))) else: be1 = (c1 / c) be2 = (c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) return HNFs
Finds the c divides conditions for the symmetry preserving HNFs. Args: a (int): a from the HNF. b (int): b from the HNF. c (int): c from the HNF. d (int): d from the HNF. e (int): e from the HNF. f (int): f from the HNF. al1 (numpy.array): array of alpha1 values from write up. al2 (numpy.array): array of alpha2 values from write up. al3 (numpy.array): array of alpha3 values from write up. x11 (numpy.array): array of pg values for x(1,1) spot. x21 (numpy.array): array of pg values for x(2,1) spot. x22 (numpy.array): array of pg values for x(2,2) spot. x23 (numpy.array): array of pg values for x(2,3) spot. x31 (numpy.array): array of pg values for x(3,1) spot. x32 (numpy.array): array of pg values for x(3,2) spot. x33 (numpy.array): array of pg values for x(3,3) spot. Returns: HNFs (list of lists): The symmetry preserving HNFs.
support/brute_force/general_approach.py
cdivs
glwhart/autoGR
17
python
def cdivs(a, b, c, d, e, f, al1, al2, al3, x11, x21, x22, x23, x31, x32, x33): 'Finds the c divides conditions for the symmetry preserving HNFs.\n \n Args:\n a (int): a from the HNF.\n b (int): b from the HNF.\n c (int): c from the HNF.\n d (int): d from the HNF.\n e (int): e from the HNF.\n f (int): f from the HNF.\n al1 (numpy.array): array of alpha1 values from write up.\n al2 (numpy.array): array of alpha2 values from write up.\n al3 (numpy.array): array of alpha3 values from write up.\n x11 (numpy.array): array of pg values for x(1,1) spot.\n x21 (numpy.array): array of pg values for x(2,1) spot.\n x22 (numpy.array): array of pg values for x(2,2) spot.\n x23 (numpy.array): array of pg values for x(2,3) spot.\n x31 (numpy.array): array of pg values for x(3,1) spot.\n x32 (numpy.array): array of pg values for x(3,2) spot.\n x33 (numpy.array): array of pg values for x(3,3) spot.\n \n Returns:\n HNFs (list of lists): The symmetry preserving HNFs.\n ' HNFs = [] if np.allclose(x23, 0): if (b == None): if (not np.allclose(al3, 0)): N = 0 at = al3[np.nonzero(al3)] val = np.unique(((N * c) / at)) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose(((v % 1) == 0))): b = v c1 = ((a * x21) + (b * ((x22 - al1) - x11))) c2 = ((- b) * al2) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0)): be1 = (c1 / c) be2 = (c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N += 1 val = np.unique(((N * c) / at)) elif (not np.allclose(al2, 0)): N = 0 at = al2[np.nonzero(al2)] val = np.unique(((N * c) / at)) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v c1 = ((a * x21) + (b * ((x22 - al1) - x11))) c3 = ((- b) * al3) if (np.allclose((c1 % c), 0) and np.allclose((c3 % c), 0)): be1 = (c1 / c) be2 = (((- b) * al2) / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N += 1 val 
= np.unique(((N * c) / at)) elif (not np.allclose(((x22 - x11) - al1), 0)): N = 0 xt = ((x22 - x11) - al1) xt = xt[np.nonzero(xt)] val = np.unique(np.reshape(np.outer(((N * c) - (a * x21)), (1 / xt)), (len(x21) * len(xt)))) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v c2 = ((- b) * al2) c3 = ((- b) * al3) if (np.allclose((c2 % c), 0) and np.allclose((c3 % c), 0)): be1 = (((a * x21) + (b * ((x22 - x11) - al1))) / c) be2 = (((- b) * al2) / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in HNFs: HNFs.append(t) N += 1 xt = ((x22 - x11) - al1) xt = xt[np.nonzero(xt)] val = np.unique(np.reshape(np.outer(((N * c) - (a * x21)), (1 / xt)), (len(x21) * len(xt)))) else: c1 = (a * x21) c2 = 0 c3 = 0 if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0) and np.allclose((c3 % c), 0)): tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in HNFs: HNFs.append(t) else: c1 = ((a * x21) + (b * ((x22 - al1) - x11))) c2 = ((- b) * al2) c3 = ((- b) * a13) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0) and np.allclose((c3 % c), 0)): tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in HNFs: HNFs.append(t) elif np.allclose(al3, 0): if np.allclose(((f * x23) % c), 0): if ((b == None) and (e == None) and (d == None)): if (np.allclose(al3, 0) and np.allclose(al2, 0) and np.allclose(al3, 0)): N = 0 xt = x23[np.nonzero(x23)] val = np.unique(((N * c) / xt)) while any((abs(val) < f)): for v in val: if ((v < f) and (v >= 0) and np.allclose((v % 1), 0)): e = v for b in range(c): N2 = 0 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer((((N2 * c) - (a * x21)) - (b * (x22 - x11))), (1 / xt)), (len(x22) * len(xt)))) while any((abs(val2) < f)): for v2 in val2: if ((v2 < f) and (v2 >= 0) and np.allclose((v2 % 1), 0)): d = v2 be1 = ((((a * x21) + (b * (x22 - x11))) + (d * x23)) / c) be2 = ((e * x23) / c) tHNFs = 
fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.appned(t) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer((((N2 * c) - (a * x21)) - (b * (x22 - x11))), (1 / xt)), (len(x22) * len(xt)))) N += 1 val = np.unique(((N * c) / xt)) elif (not np.allclose(al3, 0)): N = max(np.round(((f * x23) / c))) at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer((((- N) * c) + (f * x23)), (1 / at)), (len(x23) * len(al3)))) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v N2 = min(np.round((((- b) * al2) / c))) xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(xt) * len(al2)))) while any((abs(val2) < f)): for v2 in val2: if ((v2 < f) and (v2 >= 0) and np.allclose((v2 % 1), 0)): e = v2 N3 = min(np.round((((a * x21) + (b * ((x22 - x11) - al1))) / c))) xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(xt) * len(x22)))) while any((abs(val2) < f)): for v3 in val3: if ((v3 < f) and (v3 >= 0) and np.allclose((v3 % 1), 0)): d = v3 be1 = ((((a * x21) + (b * ((x22 - x11) - al1))) + (d * x23)) / c) be2 = (((e * x32) - (b * al2)) / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N3 += 1 xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(xt) * len(x22)))) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(x22) * len(xt)))) N -= 1 at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer((((- N) * c) + (f * x23)), (1 / at)), (len(x23) * len(at)))) else: for b in range(c): N2 = min(np.round((((- b) * al2) / c))) xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(xt) * len(al2)))) while 
any((abs(val2) < f)): for v2 in val2: if ((v2 < f) and (v2 >= 0) and np.allclose((v2 % 1), 0)): e = v2 N3 = min(np.round((((a * x21) + (b * ((x22 - x11) - al1))) / c))) xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(x22) * len(xt)))) while any((abs(val2) < f)): for v3 in val3: if ((v3 < f) and (v3 >= 0) and np.allclose((v3 % 1), 0)): d = v3 be1 = ((((a * x21) + (b * ((x22 - x11) - al1))) + (d * x23)) / c) be2 = (((e * x32) - (b * al2)) / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N3 += 1 xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(xt) * len(x22)))) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(al2) * len(xt)))) elif (b == None): if (not np.allclose(al3, 0)): N = max(np.round(((f * x23) / c))) at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer((((- N) * c) + (f * x23)), (1 / at)), (len(x23) * len(at)))) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v c1 = (((a * x21) + (b * ((x22 - x11) - al1))) + (d * x23)) c2 = (((- b) * al2) + (e * x23)) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0)): be1 = (c1 / c) be2 = (c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N -= 1 at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer((((- N) * c) + (f * x23)), (1 / at)), (len(x23) * len(at)))) elif (not np.allclose(al2, 0)): N = max(np.round(((e * x23) / c))) at = al2[np.nonzero(al2)] val = np.unique(np.reshape(np.outer((((- N) * c) + (e * x23)), (1 / at)), (len(x23) * len(at)))) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v c1 = (((a * x21) + (b * ((x22 - x11) - al1))) + (d * 
x23)) c2 = (((- b) * al2) + (e * x23)) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0)): be1 = (c1 / c) be2 = (c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N -= 1 at = al2[np.nonzero(al2)] val = np.unique(np.reshape(np.outer((((- N) * c) + (e * x23)), (1 / at)), (len(x23) * len(at)))) elif (not np.allclose(((x22 - x11) - al1), 0)): N = min(np.round((((a * x21) - (d * x23)) / c))) xt = ((x22 - x11) - al1) xt = xt[np.nonzero(xt)] val = np.unique(np.reshape(np.outer(((N * c) - ((a * x21sd) * x23)), (1 / xt)), (len(x23) * len(xt)))) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v c1 = (((a * x21) + (b * ((x22 - x11) - al1))) + (d * x23)) c2 = (((- b) * al2) + (e * x23)) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0)): be1 = (c1 / c) be2 = (c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N += 1 xt = ((x22 - x11) - al1) xt = xt[np.nonzero(xt)] val = np.unique(np.reshape(np.outer(((N * c) - ((a * x21sd) * x23)), (1 / xt)), (len(x23) * len(xt)))) else: c1 = ((a * x21) + (d * x23)) c2 = (e * x23) c3 = (f * x23) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0) and np.allclose((c3 % c), 0)): tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) elif ((d == None) and (e == None)): N2 = min(np.round((((- b) * al2) / c))) xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(xt) * len(al2)))) while any((abs(val2) < f)): for v2 in val2: if ((v2 < f) and (v2 >= 0) and np.allclose((v2 % 1), 0)): e = v2 N3 = min(np.round((((a * x21) + (b * ((x22 - x11) - al1))) / c))) xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(x22) * len(xt)))) while any((abs(val3) < f)): for v3 in val3: if 
((v3 < f) and (v3 >= 0) and np.allclose((v3 % 1), 0)): d = v3 be1 = ((((a * x21) + (b * ((x22 - x11) - al1))) + (d * x23)) / c) be2 = (((e * x32) - (b * al2)) / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N3 += 1 xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(x22) * len(xt)))) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(xt) * len(al2)))) else: c1 = (((a * x21) + (b * ((x22 - al1) - x11))) + (d * x23)) c2 = (((- b) * al2) + (e * x23)) c3 = (((- b) * al3) + (f * x23)) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0) and np.allclose((c3 % c), 0)): be1 = (c1 / c) be2 = (c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) elif ((b == None) and (d == None) and (e == None)): N = max(np.round(((f * x23) / c))) at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer((((- N) * c) + (f * x23)), (1 / at)), (len(x23) * len(at)))) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v N2 = min(np.round((((- b) * al2) / c))) xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(xt) * len(al2)))) while any((abs(val2) < f)): for v2 in val2: if ((v2 < f) and (v2 >= 0) and np.allclose((v2 % 1), 0)): e = v2 N3 = min(np.round((((a * x21) + (b * ((x22 - x11) - al1))) / c))) xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(x22) * len(xt)))) while any((abs(val3) < f)): for v3 in val3: if ((v3 < f) and (v3 >= 0) and np.allclose((v3 % 1), 0)): d = v3 c1 = (((a * x21) + (b * ((x22 - x11) - al1))) + (d * x23)) c2 = (((- b) * al2) + (e * x23)) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0)): be1 = (c1 / c) be2 = 
(c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N3 += 1 xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(x22) * len(xt)))) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(xt) * len(al2)))) N -= 1 at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer((((- N) * c) + (f * x23)), (1 / at)), (len(x23) * len(at)))) elif (b == None): N = max(np.round(((f * x23) / c))) at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer((((- N) * c) + (f * x23)), (1 / at)), (len(x23) * len(at)))) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v c1 = (((a * x21) + (b * ((x22 - x11) - al1))) + (d * x23)) c2 = (((- b) * al2) + (e * x23)) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0)): be1 = (c1 / c) be2 = (c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N -= 1 at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer((((- N) * c) + (f * x23)), (1 / at)), (len(x23) * len(at)))) elif ((d == None) and (e == None)): N2 = min(np.round((((- b) * al2) / c))) xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(xt) * len(al2)))) while any((abs(val2) < f)): for v2 in val2: if ((v2 < f) and (v2 >= 0) and np.allclose((v2 % 1), 0)): e = v2 N3 = min(np.round((((a * x21) + (b * ((x22 - x11) - al1))) / c))) xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(x22) * len(xt)))) while any((abs(val3) < f)): for v3 in val3: if ((v3 < f) and (v3 >= 0) and np.allclose((v3 % 1), 0)): d = v3 c1 = (((a * x21) + (b * ((x22 - x11) - al1))) + (d * x23)) c2 = (((- b) * al2) + (e * x23)) if (np.allclose((c1 % c), 0) and 
np.allclose((c2 % c), 0)): be1 = (c1 / c) be2 = (c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N3 += 1 xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(x22) * len(xt)))) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(xt) * len(al2)))) else: be1 = (c1 / c) be2 = (c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) return HNFs
def cdivs(a, b, c, d, e, f, al1, al2, al3, x11, x21, x22, x23, x31, x32, x33): 'Finds the c divides conditions for the symmetry preserving HNFs.\n \n Args:\n a (int): a from the HNF.\n b (int): b from the HNF.\n c (int): c from the HNF.\n d (int): d from the HNF.\n e (int): e from the HNF.\n f (int): f from the HNF.\n al1 (numpy.array): array of alpha1 values from write up.\n al2 (numpy.array): array of alpha2 values from write up.\n al3 (numpy.array): array of alpha3 values from write up.\n x11 (numpy.array): array of pg values for x(1,1) spot.\n x21 (numpy.array): array of pg values for x(2,1) spot.\n x22 (numpy.array): array of pg values for x(2,2) spot.\n x23 (numpy.array): array of pg values for x(2,3) spot.\n x31 (numpy.array): array of pg values for x(3,1) spot.\n x32 (numpy.array): array of pg values for x(3,2) spot.\n x33 (numpy.array): array of pg values for x(3,3) spot.\n \n Returns:\n HNFs (list of lists): The symmetry preserving HNFs.\n ' HNFs = [] if np.allclose(x23, 0): if (b == None): if (not np.allclose(al3, 0)): N = 0 at = al3[np.nonzero(al3)] val = np.unique(((N * c) / at)) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose(((v % 1) == 0))): b = v c1 = ((a * x21) + (b * ((x22 - al1) - x11))) c2 = ((- b) * al2) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0)): be1 = (c1 / c) be2 = (c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N += 1 val = np.unique(((N * c) / at)) elif (not np.allclose(al2, 0)): N = 0 at = al2[np.nonzero(al2)] val = np.unique(((N * c) / at)) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v c1 = ((a * x21) + (b * ((x22 - al1) - x11))) c3 = ((- b) * al3) if (np.allclose((c1 % c), 0) and np.allclose((c3 % c), 0)): be1 = (c1 / c) be2 = (((- b) * al2) / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N += 1 val 
= np.unique(((N * c) / at)) elif (not np.allclose(((x22 - x11) - al1), 0)): N = 0 xt = ((x22 - x11) - al1) xt = xt[np.nonzero(xt)] val = np.unique(np.reshape(np.outer(((N * c) - (a * x21)), (1 / xt)), (len(x21) * len(xt)))) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v c2 = ((- b) * al2) c3 = ((- b) * al3) if (np.allclose((c2 % c), 0) and np.allclose((c3 % c), 0)): be1 = (((a * x21) + (b * ((x22 - x11) - al1))) / c) be2 = (((- b) * al2) / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in HNFs: HNFs.append(t) N += 1 xt = ((x22 - x11) - al1) xt = xt[np.nonzero(xt)] val = np.unique(np.reshape(np.outer(((N * c) - (a * x21)), (1 / xt)), (len(x21) * len(xt)))) else: c1 = (a * x21) c2 = 0 c3 = 0 if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0) and np.allclose((c3 % c), 0)): tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in HNFs: HNFs.append(t) else: c1 = ((a * x21) + (b * ((x22 - al1) - x11))) c2 = ((- b) * al2) c3 = ((- b) * a13) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0) and np.allclose((c3 % c), 0)): tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in HNFs: HNFs.append(t) elif np.allclose(al3, 0): if np.allclose(((f * x23) % c), 0): if ((b == None) and (e == None) and (d == None)): if (np.allclose(al3, 0) and np.allclose(al2, 0) and np.allclose(al3, 0)): N = 0 xt = x23[np.nonzero(x23)] val = np.unique(((N * c) / xt)) while any((abs(val) < f)): for v in val: if ((v < f) and (v >= 0) and np.allclose((v % 1), 0)): e = v for b in range(c): N2 = 0 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer((((N2 * c) - (a * x21)) - (b * (x22 - x11))), (1 / xt)), (len(x22) * len(xt)))) while any((abs(val2) < f)): for v2 in val2: if ((v2 < f) and (v2 >= 0) and np.allclose((v2 % 1), 0)): d = v2 be1 = ((((a * x21) + (b * (x22 - x11))) + (d * x23)) / c) be2 = ((e * x23) / c) tHNFs = 
fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.appned(t) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer((((N2 * c) - (a * x21)) - (b * (x22 - x11))), (1 / xt)), (len(x22) * len(xt)))) N += 1 val = np.unique(((N * c) / xt)) elif (not np.allclose(al3, 0)): N = max(np.round(((f * x23) / c))) at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer((((- N) * c) + (f * x23)), (1 / at)), (len(x23) * len(al3)))) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v N2 = min(np.round((((- b) * al2) / c))) xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(xt) * len(al2)))) while any((abs(val2) < f)): for v2 in val2: if ((v2 < f) and (v2 >= 0) and np.allclose((v2 % 1), 0)): e = v2 N3 = min(np.round((((a * x21) + (b * ((x22 - x11) - al1))) / c))) xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(xt) * len(x22)))) while any((abs(val2) < f)): for v3 in val3: if ((v3 < f) and (v3 >= 0) and np.allclose((v3 % 1), 0)): d = v3 be1 = ((((a * x21) + (b * ((x22 - x11) - al1))) + (d * x23)) / c) be2 = (((e * x32) - (b * al2)) / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N3 += 1 xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(xt) * len(x22)))) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(x22) * len(xt)))) N -= 1 at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer((((- N) * c) + (f * x23)), (1 / at)), (len(x23) * len(at)))) else: for b in range(c): N2 = min(np.round((((- b) * al2) / c))) xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(xt) * len(al2)))) while 
any((abs(val2) < f)): for v2 in val2: if ((v2 < f) and (v2 >= 0) and np.allclose((v2 % 1), 0)): e = v2 N3 = min(np.round((((a * x21) + (b * ((x22 - x11) - al1))) / c))) xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(x22) * len(xt)))) while any((abs(val2) < f)): for v3 in val3: if ((v3 < f) and (v3 >= 0) and np.allclose((v3 % 1), 0)): d = v3 be1 = ((((a * x21) + (b * ((x22 - x11) - al1))) + (d * x23)) / c) be2 = (((e * x32) - (b * al2)) / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N3 += 1 xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(xt) * len(x22)))) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(al2) * len(xt)))) elif (b == None): if (not np.allclose(al3, 0)): N = max(np.round(((f * x23) / c))) at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer((((- N) * c) + (f * x23)), (1 / at)), (len(x23) * len(at)))) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v c1 = (((a * x21) + (b * ((x22 - x11) - al1))) + (d * x23)) c2 = (((- b) * al2) + (e * x23)) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0)): be1 = (c1 / c) be2 = (c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N -= 1 at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer((((- N) * c) + (f * x23)), (1 / at)), (len(x23) * len(at)))) elif (not np.allclose(al2, 0)): N = max(np.round(((e * x23) / c))) at = al2[np.nonzero(al2)] val = np.unique(np.reshape(np.outer((((- N) * c) + (e * x23)), (1 / at)), (len(x23) * len(at)))) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v c1 = (((a * x21) + (b * ((x22 - x11) - al1))) + (d * 
x23)) c2 = (((- b) * al2) + (e * x23)) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0)): be1 = (c1 / c) be2 = (c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N -= 1 at = al2[np.nonzero(al2)] val = np.unique(np.reshape(np.outer((((- N) * c) + (e * x23)), (1 / at)), (len(x23) * len(at)))) elif (not np.allclose(((x22 - x11) - al1), 0)): N = min(np.round((((a * x21) - (d * x23)) / c))) xt = ((x22 - x11) - al1) xt = xt[np.nonzero(xt)] val = np.unique(np.reshape(np.outer(((N * c) - ((a * x21sd) * x23)), (1 / xt)), (len(x23) * len(xt)))) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v c1 = (((a * x21) + (b * ((x22 - x11) - al1))) + (d * x23)) c2 = (((- b) * al2) + (e * x23)) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0)): be1 = (c1 / c) be2 = (c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N += 1 xt = ((x22 - x11) - al1) xt = xt[np.nonzero(xt)] val = np.unique(np.reshape(np.outer(((N * c) - ((a * x21sd) * x23)), (1 / xt)), (len(x23) * len(xt)))) else: c1 = ((a * x21) + (d * x23)) c2 = (e * x23) c3 = (f * x23) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0) and np.allclose((c3 % c), 0)): tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) elif ((d == None) and (e == None)): N2 = min(np.round((((- b) * al2) / c))) xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(xt) * len(al2)))) while any((abs(val2) < f)): for v2 in val2: if ((v2 < f) and (v2 >= 0) and np.allclose((v2 % 1), 0)): e = v2 N3 = min(np.round((((a * x21) + (b * ((x22 - x11) - al1))) / c))) xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(x22) * len(xt)))) while any((abs(val3) < f)): for v3 in val3: if 
((v3 < f) and (v3 >= 0) and np.allclose((v3 % 1), 0)): d = v3 be1 = ((((a * x21) + (b * ((x22 - x11) - al1))) + (d * x23)) / c) be2 = (((e * x32) - (b * al2)) / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N3 += 1 xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(x22) * len(xt)))) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(xt) * len(al2)))) else: c1 = (((a * x21) + (b * ((x22 - al1) - x11))) + (d * x23)) c2 = (((- b) * al2) + (e * x23)) c3 = (((- b) * al3) + (f * x23)) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0) and np.allclose((c3 % c), 0)): be1 = (c1 / c) be2 = (c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) elif ((b == None) and (d == None) and (e == None)): N = max(np.round(((f * x23) / c))) at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer((((- N) * c) + (f * x23)), (1 / at)), (len(x23) * len(at)))) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v N2 = min(np.round((((- b) * al2) / c))) xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(xt) * len(al2)))) while any((abs(val2) < f)): for v2 in val2: if ((v2 < f) and (v2 >= 0) and np.allclose((v2 % 1), 0)): e = v2 N3 = min(np.round((((a * x21) + (b * ((x22 - x11) - al1))) / c))) xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(x22) * len(xt)))) while any((abs(val3) < f)): for v3 in val3: if ((v3 < f) and (v3 >= 0) and np.allclose((v3 % 1), 0)): d = v3 c1 = (((a * x21) + (b * ((x22 - x11) - al1))) + (d * x23)) c2 = (((- b) * al2) + (e * x23)) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0)): be1 = (c1 / c) be2 = 
(c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N3 += 1 xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(x22) * len(xt)))) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(xt) * len(al2)))) N -= 1 at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer((((- N) * c) + (f * x23)), (1 / at)), (len(x23) * len(at)))) elif (b == None): N = max(np.round(((f * x23) / c))) at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer((((- N) * c) + (f * x23)), (1 / at)), (len(x23) * len(at)))) while any((abs(val) < c)): for v in val: if ((v < c) and (v >= 0) and np.allclose((v % 1), 0)): b = v c1 = (((a * x21) + (b * ((x22 - x11) - al1))) + (d * x23)) c2 = (((- b) * al2) + (e * x23)) if (np.allclose((c1 % c), 0) and np.allclose((c2 % c), 0)): be1 = (c1 / c) be2 = (c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N -= 1 at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer((((- N) * c) + (f * x23)), (1 / at)), (len(x23) * len(at)))) elif ((d == None) and (e == None)): N2 = min(np.round((((- b) * al2) / c))) xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(xt) * len(al2)))) while any((abs(val2) < f)): for v2 in val2: if ((v2 < f) and (v2 >= 0) and np.allclose((v2 % 1), 0)): e = v2 N3 = min(np.round((((a * x21) + (b * ((x22 - x11) - al1))) / c))) xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(x22) * len(xt)))) while any((abs(val3) < f)): for v3 in val3: if ((v3 < f) and (v3 >= 0) and np.allclose((v3 % 1), 0)): d = v3 c1 = (((a * x21) + (b * ((x22 - x11) - al1))) + (d * x23)) c2 = (((- b) * al2) + (e * x23)) if (np.allclose((c1 % c), 0) and 
np.allclose((c2 % c), 0)): be1 = (c1 / c) be2 = (c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) N3 += 1 xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer((((N3 * c) - (a * x21)) - (b * ((x22 - x11) - al1))), (1 / xt)), (len(x22) * len(xt)))) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(((N2 * c) + (b * al2)), (1 / xt)), (len(xt) * len(al2)))) else: be1 = (c1 / c) be2 = (c2 / c) tHNFs = fdivs(a, b, c, d, e, f, al1, al2, be1, be2, x11, x22, x31, x32, x33) for t in tHNFs: HNFs.append(t) return HNFs<|docstring|>Finds the c divides conditions for the symmetry preserving HNFs. Args: a (int): a from the HNF. b (int): b from the HNF. c (int): c from the HNF. d (int): d from the HNF. e (int): e from the HNF. f (int): f from the HNF. al1 (numpy.array): array of alpha1 values from write up. al2 (numpy.array): array of alpha2 values from write up. al3 (numpy.array): array of alpha3 values from write up. x11 (numpy.array): array of pg values for x(1,1) spot. x21 (numpy.array): array of pg values for x(2,1) spot. x22 (numpy.array): array of pg values for x(2,2) spot. x23 (numpy.array): array of pg values for x(2,3) spot. x31 (numpy.array): array of pg values for x(3,1) spot. x32 (numpy.array): array of pg values for x(3,2) spot. x33 (numpy.array): array of pg values for x(3,3) spot. Returns: HNFs (list of lists): The symmetry preserving HNFs.<|endoftext|>
ec0206c7167c91d6f77ce8932f5b6def1d5d8b703e8e206eb097a4fd289c06a3
def load_configuration(config_info: Dict[(str, str)], extra_vars: Dict[(str, Any)]=None) -> Configuration: '\n Load the configuration. The `config_info` parameter is a mapping from\n key strings to value as strings or dictionaries. In the former case, the\n value is used as-is. In the latter case, if the dictionary has a key named\n `type` alongside a key named `key`.\n An optional default value is accepted for dictionary value with a key named\n `default`. The default value will be used only if the environment variable\n is not defined.\n\n\n Here is a sample of what it looks like:\n\n ```\n {\n "cert": "/some/path/file.crt",\n "token": {\n "type": "env",\n "key": "MY_TOKEN"\n },\n "host": {\n "type": "env",\n "key": "HOSTNAME",\n "default": "localhost"\n }\n }\n ```\n\n The `cert` configuration key is set to its string value whereas the `token`\n configuration key is dynamically fetched from the `MY_TOKEN` environment\n variable. The `host` configuration key is dynamically fetched from the\n `HOSTNAME` environment variable, but if not defined, the default value\n `localhost` will be used instead.\n\n When `extra_vars` is provided, it must be a dictionnary where keys map\n to configuration key. The values from `extra_vars` always override the\n values from the experiment itself. This is useful to the Chaos Toolkit\n CLI mostly to allow overriding values directly from cli arguments. 
It\'s\n seldom required otherwise.\n ' logger.debug('Loading configuration...') env = os.environ extra_vars = (extra_vars or {}) conf = {} for (key, value) in config_info.items(): if (isinstance(value, dict) and ('type' in value)): if (value['type'] == 'env'): env_key = value['key'] env_default = value.get('default') if ((env_key not in env) and (env_default is None) and (key not in extra_vars)): raise InvalidExperiment('Configuration makes reference to an environment key that does not exist: {}'.format(env_key)) conf[key] = extra_vars.get(key, env.get(env_key, env_default)) else: conf[key] = extra_vars.get(key, value) return conf
Load the configuration. The `config_info` parameter is a mapping from key strings to value as strings or dictionaries. In the former case, the value is used as-is. In the latter case, if the dictionary has a key named `type` alongside a key named `key`. An optional default value is accepted for dictionary value with a key named `default`. The default value will be used only if the environment variable is not defined. Here is a sample of what it looks like: ``` { "cert": "/some/path/file.crt", "token": { "type": "env", "key": "MY_TOKEN" }, "host": { "type": "env", "key": "HOSTNAME", "default": "localhost" } } ``` The `cert` configuration key is set to its string value whereas the `token` configuration key is dynamically fetched from the `MY_TOKEN` environment variable. The `host` configuration key is dynamically fetched from the `HOSTNAME` environment variable, but if not defined, the default value `localhost` will be used instead. When `extra_vars` is provided, it must be a dictionnary where keys map to configuration key. The values from `extra_vars` always override the values from the experiment itself. This is useful to the Chaos Toolkit CLI mostly to allow overriding values directly from cli arguments. It's seldom required otherwise.
chaoslib/configuration.py
load_configuration
roeiK-wix/chaostoolkit-lib
73
python
def load_configuration(config_info: Dict[(str, str)], extra_vars: Dict[(str, Any)]=None) -> Configuration: '\n Load the configuration. The `config_info` parameter is a mapping from\n key strings to value as strings or dictionaries. In the former case, the\n value is used as-is. In the latter case, if the dictionary has a key named\n `type` alongside a key named `key`.\n An optional default value is accepted for dictionary value with a key named\n `default`. The default value will be used only if the environment variable\n is not defined.\n\n\n Here is a sample of what it looks like:\n\n ```\n {\n "cert": "/some/path/file.crt",\n "token": {\n "type": "env",\n "key": "MY_TOKEN"\n },\n "host": {\n "type": "env",\n "key": "HOSTNAME",\n "default": "localhost"\n }\n }\n ```\n\n The `cert` configuration key is set to its string value whereas the `token`\n configuration key is dynamically fetched from the `MY_TOKEN` environment\n variable. The `host` configuration key is dynamically fetched from the\n `HOSTNAME` environment variable, but if not defined, the default value\n `localhost` will be used instead.\n\n When `extra_vars` is provided, it must be a dictionnary where keys map\n to configuration key. The values from `extra_vars` always override the\n values from the experiment itself. This is useful to the Chaos Toolkit\n CLI mostly to allow overriding values directly from cli arguments. 
It\'s\n seldom required otherwise.\n ' logger.debug('Loading configuration...') env = os.environ extra_vars = (extra_vars or {}) conf = {} for (key, value) in config_info.items(): if (isinstance(value, dict) and ('type' in value)): if (value['type'] == 'env'): env_key = value['key'] env_default = value.get('default') if ((env_key not in env) and (env_default is None) and (key not in extra_vars)): raise InvalidExperiment('Configuration makes reference to an environment key that does not exist: {}'.format(env_key)) conf[key] = extra_vars.get(key, env.get(env_key, env_default)) else: conf[key] = extra_vars.get(key, value) return conf
def load_configuration(config_info: Dict[(str, str)], extra_vars: Dict[(str, Any)]=None) -> Configuration: '\n Load the configuration. The `config_info` parameter is a mapping from\n key strings to value as strings or dictionaries. In the former case, the\n value is used as-is. In the latter case, if the dictionary has a key named\n `type` alongside a key named `key`.\n An optional default value is accepted for dictionary value with a key named\n `default`. The default value will be used only if the environment variable\n is not defined.\n\n\n Here is a sample of what it looks like:\n\n ```\n {\n "cert": "/some/path/file.crt",\n "token": {\n "type": "env",\n "key": "MY_TOKEN"\n },\n "host": {\n "type": "env",\n "key": "HOSTNAME",\n "default": "localhost"\n }\n }\n ```\n\n The `cert` configuration key is set to its string value whereas the `token`\n configuration key is dynamically fetched from the `MY_TOKEN` environment\n variable. The `host` configuration key is dynamically fetched from the\n `HOSTNAME` environment variable, but if not defined, the default value\n `localhost` will be used instead.\n\n When `extra_vars` is provided, it must be a dictionnary where keys map\n to configuration key. The values from `extra_vars` always override the\n values from the experiment itself. This is useful to the Chaos Toolkit\n CLI mostly to allow overriding values directly from cli arguments. 
It\'s\n seldom required otherwise.\n ' logger.debug('Loading configuration...') env = os.environ extra_vars = (extra_vars or {}) conf = {} for (key, value) in config_info.items(): if (isinstance(value, dict) and ('type' in value)): if (value['type'] == 'env'): env_key = value['key'] env_default = value.get('default') if ((env_key not in env) and (env_default is None) and (key not in extra_vars)): raise InvalidExperiment('Configuration makes reference to an environment key that does not exist: {}'.format(env_key)) conf[key] = extra_vars.get(key, env.get(env_key, env_default)) else: conf[key] = extra_vars.get(key, value) return conf<|docstring|>Load the configuration. The `config_info` parameter is a mapping from key strings to value as strings or dictionaries. In the former case, the value is used as-is. In the latter case, if the dictionary has a key named `type` alongside a key named `key`. An optional default value is accepted for dictionary value with a key named `default`. The default value will be used only if the environment variable is not defined. Here is a sample of what it looks like: ``` { "cert": "/some/path/file.crt", "token": { "type": "env", "key": "MY_TOKEN" }, "host": { "type": "env", "key": "HOSTNAME", "default": "localhost" } } ``` The `cert` configuration key is set to its string value whereas the `token` configuration key is dynamically fetched from the `MY_TOKEN` environment variable. The `host` configuration key is dynamically fetched from the `HOSTNAME` environment variable, but if not defined, the default value `localhost` will be used instead. When `extra_vars` is provided, it must be a dictionnary where keys map to configuration key. The values from `extra_vars` always override the values from the experiment itself. This is useful to the Chaos Toolkit CLI mostly to allow overriding values directly from cli arguments. It's seldom required otherwise.<|endoftext|>
34116485ed02e983e5612a4fb45c31c14fec1e794c03142b44d697e83784f00b
def test_duplo_tower(): ' Tests state machine for duplo tower scenario ' behavior_tree.load_settings_from_file('duplo_state_machine/BT_SETTINGS_TOWER.yaml') start_positions = [] start_positions.append(Pos((- 0.05), (- 0.1), 0)) start_positions.append(Pos(0, (- 0.1), 0)) start_positions.append(Pos(0.05, (- 0.1), 0)) targets = [] targets.append(Pos(0.0, 0.05, 0)) targets.append(Pos(0.0, 0.05, 0.0192)) targets.append(Pos(0.0, 0.05, (2 * 0.0192))) environment = duplo_state_machine.environment.Environment(start_positions, targets) print(environment.get_fitness(['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'pick 0!', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.05, 0.0192)?', 's(', 'pick 1!', 'place on 0!', ')', ')', ')'])) print(environment.get_fitness(['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'pick 0!', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.05, 0.0192)?', 's(', 'f(', '1 on 0?', 's(', 'pick 1!', 'place on 0!', ')', ')', 'apply force 1!', ')', ')', ')'])) environment.plot_individual('', 'test', ['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'pick 0!', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.05, 0.0192)?', 's(', 'f(', '1 on 0?', 's(', 'pick 1!', 'place on 0!', ')', ')', 'apply force 1!', ')', ')', ')']) environment.plot_individual('', 'test2', ['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'f(', 'picked 0?', 'pick 0!', ')', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.05, 0.0192)?', 's(', 'f(', 'picked 1?', 'pick 1!', ')', 'place on 0!', ')', ')', 'f(', '2 at pos (0.0, 0.05, 0.0384)?', 's(', 'f(', 'picked 2?', 'pick 2!', ')', 'place on 1!', ')', ')', ')']) ind = ['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'f(', 'picked 0?', 'pick 0!', ')', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.05, 0.0192)?', 's(', 'f(', 'picked 1?', 'pick 1!', ')', 'place on 0!', ')', ')', 'f(', '2 at pos (0.0, 0.05, 0.0384)?', 's(', 'f(', 'picked 2?', 'pick 2!', ')', 'place 
on 1!', ')', ')', ')'] print_and_plot(environment, ind, 'at no force') ind = ['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'f(', 'picked 0?', 'pick 0!', ')', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 on 0?', 's(', 'f(', 'picked 1?', 'pick 1!', ')', 'place on 0!', ')', ')', 'f(', '2 on 1', 's(', 'f(', 'picked 2?', 'pick 2!', ')', 'place on 1!', ')', ')', ')'] print_and_plot(environment, ind, 'on no force') ind = ['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'f(', 'picked 0?', 'pick 0!', ')', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.05, 0.0192)?', 's(', 'f(', '1 on 0?', 's(', 'f(', 'picked 1?', 'pick 1!', ')', 'place on 0!', ')', ')', 'apply force 1!', ')', ')', 'f(', '2 at pos (0.0, 0.05, 0.0384)?', 's(', 'f(', '2 on 1?', 's(', 'f(', 'picked 2?', 'pick 2!', ')', 'place on 1!', ')', ')', 'apply force 2!', ')', ')', ')'] print_and_plot(environment, ind, 'on with force') for i in range(10): random.seed(i) ind = ['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'pick 0!', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.05, 0.0192)?', 's(', 'f(', '1 on 0?', 's(', 'pick 1!', 'place on 0!', ')', ')', 'apply force 1!', ')', ')', 'f(', '2 on 1?', 's(', 'pick 2!', 'place on 1!', ')', ')', 'apply force 2!', ')'] print_and_plot(environment, ind, 'optimal') ind = ['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'f(', 'picked 0?', 'pick 0!', ')', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.05, 0.0192)?', 's(', 'f(', '1 on 0?', 's(', 'f(', 'picked 1?', 'pick 1!', ')', 'place on 0!', ')', ')', 'apply force 1!', ')', ')', 'f(', '2 at pos (0.0, 0.05, 0.0384)?', 's(', 'f(', '2 on 1?', 's(', 'f(', 'picked 2?', 'pick 2!', ')', 'place on 1!', ')', ')', 'apply force 2!', ')', ')', ')'] print_and_plot(environment, ind, 'full planned')
Tests state machine for duplo tower scenario
duplo_state_machine/tests/test_duplo.py
test_duplo_tower
jstyrud/planning-and-learning
8
python
def test_duplo_tower(): ' ' behavior_tree.load_settings_from_file('duplo_state_machine/BT_SETTINGS_TOWER.yaml') start_positions = [] start_positions.append(Pos((- 0.05), (- 0.1), 0)) start_positions.append(Pos(0, (- 0.1), 0)) start_positions.append(Pos(0.05, (- 0.1), 0)) targets = [] targets.append(Pos(0.0, 0.05, 0)) targets.append(Pos(0.0, 0.05, 0.0192)) targets.append(Pos(0.0, 0.05, (2 * 0.0192))) environment = duplo_state_machine.environment.Environment(start_positions, targets) print(environment.get_fitness(['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'pick 0!', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.05, 0.0192)?', 's(', 'pick 1!', 'place on 0!', ')', ')', ')'])) print(environment.get_fitness(['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'pick 0!', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.05, 0.0192)?', 's(', 'f(', '1 on 0?', 's(', 'pick 1!', 'place on 0!', ')', ')', 'apply force 1!', ')', ')', ')'])) environment.plot_individual(, 'test', ['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'pick 0!', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.05, 0.0192)?', 's(', 'f(', '1 on 0?', 's(', 'pick 1!', 'place on 0!', ')', ')', 'apply force 1!', ')', ')', ')']) environment.plot_individual(, 'test2', ['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'f(', 'picked 0?', 'pick 0!', ')', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.05, 0.0192)?', 's(', 'f(', 'picked 1?', 'pick 1!', ')', 'place on 0!', ')', ')', 'f(', '2 at pos (0.0, 0.05, 0.0384)?', 's(', 'f(', 'picked 2?', 'pick 2!', ')', 'place on 1!', ')', ')', ')']) ind = ['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'f(', 'picked 0?', 'pick 0!', ')', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.05, 0.0192)?', 's(', 'f(', 'picked 1?', 'pick 1!', ')', 'place on 0!', ')', ')', 'f(', '2 at pos (0.0, 0.05, 0.0384)?', 's(', 'f(', 'picked 2?', 'pick 2!', ')', 'place on 1!', ')', ')', ')'] 
print_and_plot(environment, ind, 'at no force') ind = ['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'f(', 'picked 0?', 'pick 0!', ')', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 on 0?', 's(', 'f(', 'picked 1?', 'pick 1!', ')', 'place on 0!', ')', ')', 'f(', '2 on 1', 's(', 'f(', 'picked 2?', 'pick 2!', ')', 'place on 1!', ')', ')', ')'] print_and_plot(environment, ind, 'on no force') ind = ['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'f(', 'picked 0?', 'pick 0!', ')', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.05, 0.0192)?', 's(', 'f(', '1 on 0?', 's(', 'f(', 'picked 1?', 'pick 1!', ')', 'place on 0!', ')', ')', 'apply force 1!', ')', ')', 'f(', '2 at pos (0.0, 0.05, 0.0384)?', 's(', 'f(', '2 on 1?', 's(', 'f(', 'picked 2?', 'pick 2!', ')', 'place on 1!', ')', ')', 'apply force 2!', ')', ')', ')'] print_and_plot(environment, ind, 'on with force') for i in range(10): random.seed(i) ind = ['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'pick 0!', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.05, 0.0192)?', 's(', 'f(', '1 on 0?', 's(', 'pick 1!', 'place on 0!', ')', ')', 'apply force 1!', ')', ')', 'f(', '2 on 1?', 's(', 'pick 2!', 'place on 1!', ')', ')', 'apply force 2!', ')'] print_and_plot(environment, ind, 'optimal') ind = ['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'f(', 'picked 0?', 'pick 0!', ')', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.05, 0.0192)?', 's(', 'f(', '1 on 0?', 's(', 'f(', 'picked 1?', 'pick 1!', ')', 'place on 0!', ')', ')', 'apply force 1!', ')', ')', 'f(', '2 at pos (0.0, 0.05, 0.0384)?', 's(', 'f(', '2 on 1?', 's(', 'f(', 'picked 2?', 'pick 2!', ')', 'place on 1!', ')', ')', 'apply force 2!', ')', ')', ')'] print_and_plot(environment, ind, 'full planned')
def test_duplo_tower(): ' ' behavior_tree.load_settings_from_file('duplo_state_machine/BT_SETTINGS_TOWER.yaml') start_positions = [] start_positions.append(Pos((- 0.05), (- 0.1), 0)) start_positions.append(Pos(0, (- 0.1), 0)) start_positions.append(Pos(0.05, (- 0.1), 0)) targets = [] targets.append(Pos(0.0, 0.05, 0)) targets.append(Pos(0.0, 0.05, 0.0192)) targets.append(Pos(0.0, 0.05, (2 * 0.0192))) environment = duplo_state_machine.environment.Environment(start_positions, targets) print(environment.get_fitness(['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'pick 0!', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.05, 0.0192)?', 's(', 'pick 1!', 'place on 0!', ')', ')', ')'])) print(environment.get_fitness(['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'pick 0!', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.05, 0.0192)?', 's(', 'f(', '1 on 0?', 's(', 'pick 1!', 'place on 0!', ')', ')', 'apply force 1!', ')', ')', ')'])) environment.plot_individual(, 'test', ['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'pick 0!', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.05, 0.0192)?', 's(', 'f(', '1 on 0?', 's(', 'pick 1!', 'place on 0!', ')', ')', 'apply force 1!', ')', ')', ')']) environment.plot_individual(, 'test2', ['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'f(', 'picked 0?', 'pick 0!', ')', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.05, 0.0192)?', 's(', 'f(', 'picked 1?', 'pick 1!', ')', 'place on 0!', ')', ')', 'f(', '2 at pos (0.0, 0.05, 0.0384)?', 's(', 'f(', 'picked 2?', 'pick 2!', ')', 'place on 1!', ')', ')', ')']) ind = ['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'f(', 'picked 0?', 'pick 0!', ')', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.05, 0.0192)?', 's(', 'f(', 'picked 1?', 'pick 1!', ')', 'place on 0!', ')', ')', 'f(', '2 at pos (0.0, 0.05, 0.0384)?', 's(', 'f(', 'picked 2?', 'pick 2!', ')', 'place on 1!', ')', ')', ')'] 
print_and_plot(environment, ind, 'at no force') ind = ['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'f(', 'picked 0?', 'pick 0!', ')', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 on 0?', 's(', 'f(', 'picked 1?', 'pick 1!', ')', 'place on 0!', ')', ')', 'f(', '2 on 1', 's(', 'f(', 'picked 2?', 'pick 2!', ')', 'place on 1!', ')', ')', ')'] print_and_plot(environment, ind, 'on no force') ind = ['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'f(', 'picked 0?', 'pick 0!', ')', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.05, 0.0192)?', 's(', 'f(', '1 on 0?', 's(', 'f(', 'picked 1?', 'pick 1!', ')', 'place on 0!', ')', ')', 'apply force 1!', ')', ')', 'f(', '2 at pos (0.0, 0.05, 0.0384)?', 's(', 'f(', '2 on 1?', 's(', 'f(', 'picked 2?', 'pick 2!', ')', 'place on 1!', ')', ')', 'apply force 2!', ')', ')', ')'] print_and_plot(environment, ind, 'on with force') for i in range(10): random.seed(i) ind = ['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'pick 0!', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.05, 0.0192)?', 's(', 'f(', '1 on 0?', 's(', 'pick 1!', 'place on 0!', ')', ')', 'apply force 1!', ')', ')', 'f(', '2 on 1?', 's(', 'pick 2!', 'place on 1!', ')', ')', 'apply force 2!', ')'] print_and_plot(environment, ind, 'optimal') ind = ['s(', 'f(', '0 at pos (0.0, 0.05, 0.0)?', 's(', 'f(', 'picked 0?', 'pick 0!', ')', 'place at (0.0, 0.05, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.05, 0.0192)?', 's(', 'f(', '1 on 0?', 's(', 'f(', 'picked 1?', 'pick 1!', ')', 'place on 0!', ')', ')', 'apply force 1!', ')', ')', 'f(', '2 at pos (0.0, 0.05, 0.0384)?', 's(', 'f(', '2 on 1?', 's(', 'f(', 'picked 2?', 'pick 2!', ')', 'place on 1!', ')', ')', 'apply force 2!', ')', ')', ')'] print_and_plot(environment, ind, 'full planned')<|docstring|>Tests state machine for duplo tower scenario<|endoftext|>
f785e5271cebe480456540c470435214a07fc2304c03986209b3ea672d7ff220
def test_duplo_croissant(): ' Tests state machine for duplo croissant scenario ' behavior_tree.load_settings_from_file('duplo_state_machine/BT_SETTINGS_CROISSANT.yaml') start_positions = [] start_positions.append(Pos((- 0.05), (- 0.1), 0)) start_positions.append(Pos(0, (- 0.1), 0)) start_positions.append(Pos(0.05, (- 0.1), 0)) start_positions.append(Pos(0.1, (- 0.1), 0)) targets = [] targets.append(Pos(0.0, 0.0, 0.0)) targets.append(Pos(0.0, 0.0, 0.0192)) targets.append(Pos(0.016, (- 0.032), 0.0)) targets.append(Pos(0.016, 0.032, 0.0)) environment = duplo_state_machine.environment.Environment(start_positions, targets) best = ['s(', 'f(', '0 at pos (0.0, 0.0, 0.0)?', 's(', 'pick 0!', 'place at (0.0, 0.0, 0.0)!', ')', ')', 'f(', '2 at pos (0.016, -0.032, 0.0)?', 's(', 'pick 2!', 'place at (0.016, -0.032, 0.0)!', ')', ')', 'f(', '3 at pos (0.016, 0.032, 0.0)?', 's(', 'pick 3!', 'place at (0.016, 0.032, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.0, 0.0192)?', 's(', 'f(', '1 on 0?', 's(', 'pick 1!', 'place on 0!', ')', ')', 'apply force 1!', ')', ')', ')'] print(environment.get_fitness(best)) planned = ['s(', 'f(', '0 at pos (0.0, 0.0, 0.0)?', 's(', 'pick 0!', 'place at (0.0, 0.0, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.0, 0.0192)?', 's(', 'f(', '1 on 0?', 's(', 'pick 1!', 'place on 0!', ')', ')', 'apply force 1!', ')', ')', 'f(', '2 at pos (0.016, -0.032, 0.0)?', 's(', 'pick 2!', 'place at (0.016, -0.032, 0.0)!', ')', ')', 'f(', '3 at pos (0.016, 0.032, 0.0)?', 's(', 'pick 3!', 'place at (0.016, 0.032, 0.0)!', ')', ')', ')'] planned = ['s(', 'f(', '0 at pos (0.0, 0.0, 0.0)?', 's(', 'f(', 'picked 0?', 'pick 0!', ')', 'place at (0.0, 0.0, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.0, 0.0192)?', 's(', 'f(', '1 on 0?', 's(', 'f(', 'picked 1?', 'pick 1!', ')', 'place on 0!', ')', ')', 'apply force 1!', ')', ')', 'f(', '2 at pos (0.016, -0.032, 0.0)?', 's(', 'f(', 'picked 2?', 'pick 2!', ')', 'place at (0.016, -0.032, 0.0)!', ')', ')', 'f(', '3 at pos (0.016, 0.032, 0.0)?', 
's(', 'f(', 'picked 3?', 'pick 3!', ')', 'place at (0.016, 0.032, 0.0)!', ')', ')', ')'] print(environment.get_fitness(planned)) gp_par = gp.GpParameters() gp_par.ind_start_length = 8 gp_par.n_population = 16 gp_par.f_crossover = 0.5 gp_par.n_offspring_crossover = 2 gp_par.f_mutation = 0.5 gp_par.n_offspring_mutation = 2 gp_par.parent_selection = gp.SelectionMethods.RANK gp_par.survivor_selection = gp.SelectionMethods.RANK gp_par.f_elites = 0.1 gp_par.f_parents = 1 gp_par.mutate_co_offspring = False gp_par.mutate_co_parents = True gp_par.mutation_p_add = 0.4 gp_par.mutation_p_delete = 0.3 gp_par.allow_identical = False gp_par.plot = True gp_par.n_generations = 50 gp_par.verbose = False gp_par.fig_last_gen = False n_logs = 3 for i in range(1, (n_logs + 1)): gp_par.log_name = ('croissant_baseline_sm_' + str(i)) gp.set_seeds(i) gp.run(environment, gp_par, baseline=planned)
Tests state machine for duplo croissant scenario
duplo_state_machine/tests/test_duplo.py
test_duplo_croissant
jstyrud/planning-and-learning
8
python
def test_duplo_croissant(): ' ' behavior_tree.load_settings_from_file('duplo_state_machine/BT_SETTINGS_CROISSANT.yaml') start_positions = [] start_positions.append(Pos((- 0.05), (- 0.1), 0)) start_positions.append(Pos(0, (- 0.1), 0)) start_positions.append(Pos(0.05, (- 0.1), 0)) start_positions.append(Pos(0.1, (- 0.1), 0)) targets = [] targets.append(Pos(0.0, 0.0, 0.0)) targets.append(Pos(0.0, 0.0, 0.0192)) targets.append(Pos(0.016, (- 0.032), 0.0)) targets.append(Pos(0.016, 0.032, 0.0)) environment = duplo_state_machine.environment.Environment(start_positions, targets) best = ['s(', 'f(', '0 at pos (0.0, 0.0, 0.0)?', 's(', 'pick 0!', 'place at (0.0, 0.0, 0.0)!', ')', ')', 'f(', '2 at pos (0.016, -0.032, 0.0)?', 's(', 'pick 2!', 'place at (0.016, -0.032, 0.0)!', ')', ')', 'f(', '3 at pos (0.016, 0.032, 0.0)?', 's(', 'pick 3!', 'place at (0.016, 0.032, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.0, 0.0192)?', 's(', 'f(', '1 on 0?', 's(', 'pick 1!', 'place on 0!', ')', ')', 'apply force 1!', ')', ')', ')'] print(environment.get_fitness(best)) planned = ['s(', 'f(', '0 at pos (0.0, 0.0, 0.0)?', 's(', 'pick 0!', 'place at (0.0, 0.0, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.0, 0.0192)?', 's(', 'f(', '1 on 0?', 's(', 'pick 1!', 'place on 0!', ')', ')', 'apply force 1!', ')', ')', 'f(', '2 at pos (0.016, -0.032, 0.0)?', 's(', 'pick 2!', 'place at (0.016, -0.032, 0.0)!', ')', ')', 'f(', '3 at pos (0.016, 0.032, 0.0)?', 's(', 'pick 3!', 'place at (0.016, 0.032, 0.0)!', ')', ')', ')'] planned = ['s(', 'f(', '0 at pos (0.0, 0.0, 0.0)?', 's(', 'f(', 'picked 0?', 'pick 0!', ')', 'place at (0.0, 0.0, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.0, 0.0192)?', 's(', 'f(', '1 on 0?', 's(', 'f(', 'picked 1?', 'pick 1!', ')', 'place on 0!', ')', ')', 'apply force 1!', ')', ')', 'f(', '2 at pos (0.016, -0.032, 0.0)?', 's(', 'f(', 'picked 2?', 'pick 2!', ')', 'place at (0.016, -0.032, 0.0)!', ')', ')', 'f(', '3 at pos (0.016, 0.032, 0.0)?', 's(', 'f(', 'picked 3?', 'pick 3!', ')', 'place 
at (0.016, 0.032, 0.0)!', ')', ')', ')'] print(environment.get_fitness(planned)) gp_par = gp.GpParameters() gp_par.ind_start_length = 8 gp_par.n_population = 16 gp_par.f_crossover = 0.5 gp_par.n_offspring_crossover = 2 gp_par.f_mutation = 0.5 gp_par.n_offspring_mutation = 2 gp_par.parent_selection = gp.SelectionMethods.RANK gp_par.survivor_selection = gp.SelectionMethods.RANK gp_par.f_elites = 0.1 gp_par.f_parents = 1 gp_par.mutate_co_offspring = False gp_par.mutate_co_parents = True gp_par.mutation_p_add = 0.4 gp_par.mutation_p_delete = 0.3 gp_par.allow_identical = False gp_par.plot = True gp_par.n_generations = 50 gp_par.verbose = False gp_par.fig_last_gen = False n_logs = 3 for i in range(1, (n_logs + 1)): gp_par.log_name = ('croissant_baseline_sm_' + str(i)) gp.set_seeds(i) gp.run(environment, gp_par, baseline=planned)
def test_duplo_croissant(): ' ' behavior_tree.load_settings_from_file('duplo_state_machine/BT_SETTINGS_CROISSANT.yaml') start_positions = [] start_positions.append(Pos((- 0.05), (- 0.1), 0)) start_positions.append(Pos(0, (- 0.1), 0)) start_positions.append(Pos(0.05, (- 0.1), 0)) start_positions.append(Pos(0.1, (- 0.1), 0)) targets = [] targets.append(Pos(0.0, 0.0, 0.0)) targets.append(Pos(0.0, 0.0, 0.0192)) targets.append(Pos(0.016, (- 0.032), 0.0)) targets.append(Pos(0.016, 0.032, 0.0)) environment = duplo_state_machine.environment.Environment(start_positions, targets) best = ['s(', 'f(', '0 at pos (0.0, 0.0, 0.0)?', 's(', 'pick 0!', 'place at (0.0, 0.0, 0.0)!', ')', ')', 'f(', '2 at pos (0.016, -0.032, 0.0)?', 's(', 'pick 2!', 'place at (0.016, -0.032, 0.0)!', ')', ')', 'f(', '3 at pos (0.016, 0.032, 0.0)?', 's(', 'pick 3!', 'place at (0.016, 0.032, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.0, 0.0192)?', 's(', 'f(', '1 on 0?', 's(', 'pick 1!', 'place on 0!', ')', ')', 'apply force 1!', ')', ')', ')'] print(environment.get_fitness(best)) planned = ['s(', 'f(', '0 at pos (0.0, 0.0, 0.0)?', 's(', 'pick 0!', 'place at (0.0, 0.0, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.0, 0.0192)?', 's(', 'f(', '1 on 0?', 's(', 'pick 1!', 'place on 0!', ')', ')', 'apply force 1!', ')', ')', 'f(', '2 at pos (0.016, -0.032, 0.0)?', 's(', 'pick 2!', 'place at (0.016, -0.032, 0.0)!', ')', ')', 'f(', '3 at pos (0.016, 0.032, 0.0)?', 's(', 'pick 3!', 'place at (0.016, 0.032, 0.0)!', ')', ')', ')'] planned = ['s(', 'f(', '0 at pos (0.0, 0.0, 0.0)?', 's(', 'f(', 'picked 0?', 'pick 0!', ')', 'place at (0.0, 0.0, 0.0)!', ')', ')', 'f(', '1 at pos (0.0, 0.0, 0.0192)?', 's(', 'f(', '1 on 0?', 's(', 'f(', 'picked 1?', 'pick 1!', ')', 'place on 0!', ')', ')', 'apply force 1!', ')', ')', 'f(', '2 at pos (0.016, -0.032, 0.0)?', 's(', 'f(', 'picked 2?', 'pick 2!', ')', 'place at (0.016, -0.032, 0.0)!', ')', ')', 'f(', '3 at pos (0.016, 0.032, 0.0)?', 's(', 'f(', 'picked 3?', 'pick 3!', ')', 'place 
at (0.016, 0.032, 0.0)!', ')', ')', ')'] print(environment.get_fitness(planned)) gp_par = gp.GpParameters() gp_par.ind_start_length = 8 gp_par.n_population = 16 gp_par.f_crossover = 0.5 gp_par.n_offspring_crossover = 2 gp_par.f_mutation = 0.5 gp_par.n_offspring_mutation = 2 gp_par.parent_selection = gp.SelectionMethods.RANK gp_par.survivor_selection = gp.SelectionMethods.RANK gp_par.f_elites = 0.1 gp_par.f_parents = 1 gp_par.mutate_co_offspring = False gp_par.mutate_co_parents = True gp_par.mutation_p_add = 0.4 gp_par.mutation_p_delete = 0.3 gp_par.allow_identical = False gp_par.plot = True gp_par.n_generations = 50 gp_par.verbose = False gp_par.fig_last_gen = False n_logs = 3 for i in range(1, (n_logs + 1)): gp_par.log_name = ('croissant_baseline_sm_' + str(i)) gp.set_seeds(i) gp.run(environment, gp_par, baseline=planned)<|docstring|>Tests state machine for duplo croissant scenario<|endoftext|>
92a40f6c98f501d3d02f5428312789c0c3de25e183c95d5a0a86ad9b368dbd80
def print_and_plot(environment, bt, name): ' Help function to print and plot bt ' environment.plot_individual('logs/', name, bt) print(((name + ' ') + str(environment.get_fitness(bt))))
Help function to print and plot bt
duplo_state_machine/tests/test_duplo.py
print_and_plot
jstyrud/planning-and-learning
8
python
def print_and_plot(environment, bt, name): ' ' environment.plot_individual('logs/', name, bt) print(((name + ' ') + str(environment.get_fitness(bt))))
def print_and_plot(environment, bt, name): ' ' environment.plot_individual('logs/', name, bt) print(((name + ' ') + str(environment.get_fitness(bt))))<|docstring|>Help function to print and plot bt<|endoftext|>
9c715b19b2bb52af46e1f6e9623463164dd275ea92ec0408fa9e02fc38e3f12d
@pytest.mark.skip def test_check_profile(): '\n Profiles the state machine environment\n ' import cProfile cProfile.runctx('test_other()', globals=globals(), locals=locals())
Profiles the state machine environment
duplo_state_machine/tests/test_duplo.py
test_check_profile
jstyrud/planning-and-learning
8
python
@pytest.mark.skip def test_check_profile(): '\n \n ' import cProfile cProfile.runctx('test_other()', globals=globals(), locals=locals())
@pytest.mark.skip def test_check_profile(): '\n \n ' import cProfile cProfile.runctx('test_other()', globals=globals(), locals=locals())<|docstring|>Profiles the state machine environment<|endoftext|>
282aeb8d8853620792288d6c72a9140e799ebf0f732414f67eef9c495c98187d
def __init__(self, mode: str, repo_pth: os.PathLike, dataenv: lmdb.Environment, labelenv: lmdb.Environment, *args, **kwargs): "Developer documentation for init method.\n\n Parameters\n ----------\n mode : str\n 'r' for read-only, 'a' for write-enabled\n repo_pth : os.PathLike\n path to the repository on disk.\n dataenv : lmdb.Environment\n the lmdb environment in which the data records are stored. this is\n the same as the arrayset data record environments.\n labelenv : lmdb.Environment\n the lmdb environment in which the label hash key / values are stored\n permanently. When opened in by this reader instance, no write access\n is allowed.\n " self._mode = mode self._path = repo_pth self._is_conman: bool = False self._labelenv: lmdb.Environment = labelenv self._labelTxn: Optional[lmdb.Transaction] = None self._TxnRegister = TxnRegister() self._mspecs: Dict[(Union[(str, int)], bytes)] = {} metaNamesSpec = RecordQuery(dataenv).metadata_records() for (metaNames, metaSpec) in metaNamesSpec: labelKey = parsing.hash_meta_db_key_from_raw_key(metaSpec.meta_hash) self._mspecs[metaNames.meta_name] = labelKey
Developer documentation for init method. Parameters ---------- mode : str 'r' for read-only, 'a' for write-enabled repo_pth : os.PathLike path to the repository on disk. dataenv : lmdb.Environment the lmdb environment in which the data records are stored. this is the same as the arrayset data record environments. labelenv : lmdb.Environment the lmdb environment in which the label hash key / values are stored permanently. When opened in by this reader instance, no write access is allowed.
src/hangar/metadata.py
__init__
niranjana687/hangar-py
1
python
def __init__(self, mode: str, repo_pth: os.PathLike, dataenv: lmdb.Environment, labelenv: lmdb.Environment, *args, **kwargs): "Developer documentation for init method.\n\n Parameters\n ----------\n mode : str\n 'r' for read-only, 'a' for write-enabled\n repo_pth : os.PathLike\n path to the repository on disk.\n dataenv : lmdb.Environment\n the lmdb environment in which the data records are stored. this is\n the same as the arrayset data record environments.\n labelenv : lmdb.Environment\n the lmdb environment in which the label hash key / values are stored\n permanently. When opened in by this reader instance, no write access\n is allowed.\n " self._mode = mode self._path = repo_pth self._is_conman: bool = False self._labelenv: lmdb.Environment = labelenv self._labelTxn: Optional[lmdb.Transaction] = None self._TxnRegister = TxnRegister() self._mspecs: Dict[(Union[(str, int)], bytes)] = {} metaNamesSpec = RecordQuery(dataenv).metadata_records() for (metaNames, metaSpec) in metaNamesSpec: labelKey = parsing.hash_meta_db_key_from_raw_key(metaSpec.meta_hash) self._mspecs[metaNames.meta_name] = labelKey
def __init__(self, mode: str, repo_pth: os.PathLike, dataenv: lmdb.Environment, labelenv: lmdb.Environment, *args, **kwargs): "Developer documentation for init method.\n\n Parameters\n ----------\n mode : str\n 'r' for read-only, 'a' for write-enabled\n repo_pth : os.PathLike\n path to the repository on disk.\n dataenv : lmdb.Environment\n the lmdb environment in which the data records are stored. this is\n the same as the arrayset data record environments.\n labelenv : lmdb.Environment\n the lmdb environment in which the label hash key / values are stored\n permanently. When opened in by this reader instance, no write access\n is allowed.\n " self._mode = mode self._path = repo_pth self._is_conman: bool = False self._labelenv: lmdb.Environment = labelenv self._labelTxn: Optional[lmdb.Transaction] = None self._TxnRegister = TxnRegister() self._mspecs: Dict[(Union[(str, int)], bytes)] = {} metaNamesSpec = RecordQuery(dataenv).metadata_records() for (metaNames, metaSpec) in metaNamesSpec: labelKey = parsing.hash_meta_db_key_from_raw_key(metaSpec.meta_hash) self._mspecs[metaNames.meta_name] = labelKey<|docstring|>Developer documentation for init method. Parameters ---------- mode : str 'r' for read-only, 'a' for write-enabled repo_pth : os.PathLike path to the repository on disk. dataenv : lmdb.Environment the lmdb environment in which the data records are stored. this is the same as the arrayset data record environments. labelenv : lmdb.Environment the lmdb environment in which the label hash key / values are stored permanently. When opened in by this reader instance, no write access is allowed.<|endoftext|>
ad5da069143721010ebc7b0e55e4503071083d6222223bc51d962672049d3337
def __len__(self) -> int: 'Determine how many metadata key/value pairs are in the checkout\n\n Returns\n -------\n int\n number of metadata key/value pairs.\n ' return len(self._mspecs)
Determine how many metadata key/value pairs are in the checkout Returns ------- int number of metadata key/value pairs.
src/hangar/metadata.py
__len__
niranjana687/hangar-py
1
python
def __len__(self) -> int: 'Determine how many metadata key/value pairs are in the checkout\n\n Returns\n -------\n int\n number of metadata key/value pairs.\n ' return len(self._mspecs)
def __len__(self) -> int: 'Determine how many metadata key/value pairs are in the checkout\n\n Returns\n -------\n int\n number of metadata key/value pairs.\n ' return len(self._mspecs)<|docstring|>Determine how many metadata key/value pairs are in the checkout Returns ------- int number of metadata key/value pairs.<|endoftext|>
3ec2d082a7757a506cb56db63d019a2701c51fd291d1f3d3a0192ac536bcbb0b
def __getitem__(self, key: Union[(str, int)]) -> str: 'Retrieve a metadata sample with a key. Convenience method for dict style access.\n\n .. seealso:: :meth:`get`\n\n Parameters\n ----------\n key : Union[str, int]\n metadata key to retrieve from the checkout\n\n Returns\n -------\n string\n value of the metadata key/value pair stored in the checkout.\n ' return self.get(key)
Retrieve a metadata sample with a key. Convenience method for dict style access. .. seealso:: :meth:`get` Parameters ---------- key : Union[str, int] metadata key to retrieve from the checkout Returns ------- string value of the metadata key/value pair stored in the checkout.
src/hangar/metadata.py
__getitem__
niranjana687/hangar-py
1
python
def __getitem__(self, key: Union[(str, int)]) -> str: 'Retrieve a metadata sample with a key. Convenience method for dict style access.\n\n .. seealso:: :meth:`get`\n\n Parameters\n ----------\n key : Union[str, int]\n metadata key to retrieve from the checkout\n\n Returns\n -------\n string\n value of the metadata key/value pair stored in the checkout.\n ' return self.get(key)
def __getitem__(self, key: Union[(str, int)]) -> str: 'Retrieve a metadata sample with a key. Convenience method for dict style access.\n\n .. seealso:: :meth:`get`\n\n Parameters\n ----------\n key : Union[str, int]\n metadata key to retrieve from the checkout\n\n Returns\n -------\n string\n value of the metadata key/value pair stored in the checkout.\n ' return self.get(key)<|docstring|>Retrieve a metadata sample with a key. Convenience method for dict style access. .. seealso:: :meth:`get` Parameters ---------- key : Union[str, int] metadata key to retrieve from the checkout Returns ------- string value of the metadata key/value pair stored in the checkout.<|endoftext|>
32af1caad43adca0ba8bfde5a0023542dd045801e9d11c857663dd5c1ce20a47
def __contains__(self, key: Union[(str, int)]) -> bool: 'Determine if a key with the provided name is in the metadata\n\n Parameters\n ----------\n key : Union[str, int]\n key to check for containment testing\n\n Returns\n -------\n bool\n True if key exists, False otherwise\n ' if (key in self._mspecs): return True else: return False
Determine if a key with the provided name is in the metadata Parameters ---------- key : Union[str, int] key to check for containment testing Returns ------- bool True if key exists, False otherwise
src/hangar/metadata.py
__contains__
niranjana687/hangar-py
1
python
def __contains__(self, key: Union[(str, int)]) -> bool: 'Determine if a key with the provided name is in the metadata\n\n Parameters\n ----------\n key : Union[str, int]\n key to check for containment testing\n\n Returns\n -------\n bool\n True if key exists, False otherwise\n ' if (key in self._mspecs): return True else: return False
def __contains__(self, key: Union[(str, int)]) -> bool: 'Determine if a key with the provided name is in the metadata\n\n Parameters\n ----------\n key : Union[str, int]\n key to check for containment testing\n\n Returns\n -------\n bool\n True if key exists, False otherwise\n ' if (key in self._mspecs): return True else: return False<|docstring|>Determine if a key with the provided name is in the metadata Parameters ---------- key : Union[str, int] key to check for containment testing Returns ------- bool True if key exists, False otherwise<|endoftext|>
14ce8c07c9e6f63556993427c74be17239d9a8fdb1c9f416c3e4f2fa75a91a55
@property def iswriteable(self) -> bool: 'Read-only attribute indicating if this metadata object is write-enabled.\n\n Returns\n -------\n bool\n True if write-enabled checkout, Otherwise False.\n ' return (False if (self._mode == 'r') else True)
Read-only attribute indicating if this metadata object is write-enabled. Returns ------- bool True if write-enabled checkout, Otherwise False.
src/hangar/metadata.py
iswriteable
niranjana687/hangar-py
1
python
@property def iswriteable(self) -> bool: 'Read-only attribute indicating if this metadata object is write-enabled.\n\n Returns\n -------\n bool\n True if write-enabled checkout, Otherwise False.\n ' return (False if (self._mode == 'r') else True)
@property def iswriteable(self) -> bool: 'Read-only attribute indicating if this metadata object is write-enabled.\n\n Returns\n -------\n bool\n True if write-enabled checkout, Otherwise False.\n ' return (False if (self._mode == 'r') else True)<|docstring|>Read-only attribute indicating if this metadata object is write-enabled. Returns ------- bool True if write-enabled checkout, Otherwise False.<|endoftext|>
3a0e555f4705dad76796d346413268722d9bd73286243120c4c8f2bfedb4b42f
def keys(self) -> Iterator[Union[(str, int)]]: 'generator which yields the names of every metadata piece in the checkout.\n\n For write enabled checkouts, is technically possible to iterate over the\n metadata object while adding/deleting data, in order to avoid internal\n python runtime errors (``dictionary changed size during iteration`` we\n have to make a copy of they key list before beginning the loop.) While\n not necessary for read checkouts, we perform the same operation for both\n read and write checkouts in order in order to avoid differences.\n\n Yields\n ------\n Iterator[Union[str, int]]\n keys of one metadata sample at a time\n ' for name in tuple(self._mspecs.keys()): (yield name)
generator which yields the names of every metadata piece in the checkout. For write enabled checkouts, is technically possible to iterate over the metadata object while adding/deleting data, in order to avoid internal python runtime errors (``dictionary changed size during iteration`` we have to make a copy of they key list before beginning the loop.) While not necessary for read checkouts, we perform the same operation for both read and write checkouts in order in order to avoid differences. Yields ------ Iterator[Union[str, int]] keys of one metadata sample at a time
src/hangar/metadata.py
keys
niranjana687/hangar-py
1
python
def keys(self) -> Iterator[Union[(str, int)]]: 'generator which yields the names of every metadata piece in the checkout.\n\n For write enabled checkouts, is technically possible to iterate over the\n metadata object while adding/deleting data, in order to avoid internal\n python runtime errors (``dictionary changed size during iteration`` we\n have to make a copy of they key list before beginning the loop.) While\n not necessary for read checkouts, we perform the same operation for both\n read and write checkouts in order in order to avoid differences.\n\n Yields\n ------\n Iterator[Union[str, int]]\n keys of one metadata sample at a time\n ' for name in tuple(self._mspecs.keys()): (yield name)
def keys(self) -> Iterator[Union[(str, int)]]: 'generator which yields the names of every metadata piece in the checkout.\n\n For write enabled checkouts, is technically possible to iterate over the\n metadata object while adding/deleting data, in order to avoid internal\n python runtime errors (``dictionary changed size during iteration`` we\n have to make a copy of they key list before beginning the loop.) While\n not necessary for read checkouts, we perform the same operation for both\n read and write checkouts in order in order to avoid differences.\n\n Yields\n ------\n Iterator[Union[str, int]]\n keys of one metadata sample at a time\n ' for name in tuple(self._mspecs.keys()): (yield name)<|docstring|>generator which yields the names of every metadata piece in the checkout. For write enabled checkouts, is technically possible to iterate over the metadata object while adding/deleting data, in order to avoid internal python runtime errors (``dictionary changed size during iteration`` we have to make a copy of they key list before beginning the loop.) While not necessary for read checkouts, we perform the same operation for both read and write checkouts in order in order to avoid differences. Yields ------ Iterator[Union[str, int]] keys of one metadata sample at a time<|endoftext|>
befa1af6f4bf3f4938eef6b374d6c09702695a90994346876d295e493423b958
def values(self) -> Iterator[str]: 'generator yielding all metadata values in the checkout\n\n For write enabled checkouts, is technically possible to iterate over the\n metadata object while adding/deleting data, in order to avoid internal\n python runtime errors (``dictionary changed size during iteration`` we\n have to make a copy of they key list before beginning the loop.) While\n not necessary for read checkouts, we perform the same operation for both\n read and write checkouts in order in order to avoid differences.\n\n Yields\n ------\n Iterator[str]\n values of one metadata piece at a time\n ' for name in tuple(self._mspecs.keys()): (yield self.get(name))
generator yielding all metadata values in the checkout For write enabled checkouts, is technically possible to iterate over the metadata object while adding/deleting data, in order to avoid internal python runtime errors (``dictionary changed size during iteration`` we have to make a copy of they key list before beginning the loop.) While not necessary for read checkouts, we perform the same operation for both read and write checkouts in order in order to avoid differences. Yields ------ Iterator[str] values of one metadata piece at a time
src/hangar/metadata.py
values
niranjana687/hangar-py
1
python
def values(self) -> Iterator[str]: 'generator yielding all metadata values in the checkout\n\n For write enabled checkouts, is technically possible to iterate over the\n metadata object while adding/deleting data, in order to avoid internal\n python runtime errors (``dictionary changed size during iteration`` we\n have to make a copy of they key list before beginning the loop.) While\n not necessary for read checkouts, we perform the same operation for both\n read and write checkouts in order in order to avoid differences.\n\n Yields\n ------\n Iterator[str]\n values of one metadata piece at a time\n ' for name in tuple(self._mspecs.keys()): (yield self.get(name))
def values(self) -> Iterator[str]: 'generator yielding all metadata values in the checkout\n\n For write enabled checkouts, is technically possible to iterate over the\n metadata object while adding/deleting data, in order to avoid internal\n python runtime errors (``dictionary changed size during iteration`` we\n have to make a copy of they key list before beginning the loop.) While\n not necessary for read checkouts, we perform the same operation for both\n read and write checkouts in order in order to avoid differences.\n\n Yields\n ------\n Iterator[str]\n values of one metadata piece at a time\n ' for name in tuple(self._mspecs.keys()): (yield self.get(name))<|docstring|>generator yielding all metadata values in the checkout For write enabled checkouts, is technically possible to iterate over the metadata object while adding/deleting data, in order to avoid internal python runtime errors (``dictionary changed size during iteration`` we have to make a copy of they key list before beginning the loop.) While not necessary for read checkouts, we perform the same operation for both read and write checkouts in order in order to avoid differences. Yields ------ Iterator[str] values of one metadata piece at a time<|endoftext|>
b79d16cd89d6f2c082dfb1584bfcd6c4eee4c5d54720cbdea64973d2cadd0dda
def items(self) -> Iterator[Tuple[(Union[(str, int)], str)]]: 'generator yielding key/value for all metadata recorded in checkout.\n\n For write enabled checkouts, is technically possible to iterate over the\n metadata object while adding/deleting data, in order to avoid internal\n python runtime errors (``dictionary changed size during iteration`` we\n have to make a copy of they key list before beginning the loop.) While\n not necessary for read checkouts, we perform the same operation for both\n read and write checkouts in order in order to avoid differences.\n\n Yields\n ------\n Iterator[Tuple[Union[str, int], np.ndarray]]\n metadata key and stored value for every piece in the checkout.\n ' for name in tuple(self._mspecs.keys()): (yield (name, self.get(name)))
generator yielding key/value for all metadata recorded in checkout. For write enabled checkouts, is technically possible to iterate over the metadata object while adding/deleting data, in order to avoid internal python runtime errors (``dictionary changed size during iteration`` we have to make a copy of they key list before beginning the loop.) While not necessary for read checkouts, we perform the same operation for both read and write checkouts in order in order to avoid differences. Yields ------ Iterator[Tuple[Union[str, int], np.ndarray]] metadata key and stored value for every piece in the checkout.
src/hangar/metadata.py
items
niranjana687/hangar-py
1
python
def items(self) -> Iterator[Tuple[(Union[(str, int)], str)]]: 'generator yielding key/value for all metadata recorded in checkout.\n\n For write enabled checkouts, is technically possible to iterate over the\n metadata object while adding/deleting data, in order to avoid internal\n python runtime errors (``dictionary changed size during iteration`` we\n have to make a copy of they key list before beginning the loop.) While\n not necessary for read checkouts, we perform the same operation for both\n read and write checkouts in order in order to avoid differences.\n\n Yields\n ------\n Iterator[Tuple[Union[str, int], np.ndarray]]\n metadata key and stored value for every piece in the checkout.\n ' for name in tuple(self._mspecs.keys()): (yield (name, self.get(name)))
def items(self) -> Iterator[Tuple[(Union[(str, int)], str)]]: 'generator yielding key/value for all metadata recorded in checkout.\n\n For write enabled checkouts, is technically possible to iterate over the\n metadata object while adding/deleting data, in order to avoid internal\n python runtime errors (``dictionary changed size during iteration`` we\n have to make a copy of they key list before beginning the loop.) While\n not necessary for read checkouts, we perform the same operation for both\n read and write checkouts in order in order to avoid differences.\n\n Yields\n ------\n Iterator[Tuple[Union[str, int], np.ndarray]]\n metadata key and stored value for every piece in the checkout.\n ' for name in tuple(self._mspecs.keys()): (yield (name, self.get(name)))<|docstring|>generator yielding key/value for all metadata recorded in checkout. For write enabled checkouts, is technically possible to iterate over the metadata object while adding/deleting data, in order to avoid internal python runtime errors (``dictionary changed size during iteration`` we have to make a copy of they key list before beginning the loop.) While not necessary for read checkouts, we perform the same operation for both read and write checkouts in order in order to avoid differences. Yields ------ Iterator[Tuple[Union[str, int], np.ndarray]] metadata key and stored value for every piece in the checkout.<|endoftext|>
f1b1a1ce31090ca1bf9379e021cbfef510b806192e4ba1c194c133d24ab21722
def get(self, key: Union[(str, int)]) -> str: 'retrieve a piece of metadata from the checkout.\n\n Parameters\n ----------\n key : Union[str, int]\n The name of the metadata piece to retrieve.\n\n Returns\n -------\n str\n The stored metadata value associated with the key.\n\n Raises\n ------\n ValueError\n If the `key` is not str type or contains whitespace or non\n alpha-numeric characters.\n KeyError\n If no metadata exists in the checkout with the provided key.\n ' try: tmpconman = (not self._is_conman) if tmpconman: self.__enter__() metaVal = self._labelTxn.get(self._mspecs[key]) meta_val = parsing.hash_meta_raw_val_from_db_val(metaVal) except KeyError: raise KeyError(f'The checkout does not contain metadata with key: {key}') finally: if tmpconman: self.__exit__() return meta_val
retrieve a piece of metadata from the checkout. Parameters ---------- key : Union[str, int] The name of the metadata piece to retrieve. Returns ------- str The stored metadata value associated with the key. Raises ------ ValueError If the `key` is not str type or contains whitespace or non alpha-numeric characters. KeyError If no metadata exists in the checkout with the provided key.
src/hangar/metadata.py
get
niranjana687/hangar-py
1
python
def get(self, key: Union[(str, int)]) -> str: 'retrieve a piece of metadata from the checkout.\n\n Parameters\n ----------\n key : Union[str, int]\n The name of the metadata piece to retrieve.\n\n Returns\n -------\n str\n The stored metadata value associated with the key.\n\n Raises\n ------\n ValueError\n If the `key` is not str type or contains whitespace or non\n alpha-numeric characters.\n KeyError\n If no metadata exists in the checkout with the provided key.\n ' try: tmpconman = (not self._is_conman) if tmpconman: self.__enter__() metaVal = self._labelTxn.get(self._mspecs[key]) meta_val = parsing.hash_meta_raw_val_from_db_val(metaVal) except KeyError: raise KeyError(f'The checkout does not contain metadata with key: {key}') finally: if tmpconman: self.__exit__() return meta_val
def get(self, key: Union[(str, int)]) -> str: 'retrieve a piece of metadata from the checkout.\n\n Parameters\n ----------\n key : Union[str, int]\n The name of the metadata piece to retrieve.\n\n Returns\n -------\n str\n The stored metadata value associated with the key.\n\n Raises\n ------\n ValueError\n If the `key` is not str type or contains whitespace or non\n alpha-numeric characters.\n KeyError\n If no metadata exists in the checkout with the provided key.\n ' try: tmpconman = (not self._is_conman) if tmpconman: self.__enter__() metaVal = self._labelTxn.get(self._mspecs[key]) meta_val = parsing.hash_meta_raw_val_from_db_val(metaVal) except KeyError: raise KeyError(f'The checkout does not contain metadata with key: {key}') finally: if tmpconman: self.__exit__() return meta_val<|docstring|>retrieve a piece of metadata from the checkout. Parameters ---------- key : Union[str, int] The name of the metadata piece to retrieve. Returns ------- str The stored metadata value associated with the key. Raises ------ ValueError If the `key` is not str type or contains whitespace or non alpha-numeric characters. KeyError If no metadata exists in the checkout with the provided key.<|endoftext|>
60b303d25f7255fec363c8176ccd166b8ab337bfc33d2265de88afa9a78100c3
def __init__(self, *args, **kwargs): 'Developer documentation of init method\n\n Parameters\n ----------\n *args\n Arguments passed to :class:`MetadataReader`\n **kwargs\n KeyWord arguments passed to :class:`MetadataReader`\n ' super().__init__(*args, **kwargs) self._dataenv: lmdb.Environment = kwargs['dataenv'] self._dataTxn: Optional[lmdb.Transaction] = None
Developer documentation of init method Parameters ---------- *args Arguments passed to :class:`MetadataReader` **kwargs KeyWord arguments passed to :class:`MetadataReader`
src/hangar/metadata.py
__init__
niranjana687/hangar-py
1
python
def __init__(self, *args, **kwargs): 'Developer documentation of init method\n\n Parameters\n ----------\n *args\n Arguments passed to :class:`MetadataReader`\n **kwargs\n KeyWord arguments passed to :class:`MetadataReader`\n ' super().__init__(*args, **kwargs) self._dataenv: lmdb.Environment = kwargs['dataenv'] self._dataTxn: Optional[lmdb.Transaction] = None
def __init__(self, *args, **kwargs): 'Developer documentation of init method\n\n Parameters\n ----------\n *args\n Arguments passed to :class:`MetadataReader`\n **kwargs\n KeyWord arguments passed to :class:`MetadataReader`\n ' super().__init__(*args, **kwargs) self._dataenv: lmdb.Environment = kwargs['dataenv'] self._dataTxn: Optional[lmdb.Transaction] = None<|docstring|>Developer documentation of init method Parameters ---------- *args Arguments passed to :class:`MetadataReader` **kwargs KeyWord arguments passed to :class:`MetadataReader`<|endoftext|>
8f1470bc26d87a2a2adf2059910b056cbf8a13d99acc7f99c30a168bdbc7cf40
def __setitem__(self, key: Union[(str, int)], value: str) -> Union[(str, int)]: 'Store a key/value pair as metadata. Convenience method to :meth:`add`.\n\n .. seealso:: :meth:`add`\n\n Parameters\n ----------\n key : Union[str, int]\n name of the key to add as metadata\n value : string\n value to add as metadata\n\n Returns\n -------\n Union[str, int]\n key of the stored metadata sample (assuming operation was successful)\n ' return self.add(key, value)
Store a key/value pair as metadata. Convenience method to :meth:`add`. .. seealso:: :meth:`add` Parameters ---------- key : Union[str, int] name of the key to add as metadata value : string value to add as metadata Returns ------- Union[str, int] key of the stored metadata sample (assuming operation was successful)
src/hangar/metadata.py
__setitem__
niranjana687/hangar-py
1
python
def __setitem__(self, key: Union[(str, int)], value: str) -> Union[(str, int)]: 'Store a key/value pair as metadata. Convenience method to :meth:`add`.\n\n .. seealso:: :meth:`add`\n\n Parameters\n ----------\n key : Union[str, int]\n name of the key to add as metadata\n value : string\n value to add as metadata\n\n Returns\n -------\n Union[str, int]\n key of the stored metadata sample (assuming operation was successful)\n ' return self.add(key, value)
def __setitem__(self, key: Union[(str, int)], value: str) -> Union[(str, int)]: 'Store a key/value pair as metadata. Convenience method to :meth:`add`.\n\n .. seealso:: :meth:`add`\n\n Parameters\n ----------\n key : Union[str, int]\n name of the key to add as metadata\n value : string\n value to add as metadata\n\n Returns\n -------\n Union[str, int]\n key of the stored metadata sample (assuming operation was successful)\n ' return self.add(key, value)<|docstring|>Store a key/value pair as metadata. Convenience method to :meth:`add`. .. seealso:: :meth:`add` Parameters ---------- key : Union[str, int] name of the key to add as metadata value : string value to add as metadata Returns ------- Union[str, int] key of the stored metadata sample (assuming operation was successful)<|endoftext|>
809e7744b5f7dc786bc5f9cb6ffb1e9b256d60cbb4f4aeb014ffc83e28de9606
def __delitem__(self, key: Union[(str, int)]) -> Union[(str, int)]: 'Remove a key/value pair from metadata. Convenience method to :meth:`remove`.\n\n .. seealso:: :meth:`remove` for the function this calls into.\n\n Parameters\n ----------\n key : Union[str, int]\n Name of the metadata piece to remove.\n\n Returns\n -------\n Union[str, int]\n Metadata key removed from the checkout (assuming operation successful)\n ' return self.remove(key)
Remove a key/value pair from metadata. Convenience method to :meth:`remove`. .. seealso:: :meth:`remove` for the function this calls into. Parameters ---------- key : Union[str, int] Name of the metadata piece to remove. Returns ------- Union[str, int] Metadata key removed from the checkout (assuming operation successful)
src/hangar/metadata.py
__delitem__
niranjana687/hangar-py
1
python
def __delitem__(self, key: Union[(str, int)]) -> Union[(str, int)]: 'Remove a key/value pair from metadata. Convenience method to :meth:`remove`.\n\n .. seealso:: :meth:`remove` for the function this calls into.\n\n Parameters\n ----------\n key : Union[str, int]\n Name of the metadata piece to remove.\n\n Returns\n -------\n Union[str, int]\n Metadata key removed from the checkout (assuming operation successful)\n ' return self.remove(key)
def __delitem__(self, key: Union[(str, int)]) -> Union[(str, int)]: 'Remove a key/value pair from metadata. Convenience method to :meth:`remove`.\n\n .. seealso:: :meth:`remove` for the function this calls into.\n\n Parameters\n ----------\n key : Union[str, int]\n Name of the metadata piece to remove.\n\n Returns\n -------\n Union[str, int]\n Metadata key removed from the checkout (assuming operation successful)\n ' return self.remove(key)<|docstring|>Remove a key/value pair from metadata. Convenience method to :meth:`remove`. .. seealso:: :meth:`remove` for the function this calls into. Parameters ---------- key : Union[str, int] Name of the metadata piece to remove. Returns ------- Union[str, int] Metadata key removed from the checkout (assuming operation successful)<|endoftext|>
9d139ef6f6ce72a378d5bb4a5a878344d7967c030b51d43c2951414ae0fc101f
def add(self, key: Union[(str, int)], value: str) -> Union[(str, int)]: 'Add a piece of metadata to the staging area of the next commit.\n\n Parameters\n ----------\n key : Union[str, int]\n Name of the metadata piece, alphanumeric ascii characters only\n value : string\n Metadata value to store in the repository, any length of valid\n ascii characters.\n\n Returns\n -------\n Union[str, int]\n The name of the metadata key written to the database if the\n operation succeeded.\n\n Raises\n ------\n ValueError\n If the `key` contains any whitespace or non alpha-numeric characters.\n ValueError\n If the `value` contains any non ascii characters.\n ' try: if (not is_suitable_user_key(key)): raise ValueError(f'Metadata key: {key} of type: {type(key)} invalid. Must be int ascii string with only alpha-numeric / "." "_" "-" characters.') elif (not (isinstance(value, str) and is_ascii(value))): raise ValueError(f'Metadata Value: `{value}` not allowed. Must be ascii-only string') except ValueError as e: raise e from None try: tmpconman = (not self._is_conman) if tmpconman: self.__enter__() val_hash = hashlib.blake2b(value.encode(), digest_size=20).hexdigest() hashKey = parsing.hash_meta_db_key_from_raw_key(val_hash) metaRecKey = parsing.metadata_record_db_key_from_raw_key(key) metaRecVal = parsing.metadata_record_db_val_from_raw_val(val_hash) existingMetaRecVal = self._dataTxn.get(metaRecKey, default=False) if existingMetaRecVal: existingMetaRec = parsing.metadata_record_raw_val_from_db_val(existingMetaRecVal) if (val_hash == existingMetaRec.meta_hash): return key existingHashVal = self._labelTxn.get(hashKey, default=False) if (existingHashVal is False): hashVal = parsing.hash_meta_db_val_from_raw_val(value) self._labelTxn.put(hashKey, hashVal) self._dataTxn.put(metaRecKey, metaRecVal) self._mspecs[key] = hashKey finally: if tmpconman: self.__exit__() return key
Add a piece of metadata to the staging area of the next commit. Parameters ---------- key : Union[str, int] Name of the metadata piece, alphanumeric ascii characters only value : string Metadata value to store in the repository, any length of valid ascii characters. Returns ------- Union[str, int] The name of the metadata key written to the database if the operation succeeded. Raises ------ ValueError If the `key` contains any whitespace or non alpha-numeric characters. ValueError If the `value` contains any non ascii characters.
src/hangar/metadata.py
add
niranjana687/hangar-py
1
python
def add(self, key: Union[(str, int)], value: str) -> Union[(str, int)]: 'Add a piece of metadata to the staging area of the next commit.\n\n Parameters\n ----------\n key : Union[str, int]\n Name of the metadata piece, alphanumeric ascii characters only\n value : string\n Metadata value to store in the repository, any length of valid\n ascii characters.\n\n Returns\n -------\n Union[str, int]\n The name of the metadata key written to the database if the\n operation succeeded.\n\n Raises\n ------\n ValueError\n If the `key` contains any whitespace or non alpha-numeric characters.\n ValueError\n If the `value` contains any non ascii characters.\n ' try: if (not is_suitable_user_key(key)): raise ValueError(f'Metadata key: {key} of type: {type(key)} invalid. Must be int ascii string with only alpha-numeric / "." "_" "-" characters.') elif (not (isinstance(value, str) and is_ascii(value))): raise ValueError(f'Metadata Value: `{value}` not allowed. Must be ascii-only string') except ValueError as e: raise e from None try: tmpconman = (not self._is_conman) if tmpconman: self.__enter__() val_hash = hashlib.blake2b(value.encode(), digest_size=20).hexdigest() hashKey = parsing.hash_meta_db_key_from_raw_key(val_hash) metaRecKey = parsing.metadata_record_db_key_from_raw_key(key) metaRecVal = parsing.metadata_record_db_val_from_raw_val(val_hash) existingMetaRecVal = self._dataTxn.get(metaRecKey, default=False) if existingMetaRecVal: existingMetaRec = parsing.metadata_record_raw_val_from_db_val(existingMetaRecVal) if (val_hash == existingMetaRec.meta_hash): return key existingHashVal = self._labelTxn.get(hashKey, default=False) if (existingHashVal is False): hashVal = parsing.hash_meta_db_val_from_raw_val(value) self._labelTxn.put(hashKey, hashVal) self._dataTxn.put(metaRecKey, metaRecVal) self._mspecs[key] = hashKey finally: if tmpconman: self.__exit__() return key
def add(self, key: Union[(str, int)], value: str) -> Union[(str, int)]: 'Add a piece of metadata to the staging area of the next commit.\n\n Parameters\n ----------\n key : Union[str, int]\n Name of the metadata piece, alphanumeric ascii characters only\n value : string\n Metadata value to store in the repository, any length of valid\n ascii characters.\n\n Returns\n -------\n Union[str, int]\n The name of the metadata key written to the database if the\n operation succeeded.\n\n Raises\n ------\n ValueError\n If the `key` contains any whitespace or non alpha-numeric characters.\n ValueError\n If the `value` contains any non ascii characters.\n ' try: if (not is_suitable_user_key(key)): raise ValueError(f'Metadata key: {key} of type: {type(key)} invalid. Must be int ascii string with only alpha-numeric / "." "_" "-" characters.') elif (not (isinstance(value, str) and is_ascii(value))): raise ValueError(f'Metadata Value: `{value}` not allowed. Must be ascii-only string') except ValueError as e: raise e from None try: tmpconman = (not self._is_conman) if tmpconman: self.__enter__() val_hash = hashlib.blake2b(value.encode(), digest_size=20).hexdigest() hashKey = parsing.hash_meta_db_key_from_raw_key(val_hash) metaRecKey = parsing.metadata_record_db_key_from_raw_key(key) metaRecVal = parsing.metadata_record_db_val_from_raw_val(val_hash) existingMetaRecVal = self._dataTxn.get(metaRecKey, default=False) if existingMetaRecVal: existingMetaRec = parsing.metadata_record_raw_val_from_db_val(existingMetaRecVal) if (val_hash == existingMetaRec.meta_hash): return key existingHashVal = self._labelTxn.get(hashKey, default=False) if (existingHashVal is False): hashVal = parsing.hash_meta_db_val_from_raw_val(value) self._labelTxn.put(hashKey, hashVal) self._dataTxn.put(metaRecKey, metaRecVal) self._mspecs[key] = hashKey finally: if tmpconman: self.__exit__() return key<|docstring|>Add a piece of metadata to the staging area of the next commit. 
Parameters ---------- key : Union[str, int] Name of the metadata piece, alphanumeric ascii characters only value : string Metadata value to store in the repository, any length of valid ascii characters. Returns ------- Union[str, int] The name of the metadata key written to the database if the operation succeeded. Raises ------ ValueError If the `key` contains any whitespace or non alpha-numeric characters. ValueError If the `value` contains any non ascii characters.<|endoftext|>
b3d4ff0e22bd64828107e76a6d3fcb72890feed3d38dafa0ccf62072273318c4
def remove(self, key: Union[(str, int)]) -> Union[(str, int)]: 'Remove a piece of metadata from the staging area of the next commit.\n\n Parameters\n ----------\n key : Union[str, int]\n Metadata name to remove.\n\n Returns\n -------\n Union[str, int]\n Name of the metadata key/value pair removed, if the operation was\n successful.\n\n Raises\n ------\n ValueError\n If the key provided is not string type and containing only\n ascii-alphanumeric characters.\n KeyError\n If the checkout does not contain metadata with the provided key.\n ' try: tmpconman = (not self._is_conman) if tmpconman: self.__enter__() if (not is_suitable_user_key(key)): msg = f'HANGAR VALUE ERROR:: metadata key: `{key}` not allowed. Must be strcontaining alpha-numeric or "." "_" "-" ascii characters (no whitespace).' raise ValueError(msg) metaRecKey = parsing.metadata_record_db_key_from_raw_key(key) delete_succeeded = self._dataTxn.delete(metaRecKey) if (delete_succeeded is False): msg = f'HANGAR KEY ERROR:: No metadata exists with key: {key}' raise KeyError(msg) del self._mspecs[key] except (KeyError, ValueError) as e: raise e from None finally: if tmpconman: self.__exit__() return key
Remove a piece of metadata from the staging area of the next commit. Parameters ---------- key : Union[str, int] Metadata name to remove. Returns ------- Union[str, int] Name of the metadata key/value pair removed, if the operation was successful. Raises ------ ValueError If the key provided is not string type and containing only ascii-alphanumeric characters. KeyError If the checkout does not contain metadata with the provided key.
src/hangar/metadata.py
remove
niranjana687/hangar-py
1
python
def remove(self, key: Union[(str, int)]) -> Union[(str, int)]: 'Remove a piece of metadata from the staging area of the next commit.\n\n Parameters\n ----------\n key : Union[str, int]\n Metadata name to remove.\n\n Returns\n -------\n Union[str, int]\n Name of the metadata key/value pair removed, if the operation was\n successful.\n\n Raises\n ------\n ValueError\n If the key provided is not string type and containing only\n ascii-alphanumeric characters.\n KeyError\n If the checkout does not contain metadata with the provided key.\n ' try: tmpconman = (not self._is_conman) if tmpconman: self.__enter__() if (not is_suitable_user_key(key)): msg = f'HANGAR VALUE ERROR:: metadata key: `{key}` not allowed. Must be strcontaining alpha-numeric or "." "_" "-" ascii characters (no whitespace).' raise ValueError(msg) metaRecKey = parsing.metadata_record_db_key_from_raw_key(key) delete_succeeded = self._dataTxn.delete(metaRecKey) if (delete_succeeded is False): msg = f'HANGAR KEY ERROR:: No metadata exists with key: {key}' raise KeyError(msg) del self._mspecs[key] except (KeyError, ValueError) as e: raise e from None finally: if tmpconman: self.__exit__() return key
def remove(self, key: Union[(str, int)]) -> Union[(str, int)]: 'Remove a piece of metadata from the staging area of the next commit.\n\n Parameters\n ----------\n key : Union[str, int]\n Metadata name to remove.\n\n Returns\n -------\n Union[str, int]\n Name of the metadata key/value pair removed, if the operation was\n successful.\n\n Raises\n ------\n ValueError\n If the key provided is not string type and containing only\n ascii-alphanumeric characters.\n KeyError\n If the checkout does not contain metadata with the provided key.\n ' try: tmpconman = (not self._is_conman) if tmpconman: self.__enter__() if (not is_suitable_user_key(key)): msg = f'HANGAR VALUE ERROR:: metadata key: `{key}` not allowed. Must be strcontaining alpha-numeric or "." "_" "-" ascii characters (no whitespace).' raise ValueError(msg) metaRecKey = parsing.metadata_record_db_key_from_raw_key(key) delete_succeeded = self._dataTxn.delete(metaRecKey) if (delete_succeeded is False): msg = f'HANGAR KEY ERROR:: No metadata exists with key: {key}' raise KeyError(msg) del self._mspecs[key] except (KeyError, ValueError) as e: raise e from None finally: if tmpconman: self.__exit__() return key<|docstring|>Remove a piece of metadata from the staging area of the next commit. Parameters ---------- key : Union[str, int] Metadata name to remove. Returns ------- Union[str, int] Name of the metadata key/value pair removed, if the operation was successful. Raises ------ ValueError If the key provided is not string type and containing only ascii-alphanumeric characters. KeyError If the checkout does not contain metadata with the provided key.<|endoftext|>
7051f8d2ceab3b23b2bd52f8f59f7f823cfe2c3d2956a15b21ac13a51e042c50
def __init__(self, in_dim=((1024 * 8) * 10), feat_dim=256, num_layers=2, rot_dim=4, norm='none', num_gn_groups=32, act='leaky_relu', num_classes=1): '\n rot_dim: 4 for quaternion, 6 for rot6d\n num_classes: default 1 (either single class or class-agnostic)\n ' super().__init__() self.norm = get_norm(norm, feat_dim, num_gn_groups=num_gn_groups) self.act_func = act_func = get_nn_act_func(act) self.num_classes = num_classes self.rot_dim = rot_dim self.linears = nn.ModuleList() for _i in range(num_layers): _in_dim = (in_dim if (_i == 0) else feat_dim) self.linears.append(nn.Linear(_in_dim, feat_dim)) self.linears.append(get_norm(norm, feat_dim, num_gn_groups=num_gn_groups)) self.linears.append(act_func) self.fc_r = nn.Linear(feat_dim, (rot_dim * num_classes)) self.fc_t = nn.Linear(feat_dim, (3 * num_classes)) self._init_weights()
rot_dim: 4 for quaternion, 6 for rot6d num_classes: default 1 (either single class or class-agnostic)
core/deepim/models/heads/fc_rot_trans_head.py
__init__
THU-DA-6D-Pose-Group/self6dpp
33
python
def __init__(self, in_dim=((1024 * 8) * 10), feat_dim=256, num_layers=2, rot_dim=4, norm='none', num_gn_groups=32, act='leaky_relu', num_classes=1): '\n rot_dim: 4 for quaternion, 6 for rot6d\n num_classes: default 1 (either single class or class-agnostic)\n ' super().__init__() self.norm = get_norm(norm, feat_dim, num_gn_groups=num_gn_groups) self.act_func = act_func = get_nn_act_func(act) self.num_classes = num_classes self.rot_dim = rot_dim self.linears = nn.ModuleList() for _i in range(num_layers): _in_dim = (in_dim if (_i == 0) else feat_dim) self.linears.append(nn.Linear(_in_dim, feat_dim)) self.linears.append(get_norm(norm, feat_dim, num_gn_groups=num_gn_groups)) self.linears.append(act_func) self.fc_r = nn.Linear(feat_dim, (rot_dim * num_classes)) self.fc_t = nn.Linear(feat_dim, (3 * num_classes)) self._init_weights()
def __init__(self, in_dim=((1024 * 8) * 10), feat_dim=256, num_layers=2, rot_dim=4, norm='none', num_gn_groups=32, act='leaky_relu', num_classes=1): '\n rot_dim: 4 for quaternion, 6 for rot6d\n num_classes: default 1 (either single class or class-agnostic)\n ' super().__init__() self.norm = get_norm(norm, feat_dim, num_gn_groups=num_gn_groups) self.act_func = act_func = get_nn_act_func(act) self.num_classes = num_classes self.rot_dim = rot_dim self.linears = nn.ModuleList() for _i in range(num_layers): _in_dim = (in_dim if (_i == 0) else feat_dim) self.linears.append(nn.Linear(_in_dim, feat_dim)) self.linears.append(get_norm(norm, feat_dim, num_gn_groups=num_gn_groups)) self.linears.append(act_func) self.fc_r = nn.Linear(feat_dim, (rot_dim * num_classes)) self.fc_t = nn.Linear(feat_dim, (3 * num_classes)) self._init_weights()<|docstring|>rot_dim: 4 for quaternion, 6 for rot6d num_classes: default 1 (either single class or class-agnostic)<|endoftext|>
42d9d2717b6a7da694d7745a8768636d8a472f2905e3c93b61854ee16b8ad5cc
def forward(self, x): '\n x: should be flattened\n ' for _layer in self.linears: x = _layer(x) rot = self.fc_r(x) trans = self.fc_t(x) return (rot, trans)
x: should be flattened
core/deepim/models/heads/fc_rot_trans_head.py
forward
THU-DA-6D-Pose-Group/self6dpp
33
python
def forward(self, x): '\n \n ' for _layer in self.linears: x = _layer(x) rot = self.fc_r(x) trans = self.fc_t(x) return (rot, trans)
def forward(self, x): '\n \n ' for _layer in self.linears: x = _layer(x) rot = self.fc_r(x) trans = self.fc_t(x) return (rot, trans)<|docstring|>x: should be flattened<|endoftext|>
1dd7284e34d18c974c95692aedfa081c93717fb2269f5fbcdfd649cf9291a97d
def get(self, request, format=None): ' method for handling get request to this class ' an_apiview = ['This is an API View Class', 'We can write http requests like Post, get , put, patch and delete', 'Urls are created in urls.py of the api app , creates using path() ', 'This is used when full control is needed in the API logic', 'Lets have a look, shall we ?'] return Response({'about': 'Hello there ', 'content': an_apiview})
method for handling get request to this class
profiles_api/views.py
get
john7ric/profiles_rest_api
0
python
def get(self, request, format=None): ' ' an_apiview = ['This is an API View Class', 'We can write http requests like Post, get , put, patch and delete', 'Urls are created in urls.py of the api app , creates using path() ', 'This is used when full control is needed in the API logic', 'Lets have a look, shall we ?'] return Response({'about': 'Hello there ', 'content': an_apiview})
def get(self, request, format=None): ' ' an_apiview = ['This is an API View Class', 'We can write http requests like Post, get , put, patch and delete', 'Urls are created in urls.py of the api app , creates using path() ', 'This is used when full control is needed in the API logic', 'Lets have a look, shall we ?'] return Response({'about': 'Hello there ', 'content': an_apiview})<|docstring|>method for handling get request to this class<|endoftext|>
18c99d952f37855a50eef8906be281f09508c62176baecd36df192f1d95cf6b2
def post(self, request): ' method handling the post requests to this view ' serializer = self.serializer_class(data=request.data) if serializer.is_valid(): name = serializer.validated_data.get('name') age = serializer.validated_data.get('age') message_string = (((('Hello ' + name) + ' you are ') + str(age)) + ' years old') return Response({'message': message_string}) else: return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
method handling the post requests to this view
profiles_api/views.py
post
john7ric/profiles_rest_api
0
python
def post(self, request): ' ' serializer = self.serializer_class(data=request.data) if serializer.is_valid(): name = serializer.validated_data.get('name') age = serializer.validated_data.get('age') message_string = (((('Hello ' + name) + ' you are ') + str(age)) + ' years old') return Response({'message': message_string}) else: return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def post(self, request): ' ' serializer = self.serializer_class(data=request.data) if serializer.is_valid(): name = serializer.validated_data.get('name') age = serializer.validated_data.get('age') message_string = (((('Hello ' + name) + ' you are ') + str(age)) + ' years old') return Response({'message': message_string}) else: return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)<|docstring|>method handling the post requests to this view<|endoftext|>
39366102ec826ece3114d30a8c279dca1fa2ec7b2354be0776d0650c14913978
def patch(self, request, pk=None): ' method for handling patch requests to this class' return Response({'method': 'Patch'})
method for handling patch requests to this class
profiles_api/views.py
patch
john7ric/profiles_rest_api
0
python
def patch(self, request, pk=None): ' ' return Response({'method': 'Patch'})
def patch(self, request, pk=None): ' ' return Response({'method': 'Patch'})<|docstring|>method for handling patch requests to this class<|endoftext|>
9069099907d50b887421bf70e936246ac448d0d66c3b9d8469fac98ecf3eafd7
def put(self, reaction, pk=None): 'method for handling put' return Response({'message': 'PUT'})
method for handling put
profiles_api/views.py
put
john7ric/profiles_rest_api
0
python
def put(self, reaction, pk=None): return Response({'message': 'PUT'})
def put(self, reaction, pk=None): return Response({'message': 'PUT'})<|docstring|>method for handling put<|endoftext|>
33483db04edea69ae3eff49eb2a7f3c617aed40af9f0532fd6bc6b155fecdc14
def delete(self, request, pk=None): 'method to handle delete' return Response({'message': 'Delete'})
method to handle delete
profiles_api/views.py
delete
john7ric/profiles_rest_api
0
python
def delete(self, request, pk=None): return Response({'message': 'Delete'})
def delete(self, request, pk=None): return Response({'message': 'Delete'})<|docstring|>method to handle delete<|endoftext|>
0d75c91141200d80548c20bf06c1eca65bef1c2c1b320b9a4abd0aaf4ee81849
def create(self, request): ' to create a new object ' serializer = self.serializer_class(data=request.data) if serializer.is_valid(): name = serializer.validated_data.get('name') age = serializer.validated_data.get('age') return Response({'message': f'Hello {name} you are {age} years old'}) else: return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
to create a new object
profiles_api/views.py
create
john7ric/profiles_rest_api
0
python
def create(self, request): ' ' serializer = self.serializer_class(data=request.data) if serializer.is_valid(): name = serializer.validated_data.get('name') age = serializer.validated_data.get('age') return Response({'message': f'Hello {name} you are {age} years old'}) else: return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def create(self, request): ' ' serializer = self.serializer_class(data=request.data) if serializer.is_valid(): name = serializer.validated_data.get('name') age = serializer.validated_data.get('age') return Response({'message': f'Hello {name} you are {age} years old'}) else: return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)<|docstring|>to create a new object<|endoftext|>
8ad125c15c26098468e3771f3a835c86aeeac10fafc794c4a9ddeb1c162f3914
def retrieve(self, request, pk=None): ' doc string to fetch an object with an ID' return Response({'message': 'retreive is called'})
doc string to fetch an object with an ID
profiles_api/views.py
retrieve
john7ric/profiles_rest_api
0
python
def retrieve(self, request, pk=None): ' ' return Response({'message': 'retreive is called'})
def retrieve(self, request, pk=None): ' ' return Response({'message': 'retreive is called'})<|docstring|>doc string to fetch an object with an ID<|endoftext|>
93fdd56937209e73b4c1d213fec00f7eea905da338e93b717efd2a567ae8de38
def update(self, request, pk=None): ' method to update a view ' return Response({'message': 'methode called is update'})
method to update a view
profiles_api/views.py
update
john7ric/profiles_rest_api
0
python
def update(self, request, pk=None): ' ' return Response({'message': 'methode called is update'})
def update(self, request, pk=None): ' ' return Response({'message': 'methode called is update'})<|docstring|>method to update a view<|endoftext|>
32f0031182373f4183aa6ec78a127f95c321ce599054b94f513e1451679bd122
def partial_update(self, request, pk=None): 'method for partial update ' return Response({'Message': 'Method patch'})
method for partial update
profiles_api/views.py
partial_update
john7ric/profiles_rest_api
0
python
def partial_update(self, request, pk=None): ' ' return Response({'Message': 'Method patch'})
def partial_update(self, request, pk=None): ' ' return Response({'Message': 'Method patch'})<|docstring|>method for partial update<|endoftext|>
2750bbc26e5ec635163580cadc28f3d31a842865e1c43b92b6011ee81adad56c
def destroy(self, request, pk=None): ' method to delete the object with ID' return Response({'message': 'HTTP Delete'})
method to delete the object with ID
profiles_api/views.py
destroy
john7ric/profiles_rest_api
0
python
def destroy(self, request, pk=None): ' ' return Response({'message': 'HTTP Delete'})
def destroy(self, request, pk=None): ' ' return Response({'message': 'HTTP Delete'})<|docstring|>method to delete the object with ID<|endoftext|>
73ebe0b0253af1c914deced650fdc4ce3fb688573a650a9a7ed0f0d50fbaff98
def quiz_callback(update: Update, context): 'Ask new question ' client_redis = context.bot_data['r'] if (update.message.text == 'Новый вопрос'): (quiz_line, quiz_content) = random.choice(list(quiz.items())) context.bot.send_message(chat_id=update.message.chat_id, text=quiz_content[0]) context.bot.send_message(chat_id=update.message.chat_id, text=quiz_content[1]['q']) client_redis.rpush(update.effective_user.id, quiz_line, quiz_content[1]['q'], quiz_content[1]['a']) elif (update.message.text == 'Сдаться'): context.bot.send_message(chat_id=update.message.chat_id, text='Правильный ответ') answer = client_redis.lrange(update.effective_user.id, (- 1), (- 1))[0] context.bot.send_message(chat_id=update.message.chat_id, text=answer) client_redis.delete(update.effective_user.id, 0, (- 1)) update.message.reply_text('Bye! I hope we can talk again some day.', reply_markup=ReplyKeyboardRemove()) else: answer = client_redis.lrange(update.effective_user.id, (- 1), (- 1))[0] if (update.message.text.lower() in answer.lower()): context.bot.send_message(chat_id=update.message.chat_id, text='Правильно! Поздравляю!') context.bot.send_message(chat_id=update.message.chat_id, text=answer) client_redis.delete(update.effective_user.id, 0, (- 1)) return context.bot.send_message(chat_id=update.message.chat_id, text='Неправильно\n Попробуешь ещё раз?"')
Ask new question
tg_bot.py
quiz_callback
psergal/quiz
0
python
def quiz_callback(update: Update, context): ' ' client_redis = context.bot_data['r'] if (update.message.text == 'Новый вопрос'): (quiz_line, quiz_content) = random.choice(list(quiz.items())) context.bot.send_message(chat_id=update.message.chat_id, text=quiz_content[0]) context.bot.send_message(chat_id=update.message.chat_id, text=quiz_content[1]['q']) client_redis.rpush(update.effective_user.id, quiz_line, quiz_content[1]['q'], quiz_content[1]['a']) elif (update.message.text == 'Сдаться'): context.bot.send_message(chat_id=update.message.chat_id, text='Правильный ответ') answer = client_redis.lrange(update.effective_user.id, (- 1), (- 1))[0] context.bot.send_message(chat_id=update.message.chat_id, text=answer) client_redis.delete(update.effective_user.id, 0, (- 1)) update.message.reply_text('Bye! I hope we can talk again some day.', reply_markup=ReplyKeyboardRemove()) else: answer = client_redis.lrange(update.effective_user.id, (- 1), (- 1))[0] if (update.message.text.lower() in answer.lower()): context.bot.send_message(chat_id=update.message.chat_id, text='Правильно! Поздравляю!') context.bot.send_message(chat_id=update.message.chat_id, text=answer) client_redis.delete(update.effective_user.id, 0, (- 1)) return context.bot.send_message(chat_id=update.message.chat_id, text='Неправильно\n Попробуешь ещё раз?"')
def quiz_callback(update: Update, context): ' ' client_redis = context.bot_data['r'] if (update.message.text == 'Новый вопрос'): (quiz_line, quiz_content) = random.choice(list(quiz.items())) context.bot.send_message(chat_id=update.message.chat_id, text=quiz_content[0]) context.bot.send_message(chat_id=update.message.chat_id, text=quiz_content[1]['q']) client_redis.rpush(update.effective_user.id, quiz_line, quiz_content[1]['q'], quiz_content[1]['a']) elif (update.message.text == 'Сдаться'): context.bot.send_message(chat_id=update.message.chat_id, text='Правильный ответ') answer = client_redis.lrange(update.effective_user.id, (- 1), (- 1))[0] context.bot.send_message(chat_id=update.message.chat_id, text=answer) client_redis.delete(update.effective_user.id, 0, (- 1)) update.message.reply_text('Bye! I hope we can talk again some day.', reply_markup=ReplyKeyboardRemove()) else: answer = client_redis.lrange(update.effective_user.id, (- 1), (- 1))[0] if (update.message.text.lower() in answer.lower()): context.bot.send_message(chat_id=update.message.chat_id, text='Правильно! Поздравляю!') context.bot.send_message(chat_id=update.message.chat_id, text=answer) client_redis.delete(update.effective_user.id, 0, (- 1)) return context.bot.send_message(chat_id=update.message.chat_id, text='Неправильно\n Попробуешь ещё раз?"')<|docstring|>Ask new question<|endoftext|>
cc3b6a5edfca873467e5b6f85189fc642d1c8fe8217ffa3476ec007899a1d5fb
def log_error(update: Update, context): 'Log Errors caused by Updates.' logger.warning('Update "%s" caused error "%s"', update, context.error, extra={'Update_err': True})
Log Errors caused by Updates.
tg_bot.py
log_error
psergal/quiz
0
python
def log_error(update: Update, context): logger.warning('Update "%s" caused error "%s"', update, context.error, extra={'Update_err': True})
def log_error(update: Update, context): logger.warning('Update "%s" caused error "%s"', update, context.error, extra={'Update_err': True})<|docstring|>Log Errors caused by Updates.<|endoftext|>
81f4efeedc05f8903e31e569ed7cb5ea6c9441aaa3a0418d105dcb8aab6825b7
def generate_img(self, batch): ' Return generated img\n\n :param batch: number of img\n :return: 0~255, uint, numpy array\n [original_a, fake_from_a, cycle_a, identity_a, original_b, fake_from_b, cycle_b, identity_b]\n ' def form_img(target_array): target_array = (((target_array + 1) / 2) * 255) target_array = target_array.astype(np.uint8) return target_array self.session.run([self.iterator_ini_a, self.iterator_ini_b], feed_dict={self.__tfrecord_a: ('%s/testA.tfrecord' % self.tfrecord_dir), self.__tfrecord_b: ('%s/testB.tfrecord' % self.tfrecord_dir), self.__batch: 1}) result = [] for b in range(batch): (img_a, img_b) = self.session.run([self.img_a, self.img_b]) imgs = self.session.run([self.fake_img_b, self.cycle_img_a, self.id_a, self.fake_img_a, self.cycle_img_b, self.id_b], feed_dict={self.__original_img_a: img_a, self.__original_img_b: img_b}) result.append([img_a.astype(np.uint8), form_img(imgs[0]), form_img(imgs[1]), form_img(imgs[2]), img_b.astype(np.uint8), form_img(imgs[3]), form_img(imgs[4]), form_img(imgs[5])]) return result
Return generated img :param batch: number of img :return: 0~255, uint, numpy array [original_a, fake_from_a, cycle_a, identity_a, original_b, fake_from_b, cycle_b, identity_b]
cycle_gan/cycle_gan.py
generate_img
asahi417/CycleGAN
2
python
def generate_img(self, batch): ' Return generated img\n\n :param batch: number of img\n :return: 0~255, uint, numpy array\n [original_a, fake_from_a, cycle_a, identity_a, original_b, fake_from_b, cycle_b, identity_b]\n ' def form_img(target_array): target_array = (((target_array + 1) / 2) * 255) target_array = target_array.astype(np.uint8) return target_array self.session.run([self.iterator_ini_a, self.iterator_ini_b], feed_dict={self.__tfrecord_a: ('%s/testA.tfrecord' % self.tfrecord_dir), self.__tfrecord_b: ('%s/testB.tfrecord' % self.tfrecord_dir), self.__batch: 1}) result = [] for b in range(batch): (img_a, img_b) = self.session.run([self.img_a, self.img_b]) imgs = self.session.run([self.fake_img_b, self.cycle_img_a, self.id_a, self.fake_img_a, self.cycle_img_b, self.id_b], feed_dict={self.__original_img_a: img_a, self.__original_img_b: img_b}) result.append([img_a.astype(np.uint8), form_img(imgs[0]), form_img(imgs[1]), form_img(imgs[2]), img_b.astype(np.uint8), form_img(imgs[3]), form_img(imgs[4]), form_img(imgs[5])]) return result
def generate_img(self, batch): ' Return generated img\n\n :param batch: number of img\n :return: 0~255, uint, numpy array\n [original_a, fake_from_a, cycle_a, identity_a, original_b, fake_from_b, cycle_b, identity_b]\n ' def form_img(target_array): target_array = (((target_array + 1) / 2) * 255) target_array = target_array.astype(np.uint8) return target_array self.session.run([self.iterator_ini_a, self.iterator_ini_b], feed_dict={self.__tfrecord_a: ('%s/testA.tfrecord' % self.tfrecord_dir), self.__tfrecord_b: ('%s/testB.tfrecord' % self.tfrecord_dir), self.__batch: 1}) result = [] for b in range(batch): (img_a, img_b) = self.session.run([self.img_a, self.img_b]) imgs = self.session.run([self.fake_img_b, self.cycle_img_a, self.id_a, self.fake_img_a, self.cycle_img_b, self.id_b], feed_dict={self.__original_img_a: img_a, self.__original_img_b: img_b}) result.append([img_a.astype(np.uint8), form_img(imgs[0]), form_img(imgs[1]), form_img(imgs[2]), img_b.astype(np.uint8), form_img(imgs[3]), form_img(imgs[4]), form_img(imgs[5])]) return result<|docstring|>Return generated img :param batch: number of img :return: 0~255, uint, numpy array [original_a, fake_from_a, cycle_a, identity_a, original_b, fake_from_b, cycle_b, identity_b]<|endoftext|>
5d97b0f084be77649c81d837956be2aa618f73b90fa7226e7554d08f1043565e
def shuffle_data(data, seed=None): 'shuffle array along first axis' np.random.seed(seed) np.random.shuffle(data) return data
shuffle array along first axis
cycle_gan/cycle_gan.py
shuffle_data
asahi417/CycleGAN
2
python
def shuffle_data(data, seed=None): np.random.seed(seed) np.random.shuffle(data) return data
def shuffle_data(data, seed=None): np.random.seed(seed) np.random.shuffle(data) return data<|docstring|>shuffle array along first axis<|endoftext|>
db4591388b26bde78bfdd9b0e6d6ca854c3c11544900a4d619fb49feba9e540d
def learning_rate_scheduler(current_lr, current_epoch): ' heuristic scheduler used in original paper ' bias = 2e-06 if (current_epoch > 100): return np.max([(current_lr - bias), 0]) else: return current_lr
heuristic scheduler used in original paper
cycle_gan/cycle_gan.py
learning_rate_scheduler
asahi417/CycleGAN
2
python
def learning_rate_scheduler(current_lr, current_epoch): ' ' bias = 2e-06 if (current_epoch > 100): return np.max([(current_lr - bias), 0]) else: return current_lr
def learning_rate_scheduler(current_lr, current_epoch): ' ' bias = 2e-06 if (current_epoch > 100): return np.max([(current_lr - bias), 0]) else: return current_lr<|docstring|>heuristic scheduler used in original paper<|endoftext|>
4483c85e5c90508f803cb779ca9905e24e2745e2edc8cb82d344fb8ea023741d
def store_the_master(s): "Define as 'MASTER_VARIABLES' those variables of the self object that\n do not belong in the list 'SIMULATION_VARIABLES'. Essentially, it includes\n those defined in hamiltonian_class.__init__(s).\n\n " s.MASTER_VARIABLES = [x for x in s.__dict__.keys() if (x not in s.SIMULATION_VARIABLES) if (x != 'SIMULATION_VARIABLES')] for (key, arg) in s.model_operators.items(): if (type(s.model_operators[key]) == list): if (type(s.model_operators[key][0]) == ndarray): s.model_operators[key] = [csr_matrix(_) for _ in s.model_operators[key]] if s.STORE_MASTER: with open(s.MASTER_DATAFILE_PATH, 'wb') as my_file: dump(s, my_file, HIGHEST_PROTOCOL)
Define as 'MASTER_VARIABLES' those variables of the self object that do not belong in the list 'SIMULATION_VARIABLES'. Essentially, it includes those defined in hamiltonian_class.__init__(s).
src/mother_classes.py
store_the_master
lorenzocardarelli/PyTeNC
0
python
def store_the_master(s): "Define as 'MASTER_VARIABLES' those variables of the self object that\n do not belong in the list 'SIMULATION_VARIABLES'. Essentially, it includes\n those defined in hamiltonian_class.__init__(s).\n\n " s.MASTER_VARIABLES = [x for x in s.__dict__.keys() if (x not in s.SIMULATION_VARIABLES) if (x != 'SIMULATION_VARIABLES')] for (key, arg) in s.model_operators.items(): if (type(s.model_operators[key]) == list): if (type(s.model_operators[key][0]) == ndarray): s.model_operators[key] = [csr_matrix(_) for _ in s.model_operators[key]] if s.STORE_MASTER: with open(s.MASTER_DATAFILE_PATH, 'wb') as my_file: dump(s, my_file, HIGHEST_PROTOCOL)
def store_the_master(s): "Define as 'MASTER_VARIABLES' those variables of the self object that\n do not belong in the list 'SIMULATION_VARIABLES'. Essentially, it includes\n those defined in hamiltonian_class.__init__(s).\n\n " s.MASTER_VARIABLES = [x for x in s.__dict__.keys() if (x not in s.SIMULATION_VARIABLES) if (x != 'SIMULATION_VARIABLES')] for (key, arg) in s.model_operators.items(): if (type(s.model_operators[key]) == list): if (type(s.model_operators[key][0]) == ndarray): s.model_operators[key] = [csr_matrix(_) for _ in s.model_operators[key]] if s.STORE_MASTER: with open(s.MASTER_DATAFILE_PATH, 'wb') as my_file: dump(s, my_file, HIGHEST_PROTOCOL)<|docstring|>Define as 'MASTER_VARIABLES' those variables of the self object that do not belong in the list 'SIMULATION_VARIABLES'. Essentially, it includes those defined in hamiltonian_class.__init__(s).<|endoftext|>
a53a1b9c856294e101f585b1e808f8edd9ebcfa592d31292e109015c47e18a2d
def initialize_and_update_simulation_parameters(s, SIM_PARAMS): 'Define (many) simulation parameters as self objects, from SIM_PARAMS.\n Store the algorithm and Hamiltonian input parameters as self objects.\n\n ' if (type(SIM_PARAMS['INIT_STATE_PARAMS']) is list): s.INITIAL_STATE_MATRIX = SIM_PARAMS['INIT_STATE_PARAMS'] elif (SIM_PARAMS['INIT_STATE_PARAMS'] == 'fixed'): s.set_standard_initial_state_matrix(SIM_PARAMS) elif (SIM_PARAMS['INIT_STATE_PARAMS'] == 'random'): s.INITIAL_STATE_MATRIX = [] OUTPUT_PARAMS = {'LOCAL_RUN': True, 'STORE_STATE': False, 'STORE_MASTER': False, 'INFO_EVERY_SWEEP_STEP': True, 'DISPLAY_RAM': False, 'DISPLAY_TIMERS': False, 'PKL_STORE_TIME_INTERVAL': 1, 'STDOUT_FLUSH_TIME_INTERVAL': 1} OUTPUT_PARAMS.update(SIM_PARAMS['OUTPUT_PARAMS']) for key in OUTPUT_PARAMS.keys(): setattr(s, key, OUTPUT_PARAMS[key]) ALG_PARAMS = {'POST_RUN_INSPECTION': False, 'INFINITE_SYSTEM_WARMUP': True, 'REQUIRED_CHAIN_LENGTH': 30, 'NUMBER_SWEEPS': 2, 'BOND_DIMENSION': 50, 'SCHMIDT_TOLERANCE': (10.0 ** (- 15)), 'LANCZOS_ALGORITHM': 'SCIPY', 'SCIPY_EIGSH_TOLERANCE': 0, 'KRYLOV_SPACE_DIMENSION': 200, 'ALWAYS_MINIMIZE': True, 'SELF_ATTRIBUTES': {'rek_value': '%.0E', 'rek_vector': '%.0E'}, 'INFOSTREAM_OPERATORS_SUMMED_OVER_ALL_SITES': {}, 'INFOSTREAM_OPERATORS_ACTING_ON_CENTRAL_SITES': [], 'LOCAL_OPERATORS_SUMMED_OVER_ALL_SITES': [], 'NON_LOCAL_OPERATORS_OR_LIST_LOCAL_EXPECTATION_VALUES': [], 'NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_ALL_SITES': [], 'NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_CENTRAL_SITES': [], 'NAMES_NORMAL_MATRIX_OPERATORS_FOR_CORRELATIONS_AND_LOCAL_EXPECTATION_VALUES': []} ALG_PARAMS.update(SIM_PARAMS['ALG_PARAMS']) for key in ALG_PARAMS.keys(): setattr(s, key, ALG_PARAMS[key]) if ALG_PARAMS['INFINITE_SYSTEM_WARMUP']: s.INITIAL_STATE_LENGTH = 2 else: s.INITIAL_STATE_LENGTH = s.REQUIRED_CHAIN_LENGTH s.SELF_ATTRIBUTES_TAGS = list(s.SELF_ATTRIBUTES.keys()) if s.POST_RUN_INSPECTION: s.NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_ALL_SITES = 
s.LOCAL_OPERATORS_SUMMED_OVER_ALL_SITES s.NAMES_NORMAL_MATRIX_OPERATORS_FOR_CORRELATIONS_AND_LOCAL_EXPECTATION_VALUES = s.NON_LOCAL_OPERATORS_OR_LIST_LOCAL_EXPECTATION_VALUES else: s.INFOSTREAM_OPERATORS_SUMMED_OVER_ALL_SITES['hamiltonian'] = '%.10f' s.NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_ALL_SITES = list(s.INFOSTREAM_OPERATORS_SUMMED_OVER_ALL_SITES.keys()) s.NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_CENTRAL_SITES = s.INFOSTREAM_OPERATORS_ACTING_ON_CENTRAL_SITES s.DATA_COLUMNS_TAG = [] s.DATA_COLUMNS_TAG += s.NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_ALL_SITES s.DATA_COLUMNS_TAG += [(_ + '_mid') for _ in s.NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_CENTRAL_SITES] s.DATA_COLUMNS_TAG += s.SELF_ATTRIBUTES_TAGS s.NAMES_ALL_ACTIVE_MATRIX_PRODUCT_OPERATORS = list((set(s.NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_ALL_SITES) | set(s.NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_CENTRAL_SITES))) s.H_PARAMS = SIM_PARAMS['H_PARAMS'] if (len(s.H_PARAMS['Commuting_Operators'].keys()) == 0): s.ABELIAN_SYMMETRIES = False else: s.ABELIAN_SYMMETRIES = True s.TOTAL_CHARGE = {} s.AVERAGE_CHARGE_PER_SITE = {} s.LIST_SYMMETRIES_NAMES = list(s.H_PARAMS['Commuting_Operators'].keys()) s.LIST_SYMMETRIC_OPERATORS_NAMES = [] for symmetry_name in s.LIST_SYMMETRIES_NAMES: if (symmetry_name == 'links_alignment'): s.LIST_SYMMETRIC_OPERATORS_NAMES.append('links_set_left') s.AVERAGE_CHARGE_PER_SITE['links_set_left'] = 0 s.LIST_SYMMETRIC_OPERATORS_NAMES.append('links_set_right') s.AVERAGE_CHARGE_PER_SITE['links_set_right'] = 0 else: s.LIST_SYMMETRIC_OPERATORS_NAMES.append(symmetry_name) try: s.AVERAGE_CHARGE_PER_SITE[symmetry_name] = s.H_PARAMS['Commuting_Operators'][symmetry_name]['Average_Charge'] s.TOTAL_CHARGE[symmetry_name] = int((s.AVERAGE_CHARGE_PER_SITE[symmetry_name] * s.REQUIRED_CHAIN_LENGTH)) except: s.TOTAL_CHARGE[symmetry_name] = s.H_PARAMS['Commuting_Operators'][symmetry_name]['Total_Charge'] s.AVERAGE_CHARGE_PER_SITE[symmetry_name] = (s.TOTAL_CHARGE[symmetry_name] / s.REQUIRED_CHAIN_LENGTH) 
s.number_tensor_contractions = {} s.number_tensor_contractions['matvec'] = 4 s.number_tensor_contractions['ltm_mpo_update'] = 3 s.number_tensor_contractions['rtm_mpo_update'] = 3 s.number_tensor_contractions['ltm_opt_update'] = 3 s.number_tensor_contractions['rtm_opt_update'] = 3 s.number_tensor_contractions['two_sites_svd'] = 1 s.HALF_REQUIRED_CHAIN_LENGTH = int((s.REQUIRED_CHAIN_LENGTH / 2))
Define (many) simulation parameters as self objects, from SIM_PARAMS. Store the algorithm and Hamiltonian input parameters as self objects.
src/mother_classes.py
initialize_and_update_simulation_parameters
lorenzocardarelli/PyTeNC
0
python
def initialize_and_update_simulation_parameters(s, SIM_PARAMS): 'Define (many) simulation parameters as self objects, from SIM_PARAMS.\n Store the algorithm and Hamiltonian input parameters as self objects.\n\n ' if (type(SIM_PARAMS['INIT_STATE_PARAMS']) is list): s.INITIAL_STATE_MATRIX = SIM_PARAMS['INIT_STATE_PARAMS'] elif (SIM_PARAMS['INIT_STATE_PARAMS'] == 'fixed'): s.set_standard_initial_state_matrix(SIM_PARAMS) elif (SIM_PARAMS['INIT_STATE_PARAMS'] == 'random'): s.INITIAL_STATE_MATRIX = [] OUTPUT_PARAMS = {'LOCAL_RUN': True, 'STORE_STATE': False, 'STORE_MASTER': False, 'INFO_EVERY_SWEEP_STEP': True, 'DISPLAY_RAM': False, 'DISPLAY_TIMERS': False, 'PKL_STORE_TIME_INTERVAL': 1, 'STDOUT_FLUSH_TIME_INTERVAL': 1} OUTPUT_PARAMS.update(SIM_PARAMS['OUTPUT_PARAMS']) for key in OUTPUT_PARAMS.keys(): setattr(s, key, OUTPUT_PARAMS[key]) ALG_PARAMS = {'POST_RUN_INSPECTION': False, 'INFINITE_SYSTEM_WARMUP': True, 'REQUIRED_CHAIN_LENGTH': 30, 'NUMBER_SWEEPS': 2, 'BOND_DIMENSION': 50, 'SCHMIDT_TOLERANCE': (10.0 ** (- 15)), 'LANCZOS_ALGORITHM': 'SCIPY', 'SCIPY_EIGSH_TOLERANCE': 0, 'KRYLOV_SPACE_DIMENSION': 200, 'ALWAYS_MINIMIZE': True, 'SELF_ATTRIBUTES': {'rek_value': '%.0E', 'rek_vector': '%.0E'}, 'INFOSTREAM_OPERATORS_SUMMED_OVER_ALL_SITES': {}, 'INFOSTREAM_OPERATORS_ACTING_ON_CENTRAL_SITES': [], 'LOCAL_OPERATORS_SUMMED_OVER_ALL_SITES': [], 'NON_LOCAL_OPERATORS_OR_LIST_LOCAL_EXPECTATION_VALUES': [], 'NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_ALL_SITES': [], 'NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_CENTRAL_SITES': [], 'NAMES_NORMAL_MATRIX_OPERATORS_FOR_CORRELATIONS_AND_LOCAL_EXPECTATION_VALUES': []} ALG_PARAMS.update(SIM_PARAMS['ALG_PARAMS']) for key in ALG_PARAMS.keys(): setattr(s, key, ALG_PARAMS[key]) if ALG_PARAMS['INFINITE_SYSTEM_WARMUP']: s.INITIAL_STATE_LENGTH = 2 else: s.INITIAL_STATE_LENGTH = s.REQUIRED_CHAIN_LENGTH s.SELF_ATTRIBUTES_TAGS = list(s.SELF_ATTRIBUTES.keys()) if s.POST_RUN_INSPECTION: s.NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_ALL_SITES = 
s.LOCAL_OPERATORS_SUMMED_OVER_ALL_SITES s.NAMES_NORMAL_MATRIX_OPERATORS_FOR_CORRELATIONS_AND_LOCAL_EXPECTATION_VALUES = s.NON_LOCAL_OPERATORS_OR_LIST_LOCAL_EXPECTATION_VALUES else: s.INFOSTREAM_OPERATORS_SUMMED_OVER_ALL_SITES['hamiltonian'] = '%.10f' s.NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_ALL_SITES = list(s.INFOSTREAM_OPERATORS_SUMMED_OVER_ALL_SITES.keys()) s.NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_CENTRAL_SITES = s.INFOSTREAM_OPERATORS_ACTING_ON_CENTRAL_SITES s.DATA_COLUMNS_TAG = [] s.DATA_COLUMNS_TAG += s.NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_ALL_SITES s.DATA_COLUMNS_TAG += [(_ + '_mid') for _ in s.NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_CENTRAL_SITES] s.DATA_COLUMNS_TAG += s.SELF_ATTRIBUTES_TAGS s.NAMES_ALL_ACTIVE_MATRIX_PRODUCT_OPERATORS = list((set(s.NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_ALL_SITES) | set(s.NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_CENTRAL_SITES))) s.H_PARAMS = SIM_PARAMS['H_PARAMS'] if (len(s.H_PARAMS['Commuting_Operators'].keys()) == 0): s.ABELIAN_SYMMETRIES = False else: s.ABELIAN_SYMMETRIES = True s.TOTAL_CHARGE = {} s.AVERAGE_CHARGE_PER_SITE = {} s.LIST_SYMMETRIES_NAMES = list(s.H_PARAMS['Commuting_Operators'].keys()) s.LIST_SYMMETRIC_OPERATORS_NAMES = [] for symmetry_name in s.LIST_SYMMETRIES_NAMES: if (symmetry_name == 'links_alignment'): s.LIST_SYMMETRIC_OPERATORS_NAMES.append('links_set_left') s.AVERAGE_CHARGE_PER_SITE['links_set_left'] = 0 s.LIST_SYMMETRIC_OPERATORS_NAMES.append('links_set_right') s.AVERAGE_CHARGE_PER_SITE['links_set_right'] = 0 else: s.LIST_SYMMETRIC_OPERATORS_NAMES.append(symmetry_name) try: s.AVERAGE_CHARGE_PER_SITE[symmetry_name] = s.H_PARAMS['Commuting_Operators'][symmetry_name]['Average_Charge'] s.TOTAL_CHARGE[symmetry_name] = int((s.AVERAGE_CHARGE_PER_SITE[symmetry_name] * s.REQUIRED_CHAIN_LENGTH)) except: s.TOTAL_CHARGE[symmetry_name] = s.H_PARAMS['Commuting_Operators'][symmetry_name]['Total_Charge'] s.AVERAGE_CHARGE_PER_SITE[symmetry_name] = (s.TOTAL_CHARGE[symmetry_name] / s.REQUIRED_CHAIN_LENGTH) 
s.number_tensor_contractions = {} s.number_tensor_contractions['matvec'] = 4 s.number_tensor_contractions['ltm_mpo_update'] = 3 s.number_tensor_contractions['rtm_mpo_update'] = 3 s.number_tensor_contractions['ltm_opt_update'] = 3 s.number_tensor_contractions['rtm_opt_update'] = 3 s.number_tensor_contractions['two_sites_svd'] = 1 s.HALF_REQUIRED_CHAIN_LENGTH = int((s.REQUIRED_CHAIN_LENGTH / 2))
def initialize_and_update_simulation_parameters(s, SIM_PARAMS): 'Define (many) simulation parameters as self objects, from SIM_PARAMS.\n Store the algorithm and Hamiltonian input parameters as self objects.\n\n ' if (type(SIM_PARAMS['INIT_STATE_PARAMS']) is list): s.INITIAL_STATE_MATRIX = SIM_PARAMS['INIT_STATE_PARAMS'] elif (SIM_PARAMS['INIT_STATE_PARAMS'] == 'fixed'): s.set_standard_initial_state_matrix(SIM_PARAMS) elif (SIM_PARAMS['INIT_STATE_PARAMS'] == 'random'): s.INITIAL_STATE_MATRIX = [] OUTPUT_PARAMS = {'LOCAL_RUN': True, 'STORE_STATE': False, 'STORE_MASTER': False, 'INFO_EVERY_SWEEP_STEP': True, 'DISPLAY_RAM': False, 'DISPLAY_TIMERS': False, 'PKL_STORE_TIME_INTERVAL': 1, 'STDOUT_FLUSH_TIME_INTERVAL': 1} OUTPUT_PARAMS.update(SIM_PARAMS['OUTPUT_PARAMS']) for key in OUTPUT_PARAMS.keys(): setattr(s, key, OUTPUT_PARAMS[key]) ALG_PARAMS = {'POST_RUN_INSPECTION': False, 'INFINITE_SYSTEM_WARMUP': True, 'REQUIRED_CHAIN_LENGTH': 30, 'NUMBER_SWEEPS': 2, 'BOND_DIMENSION': 50, 'SCHMIDT_TOLERANCE': (10.0 ** (- 15)), 'LANCZOS_ALGORITHM': 'SCIPY', 'SCIPY_EIGSH_TOLERANCE': 0, 'KRYLOV_SPACE_DIMENSION': 200, 'ALWAYS_MINIMIZE': True, 'SELF_ATTRIBUTES': {'rek_value': '%.0E', 'rek_vector': '%.0E'}, 'INFOSTREAM_OPERATORS_SUMMED_OVER_ALL_SITES': {}, 'INFOSTREAM_OPERATORS_ACTING_ON_CENTRAL_SITES': [], 'LOCAL_OPERATORS_SUMMED_OVER_ALL_SITES': [], 'NON_LOCAL_OPERATORS_OR_LIST_LOCAL_EXPECTATION_VALUES': [], 'NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_ALL_SITES': [], 'NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_CENTRAL_SITES': [], 'NAMES_NORMAL_MATRIX_OPERATORS_FOR_CORRELATIONS_AND_LOCAL_EXPECTATION_VALUES': []} ALG_PARAMS.update(SIM_PARAMS['ALG_PARAMS']) for key in ALG_PARAMS.keys(): setattr(s, key, ALG_PARAMS[key]) if ALG_PARAMS['INFINITE_SYSTEM_WARMUP']: s.INITIAL_STATE_LENGTH = 2 else: s.INITIAL_STATE_LENGTH = s.REQUIRED_CHAIN_LENGTH s.SELF_ATTRIBUTES_TAGS = list(s.SELF_ATTRIBUTES.keys()) if s.POST_RUN_INSPECTION: s.NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_ALL_SITES = 
s.LOCAL_OPERATORS_SUMMED_OVER_ALL_SITES s.NAMES_NORMAL_MATRIX_OPERATORS_FOR_CORRELATIONS_AND_LOCAL_EXPECTATION_VALUES = s.NON_LOCAL_OPERATORS_OR_LIST_LOCAL_EXPECTATION_VALUES else: s.INFOSTREAM_OPERATORS_SUMMED_OVER_ALL_SITES['hamiltonian'] = '%.10f' s.NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_ALL_SITES = list(s.INFOSTREAM_OPERATORS_SUMMED_OVER_ALL_SITES.keys()) s.NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_CENTRAL_SITES = s.INFOSTREAM_OPERATORS_ACTING_ON_CENTRAL_SITES s.DATA_COLUMNS_TAG = [] s.DATA_COLUMNS_TAG += s.NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_ALL_SITES s.DATA_COLUMNS_TAG += [(_ + '_mid') for _ in s.NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_CENTRAL_SITES] s.DATA_COLUMNS_TAG += s.SELF_ATTRIBUTES_TAGS s.NAMES_ALL_ACTIVE_MATRIX_PRODUCT_OPERATORS = list((set(s.NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_ALL_SITES) | set(s.NAMES_MATRIX_PRODUCT_OPERATORS_ACTING_ON_CENTRAL_SITES))) s.H_PARAMS = SIM_PARAMS['H_PARAMS'] if (len(s.H_PARAMS['Commuting_Operators'].keys()) == 0): s.ABELIAN_SYMMETRIES = False else: s.ABELIAN_SYMMETRIES = True s.TOTAL_CHARGE = {} s.AVERAGE_CHARGE_PER_SITE = {} s.LIST_SYMMETRIES_NAMES = list(s.H_PARAMS['Commuting_Operators'].keys()) s.LIST_SYMMETRIC_OPERATORS_NAMES = [] for symmetry_name in s.LIST_SYMMETRIES_NAMES: if (symmetry_name == 'links_alignment'): s.LIST_SYMMETRIC_OPERATORS_NAMES.append('links_set_left') s.AVERAGE_CHARGE_PER_SITE['links_set_left'] = 0 s.LIST_SYMMETRIC_OPERATORS_NAMES.append('links_set_right') s.AVERAGE_CHARGE_PER_SITE['links_set_right'] = 0 else: s.LIST_SYMMETRIC_OPERATORS_NAMES.append(symmetry_name) try: s.AVERAGE_CHARGE_PER_SITE[symmetry_name] = s.H_PARAMS['Commuting_Operators'][symmetry_name]['Average_Charge'] s.TOTAL_CHARGE[symmetry_name] = int((s.AVERAGE_CHARGE_PER_SITE[symmetry_name] * s.REQUIRED_CHAIN_LENGTH)) except: s.TOTAL_CHARGE[symmetry_name] = s.H_PARAMS['Commuting_Operators'][symmetry_name]['Total_Charge'] s.AVERAGE_CHARGE_PER_SITE[symmetry_name] = (s.TOTAL_CHARGE[symmetry_name] / s.REQUIRED_CHAIN_LENGTH) 
s.number_tensor_contractions = {} s.number_tensor_contractions['matvec'] = 4 s.number_tensor_contractions['ltm_mpo_update'] = 3 s.number_tensor_contractions['rtm_mpo_update'] = 3 s.number_tensor_contractions['ltm_opt_update'] = 3 s.number_tensor_contractions['rtm_opt_update'] = 3 s.number_tensor_contractions['two_sites_svd'] = 1 s.HALF_REQUIRED_CHAIN_LENGTH = int((s.REQUIRED_CHAIN_LENGTH / 2))<|docstring|>Define (many) simulation parameters as self objects, from SIM_PARAMS. Store the algorithm and Hamiltonian input parameters as self objects.<|endoftext|>
c1de0e7f21f498d0733ff5fd000f3344689546c00e82a0fab9c5765229f320ce
@property def pool(self): 'Create thread pool on first request\n avoids instantiating unused threadpool for blocking clients.\n ' if (self._pool is None): atexit.register(self.close) self._pool = ThreadPool(self.pool_threads) return self._pool
Create thread pool on first request avoids instantiating unused threadpool for blocking clients.
aylien_news_api/api_client.py
pool
AYLIEN/aylien_newsapi_python
13
python
@property def pool(self): 'Create thread pool on first request\n avoids instantiating unused threadpool for blocking clients.\n ' if (self._pool is None): atexit.register(self.close) self._pool = ThreadPool(self.pool_threads) return self._pool
@property def pool(self): 'Create thread pool on first request\n avoids instantiating unused threadpool for blocking clients.\n ' if (self._pool is None): atexit.register(self.close) self._pool = ThreadPool(self.pool_threads) return self._pool<|docstring|>Create thread pool on first request avoids instantiating unused threadpool for blocking clients.<|endoftext|>
c5c52b83bab6c62c262b0cf7dcecce09b5609dfb7306716d10ea8feac092abe8
@property def user_agent(self): 'User agent for this API client' return self.default_headers['User-Agent']
User agent for this API client
aylien_news_api/api_client.py
user_agent
AYLIEN/aylien_newsapi_python
13
python
@property def user_agent(self): return self.default_headers['User-Agent']
@property def user_agent(self): return self.default_headers['User-Agent']<|docstring|>User agent for this API client<|endoftext|>
bb247c68e3f340cf62644aba05f189a147b17709d1b61b0fdacd87e5e816db7a
def sanitize_for_serialization(self, obj): 'Builds a JSON POST object.\n\n If obj is None, return None.\n If obj is str, int, long, float, bool, return directly.\n If obj is datetime.datetime, datetime.date\n convert to string in iso8601 format.\n If obj is list, sanitize each element in the list.\n If obj is dict, return the dict.\n If obj is OpenAPI model, return the properties dict.\n\n :param obj: The data to serialize.\n :return: The serialized form of data.\n ' if (obj is None): return None elif isinstance(obj, self.PRIMITIVE_TYPES): return obj elif isinstance(obj, list): return [self.sanitize_for_serialization(sub_obj) for sub_obj in obj] elif isinstance(obj, tuple): return tuple((self.sanitize_for_serialization(sub_obj) for sub_obj in obj)) elif isinstance(obj, (datetime.datetime, datetime.date)): return obj.isoformat() if isinstance(obj, dict): obj_dict = obj else: obj_dict = {obj.attribute_map[attr]: getattr(obj, attr) for (attr, _) in six.iteritems(obj.openapi_types) if (getattr(obj, attr) is not None)} return {key: self.sanitize_for_serialization(val) for (key, val) in six.iteritems(obj_dict)}
Builds a JSON POST object. If obj is None, return None. If obj is str, int, long, float, bool, return directly. If obj is datetime.datetime, datetime.date convert to string in iso8601 format. If obj is list, sanitize each element in the list. If obj is dict, return the dict. If obj is OpenAPI model, return the properties dict. :param obj: The data to serialize. :return: The serialized form of data.
aylien_news_api/api_client.py
sanitize_for_serialization
AYLIEN/aylien_newsapi_python
13
python
def sanitize_for_serialization(self, obj): 'Builds a JSON POST object.\n\n If obj is None, return None.\n If obj is str, int, long, float, bool, return directly.\n If obj is datetime.datetime, datetime.date\n convert to string in iso8601 format.\n If obj is list, sanitize each element in the list.\n If obj is dict, return the dict.\n If obj is OpenAPI model, return the properties dict.\n\n :param obj: The data to serialize.\n :return: The serialized form of data.\n ' if (obj is None): return None elif isinstance(obj, self.PRIMITIVE_TYPES): return obj elif isinstance(obj, list): return [self.sanitize_for_serialization(sub_obj) for sub_obj in obj] elif isinstance(obj, tuple): return tuple((self.sanitize_for_serialization(sub_obj) for sub_obj in obj)) elif isinstance(obj, (datetime.datetime, datetime.date)): return obj.isoformat() if isinstance(obj, dict): obj_dict = obj else: obj_dict = {obj.attribute_map[attr]: getattr(obj, attr) for (attr, _) in six.iteritems(obj.openapi_types) if (getattr(obj, attr) is not None)} return {key: self.sanitize_for_serialization(val) for (key, val) in six.iteritems(obj_dict)}
def sanitize_for_serialization(self, obj): 'Builds a JSON POST object.\n\n If obj is None, return None.\n If obj is str, int, long, float, bool, return directly.\n If obj is datetime.datetime, datetime.date\n convert to string in iso8601 format.\n If obj is list, sanitize each element in the list.\n If obj is dict, return the dict.\n If obj is OpenAPI model, return the properties dict.\n\n :param obj: The data to serialize.\n :return: The serialized form of data.\n ' if (obj is None): return None elif isinstance(obj, self.PRIMITIVE_TYPES): return obj elif isinstance(obj, list): return [self.sanitize_for_serialization(sub_obj) for sub_obj in obj] elif isinstance(obj, tuple): return tuple((self.sanitize_for_serialization(sub_obj) for sub_obj in obj)) elif isinstance(obj, (datetime.datetime, datetime.date)): return obj.isoformat() if isinstance(obj, dict): obj_dict = obj else: obj_dict = {obj.attribute_map[attr]: getattr(obj, attr) for (attr, _) in six.iteritems(obj.openapi_types) if (getattr(obj, attr) is not None)} return {key: self.sanitize_for_serialization(val) for (key, val) in six.iteritems(obj_dict)}<|docstring|>Builds a JSON POST object. If obj is None, return None. If obj is str, int, long, float, bool, return directly. If obj is datetime.datetime, datetime.date convert to string in iso8601 format. If obj is list, sanitize each element in the list. If obj is dict, return the dict. If obj is OpenAPI model, return the properties dict. :param obj: The data to serialize. :return: The serialized form of data.<|endoftext|>
92da644dcebc9d93f798594d2a39281b0eeaf90c415ad654d8d1b36f7bee49aa
def deserialize(self, response, response_type): 'Deserializes response into an object.\n\n :param response: RESTResponse object to be deserialized.\n :param response_type: class literal for\n deserialized object, or string of class name.\n\n :return: deserialized object.\n ' if (response_type == 'file'): return self.__deserialize_file(response) try: data = json.loads(response.data) except ValueError: data = response.data return self.__deserialize(data, response_type)
Deserializes response into an object. :param response: RESTResponse object to be deserialized. :param response_type: class literal for deserialized object, or string of class name. :return: deserialized object.
aylien_news_api/api_client.py
deserialize
AYLIEN/aylien_newsapi_python
13
python
def deserialize(self, response, response_type): 'Deserializes response into an object.\n\n :param response: RESTResponse object to be deserialized.\n :param response_type: class literal for\n deserialized object, or string of class name.\n\n :return: deserialized object.\n ' if (response_type == 'file'): return self.__deserialize_file(response) try: data = json.loads(response.data) except ValueError: data = response.data return self.__deserialize(data, response_type)
def deserialize(self, response, response_type): 'Deserializes response into an object.\n\n :param response: RESTResponse object to be deserialized.\n :param response_type: class literal for\n deserialized object, or string of class name.\n\n :return: deserialized object.\n ' if (response_type == 'file'): return self.__deserialize_file(response) try: data = json.loads(response.data) except ValueError: data = response.data return self.__deserialize(data, response_type)<|docstring|>Deserializes response into an object. :param response: RESTResponse object to be deserialized. :param response_type: class literal for deserialized object, or string of class name. :return: deserialized object.<|endoftext|>
6f2a08f904947972254855216eea84ba86ce6b8ccd8a98d710745a97cfc84d4c
def __deserialize(self, data, klass): 'Deserializes dict, list, str into an object.\n\n :param data: dict, list or str.\n :param klass: class literal, or string of class name.\n\n :return: object.\n ' if (data is None): return None if (type(klass) == str): if klass.startswith('list['): sub_kls = re.match('list\\[(.*)\\]', klass).group(1) return [self.__deserialize(sub_data, sub_kls) for sub_data in data] if klass.startswith('dict('): sub_kls = re.match('dict\\(([^,]*), (.*)\\)', klass).group(2) return {k: self.__deserialize(v, sub_kls) for (k, v) in six.iteritems(data)} if (klass in self.NATIVE_TYPES_MAPPING): klass = self.NATIVE_TYPES_MAPPING[klass] else: klass = getattr(aylien_news_api.models, klass) if (klass in self.PRIMITIVE_TYPES): return self.__deserialize_primitive(data, klass) elif (klass == object): return self.__deserialize_object(data) elif (klass == datetime.date): return self.__deserialize_date(data) elif (klass == datetime.datetime): return self.__deserialize_datetime(data) else: return self.__deserialize_model(data, klass)
Deserializes dict, list, str into an object. :param data: dict, list or str. :param klass: class literal, or string of class name. :return: object.
aylien_news_api/api_client.py
__deserialize
AYLIEN/aylien_newsapi_python
13
python
def __deserialize(self, data, klass): 'Deserializes dict, list, str into an object.\n\n :param data: dict, list or str.\n :param klass: class literal, or string of class name.\n\n :return: object.\n ' if (data is None): return None if (type(klass) == str): if klass.startswith('list['): sub_kls = re.match('list\\[(.*)\\]', klass).group(1) return [self.__deserialize(sub_data, sub_kls) for sub_data in data] if klass.startswith('dict('): sub_kls = re.match('dict\\(([^,]*), (.*)\\)', klass).group(2) return {k: self.__deserialize(v, sub_kls) for (k, v) in six.iteritems(data)} if (klass in self.NATIVE_TYPES_MAPPING): klass = self.NATIVE_TYPES_MAPPING[klass] else: klass = getattr(aylien_news_api.models, klass) if (klass in self.PRIMITIVE_TYPES): return self.__deserialize_primitive(data, klass) elif (klass == object): return self.__deserialize_object(data) elif (klass == datetime.date): return self.__deserialize_date(data) elif (klass == datetime.datetime): return self.__deserialize_datetime(data) else: return self.__deserialize_model(data, klass)
def __deserialize(self, data, klass): 'Deserializes dict, list, str into an object.\n\n :param data: dict, list or str.\n :param klass: class literal, or string of class name.\n\n :return: object.\n ' if (data is None): return None if (type(klass) == str): if klass.startswith('list['): sub_kls = re.match('list\\[(.*)\\]', klass).group(1) return [self.__deserialize(sub_data, sub_kls) for sub_data in data] if klass.startswith('dict('): sub_kls = re.match('dict\\(([^,]*), (.*)\\)', klass).group(2) return {k: self.__deserialize(v, sub_kls) for (k, v) in six.iteritems(data)} if (klass in self.NATIVE_TYPES_MAPPING): klass = self.NATIVE_TYPES_MAPPING[klass] else: klass = getattr(aylien_news_api.models, klass) if (klass in self.PRIMITIVE_TYPES): return self.__deserialize_primitive(data, klass) elif (klass == object): return self.__deserialize_object(data) elif (klass == datetime.date): return self.__deserialize_date(data) elif (klass == datetime.datetime): return self.__deserialize_datetime(data) else: return self.__deserialize_model(data, klass)<|docstring|>Deserializes dict, list, str into an object. :param data: dict, list or str. :param klass: class literal, or string of class name. :return: object.<|endoftext|>
7378342307cd1c1447649016446df257d866f079f600583bbc2317ac4d5859d6
def call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None, post_params=None, files=None, response_type=None, auth_settings=None, async_req=None, _return_http_data_only=None, collection_formats=None, _preload_content=True, _request_timeout=None, _host=None, _request_auth=None): 'Makes the HTTP request (synchronous) and returns deserialized data.\n\n To make an async_req request, set the async_req parameter.\n\n :param resource_path: Path to method endpoint.\n :param method: Method to call.\n :param path_params: Path parameters in the url.\n :param query_params: Query parameters in the url.\n :param header_params: Header parameters to be\n placed in the request header.\n :param body: Request body.\n :param post_params dict: Request post form parameters,\n for `application/x-www-form-urlencoded`, `multipart/form-data`.\n :param auth_settings list: Auth Settings names for the request.\n :param response: Response data type.\n :param files dict: key -> filename, value -> filepath,\n for `multipart/form-data`.\n :param async_req bool: execute request asynchronously\n :param _return_http_data_only: response data without head status code\n and headers\n :param collection_formats: dict of collection formats for path, query,\n header, and post parameters.\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. 
It can also be a pair (tuple) of\n (connection, read) timeouts.\n :param _request_auth: set to override the auth_settings for an a single\n request; this effectively ignores the authentication\n in the spec for a single request.\n :type _request_token: dict, optional\n :return:\n If async_req parameter is True,\n the request will be called asynchronously.\n The method will return the request thread.\n If parameter async_req is False or missing,\n then the method will return the response directly.\n ' if (not async_req): return self.__call_api(resource_path, method, path_params, query_params, header_params, body, post_params, files, response_type, auth_settings, _return_http_data_only, collection_formats, _preload_content, _request_timeout, _host, _request_auth) return self.pool.apply_async(self.__call_api, (resource_path, method, path_params, query_params, header_params, body, post_params, files, response_type, auth_settings, _return_http_data_only, collection_formats, _preload_content, _request_timeout, _host, _request_auth))
Makes the HTTP request (synchronous) and returns deserialized data. To make an async_req request, set the async_req parameter. :param resource_path: Path to method endpoint. :param method: Method to call. :param path_params: Path parameters in the url. :param query_params: Query parameters in the url. :param header_params: Header parameters to be placed in the request header. :param body: Request body. :param post_params dict: Request post form parameters, for `application/x-www-form-urlencoded`, `multipart/form-data`. :param auth_settings list: Auth Settings names for the request. :param response: Response data type. :param files dict: key -> filename, value -> filepath, for `multipart/form-data`. :param async_req bool: execute request asynchronously :param _return_http_data_only: response data without head status code and headers :param collection_formats: dict of collection formats for path, query, header, and post parameters. :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :param _request_auth: set to override the auth_settings for an a single request; this effectively ignores the authentication in the spec for a single request. :type _request_token: dict, optional :return: If async_req parameter is True, the request will be called asynchronously. The method will return the request thread. If parameter async_req is False or missing, then the method will return the response directly.
aylien_news_api/api_client.py
call_api
AYLIEN/aylien_newsapi_python
13
python
def call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None, post_params=None, files=None, response_type=None, auth_settings=None, async_req=None, _return_http_data_only=None, collection_formats=None, _preload_content=True, _request_timeout=None, _host=None, _request_auth=None): 'Makes the HTTP request (synchronous) and returns deserialized data.\n\n To make an async_req request, set the async_req parameter.\n\n :param resource_path: Path to method endpoint.\n :param method: Method to call.\n :param path_params: Path parameters in the url.\n :param query_params: Query parameters in the url.\n :param header_params: Header parameters to be\n placed in the request header.\n :param body: Request body.\n :param post_params dict: Request post form parameters,\n for `application/x-www-form-urlencoded`, `multipart/form-data`.\n :param auth_settings list: Auth Settings names for the request.\n :param response: Response data type.\n :param files dict: key -> filename, value -> filepath,\n for `multipart/form-data`.\n :param async_req bool: execute request asynchronously\n :param _return_http_data_only: response data without head status code\n and headers\n :param collection_formats: dict of collection formats for path, query,\n header, and post parameters.\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. 
It can also be a pair (tuple) of\n (connection, read) timeouts.\n :param _request_auth: set to override the auth_settings for an a single\n request; this effectively ignores the authentication\n in the spec for a single request.\n :type _request_token: dict, optional\n :return:\n If async_req parameter is True,\n the request will be called asynchronously.\n The method will return the request thread.\n If parameter async_req is False or missing,\n then the method will return the response directly.\n ' if (not async_req): return self.__call_api(resource_path, method, path_params, query_params, header_params, body, post_params, files, response_type, auth_settings, _return_http_data_only, collection_formats, _preload_content, _request_timeout, _host, _request_auth) return self.pool.apply_async(self.__call_api, (resource_path, method, path_params, query_params, header_params, body, post_params, files, response_type, auth_settings, _return_http_data_only, collection_formats, _preload_content, _request_timeout, _host, _request_auth))
def call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None, post_params=None, files=None, response_type=None, auth_settings=None, async_req=None, _return_http_data_only=None, collection_formats=None, _preload_content=True, _request_timeout=None, _host=None, _request_auth=None): 'Makes the HTTP request (synchronous) and returns deserialized data.\n\n To make an async_req request, set the async_req parameter.\n\n :param resource_path: Path to method endpoint.\n :param method: Method to call.\n :param path_params: Path parameters in the url.\n :param query_params: Query parameters in the url.\n :param header_params: Header parameters to be\n placed in the request header.\n :param body: Request body.\n :param post_params dict: Request post form parameters,\n for `application/x-www-form-urlencoded`, `multipart/form-data`.\n :param auth_settings list: Auth Settings names for the request.\n :param response: Response data type.\n :param files dict: key -> filename, value -> filepath,\n for `multipart/form-data`.\n :param async_req bool: execute request asynchronously\n :param _return_http_data_only: response data without head status code\n and headers\n :param collection_formats: dict of collection formats for path, query,\n header, and post parameters.\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. 
It can also be a pair (tuple) of\n (connection, read) timeouts.\n :param _request_auth: set to override the auth_settings for an a single\n request; this effectively ignores the authentication\n in the spec for a single request.\n :type _request_token: dict, optional\n :return:\n If async_req parameter is True,\n the request will be called asynchronously.\n The method will return the request thread.\n If parameter async_req is False or missing,\n then the method will return the response directly.\n ' if (not async_req): return self.__call_api(resource_path, method, path_params, query_params, header_params, body, post_params, files, response_type, auth_settings, _return_http_data_only, collection_formats, _preload_content, _request_timeout, _host, _request_auth) return self.pool.apply_async(self.__call_api, (resource_path, method, path_params, query_params, header_params, body, post_params, files, response_type, auth_settings, _return_http_data_only, collection_formats, _preload_content, _request_timeout, _host, _request_auth))<|docstring|>Makes the HTTP request (synchronous) and returns deserialized data. To make an async_req request, set the async_req parameter. :param resource_path: Path to method endpoint. :param method: Method to call. :param path_params: Path parameters in the url. :param query_params: Query parameters in the url. :param header_params: Header parameters to be placed in the request header. :param body: Request body. :param post_params dict: Request post form parameters, for `application/x-www-form-urlencoded`, `multipart/form-data`. :param auth_settings list: Auth Settings names for the request. :param response: Response data type. :param files dict: key -> filename, value -> filepath, for `multipart/form-data`. :param async_req bool: execute request asynchronously :param _return_http_data_only: response data without head status code and headers :param collection_formats: dict of collection formats for path, query, header, and post parameters. 
:param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :param _request_auth: set to override the auth_settings for an a single request; this effectively ignores the authentication in the spec for a single request. :type _request_token: dict, optional :return: If async_req parameter is True, the request will be called asynchronously. The method will return the request thread. If parameter async_req is False or missing, then the method will return the response directly.<|endoftext|>
05ffda3a775a60d420ec75309ae32cef9add19416b4107fe3f0160e24c168375
def request(self, method, url, query_params=None, headers=None, post_params=None, body=None, _preload_content=True, _request_timeout=None): 'Makes the HTTP request using RESTClient.' if (method == 'GET'): return self.rest_client.GET(url, query_params=query_params, _preload_content=_preload_content, _request_timeout=_request_timeout, headers=headers) elif (method == 'HEAD'): return self.rest_client.HEAD(url, query_params=query_params, _preload_content=_preload_content, _request_timeout=_request_timeout, headers=headers) elif (method == 'OPTIONS'): return self.rest_client.OPTIONS(url, query_params=query_params, headers=headers, _preload_content=_preload_content, _request_timeout=_request_timeout) elif (method == 'POST'): return self.rest_client.POST(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif (method == 'PUT'): return self.rest_client.PUT(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif (method == 'PATCH'): return self.rest_client.PATCH(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif (method == 'DELETE'): return self.rest_client.DELETE(url, query_params=query_params, headers=headers, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) else: raise ApiValueError('http method must be `GET`, `HEAD`, `OPTIONS`, `POST`, `PATCH`, `PUT` or `DELETE`.')
Makes the HTTP request using RESTClient.
aylien_news_api/api_client.py
request
AYLIEN/aylien_newsapi_python
13
python
def request(self, method, url, query_params=None, headers=None, post_params=None, body=None, _preload_content=True, _request_timeout=None): if (method == 'GET'): return self.rest_client.GET(url, query_params=query_params, _preload_content=_preload_content, _request_timeout=_request_timeout, headers=headers) elif (method == 'HEAD'): return self.rest_client.HEAD(url, query_params=query_params, _preload_content=_preload_content, _request_timeout=_request_timeout, headers=headers) elif (method == 'OPTIONS'): return self.rest_client.OPTIONS(url, query_params=query_params, headers=headers, _preload_content=_preload_content, _request_timeout=_request_timeout) elif (method == 'POST'): return self.rest_client.POST(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif (method == 'PUT'): return self.rest_client.PUT(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif (method == 'PATCH'): return self.rest_client.PATCH(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif (method == 'DELETE'): return self.rest_client.DELETE(url, query_params=query_params, headers=headers, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) else: raise ApiValueError('http method must be `GET`, `HEAD`, `OPTIONS`, `POST`, `PATCH`, `PUT` or `DELETE`.')
def request(self, method, url, query_params=None, headers=None, post_params=None, body=None, _preload_content=True, _request_timeout=None): if (method == 'GET'): return self.rest_client.GET(url, query_params=query_params, _preload_content=_preload_content, _request_timeout=_request_timeout, headers=headers) elif (method == 'HEAD'): return self.rest_client.HEAD(url, query_params=query_params, _preload_content=_preload_content, _request_timeout=_request_timeout, headers=headers) elif (method == 'OPTIONS'): return self.rest_client.OPTIONS(url, query_params=query_params, headers=headers, _preload_content=_preload_content, _request_timeout=_request_timeout) elif (method == 'POST'): return self.rest_client.POST(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif (method == 'PUT'): return self.rest_client.PUT(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif (method == 'PATCH'): return self.rest_client.PATCH(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif (method == 'DELETE'): return self.rest_client.DELETE(url, query_params=query_params, headers=headers, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) else: raise ApiValueError('http method must be `GET`, `HEAD`, `OPTIONS`, `POST`, `PATCH`, `PUT` or `DELETE`.')<|docstring|>Makes the HTTP request using RESTClient.<|endoftext|>
95796efc2c32c5f0a63c3023434a7fb8077a70b562341d885353d2b2a18a8d2a
def parameters_to_tuples(self, params, collection_formats): 'Get parameters as list of tuples, formatting collections.\n\n :param params: Parameters as dict or list of two-tuples\n :param dict collection_formats: Parameter collection formats\n :return: Parameters as list of tuples, collections formatted\n ' new_params = [] if (collection_formats is None): collection_formats = {} for (k, v) in (six.iteritems(params) if isinstance(params, dict) else params): if (k in collection_formats): collection_format = collection_formats[k] if (collection_format == 'multi'): new_params.extend(((k, value) for value in v)) else: if (collection_format == 'ssv'): delimiter = ' ' elif (collection_format == 'tsv'): delimiter = '\t' elif (collection_format == 'pipes'): delimiter = '|' else: delimiter = ',' new_params.append((k, delimiter.join((str(value) for value in v)))) else: new_params.append((k, v)) return new_params
Get parameters as list of tuples, formatting collections. :param params: Parameters as dict or list of two-tuples :param dict collection_formats: Parameter collection formats :return: Parameters as list of tuples, collections formatted
aylien_news_api/api_client.py
parameters_to_tuples
AYLIEN/aylien_newsapi_python
13
python
def parameters_to_tuples(self, params, collection_formats): 'Get parameters as list of tuples, formatting collections.\n\n :param params: Parameters as dict or list of two-tuples\n :param dict collection_formats: Parameter collection formats\n :return: Parameters as list of tuples, collections formatted\n ' new_params = [] if (collection_formats is None): collection_formats = {} for (k, v) in (six.iteritems(params) if isinstance(params, dict) else params): if (k in collection_formats): collection_format = collection_formats[k] if (collection_format == 'multi'): new_params.extend(((k, value) for value in v)) else: if (collection_format == 'ssv'): delimiter = ' ' elif (collection_format == 'tsv'): delimiter = '\t' elif (collection_format == 'pipes'): delimiter = '|' else: delimiter = ',' new_params.append((k, delimiter.join((str(value) for value in v)))) else: new_params.append((k, v)) return new_params
def parameters_to_tuples(self, params, collection_formats): 'Get parameters as list of tuples, formatting collections.\n\n :param params: Parameters as dict or list of two-tuples\n :param dict collection_formats: Parameter collection formats\n :return: Parameters as list of tuples, collections formatted\n ' new_params = [] if (collection_formats is None): collection_formats = {} for (k, v) in (six.iteritems(params) if isinstance(params, dict) else params): if (k in collection_formats): collection_format = collection_formats[k] if (collection_format == 'multi'): new_params.extend(((k, value) for value in v)) else: if (collection_format == 'ssv'): delimiter = ' ' elif (collection_format == 'tsv'): delimiter = '\t' elif (collection_format == 'pipes'): delimiter = '|' else: delimiter = ',' new_params.append((k, delimiter.join((str(value) for value in v)))) else: new_params.append((k, v)) return new_params<|docstring|>Get parameters as list of tuples, formatting collections. :param params: Parameters as dict or list of two-tuples :param dict collection_formats: Parameter collection formats :return: Parameters as list of tuples, collections formatted<|endoftext|>
97e3bb67798227f199805ae68b7c5ef4f46a9993b159d67e67de5e22b4cd0fb9
def files_parameters(self, files=None): 'Builds form parameters.\n\n :param files: File parameters.\n :return: Form parameters with files.\n ' params = [] if files: for (k, v) in six.iteritems(files): if (not v): continue file_names = (v if (type(v) is list) else [v]) for n in file_names: with open(n, 'rb') as f: filename = os.path.basename(f.name) filedata = f.read() mimetype = (mimetypes.guess_type(filename)[0] or 'application/octet-stream') params.append(tuple([k, tuple([filename, filedata, mimetype])])) return params
Builds form parameters. :param files: File parameters. :return: Form parameters with files.
aylien_news_api/api_client.py
files_parameters
AYLIEN/aylien_newsapi_python
13
python
def files_parameters(self, files=None): 'Builds form parameters.\n\n :param files: File parameters.\n :return: Form parameters with files.\n ' params = [] if files: for (k, v) in six.iteritems(files): if (not v): continue file_names = (v if (type(v) is list) else [v]) for n in file_names: with open(n, 'rb') as f: filename = os.path.basename(f.name) filedata = f.read() mimetype = (mimetypes.guess_type(filename)[0] or 'application/octet-stream') params.append(tuple([k, tuple([filename, filedata, mimetype])])) return params
def files_parameters(self, files=None): 'Builds form parameters.\n\n :param files: File parameters.\n :return: Form parameters with files.\n ' params = [] if files: for (k, v) in six.iteritems(files): if (not v): continue file_names = (v if (type(v) is list) else [v]) for n in file_names: with open(n, 'rb') as f: filename = os.path.basename(f.name) filedata = f.read() mimetype = (mimetypes.guess_type(filename)[0] or 'application/octet-stream') params.append(tuple([k, tuple([filename, filedata, mimetype])])) return params<|docstring|>Builds form parameters. :param files: File parameters. :return: Form parameters with files.<|endoftext|>
67e460fe35972cffd5484e31890892031d5bd4a32baf7f5717be1b353dcd6260
def select_header_accept(self, accepts): 'Returns `Accept` based on an array of accepts provided.\n\n :param accepts: List of headers.\n :return: Accept (e.g. application/json).\n ' if (not accepts): return accepts = [x.lower() for x in accepts] if ('application/json' in accepts): return 'application/json' else: return ', '.join(accepts)
Returns `Accept` based on an array of accepts provided. :param accepts: List of headers. :return: Accept (e.g. application/json).
aylien_news_api/api_client.py
select_header_accept
AYLIEN/aylien_newsapi_python
13
python
def select_header_accept(self, accepts): 'Returns `Accept` based on an array of accepts provided.\n\n :param accepts: List of headers.\n :return: Accept (e.g. application/json).\n ' if (not accepts): return accepts = [x.lower() for x in accepts] if ('application/json' in accepts): return 'application/json' else: return ', '.join(accepts)
def select_header_accept(self, accepts): 'Returns `Accept` based on an array of accepts provided.\n\n :param accepts: List of headers.\n :return: Accept (e.g. application/json).\n ' if (not accepts): return accepts = [x.lower() for x in accepts] if ('application/json' in accepts): return 'application/json' else: return ', '.join(accepts)<|docstring|>Returns `Accept` based on an array of accepts provided. :param accepts: List of headers. :return: Accept (e.g. application/json).<|endoftext|>
a9d569d0f0bd6a8e47f52567aa35c9cf4726a04df122971a70f45f918bed128a
def select_header_content_type(self, content_types): 'Returns `Content-Type` based on an array of content_types provided.\n\n :param content_types: List of content-types.\n :return: Content-Type (e.g. application/json).\n ' if (not content_types): return 'application/json' content_types = [x.lower() for x in content_types] if (('application/json' in content_types) or ('*/*' in content_types)): return 'application/json' else: return content_types[0]
Returns `Content-Type` based on an array of content_types provided. :param content_types: List of content-types. :return: Content-Type (e.g. application/json).
aylien_news_api/api_client.py
select_header_content_type
AYLIEN/aylien_newsapi_python
13
python
def select_header_content_type(self, content_types): 'Returns `Content-Type` based on an array of content_types provided.\n\n :param content_types: List of content-types.\n :return: Content-Type (e.g. application/json).\n ' if (not content_types): return 'application/json' content_types = [x.lower() for x in content_types] if (('application/json' in content_types) or ('*/*' in content_types)): return 'application/json' else: return content_types[0]
def select_header_content_type(self, content_types): 'Returns `Content-Type` based on an array of content_types provided.\n\n :param content_types: List of content-types.\n :return: Content-Type (e.g. application/json).\n ' if (not content_types): return 'application/json' content_types = [x.lower() for x in content_types] if (('application/json' in content_types) or ('*/*' in content_types)): return 'application/json' else: return content_types[0]<|docstring|>Returns `Content-Type` based on an array of content_types provided. :param content_types: List of content-types. :return: Content-Type (e.g. application/json).<|endoftext|>
dd7565f679c2afa0d10fffdf4ac6ad5ac6cdf777b010bde532b34e1ed4da96f5
def update_params_for_auth(self, headers, querys, auth_settings, request_auth=None): 'Updates header and query params based on authentication setting.\n\n :param headers: Header parameters dict to be updated.\n :param querys: Query parameters tuple list to be updated.\n :param auth_settings: Authentication setting identifiers list.\n :param request_auth: if set, the provided settings will\n override the token in the configuration.\n ' if (not auth_settings): return if request_auth: self._apply_auth_params(headers, querys, request_auth) return for auth in auth_settings: auth_setting = self.configuration.auth_settings().get(auth) if auth_setting: self._apply_auth_params(headers, querys, auth_setting)
Updates header and query params based on authentication setting. :param headers: Header parameters dict to be updated. :param querys: Query parameters tuple list to be updated. :param auth_settings: Authentication setting identifiers list. :param request_auth: if set, the provided settings will override the token in the configuration.
aylien_news_api/api_client.py
update_params_for_auth
AYLIEN/aylien_newsapi_python
13
python
def update_params_for_auth(self, headers, querys, auth_settings, request_auth=None): 'Updates header and query params based on authentication setting.\n\n :param headers: Header parameters dict to be updated.\n :param querys: Query parameters tuple list to be updated.\n :param auth_settings: Authentication setting identifiers list.\n :param request_auth: if set, the provided settings will\n override the token in the configuration.\n ' if (not auth_settings): return if request_auth: self._apply_auth_params(headers, querys, request_auth) return for auth in auth_settings: auth_setting = self.configuration.auth_settings().get(auth) if auth_setting: self._apply_auth_params(headers, querys, auth_setting)
def update_params_for_auth(self, headers, querys, auth_settings, request_auth=None): 'Updates header and query params based on authentication setting.\n\n :param headers: Header parameters dict to be updated.\n :param querys: Query parameters tuple list to be updated.\n :param auth_settings: Authentication setting identifiers list.\n :param request_auth: if set, the provided settings will\n override the token in the configuration.\n ' if (not auth_settings): return if request_auth: self._apply_auth_params(headers, querys, request_auth) return for auth in auth_settings: auth_setting = self.configuration.auth_settings().get(auth) if auth_setting: self._apply_auth_params(headers, querys, auth_setting)<|docstring|>Updates header and query params based on authentication setting. :param headers: Header parameters dict to be updated. :param querys: Query parameters tuple list to be updated. :param auth_settings: Authentication setting identifiers list. :param request_auth: if set, the provided settings will override the token in the configuration.<|endoftext|>
0f666636f6238cf696a0b3f26690e3554e5cb65fa7da719fdf6be36131b143a0
def _apply_auth_params(self, headers, querys, auth_setting): 'Updates the request parameters based on a single auth_setting\n\n :param headers: Header parameters dict to be updated.\n :param querys: Query parameters tuple list to be updated.\n :param auth_setting: auth settings for the endpoint\n ' if (auth_setting['in'] == 'cookie'): headers['Cookie'] = auth_setting['value'] elif (auth_setting['in'] == 'header'): headers[auth_setting['key']] = auth_setting['value'] elif (auth_setting['in'] == 'query'): querys.append((auth_setting['key'], auth_setting['value'])) else: raise ApiValueError('Authentication token must be in `query` or `header`')
Updates the request parameters based on a single auth_setting :param headers: Header parameters dict to be updated. :param querys: Query parameters tuple list to be updated. :param auth_setting: auth settings for the endpoint
aylien_news_api/api_client.py
_apply_auth_params
AYLIEN/aylien_newsapi_python
13
python
def _apply_auth_params(self, headers, querys, auth_setting): 'Updates the request parameters based on a single auth_setting\n\n :param headers: Header parameters dict to be updated.\n :param querys: Query parameters tuple list to be updated.\n :param auth_setting: auth settings for the endpoint\n ' if (auth_setting['in'] == 'cookie'): headers['Cookie'] = auth_setting['value'] elif (auth_setting['in'] == 'header'): headers[auth_setting['key']] = auth_setting['value'] elif (auth_setting['in'] == 'query'): querys.append((auth_setting['key'], auth_setting['value'])) else: raise ApiValueError('Authentication token must be in `query` or `header`')
def _apply_auth_params(self, headers, querys, auth_setting): 'Updates the request parameters based on a single auth_setting\n\n :param headers: Header parameters dict to be updated.\n :param querys: Query parameters tuple list to be updated.\n :param auth_setting: auth settings for the endpoint\n ' if (auth_setting['in'] == 'cookie'): headers['Cookie'] = auth_setting['value'] elif (auth_setting['in'] == 'header'): headers[auth_setting['key']] = auth_setting['value'] elif (auth_setting['in'] == 'query'): querys.append((auth_setting['key'], auth_setting['value'])) else: raise ApiValueError('Authentication token must be in `query` or `header`')<|docstring|>Updates the request parameters based on a single auth_setting :param headers: Header parameters dict to be updated. :param querys: Query parameters tuple list to be updated. :param auth_setting: auth settings for the endpoint<|endoftext|>
9b86180dd535039229f5641d433347368798a645d1087ea786c8a27498886a5d
def __deserialize_file(self, response): 'Deserializes body to file\n\n Saves response body into a file in a temporary folder,\n using the filename from the `Content-Disposition` header if provided.\n\n :param response: RESTResponse.\n :return: file path.\n ' (fd, path) = tempfile.mkstemp(dir=self.configuration.temp_folder_path) os.close(fd) os.remove(path) content_disposition = response.getheader('Content-Disposition') if content_disposition: filename = re.search('filename=[\\\'"]?([^\\\'"\\s]+)[\\\'"]?', content_disposition).group(1) path = os.path.join(os.path.dirname(path), filename) with open(path, 'wb') as f: f.write(response.data) return path
Deserializes body to file Saves response body into a file in a temporary folder, using the filename from the `Content-Disposition` header if provided. :param response: RESTResponse. :return: file path.
aylien_news_api/api_client.py
__deserialize_file
AYLIEN/aylien_newsapi_python
13
python
def __deserialize_file(self, response): 'Deserializes body to file\n\n Saves response body into a file in a temporary folder,\n using the filename from the `Content-Disposition` header if provided.\n\n :param response: RESTResponse.\n :return: file path.\n ' (fd, path) = tempfile.mkstemp(dir=self.configuration.temp_folder_path) os.close(fd) os.remove(path) content_disposition = response.getheader('Content-Disposition') if content_disposition: filename = re.search('filename=[\\\'"]?([^\\\'"\\s]+)[\\\'"]?', content_disposition).group(1) path = os.path.join(os.path.dirname(path), filename) with open(path, 'wb') as f: f.write(response.data) return path
def __deserialize_file(self, response): 'Deserializes body to file\n\n Saves response body into a file in a temporary folder,\n using the filename from the `Content-Disposition` header if provided.\n\n :param response: RESTResponse.\n :return: file path.\n ' (fd, path) = tempfile.mkstemp(dir=self.configuration.temp_folder_path) os.close(fd) os.remove(path) content_disposition = response.getheader('Content-Disposition') if content_disposition: filename = re.search('filename=[\\\'"]?([^\\\'"\\s]+)[\\\'"]?', content_disposition).group(1) path = os.path.join(os.path.dirname(path), filename) with open(path, 'wb') as f: f.write(response.data) return path<|docstring|>Deserializes body to file Saves response body into a file in a temporary folder, using the filename from the `Content-Disposition` header if provided. :param response: RESTResponse. :return: file path.<|endoftext|>
7c794fb8b3b38a2148831d9bd56333b28469bdcce6b43073a1e095b7702c7691
def __deserialize_primitive(self, data, klass): 'Deserializes string to primitive type.\n\n :param data: str.\n :param klass: class literal.\n\n :return: int, long, float, str, bool.\n ' try: return klass(data) except UnicodeEncodeError: return six.text_type(data) except TypeError: return data
Deserializes string to primitive type. :param data: str. :param klass: class literal. :return: int, long, float, str, bool.
aylien_news_api/api_client.py
__deserialize_primitive
AYLIEN/aylien_newsapi_python
13
python
def __deserialize_primitive(self, data, klass): 'Deserializes string to primitive type.\n\n :param data: str.\n :param klass: class literal.\n\n :return: int, long, float, str, bool.\n ' try: return klass(data) except UnicodeEncodeError: return six.text_type(data) except TypeError: return data
def __deserialize_primitive(self, data, klass): 'Deserializes string to primitive type.\n\n :param data: str.\n :param klass: class literal.\n\n :return: int, long, float, str, bool.\n ' try: return klass(data) except UnicodeEncodeError: return six.text_type(data) except TypeError: return data<|docstring|>Deserializes string to primitive type. :param data: str. :param klass: class literal. :return: int, long, float, str, bool.<|endoftext|>
26b7ee7e8810fec6b49476dc902020e0c0d471cfc3e57c520e10d0c85fca7e5c
def __deserialize_object(self, value): 'Return an original value.\n\n :return: object.\n ' return value
Return an original value. :return: object.
aylien_news_api/api_client.py
__deserialize_object
AYLIEN/aylien_newsapi_python
13
python
def __deserialize_object(self, value): 'Return an original value.\n\n :return: object.\n ' return value
def __deserialize_object(self, value): 'Return an original value.\n\n :return: object.\n ' return value<|docstring|>Return an original value. :return: object.<|endoftext|>
6e1afd52baab9845bc695f1a514eb003454e7ca5f724602e39314566e2077929
def __deserialize_date(self, string): 'Deserializes string to date.\n\n :param string: str.\n :return: date.\n ' try: return parse(string).date() except ImportError: return string except ValueError: raise rest.ApiException(status=0, reason='Failed to parse `{0}` as date object'.format(string))
Deserializes string to date. :param string: str. :return: date.
aylien_news_api/api_client.py
__deserialize_date
AYLIEN/aylien_newsapi_python
13
python
def __deserialize_date(self, string): 'Deserializes string to date.\n\n :param string: str.\n :return: date.\n ' try: return parse(string).date() except ImportError: return string except ValueError: raise rest.ApiException(status=0, reason='Failed to parse `{0}` as date object'.format(string))
def __deserialize_date(self, string): 'Deserializes string to date.\n\n :param string: str.\n :return: date.\n ' try: return parse(string).date() except ImportError: return string except ValueError: raise rest.ApiException(status=0, reason='Failed to parse `{0}` as date object'.format(string))<|docstring|>Deserializes string to date. :param string: str. :return: date.<|endoftext|>
3ddc2cfd7081465c841052febd2f0257a6963ce5e23ffd7e2e477b018136cfad
def __deserialize_datetime(self, string): 'Deserializes string to datetime.\n\n The string should be in iso8601 datetime format.\n\n :param string: str.\n :return: datetime.\n ' try: return parse(string) except ImportError: return string except ValueError: raise rest.ApiException(status=0, reason='Failed to parse `{0}` as datetime object'.format(string))
Deserializes string to datetime. The string should be in iso8601 datetime format. :param string: str. :return: datetime.
aylien_news_api/api_client.py
__deserialize_datetime
AYLIEN/aylien_newsapi_python
13
python
def __deserialize_datetime(self, string): 'Deserializes string to datetime.\n\n The string should be in iso8601 datetime format.\n\n :param string: str.\n :return: datetime.\n ' try: return parse(string) except ImportError: return string except ValueError: raise rest.ApiException(status=0, reason='Failed to parse `{0}` as datetime object'.format(string))
def __deserialize_datetime(self, string): 'Deserializes string to datetime.\n\n The string should be in iso8601 datetime format.\n\n :param string: str.\n :return: datetime.\n ' try: return parse(string) except ImportError: return string except ValueError: raise rest.ApiException(status=0, reason='Failed to parse `{0}` as datetime object'.format(string))<|docstring|>Deserializes string to datetime. The string should be in iso8601 datetime format. :param string: str. :return: datetime.<|endoftext|>
35d1a459f1b1340b2e8f76869c05c06aa0510c10aa302f7254f22a2ace5da974
def __deserialize_model(self, data, klass): 'Deserializes list or dict to model.\n\n :param data: dict, list.\n :param klass: class literal.\n :return: model object.\n ' has_discriminator = False if (hasattr(klass, 'get_real_child_model') and klass.discriminator_value_class_map): has_discriminator = True if ((not klass.openapi_types) and (has_discriminator is False)): return data kwargs = {} if ((data is not None) and (klass.openapi_types is not None) and isinstance(data, (list, dict))): for (attr, attr_type) in six.iteritems(klass.openapi_types): if (klass.attribute_map[attr] in data): value = data[klass.attribute_map[attr]] kwargs[attr] = self.__deserialize(value, attr_type) instance = klass(**kwargs) if has_discriminator: klass_name = instance.get_real_child_model(data) if klass_name: instance = self.__deserialize(data, klass_name) return instance
Deserializes list or dict to model. :param data: dict, list. :param klass: class literal. :return: model object.
aylien_news_api/api_client.py
__deserialize_model
AYLIEN/aylien_newsapi_python
13
python
def __deserialize_model(self, data, klass): 'Deserializes list or dict to model.\n\n :param data: dict, list.\n :param klass: class literal.\n :return: model object.\n ' has_discriminator = False if (hasattr(klass, 'get_real_child_model') and klass.discriminator_value_class_map): has_discriminator = True if ((not klass.openapi_types) and (has_discriminator is False)): return data kwargs = {} if ((data is not None) and (klass.openapi_types is not None) and isinstance(data, (list, dict))): for (attr, attr_type) in six.iteritems(klass.openapi_types): if (klass.attribute_map[attr] in data): value = data[klass.attribute_map[attr]] kwargs[attr] = self.__deserialize(value, attr_type) instance = klass(**kwargs) if has_discriminator: klass_name = instance.get_real_child_model(data) if klass_name: instance = self.__deserialize(data, klass_name) return instance
def __deserialize_model(self, data, klass): 'Deserializes list or dict to model.\n\n :param data: dict, list.\n :param klass: class literal.\n :return: model object.\n ' has_discriminator = False if (hasattr(klass, 'get_real_child_model') and klass.discriminator_value_class_map): has_discriminator = True if ((not klass.openapi_types) and (has_discriminator is False)): return data kwargs = {} if ((data is not None) and (klass.openapi_types is not None) and isinstance(data, (list, dict))): for (attr, attr_type) in six.iteritems(klass.openapi_types): if (klass.attribute_map[attr] in data): value = data[klass.attribute_map[attr]] kwargs[attr] = self.__deserialize(value, attr_type) instance = klass(**kwargs) if has_discriminator: klass_name = instance.get_real_child_model(data) if klass_name: instance = self.__deserialize(data, klass_name) return instance<|docstring|>Deserializes list or dict to model. :param data: dict, list. :param klass: class literal. :return: model object.<|endoftext|>
32b31629f1f194a0081462ee1bb154726dae358c8f8f45b17de524fe1c4fc52d
def parse_tagdata_to_rawdata(self, tag_bsobject): '\n 解析接口返回的标签数据,返回原数据列表\n ' if tag_bsobject: li_array = tag_bsobject.find_all('li') category = li_array[0].text growth = li_array[1].text ref_avg_growth = li_array[2].text ref_HS300_growth = li_array[3].text current_range = li_array[4].text range_update_content = li_array[5] range_update_prefix = '' range_update_flag = range_update_content.find('font') if (range_update_flag['class'][0] == 'grn'): range_update_prefix = '-' range_update_content.font.extract() range_updage = (range_update_prefix + range_update_content.text) four_division_grange = '---' four_division_grange_content = li_array[6].find('p') if four_division_grange_content: four_division_grange = four_division_grange_content.text rawdata = ResultRawDataModel(category, growth, ref_avg_growth, ref_HS300_growth, current_range, range_updage, four_division_grange) return rawdata else: return ResultRawDataModel('---', '---', '---', '---', '---', '---', '---')
解析接口返回的标签数据,返回原数据列表
funds/src/service/easymoney/result_rawdata.py
parse_tagdata_to_rawdata
biztudio/richlab
0
python
def parse_tagdata_to_rawdata(self, tag_bsobject): '\n \n ' if tag_bsobject: li_array = tag_bsobject.find_all('li') category = li_array[0].text growth = li_array[1].text ref_avg_growth = li_array[2].text ref_HS300_growth = li_array[3].text current_range = li_array[4].text range_update_content = li_array[5] range_update_prefix = range_update_flag = range_update_content.find('font') if (range_update_flag['class'][0] == 'grn'): range_update_prefix = '-' range_update_content.font.extract() range_updage = (range_update_prefix + range_update_content.text) four_division_grange = '---' four_division_grange_content = li_array[6].find('p') if four_division_grange_content: four_division_grange = four_division_grange_content.text rawdata = ResultRawDataModel(category, growth, ref_avg_growth, ref_HS300_growth, current_range, range_updage, four_division_grange) return rawdata else: return ResultRawDataModel('---', '---', '---', '---', '---', '---', '---')
def parse_tagdata_to_rawdata(self, tag_bsobject): '\n \n ' if tag_bsobject: li_array = tag_bsobject.find_all('li') category = li_array[0].text growth = li_array[1].text ref_avg_growth = li_array[2].text ref_HS300_growth = li_array[3].text current_range = li_array[4].text range_update_content = li_array[5] range_update_prefix = range_update_flag = range_update_content.find('font') if (range_update_flag['class'][0] == 'grn'): range_update_prefix = '-' range_update_content.font.extract() range_updage = (range_update_prefix + range_update_content.text) four_division_grange = '---' four_division_grange_content = li_array[6].find('p') if four_division_grange_content: four_division_grange = four_division_grange_content.text rawdata = ResultRawDataModel(category, growth, ref_avg_growth, ref_HS300_growth, current_range, range_updage, four_division_grange) return rawdata else: return ResultRawDataModel('---', '---', '---', '---', '---', '---', '---')<|docstring|>解析接口返回的标签数据,返回原数据列表<|endoftext|>
51ef500d1a78fcd8a937347bb7714909c082b476878ea10e2c843ee734ca86a3
def fetch_archivedata(self, code): '\n fetch data via fund code from api at eastmoney site\n ' page = urllib.request.urlopen((('http://fund.eastmoney.com/f10/FundArchivesDatas.aspx?type=jdzf&code=' + code) + '&rt=0.5686106265556327')) lines = page.readlines() page.close() document = '' for line in lines: document = (document + line.decode('utf-8')) contentjson = document.replace contentjson = document.replace('var apidata=', '').replace('};', '}').replace('content:', '"content":') acdata = json.loads(contentjson)['content'] soup = BeautifulSoup(acdata, 'html.parser') uls_array = soup.find_all('ul')[1:] return [self.parse_tagdata_to_rawdata(d) for d in uls_array]
fetch data via fund code from api at eastmoney site
funds/src/service/easymoney/result_rawdata.py
fetch_archivedata
biztudio/richlab
0
python
def fetch_archivedata(self, code): '\n \n ' page = urllib.request.urlopen((('http://fund.eastmoney.com/f10/FundArchivesDatas.aspx?type=jdzf&code=' + code) + '&rt=0.5686106265556327')) lines = page.readlines() page.close() document = for line in lines: document = (document + line.decode('utf-8')) contentjson = document.replace contentjson = document.replace('var apidata=', ).replace('};', '}').replace('content:', '"content":') acdata = json.loads(contentjson)['content'] soup = BeautifulSoup(acdata, 'html.parser') uls_array = soup.find_all('ul')[1:] return [self.parse_tagdata_to_rawdata(d) for d in uls_array]
def fetch_archivedata(self, code): '\n \n ' page = urllib.request.urlopen((('http://fund.eastmoney.com/f10/FundArchivesDatas.aspx?type=jdzf&code=' + code) + '&rt=0.5686106265556327')) lines = page.readlines() page.close() document = for line in lines: document = (document + line.decode('utf-8')) contentjson = document.replace contentjson = document.replace('var apidata=', ).replace('};', '}').replace('content:', '"content":') acdata = json.loads(contentjson)['content'] soup = BeautifulSoup(acdata, 'html.parser') uls_array = soup.find_all('ul')[1:] return [self.parse_tagdata_to_rawdata(d) for d in uls_array]<|docstring|>fetch data via fund code from api at eastmoney site<|endoftext|>
5d912f042f010e32421ffdbb32d870faa380e1aa818742648c29fb2549fd90c0
def import_class(module, cls_name, file_location=None):
    """Import and return the given class from the given module.

    File location can be given to import the class from a location that
    is not accessible through the PYTHONPATH.
    This works from python 2.6 to python 3.

    Args:
        module (str): Dotted path of the module to import.
        cls_name (str): Name of the class to retrieve from the module.
        file_location (str, optional): Path to a source file used as a
            fallback when the class is not found on the imported module.

    Returns:
        type: The requested class object.
    """
    try:
        module = importlib.import_module(module)
    except NameError:
        # Very old interpreters without importlib.import_module.
        module = __import__(module, globals(), locals(), ['object'], -1)
    try:
        cls = getattr(module, cls_name)
    except AttributeError:
        # Fall back to loading directly from the given file location.
        # Fixed: 'SourceFileLoder' was a typo (AttributeError at runtime),
        # and the spec's module was never executed, so getattr always
        # failed on an empty module.
        loader = importlib.machinery.SourceFileLoader('module', file_location)
        spec = importlib.machinery.ModuleSpec('module', loader,
                                              origin=file_location)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        cls = getattr(module, cls_name)
    return cls
Import and return the given class from the given module. File location can be given to import the class from a location that is not accessible through the PYTHONPATH. This works from python 2.6 to python 3.
flowpipe/utilities.py
import_class
osynge/flowpipe
139
python
def import_class(module, cls_name, file_location=None): 'Import and return the given class from the given module.\n\n File location can be given to import the class from a location that\n is not accessible through the PYTHONPATH.\n This works from python 2.6 to python 3.\n ' try: module = importlib.import_module(module) except NameError: module = __import__(module, globals(), locals(), ['object'], (- 1)) try: cls = getattr(module, cls_name) except AttributeError: loader = importlib.machinery.SourceFileLoder('module', file_location) spec = importlib.machinery.ModuleSpec('module', loader, origin=file_location) module = importlib.util.module_from_spec(spec) cls = getattr(module, cls_name) return cls
def import_class(module, cls_name, file_location=None): 'Import and return the given class from the given module.\n\n File location can be given to import the class from a location that\n is not accessible through the PYTHONPATH.\n This works from python 2.6 to python 3.\n ' try: module = importlib.import_module(module) except NameError: module = __import__(module, globals(), locals(), ['object'], (- 1)) try: cls = getattr(module, cls_name) except AttributeError: loader = importlib.machinery.SourceFileLoder('module', file_location) spec = importlib.machinery.ModuleSpec('module', loader, origin=file_location) module = importlib.util.module_from_spec(spec) cls = getattr(module, cls_name) return cls<|docstring|>Import and return the given class from the given module. File location can be given to import the class from a location that is not accessible through the PYTHONPATH. This works from python 2.6 to python 3.<|endoftext|>
0ac06f6d8b80b6b48237733b93adbbd9832ef19216bd8964dbd028c9ad2062e9
def deserialize_node(data):
    """De-serialize a node from the given json data.

    Args:
        data (dict): Serialized node data, including 'module', 'cls' and
            'file_location' keys used to locate the node class.

    Returns:
        The reconstructed node instance (not attached to a graph).
    """
    node_cls = import_class(data['module'], data['cls'], data['file_location'])
    node = node_cls(graph=None)
    node.post_deserialize(data)
    return node
De-serialize a node from the given json data.
flowpipe/utilities.py
deserialize_node
osynge/flowpipe
139
python
def deserialize_node(data): node = import_class(data['module'], data['cls'], data['file_location'])(graph=None) node.post_deserialize(data) return node
def deserialize_node(data): node = import_class(data['module'], data['cls'], data['file_location'])(graph=None) node.post_deserialize(data) return node<|docstring|>De-serialize a node from the given json data.<|endoftext|>
2530cff75309005839266ccd3d40003d79c2dd701037e73162993d05b26780d3
def deserialize_graph(data):
    """De-serialize from the given json data.

    Rebuilds the top-level graph and any subgraphs first, then wires up
    the plug connections between all nodes in a second pass (connections
    can reference nodes in other (sub)graphs, so every node must exist
    before any connection is made).

    Args:
        data (dict): Serialized graph data.

    Returns:
        The reconstructed top-level graph instance.
    """
    graph = import_class(data['module'], data['cls'])()
    graph.name = data['name']
    graph.nodes = []
    for node in data['nodes']:
        deserialized_node = deserialize_node(node)
        graph.nodes.append(deserialized_node)
        deserialized_node.graph = graph
    # Map identifier -> node so connections can be resolved by id below.
    nodes = {n.identifier: n for n in graph.nodes}
    # Raw dicts for the connection pass; extended with subgraph nodes too.
    all_nodes = [n for n in data['nodes']]
    subgraphs = []
    for sub_data in data.get('subgraphs', []):
        subgraph = import_class(sub_data['module'], sub_data['cls'])()
        subgraph.name = sub_data['name']
        subgraph.nodes = []
        for node in sub_data['nodes']:
            deserialized_node = deserialize_node(node)
            subgraph.nodes.append(deserialized_node)
            deserialized_node.graph = subgraph
        all_nodes += sub_data['nodes']
        subgraphs.append(subgraph)
        nodes.update({n.identifier: n for n in subgraph.nodes})
    # Second pass: re-create connections now that every node exists.
    for node in all_nodes:
        this = nodes[node['identifier']]
        for (name, input_) in node['inputs'].items():
            for (identifier, plug) in input_['connections'].items():
                upstream = nodes[identifier]
                # '>>' is the flowpipe connection operator.
                (upstream.outputs[plug] >> this.inputs[name])
            for (sub_plug_name, sub_plug) in input_['sub_plugs'].items():
                # Sub-plug names are stored as "parent.key"; keep the key.
                sub_plug_name = sub_plug_name.split('.')[(- 1)]
                for (identifier, plug) in sub_plug['connections'].items():
                    upstream = nodes[identifier]
                    upstream.outputs[plug].connect(this.inputs[name][sub_plug_name])
    return graph
De-serialize from the given json data.
flowpipe/utilities.py
deserialize_graph
osynge/flowpipe
139
python
def deserialize_graph(data): graph = import_class(data['module'], data['cls'])() graph.name = data['name'] graph.nodes = [] for node in data['nodes']: deserialized_node = deserialize_node(node) graph.nodes.append(deserialized_node) deserialized_node.graph = graph nodes = {n.identifier: n for n in graph.nodes} all_nodes = [n for n in data['nodes']] subgraphs = [] for sub_data in data.get('subgraphs', []): subgraph = import_class(sub_data['module'], sub_data['cls'])() subgraph.name = sub_data['name'] subgraph.nodes = [] for node in sub_data['nodes']: deserialized_node = deserialize_node(node) subgraph.nodes.append(deserialized_node) deserialized_node.graph = subgraph all_nodes += sub_data['nodes'] subgraphs.append(subgraph) nodes.update({n.identifier: n for n in subgraph.nodes}) for node in all_nodes: this = nodes[node['identifier']] for (name, input_) in node['inputs'].items(): for (identifier, plug) in input_['connections'].items(): upstream = nodes[identifier] (upstream.outputs[plug] >> this.inputs[name]) for (sub_plug_name, sub_plug) in input_['sub_plugs'].items(): sub_plug_name = sub_plug_name.split('.')[(- 1)] for (identifier, plug) in sub_plug['connections'].items(): upstream = nodes[identifier] upstream.outputs[plug].connect(this.inputs[name][sub_plug_name]) return graph
def deserialize_graph(data): graph = import_class(data['module'], data['cls'])() graph.name = data['name'] graph.nodes = [] for node in data['nodes']: deserialized_node = deserialize_node(node) graph.nodes.append(deserialized_node) deserialized_node.graph = graph nodes = {n.identifier: n for n in graph.nodes} all_nodes = [n for n in data['nodes']] subgraphs = [] for sub_data in data.get('subgraphs', []): subgraph = import_class(sub_data['module'], sub_data['cls'])() subgraph.name = sub_data['name'] subgraph.nodes = [] for node in sub_data['nodes']: deserialized_node = deserialize_node(node) subgraph.nodes.append(deserialized_node) deserialized_node.graph = subgraph all_nodes += sub_data['nodes'] subgraphs.append(subgraph) nodes.update({n.identifier: n for n in subgraph.nodes}) for node in all_nodes: this = nodes[node['identifier']] for (name, input_) in node['inputs'].items(): for (identifier, plug) in input_['connections'].items(): upstream = nodes[identifier] (upstream.outputs[plug] >> this.inputs[name]) for (sub_plug_name, sub_plug) in input_['sub_plugs'].items(): sub_plug_name = sub_plug_name.split('.')[(- 1)] for (identifier, plug) in sub_plug['connections'].items(): upstream = nodes[identifier] upstream.outputs[plug].connect(this.inputs[name][sub_plug_name]) return graph<|docstring|>De-serialize from the given json data.<|endoftext|>
0ae6d2e2a76beded111c88420ec1752be4ad7cc61b1a554d57c45c31df35ca11
def get_hash(obj, hash_func=(lambda x: sha256(x).hexdigest())):
    """Safely get the hash of an object.

    This function tries to compute the hash as safely as possible, dealing
    with json data and strings in a well-defined manner.

    Args:
        obj: The object to hash
        hash_func (func(obj) -> str): The hashing function to use

    Returns:
        (str): A hash of the obj, or None if no hash could be computed
    """
    # Fast path: the object is directly digestible (e.g. bytes).
    try:
        return hash_func(obj)
    except (TypeError, ValueError):
        pass
    # Normalize json-serializable objects to a canonical string form.
    try:
        obj = json.dumps(obj, sort_keys=True)
    except TypeError:
        pass
    if isinstance(obj, str):
        return hash_func(obj.encode('utf-8'))
    if sys.version_info.major <= 2:
        return None
    # Python 3: last resort, try the bytes protocol.
    try:
        return hash_func(bytes(obj))
    except TypeError:
        return None
Safely get the hash of an object. This function tries to compute the hash as safely as possible, dealing with json data and strings in a well-defined manner. Args: obj: The object to hash hash_func (func(obj) -> str): The hashing function to use Returns: (str): A hash of the obj
flowpipe/utilities.py
get_hash
osynge/flowpipe
139
python
def get_hash(obj, hash_func=(lambda x: sha256(x).hexdigest())): 'Safely get the hash of an object.\n\n This function tries to compute the hash as safely as possible, dealing with\n json data and strings in a well-defined manner.\n\n Args:\n obj: The object to hash\n hash_func (func(obj) -> str): The hashing function to use\n\n Returns:\n (str): A hash of the obj\n\n ' try: return hash_func(obj) except (TypeError, ValueError): try: js = json.dumps(obj, sort_keys=True) except TypeError: pass else: obj = js if isinstance(obj, str): return hash_func(obj.encode('utf-8')) if (sys.version_info.major > 2): try: return hash_func(bytes(obj)) except TypeError: return None else: return None
def get_hash(obj, hash_func=(lambda x: sha256(x).hexdigest())): 'Safely get the hash of an object.\n\n This function tries to compute the hash as safely as possible, dealing with\n json data and strings in a well-defined manner.\n\n Args:\n obj: The object to hash\n hash_func (func(obj) -> str): The hashing function to use\n\n Returns:\n (str): A hash of the obj\n\n ' try: return hash_func(obj) except (TypeError, ValueError): try: js = json.dumps(obj, sort_keys=True) except TypeError: pass else: obj = js if isinstance(obj, str): return hash_func(obj.encode('utf-8')) if (sys.version_info.major > 2): try: return hash_func(bytes(obj)) except TypeError: return None else: return None<|docstring|>Safely get the hash of an object. This function tries to compute the hash as safely as possible, dealing with json data and strings in a well-defined manner. Args: obj: The object to hash hash_func (func(obj) -> str): The hashing function to use Returns: (str): A hash of the obj<|endoftext|>
0fa544f78e9536eb610cd015c4ea7c45b041625cc5b49ed29e3b5c6cc15492ed
def default(self, o):
    """Encode the object, handling type errors by encoding into sha256."""
    # Let the base encoder handle anything json-serializable.
    try:
        return super(NodeEncoder, self).default(o)
    except TypeError:
        pass
    # Not json-serializable; fall back to a sha256 digest of the object.
    try:
        return sha256(o).hexdigest()
    except TypeError:
        # Not bytes-like either; last resort is the string representation.
        return str(o)
    except ValueError:
        return sha256(bytes(o)).hexdigest()
Encode the object, handling type errors by encoding into sha256.
flowpipe/utilities.py
default
osynge/flowpipe
139
python
def default(self, o): try: return super(NodeEncoder, self).default(o) except TypeError: try: return sha256(o).hexdigest() except TypeError: return str(o) except ValueError: return sha256(bytes(o)).hexdigest()
def default(self, o): try: return super(NodeEncoder, self).default(o) except TypeError: try: return sha256(o).hexdigest() except TypeError: return str(o) except ValueError: return sha256(bytes(o)).hexdigest()<|docstring|>Encode the object, handling type errors by encoding into sha256.<|endoftext|>
62ec93afcbccd7b2ada6d764c43ce931ab4c81d86eeee7fdf78d031c826f9888
def fourthPower(x):
    """Return x raised to the fourth power by squaring twice.

    x: int or float.
    """
    squared = square(x)
    return square(squared)
x: int or float.
fingerExercises/fingerExercises-02/02.4-finger.fourth-power.py
fourthPower
sodaPhix/MITx-6.00.1x
1
python
def fourthPower(x): '\n \n ' return square(square(x))
def fourthPower(x): '\n \n ' return square(square(x))<|docstring|>x: int or float.<|endoftext|>
eb29bd242d07e0767ea170df155298eb7b2b3f8e72f281a29fa55a1578f284b1
def log_error(f):
    """Decorator: catch and report exceptions raised by a bot handler.

    The error is printed, forwarded to the admin chat when an Update
    object with a ``message`` is available, and then re-raised so the
    framework still sees the failure.
    """
    def inner(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception as e:
            # Fixed: the message previously ended with a dangling "in "
            # (the handler name was never appended).
            error = f'ERROR {e} in {f.__name__}'
            print(error)
            # Guard args[0]: an IndexError here would mask the real error.
            update = args[0] if args else None
            if update and hasattr(update, 'message'):
                update.message.bot.send_message(chat_id=ADMIN_ID, text=error)
            raise e
    return inner
Отлавливание ошибок
main.py
log_error
xm4dn355x/tg_artyukhov_today
0
python
def log_error(f): def inner(*args, **kwargs): try: return f(*args, **kwargs) except Exception as e: error = f'ERROR {e} in ' print(error) update = args[0] if (update and hasattr(update, 'message')): update.message.bot.send_message(chat_id=ADMIN_ID, text=error) raise e return inner
def log_error(f): def inner(*args, **kwargs): try: return f(*args, **kwargs) except Exception as e: error = f'ERROR {e} in ' print(error) update = args[0] if (update and hasattr(update, 'message')): update.message.bot.send_message(chat_id=ADMIN_ID, text=error) raise e return inner<|docstring|>Отлавливание ошибок<|endoftext|>
1d033015c7554ea4803bd4e66cd348c6042547ead846b23c4ee5957a10e6b798
@log_error
def post_in_channel(data: dict) -> None:
    """Post an article to the channel.

    Args:
        data: Expected to carry 'title', 'description' and 'url' keys.
    """
    # Markdown layout: bold title, blank line, description, blank line, link.
    bot.send_message(parse_mode='markdown', chat_id=CHAT_ID, text=f'''**{data['title']}**

{data['description']}

{data['url']}''')
Постит пост в канал
main.py
post_in_channel
xm4dn355x/tg_artyukhov_today
0
python
@log_error def post_in_channel(data: dict) -> None: bot.send_message(parse_mode='markdown', chat_id=CHAT_ID, text=f'**{data['title']}** {data['description']} {data['url']}')
@log_error def post_in_channel(data: dict) -> None: bot.send_message(parse_mode='markdown', chat_id=CHAT_ID, text=f'**{data['title']}** {data['description']} {data['url']}')<|docstring|>Постит пост в канал<|endoftext|>
4d00dfb33343d6eadb60aab1f063a64a07add388d84fa38b28908bb795ba5181
def setUp(self):
    """Run at the beginning of every test to setup the gui"""
    # Fresh lamination + slot so every test starts from known dimensions.
    self.test_obj = LamSlotWind(Rint=0.1, Rext=0.2)
    self.test_obj.slot = SlotW25(H1=0.11, H2=0.12, W3=0.14, W4=0.15)
    self.widget = PWSlot25(self.test_obj)
Run at the begining of every test to setup the gui
Tests/GUI/DMachineSetup/test_PWSlot25.py
setUp
Kelos-Zhu/pyleecan
2
python
def setUp(self): self.test_obj = LamSlotWind(Rint=0.1, Rext=0.2) self.test_obj.slot = SlotW25(H1=0.11, H2=0.12, W3=0.14, W4=0.15) self.widget = PWSlot25(self.test_obj)
def setUp(self): self.test_obj = LamSlotWind(Rint=0.1, Rext=0.2) self.test_obj.slot = SlotW25(H1=0.11, H2=0.12, W3=0.14, W4=0.15) self.widget = PWSlot25(self.test_obj)<|docstring|>Run at the begining of every test to setup the gui<|endoftext|>
0175da663076db20a7276aa6c316b47546ce5dd77c219d9c1794522fde599b8e
@classmethod
def setUpClass(cls):
    """Start the app for the test"""
    print('\nStart Test PWSlot25')
    # One QApplication for the whole test case; Qt allows only one instance.
    cls.app = QtWidgets.QApplication(sys.argv)
Start the app for the test
Tests/GUI/DMachineSetup/test_PWSlot25.py
setUpClass
Kelos-Zhu/pyleecan
2
python
@classmethod def setUpClass(cls): print('\nStart Test PWSlot25') cls.app = QtWidgets.QApplication(sys.argv)
@classmethod def setUpClass(cls): print('\nStart Test PWSlot25') cls.app = QtWidgets.QApplication(sys.argv)<|docstring|>Start the app for the test<|endoftext|>
033a316ab8de57809551c33ebf4e118bfcad5c1960e9590ebcdbc0b93187d15f
@classmethod
def tearDownClass(cls):
    """Exit the app after the test"""
    cls.app.quit()
Exit the app after the test
Tests/GUI/DMachineSetup/test_PWSlot25.py
tearDownClass
Kelos-Zhu/pyleecan
2
python
@classmethod def tearDownClass(cls): cls.app.quit()
@classmethod def tearDownClass(cls): cls.app.quit()<|docstring|>Exit the app after the test<|endoftext|>
d4b76de49709936b2fc99b9cff85c0952781996ab4dd390cf56623b6079f1227
def test_init(self):
    """Check that the Widget spinbox initialise to the lamination value"""
    # Values set in setUp must be reflected in the line-edit fields.
    self.assertEqual(self.widget.lf_H1.value(), 0.11)
    self.assertEqual(self.widget.lf_H2.value(), 0.12)
    self.assertEqual(self.widget.lf_W3.value(), 0.14)
    self.assertEqual(self.widget.lf_W4.value(), 0.15)
    # Re-create the widget with a different slot to rule out stale state.
    self.test_obj.slot = SlotW25(H1=0.21, H2=0.22, W3=0.24, W4=0.25)
    self.widget = PWSlot25(self.test_obj)
    self.assertEqual(self.widget.lf_H1.value(), 0.21)
    self.assertEqual(self.widget.lf_H2.value(), 0.22)
    self.assertEqual(self.widget.lf_W3.value(), 0.24)
    self.assertEqual(self.widget.lf_W4.value(), 0.25)
Check that the Widget spinbox initialise to the lamination value
Tests/GUI/DMachineSetup/test_PWSlot25.py
test_init
Kelos-Zhu/pyleecan
2
python
def test_init(self): self.assertEqual(self.widget.lf_H1.value(), 0.11) self.assertEqual(self.widget.lf_H2.value(), 0.12) self.assertEqual(self.widget.lf_W3.value(), 0.14) self.assertEqual(self.widget.lf_W4.value(), 0.15) self.test_obj.slot = SlotW25(H1=0.21, H2=0.22, W3=0.24, W4=0.25) self.widget = PWSlot25(self.test_obj) self.assertEqual(self.widget.lf_H1.value(), 0.21) self.assertEqual(self.widget.lf_H2.value(), 0.22) self.assertEqual(self.widget.lf_W3.value(), 0.24) self.assertEqual(self.widget.lf_W4.value(), 0.25)
def test_init(self): self.assertEqual(self.widget.lf_H1.value(), 0.11) self.assertEqual(self.widget.lf_H2.value(), 0.12) self.assertEqual(self.widget.lf_W3.value(), 0.14) self.assertEqual(self.widget.lf_W4.value(), 0.15) self.test_obj.slot = SlotW25(H1=0.21, H2=0.22, W3=0.24, W4=0.25) self.widget = PWSlot25(self.test_obj) self.assertEqual(self.widget.lf_H1.value(), 0.21) self.assertEqual(self.widget.lf_H2.value(), 0.22) self.assertEqual(self.widget.lf_W3.value(), 0.24) self.assertEqual(self.widget.lf_W4.value(), 0.25)<|docstring|>Check that the Widget spinbox initialise to the lamination value<|endoftext|>
fd2d0b12fd5ced9659c5a1c7f0a5c441d4e8c3e7ff1a34ce7a2992626a0c8b27
def test_set_W3(self):
    """Check that the Widget allow to update W3"""
    # Simulate the user clearing the field and typing a new value;
    # editingFinished is emitted manually since no real focus change occurs.
    self.widget.lf_W3.clear()
    QTest.keyClicks(self.widget.lf_W3, '0.32')
    self.widget.lf_W3.editingFinished.emit()
    # Both the widget's slot and the underlying lamination must update.
    self.assertEqual(self.widget.slot.W3, 0.32)
    self.assertEqual(self.test_obj.slot.W3, 0.32)
Check that the Widget allow to update W3
Tests/GUI/DMachineSetup/test_PWSlot25.py
test_set_W3
Kelos-Zhu/pyleecan
2
python
def test_set_W3(self): self.widget.lf_W3.clear() QTest.keyClicks(self.widget.lf_W3, '0.32') self.widget.lf_W3.editingFinished.emit() self.assertEqual(self.widget.slot.W3, 0.32) self.assertEqual(self.test_obj.slot.W3, 0.32)
def test_set_W3(self): self.widget.lf_W3.clear() QTest.keyClicks(self.widget.lf_W3, '0.32') self.widget.lf_W3.editingFinished.emit() self.assertEqual(self.widget.slot.W3, 0.32) self.assertEqual(self.test_obj.slot.W3, 0.32)<|docstring|>Check that the Widget allow to update W3<|endoftext|>
13decc1a9dd1ee9560679b289fcfc82ab75eb18ec2d48038fdb8730ec52847e6
def test_set_W4(self):
    """Check that the Widget allow to update W4"""
    # Simulate keyboard entry; editingFinished is emitted manually.
    self.widget.lf_W4.clear()
    QTest.keyClicks(self.widget.lf_W4, '0.33')
    self.widget.lf_W4.editingFinished.emit()
    # Both the widget's slot and the underlying lamination must update.
    self.assertEqual(self.widget.slot.W4, 0.33)
    self.assertEqual(self.test_obj.slot.W4, 0.33)
Check that the Widget allow to update W4
Tests/GUI/DMachineSetup/test_PWSlot25.py
test_set_W4
Kelos-Zhu/pyleecan
2
python
def test_set_W4(self): self.widget.lf_W4.clear() QTest.keyClicks(self.widget.lf_W4, '0.33') self.widget.lf_W4.editingFinished.emit() self.assertEqual(self.widget.slot.W4, 0.33) self.assertEqual(self.test_obj.slot.W4, 0.33)
def test_set_W4(self): self.widget.lf_W4.clear() QTest.keyClicks(self.widget.lf_W4, '0.33') self.widget.lf_W4.editingFinished.emit() self.assertEqual(self.widget.slot.W4, 0.33) self.assertEqual(self.test_obj.slot.W4, 0.33)<|docstring|>Check that the Widget allow to update W4<|endoftext|>
799e2d540e477f92b240a3fe9c98f541cba0dbaa28fa2d3f68daccfca8134ce6
def test_set_H1(self):
    """Check that the Widget allow to update H1"""
    # Simulate keyboard entry; editingFinished is emitted manually.
    self.widget.lf_H1.clear()
    QTest.keyClicks(self.widget.lf_H1, '0.35')
    self.widget.lf_H1.editingFinished.emit()
    # Both the widget's slot and the underlying lamination must update.
    self.assertEqual(self.widget.slot.H1, 0.35)
    self.assertEqual(self.test_obj.slot.H1, 0.35)
Check that the Widget allow to update H1
Tests/GUI/DMachineSetup/test_PWSlot25.py
test_set_H1
Kelos-Zhu/pyleecan
2
python
def test_set_H1(self): self.widget.lf_H1.clear() QTest.keyClicks(self.widget.lf_H1, '0.35') self.widget.lf_H1.editingFinished.emit() self.assertEqual(self.widget.slot.H1, 0.35) self.assertEqual(self.test_obj.slot.H1, 0.35)
def test_set_H1(self): self.widget.lf_H1.clear() QTest.keyClicks(self.widget.lf_H1, '0.35') self.widget.lf_H1.editingFinished.emit() self.assertEqual(self.widget.slot.H1, 0.35) self.assertEqual(self.test_obj.slot.H1, 0.35)<|docstring|>Check that the Widget allow to update H1<|endoftext|>
0bb20219f3660e7cc11980835ad88f96f052c00250bacb83596ff19e0efb1bae
def test_set_H2(self):
    """Check that the Widget allow to update H2"""
    # Simulate keyboard entry; editingFinished is emitted manually.
    self.widget.lf_H2.clear()
    QTest.keyClicks(self.widget.lf_H2, '0.36')
    self.widget.lf_H2.editingFinished.emit()
    # Both the widget's slot and the underlying lamination must update.
    self.assertEqual(self.widget.slot.H2, 0.36)
    self.assertEqual(self.test_obj.slot.H2, 0.36)
Check that the Widget allow to update H2
Tests/GUI/DMachineSetup/test_PWSlot25.py
test_set_H2
Kelos-Zhu/pyleecan
2
python
def test_set_H2(self): self.widget.lf_H2.clear() QTest.keyClicks(self.widget.lf_H2, '0.36') self.widget.lf_H2.editingFinished.emit() self.assertEqual(self.widget.slot.H2, 0.36) self.assertEqual(self.test_obj.slot.H2, 0.36)
def test_set_H2(self): self.widget.lf_H2.clear() QTest.keyClicks(self.widget.lf_H2, '0.36') self.widget.lf_H2.editingFinished.emit() self.assertEqual(self.widget.slot.H2, 0.36) self.assertEqual(self.test_obj.slot.H2, 0.36)<|docstring|>Check that the Widget allow to update H2<|endoftext|>
dce49d320e80a67d3f5b1894f1faeda95099a4e3e3eb35a63aa7e7a1b960dcc8
def test_output_txt(self):
    """Check that the Output text is computed and correct"""
    # Use a rotor-style internal lamination with known geometry so the
    # computed slot height shown in the output panel is deterministic.
    self.test_obj = LamSlotWind(Rint=0, Rext=0.5, is_internal=True,
                                is_stator=False, L1=0.9, Nrvd=1, Wrvd=0.1)
    self.test_obj.slot = SlotW25(Zs=12, W4=0.15, W3=0.075, H1=0.03, H2=0.15)
    self.widget = PWSlot25(self.test_obj)
    # Expected value: H1 + H2 shown is not simply additive -- 0.1789 m
    # comes from the slot-height computation on the curved lamination.
    self.assertEqual(self.widget.w_out.out_slot_height.text(),
                     'Slot height: 0.1789 m')
Check that the Output text is computed and correct
Tests/GUI/DMachineSetup/test_PWSlot25.py
test_output_txt
Kelos-Zhu/pyleecan
2
python
def test_output_txt(self): '\n ' self.test_obj = LamSlotWind(Rint=0, Rext=0.5, is_internal=True, is_stator=False, L1=0.9, Nrvd=1, Wrvd=0.1) self.test_obj.slot = SlotW25(Zs=12, W4=0.15, W3=0.075, H1=0.03, H2=0.15) self.widget = PWSlot25(self.test_obj) self.assertEqual(self.widget.w_out.out_slot_height.text(), 'Slot height: 0.1789 m')
def test_output_txt(self): '\n ' self.test_obj = LamSlotWind(Rint=0, Rext=0.5, is_internal=True, is_stator=False, L1=0.9, Nrvd=1, Wrvd=0.1) self.test_obj.slot = SlotW25(Zs=12, W4=0.15, W3=0.075, H1=0.03, H2=0.15) self.widget = PWSlot25(self.test_obj) self.assertEqual(self.widget.w_out.out_slot_height.text(), 'Slot height: 0.1789 m')<|docstring|>Check that the Output text is computed and correct<|endoftext|>