hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: float64
max_line_length: int64
alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64
qsc_code_num_chars_quality_signal: float64
qsc_code_mean_word_length_quality_signal: float64
qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64
qsc_code_frac_chars_top_3grams_quality_signal: float64
qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64
qsc_code_frac_chars_dupe_6grams_quality_signal: float64
qsc_code_frac_chars_dupe_7grams_quality_signal: float64
qsc_code_frac_chars_dupe_8grams_quality_signal: float64
qsc_code_frac_chars_dupe_9grams_quality_signal: float64
qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64
qsc_code_frac_chars_digital_quality_signal: float64
qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64
qsc_code_num_lines_quality_signal: float64
qsc_code_num_chars_line_max_quality_signal: float64
qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64
qsc_code_frac_chars_comments_quality_signal: float64
qsc_code_cate_xml_start_quality_signal: float64
qsc_code_frac_lines_dupe_lines_quality_signal: float64
qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64
qsc_code_frac_chars_string_length_quality_signal: float64
qsc_code_frac_chars_long_word_length_quality_signal: float64
qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64
qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64
qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64
qsc_codepython_frac_lines_import_quality_signal: float64
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64
qsc_code_num_chars: int64
qsc_code_mean_word_length: int64
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64
qsc_code_frac_chars_top_3grams: int64
qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64
qsc_code_frac_chars_dupe_6grams: int64
qsc_code_frac_chars_dupe_7grams: int64
qsc_code_frac_chars_dupe_8grams: int64
qsc_code_frac_chars_dupe_9grams: int64
qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64
qsc_code_frac_chars_digital: int64
qsc_code_frac_chars_whitespace: int64
qsc_code_size_file_byte: int64
qsc_code_num_lines: int64
qsc_code_num_chars_line_max: int64
qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64
qsc_code_frac_chars_comments: int64
qsc_code_cate_xml_start: int64
qsc_code_frac_lines_dupe_lines: int64
qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64
qsc_code_frac_chars_string_length: int64
qsc_code_frac_chars_long_word_length: int64
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64
qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64
qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64
qsc_codepython_frac_lines_func_ratio: int64
qsc_codepython_cate_var_zero: int64
qsc_codepython_frac_lines_pass: int64
qsc_codepython_frac_lines_import: int64
qsc_codepython_frac_lines_simplefunc: int64
qsc_codepython_score_lines_no_logic: int64
qsc_codepython_frac_lines_print: int64
effective: string
hits: int64
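The rows below follow this schema, one value per field. As a minimal sketch of how such records might be consumed, the example below assumes the rows are materialized in a Parquet file; the file name code_quality_signals.parquet and the filter thresholds are hypothetical, not part of the dataset itself. It loads a few columns and keeps Python files whose quality signals suggest substantive code.

import pandas as pd

# Hypothetical path; the actual storage location/format of these rows is not stated above.
PARQUET_PATH = "code_quality_signals.parquet"

# Load only the columns needed for a quick quality filter.
columns = [
    "hexsha", "lang", "size", "content",
    "alphanum_fraction",
    "qsc_codepython_frac_lines_print_quality_signal",
]
df = pd.read_parquet(PARQUET_PATH, columns=columns)

# Illustrative filter: Python files that are mostly alphanumeric and
# not dominated by print statements (thresholds are arbitrary examples).
mask = (
    (df["lang"] == "Python")
    & (df["alphanum_fraction"] > 0.5)
    & (df["qsc_codepython_frac_lines_print_quality_signal"] < 0.2)
)
print(df.loc[mask, ["hexsha", "size"]].head())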
075601d812e7788a83abdb5d69e6437c29517e9c
7,993
py
Python
src/sultan/result.py
bquantump/sultan
a46e8dc9b09385a7226f6151134ae2417166f25d
[ "MIT" ]
null
null
null
src/sultan/result.py
bquantump/sultan
a46e8dc9b09385a7226f6151134ae2417166f25d
[ "MIT" ]
null
null
null
src/sultan/result.py
bquantump/sultan
a46e8dc9b09385a7226f6151134ae2417166f25d
[ "MIT" ]
null
null
null
import subprocess import sys import time import traceback from queue import Queue from sultan.core import Base from sultan.echo import Echo from threading import Thread class Result(Base): """ Class that encompasses the result of a POpen command. """ def __init__(self, process, commands, context, streaming=False, exception=None, halt_on_nonzero=False): super(Result, self).__init__() self._process = process self._commands = commands self._context = context self._exception = exception self.__echo = Echo() self._streaming = streaming self.rc = None self._halt_on_nonzero=halt_on_nonzero if process and streaming: self.is_complete = False self.__stdout = Queue() self.__stderr = Queue() self.__stdin = Queue() self._stdout_t = Thread(target=self.read_output, args=(process.stdout, self.__stdout)) self._stderr_t = Thread(target=self.read_output, args=(process.stderr, self.__stderr)) self._stdin_t = Thread(target=self.write_input) self._wait_t = Thread(target=self.wait_on_process) for t in (self._stdout_t, self._stderr_t, self._stdin_t, self._wait_t): t.daemon = True t.start() else: self.is_complete = True try: stdout, stderr = process.communicate() except: stdout, stderr = None, None try: self.rc = process.returncode except: pass self.__stdout = stdout.strip().splitlines() if stdout else [] self.__stderr = stderr.strip().splitlines() if stderr else [] if self._halt_on_nonzero and self.rc != 0: print(self.stderr) raise subprocess.CalledProcessError(self.rc, ''.join(self._commands), self.stderr) # self.dump_exception() def read_output(self, pipe, q): for line in iter(pipe.readline, b''): if line: q.put(line.strip()) elif self.is_complete: break else: time.sleep(0.1) pipe.close() def write_input(self): for line in iter(self.__stdin.get, None): if line.endswith("\n"): self._process.stdin.write(line) else: self._process.stdin.write(line + "\n") def wait_on_process(self): self.rc = self._process.wait() self.__stdin.put(None) self.is_complete = True for t in (self._stdout_t, self._stderr_t, self._stdin_t): t.join() if self._halt_on_nonzero and self.rc != 0: self.dump_exception() sys.exit() def dump_exception(self): if not self._exception: try: raise subprocess.CalledProcessError(self.rc, ''.join(self._commands), self.stderr) except subprocess.CalledProcessError as e: self._exception = e self.__echo.critical("Unable to run '%s'" % self._commands) # traceback self.print_traceback() # standard out self.print_stdout() # standard error self.print_stderr() # print debug information self.__display_exception_debug_information() if self._halt_on_nonzero: raise self._exception def __display_exception_debug_information(self): def echo_debug_info(key): if self._context and len(self._context) > 0: self.__echo.warn("\t - %s: %s" % (key, self._context[0].get(key, 'N/A'))) self.__echo.warn("The following are additional information that can be used to debug this exception.") self.__echo.warn("The following is the context used to run:") echo_debug_info('cwd') echo_debug_info('sudo') echo_debug_info('user') echo_debug_info('hostname') echo_debug_info('env') echo_debug_info('logging') echo_debug_info('executable') echo_debug_info('ssh_config') echo_debug_info('src') def __str__(self): return '\n'.join(self.stdout) def __format_line(self, msg): return '| %s' % msg def __format_lines_error(self, lines): for line in lines: self.__echo.critical(self.__format_line(line)) def __format_lines_info(self, lines): for line in lines: self.__echo.info(self.__format_line(line)) @property def stdout(self): """ Converts stdout string to a 
list. """ if self._streaming: stdout = [] while not self.__stdout.empty(): try: line = self.__stdout.get_nowait() stdout.append(line) except: pass else: stdout = self.__stdout return stdout @property def stderr(self): """ Converts stderr string to a list. """ if self._streaming: stderr = [] while not self.__stderr.empty(): try: line = self.__stderr.get_nowait() stderr.append(line) except: pass else: stderr = self.__stderr return stderr def stdin(self, line): """ Sends input to stdin. """ if self._streaming: self.__stdin.put(line) @property def traceback(self): """ Converts traceback string to a list. """ if self._exception: return traceback.format_exc().split("\n") else: return [] @property def is_success(self): """ Returns if the result of the command was a success. True for success, False for failure. """ return self.is_complete and self.rc == 0 @property def is_failure(self): """ Returns if the result of the command was a failure. True for failure, False for succes. """ return self.is_complete and not self.rc == 0 @property def has_exception(self): ''' Returns True if self._exception is not empty. ''' return bool(self._exception) def print_stdout(self, always_print=False): """ Prints the stdout to console - if there is any stdout, otherwise does nothing. :param always_print: print the stdout, even if there is nothing in the buffer (default: false) """ if self.__stdout or always_print: self.__echo.info("---------------" + "-" * 100) self.__format_lines_info(self.stdout) self.__echo.info("---------------" + "-" * 100) def print_stderr(self, always_print=False): """ Prints the stderr to console - if there is any stdout, otherwise does nothing. :param always_print: print the stderr, even if there is nothing in the buffer (default: false) """ if self.__stderr or always_print: self.__echo.critical("--{ STDERR }---" + "-" * 100) self.__format_lines_error(self.stderr) self.__echo.critical("---------------" + "-" * 100) def print_traceback(self, always_print=False): """ Prints the traceback to console - if there is any traceback, otherwise does nothing. :param always_print: print the traceback, even if there is nothing in the buffer (default: false) """ if self._exception or always_print: self.__echo.critical("--{ TRACEBACK }" + "-" * 100) self.__format_lines_error(self.traceback) self.__echo.critical("---------------" + "-" * 100)
31.222656
110
0.555236
899
7,993
4.675195
0.176863
0.03331
0.03093
0.016179
0.326909
0.263145
0.206281
0.192957
0.149655
0.135855
0
0.004933
0.340548
7,993
255
111
31.345098
0.792449
0.128362
0
0.233533
0
0
0.047177
0
0
0
0
0
0
1
0.125749
false
0.017964
0.047904
0.011976
0.233533
0.05988
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07565d1f240205eff7e6a9514e645e53e8414dbd
10,991
py
Python
great_expectations/cli/datasource.py
orenovadia/great_expectations
76ef0c4e066227f8b589a1ee6ac885618f65906e
[ "Apache-2.0" ]
null
null
null
great_expectations/cli/datasource.py
orenovadia/great_expectations
76ef0c4e066227f8b589a1ee6ac885618f65906e
[ "Apache-2.0" ]
null
null
null
great_expectations/cli/datasource.py
orenovadia/great_expectations
76ef0c4e066227f8b589a1ee6ac885618f65906e
[ "Apache-2.0" ]
null
null
null
import os import click from .util import cli_message from great_expectations.render import DefaultJinjaPageView from great_expectations.version import __version__ as __version__ def add_datasource(context): cli_message( """ ========== Datasources ========== See <blue>https://docs.greatexpectations.io/en/latest/core_concepts/datasource.html?utm_source=cli&utm_medium=init&utm_campaign={0:s}</blue> for more information about datasources. """.format(__version__.replace(".", "_")) ) data_source_selection = click.prompt( msg_prompt_choose_data_source, type=click.Choice(["1", "2", "3", "4"]), show_choices=False ) cli_message(data_source_selection) if data_source_selection == "1": # pandas path = click.prompt( msg_prompt_filesys_enter_base_path, # default='/data/', type=click.Path( exists=False, file_okay=False, dir_okay=True, readable=True ), show_default=True ) if path.startswith("./"): path = path[2:] if path.endswith("/"): basenamepath = path[:-1] else: basenamepath = path default_data_source_name = os.path.basename(basenamepath) + "__dir" data_source_name = click.prompt( msg_prompt_datasource_name, default=default_data_source_name, show_default=True ) context.add_datasource(data_source_name, "pandas", base_directory=os.path.join("..", path)) elif data_source_selection == "2": # sqlalchemy data_source_name = click.prompt( msg_prompt_datasource_name, default="mydb", show_default=True) cli_message(msg_sqlalchemy_config_connection.format( data_source_name)) drivername = click.prompt("What is the driver for the sqlalchemy connection?", default="postgres", show_default=True) host = click.prompt("What is the host for the sqlalchemy connection?", default="localhost", show_default=True) port = click.prompt("What is the port for the sqlalchemy connection?", default="5432", show_default=True) username = click.prompt("What is the username for the sqlalchemy connection?", default="postgres", show_default=True) password = click.prompt("What is the password for the sqlalchemy connection?", default="", show_default=False, hide_input=True) database = click.prompt("What is the database name for the sqlalchemy connection?", default="postgres", show_default=True) credentials = { "drivername": drivername, "host": host, "port": port, "username": username, "password": password, "database": database } context.add_profile_credentials(data_source_name, **credentials) context.add_datasource( data_source_name, "sqlalchemy", profile=data_source_name) elif data_source_selection == "3": # Spark path = click.prompt( msg_prompt_filesys_enter_base_path, default='/data/', type=click.Path( exists=True, file_okay=False, dir_okay=True, readable=True ), show_default=True ) if path.startswith("./"): path = path[2:] if path.endswith("/"): basenamepath = path[:-1] default_data_source_name = os.path.basename(basenamepath) data_source_name = click.prompt( msg_prompt_datasource_name, default=default_data_source_name, show_default=True) context.add_datasource(data_source_name, "spark", base_directory=path) # if data_source_selection == "5": # dbt # dbt_profile = click.prompt(msg_prompt_dbt_choose_profile) # log_message(msg_dbt_go_to_notebook, color="blue") # context.add_datasource("dbt", "dbt", profile=dbt_profile) if data_source_selection == "4": # None of the above cli_message(msg_unknown_data_source) print("Skipping datasource configuration. 
You can add a datasource later by editing the great_expectations.yml file.") return None if data_source_name != None: cli_message( """ ========== Profiling ========== Would you like to profile '{0:s}' to create candidate expectations and documentation? Please note: As of v0.7.0, profiling is still a beta feature in Great Expectations. This generation of profilers will evaluate the entire data source (without sampling) and may be very time consuming. As a rule of thumb, we recommend starting with data smaller than 100MB. To learn more about profiling, visit <blue>https://docs.greatexpectations.io/en/latest/guides/profiling.html?utm_source=cli&utm_medium=init&utm_campaign={1:s}</blue>. """.format(data_source_name, __version__.replace(".", "_")) ) if click.confirm("Proceed?", default=True ): profiling_results = context.profile_datasource( data_source_name, max_data_assets=20 ) print("\nDone.\n\nProfiling results are saved here:") for profiling_result in profiling_results: data_asset_name = profiling_result[1]['meta']['data_asset_name'] expectation_suite_name = profiling_result[1]['meta']['expectation_suite_name'] run_id = profiling_result[1]['meta']['run_id'] print(" {0:s}".format(context.get_validation_location( data_asset_name, expectation_suite_name, run_id)['filepath'])) cli_message( """ ========== Data Documentation ========== To generate documentation from the data you just profiled, the profiling results should be moved from great_expectations/uncommitted (ignored by git) to great_expectations/fixtures. Before committing, please make sure that this data does not contain sensitive information! To learn more: <blue>https://docs.greatexpectations.io/en/latest/guides/data_documentation.html?utm_source=cli&utm_medium=init&utm_campaign={0:s}</blue> """.format(__version__.replace(".", "_")) ) if click.confirm("Move the profiled data and build HTML documentation?", default=True ): cli_message("\nMoving files...") for profiling_result in profiling_results: data_asset_name = profiling_result[1]['meta']['data_asset_name'] expectation_suite_name = profiling_result[1]['meta']['expectation_suite_name'] run_id = profiling_result[1]['meta']['run_id'] context.move_validation_to_fixtures( data_asset_name, expectation_suite_name, run_id) cli_message("\nDone.") cli_message("\nBuilding documentation...") context.render_full_static_site() cli_message( """ To view the generated data documentation, open this file in a web browser: <green>great_expectations/uncommitted/documentation/index.html</green> """) else: cli_message( "Okay, skipping HTML documentation for now.`." ) else: cli_message( "Okay, skipping profiling for now. You can always do this later by running `great_expectations profile`." ) if data_source_selection == "1": # Pandas cli_message(msg_filesys_go_to_notebook) elif data_source_selection == "2": # SQL cli_message(msg_sqlalchemy_go_to_notebook) elif data_source_selection == "3": # Spark cli_message(msg_spark_go_to_notebook) msg_prompt_choose_data_source = """ Configure a datasource: 1. Pandas DataFrame 2. Relational database (SQL) 3. Spark DataFrame 4. Skip datasource configuration """ # msg_prompt_dbt_choose_profile = """ # Please specify the name of the dbt profile (from your ~/.dbt/profiles.yml file Great Expectations \ # should use to connect to the database # """ # msg_dbt_go_to_notebook = """ # To create expectations for your dbt models start Jupyter and open notebook # great_expectations/notebooks/using_great_expectations_with_dbt.ipynb - # it will walk you through next steps. 
# """ msg_prompt_filesys_enter_base_path = """ Enter the path of the root directory where the data files are stored. (The path may be either absolute or relative to current directory.) """ msg_prompt_datasource_name = """ Give your new data source a short name. """ msg_sqlalchemy_config_connection = """ Great Expectations relies on sqlalchemy to connect to relational databases. Please make sure that you have it installed. Next, we will configure database credentials and store them in the "{0:s}" section of this config file: great_expectations/uncommitted/credentials/profiles.yml: """ msg_unknown_data_source = """ We are looking for more types of data types to support. Please create a GitHub issue here: https://github.com/great-expectations/great_expectations/issues/new In the meantime you can see what Great Expectations can do on CSV files. To create expectations for your CSV files start Jupyter and open notebook great_expectations/notebooks/using_great_expectations_with_pandas.ipynb - it will walk you through configuring the database connection and next steps. """ msg_filesys_go_to_notebook = """ To create expectations for your data, start Jupyter and open a tutorial notebook: To launch with jupyter notebooks: <green>jupyter notebook great_expectations/notebooks/create_expectations.ipynb</green> To launch with jupyter lab: <green>jupyter lab great_expectations/notebooks/create_expectations.ipynb</green> """ msg_sqlalchemy_go_to_notebook = """ To create expectations for your data start Jupyter and open the notebook that will walk you through next steps. To launch with jupyter notebooks: <green>jupyter notebook great_expectations/notebooks/create_expectations.ipynb</green> To launch with jupyter lab: <green>jupyter lab great_expectations/notebooks/create_expectations.ipynb</green> """ msg_spark_go_to_notebook = """ To create expectations for your data start Jupyter and open the notebook that will walk you through next steps. To launch with jupyter notebooks: <green>jupyter notebook great_expectations/notebooks/create_expectations.ipynb</green> To launch with jupyter lab: <green>jupyter lab great_expectations/notebooks/create_expectations.ipynb</green> """
38.837456
180
0.655263
1,299
10,991
5.307929
0.211701
0.04641
0.032487
0.020305
0.515446
0.427556
0.396664
0.380711
0.337056
0.306889
0
0.005357
0.252752
10,991
282
181
38.975177
0.834165
0.06387
0
0.443299
0
0
0.348215
0.064023
0
0
0
0
0
1
0.005155
false
0.010309
0.025773
0
0.036082
0.015464
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0756766e6e04859ce22940229b15353362178faa
4,105
py
Python
python/crawler/downloader.py
rgb-24bit/code-library
8da8336e241e1428b2b46c6939bd5e9eadcf3e68
[ "MIT" ]
null
null
null
python/crawler/downloader.py
rgb-24bit/code-library
8da8336e241e1428b2b46c6939bd5e9eadcf3e68
[ "MIT" ]
null
null
null
python/crawler/downloader.py
rgb-24bit/code-library
8da8336e241e1428b2b46c6939bd5e9eadcf3e68
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Provide download function by request """ from datetime import datetime import logging import time import urllib.parse import requests from bs4 import BeautifulSoup class Throttle(object): """Throttle downloading by sleeping between requests to same domain.""" def __init__(self, delay): # amount of delay between downloads for each domain self.delay = delay # timestamp of when a domain was last accessed self.domains = {} def wait(self, url): domain = urllib.parse.urlparse(url).netloc last_accessed = self.domains.get(domain) if self.delay > 0 and last_accessed is not None: sleep_secs = self.delay - (datetime.now() - last_accessed).seconds if sleep_secs > 0: time.sleep(sleep_secs) self.domains[domain] = datetime.now() class Downloader(object): """Convenient download of web pages or caller to call api. Args: delay: Interval between downloads (seconds) num_retries: Number of retries when downloading errors timeout: Download timeout """ def __init__(self, delay=5, user_agent='awsl', proxies=None, num_retries=1, timeout=60, cache=None, auth=None): self.session = requests.Session() self.session.headers.update({'user-agent': user_agent}) self.session.proxies = proxies self.session.auth = auth self.throttle = Throttle(delay) self.num_retries = num_retries self.timeout = timeout self.cache = cache def get_from_cache(self, request): """Try to get the result of the request from the cache.""" result = None if self.cache: result = self.cache.get(request.url) if result and self.num_retries > 0 and 500 <= result['code'] < 600: result = None return result def prepare_request(self, url, params=None): """Build requests based on the provided url and parameters.""" request = requests.Request('GET', url, params=params) return self.session.prepare_request(request) def send_request(self, request, num_retries): """Send request and return response object.""" self.throttle.wait(request.url) try: logging.info('Downloading: %s' % request.url) response = self.session.send(request, timeout=self.timeout) response.raise_for_status() except requests.exceptions.HTTPError as e: logging.warn('Download error: %s' % e) if num_retries > 0 and 500 <= response.status_code < 600: return self.send_request(request, num_retries - 1) except requests.exceptions.RequestException: logging.error('Download faild: %s' % request.url) response = None return response def text(self, url, params=None, encoding=None): """Download web content in text format or html.""" request = self.prepare_request(url, params) result = self.get_from_cache(request) if result is None: response = self.send_request(request, self.num_retries) if response: if encoding: response.encoding = encoding result = {'text': response.text, 'code': response.status_code} if self.cache: self.cache[request.url] = result return result['text'] def json(self, url, params=None): """Access the api and return the json object.""" request = self.prepare_request(url, params) result = self.get_from_cache(request) if result is None: response = self.send_request(request, self.num_retries) if response: result = {'json': response.json(), 'code': response.status_code} if self.cache: self.cache[request.url] = result return result['json']
37.66055
81
0.599026
474
4,105
5.092827
0.257384
0.041425
0.023198
0.021127
0.180613
0.166529
0.166529
0.166529
0.166529
0.166529
0
0.008079
0.306456
4,105
108
82
38.009259
0.839831
0.160536
0
0.226667
0
0
0.02934
0
0
0
0
0
0
1
0.106667
false
0
0.08
0
0.293333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07573778dc31fb4c60d28d2030387a7be8144f36
7,327
py
Python
src/keycloak/connection.py
ecederstrand/python-keycloak
77686a2764a3fcba092d78e02f42a58c7214c30e
[ "MIT" ]
null
null
null
src/keycloak/connection.py
ecederstrand/python-keycloak
77686a2764a3fcba092d78e02f42a58c7214c30e
[ "MIT" ]
null
null
null
src/keycloak/connection.py
ecederstrand/python-keycloak
77686a2764a3fcba092d78e02f42a58c7214c30e
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # # The MIT License (MIT) # # Copyright (C) 2017 Marcos Pereira <marcospereira.mpj@gmail.com> # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of # the Software, and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. try: from urllib.parse import urljoin except ImportError: from urlparse import urljoin import requests from requests.adapters import HTTPAdapter from .exceptions import KeycloakConnectionError class ConnectionManager(object): """ Represents a simple server connection. :param base_url: (str) The server URL. :param headers: (dict) The header parameters of the requests to the server. :param timeout: (int) Timeout to use for requests to the server. :param verify: (bool) Verify server SSL. :param proxies: (dict) The proxies servers requests is sent by. """ def __init__(self, base_url, headers={}, timeout=60, verify=True, proxies=None): self._base_url = base_url self._headers = headers self._timeout = timeout self._verify = verify self._s = requests.Session() self._s.auth = lambda x: x # don't let requests add auth headers # retry once to reset connection with Keycloak after tomcat's ConnectionTimeout # see https://github.com/marcospereirampj/python-keycloak/issues/36 for protocol in ("https://", "http://"): adapter = HTTPAdapter(max_retries=1) # adds POST to retry whitelist allowed_methods = set(adapter.max_retries.allowed_methods) allowed_methods.add("POST") adapter.max_retries.allowed_methods = frozenset(allowed_methods) self._s.mount(protocol, adapter) if proxies: self._s.proxies.update(proxies) def __del__(self): self._s.close() @property def base_url(self): """Return base url in use for requests to the server.""" return self._base_url @base_url.setter def base_url(self, value): """ """ self._base_url = value @property def timeout(self): """Return timeout in use for request to the server.""" return self._timeout @timeout.setter def timeout(self, value): """ """ self._timeout = value @property def verify(self): """Return verify in use for request to the server.""" return self._verify @verify.setter def verify(self, value): """ """ self._verify = value @property def headers(self): """Return header request to the server.""" return self._headers @headers.setter def headers(self, value): """ """ self._headers = value def param_headers(self, key): """ Return a specific header parameter. :param key: (str) Header parameters key. :returns: If the header parameters exist, return its value. 
""" return self.headers.get(key) def clean_headers(self): """Clear header parameters.""" self.headers = {} def exist_param_headers(self, key): """Check if the parameter exists in the header. :param key: (str) Header parameters key. :returns: If the header parameters exist, return True. """ return self.param_headers(key) is not None def add_param_headers(self, key, value): """Add a single parameter inside the header. :param key: (str) Header parameters key. :param value: (str) Value to be added. """ self.headers[key] = value def del_param_headers(self, key): """Remove a specific parameter. :param key: (str) Key of the header parameters. """ self.headers.pop(key, None) def raw_get(self, path, **kwargs): """Submit get request to the path. :param path: (str) Path for request. :returns: Response the request. :raises: HttpError Can't connect to server. """ try: return self._s.get( urljoin(self.base_url, path), params=kwargs, headers=self.headers, timeout=self.timeout, verify=self.verify, ) except Exception as e: raise KeycloakConnectionError("Can't connect to server (%s)" % e) def raw_post(self, path, data, **kwargs): """Submit post request to the path. :param path: (str) Path for request. :param data: (dict) Payload for request. :returns: Response the request. :raises: HttpError Can't connect to server. """ try: return self._s.post( urljoin(self.base_url, path), params=kwargs, data=data, headers=self.headers, timeout=self.timeout, verify=self.verify, ) except Exception as e: raise KeycloakConnectionError("Can't connect to server (%s)" % e) def raw_put(self, path, data, **kwargs): """Submit put request to the path. :param path: (str) Path for request. :param data: (dict) Payload for request. :returns: Response the request. :raises: HttpError Can't connect to server. """ try: return self._s.put( urljoin(self.base_url, path), params=kwargs, data=data, headers=self.headers, timeout=self.timeout, verify=self.verify, ) except Exception as e: raise KeycloakConnectionError("Can't connect to server (%s)" % e) def raw_delete(self, path, data={}, **kwargs): """Submit delete request to the path. :param path: (str) Path for request. :param data: (dict) Payload for request. :returns: Response the request. :raises: HttpError Can't connect to server. """ try: return self._s.delete( urljoin(self.base_url, path), params=kwargs, data=data, headers=self.headers, timeout=self.timeout, verify=self.verify, ) except Exception as e: raise KeycloakConnectionError("Can't connect to server (%s)" % e)
32.564444
88
0.608162
883
7,327
4.975085
0.254813
0.022308
0.020032
0.023674
0.397223
0.350558
0.332802
0.325063
0.314136
0.297746
0
0.001949
0.299577
7,327
224
89
32.709821
0.854053
0.418452
0
0.377358
0
0
0.034026
0
0
0
0
0
0
1
0.179245
false
0
0.056604
0
0.339623
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07577b638bc8a39bd8fcb86c2ed5cc924e43d86a
700
py
Python
2020/23.py
Valokoodari/advent-of-code
c664987f739e0b07ddad34bad87d56768556a5a5
[ "MIT" ]
2
2021-12-27T18:59:11.000Z
2022-01-10T02:31:36.000Z
2020/23.py
Valokoodari/advent-of-code-2019
c664987f739e0b07ddad34bad87d56768556a5a5
[ "MIT" ]
null
null
null
2020/23.py
Valokoodari/advent-of-code-2019
c664987f739e0b07ddad34bad87d56768556a5a5
[ "MIT" ]
2
2021-12-23T17:29:10.000Z
2021-12-24T03:21:49.000Z
#!venv/bin/python3
cs = [int(c) for c in open("inputs/23.in", "r").readline().strip()]

def f(cs, ts):
    p,cc = {n: cs[(i+1)%len(cs)] for i,n in enumerate(cs)},cs[-1]
    for _ in range(ts):
        cc,dc = p[cc],p[cc]-1 if p[cc]-1 > 0 else max(p.keys())
        hc,p[cc] = [p[cc], p[p[cc]], p[p[p[cc]]]],p[p[p[p[cc]]]]
        while dc in hc:
            dc -= 1
            if dc < 1:
                dc = max(p.keys())
        p[dc],p[hc[-1]] = hc[0],p[dc]
    a,n = [],1
    for _ in range(8):
        n = p[n]
        a.append(str(n))
    return "".join(a), p[1] * p[p[1]]

print("Part 1:", f(cs.copy(), 100)[0])
print("Part 2:", f(cs.copy() + [i for i in range(10, 1000001)], 10000000)[1])
29.166667
77
0.452857
140
700
2.25
0.335714
0.085714
0.063492
0.047619
0.038095
0.038095
0
0
0
0
0
0.079681
0.282857
700
24
77
29.166667
0.547809
0.024286
0
0
0
0
0.039531
0
0
0
0
0
0
1
0.055556
false
0
0
0
0.111111
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
075a46f6df538e13d87e3247bc8ca4b6d54f0b7b
659
py
Python
demos/nn_classification_demo.py
fire-breathing-rubber-lemons/cs207-FinalProject
92d1d7d70637e2478effb01c9ce56199e0f873c9
[ "MIT" ]
null
null
null
demos/nn_classification_demo.py
fire-breathing-rubber-lemons/cs207-FinalProject
92d1d7d70637e2478effb01c9ce56199e0f873c9
[ "MIT" ]
31
2019-10-18T16:14:07.000Z
2019-12-10T16:38:34.000Z
demos/nn_classification_demo.py
fire-breathing-rubber-lemons/cs207-FinalProject
92d1d7d70637e2478effb01c9ce56199e0f873c9
[ "MIT" ]
null
null
null
import numpy as np
from pyad.nn import NeuralNet
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split

np.random.seed(0)
data = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(
    data.data, data.target, train_size=0.8, random_state=0
)

nn = NeuralNet(loss_fn='cross_entropy')
nn.add_layer(X_train.shape[1], 100, activation='linear')
nn.add_layer(100, 100, activation='logistic')
nn.add_layer(100, 1 + np.max(y_train), activation='linear')

nn.train(
    X_train, y_train, X_test, y_test,
    batch_size=1, learning_rate=1e-3, epochs=20
)

print('Predictions:', nn.predict(X_test))
26.36
59
0.76176
113
659
4.185841
0.451327
0.038055
0.063425
0.046512
0
0
0
0
0
0
0
0.039316
0.112291
659
24
60
27.458333
0.769231
0
0
0
0
0
0.068285
0
0
0
0
0
0
1
0
false
0
0.222222
0
0.222222
0.055556
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
075c40bff74b1c9ad80e482ccef0c574552a2c97
226
py
Python
mgatemp.py
zobclub/chapter8
fbd9e8711747b7446f75b472bae1465fe0ab495c
[ "MIT" ]
1
2021-12-02T10:56:49.000Z
2021-12-02T10:56:49.000Z
mgatemp.py
zobclub/chapter8
fbd9e8711747b7446f75b472bae1465fe0ab495c
[ "MIT" ]
null
null
null
mgatemp.py
zobclub/chapter8
fbd9e8711747b7446f75b472bae1465fe0ab495c
[ "MIT" ]
null
null
null
from microbit import *

I2CADR = 0x0E
DIE_TEMP = 0x0F

while True:
    i2c.write(I2CADR, bytearray([DIE_TEMP]))
    d = i2c.read(I2CADR, 1)
    x = d[0]
    if x >= 128:
        x -= 256
    x += 10
    print(x)
    sleep(500)
16.142857
44
0.553097
35
226
3.514286
0.714286
0.113821
0
0
0
0
0
0
0
0
0
0.141026
0.309735
226
14
45
16.142857
0.647436
0
0
0
0
0
0
0
0
0
0.035242
0
0
1
0
false
0
0.083333
0
0.083333
0.083333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
075fafdab69c5858ee27f6483fe78f36b26b216c
11,121
py
Python
src/scalar_net/visualisations.py
scheeloong/lindaedynamics_icml2018
d03b450e254d33b019161a3cd015e44aafe407cb
[ "MIT" ]
1
2018-08-04T17:04:13.000Z
2018-08-04T17:04:13.000Z
src/scalar_net/visualisations.py
scheeloong/lindaedynamics_icml2018
d03b450e254d33b019161a3cd015e44aafe407cb
[ "MIT" ]
null
null
null
src/scalar_net/visualisations.py
scheeloong/lindaedynamics_icml2018
d03b450e254d33b019161a3cd015e44aafe407cb
[ "MIT" ]
null
null
null
# required modules import numpy as np import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec from matplotlib import cm from matplotlib.colors import Normalize from mpl_toolkits.mplot3d import Axes3D from matplotlib.animation import FuncAnimation # two-dimesional version def plot_mse_loss_surface_2d(fig, ax, x, y, v=0.0, l2=0.0, w1_range=(-2, 2), w2_range=(2, -2)): # create weight space n_w = 100 w1 = np.linspace(w1_range[0], w1_range[1], num=n_w) # weight 1 w2 = np.linspace(w2_range[0], w2_range[1], num=n_w) # weight 2 ws_x, ws_y = np.meshgrid(w1, w2) cost_ws = np.zeros((n_w, n_w)) # initialize cost matrix # Fill the cost matrix for each combination of weights for i in range(n_w): for j in range(n_w): y_pred = ws_x[i, j] * ws_y[i, j] * x y_true = y cost_ws[i, j] = 0.5 * (y_true - y_pred)**2 + \ 0.5 * l2 * (ws_x[i, j]**2 + ws_y[i, j]**2) + 0.5 * v * (ws_x[i, j]*ws_y[i, j])**2 # compute gradients dy, dx = np.gradient(cost_ws) # plot vector space skip = (slice(None, None, 5), slice(None, None, 5)) # fig, ax = plt.subplots(figsize=(8, 8)) #ax.contour(ws_x, ws_y, cost_ws, 200) im = ax.imshow(cost_ws, extent=[ws_x.min(), ws_x.max( ), ws_y.min(), ws_y.max()], cmap=cm.coolwarm) ax.quiver(ws_x[skip], ws_y[skip], -dx[skip], dy[skip], cost_ws[skip]) cbar = fig.colorbar(im, ax=ax) # ax.set(aspect=1, title='Loss Surface') cbar.ax.set_ylabel('$Loss$', fontsize=15) ax.set_xlabel('$w_1$', fontsize=15) ax.set_ylabel('$w_2$', fontsize=15) # ax.grid() # add saddle point ax.scatter(0, 0, label='Saddle point', c='red', marker='*') # ax.scatter(0,0, c='black', marker=r'$\rightarrow$', label='Negative gradient') settings = (x, y, v, l2, w1_range, w2_range) return ax, settings # three-dimensional version def plot_mse_loss_surface_3d(ax, x, y, v=0.0, l2=0.0, w1_range=(-2, 2), w2_range=(2, -2), angle=30): # create weight space n_w = 100 w1 = np.linspace(w1_range[0], w1_range[1], num=n_w) # weight 1 w2 = np.linspace(w2_range[0], w2_range[1], num=n_w) # weight 2 ws_x, ws_y = np.meshgrid(w1, w2) cost_ws = np.zeros((n_w, n_w)) # initialize cost matrix # Fill the cost matrix for each combination of weights for i in range(n_w): for j in range(n_w): y_pred = ws_x[i, j] * ws_y[i, j] * x y_true = y cost_ws[i, j] = 0.5 * (y_true - y_pred)**2 + \ 0.5 * l2 * (ws_x[i, j]**2 + ws_y[i, j]**2) + 0.5 * v * (ws_x[i, j]*ws_y[i, j])**2 X = ws_x Y = ws_y Z = cost_ws #fig, ax = plt.subplots(figsize=(8, 8)) #ax = fig.add_subplot(1,1,1, projection='3d') # fourth dimention - colormap # create colormap according to x-value (can use any 50x50 array) color_dimension = Z # change to desired fourth dimension minn, maxx = color_dimension.min(), color_dimension.max() norm = Normalize(minn, maxx) m = plt.cm.ScalarMappable(norm=norm, cmap='jet') m.set_array([]) fcolors = m.to_rgba(color_dimension) # plot # fig = plt.figure(figsize=(8, 8)) # ax = fig.gca(projection='3d') ax.set_zlim(0, 50) ax.plot([0], [0], 'ro', c='red', marker='*', label='Saddle point') ax.plot_surface(X, Y, Z, rstride=1, cstride=1, facecolors=fcolors, vmin=minn, vmax=maxx, shade=False, alpha=1) ax.set_xlabel('$w_1$', fontsize=20) ax.set_ylabel('$w_2$', fontsize=20) ax.set_zlabel('$Loss$', fontsize=20) settings = (x, y, v, l2, w1_range, w2_range) ax.view_init(angle, 10) return ax, settings def plot_global_minimum_manifold_2d(ax, settings): # retieve cached settings x, y, v, l2, w1_range, w2_range = settings n_w = 1000 man_w1 = np.linspace(w1_range[0], w1_range[1], num=n_w) man_w2 = np.linspace(w2_range[0], w2_range[1], num=n_w) man_ws_x, man_ws_y = 
np.meshgrid(man_w1, man_w2) loss = 0.5 * y *(1 - man_ws_x * man_ws_y * x)**2 + \ 0.5 * l2 * (man_ws_x**2 + man_ws_y**2) + 0.5 * v * (man_ws_x * man_ws_y)**2 min_loss = np.min(loss) manifold_indices = loss < min_loss + 1e-5 manifold_x = man_ws_x[manifold_indices] manifold_y = man_ws_y[manifold_indices] # plot manifold of global minima ax.scatter(manifold_y, manifold_x, s=0.1, c='cyan', label='Manifold of global minima') def plot_global_minimum_manifold_3d(ax, settings): # retieve cached settings x, y, v, l2, w1_range, w2_range = settings n_w = 1000 man_w1 = np.linspace(w1_range[0], w1_range[1], num=n_w) man_w2 = np.linspace(w2_range[0], w2_range[1], num=n_w) man_ws_x, man_ws_y = np.meshgrid(man_w1, man_w2) loss = 0.5 * y * (1 - man_ws_x * man_ws_y * x)**2 + \ 0.5 * l2 * (man_ws_x**2 + man_ws_y**2) + 0.5 * v * (man_ws_x*man_ws_y)**2 min_loss = np.min(loss) manifold_indices = loss < min_loss + 1e-5 manifold_x = man_ws_x[manifold_indices] manifold_y = man_ws_y[manifold_indices] pos = np.where(np.abs(np.diff(manifold_y)) >= 0.1)[0]+1 x = np.insert(manifold_x, pos, np.nan) y = np.insert(manifold_y, pos, np.nan) # plot manifold of global minima #ax.scatter(manifold_y, manifold_x, 0, s=0.5, c='cyan', # label='Manifold of global minima') ax.plot(y, x, c='cyan', label='Manifold of global minima') def plot_optimiser_trajectory_2d(ax, weights, **kwargs): w1_vals = weights['w1'] w2_vals = weights['w2'] ax.plot(w1_vals, w2_vals, **kwargs) def plot_optimiser_trajectory_3d(ax, settings, weights, **kwargs): x, y, v, l2, _, _ = settings w1_vals = np.array(weights['w1']) w2_vals = np.array(weights['w2']) loss = 0.5 * y * (1 - w1_vals * w2_vals * x)**2 + \ 0.5 * l2 * (w1_vals**2 + w2_vals**2) + 0.5 * v * (w1_vals*w2_vals)**2 ax.plot(w1_vals, w2_vals, loss, **kwargs) def plot_optimiser_trajectory(x, y, weights, dim='2d', angle=45, manifold=False, **kwargs): if dim == '3d': ax, settings = plot_mse_loss_surface_3d(x, y, angle=angle) if manifold: plot_global_minimum_manifold_3d(ax, settings) plot_optimiser_trajectory_3d(ax, settings, weights, **kwargs) else: ax, settings = plot_mse_loss_surface_2d(x, y) if manifold: plot_global_minimum_manifold_2d(ax, settings) plot_optimiser_trajectory_2d(ax, weights, **kwargs) def plot_weight_norm(ax, weights, **kwargs): w1_vals = np.array(weights['w1']) w2_vals = np.array(weights['w2']) epochs = np.arange(0, len(w1_vals), 1) norms = np.sqrt(w1_vals**2 + w2_vals**2) ax.set_xlabel('Epoch', fontsize=12) ax.set_ylabel('Weight norm', fontsize=12) ax.plot(epochs, norms, linewidth=2.0, **kwargs) def animate_optimiser_trajectory_2d(i, ax, weights, **kwargs): w1_vals = weights['w1'] w2_vals = weights['w2'] ax.plot(w1_vals[:i], w2_vals[:i], **kwargs) return ax def animate_optimiser_trajectory_3d(i, ax, settings, weights, **kwargs): x, y, v, l2, _, _ = settings w1_vals = np.array(weights['w1']) w2_vals = np.array(weights['w2']) loss = 0.5 * y * (1 - w1_vals * w2_vals * x)**2 + \ 0.5 * l2 * (w1_vals**2 + w2_vals**2) + 0.5 * v * (w1_vals*w2_vals)**2 ax.plot(w1_vals[:i], w2_vals[:i], loss[:i], **kwargs) return ax def plot_optimiser_loss(x, y, v, l2, weights, **kwargs): loss = [] epoch = np.arange(0, len(weights['w1'])) for w1, w2 in zip(weights['w1'], weights['w2']): loss_val = 0.5 * y * (1 - w1 * w2 * x)**2 + 0.5 * l2 * (w1**2 + w2**2) + 0.5 * v * (w1 * w2)**2 loss.append(loss_val) plt.plot(epoch, loss, **kwargs) plt.xlabel('Epoch') plt.ylabel('Loss') def plot_interpolated_trajectory_2d(ax, w1_a, w2_a, w1_b, w2_b, start=0, end=1, **kwargs): alpha = np.arange(start, end, 0.001) w1_path = [] 
w2_path = [] for a in alpha: ww1 = (1 - a) * w1_a + a * w1_b ww2 = (1 - a) * w2_a + a * w2_b w1_path.append(ww1) w2_path.append(ww2) ax.plot(w1_path, w2_path, **kwargs) def plot_interpolated_trajectory_3d(ax, settings, w1_a, w2_a, w1_b, w2_b, start=0, end=1, **kwargs): x, y, _, _ = settings alpha = np.arange(start, end, 0.001) w1_path = [] w2_path = [] loss = [] for a in alpha: ww1 = (1 - a) * w1_a + a * w1_b ww2 = (1 - a) * w2_a + a * w2_b loss_val = 0.5 * (y - ww1 * ww2 * x)**2 + 0.5 * l2 * (ww1**2 + ww2**2) loss.append(loss_val) w1_path.append(ww1) w2_path.append(ww2) ax.plot(w1_path, w2_path, loss, **kwargs) def plot_interpolated_loss(x, y, w1_a, w2_a, w1_b, w2_b, start=0, end=1, **kwargs): alpha = np.arange(start, end, 0.001) interpolated_loss = [] for a in alpha: ww1 = (1 - a) * w1_a + a * w1_b ww2 = (1 - a) * w2_a + a * w2_b loss_val = 0.5 * (y - ww1 * ww2 * x)**2 + 0.5 * l2 * (ww1**2 + ww2**2) interpolated_loss.append(loss_val) plt.plot(alpha, interpolated_loss, **kwargs) plt.xlabel(r'$\alpha$') plt.ylabel('Loss') def plot_learning_dynamics(ax, weights, **kwargs): epoch = np.arange(0, len(weights['w1'])) scores = [] for w1, w2 in zip(weights['w1'], weights['w2']): scores.append(w1 * w2) ax.plot(epoch, scores, **kwargs) def animate_learning_dynamics(i, ax, weights, y, **kwargs): n_epoch = len(weights['w1']) epoch = np.arange(1, n_epoch) scores = [] for w1, w2 in zip(weights['w1'], weights['w2']): scores.append(w1 * w2) ax.set_xlim((1, n_epoch)) ax.set_ylim((0, y)) ax.set_xlabel('Epoch', fontsize=15) ax.set_ylabel('$w_2 \cdot w_1$', fontsize=15) ax.plot(epoch[:i], scores[:i], **kwargs) return ax def animate_learning(weights, save=False, name='anim'): gs = gridspec.GridSpec(2, 4) gs.update(wspace=0.5) fig = plt.figure(figsize=(12, 8)) ax1 = fig.add_subplot(gs[0, :2], ) ax2 = fig.add_subplot(gs[0, 2:], projection='3d') ax3 = fig.add_subplot(gs[1, 1:3]) # ax1 = fig.add_subplot(2, 2, 1) # ax2 = fig.add_subplot(2, 2, 2, projection = '3d') # ax3 = fig.add_subplot(2, 2, 3) # ax4 = fig.add_subplot(2, 2, 4) ax1, settings = plot_mse_loss_surface_2d(ax1, 1, 1) ax2, settings = plot_mse_loss_surface_3d(ax2, 1, 1, angle=60) plot_global_minimum_manifold_2d(ax1, settings) plot_global_minimum_manifold_3d(ax2, settings) def update(i): animate_optimiser_trajectory_2d( i, ax1, settings, weights, 'Gradient descent') animate_optimiser_trajectory_3d( i, ax2, settings, weights, 'Gradient descent') animate_learning_dynamics(i, ax3, weights, 1) # animate_weight_norm(i, ax4, scalarNet.history) # suncAnimation will call the 'update' function for each frame anim = FuncAnimation(fig, update, frames=100, interval=5, save_count=50) # HTML(anim.to_html5_video()) if save: anim.save(name + '.gif', dpi=80, writer='imagemagick') plt.show()
35.193038
103
0.608488
1,873
11,121
3.409503
0.132942
0.008456
0.007516
0.007047
0.634043
0.553868
0.488256
0.429847
0.407454
0.382086
0
0.061091
0.230195
11,121
315
104
35.304762
0.68485
0.118065
0
0.493151
0
0
0.028571
0
0
0
0
0
0
1
0.082192
false
0
0.031963
0
0.136986
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
075fc7c73e1c7b1fe3355a9a233cd8869299a19e
7,435
py
Python
tests/qconvolutional_test.py
kshithijiyer/qkeras
78ac608c6dcd84151792a986d03fe7afb17929cf
[ "Apache-2.0" ]
null
null
null
tests/qconvolutional_test.py
kshithijiyer/qkeras
78ac608c6dcd84151792a986d03fe7afb17929cf
[ "Apache-2.0" ]
null
null
null
tests/qconvolutional_test.py
kshithijiyer/qkeras
78ac608c6dcd84151792a986d03fe7afb17929cf
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 Google LLC # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test layers from qconvolutional.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import numpy as np from numpy.testing import assert_allclose import pytest import tempfile from tensorflow.keras import backend as K from tensorflow.keras.layers import Activation from tensorflow.keras.layers import Flatten from tensorflow.keras.layers import Input from tensorflow.keras.models import Model from tensorflow.keras.backend import clear_session from qkeras import binary from qkeras import ternary from qkeras import QActivation from qkeras import QDense from qkeras import QConv1D from qkeras import QConv2D from qkeras import QSeparableConv2D from qkeras import quantized_bits from qkeras import quantized_relu from qkeras.utils import model_save_quantized_weights from qkeras.utils import quantized_model_from_json from qkeras.utils import load_qmodel from qkeras import print_qstats from qkeras import extract_model_operations # TODO(hzhuang): # qoctave_conv test # qbatchnorm test def test_qnetwork(): x = x_in = Input((28, 28, 1), name='input') x = QSeparableConv2D( 32, (2, 2), strides=(2, 2), depthwise_quantizer=binary(alpha=1.0), pointwise_quantizer=quantized_bits(4, 0, 1, alpha=1.0), depthwise_activation=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='conv2d_0_m')( x) x = QActivation('quantized_relu(6,2,1)', name='act0_m')(x) x = QConv2D( 64, (3, 3), strides=(2, 2), kernel_quantizer=ternary(alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='conv2d_1_m', activation=quantized_relu(6, 3, 1))( x) x = QConv2D( 64, (2, 2), strides=(2, 2), kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='conv2d_2_m')( x) x = QActivation('quantized_relu(6,4,1)', name='act2_m')(x) x = Flatten(name='flatten')(x) x = QDense( 10, kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='dense')( x) x = Activation('softmax', name='softmax')(x) model = Model(inputs=[x_in], outputs=[x]) # reload the model to ensure saving/loading works json_string = model.to_json() clear_session() model = quantized_model_from_json(json_string) # generate same output for weights np.random.seed(42) for layer in model.layers: all_weights = [] for i, weights in enumerate(layer.get_weights()): input_size = np.prod(layer.input.shape.as_list()[1:]) if input_size is None: input_size = 576 * 10 # to avoid learning sizes shape = weights.shape assert input_size > 0, 'input size for {} {}'.format(layer.name, i) # he normal initialization with a scale factor of 2.0 all_weights.append( 10.0 * np.random.normal(0.0, np.sqrt(2.0 / input_size), shape)) if all_weights: layer.set_weights(all_weights) # apply quantizer to weights model_save_quantized_weights(model) all_weights = [] for layer in model.layers: for i, 
weights in enumerate(layer.get_weights()): w = np.sum(weights) all_weights.append(w) all_weights = np.array(all_weights) # test_qnetwork_weight_quantization all_weights_signature = np.array( [2., -6.75, -0.625, -2., -0.25, -56., 1.125, -1.625, -1.125]) assert all_weights.size == all_weights_signature.size assert np.all(all_weights == all_weights_signature) # test_qnetwork_forward: expected_output = np.array( [[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 6.e-08, 1.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00], [ 0.e+00 ,0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 5.e-07, 1.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00 ,1.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00 ,0.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00]]).astype(np.float16) inputs = 2 * np.random.rand(10, 28, 28, 1) actual_output = model.predict(inputs).astype(np.float16) assert_allclose(actual_output, expected_output, rtol=1e-4) def test_qconv1d(): np.random.seed(33) x = Input((4, 4,)) y = QConv1D( 2, 1, kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='qconv1d')( x) model = Model(inputs=x, outputs=y) # Extract model operations model_ops = extract_model_operations(model) # Assertion about the number of operations for this Conv1D layer assert model_ops['qconv1d']['number_of_operations'] == 32 # Print qstats to make sure it works with Conv1D layer print_qstats(model) # reload the model to ensure saving/loading works # json_string = model.to_json() # clear_session() # model = quantized_model_from_json(json_string) for layer in model.layers: all_weights = [] for i, weights in enumerate(layer.get_weights()): input_size = np.prod(layer.input.shape.as_list()[1:]) if input_size is None: input_size = 10 * 10 shape = weights.shape assert input_size > 0, 'input size for {} {}'.format(layer.name, i) all_weights.append( 10.0 * np.random.normal(0.0, np.sqrt(2.0 / input_size), shape)) if all_weights: layer.set_weights(all_weights) # Save the model as an h5 file using Keras's model.save() fd, fname = tempfile.mkstemp('.h5') model.save(fname) del model # Delete the existing model # Return a compiled model identical to the previous one model = load_qmodel(fname) # Clean the created h5 file after loading the model os.close(fd) os.remove(fname) # apply quantizer to weights model_save_quantized_weights(model) inputs = np.random.rand(2, 4, 4) p = model.predict(inputs).astype(np.float16) y = np.array([[[-2.441, 3.816], [-3.807, -1.426], [-2.684, -1.317], [-1.659, 0.9834]], [[-4.99, 1.139], [-2.559, -1.216], [-2.285, 1.905], [-2.652, -0.467]]]).astype(np.float16) assert np.all(p == y) if __name__ == '__main__': pytest.main([__file__])
33.490991
80
0.646537
1,258
7,435
3.699523
0.201908
0.063171
0.075634
0.093468
0.429093
0.386979
0.367426
0.355393
0.347443
0.324667
0
0.094044
0.200538
7,435
221
81
33.642534
0.68893
0.195293
0
0.375796
0
0
0.033664
0.00707
0
0
0
0.004525
0.050955
1
0.012739
false
0
0.178344
0
0.191083
0.019108
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
076073f7df321e46ea5bd065cc9331746695ec1f
2,356
py
Python
discord/ext/ui/select.py
Lapis256/discord-ext-ui
593de0a1107d2a0c26023587a2937f00ecec3ed1
[ "MIT" ]
null
null
null
discord/ext/ui/select.py
Lapis256/discord-ext-ui
593de0a1107d2a0c26023587a2937f00ecec3ed1
[ "MIT" ]
null
null
null
discord/ext/ui/select.py
Lapis256/discord-ext-ui
593de0a1107d2a0c26023587a2937f00ecec3ed1
[ "MIT" ]
null
null
null
from typing import Optional, List, TypeVar, Generic, Callable

import discord.ui

from .item import Item
from .select_option import SelectOption
from .custom import CustomSelect


def _default_check(_: discord.Interaction) -> bool:
    return True


C = TypeVar("C", bound=discord.ui.Select)


class Select(Item, Generic[C]):
    def __init__(
        self,
        placeholder: Optional[str] = None,
        min_values: int = 1,
        max_values: int = 1,
        options: Optional[list] = None,
        cls: C = CustomSelect,
        custom_id: Optional[str] = None,
    ) -> None:
        self._placeholder: Optional[str] = placeholder
        self._min_values: int = min_values
        self._max_values: int = max_values
        self._options: list = [] if options is None else options
        self._row: Optional[int] = None
        self.cls: C = cls
        self._custom_id: Optional[str] = custom_id
        self.func: Optional[Callable] = None
        self.check_func: Callable[[discord.Interaction], bool] = _default_check

    def placeholder(self, placeholder: str) -> 'Select':
        self._placeholder = placeholder
        return self

    def min_values(self, min_values: int) -> 'Select':
        self._min_values = min_values
        return self

    def max_values(self, max_values: int) -> 'Select':
        self._max_values = max_values
        return self

    def options(self, options: List[SelectOption]) -> 'Select':
        self._options = options
        return self

    def row(self, row: int) -> 'Select':
        self._row = row
        return self

    def on_select(self, func: Callable) -> 'Select':
        self.func = func
        return self

    def custom_id(self, custom_id: str) -> 'Select':
        self._custom_id = custom_id
        return self

    def check(self, func: Callable[[discord.Interaction], bool]) -> 'Select':
        self.check_func = func
        return self

    def to_discord(self) -> C:
        return self.cls(
            placeholder=self._placeholder,
            min_values=self._min_values,
            max_values=self._max_values,
            options=[o.to_discord_select_option() for o in self._options],
            row=self._row,
            custom_id=self._custom_id,
            check_func=self.check_func,
            callback=self.func
        )
28.731707
79
0.61163
282
2,356
4.882979
0.177305
0.058824
0.075527
0.041394
0.190995
0
0
0
0
0
0
0.001193
0.288625
2,356
81
80
29.08642
0.820406
0
0
0.129032
0
0
0.020798
0
0
0
0
0
0
1
0.177419
false
0
0.080645
0.032258
0.435484
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07629f29c3ccce164edffac5aaf1f19ce3ce8456
6,934
py
Python
userbot/helper_funcs/misc.py
Abucuyy/Uciha
726e9cd61eabf056064e40f7b322d8993161e52a
[ "Naumen", "Condor-1.1", "MS-PL" ]
null
null
null
userbot/helper_funcs/misc.py
Abucuyy/Uciha
726e9cd61eabf056064e40f7b322d8993161e52a
[ "Naumen", "Condor-1.1", "MS-PL" ]
1
2021-02-08T20:43:56.000Z
2021-02-08T20:43:56.000Z
userbot/helper_funcs/misc.py
Abucuyy/Uciha
726e9cd61eabf056064e40f7b322d8993161e52a
[ "Naumen", "Condor-1.1", "MS-PL" ]
5
2020-09-05T12:45:31.000Z
2020-09-25T09:04:29.000Z
# TG-UserBot - A modular Telegram UserBot script for Python. # Copyright (C) 2019 Kandarp <https://github.com/kandnub> # # TG-UserBot is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # TG-UserBot is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with TG-UserBot. If not, see <https://www.gnu.org/licenses/>. from typing import Tuple, Union from telethon.tl import types from ..utils.client import UserBotClient from ..utils.helpers import get_chat_link ChatBannedRights = { 'until_date': 'Banned until:', 'view_messages': 'Read messages:', 'send_messages': 'Send messages:', 'send_media': 'Send media:', 'send_stickers': 'Send stickers:', 'send_gifs': 'Send GIFs:', 'send_games': 'Send games:', 'send_inline': 'Send inline messages:', 'embed_links': 'Send embed links:', 'send_polls': 'Send polls:', 'change_info': 'Change info:', 'invite_users': 'Add users:', 'pin_messages': 'Pin messages:' } ChatAdminRights = { 'change_info': 'Change chat info:', 'post_messages': 'Post messages:', 'edit_messages': 'Edit messages:', 'delete_messages': 'Delete messages:', 'ban_users': 'Ban users:', 'invite_users': 'Invite users:', 'pin_messages': 'Pin messages:', 'add_admins': 'Add new admins:' } async def parse_admin_rights(AdminRights: types.ChatAdminRights) -> str: text = [] for attr, string in ChatAdminRights.items(): right = getattr(AdminRights, attr, False) if right: text.append(f'{string} {right}') return '\n'.join(text) async def parse_banned_rights(BannedRights: types.ChatBannedRights) -> str: text = [] for attr, string in ChatBannedRights.items(): right = getattr(BannedRights, attr, False) if right: if attr == "until_date": text.append(f'{string} {right.ctime()} (UTC)') else: text.append(f'{string} {right}') return '\n'.join(text) async def get_entity_info( arg: Union[types.ChatFull, types.ChannelFull] ) -> Tuple[int, int, int, int, int, int]: creator, admins, bots, participants, kicked, banned = (None, None, None, None, None, None) full_chat = arg.full_chat if isinstance(full_chat, types.ChannelFull): if hasattr(full_chat, 'participants_count'): participants = full_chat.participants_count if hasattr(full_chat, 'admins_count'): admins = full_chat.admins_count if hasattr(full_chat, 'kicked_count'): kicked = full_chat.kicked_count if hasattr(full_chat, 'banned_count'): banned = full_chat.banned_count if hasattr(full_chat, 'bot_info'): bots = len(full_chat.bot_info) else: if hasattr(full_chat, 'bot_info'): bots = len(full_chat.bot_info) if hasattr(full_chat, 'participants'): admins, participants = 0, 0 for p in full_chat.participants.participants: if isinstance(p, types.ChatParticipantCreator): creator = p.user_id if isinstance(p, types.ChatParticipant): participants += 1 if isinstance(p, types.ChatParticipantAdmin): admins += 1 return creator, admins, bots, participants, kicked, banned async def unparse_info(client: UserBotClient, creator: int, admins: int, bots: int, users: int, kicked: int, banned: int) -> str: text = '' if creator: c = await client.get_entity(creator) text += f"\n**Creator:** {await get_chat_link(c)}" if users: text += f"\n**Participants:** {users}" if 
admins: text += f"\n**Admins:** {admins}" if bots: text += f"\n**Bots:** {bots}" if kicked: text += f"\n**Kicked:** {kicked}" if banned: text += f"\n**Banned:** {banned}" return text async def unparse_rights(title: str, rights: str) -> str: text = f"**{title}**" for l in rights.split('\n'): splat = l.split(':') text += f"\n **{splat[0]}:** `{':'.join(splat[1:])}`" return text async def resolve_channel(client: UserBotClient, channel: types.ChannelFull) -> str: text = '' default_banned_rights = None banned_rights = None admin_rights = None channel_type = "Channel" for c in channel.chats: if c.id == channel.full_chat.id: if c.megagroup: channel_type = "Megagroup" admin_rights = c.admin_rights banned_rights = c.banned_rights default_banned_rights = c.default_banned_rights break text += f"\n**{channel_type} ID:** `{channel.full_chat.id}`" info = await get_entity_info(channel) text += await unparse_info(client, *info) if admin_rights: parsed = await parse_admin_rights(admin_rights) unparsed = await unparse_rights("Admin rights:", parsed) text += f"\n{unparsed}" if banned_rights: parsed = await parse_banned_rights(banned_rights) unparsed = await unparse_rights("Banned rights:", parsed) text += f"\n{unparsed}" if default_banned_rights: parsed = await parse_banned_rights(default_banned_rights) unparsed = await unparse_rights("Default banned rights:", parsed) text += f"\n{unparsed}" return text async def resolve_chat(client: UserBotClient, chat: types.ChatFull) -> str: text = f"\n**Chat ID:** `{chat.full_chat.id}``" info = await get_entity_info(chat) text += await unparse_info(client, *info) admin_rights = None default_banned_rights = None for c in chat.chats: if c.id == chat.full_chat.id: admin_rights = c.admin_rights default_banned_rights = c.default_banned_rights break if admin_rights: parsed = await parse_admin_rights(admin_rights) unparsed = await unparse_rights("Admin rights:", parsed) text += f"\n{unparsed}" if default_banned_rights: parsed = await parse_banned_rights(default_banned_rights) unparsed = await unparse_rights("Default banned rights:", parsed) text += f"\n{unparsed}" return text
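A minimal usage sketch for the rights parsers above (hedged, not part of the original module; the flag keywords passed to types.ChatAdminRights are assumptions about Telethon's generated constructor):

import asyncio
from telethon.tl import types

# Build an admin-rights object by hand and render it with parse_admin_rights.
rights = types.ChatAdminRights(
    change_info=True,
    delete_messages=True,
    ban_users=True,
    pin_messages=True
)
print(asyncio.run(parse_admin_rights(rights)))
# Expected output (one line per set flag), e.g.:
# Change chat info: True
# Delete messages: True
# Ban users: True
# Pin messages: True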
37.27957
80
0.608595
830
6,934
4.927711
0.219277
0.064548
0.020538
0.029095
0.393888
0.293888
0.225428
0.214181
0.198533
0.174083
0
0.002192
0.27632
6,934
185
81
37.481081
0.812874
0.103836
0
0.326667
0
0
0.188591
0.011475
0
0
0
0
0
1
0
false
0
0.026667
0
0.073333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0763811316d721bd61d00c534d919a140fb4b71a
1,421
py
Python
gym-multilayerthinfilm/utils.py
HarryTheBird/gym-multilayerthinfilm
22eda96e71e95e9ea1b491fae633c4a32fadb023
[ "MIT" ]
10
2021-05-20T19:46:36.000Z
2022-02-24T03:06:46.000Z
gym-multilayerthinfilm/utils.py
HarryTheBird/gym-multilayerthinfilm
22eda96e71e95e9ea1b491fae633c4a32fadb023
[ "MIT" ]
null
null
null
gym-multilayerthinfilm/utils.py
HarryTheBird/gym-multilayerthinfilm
22eda96e71e95e9ea1b491fae633c4a32fadb023
[ "MIT" ]
2
2021-12-11T21:49:35.000Z
2022-03-04T06:28:57.000Z
import numpy as np


def get_n_from_txt(filepath, points=None, lambda_min=400, lambda_max=700, complex_n=True):
    ntxt = np.loadtxt(filepath)
    if np.min(np.abs(ntxt[:, 0] - lambda_min)) > 25 or np.min(np.abs(ntxt[:, 0] - lambda_max)) > 25:
        print('No measurement data for refractive indicies are available within 25 nm in \n' + filepath)
    if points is None:
        points = lambda_max - lambda_min + 1
    idxmin = np.argmin(np.abs(ntxt[:, 0] - lambda_min))
    idxmax = np.argmin(np.abs(ntxt[:, 0] - lambda_max))
    if idxmax == idxmin:
        if complex_n:
            indicies = np.vectorize(complex)(np.array([ntxt[idxmin, 1]]), np.array([ntxt[idxmin, 2]]))
        else:
            indicies = np.array([ntxt[idxmin, 1]])
    else:
        xp = ntxt[idxmin:idxmax, 0]
        fpn = ntxt[idxmin:idxmax, 1]
        n = np.interp(np.linspace(lambda_min, lambda_max, points), xp, fpn)
        if complex_n:
            fpk = ntxt[idxmin:idxmax, 2].squeeze()
            k = np.interp(np.linspace(lambda_min, lambda_max, points), xp, fpk)
            indicies = np.vectorize(complex)(n, k)
        else:
            indicies = n
    return indicies


def get_N(path_list, lambda_min, lambda_max, points=None, complex_n=False):
    n = []
    for path in path_list:
        n.append(get_n_from_txt(path, points, lambda_min=lambda_min, lambda_max=lambda_max, complex_n=complex_n))
    return np.vstack((n))
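A minimal usage sketch for the helpers above (assuming a whitespace-separated text file whose columns are wavelength in nm, n and k, which is the layout the indexing implies; the file name and values are illustrative):

import numpy as np

# Three sample rows: wavelength [nm], refractive index n, extinction coefficient k.
np.savetxt('sio2_example.txt', np.array([[400.0, 1.47, 0.0],
                                         [550.0, 1.46, 0.0],
                                         [700.0, 1.45, 0.0]]))

n_complex = get_n_from_txt('sio2_example.txt', lambda_min=400, lambda_max=700)
print(n_complex.shape)   # (301,) -- one interpolated complex index per nm
N = get_N(['sio2_example.txt'], lambda_min=400, lambda_max=700, complex_n=True)
print(N.shape)           # (1, 301) -- one row per material file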
43.060606
113
0.626319
214
1,421
4.004673
0.280374
0.094516
0.087515
0.046674
0.291715
0.221704
0.207701
0.102684
0.102684
0.102684
0
0.021257
0.238564
1,421
32
114
44.40625
0.770795
0
0
0.166667
0
0
0.053483
0
0
0
0
0
0
1
0.066667
false
0
0.033333
0
0.166667
0.033333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
076ab2cb67c5bd176123d8332c42ca379bbe81d8
992
py
Python
service.py
Kleist/MusicPlayer
95f634d1e4d47e7b430e32ad9224d94ad0453c82
[ "MIT" ]
1
2020-08-14T21:14:09.000Z
2020-08-14T21:14:09.000Z
service.py
Kleist/MusicPlayer
95f634d1e4d47e7b430e32ad9224d94ad0453c82
[ "MIT" ]
null
null
null
service.py
Kleist/MusicPlayer
95f634d1e4d47e7b430e32ad9224d94ad0453c82
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
import RPi.GPIO as GPIO
from mfrc522 import SimpleMFRC522
import play
import time


class TagPlayer(object):
    def __init__(self):
        self._current = None
        self.reader = SimpleMFRC522()
        self._failed = 0

    def step(self):
        id, text = self.reader.read_no_block()
        print(id, text)
        if id:
            self._failed = 0
            if text != self._current:
                stripped_text = text.strip()
                print("Read text: \"{}\"".format(stripped_text))
                play.play(stripped_text)
                self._current = text
        elif self._current:
            self._failed += 1
            if self._failed > 2:
                self._current = None
                print("Stopping")
                play.stop()
        time.sleep(1)


def main():
    try:
        player = TagPlayer()
        while 1:
            player.step()
    finally:
        GPIO.cleanup()


if __name__ == "__main__":
    main()
22.545455
64
0.521169
107
992
4.588785
0.448598
0.112016
0.0611
0
0
0
0
0
0
0
0
0.025765
0.373992
992
43
65
23.069767
0.764895
0.021169
0
0.114286
0
0
0.028866
0
0
0
0
0
0
1
0.085714
false
0
0.114286
0
0.228571
0.085714
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
076b099ed1e8933339bc07b3aea99e064efdee24
1,118
py
Python
mypy/defaults.py
ckanesan/mypy
ffb3ce925e8bb3376e19f942c7d3a3806c9bba97
[ "PSF-2.0" ]
null
null
null
mypy/defaults.py
ckanesan/mypy
ffb3ce925e8bb3376e19f942c7d3a3806c9bba97
[ "PSF-2.0" ]
8
2021-03-18T22:27:44.000Z
2022-02-10T09:18:50.000Z
mypy/defaults.py
ckanesan/mypy
ffb3ce925e8bb3376e19f942c7d3a3806c9bba97
[ "PSF-2.0" ]
1
2021-09-20T06:37:41.000Z
2021-09-20T06:37:41.000Z
import os

MYPY = False
if MYPY:
    from typing_extensions import Final

PYTHON2_VERSION = (2, 7)  # type: Final
PYTHON3_VERSION = (3, 6)  # type: Final
PYTHON3_VERSION_MIN = (3, 4)  # type: Final
CACHE_DIR = '.mypy_cache'  # type: Final
CONFIG_FILE = 'mypy.ini'  # type: Final
SHARED_CONFIG_FILES = ['setup.cfg', ]  # type: Final
USER_CONFIG_FILES = ['~/.config/mypy/config', '~/.mypy.ini', ]  # type: Final
if os.environ.get('XDG_CONFIG_HOME'):
    USER_CONFIG_FILES.insert(0, os.path.join(os.environ['XDG_CONFIG_HOME'], 'mypy/config'))

CONFIG_FILES = [CONFIG_FILE, ] + SHARED_CONFIG_FILES + USER_CONFIG_FILES  # type: Final

# This must include all reporters defined in mypy.report. This is defined here
# to make reporter names available without importing mypy.report -- this speeds
# up startup.
REPORTER_NAMES = ['linecount', 'any-exprs', 'linecoverage', 'memory-xml', 'cobertura-xml',
                  'xml', 'xslt-html', 'xslt-txt', 'html', 'txt']  # type: Final
34.9375
91
0.604651
138
1,118
4.717391
0.485507
0.124424
0.069124
0.070661
0
0
0
0
0
0
0
0.01224
0.269231
1,118
31
92
36.064516
0.784578
0.245081
0
0
0
0
0.21781
0.025271
0
0
0
0
0
1
0
false
0
0.083333
0
0.083333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4acbd0cbd7b35addaf03f24e1fa4d33805db8c3a
4,819
py
Python
tools/corpora.py
EleutherAI/megatron-3d
be3014d47a127f08871d0ba6d6389363f2484397
[ "MIT" ]
3
2021-02-13T21:51:45.000Z
2021-02-14T23:15:02.000Z
tools/corpora.py
EleutherAI/megatron-3d
be3014d47a127f08871d0ba6d6389363f2484397
[ "MIT" ]
13
2021-02-08T11:22:38.000Z
2021-02-18T20:13:10.000Z
tools/corpora.py
EleutherAI/megatron-3d
be3014d47a127f08871d0ba6d6389363f2484397
[ "MIT" ]
2
2021-02-13T22:13:21.000Z
2021-10-12T06:39:33.000Z
import os import tarfile from abc import ABC, abstractmethod from glob import glob import shutil import random import zstandard """ This registry is for automatically downloading and extracting datasets. To register a class you need to inherit the DataDownloader class, provide name, filetype and url attributes, and (optionally) provide download / extract / exists / tokenize functions to check if the data exists, and, if it doesn't, download, extract and tokenize the data into the correct directory. When done, add it to the DATA_DOWNLOADERS dict. The function process_data runs the pre-processing for the selected dataset. """ DATA_DIR = os.environ.get('DATA_DIR', './data') GPT2_VOCAB_FP = f"{DATA_DIR}/gpt2-vocab.json" GPT2_VOCAB_URL = "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json" GPT2_MERGE_FP = f"{DATA_DIR}/gpt2-merges.txt" GPT2_MERGE_URL = "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt" class DataDownloader(ABC): """Dataset registry class to automatically download / extract datasets""" @property def base_dir(self): """base data directory""" return DATA_DIR @property @abstractmethod def name(self): """name of dataset""" pass @property @abstractmethod def filetype(self): """filetype of dataset""" pass @property @abstractmethod def url(self): """URL from which to download dataset""" pass def _extract_tar(self): self.path = os.path.join(self.base_dir, self.name) os.makedirs(self.path, exist_ok=True) tarfile_path = os.path.join(self.base_dir, os.path.basename(self.url)) with tarfile.open(tarfile_path, "r:gz") as dataset_tar: print(f'Extracting files from {tarfile_path}...') dataset_tar.extractall(self.path) def _extract_zstd(self, remove_zstd=True): self.path = os.path.join(self.base_dir, self.name) os.makedirs(self.path, exist_ok=True) zstd_file_path = os.path.join(self.base_dir, os.path.basename(self.url)) with open(zstd_file_path, 'rb') as compressed: decomp = zstandard.ZstdDecompressor() output_path = zstd_file_path.replace(".zst", "") with open(output_path, 'wb') as destination: decomp.copy_stream(compressed, destination) if remove_zstd: os.remove(zstd_file_path) return output_path def extract(self): """extracts dataset and moves to the correct data dir if necessary""" self._extract_tar() def exists(self): """Checks if the dataset is present""" return os.path.isdir(f"{self.base_dir}/{self.name}") def download(self): """downloads dataset""" os.makedirs(self.base_dir, exist_ok=True) os.system(f"wget {self.url} -O {os.path.join(self.base_dir, os.path.basename(self.url))}") def tokenize(self): parent_folder = os.path.join(self.base_dir, self.name) jsonl_filepath = os.path.join(parent_folder, os.path.basename(self.url)).replace(".zst", "") assert jsonl_filepath.endswith(".jsonl") os.system(f"python tools/preprocess_data.py \ --input {jsonl_filepath} \ --output-prefix {parent_folder}/{self.name} \ --vocab {GPT2_VOCAB_FP} \ --dataset-impl mmap \ --tokenizer-type GPT2BPETokenizer \ --merge-file {GPT2_MERGE_FP} \ --append-eod") def prepare(self): if not self.exists(): self.download() self.extract() self.tokenize() class Enron(DataDownloader): name = "enron" filetype = "jsonl.zst" url = "http://eaidata.bmk.sh/data/enron_emails.jsonl.zst" seed = 1 def exists(self): self.path = os.path.join(self.base_dir, self.name) return os.path.isfile(os.path.join(self.path, os.path.basename(self.url).replace(".zst", ""))) def extract(self, remove_zstd=True): self._extract_zstd(remove_zstd=remove_zstd) shutil.move(os.path.join(self.base_dir, 
os.path.basename(self.url).replace(".zst", "")), os.path.join(self.base_dir, self.name)) def maybe_download_gpt2_tokenizer_data(): if not os.path.isfile(GPT2_VOCAB_FP): os.system(f'wget {GPT2_VOCAB_URL} -O {GPT2_VOCAB_FP}') if not os.path.isfile(GPT2_MERGE_FP): os.system(f'wget {GPT2_MERGE_URL} -O {GPT2_MERGE_FP}') DATA_DOWNLOADERS = { "enron": Enron } def prepare_dataset(dataset_name): os.makedirs(DATA_DIR, exist_ok=True) maybe_download_gpt2_tokenizer_data() DownloaderClass = DATA_DOWNLOADERS.get(dataset_name, None) if DownloaderClass is None: raise NotImplementedError else: d = DownloaderClass() d.prepare()
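A hedged sketch of how a further dataset would be registered with the downloader registry above, following the same pattern as the Enron class (the class name and URL are placeholders, not a real dataset):

class MyCorpus(DataDownloader):
    # Placeholder values -- substitute a real jsonl.zst dataset URL.
    name = "my_corpus"
    filetype = "jsonl.zst"
    url = "http://example.com/data/my_corpus.jsonl.zst"

    def extract(self, remove_zstd=True):
        # Reuse the zstd helper instead of the default tar extraction.
        self._extract_zstd(remove_zstd=remove_zstd)

DATA_DOWNLOADERS["my_corpus"] = MyCorpus
# prepare_dataset("my_corpus") would then download, extract and tokenize it.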
35.175182
136
0.661756
650
4,819
4.753846
0.253846
0.040777
0.035599
0.045307
0.290615
0.249191
0.190615
0.167314
0.148544
0.148544
0
0.005831
0.217057
4,819
136
137
35.433824
0.813146
0.056651
0
0.171717
0
0.030303
0.130033
0.033758
0
0
0
0
0.010101
1
0.151515
false
0.030303
0.070707
0
0.323232
0.010101
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4acced6bfbc482f9d38f37f561868a587991d47b
1,575
py
Python
othello_rl/qlearning/qlearning.py
aka256/othello-rl
ef5e78c6cf6b276e16b50086b53138ab968d728c
[ "MIT" ]
null
null
null
othello_rl/qlearning/qlearning.py
aka256/othello-rl
ef5e78c6cf6b276e16b50086b53138ab968d728c
[ "MIT" ]
null
null
null
othello_rl/qlearning/qlearning.py
aka256/othello-rl
ef5e78c6cf6b276e16b50086b53138ab968d728c
[ "MIT" ]
null
null
null
from logging import getLogger

logger = getLogger(__name__)


class QLearning:
    """
    Class for Q-Learning.

    Attributes
    ----------
    alpha : float
        Learning rate α
    gamma : float
        Discount rate γ
    data : dict
        Dictionary storing the Q-Learning results
    init_value : float
        Initial value for entries of data
    """
    def __init__(self, alpha: float, gamma: float, data: dict = {}, init_value: float = 0) -> None:
        self.alpha = alpha
        self.gamma = gamma
        self.data = data
        self.init_value = init_value

    def get(self, s: int, a: int) -> float:
        """
        Get a value from data.

        Parameters
        ----------
        s : int
            State
        a : int
            Action

        Returns
        -------
        value : float
            Q-value, Q(s, a)
        """
        return self.data.get((s, a), self.init_value)

    def __set(self, s: int, a: int, value: float) -> None:
        """
        Store a value into data.

        Parameters
        ----------
        s : int
            State
        a : int
            Action
        value : float
            Q-value to store, Q(s, a)
        """
        self.data[(s, a)] = value

    def update(self, s: int, a: int, r: float, q: float, *q_old: float) -> float:
        """
        Update the Q-value.

        Parameters
        ----------
        s : int
            State
        a : int
            Action
        r : float
            Reward
        q : float
            Q(s_t+1, a)
        q_old : float
            Q(s, a)

        Returns
        ------
        q_new : float
            The updated Q-value
        """
        if len(q_old) == 0:
            q_old = self.get(s, a)
        else:
            q_old = q_old[0]
        #print('alpha:{}, q_old:{}, r:{}, gamma:{}, q:{}'.format(self.alpha, q_old, r, self.gamma, q))
        q_new = (1-self.alpha)*q_old+self.alpha*(r + self.gamma*q)
        self.__set(s, a, q_new)
        return q_new
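A minimal usage sketch for the class above (the integer state/action encodings are illustrative):

agent = QLearning(alpha=0.5, gamma=0.9)
print(agent.get(s=0, a=1))                          # 0 (init_value, nothing learned yet)
q_next = max(agent.get(1, a) for a in range(4))     # best current estimate from the next state
print(agent.update(s=0, a=1, r=1.0, q=q_next))      # 0.5 = (1-0.5)*0 + 0.5*(1.0 + 0.9*0)
print(agent.get(0, 1))                              # 0.5, now stored in agent.data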
17.119565
98
0.507937
214
1,575
3.593458
0.252336
0.046814
0.031209
0.035111
0.13264
0.085826
0.085826
0
0
0
0
0.00473
0.328889
1,575
91
99
17.307692
0.7228
0.380952
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0.05
0
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4acdb07fa21e6d09ec1006ea9fc4f7c0e59b102d
6,748
py
Python
SearchService/test/unit/test_solr_interface.py
loftwah/appscale
586fc1347ebc743d7a632de698f4dbfb09ae38d6
[ "Apache-2.0" ]
790
2015-01-03T02:13:39.000Z
2020-05-10T19:53:57.000Z
SearchService/test/unit/test_solr_interface.py
loftwah/appscale
586fc1347ebc743d7a632de698f4dbfb09ae38d6
[ "Apache-2.0" ]
1,361
2015-01-08T23:09:40.000Z
2020-04-14T00:03:04.000Z
SearchService/test/unit/test_solr_interface.py
loftwah/appscale
586fc1347ebc743d7a632de698f4dbfb09ae38d6
[ "Apache-2.0" ]
155
2015-01-08T22:59:31.000Z
2020-04-08T08:01:53.000Z
#!/usr/bin/env python import os import json import sys import unittest import urllib2 from flexmock import flexmock sys.path.append(os.path.join(os.path.dirname(__file__), "../../")) import solr_interface import search_exceptions class FakeSolrDoc(): def __init__(self): self.fields = [] class FakeDocument(): INDEX_NAME = "indexname" INDEX_LOCALE = "indexlocale" def __init__(self): self.fields = [] self.id = "id" self.language = "lang" class FakeSchema(): def __init__(self): self.fields = [] class FakeIndex(): def __init__(self): self.name = "name" self.schema = FakeSchema() class FakeIndexSpec(): def __init__(self): pass def namespace(self): return 'ns' def name(self): return self.name class FakeUpdate(): def __init__(self, name, field_type): self.name = name self.field_type = field_type class FakeConnection(): def __init__(self, is_good_code): self.code = 200 if not is_good_code: self.code = 500 def getcode(self): return self.code class TestSolrInterface(unittest.TestCase): """ A set of test cases for the solr interface module. """ def test_get_index_adapter(self): appscale_info = flexmock() appscale_info.should_receive("get_search_location").\ and_return("somelocation") solr = solr_interface.Solr() solr = flexmock(solr) flexmock(solr_interface) solr_interface.should_receive("get_index_name").and_return("index_ns_name") flexmock(urllib2) urllib2.should_receive("urlopen").and_return(FakeConnection(False)) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, "app_id", "ns", "name") # Test the case of ValueError on a json.load. urllib2.should_receive("urlopen").and_return(FakeConnection(True)) flexmock(json) json.should_receive("load").and_raise(ValueError) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, "app_id", "ns", "name") # Test a bad status from SOLR. 
dictionary = {'responseHeader':{'status': 1}} json.should_receive("load").and_return(dictionary) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, "app_id", "ns", "name") fields = [{'name':"index_ns_name_"}] dictionary = {'responseHeader':{'status': 0}, "fields": fields} json.should_receive("load").and_return(dictionary) index = solr._get_index_adapter("app_id", "ns", "name") self.assertEquals(index.schema[0]['name'], "index_ns_name_") def test_update_schema(self): appscale_info = flexmock() appscale_info.should_receive("get_search_location").\ and_return("somelocation") solr = solr_interface.Solr() flexmock(urllib2) urllib2.should_receive("urlopen").and_return(FakeConnection(False)) updates = [] self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates) updates = [{'name': 'name1', 'type':'type1'}] flexmock(json) json.should_receive("load").and_raise(ValueError) urllib2.should_receive("urlopen").and_return(FakeConnection(True)) self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates) dictionary = {"responseHeader":{"status":1}} json.should_receive("load").and_return(dictionary) self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates) dictionary = {"responseHeader":{"status":0}} json.should_receive("load").and_return(dictionary) solr.update_schema(updates) def test_to_solr_hash_map(self): appscale_info = flexmock() appscale_info.should_receive("get_search_location").\ and_return("somelocation") solr = solr_interface.Solr() self.assertNotEqual(solr.to_solr_hash_map(FakeIndex(), FakeDocument()), {}) def test_commit_update(self): appscale_info = flexmock() appscale_info.should_receive("get_search_location").\ and_return("somelocation") solr = solr_interface.Solr() flexmock(json) json.should_receive("loads").and_return({}) flexmock(urllib2) urllib2.should_receive("urlopen").and_return(FakeConnection(False)) self.assertRaises(search_exceptions.InternalError, solr.commit_update, {}) json.should_receive("load").and_raise(ValueError) urllib2.should_receive("urlopen").and_return(FakeConnection(True)) self.assertRaises(search_exceptions.InternalError, solr.commit_update, {}) dictionary = {'responseHeader':{'status': 1}} json.should_receive("load").and_return(dictionary).once() self.assertRaises(search_exceptions.InternalError, solr.commit_update, {}) dictionary = {'responseHeader':{'status': 0}} json.should_receive("load").and_return(dictionary).once() solr.commit_update({}) def test_update_document(self): appscale_info = flexmock() appscale_info.should_receive("get_search_location").\ and_return("somelocation") solr = solr_interface.Solr() solr = flexmock(solr) solr.should_receive("to_solr_doc").and_return(FakeSolrDoc()) solr.should_receive("_get_index_adapter").and_return(FakeIndex()) solr.should_receive("compute_updates").and_return([]) solr.should_receive("to_solr_hash_map").and_return(None) solr.should_receive("commit_update").and_return(None) solr.update_document("app_id", None, FakeIndexSpec()) solr.should_receive("compute_updates").and_return([1,2]) solr.should_receive("update_schema").twice() solr.update_document("app_id", None, FakeIndexSpec()) solr.should_receive("to_solr_hash_map").and_return(None).once() solr.update_document("app_id", None, FakeIndexSpec()) def test_json_loads_byteified(self): json_with_unicode = ( '{"key2": [{"\\u2611": 28, "\\u2616": ["\\u263a"]}, "second", "third"], ' '"key1": "value", ' '"\\u2604": {"\\u2708": "\\u2708"}}' ) parsed_obj = 
solr_interface.json_loads_byteified(json_with_unicode) def walk_and_check_type(obj): if isinstance(obj, dict): for key, value in obj.iteritems(): self.assertIsInstance(key, str) walk_and_check_type(value) elif isinstance(obj, list): for value in obj: walk_and_check_type(value) else: self.assertIsInstance(obj, (str, int)) walk_and_check_type(parsed_obj) self.assertEqual(parsed_obj, { 'key1': 'value', 'key2': [ {'\xe2\x98\x91': 28, '\xe2\x98\x96': ['\xe2\x98\xba']}, 'second', 'third' ], '\xe2\x98\x84': {'\xe2\x9c\x88': '\xe2\x9c\x88'} })
33.405941
80
0.678275
790
6,748
5.497468
0.187342
0.0898
0.039143
0.066314
0.604881
0.571955
0.559982
0.527055
0.503108
0.472945
0
0.015228
0.182573
6,748
201
81
33.572139
0.772117
0.02134
0
0.447853
0
0
0.135687
0
0
0
0
0
0.08589
1
0.104294
false
0.006135
0.04908
0.018405
0.233129
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4acea4b00d95238388dfdf1bfda34fd153268c2f
5,858
py
Python
WDJN/eval/eval.py
silverriver/Stylized_Dialog
559dd97c4ec9c91e94deb048f789684ef3f1f9fa
[ "MIT" ]
21
2020-12-16T08:53:38.000Z
2022-01-21T09:08:55.000Z
WDJN/eval/eval.py
silverriver/Stylized_Dialog
559dd97c4ec9c91e94deb048f789684ef3f1f9fa
[ "MIT" ]
1
2020-12-27T07:56:01.000Z
2020-12-30T05:13:11.000Z
WDJN/eval/eval.py
silverriver/Stylized_Dialog
559dd97c4ec9c91e94deb048f789684ef3f1f9fa
[ "MIT" ]
1
2022-02-28T12:19:19.000Z
2022-02-28T12:19:19.000Z
import os from nltk.translate.bleu_score import corpus_bleu from nltk.translate.bleu_score import SmoothingFunction import json from tqdm import tqdm, trange from random import sample import numpy as np import pickle import argparse import bert_eval_acc import svm_eval_acc smooth = SmoothingFunction() def eval_bleu(ref, pred): """ :param ref: list(list(list(any))), a list of reference sentences, each element of the list is a list of references :param pred: list(list(any)), a list of predictions :return: corpus bleu score """ return corpus_bleu(ref, pred, smoothing_function=smooth.method1) def eval_bleu_detail(ref, pred): """ :param ref: list(list(list(any))), a list of reference sentences, each element of the list is a list of references :param pred: list(list(any)), a list of predictions :return: corpus bleu score """ return corpus_bleu(ref, pred, weights=[1, 0, 0, 0], smoothing_function=smooth.method1),\ corpus_bleu(ref, pred, weights=[0, 1, 0, 0], smoothing_function=smooth.method1), \ corpus_bleu(ref, pred, weights=[0, 0, 1, 0], smoothing_function=smooth.method1), \ corpus_bleu(ref, pred, weights=[0, 0, 0, 1], smoothing_function=smooth.method1) def count_ngram(hyps_resp, n): """ Count the number of unique n-grams :param hyps_resp: list, a list of responses :param n: int, n-gram :return: the number of unique n-grams in hyps_resp """ if len(hyps_resp) == 0: print("ERROR, eval_distinct get empty input") return if type(hyps_resp[0]) != list: print("ERROR, eval_distinct takes in a list of <class 'list'>, get a list of {} instead".format( type(hyps_resp[0]))) return ngram = set() for resp in hyps_resp: if len(resp) < n: continue for i in range(len(resp) - n + 1): ngram.add(' '.join(resp[i: i + n])) return len(ngram) def eval_distinct_detail(hyps_resp): """ compute distinct score for the hyps_resp :param hyps_resp: list, a list of hyps responses :return: average distinct score for 1, 2-gram """ if len(hyps_resp) == 0: print("ERROR, eval_distinct get empty input") return if type(hyps_resp[0]) != list: print("ERROR, eval_distinct takes in a list of <class 'list'>, get a list of {} instead".format( type(hyps_resp[0]))) return hyps_resp = [[str(x) for x in l] for l in hyps_resp] hyps_resp = [(' '.join(i)).split() for i in hyps_resp] num_tokens = sum([len(i) for i in hyps_resp]) dist1 = count_ngram(hyps_resp, 1) / float(num_tokens) dist2 = count_ngram(hyps_resp, 2) / float(num_tokens) return dist1, dist2 def eval_f1(ref, pred): """ :param ref: list(list(list(any))), a list of reference sentences, each element of the list is a list of references :param pred: list(list(any)), a list of predictions :return: f1 score """ assert len(ref) == len(pred) > 0 precisions = [] recalls = [] for i, s in enumerate(pred): ref_set = set() for rs in ref[i]: for w in rs: ref_set.add(w) pred_set = set() for w in s: pred_set.add(w) p = 0 for w in s: if w in ref_set: p += 1 if len(s) > 0: p /= len(s) r = 0 for rs in ref[i]: for w in rs: if w in pred_set: r += 1 tot_l = sum([len(rs) for rs in ref[i]]) if tot_l > 0: r /= tot_l precisions.append(p) recalls.append(r) precision = sum(precisions) / len(precisions) recall = sum(recalls) / len(recalls) return 0.0 if precision == recall == 0 else 2 * precision * recall / (precision + recall) def calc_metrics_value(task, fn, n_sample=None): with open(fn) as f: res = [json.loads(i) for i in f.readlines()] s0_pred, s0_ref = [], [] s1_pred, s1_ref = [], [] for d in res: if d['style'] == 0: s0_ref.append([list(d['resp'])]) s0_pred.append(list(d['pred_style0'][0])) else: 
s1_ref.append([list(d['resp'])]) s1_pred.append(list(d['pred_style1'][0])) if n_sample: assert len(s0_ref) >= n_sample assert len(s1_ref) >= n_sample sampled_idxs = sample(range(len(s0_ref)), n_sample) s0_ref = [x for i, x in enumerate(s0_ref) if i in sampled_idxs] s0_pred = [x for i, x in enumerate(s0_pred) if i in sampled_idxs] sampled_idxs = sample(range(len(s1_ref)), n_sample) s1_ref = [x for i, x in enumerate(s1_ref) if i in sampled_idxs] s1_pred = [x for i, x in enumerate(s1_pred) if i in sampled_idxs] bleu_s0 = eval_bleu_detail(s0_ref, s0_pred) bleu_s1 = eval_bleu_detail(s1_ref, s1_pred) dist_s0 = eval_distinct_detail(s0_pred) dist_s1 = eval_distinct_detail(s1_pred) f1_s0 = eval_f1(s0_ref, s0_pred) f1_s1 = eval_f1(s1_ref, s1_pred) for k in range(1, 4): print('%d-gram BLEU:' % k, 's0', bleu_s0[k - 1] * 100, 's1', bleu_s1[k - 1] * 100, 'mean', (bleu_s0[k - 1] + bleu_s1[k - 1]) / 2 * 100) print('F1:', 's0', f1_s0 * 100, 's1', f1_s1 * 100, 'mean', (f1_s0 + f1_s1) / 2 * 100) print('Dist:', 's0', dist_s0[1] * 100, 's1', dist_s1[1] * 100, 'mean', (dist_s0[1] + dist_s1[1]) / 2 * 100) parser = argparse.ArgumentParser() parser.add_argument('--eval_file_path', help='path of the eval file', required=True) args = parser.parse_args() file_path = args.eval_file_path calc_metrics_value(None, file_path) print("Evaluating acc results:") bert_eval_acc.main(file_path) svm_eval_acc.main(file_path)
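A small illustrative call of the metric helpers above on toy tokenised data (assuming the module's imports resolve; the sentences are made up and the outputs are whatever the functions compute for them, not results from any experiment):

refs = [[['the', 'cat', 'sat', 'down']],
        [['hello', 'there', 'general', 'kenobi']]]   # one list of references per sample
preds = [['the', 'cat', 'sat', 'down'],
         ['hello', 'there', 'admiral', 'ackbar']]

print(eval_bleu(refs, preds))           # smoothed corpus BLEU
print(eval_distinct_detail(preds))      # (dist-1, dist-2)
print(eval_f1(refs, preds))             # token-level F1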
32.726257
118
0.602083
912
5,858
3.700658
0.160088
0.047407
0.031111
0.021333
0.504
0.399407
0.343704
0.302815
0.302815
0.291556
0
0.036898
0.273643
5,858
178
119
32.910112
0.756287
0.146808
0
0.16
0
0
0.076671
0
0
0
0
0
0.024
1
0.048
false
0
0.088
0
0.208
0.064
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4acf75fbdd9f5684eaa634c30e9274299d052baa
804
py
Python
homeassistant/components/unifi/const.py
olbjan/home-assistant-1
1adb45f74e96fc5eff137a3727647a7e428e123c
[ "Apache-2.0" ]
7
2019-02-07T14:14:12.000Z
2019-07-28T06:56:10.000Z
homeassistant/components/unifi/const.py
olbjan/home-assistant-1
1adb45f74e96fc5eff137a3727647a7e428e123c
[ "Apache-2.0" ]
6
2021-02-08T20:54:31.000Z
2022-03-12T00:50:43.000Z
homeassistant/components/unifi/const.py
olbjan/home-assistant-1
1adb45f74e96fc5eff137a3727647a7e428e123c
[ "Apache-2.0" ]
1
2020-09-23T16:41:16.000Z
2020-09-23T16:41:16.000Z
"""Constants for the UniFi component.""" import logging LOGGER = logging.getLogger(__package__) DOMAIN = "unifi" CONTROLLER_ID = "{host}-{site}" CONF_CONTROLLER = "controller" CONF_SITE_ID = "site" UNIFI_WIRELESS_CLIENTS = "unifi_wireless_clients" CONF_ALLOW_BANDWIDTH_SENSORS = "allow_bandwidth_sensors" CONF_BLOCK_CLIENT = "block_client" CONF_DETECTION_TIME = "detection_time" CONF_POE_CLIENTS = "poe_clients" CONF_TRACK_CLIENTS = "track_clients" CONF_TRACK_DEVICES = "track_devices" CONF_TRACK_WIRED_CLIENTS = "track_wired_clients" CONF_SSID_FILTER = "ssid_filter" DEFAULT_ALLOW_BANDWIDTH_SENSORS = False DEFAULT_POE_CLIENTS = True DEFAULT_TRACK_CLIENTS = True DEFAULT_TRACK_DEVICES = True DEFAULT_TRACK_WIRED_CLIENTS = True DEFAULT_DETECTION_TIME = 300 ATTR_MANUFACTURER = "Ubiquiti Networks"
25.935484
56
0.823383
104
804
5.836538
0.375
0.072488
0.103789
0.075783
0
0
0
0
0
0
0
0.004132
0.097015
804
30
57
26.8
0.831956
0.042289
0
0
0
0
0.244764
0.058901
0
0
0
0
0
1
0
false
0
0.045455
0
0.045455
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4acfa6e08c91d6cf965af047f2b0bfd2e83e88a1
503
py
Python
coding_intereview/1656. Design an Ordered Stream.py
Jahidul007/Python-Bootcamp
3c870587465ff66c2c1871c8d3c4eea72463abda
[ "MIT" ]
2
2020-12-07T16:07:07.000Z
2020-12-07T16:08:53.000Z
coding_intereview/1656. Design an Ordered Stream.py
Jahidul007/Python-Bootcamp
3c870587465ff66c2c1871c8d3c4eea72463abda
[ "MIT" ]
null
null
null
coding_intereview/1656. Design an Ordered Stream.py
Jahidul007/Python-Bootcamp
3c870587465ff66c2c1871c8d3c4eea72463abda
[ "MIT" ]
1
2020-10-03T16:38:02.000Z
2020-10-03T16:38:02.000Z
from typing import List


class OrderedStream:

    def __init__(self, n: int):
        self.data = [None]*n
        self.ptr = 0

    def insert(self, id: int, value: str) -> List[str]:
        id -= 1
        self.data[id] = value
        if id > self.ptr:
            return []
        while self.ptr < len(self.data) and self.data[self.ptr]:
            self.ptr += 1
        return self.data[id:self.ptr]


# Your OrderedStream object will be instantiated and called as such:
# obj = OrderedStream(n)
# param_1 = obj.insert(id,value)
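A short usage example tracing the pointer behaviour (values follow the published LeetCode 1656 example):

stream = OrderedStream(5)
print(stream.insert(3, "ccccc"))   # []                    (ptr still waits for id 1)
print(stream.insert(1, "aaaaa"))   # ['aaaaa']
print(stream.insert(2, "bbbbb"))   # ['bbbbb', 'ccccc']
print(stream.insert(5, "eeeee"))   # []
print(stream.insert(4, "ddddd"))   # ['ddddd', 'eeeee']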
26.473684
80
0.584493
73
503
3.958904
0.438356
0.145329
0.069204
0
0
0
0
0
0
0
0
0.011142
0.286282
503
18
81
27.944444
0.793872
0.238569
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0
0
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4ad0334044a6b76510a6250d8488d1fea4817857
326
py
Python
lista01/rpc/ex01_cl.py
SD-CC-UFG/leonardo.fleury
0a8dfc5752c739f5ff98890477355df8960ad730
[ "MIT" ]
null
null
null
lista01/rpc/ex01_cl.py
SD-CC-UFG/leonardo.fleury
0a8dfc5752c739f5ff98890477355df8960ad730
[ "MIT" ]
null
null
null
lista01/rpc/ex01_cl.py
SD-CC-UFG/leonardo.fleury
0a8dfc5752c739f5ff98890477355df8960ad730
[ "MIT" ]
null
null
null
import xmlrpc.client


def main():
    s = xmlrpc.client.ServerProxy('http://localhost:9991')
    nome = input("Nome: ")
    cargo = input("Cargo (programador, operador): ")
    salario = float(input("Salário: "))
    print("\n\n{}".format(s.atualiza_salario(nome, cargo, salario)))


if __name__ == '__main__':
    main()
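A hypothetical server counterpart for the client above (not part of this file; the salary rule is invented purely so the RPC call has something to return):

from xmlrpc.server import SimpleXMLRPCServer

def atualiza_salario(nome, cargo, salario):
    # Illustrative rule: programmers get a 10% raise, operators 5%.
    fator = 1.10 if cargo == 'programador' else 1.05
    return '{} ({}): novo salario {:.2f}'.format(nome, cargo, salario * fator)

server = SimpleXMLRPCServer(('localhost', 9991), allow_none=True)
server.register_function(atualiza_salario, 'atualiza_salario')
server.serve_forever()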
20.375
68
0.628834
38
326
5.157895
0.631579
0.122449
0
0
0
0
0
0
0
0
0
0.015038
0.184049
326
15
69
21.733333
0.721805
0
0
0
0
0
0.248466
0
0
0
0
0
0
1
0.111111
false
0
0.111111
0
0.222222
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4ad2b9e71e54721776c8640bd3dfe9980a8f4ea4
654
py
Python
src/graph_transpiler/webdnn/backend/webgl/optimize_rules/simplify_channel_mode_conversion/simplify_channel_mode_conversion.py
gunpowder78/webdnn
c659ea49007f91d178ce422a1eebe289516a71ee
[ "MIT" ]
1
2018-07-26T13:52:21.000Z
2018-07-26T13:52:21.000Z
src/graph_transpiler/webdnn/backend/webgl/optimize_rules/simplify_channel_mode_conversion/simplify_channel_mode_conversion.py
gunpowder78/webdnn
c659ea49007f91d178ce422a1eebe289516a71ee
[ "MIT" ]
null
null
null
src/graph_transpiler/webdnn/backend/webgl/optimize_rules/simplify_channel_mode_conversion/simplify_channel_mode_conversion.py
gunpowder78/webdnn
c659ea49007f91d178ce422a1eebe289516a71ee
[ "MIT" ]
null
null
null
from webdnn.backend.webgl.optimize_rules.simplify_channel_mode_conversion.simplify_nonsense_channel_mode_conversion import \
    SimplifyNonsenseChannelModeConversion
from webdnn.backend.webgl.optimize_rules.simplify_channel_mode_conversion.simplify_redundant_channel_mode_conversion import \
    SimplifyRedundantChannelModeConversion
from webdnn.graph.optimize_rule import OptimizeRuleGroup


class SimplifyChannelModeConversion(OptimizeRuleGroup):
    def __init__(self):
        super(SimplifyChannelModeConversion, self).__init__([
            SimplifyRedundantChannelModeConversion(),
            SimplifyNonsenseChannelModeConversion()
        ])
46.714286
125
0.831804
53
654
9.792453
0.45283
0.084778
0.16185
0.084778
0.277457
0.277457
0.277457
0.277457
0.277457
0.277457
0
0
0.117737
654
13
126
50.307692
0.89948
0
0
0
0
0
0
0
0
0
0
0
0
1
0.090909
false
0
0.272727
0
0.454545
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4ad2c65a15fe6f6a8837baee7e607c55330b95b9
3,998
py
Python
script.video.F4mProxy/lib/flvlib/constants.py
akuala/REPO.KUALA
ea9a157025530d2ce8fa0d88431c46c5352e89d4
[ "Apache-2.0" ]
105
2015-11-28T00:03:11.000Z
2021-05-05T20:47:42.000Z
script.video.F4mProxy/lib/flvlib/constants.py
akuala/REPO.KUALA
ea9a157025530d2ce8fa0d88431c46c5352e89d4
[ "Apache-2.0" ]
918
2015-11-28T14:12:40.000Z
2022-03-23T20:24:49.000Z
script.video.F4mProxy/lib/flvlib/constants.py
akuala/REPO.KUALA
ea9a157025530d2ce8fa0d88431c46c5352e89d4
[ "Apache-2.0" ]
111
2015-12-01T14:06:10.000Z
2020-08-01T10:44:39.000Z
""" The constants used in FLV files and their meanings. """ # Tag type (TAG_TYPE_AUDIO, TAG_TYPE_VIDEO, TAG_TYPE_SCRIPT) = (8, 9, 18) # Sound format (SOUND_FORMAT_PCM_PLATFORM_ENDIAN, SOUND_FORMAT_ADPCM, SOUND_FORMAT_MP3, SOUND_FORMAT_PCM_LITTLE_ENDIAN, SOUND_FORMAT_NELLYMOSER_16KHZ, SOUND_FORMAT_NELLYMOSER_8KHZ, SOUND_FORMAT_NELLYMOSER, SOUND_FORMAT_G711_A_LAW, SOUND_FORMAT_G711_MU_LAW) = range(9) (SOUND_FORMAT_AAC, SOUND_FORMAT_SPEEX) = range(10, 12) (SOUND_FORMAT_MP3_8KHZ, SOUND_FORMAT_DEVICE_SPECIFIC) = range(14, 16) sound_format_to_string = { SOUND_FORMAT_PCM_PLATFORM_ENDIAN: "Linear PCM, platform endian", SOUND_FORMAT_ADPCM: "ADPCM", SOUND_FORMAT_MP3: "MP3", SOUND_FORMAT_PCM_LITTLE_ENDIAN: "Linear PCM, little endian", SOUND_FORMAT_NELLYMOSER_16KHZ: "Nellymoser 16-kHz mono", SOUND_FORMAT_NELLYMOSER_8KHZ: "Nellymoser 8-kHz mono", SOUND_FORMAT_NELLYMOSER: "Nellymoser", SOUND_FORMAT_G711_A_LAW: "G.711 A-law logarithmic PCM", SOUND_FORMAT_G711_MU_LAW: "G.711 mu-law logarithmic PCM", SOUND_FORMAT_AAC: "AAC", SOUND_FORMAT_SPEEX: "Speex", SOUND_FORMAT_MP3_8KHZ: "MP3 8-kHz", SOUND_FORMAT_DEVICE_SPECIFIC: "Device-specific sound" } # Sound rate (SOUND_RATE_5_5_KHZ, SOUND_RATE_11_KHZ, SOUND_RATE_22_KHZ, SOUND_RATE_44_KHZ) = range(4) sound_rate_to_string = { SOUND_RATE_5_5_KHZ: "5.5-kHz", SOUND_RATE_11_KHZ: "11-kHz", SOUND_RATE_22_KHZ: "22-kHz", SOUND_RATE_44_KHZ: "44-kHz" } # Sound size (SOUND_SIZE_8_BIT, SOUND_SIZE_16_BIT) = range(2) sound_size_to_string = { SOUND_SIZE_8_BIT: "snd8Bit", SOUND_SIZE_16_BIT: "snd16Bit" } # Sound type (SOUND_TYPE_MONO, SOUND_TYPE_STEREO) = range(2) sound_type_to_string = { SOUND_TYPE_MONO: "sndMono", SOUND_TYPE_STEREO: "sndStereo" } # AAC packet type (AAC_PACKET_TYPE_SEQUENCE_HEADER, AAC_PACKET_TYPE_RAW) = range(2) aac_packet_type_to_string = { AAC_PACKET_TYPE_SEQUENCE_HEADER: "sequence header", AAC_PACKET_TYPE_RAW: "raw" } # Codec ID (CODEC_ID_JPEG, CODEC_ID_H263, CODEC_ID_SCREEN_VIDEO, CODEC_ID_VP6, CODEC_ID_VP6_WITH_ALPHA, CODEC_ID_SCREEN_VIDEO_V2, CODEC_ID_H264) = range(1, 8) codec_id_to_string = { CODEC_ID_JPEG: "JPEG", CODEC_ID_H263: "Sorenson H.263", CODEC_ID_SCREEN_VIDEO: "Screen video", CODEC_ID_VP6: "On2 VP6", CODEC_ID_VP6_WITH_ALPHA: "On2 VP6 with alpha channel", CODEC_ID_SCREEN_VIDEO_V2: "Screen video version 2", CODEC_ID_H264: "H.264" } # Frame type (FRAME_TYPE_KEYFRAME, FRAME_TYPE_INTERFRAME, FRAME_TYPE_DISPOSABLE_INTERFRAME, FRAME_TYPE_GENERATED_KEYFRAME, FRAME_TYPE_INFO_FRAME) = range(1, 6) frame_type_to_string = { FRAME_TYPE_KEYFRAME: "keyframe", FRAME_TYPE_INTERFRAME: "interframe", FRAME_TYPE_DISPOSABLE_INTERFRAME: "disposable interframe", FRAME_TYPE_GENERATED_KEYFRAME: "generated keyframe", FRAME_TYPE_INFO_FRAME: "video info/command frame" } # H.264 packet type (H264_PACKET_TYPE_SEQUENCE_HEADER, H264_PACKET_TYPE_NALU, H264_PACKET_TYPE_END_OF_SEQUENCE) = range(3) h264_packet_type_to_string = { H264_PACKET_TYPE_SEQUENCE_HEADER: "sequence header", H264_PACKET_TYPE_NALU: "NAL unit", H264_PACKET_TYPE_END_OF_SEQUENCE: "sequence end" } # Value type (VALUE_TYPE_NUMBER, VALUE_TYPE_BOOLEAN, VALUE_TYPE_STRING, VALUE_TYPE_OBJECT, VALUE_TYPE_MOVIECLIP, VALUE_TYPE_NULL, VALUE_TYPE_UNDEFINED, VALUE_TYPE_REFERENCE, VALUE_TYPE_ECMA_ARRAY) = range(9) (VALUE_TYPE_STRICT_ARRAY, VALUE_TYPE_DATE, VALUE_TYPE_LONGSTRING) = range(10, 13) value_type_to_string = { VALUE_TYPE_NUMBER: 'Number', VALUE_TYPE_BOOLEAN: 'Boolean', VALUE_TYPE_STRING: 'String', VALUE_TYPE_OBJECT: 'Object', VALUE_TYPE_MOVIECLIP: 'MovieClip', VALUE_TYPE_NULL: 'Null', VALUE_TYPE_UNDEFINED: 'Undefined', 
VALUE_TYPE_REFERENCE: 'Reference', VALUE_TYPE_ECMA_ARRAY: 'ECMA Array', VALUE_TYPE_STRICT_ARRAY: 'Strict Array', VALUE_TYPE_DATE: 'Date', VALUE_TYPE_LONGSTRING: 'Longstring' }
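The mappings above are plain dicts keyed by the numeric codes found in FLV tag headers, so decoding is a direct lookup; a small illustrative example:

print(sound_format_to_string[SOUND_FORMAT_AAC])     # "AAC"
print(sound_rate_to_string[SOUND_RATE_44_KHZ])      # "44-kHz"
print(codec_id_to_string[CODEC_ID_H264])            # "H.264"
print(frame_type_to_string[FRAME_TYPE_KEYFRAME])    # "keyframe"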
24.679012
68
0.765883
591
3,998
4.654822
0.191201
0.111959
0.035623
0.034896
0.452199
0.279171
0.043621
0
0
0
0
0.043466
0.142571
3,998
161
69
24.832298
0.759043
0.043022
0
0
0
0
0.151746
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4ad2ef7203bc120919170c5085d9fe1547885b6b
8,318
py
Python
gnn_model.py
thoang3/graph_neural_network_benchmark
72dc031ed23c6684c43d6f2ace03425f9b69cee6
[ "MIT" ]
null
null
null
gnn_model.py
thoang3/graph_neural_network_benchmark
72dc031ed23c6684c43d6f2ace03425f9b69cee6
[ "MIT" ]
null
null
null
gnn_model.py
thoang3/graph_neural_network_benchmark
72dc031ed23c6684c43d6f2ace03425f9b69cee6
[ "MIT" ]
null
null
null
import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from load_cora import load_cora from baseline_model import create_ffn from utils import run_experiment from utils import display_learning_curves # Graph convolution layer class GraphConvLayer(layers.Layer): def __init__( self, hidden_units, dropout_rate=0.2, aggregation_type="mean", combination_type="concat", normalize=False, *args, **kwargs ): super(GraphConvLayer, self).__init__(*args, **kwargs) self._aggregation_type = aggregation_type self._combination_type = combination_type self._normalize = normalize self._ffn_prepare = create_ffn(hidden_units, dropout_rate) if self._combination_type == "gated": self._update_fn = layers.GRU( units=hidden_units, activation="tanh", recurrent_activation="sigmoid", dropout=dropout_rate, return_state=True, recurrent_dropout=dropout_rate ) else: self._update_fn = create_ffn(hidden_units, dropout_rate) def _prepare(self, node_representations, weights=None): # node_representations shape is [num_edges, embedding_dim] messages = self._ffn_prepare(node_representations) if weights is not None: messages = messages * tf.expand_dims(weights, -1) return messages def _aggregate(self, node_indices, neighbour_messages): # node_indices shape is [num_edges] # neighbour_messages shape: [num_edges, representation_dim] num_nodes = tf.math.reduce_max(node_indices) + 1 if self._aggregation_type == "sum": aggregated_message = tf.math.unsorted_segment_sum( neighbour_messages, node_indices, num_segments=num_nodes ) elif self._aggregation_type == "mean": aggregated_message = tf.math.unsorted_segment_mean( neighbour_messages, node_indices, num_segments=num_nodes ) elif self._aggregation_type == "max": aggregated_message = tf.math.unsorted_segment_max( neighbour_messages, node_indices, num_segments=num_nodes ) else: raise ValueError(f"Invalid aggregation type: {self._aggregation_type}.") return aggregated_message def _update(self, node_representations, aggregated_messages): # node_representations shape is [num_nodes, representation_dim] # aggregated_messages shape is [num_nodes, representation_dim] if self._combination_type == "gru": # Create a sequence of two elements for the GRU layer h = tf.stack([node_respresentations, aggregated_messages], axis=1) elif self._combination_type == "concat": # Concatenate the node_representations and aggregated_messages h = tf.concat([node_representations, aggregated_messages], axis=1) elif self._combination_type == "add": # Add node_representations and aggregated_messages h = node_representations + aggregated_messages else: raise ValueError(f"Invalid combination type: {self._combinatino_type}.") # Apply the processing function node_embeddings = self._update_fn(h) if self._combination_type == "gru": node_embeddings = tf.unstack(node_embeddings, axis=1)[-1] if self._normalize: node_embeddings = tf.nn.l2_normalize(node_embeddings, axis=-1) return node_embeddings def call(self, inputs): """Process the inputs to produce the node_embeddings. Args: Inputs: A tuple of three elements: node_representations, edges, edge_weights. Returns: node_embeddings of shape [num_nodes, representation_dim]. 
""" node_representations, edges, edge_weights = inputs # Get node_indices (source) and neighbour_indices (target) from edges node_indices, neighbour_indices = edges[0], edges[1] # neighbour_representations shape is [num_edges, representation_dim] neighbour_representations = tf.gather(node_representations, neighbour_indices) # Prepare the messages of the neighbours neighbour_messages = self._prepare(neighbour_representations, edge_weights) # Aggregate the neighbour messages aggregated_messages = self._aggregate(node_indices, neighbour_messages) # Update the node embedding with the neighbour messages return self._update(node_representations, aggregated_messages) class GNNNodeClassifier(tf.keras.Model): def __init__( self, graph_info, num_classes, hidden_units, aggregation_type="sum", combination_type="concat", dropout_rate=0.2, normalize=True, *args, **kwargs ): super(GNNNodeClassifier, self).__init__(*args, **kwargs) # Unpack graph_info node_features, edges, edge_weights = graph_info self._node_features = node_features self._edges = edges self._edge_weights = edge_weights # Set edge_weights to ones if not provided if self._edge_weights is None: self._edge_weights = tf.ones(shape=edges.shape[1]) # Scale edge_weights to sum to 1 self._edge_weights = self._edge_weights / tf.math.reduce_sum(self._edge_weights) # Create a process layer self._preprocess = create_ffn(hidden_units, dropout_rate, name="preprocess") # Create the 1st GraphConv layer self._conv1 = GraphConvLayer( hidden_units, dropout_rate, aggregation_type, combination_type, normalize, name="graph_conv1" ) # Create the 2nd GraphConv layer self._conv2 = GraphConvLayer( hidden_units, dropout_rate, aggregation_type, combination_type, normalize, name="graph_conv2" ) # Create a postprocess layer self._postprocess = create_ffn(hidden_units, dropout_rate, name="postprocess") # Create a compute logits layer self._compute_logits = layers.Dense(units=num_classes, name="logits") def call(self, input_node_indices): # Preprocess the node_features to produce node representations x = self._preprocess(self._node_features) # Apply the 1st graph conv layer x1 = self._conv1((x, self._edges, self._edge_weights)) # Skip connection x = x1 + x # Apply the 2nd graph conv layer x2 = self._conv2((x, self._edges, self._edge_weights)) # Skip connection x = x2 + x # Postprocess node embedding x = self._postprocess(x) # Fetch node embeddings for the input node_indices node_embeddings = tf.gather(x, input_node_indices) # Compute logits return self._compute_logits(node_embeddings) if __name__ == '__main__': papers, train_data, test_data, paper_idx, class_idx, citations, feature_names = load_cora(verbose=1) num_features = len(feature_names) num_classes = len(class_idx) hidden_units = [32, 32] learning_rate = 0.01 dropout_rate = 0.5 epochs = 300 batch_size = 256 # Create an edges array (sparse adjacency matrix) of shape [2, num_edges] edges = citations[["source", "target"]].to_numpy().T #print(edges) # Create an edge weights array of ones (default weights) edge_weights = tf.ones(shape=edges.shape[1]) # Create a node features array of shape [num_nodes, num_features] node_features = tf.cast( papers.sort_values("paper_id")[feature_names].to_numpy(), dtype=tf.float32) # Create graph info tuple with node_features, edges, and edge_weights graph_info = (node_features, edges, edge_weights) print("Edges shape: ", edges.shape) print("Nodes shape: ", node_features.shape) gnn_model = GNNNodeClassifier( graph_info=graph_info, num_classes=num_classes, hidden_units=hidden_units, 
dropout_rate=dropout_rate, name="gnn_model" ) print("GNN output shape: ", gnn_model([1, 10, 100])) gnn_model.summary() # Train the GNN model X_train = train_data.paper_id.to_numpy() y_train = train_data.subject history = run_experiment(gnn_model, X_train, y_train, batch_size, epochs, learning_rate) # Plot the learning curves display_learning_curves(history, figure_name="gnn.png") # Evaluate on test data X_test = test_data.paper_id.to_numpy() y_test = test_data.subject _, test_accuracy = gnn_model.evaluate(x=X_test, y=y_test, verbose=1) print(f"Test accuracy: {round(test_accuracy * 100, 2)}%")
33.007937
103
0.709425
1,043
8,318
5.347076
0.197507
0.037475
0.02582
0.031558
0.242424
0.196701
0.13215
0.10633
0.069571
0.055227
0
0.009237
0.206059
8,318
251
104
33.139442
0.835251
0.213513
0
0.240964
0
0
0.053469
0.007727
0
0
0
0
0
1
0.042169
false
0
0.042169
0
0.126506
0.024096
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4ad45250872794a6a29b08c6da2bcb27a740d5f5
5,098
py
Python
src/sim/basicExample/main.py
andremtsilva/dissertacao
7c039ffe871468be0215c482adb42830fff586aa
[ "MIT" ]
null
null
null
src/sim/basicExample/main.py
andremtsilva/dissertacao
7c039ffe871468be0215c482adb42830fff586aa
[ "MIT" ]
null
null
null
src/sim/basicExample/main.py
andremtsilva/dissertacao
7c039ffe871468be0215c482adb42830fff586aa
[ "MIT" ]
null
null
null
""" This is the most simple scenario with a basic topology, some users and a set of apps with only one service. @author: Isaac Lera """ import os import time import json import random import logging.config import networkx as nx import numpy as np from pathlib import Path from yafs.core import Sim from yafs.application import create_applications_from_json from yafs.topology import Topology from yafs.placement import JSONPlacement from yafs.path_routing import DeviceSpeedAwareRouting from yafs.distribution import deterministic_distribution from yafs.stats import Stats RANDOM_SEED = 1 def main(stop_time, it): folder_results = Path("results/") folder_results.mkdir(parents=True, exist_ok=True) folder_results = str(folder_results)+"/" """ TOPOLOGY """ # Fix position of nodes for drawing random.seed(RANDOM_SEED) np.random.seed(RANDOM_SEED) t = Topology() # You also can create a topology using JSONs files. Check out examples folder size = 3 t.G = nx.generators.binomial_tree(size) # In NX-lib there are a lot of Graphs generators # Definition of mandatory attributes of a Topology ## Attr. on edges # PR (link propagation) and BW (bandwith) are 1 unit attPR_BW = {x: 1 for x in t.G.edges()} nx.set_edge_attributes(t.G, name="PR", values=attPR_BW) nx.set_edge_attributes(t.G, name="BW", values=attPR_BW) ## Attr. on nodes # IPT attIPT = {x: random.randrange(100, 900, 100) for x in t.G.nodes()} nx.set_node_attributes(t.G, name="IPT", values=attIPT) # nx.write_gexf(t.G,folder_results+"graph_binomial_tree_%i"%size) # you can export the Graph in multiples format to view in tools like Gephi, and so on. nx.write_graphml(t.G,folder_results+"graph_binomial_tree_%i.graphml"%size) # Graph visualization pos = nx.spring_layout(t.G) nx.draw(t.G, pos, with_labels=True, edge_color='black', width=1, alpha=0.7) print(t.G.nodes()) # nodes id can be str or int print() print(nx.get_node_attributes(t.G, "IPT")) print() """ APPLICATION or SERVICES """ dataApp = json.load(open('data/appDefinition.json')) apps = create_applications_from_json(dataApp) # print(apps) """ SERVICE PLACEMENT """ placementJson = json.load(open('data/allocDefinition.json')) placement = JSONPlacement(name="Placement", json=placementJson) """ Defining ROUTING algorithm to define how path messages in the topology among modules """ selectorPath = DeviceSpeedAwareRouting() """ SIMULATION ENGINE """ s = Sim(t, default_results_path=folder_results+"sim_trace") """ Deploy services == APP's modules """ for aName in apps.keys(): s.deploy_app(apps[aName], placement, selectorPath) # Note: each app can have a different routing algorithm """ Deploy users """ userJSON = json.load(open('data/usersDefinition.json')) for user in userJSON["sources"]: app_name = user["app"] app = s.apps[app_name] msg = app.get_message(user["message"]) node = user["id_resource"] dist = deterministic_distribution(100, name="Deterministic") idDES = s.deploy_source(app_name, id_node=node, msg=msg, distribution=dist) """ RUNNING - last step """ logging.info(" Performing simulation: %i " % it) s.run(stop_time) # To test deployments put test_initial_deploy a TRUE s.print_debug_assignaments() if __name__ == '__main__': logging.config.fileConfig(os.getcwd() + '/logging.ini') nIterations = 1 # iteration for each experiment simulationDuration = 1000 # Iteration for each experiment changing the seed of randoms for iteration in range(nIterations): random.seed(iteration) logging.info("Running experiment it: - %i" % iteration) start_time = time.time() main(stop_time=simulationDuration, it=iteration) 
print("\n--- %s seconds ---" % (time.time() - start_time)) print("Simulation Done!") m = Stats(defaultPath="results/sim_trace") # print ("\tNetwork bytes transmitted:") # print (f"\t\t{m.bytes_transmitted():.1f}") # m.df_link.head(15) # from Stats class time_loops = [["M.USER.APP.0", "M.USER.APP.1", "M.USER.APP.2", "M.USER.APP.3"]] m.showResults2(10000, time_loops=time_loops) m.compute_times_df() print ("\t- Network saturation -") print() print ("\t\tAverage waiting messages : " f"{m.average_messages_not_transmitted()}") print() print ("\t\tPeak of waiting messages :" f"{m.peak_messages_not_transmitted()}") print() print(f"\t\tShow Loops: {m.showLoops(time_loops)}") print() print (f"\t\tTOTAL messages not transmitted:" f" {m.messages_not_transmitted()}") print() #print(m.df.head()) #print(m.df['time_latency']) #print(m.df_link.head()) print(m.get_df_modules())
27.857923
156
0.652217
681
5,098
4.74743
0.35536
0.007423
0.014847
0.014847
0.070523
0.03588
0.03588
0.020414
0
0
0
0.0094
0.227933
5,098
183
157
27.857924
0.811992
0.198117
0
0.079545
0
0
0.16887
0.062315
0
0
0
0
0
1
0.011364
false
0
0.170455
0
0.181818
0.204545
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4ad523fc14942dd490ad41c526c6171f60967ac3
476
py
Python
Backend/models/risklayerPrognosis.py
dbvis-ukon/coronavis
f00374ac655c9d68541183d28ede6fe5536581dc
[ "Apache-2.0" ]
15
2020-04-24T20:18:11.000Z
2022-01-31T21:05:05.000Z
Backend/models/risklayerPrognosis.py
dbvis-ukon/coronavis
f00374ac655c9d68541183d28ede6fe5536581dc
[ "Apache-2.0" ]
2
2021-05-19T07:15:09.000Z
2022-03-07T08:29:34.000Z
Backend/models/risklayerPrognosis.py
dbvis-ukon/coronavis
f00374ac655c9d68541183d28ede6fe5536581dc
[ "Apache-2.0" ]
4
2020-04-27T16:20:13.000Z
2021-02-23T10:39:42.000Z
from db import db


class RisklayerPrognosis(db.Model):
    __tablename__ = 'risklayer_prognosis'

    datenbestand = db.Column(db.TIMESTAMP, primary_key=True, nullable=False)
    prognosis = db.Column(db.Float, nullable=False)


# class RisklayerPrognosisSchema(SQLAlchemyAutoSchema):
#     class Meta:
#         strict = True
#         model = RisklayerPrognosis
#
#     timestamp = fields.Timestamp(data_key="datenbestand")
#     prognosis = fields.Number(data_key="prognosis")
28
76
0.72479
49
476
6.877551
0.510204
0.047478
0.059347
0
0
0
0
0
0
0
0
0
0.170168
476
16
77
29.75
0.853165
0.487395
0
0
0
0
0.080508
0
0
0
0
0
0
1
0
false
0
0.2
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4ad5abaadbbca74176e6ec4d71b60fea9789204e
2,520
py
Python
tests.py
smartfile/django-secureform
3b7a8b90550327f370ea02c6886220b2db0517b5
[ "MIT" ]
12
2015-02-23T19:45:45.000Z
2021-05-05T20:35:26.000Z
tests.py
smartfile/django-secureform
3b7a8b90550327f370ea02c6886220b2db0517b5
[ "MIT" ]
3
2015-08-09T18:14:16.000Z
2018-10-23T03:16:38.000Z
tests.py
smartfile/django-secureform
3b7a8b90550327f370ea02c6886220b2db0517b5
[ "MIT" ]
6
2015-05-09T07:46:00.000Z
2019-11-27T09:54:57.000Z
import os import unittest os.environ['DJANGO_SETTINGS_MODULE'] = 'settings' import django if django.VERSION >= (1, 7): django.setup() from django import forms from django.db import models from django.forms.forms import NON_FIELD_ERRORS from django_secureform.forms import SecureForm def get_form_sname(form, name): for sname, v in form._secure_field_map.items(): if v and v == name: return sname raise KeyError(name) def get_form_honeypot(form): for sname, v in form._secure_field_map.items(): if v is None: return sname raise Exception('No honeypots found.') def get_form_secure_data(form): # We must copy over the security data. return form._meta.secure_field_name, form[form._meta.secure_field_name].value() class BasicForm(SecureForm): name = forms.CharField(required=True, max_length=16) class FormTestCase(unittest.TestCase): klass = BasicForm def setUp(self): self.form = self.klass() self.form.secure_data() def assertIn(self, value, iterable): self.assertTrue(value in iterable, '%s did not occur in %s' % (value, iterable)) def getForm(self, **kwargs): data = dict((get_form_secure_data(self.form), )) for n, v in kwargs.items(): data[get_form_sname(self.form, n)] = v return self.klass(data=data) class BasicTestCase(FormTestCase): def test_valid(self): post = self.getForm(name='foobar') self.assertTrue(post.is_valid()) def test_missing(self): post = self.getForm() self.assertFalse(post.is_valid()) self.assertIn('name', post._errors) def test_replay(self): post = self.getForm(name='foobar') post.is_valid() post = self.getForm(name='foobar') self.assertFalse(post.is_valid()) self.assertIn(NON_FIELD_ERRORS, post._errors) self.assertIn('This form has already been submitted.', post._errors[NON_FIELD_ERRORS]) def test_honeypot(self): honeypot = get_form_honeypot(self.form) data = dict((get_form_secure_data(self.form), )) data[honeypot] = 'mmm, hunny!' data[get_form_sname(self.form, 'name')] = 'foobar' post = self.klass(data=data) self.assertFalse(post.is_valid()) self.assertIn(NON_FIELD_ERRORS, post._errors) self.assertIn('Unexpected value in form field.', post._errors[NON_FIELD_ERRORS]) if __name__ == '__main__': unittest.main()
28.965517
94
0.660714
335
2,520
4.767164
0.271642
0.035066
0.043832
0.031935
0.347527
0.288666
0.204133
0.180338
0.139011
0.139011
0
0.002046
0.224206
2,520
86
95
29.302326
0.814834
0.014286
0
0.222222
0
0
0.076551
0.008864
0
0
0
0
0.174603
1
0.15873
false
0
0.111111
0.015873
0.412698
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4ad6cfb56509f081f06c889b6fbe45a5dd8ec0f3
24,265
py
Python
tools/gen_usb_descriptor.py
BrianPugh/circuitpython
f0bb9635bf311013e7b1ff69d1a0542575cf9d0a
[ "MIT", "Unlicense", "MIT-0", "BSD-3-Clause" ]
1
2020-08-29T12:06:14.000Z
2020-08-29T12:06:14.000Z
tools/gen_usb_descriptor.py
BrianPugh/circuitpython
f0bb9635bf311013e7b1ff69d1a0542575cf9d0a
[ "MIT", "Unlicense", "MIT-0", "BSD-3-Clause" ]
null
null
null
tools/gen_usb_descriptor.py
BrianPugh/circuitpython
f0bb9635bf311013e7b1ff69d1a0542575cf9d0a
[ "MIT", "Unlicense", "MIT-0", "BSD-3-Clause" ]
1
2021-01-18T00:52:39.000Z
2021-01-18T00:52:39.000Z
# SPDX-FileCopyrightText: 2014 MicroPython & CircuitPython contributors (https://github.com/adafruit/circuitpython/graphs/contributors) # # SPDX-License-Identifier: MIT import argparse import os import sys sys.path.append("../../tools/usb_descriptor") from adafruit_usb_descriptor import audio, audio10, cdc, hid, midi, msc, standard, util import hid_report_descriptors DEFAULT_INTERFACE_NAME = 'CircuitPython' ALL_DEVICES='CDC,MSC,AUDIO,HID' ALL_DEVICES_SET=frozenset(ALL_DEVICES.split(',')) DEFAULT_DEVICES='CDC,MSC,AUDIO,HID' ALL_HID_DEVICES='KEYBOARD,MOUSE,CONSUMER,SYS_CONTROL,GAMEPAD,DIGITIZER,XAC_COMPATIBLE_GAMEPAD,RAW' ALL_HID_DEVICES_SET=frozenset(ALL_HID_DEVICES.split(',')) # Digitizer works on Linux but conflicts with mouse, so omit it. DEFAULT_HID_DEVICES='KEYBOARD,MOUSE,CONSUMER,GAMEPAD' parser = argparse.ArgumentParser(description='Generate USB descriptors.') parser.add_argument('--highspeed', default=False, action='store_true', help='descriptor for highspeed device') parser.add_argument('--manufacturer', type=str, help='manufacturer of the device') parser.add_argument('--product', type=str, help='product name of the device') parser.add_argument('--vid', type=lambda x: int(x, 16), help='vendor id') parser.add_argument('--pid', type=lambda x: int(x, 16), help='product id') parser.add_argument('--serial_number_length', type=int, default=32, help='length needed for the serial number in digits') parser.add_argument('--devices', type=lambda l: tuple(l.split(',')), default=DEFAULT_DEVICES, help='devices to include in descriptor (AUDIO includes MIDI support)') parser.add_argument('--hid_devices', type=lambda l: tuple(l.split(',')), default=DEFAULT_HID_DEVICES, help='HID devices to include in HID report descriptor') parser.add_argument('--interface_name', type=str, help='The name/prefix to use in the interface descriptions', default=DEFAULT_INTERFACE_NAME) parser.add_argument('--no-renumber_endpoints', dest='renumber_endpoints', action='store_false', help='use to not renumber endpoint') parser.add_argument('--cdc_ep_num_notification', type=int, default=0, help='endpoint number of CDC NOTIFICATION') parser.add_argument('--cdc_ep_num_data_out', type=int, default=0, help='endpoint number of CDC DATA OUT') parser.add_argument('--cdc_ep_num_data_in', type=int, default=0, help='endpoint number of CDC DATA IN') parser.add_argument('--msc_ep_num_out', type=int, default=0, help='endpoint number of MSC OUT') parser.add_argument('--msc_ep_num_in', type=int, default=0, help='endpoint number of MSC IN') parser.add_argument('--hid_ep_num_out', type=int, default=0, help='endpoint number of HID OUT') parser.add_argument('--hid_ep_num_in', type=int, default=0, help='endpoint number of HID IN') parser.add_argument('--midi_ep_num_out', type=int, default=0, help='endpoint number of MIDI OUT') parser.add_argument('--midi_ep_num_in', type=int, default=0, help='endpoint number of MIDI IN') parser.add_argument('--output_c_file', type=argparse.FileType('w', encoding='UTF-8'), required=True) parser.add_argument('--output_h_file', type=argparse.FileType('w', encoding='UTF-8'), required=True) args = parser.parse_args() unknown_devices = list(frozenset(args.devices) - ALL_DEVICES_SET) if unknown_devices: raise ValueError("Unknown device(s)", unknown_devices) unknown_hid_devices = list(frozenset(args.hid_devices) - ALL_HID_DEVICES_SET) if unknown_hid_devices: raise ValueError("Unknown HID devices(s)", unknown_hid_devices) if not args.renumber_endpoints: if 'CDC' in args.devices: if args.cdc_ep_num_notification == 0: 
raise ValueError("CDC notification endpoint number must not be 0") elif args.cdc_ep_num_data_out == 0: raise ValueError("CDC data OUT endpoint number must not be 0") elif args.cdc_ep_num_data_in == 0: raise ValueError("CDC data IN endpoint number must not be 0") if 'MSC' in args.devices: if args.msc_ep_num_out == 0: raise ValueError("MSC endpoint OUT number must not be 0") elif args.msc_ep_num_in == 0: raise ValueError("MSC endpoint IN number must not be 0") if 'HID' in args.devices: if args.args.hid_ep_num_out == 0: raise ValueError("HID endpoint OUT number must not be 0") elif args.hid_ep_num_in == 0: raise ValueError("HID endpoint IN number must not be 0") if 'AUDIO' in args.devices: if args.args.midi_ep_num_out == 0: raise ValueError("MIDI endpoint OUT number must not be 0") elif args.midi_ep_num_in == 0: raise ValueError("MIDI endpoint IN number must not be 0") class StringIndex: """Assign a monotonically increasing index to each unique string. Start with 0.""" string_to_index = {} index_to_variable = {} strings = [] @classmethod def index(cls, string, *, variable_name = None): if string in cls.string_to_index: idx = cls.string_to_index[string] if not cls.index_to_variable[idx]: cls.index_to_variable[idx] = variable_name return idx else: idx = len(cls.strings) cls.string_to_index[string] = idx cls.strings.append(string) cls.index_to_variable[idx] = variable_name return idx @classmethod def strings_in_order(cls): return cls.strings # langid must be the 0th string descriptor LANGID_INDEX = StringIndex.index("\u0409", variable_name="language_id") assert LANGID_INDEX == 0 SERIAL_NUMBER_INDEX = StringIndex.index("S" * args.serial_number_length, variable_name="usb_serial_number") device = standard.DeviceDescriptor( description="top", idVendor=args.vid, idProduct=args.pid, iManufacturer=StringIndex.index(args.manufacturer), iProduct=StringIndex.index(args.product), iSerialNumber=SERIAL_NUMBER_INDEX) # Interface numbers are interface-set local and endpoints are interface local # until util.join_interfaces renumbers them. cdc_union = cdc.Union( description="CDC comm", bMasterInterface=0x00, # Adjust this after interfaces are renumbered. bSlaveInterface_list=[0x01]) # Adjust this after interfaces are renumbered. cdc_call_management = cdc.CallManagement( description="CDC comm", bmCapabilities=0x01, bDataInterface=0x01) # Adjust this after interfaces are renumbered. 
cdc_comm_interface = standard.InterfaceDescriptor( description="CDC comm", bInterfaceClass=cdc.CDC_CLASS_COMM, # Communications Device Class bInterfaceSubClass=cdc.CDC_SUBCLASS_ACM, # Abstract control model bInterfaceProtocol=cdc.CDC_PROTOCOL_NONE, iInterface=StringIndex.index("{} CDC control".format(args.interface_name)), subdescriptors=[ cdc.Header( description="CDC comm", bcdCDC=0x0110), cdc_call_management, cdc.AbstractControlManagement( description="CDC comm", bmCapabilities=0x02), cdc_union, standard.EndpointDescriptor( description="CDC comm in", bEndpointAddress=args.cdc_ep_num_notification | standard.EndpointDescriptor.DIRECTION_IN, bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT, wMaxPacketSize=0x0040, bInterval=0x10) ]) cdc_data_interface = standard.InterfaceDescriptor( description="CDC data", bInterfaceClass=cdc.CDC_CLASS_DATA, iInterface=StringIndex.index("{} CDC data".format(args.interface_name)), subdescriptors=[ standard.EndpointDescriptor( description="CDC data out", bEndpointAddress=args.cdc_ep_num_data_out | standard.EndpointDescriptor.DIRECTION_OUT, bmAttributes=standard.EndpointDescriptor.TYPE_BULK, bInterval=0, wMaxPacketSize=512 if args.highspeed else 64), standard.EndpointDescriptor( description="CDC data in", bEndpointAddress=args.cdc_ep_num_data_in | standard.EndpointDescriptor.DIRECTION_IN, bmAttributes=standard.EndpointDescriptor.TYPE_BULK, bInterval=0, wMaxPacketSize=512 if args.highspeed else 64), ]) cdc_interfaces = [cdc_comm_interface, cdc_data_interface] msc_interfaces = [ standard.InterfaceDescriptor( description="MSC", bInterfaceClass=msc.MSC_CLASS, bInterfaceSubClass=msc.MSC_SUBCLASS_TRANSPARENT, bInterfaceProtocol=msc.MSC_PROTOCOL_BULK, iInterface=StringIndex.index("{} Mass Storage".format(args.interface_name)), subdescriptors=[ standard.EndpointDescriptor( description="MSC in", bEndpointAddress=args.msc_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN, bmAttributes=standard.EndpointDescriptor.TYPE_BULK, bInterval=0, wMaxPacketSize=512 if args.highspeed else 64), standard.EndpointDescriptor( description="MSC out", bEndpointAddress=(args.msc_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT), bmAttributes=standard.EndpointDescriptor.TYPE_BULK, bInterval=0, wMaxPacketSize=512 if args.highspeed else 64), ] ) ] # When there's only one hid_device, it shouldn't have a report id. # Otherwise, report ids are assigned sequentially: # args.hid_devices[0] has report_id 1 # args.hid_devices[1] has report_id 2 # etc. report_ids = {} if len(args.hid_devices) == 1: name = args.hid_devices[0] combined_hid_report_descriptor = hid.ReportDescriptor( description=name, report_descriptor=bytes(hid_report_descriptors.REPORT_DESCRIPTOR_FUNCTIONS[name](0))) report_ids[name] = 0 else: report_id = 1 concatenated_descriptors = bytearray() for name in args.hid_devices: concatenated_descriptors.extend( bytes(hid_report_descriptors.REPORT_DESCRIPTOR_FUNCTIONS[name](report_id))) report_ids[name] = report_id report_id += 1 combined_hid_report_descriptor = hid.ReportDescriptor( description="MULTIDEVICE", report_descriptor=bytes(concatenated_descriptors)) # ASF4 expects keyboard and generic devices to have both in and out endpoints, # and will fail (possibly silently) if both are not supplied. 
hid_endpoint_in_descriptor = standard.EndpointDescriptor( description="HID in", bEndpointAddress=args.hid_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN, bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT, bInterval=8) hid_endpoint_out_descriptor = standard.EndpointDescriptor( description="HID out", bEndpointAddress=args.hid_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT, bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT, bInterval=8) hid_interfaces = [ standard.InterfaceDescriptor( description="HID Multiple Devices", bInterfaceClass=hid.HID_CLASS, bInterfaceSubClass=hid.HID_SUBCLASS_NOBOOT, bInterfaceProtocol=hid.HID_PROTOCOL_NONE, iInterface=StringIndex.index("{} HID".format(args.interface_name)), subdescriptors=[ hid.HIDDescriptor( description="HID", wDescriptorLength=len(bytes(combined_hid_report_descriptor))), hid_endpoint_in_descriptor, hid_endpoint_out_descriptor, ] ), ] # Audio! # In and out here are relative to CircuitPython # USB OUT -> midi_in_jack_emb -> midi_out_jack_ext -> CircuitPython midi_in_jack_emb = midi.InJackDescriptor( description="MIDI PC -> {}".format(args.interface_name), bJackType=midi.JACK_TYPE_EMBEDDED, iJack=StringIndex.index("{} usb_midi.ports[0]".format(args.interface_name))) midi_out_jack_ext = midi.OutJackDescriptor( description="MIDI data out to user code.", bJackType=midi.JACK_TYPE_EXTERNAL, input_pins=[(midi_in_jack_emb, 1)], iJack=0) # USB IN <- midi_out_jack_emb <- midi_in_jack_ext <- CircuitPython midi_in_jack_ext = midi.InJackDescriptor( description="MIDI data in from user code.", bJackType=midi.JACK_TYPE_EXTERNAL, iJack=0) midi_out_jack_emb = midi.OutJackDescriptor( description="MIDI PC <- {}".format(args.interface_name), bJackType=midi.JACK_TYPE_EMBEDDED, input_pins=[(midi_in_jack_ext, 1)], iJack=StringIndex.index("{} usb_midi.ports[1]".format(args.interface_name))) audio_midi_interface = standard.InterfaceDescriptor( description="Midi goodness", bInterfaceClass=audio.AUDIO_CLASS_DEVICE, bInterfaceSubClass=audio.AUDIO_SUBCLASS_MIDI_STREAMING, bInterfaceProtocol=audio.AUDIO_PROTOCOL_V1, iInterface=StringIndex.index("{} MIDI".format(args.interface_name)), subdescriptors=[ midi.Header( jacks_and_elements=[ midi_in_jack_emb, midi_in_jack_ext, midi_out_jack_emb, midi_out_jack_ext ], ), standard.EndpointDescriptor( description="MIDI data out to {}".format(args.interface_name), bEndpointAddress=args.midi_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT, bmAttributes=standard.EndpointDescriptor.TYPE_BULK, bInterval=0, wMaxPacketSize=512 if args.highspeed else 64), midi.DataEndpointDescriptor(baAssocJack=[midi_in_jack_emb]), standard.EndpointDescriptor( description="MIDI data in from {}".format(args.interface_name), bEndpointAddress=args.midi_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN, bmAttributes=standard.EndpointDescriptor.TYPE_BULK, bInterval = 0x0, wMaxPacketSize=512 if args.highspeed else 64), midi.DataEndpointDescriptor(baAssocJack=[midi_out_jack_emb]), ]) cs_ac_interface = audio10.AudioControlInterface( description="Empty audio control", audio_streaming_interfaces = [], midi_streaming_interfaces = [ audio_midi_interface ] ) audio_control_interface = standard.InterfaceDescriptor( description="All the audio", bInterfaceClass=audio.AUDIO_CLASS_DEVICE, bInterfaceSubClass=audio.AUDIO_SUBCLASS_CONTROL, bInterfaceProtocol=audio.AUDIO_PROTOCOL_V1, iInterface=StringIndex.index("{} Audio".format(args.interface_name)), subdescriptors=[ cs_ac_interface, ]) # Audio streaming interfaces must occur before MIDI ones. 
audio_interfaces = [audio_control_interface] + cs_ac_interface.audio_streaming_interfaces + cs_ac_interface.midi_streaming_interfaces interfaces_to_join = [] if 'CDC' in args.devices: interfaces_to_join.append(cdc_interfaces) if 'MSC' in args.devices: interfaces_to_join.append(msc_interfaces) if 'HID' in args.devices: interfaces_to_join.append(hid_interfaces) if 'AUDIO' in args.devices: interfaces_to_join.append(audio_interfaces) # util.join_interfaces() will renumber the endpoints to make them unique across descriptors, # and renumber the interfaces in order. But we still need to fix up certain # interface cross-references. interfaces = util.join_interfaces(interfaces_to_join, renumber_endpoints=args.renumber_endpoints) # Now adjust the CDC interface cross-references. cdc_union.bMasterInterface = cdc_comm_interface.bInterfaceNumber cdc_union.bSlaveInterface_list = [cdc_data_interface.bInterfaceNumber] cdc_call_management.bDataInterface = cdc_data_interface.bInterfaceNumber cdc_iad = standard.InterfaceAssociationDescriptor( description="CDC IAD", bFirstInterface=cdc_comm_interface.bInterfaceNumber, bInterfaceCount=len(cdc_interfaces), bFunctionClass=cdc.CDC_CLASS_COMM, # Communications Device Class bFunctionSubClass=cdc.CDC_SUBCLASS_ACM, # Abstract control model bFunctionProtocol=cdc.CDC_PROTOCOL_NONE) descriptor_list = [] if 'CDC' in args.devices: # Put the CDC IAD just before the CDC interfaces. # There appears to be a bug in the Windows composite USB driver that requests the # HID report descriptor with the wrong interface number if the HID interface is not given # first. However, it still fetches the descriptor anyway. We could reorder the interfaces but # the Windows 7 Adafruit_usbser.inf file thinks CDC is at Interface 0, so we'll leave it # there for backwards compatibility. descriptor_list.append(cdc_iad) descriptor_list.extend(cdc_interfaces) if 'MSC' in args.devices: descriptor_list.extend(msc_interfaces) if 'HID' in args.devices: descriptor_list.extend(hid_interfaces) if 'AUDIO' in args.devices: # Only add the control interface because other audio interfaces are managed by it to ensure the # correct ordering. descriptor_list.append(audio_control_interface) # Finally, build the composite descriptor. configuration = standard.ConfigurationDescriptor( description="Composite configuration", wTotalLength=(standard.ConfigurationDescriptor.bLength + sum([len(bytes(x)) for x in descriptor_list])), bNumInterfaces=len(interfaces)) descriptor_list.insert(0, configuration) string_descriptors = [standard.StringDescriptor(string) for string in StringIndex.strings_in_order()] serial_number_descriptor = string_descriptors[SERIAL_NUMBER_INDEX] c_file = args.output_c_file h_file = args.output_h_file c_file.write("""\ #include <stdint.h> #include "py/objtuple.h" #include "shared-bindings/usb_hid/Device.h" #include "{H_FILE_NAME}" """.format(H_FILE_NAME=h_file.name)) c_file.write("""\ // {DESCRIPTION} : {CLASS} """.format(DESCRIPTION=device.description, CLASS=device.__class__)) c_file.write("""\ const uint8_t usb_desc_dev[] = { """) for b in bytes(device): c_file.write("0x{:02x}, ".format(b)) c_file.write("""\ }; """) c_file.write("""\ const uint8_t usb_desc_cfg[] = { """) # Write out all the regular descriptors as one long array (that's how ASF4 does it). 
descriptor_length = 0 for descriptor in descriptor_list: c_file.write("""\ // {DESCRIPTION} : {CLASS} """.format(DESCRIPTION=descriptor.description, CLASS=descriptor.__class__)) b = bytes(descriptor) notes = descriptor.notes() i = 0 # This prints each subdescriptor on a separate line. n = 0 while i < len(b): length = b[i] for j in range(length): c_file.write("0x{:02x}, ".format(b[i + j])) c_file.write("// " + notes[n]) n += 1 c_file.write("\n") i += length descriptor_length += len(b) c_file.write("""\ }; """) pointers_to_strings = [] for idx, descriptor in enumerate(string_descriptors): c_file.write("""\ // {DESCRIPTION} : {CLASS} """.format(DESCRIPTION=descriptor.description, CLASS=descriptor.__class__)) b = bytes(descriptor) notes = descriptor.notes() i = 0 # This prints each subdescriptor on a separate line. variable_name = StringIndex.index_to_variable[idx] if not variable_name: variable_name = "string_descriptor{}".format(idx) const = "const " if variable_name == "usb_serial_number": const = "" c_file.write("""\ {const}uint16_t {NAME}[] = {{ """.format(const=const, NAME=variable_name)) pointers_to_strings.append("{name}".format(name=variable_name)) n = 0 while i < len(b): length = b[i] for j in range(length // 2): c_file.write("0x{:04x}, ".format(b[i + 2*j + 1] << 8 | b[i + 2*j])) n += 1 c_file.write("\n") i += length c_file.write("""\ }; """) c_file.write("""\ // array of pointer to string descriptors uint16_t const * const string_desc_arr [] = { """) c_file.write(""",\ """.join(pointers_to_strings)) c_file.write(""" }; """) c_file.write("\n") hid_descriptor_length = len(bytes(combined_hid_report_descriptor)) # Now we values we need for the .h file. h_file.write("""\ #ifndef MICROPY_INCLUDED_AUTOGEN_USB_DESCRIPTOR_H #define MICROPY_INCLUDED_AUTOGEN_USB_DESCRIPTOR_H #include <stdint.h> extern const uint8_t usb_desc_dev[{device_length}]; extern const uint8_t usb_desc_cfg[{configuration_length}]; extern uint16_t usb_serial_number[{serial_number_length}]; extern uint16_t const * const string_desc_arr [{string_descriptor_length}]; extern const uint8_t hid_report_descriptor[{hid_report_descriptor_length}]; #define CFG_TUSB_RHPORT0_MODE ({rhport0_mode}) #define USB_HID_NUM_DEVICES {hid_num_devices} // Vendor name included in Inquiry response, max 8 bytes #define CFG_TUD_MSC_VENDOR "{msc_vendor}" // Product name included in Inquiry response, max 16 bytes #define CFG_TUD_MSC_PRODUCT "{msc_product}" """ .format(serial_number_length=len(bytes(serial_number_descriptor)) // 2, device_length=len(bytes(device)), configuration_length=descriptor_length, max_configuration_length=max(hid_descriptor_length, descriptor_length), string_descriptor_length=len(pointers_to_strings), hid_report_descriptor_length=len(bytes(combined_hid_report_descriptor)), rhport0_mode='OPT_MODE_DEVICE | OPT_MODE_HIGH_SPEED' if args.highspeed else 'OPT_MODE_DEVICE', hid_num_devices=len(args.hid_devices), msc_vendor=args.manufacturer[:8], msc_product=args.product[:16])) # Write out the report descriptor and info c_file.write("""\ const uint8_t hid_report_descriptor[{HID_DESCRIPTOR_LENGTH}] = {{ """.format(HID_DESCRIPTOR_LENGTH=hid_descriptor_length)) for b in bytes(combined_hid_report_descriptor): c_file.write("0x{:02x}, ".format(b)) c_file.write("""\ }; """) # Write out USB HID report buffer definitions. 
for name in args.hid_devices: c_file.write("""\ static uint8_t {name}_report_buffer[{report_length}]; """.format(name=name.lower(), report_length=hid_report_descriptors.HID_DEVICE_DATA[name].report_length)) if hid_report_descriptors.HID_DEVICE_DATA[name].out_report_length > 0: c_file.write("""\ static uint8_t {name}_out_report_buffer[{report_length}]; """.format(name=name.lower(), report_length=hid_report_descriptors.HID_DEVICE_DATA[name].out_report_length)) # Write out table of device objects. c_file.write(""" usb_hid_device_obj_t usb_hid_devices[] = { """) for name in args.hid_devices: device_data = hid_report_descriptors.HID_DEVICE_DATA[name] out_report_buffer = '{}_out_report_buffer'.format(name.lower()) if device_data.out_report_length > 0 else 'NULL' c_file.write("""\ {{ .base = {{ .type = &usb_hid_device_type }}, .report_buffer = {name}_report_buffer, .report_id = {report_id}, .report_length = {report_length}, .usage_page = {usage_page:#04x}, .usage = {usage:#04x}, .out_report_buffer = {out_report_buffer}, .out_report_length = {out_report_length}, }}, """.format(name=name.lower(), report_id=report_ids[name], report_length=device_data.report_length, usage_page=device_data.usage_page, usage=device_data.usage, out_report_buffer=out_report_buffer, out_report_length=device_data.out_report_length)) c_file.write("""\ }; """) # Write out tuple of device objects. c_file.write(""" mp_obj_tuple_t common_hal_usb_hid_devices = {{ .base = {{ .type = &mp_type_tuple, }}, .len = {num_devices}, .items = {{ """.format(num_devices=len(args.hid_devices))) for idx in range(len(args.hid_devices)): c_file.write("""\ (mp_obj_t) &usb_hid_devices[{idx}], """.format(idx=idx)) c_file.write("""\ }, }; """) h_file.write("""\ #endif // MICROPY_INCLUDED_AUTOGEN_USB_DESCRIPTOR_H """)
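Note on the HID report-ID convention used by the generator above: a single HID device gets report ID 0 (no report-ID prefix in the descriptor), while two or more devices get sequential IDs starting at 1 in command-line order. The following is a minimal, self-contained sketch of just that rule; assign_report_ids is a hypothetical helper written for illustration and is not part of the generator or of hid_report_descriptors.

def assign_report_ids(hid_devices):
    """Return {device_name: report_id} following the generator's convention."""
    if len(hid_devices) == 1:
        # Single device: no report ID is emitted (encoded as 0 here).
        return {hid_devices[0]: 0}
    # Multiple devices: IDs 1, 2, 3, ... in the order given on the command line.
    return {name: i for i, name in enumerate(hid_devices, start=1)}

if __name__ == "__main__":
    print(assign_report_ids(("KEYBOARD",)))
    # {'KEYBOARD': 0}
    print(assign_report_ids(("KEYBOARD", "MOUSE", "CONSUMER", "GAMEPAD")))
    # {'KEYBOARD': 1, 'MOUSE': 2, 'CONSUMER': 3, 'GAMEPAD': 4}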
37.330769
135
0.697465
3,007
24,265
5.368806
0.142667
0.01053
0.019202
0.017096
0.499876
0.411174
0.318632
0.254274
0.207012
0.1569
0
0.010072
0.198063
24,265
649
136
37.38829
0.819569
0.101834
0
0.311024
0
0
0.199384
0.036565
0
0
0.001794
0
0.001969
1
0.003937
false
0
0.009843
0.001969
0.027559
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4ad7684b6c380ab5df46b6e04110892e72e1a9ab
7,500
py
Python
bclstm/train_meld.py
Columbine21/THUIAR-ERC
90e928e1ce777152e459dbc487acf04c32cbc645
[ "MIT" ]
1
2021-01-28T13:43:32.000Z
2021-01-28T13:43:32.000Z
bclstm/train_meld.py
Columbine21/THUIAR-ERC
90e928e1ce777152e459dbc487acf04c32cbc645
[ "MIT" ]
null
null
null
bclstm/train_meld.py
Columbine21/THUIAR-ERC
90e928e1ce777152e459dbc487acf04c32cbc645
[ "MIT" ]
null
null
null
from tqdm import tqdm import pandas as pd import numpy as np, argparse, time, pickle, random, os, datetime import torch import torch.optim as optim from model import MaskedNLLLoss, BC_LSTM from dataloader import MELDDataLoader from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report def setup_seed(seed): """ Manually Fix the random seed to get deterministic results. """ torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) np.random.seed(seed) random.seed(seed) torch.benchmark = False torch.backends.cudnn.deterministic = True def train_or_eval_model(model, loss_function, dataloader, epoch, optimizer=None, mode='train'): losses, preds, labels, masks, losses_sense = [], [], [], [], [] max_sequence_len = [] assert mode != 'train' or optimizer != None if mode == 'train': model.train() else: model.eval() with tqdm(dataloader) as td: for data in td: if mode == 'train': optimizer.zero_grad() textf, acouf, mask, label = [d.cuda() for d in data[:-1]] if args.cuda else data[:-1] log_prob, _ = model(textf, None, acouf, None, mask) lp_ = log_prob.transpose(0,1).contiguous().view(-1, log_prob.size()[2]) # batch*seq_len, n_classes labels_ = label.view(-1) # batch*seq_len loss = loss_function(lp_, labels_, mask) pred_ = torch.argmax(lp_,1) # batch*seq_len preds.append(pred_.data.cpu().numpy()) labels.append(labels_.data.cpu().numpy()) masks.append(mask.view(-1).cpu().numpy()) losses.append(loss.item()*masks[-1].sum()) if mode == 'train': total_loss = loss total_loss.backward() optimizer.step() if preds!=[]: preds = np.concatenate(preds) labels = np.concatenate(labels) masks = np.concatenate(masks) else: return float('nan'), float('nan'), float('nan'), [], [], [], float('nan'),[] avg_loss = round(np.sum(losses)/np.sum(masks), 4) avg_sense_loss = round(np.sum(losses_sense)/np.sum(masks), 4) avg_accuracy = round(accuracy_score(labels,preds, sample_weight=masks)*100, 2) avg_fscore = round(f1_score(labels,preds, sample_weight=masks, average='weighted')*100, 2) if mode == 'test': class_report = classification_report(labels, preds, sample_weight=masks, target_names=['neutral', 'surprise', 'fear', 'sadness', 'joy', 'disgust', 'anger'], digits=6) print(class_report) return avg_loss, avg_accuracy, labels, preds, masks, [avg_fscore] def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('--num_workers', type=int, default=0, help='num workers of loading data') # dataloader settings parser.add_argument('--batch-size', type=int, default=32, metavar='BS', help='batch size') parser.add_argument('--data_path', type=str, default='../TextCnn/dataset/MELD_features_raw.pkl') # model settings. parser.add_argument('--attention_type', type=str, default='general2') parser.add_argument('--utterance_dim', type=int, default=600, help='embedding dims to use') parser.add_argument('--emotion_state_dim', type=int, default=100) parser.add_argument('--hidden_layer_dim', type=int, default=100) parser.add_argument('--dropout', type=float, default=0.25) parser.add_argument('--n_classes', type=int, default=7) # late fusion module. parser.add_argument('--lateFusionModule', type=str, default='concat') parser.add_argument('--input_features', type=tuple, default=(100, 300)) parser.add_argument('--pre_fusion_hidden_dims', type=tuple, default=(24, 7)) parser.add_argument('--pre_fusion_dropout', type=float, default=0.4) parser.add_argument('--post_fusion_dropout', type=float, default=0.3) # train settings. 
parser.add_argument('--lr', type=float, default=1e-4, metavar='LR', help='learning rate') parser.add_argument('--l2', type=float, default=1e-5, metavar='L2', help='L2 regularization weight') parser.add_argument('--epochs', type=int, default=100, metavar='E', help='number of epochs') return parser.parse_args() if __name__ == '__main__': args = parse_args() args.cuda = torch.cuda.is_available() if args.cuda: print('Running on GPU') else: print('Running on CPU') for seed in [1, 11, 111, 1111, 11111]: setup_seed(seed) args.seed = seed print(args) model = BC_LSTM(args) print('MELD BC_LSTM MODULE ...') if args.cuda: model.cuda() loss_weights = torch.FloatTensor([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]) loss_function = MaskedNLLLoss(loss_weights.cuda() if args.cuda else loss_weights) optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2) lf = open('logs/cnn_meld_logs.txt', 'a') dataloader = MELDDataLoader(args) valid_losses, valid_fscores = [], [] test_fscores, test_accuracys, test_losses = [], [], [] best_loss, best_label, best_pred, best_mask = None, None, None, None for e in range(args.epochs): start_time = time.time() train_loss, train_acc, _, _, _, train_fscore = train_or_eval_model(model, loss_function, dataloader['train'], e, optimizer, mode='train') valid_loss, valid_acc, _, _, _, valid_fscore = train_or_eval_model(model, loss_function, dataloader['valid'], e, mode='valid') test_loss, test_acc, test_label, test_pred, test_mask, test_fscore = train_or_eval_model(model, loss_function, dataloader['test'], e, mode='test') valid_losses.append(valid_loss) valid_fscores.append(valid_fscore) test_losses.append(test_loss) test_accuracys.append(test_acc) test_fscores.append(test_fscore) x = 'epoch: {}, train_loss: {}, acc: {}, fscore: {}, valid_loss: {}, acc: {}, fscore: {}, test_loss: {}, acc: {}, fscore: {}, time: {} sec'.format(e+1, train_loss, train_acc, train_fscore, valid_loss, valid_acc, valid_fscore, test_loss, test_acc, test_fscore, round(time.time()-start_time, 2)) print (x) lf.write(x + '\n') valid_fscores = np.array(valid_fscores).transpose() test_fscores = np.array(test_fscores).transpose() # [1, epoches] test_accuracys = np.array(test_accuracys).transpose() # [epoches] f1_score1 = test_fscores[0][np.argmin(valid_losses)] acc_score1 = test_accuracys[np.argmin(valid_losses)] f1_score2 = test_fscores[0][np.argmax(valid_fscores[0])] acc_score2 = test_accuracys[np.argmax(valid_fscores[0])] scores = [acc_score1, f1_score1, acc_score2, f1_score2] scores = [str(item) for item in scores] print ('Test Scores: Weighted F1') print('@Best Valid Loss: Test Acc: {}, Test F1: {}'.format(acc_score1, f1_score1)) print('@Best Valid F1: Test Acc: {}, Test F1: {}'.format(acc_score2, f1_score2)) rf = open('results/cnn_meld_results.txt', 'a') rf.write('\t'.join(scores) + '\t' + str(args) + '\n') rf.close()
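The training loop above calls loss_function(lp_, labels_, mask), where MaskedNLLLoss is imported from model and not shown in this file. The sketch below shows one plausible way such a masked loss can be implemented (per-token NLL averaged over unmasked positions); it is an assumption for illustration, not the repository's actual implementation.

import torch
import torch.nn as nn

class MaskedNLLLossSketch(nn.Module):
    """Illustrative masked NLL loss: average token NLL over unmasked positions.

    Hypothetical stand-in for model.MaskedNLLLoss (not shown above);
    the real implementation may differ.
    """
    def __init__(self, weight=None):
        super().__init__()
        self.nll = nn.NLLLoss(weight=weight, reduction='none')

    def forward(self, log_prob, target, mask):
        # log_prob: (N, C) log-probabilities; target: (N,); mask: (batch, seq_len) of 0/1
        mask_ = mask.view(-1)                   # flatten to (N,)
        per_token = self.nll(log_prob, target)  # (N,) unreduced NLL
        return (per_token * mask_).sum() / mask_.sum()

if __name__ == "__main__":
    torch.manual_seed(0)
    log_prob = torch.log_softmax(torch.randn(6, 7), dim=-1)
    target = torch.randint(0, 7, (6,))
    mask = torch.tensor([[1, 1, 1, 0, 0, 0]], dtype=torch.float)
    print(MaskedNLLLossSketch()(log_prob, target, mask).item())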
41.666667
305
0.625867
969
7,500
4.633643
0.244582
0.034076
0.064365
0.005345
0.196437
0.141648
0.074388
0.061915
0.035857
0.003118
0
0.021155
0.231067
7,500
179
306
41.899441
0.757413
0.0284
0
0.062016
0
0.007752
0.12225
0.018564
0
0
0
0
0.007752
1
0.023256
false
0
0.062016
0
0.108527
0.069767
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4add579fe7516845335bc7bc7e7d3e61d0a5f88e
27,214
py
Python
rlbench/task_environment.py
robfiras/RLBench
97ab9526b6efb718f2b5aae40897ccd75aeff11e
[ "BSD-3-Clause" ]
null
null
null
rlbench/task_environment.py
robfiras/RLBench
97ab9526b6efb718f2b5aae40897ccd75aeff11e
[ "BSD-3-Clause" ]
null
null
null
rlbench/task_environment.py
robfiras/RLBench
97ab9526b6efb718f2b5aae40897ccd75aeff11e
[ "BSD-3-Clause" ]
null
null
null
import logging from typing import List, Callable import numpy as np from pyquaternion import Quaternion from pyrep import PyRep from pyrep.errors import IKError from pyrep.objects import Dummy, Object from rlbench import utils from rlbench.action_modes import ArmActionMode, ActionMode from rlbench.backend.exceptions import BoundaryError, WaypointError from rlbench.backend.observation import Observation from rlbench.backend.robot import Robot from rlbench.backend.scene import Scene from rlbench.backend.task import Task from rlbench.demo import Demo from rlbench.observation_config import ObservationConfig _TORQUE_MAX_VEL = 9999 _DT = 0.05 _MAX_RESET_ATTEMPTS = 40 _MAX_DEMO_ATTEMPTS = 10 class InvalidActionError(Exception): pass class TaskEnvironmentError(Exception): pass class TaskEnvironment(object): def __init__(self, pyrep: PyRep, robot: Robot, scene: Scene, task: Task, action_mode: ActionMode, dataset_root: str, obs_config: ObservationConfig, static_positions: bool = False, attach_grasped_objects: bool = True): self._pyrep = pyrep self._robot = robot self._scene = scene self._task = task self._variation_number = 0 self._action_mode = action_mode self._dataset_root = dataset_root self._obs_config = obs_config self._static_positions = static_positions self._attach_grasped_objects = attach_grasped_objects self._reset_called = False self._prev_ee_velocity = None self._enable_path_observations = False self._scene.load(self._task) self._pyrep.start() self._target_workspace_check = Dummy.create() self._last_e = None def get_name(self) -> str: return self._task.get_name() def sample_variation(self) -> int: self._variation_number = np.random.randint( 0, self._task.variation_count()) return self._variation_number def set_variation(self, v: int) -> None: if v >= self.variation_count(): raise TaskEnvironmentError( 'Requested variation %d, but there are only %d variations.' % ( v, self.variation_count())) self._variation_number = v def variation_count(self) -> int: return self._task.variation_count() def reset(self) -> (List[str], Observation): self._scene.reset() try: desc = self._scene.init_episode( self._variation_number, max_attempts=_MAX_RESET_ATTEMPTS, randomly_place=not self._static_positions) except (BoundaryError, WaypointError) as e: raise TaskEnvironmentError( 'Could not place the task %s in the scene. This should not ' 'happen, please raise an issues on this task.' 
% self._task.get_name()) from e self._reset_called = True # redundancy resolution self._last_e = None # Returns a list of descriptions and the first observation return desc, self._scene.get_observation() def get_observation(self) -> Observation: return self._scene.get_observation() def get_joint_upper_velocity_limits(self): return self._robot.arm.get_joint_upper_velocity_limits() def get_all_graspable_objects(self): return self._task.get_graspable_objects() def get_robot_visuals(self): return self._robot.arm.get_visuals() def get_all_graspable_object_positions(self, relative_to_cameras=False): """ returns the positions of all graspable objects relative to all enabled cameras """ objects = self._task.get_graspable_objects() positions = [] for ob in objects: if relative_to_cameras: positions.append(self._scene.get_object_position_relative_to_cameras(ob)) else: positions.append({"left_shoulder_camera": ob.get_position(), "right_shoulder_camera": ob.get_position(), "front_camera": ob.get_position(), "wrist_camera": ob.get_position()}) return positions def get_all_graspable_object_poses(self, relative_to_cameras=False): """ returns the poses of all graspable objects relative to all enabled cameras """ objects = self._task.get_graspable_objects() poses = [] for ob in objects: if relative_to_cameras: poses.append(self._scene.get_object_pose_relative_to_cameras(ob)) else: poses.append({"left_shoulder_camera": ob.get_pose(), "right_shoulder_camera": ob.get_pose(), "front_camera": ob.get_pose(), "wrist_camera": ob.get_pose()}) return poses def _assert_action_space(self, action, expected_shape): if np.shape(action) != expected_shape: raise RuntimeError( 'Expected the action shape to be: %s, but was shape: %s' % ( str(expected_shape), str(np.shape(action)))) def _assert_unit_quaternion(self, quat): if not np.isclose(np.linalg.norm(quat), 1.0): raise RuntimeError('Action contained non unit quaternion!') def _torque_action(self, action): self._robot.arm.set_joint_target_velocities( [(_TORQUE_MAX_VEL if t < 0 else -_TORQUE_MAX_VEL) for t in action]) self._robot.arm.set_joint_forces(np.abs(action)) def _ee_action(self, action, relative_to=None): self._assert_unit_quaternion(action[3:]) try: joint_positions = self._robot.arm.solve_ik( action[:3], quaternion=action[3:], relative_to=relative_to) self._robot.arm.set_joint_target_positions(joint_positions) except IKError as e: raise InvalidActionError('Could not find a path.') from e done = False prev_values = None # Move until reached target joint positions or until we stop moving # (e.g.
when we collide wth something) while not done: self._scene.step() cur_positions = self._robot.arm.get_joint_positions() reached = np.allclose(cur_positions, joint_positions, atol=0.01) not_moving = False if prev_values is not None: not_moving = np.allclose( cur_positions, prev_values, atol=0.001) prev_values = cur_positions done = reached or not_moving def _path_action(self, action, relative_to=None): self._assert_unit_quaternion(action[3:]) try: # Check if the target is in the workspace; if not, then quick reject # Only checks position, not rotation pos_to_check = action[:3] if relative_to is not None: self._target_workspace_check.set_position( pos_to_check, relative_to) pos_to_check = self._target_workspace_check.get_position() valid = self._scene.check_target_in_workspace(pos_to_check) if not valid: raise InvalidActionError('Target is outside of workspace.') path = self._robot.arm.get_path( action[:3], quaternion=action[3:], ignore_collisions=True, relative_to=relative_to) done = False observations = [] while not done: done = path.step() self._scene.step() if self._enable_path_observations: observations.append(self._scene.get_observation()) success, terminate = self._task.success() # If the task succeeds while traversing path, then break early if success: break observations.append(self._scene.get_observation()) return observations except IKError as e: raise InvalidActionError('Could not find a path.') from e def step(self, action, camcorder=None) -> (Observation, int, bool): # returns observation, reward, done, info if not self._reset_called: raise RuntimeError( "Call 'reset' before calling 'step' on a task.") # action should contain 1 extra value for gripper open close state arm_action = np.array(action[:-1]) ee_action = action[-1] if 0.0 > ee_action > 1.0: raise ValueError('Gripper action expected to be within 0 and 1.') # Discretize the gripper action current_ee = (1.0 if self._robot.gripper.get_open_amount()[0] > 0.9 else 0.0) if ee_action > 0.5: ee_action = 1.0 elif ee_action < 0.5: ee_action = 0.0 if current_ee != ee_action: arm_action = np.array([0.0]*7) if self._action_mode.arm == ArmActionMode.ABS_JOINT_VELOCITY: self._assert_action_space(arm_action, (len(self._robot.arm.joints),)) self._robot.arm.set_joint_target_velocities(arm_action) self._scene.step() # if needed save some images if camcorder: obs = self._scene.get_observation() camcorder.save(obs, self.get_robot_visuals(), self.get_all_graspable_objects()) elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_VELOCITY: self._assert_action_space(arm_action, (len(self._robot.arm.joints),)) cur = np.array(self._robot.arm.get_joint_velocities()) self._robot.arm.set_joint_target_velocities(cur + arm_action) self._scene.step() elif self._action_mode.arm == ArmActionMode.ABS_JOINT_POSITION: self._assert_action_space(arm_action, (len(self._robot.arm.joints),)) self._robot.arm.set_joint_target_positions(arm_action) self._scene.step() elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_POSITION: self._assert_action_space(arm_action, (len(self._robot.arm.joints),)) cur = np.array(self._robot.arm.get_joint_positions()) self._robot.arm.set_joint_target_positions(cur + arm_action) self._scene.step() elif self._action_mode.arm == ArmActionMode.ABS_JOINT_TORQUE: self._assert_action_space( arm_action, (len(self._robot.arm.joints),)) self._torque_action(arm_action) self._scene.step() elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_TORQUE: cur = np.array(self._robot.arm.get_joint_forces()) new_action = cur + arm_action 
self._torque_action(new_action) self._scene.step() elif self._action_mode.arm == ArmActionMode.ABS_EE_POSE_WORLD_FRAME: self._assert_action_space(arm_action, (7,)) self._ee_action(list(arm_action)) elif self._action_mode.arm == ArmActionMode.ABS_EE_POSE_PLAN_WORLD_FRAME: self._assert_action_space(arm_action, (7,)) self._path_observations = [] self._path_observations = self._path_action(list(arm_action)) elif self._action_mode.arm == ArmActionMode.DELTA_EE_POSE_PLAN_WORLD_FRAME: self._assert_action_space(arm_action, (7,)) a_x, a_y, a_z, a_qx, a_qy, a_qz, a_qw = arm_action x, y, z, qx, qy, qz, qw = self._robot.arm.get_tip().get_pose() new_rot = Quaternion(a_qw, a_qx, a_qy, a_qz) * Quaternion(qw, qx, qy, qz) qw, qx, qy, qz = list(new_rot) new_pose = [a_x + x, a_y + y, a_z + z] + [qx, qy, qz, qw] self._path_observations = [] self._path_observations = self._path_action(list(new_pose)) elif self._action_mode.arm == ArmActionMode.DELTA_EE_POSE_WORLD_FRAME: self._assert_action_space(arm_action, (7,)) a_x, a_y, a_z, a_qx, a_qy, a_qz, a_qw = arm_action x, y, z, qx, qy, qz, qw = self._robot.arm.get_tip().get_pose() new_rot = Quaternion(a_qw, a_qx, a_qy, a_qz) * Quaternion( qw, qx, qy, qz) qw, qx, qy, qz = list(new_rot) new_pose = [a_x + x, a_y + y, a_z + z] + [qx, qy, qz, qw] self._ee_action(list(new_pose)) elif self._action_mode.arm == ArmActionMode.EE_POSE_EE_FRAME: self._assert_action_space(arm_action, (7,)) self._ee_action( list(arm_action), relative_to=self._robot.arm.get_tip()) elif self._action_mode.arm == ArmActionMode.EE_POSE_PLAN_EE_FRAME: self._assert_action_space(arm_action, (7,)) self._path_observations = [] self._path_observations = self._path_action( list(arm_action), relative_to=self._robot.arm.get_tip()) else: raise RuntimeError('Unrecognised action mode.') if current_ee != ee_action: done = False while not done: done = self._robot.gripper.actuate(ee_action, velocity=0.2) self._pyrep.step() self._task.step() # if needed save some images if camcorder: obs = self._scene.get_observation() camcorder.save(obs, self.get_robot_visuals(), self.get_all_graspable_objects()) if ee_action == 0.0 and self._attach_grasped_objects: # If gripper close action, the check for grasp. for g_obj in self._task.get_graspable_objects(): self._robot.gripper.grasp(g_obj) else: # If gripper open action, the check for ungrasp. self._robot.gripper.release() success, terminate = self._task.success() task_reward = self._task.reward() reward = float(success) if task_reward is None else task_reward return self._scene.get_observation(), reward, terminate def resolve_redundancy_joint_velocities(self, actions, setup): """ Resolves redundant self-motion into the nullspace without changing the gripper tip position :param actions: Current actions without redundancy resolution. :param setup: Setup for redundancy resolution defining the mode, weighting etc. :return: Array of joint velocities, which move the robot's tip according to the provided actions yet push the joint position towards a reference position. """ # get the Jacobian J = self._robot.arm.get_jacobian() J = np.transpose(J) J = np.flip(J) J = J[-3:] # compute the pseudo inverse J_plus = np.linalg.pinv(J) # weighting if type(setup["W"]) is list: W = np.array(setup["W"]) elif setup["W"] is None: # use default weighting later W = None else: raise TypeError("Unsupported type %s for weighting vector." 
% type(setup["W"])) # compute the error if setup["mode"] == "reference_position": dL, L = self.get_loss_reference_position(setup["ref_position"], W) elif setup["mode"] == "collision_avoidance": dL, L = self.get_loss_collision_avoidance(W, setup) # compute the joint velocities q_dot_redundancy = setup["alpha"] * np.matmul((np.identity(len(self._robot.arm.joints)) - np.matmul(J_plus, J)), dL) # the provided jacobian seems to be inaccurate resulting in slight movement of the ee. This is why # the velocites are set to 0 once the error stops changing much. e = dL if setup["cut-off_error"] is not None: if self._last_e is not None: e_dot = np.sum(np.abs(e - self._last_e)) if self._last_e is not None and e_dot < setup["cut-off_error"]: q_dot_redundancy = np.array([0.0] * 7) self._last_e = e else: self._last_e = e return actions - q_dot_redundancy, L def get_loss_reference_position(self, ref_pos, W): """ Calculates the summed squarred error between the current and the reference consfiguration as well as its partial derivatives with respect to al q's for redundancy resoltuion. -> L(q) = 1/2 sum_{i=1}^N w_i (q_i - \tilde{q}_i)^2 :param ref_pos: Reference position. :param W: Weighting vector. :return: 1: The partial derivatives of the summed squarred error between the current and the reference configuration -> -> \nabla_q L(q) 2: Summed squarred error between the current and the reference configuration. -> L(q) """ if W is None: # default weighting W = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]) e = (self._robot.arm.get_joint_positions() - ref_pos) return e * W, 0.5*np.dot(e,e*W) def get_loss_collision_avoidance(self, W, setup): """ Calculates the loss as well as the respective partial derivatives for redundancy resoltuion with collision avoidance. This only works with tasks that include one obstacles! L(q) = \sum_{i=1}^N d(q)^{-1} :param W: Weighting vector. :return: 1: The partial derivatives of the loss above. 
-> \nable_q L(q) 2: The loss shown above.-> L(q) """ # get the position of the object p_obs = self._task.obstacle.get_position() + np.array([0, 0, 0.33]) - self._robot.arm.joints[0].get_position() #p_obs = self._task.obstacle.get_position() p_obs = np.append(p_obs, [1]) # get the transformation matrices, their derivatives, and the positions of the links A_1, A_2, A_3, A_4, A_5, A_6, A_7 = self._robot.get_transformation_matrices() dA_1, dA_2, dA_3, dA_4, dA_5, dA_6, dA_7 = self._robot.get_transformation_matrices_derivatives() p_1, p_2, p_3, p_4, p_5, p_6, p_7 = self._robot.get_link_positions_in_ref_frames() # we use reciprocal of the distance between each link and an obstacle as our Loss # the chain rule delivers: d/dq L = (p_i^0 (q_1,..., q_i) - p_obs)^T * d/dq (p_i^0 (q_1,..., q_i) - p_obs) # where p_i^0 = (\prod_{j=1}^i A_j^{j-1}(q_j)) * p_i # as the left side of d/dq L is used often, let's calculate it in advance d_1_T = np.transpose(A_1.dot(p_1) - p_obs) d_2_T = np.transpose(A_1.dot(A_2).dot(p_2) - p_obs) d_3_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(p_3) - p_obs) d_4_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(p_4) - p_obs) d_5_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(p_5) - p_obs) d_6_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6) - p_obs) d_7_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7) - p_obs) # now we can calculate the derivatives in each dimension dq_1 = -np.matmul(d_1_T, dA_1.dot(p_1)) + \ -np.matmul(d_2_T, dA_1.dot(A_2).dot(p_2)) + \ -np.matmul(d_3_T, dA_1.dot(A_2).dot(A_3).dot(p_3)) + \ -np.matmul(d_4_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(p_4)) + \ -np.matmul(d_5_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(p_5)) + \ -np.matmul(d_6_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)) + \ -np.matmul(d_7_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7)) dq_2 = -np.matmul(d_2_T, A_1.dot(dA_2).dot(p_2)) + \ -np.matmul(d_3_T, A_1.dot(dA_2).dot(A_3).dot(p_3)) + \ -np.matmul(d_4_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(p_4)) + \ -np.matmul(d_5_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(A_5).dot(p_5)) + \ -np.matmul(d_6_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)) + \ -np.matmul(d_7_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7)) dq_3 = -np.matmul(d_3_T, A_1.dot(A_2).dot(dA_3).dot(p_3)) + \ -np.matmul(d_4_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(p_4)) + \ -np.matmul(d_5_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(p_5)) + \ -np.matmul(d_6_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)) + \ -np.matmul(d_7_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7)) dq_4 = -np.matmul(d_4_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(p_4)) + \ -np.matmul(d_5_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(p_5)) + \ -np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(A_6).dot(p_6)) + \ -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7)) dq_5 = -np.matmul(d_5_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(p_5)) + \ -np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(A_6).dot(p_6)) + \ -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(A_6).dot(A_7).dot(p_7)) dq_6 = -np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(dA_6).dot(p_6)) + \ -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(dA_6).dot(A_7).dot(p_7)) dq_7 = -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(dA_7).dot(p_7)) if W is 
None: # default weighting vector -> based on the reciprocal of the distance. The greater the distance the smaller # the weight. That is, it is concentrated on close objects. W = np.array([1 / np.sum(np.square(d_1_T)), 1 / np.sum(np.square(d_2_T)) , 1 / np.sum(np.square(d_3_T)) , 1 / np.sum(np.square(d_4_T)) , 1 / np.sum(np.square(d_5_T)) , 1 / np.sum(np.square(d_6_T)) , 1 / np.sum(np.square(d_7_T)) ]) * 0.1 # --- scaling to keep distance to joint limits --- # get the minimum distance of each joint to its limit joint_positions = np.array([j.get_joint_position() for j in self._robot.arm.joints]) lower_joint_limits = np.array(setup["lower_joint_pos_limit"]) upper_joint_limits = np.array(setup["upper_joint_pos_limit"]) min_j_distances = [np.minimum(u-j, j-l) for l,u,j in zip(lower_joint_limits, upper_joint_limits, joint_positions)] # start scaling down error when joint limit is 15° away. # Scaling is done linearly from 0 to 1 for 0° <= d <= 15° rad_thres = 15*(np.pi/180) W *= np.array([ np.minimum((1/rad_thres)*d, 1.0) for d in min_j_distances]) # concatenate the derivaties to vector and apply weightig dL = np.array([dq_1, dq_2, dq_3, dq_4, dq_5, dq_6, dq_7])*W # calculate the loss L = np.sqrt(np.dot(d_1_T, d_1_T))*W[0] \ + np.sqrt(np.dot(d_2_T, d_2_T))*W[1] \ + np.sqrt(np.dot(d_3_T, d_3_T))*W[2] \ + np.sqrt(np.dot(d_4_T, d_4_T))*W[3] \ + np.sqrt(np.dot(d_5_T, d_5_T))*W[4] \ + np.sqrt(np.dot(d_6_T, d_6_T))*W[5] \ + np.sqrt(np.dot(d_7_T, d_7_T))*W[6] return dL, L def enable_path_observations(self, value: bool) -> None: if (self._action_mode.arm != ArmActionMode.DELTA_EE_POSE_PLAN_WORLD_FRAME and self._action_mode.arm != ArmActionMode.ABS_EE_POSE_PLAN_WORLD_FRAME and self._action_mode.arm != ArmActionMode.EE_POSE_PLAN_EE_FRAME): raise RuntimeError('Only available in DELTA_EE_POSE_PLAN or ' 'ABS_EE_POSE_PLAN action mode.') self._enable_path_observations = value def get_path_observations(self): if (self._action_mode.arm != ArmActionMode.DELTA_EE_POSE_PLAN_WORLD_FRAME and self._action_mode.arm != ArmActionMode.ABS_EE_POSE_PLAN_WORLD_FRAME and self._action_mode.arm != ArmActionMode.EE_POSE_PLAN_EE_FRAME): raise RuntimeError('Only available in DELTA_EE_POSE_PLAN or ' 'ABS_EE_POSE_PLAN action mode.') return self._path_observations def get_demos(self, amount: int, live_demos: bool = False, image_paths: bool = False, callable_each_step: Callable[[Observation], None] = None, max_attempts: int = _MAX_DEMO_ATTEMPTS, ) -> List[Demo]: """Negative means all demos""" if not live_demos and (self._dataset_root is None or len(self._dataset_root) == 0): raise RuntimeError( "Can't ask for a stored demo when no dataset root provided.") if not live_demos: if self._dataset_root is None or len(self._dataset_root) == 0: raise RuntimeError( "Can't ask for stored demo when no dataset root provided.") demos = utils.get_stored_demos( amount, image_paths, self._dataset_root, self._variation_number, self._task.get_name(), self._obs_config) else: ctr_loop = self._robot.arm.joints[0].is_control_loop_enabled() self._robot.arm.set_control_loop_enabled(True) demos = self._get_live_demos( amount, callable_each_step, max_attempts) self._robot.arm.set_control_loop_enabled(ctr_loop) return demos def _get_live_demos(self, amount: int, callable_each_step: Callable[ [Observation], None] = None, max_attempts: int = _MAX_DEMO_ATTEMPTS) -> List[Demo]: demos = [] for i in range(amount): attempts = max_attempts while attempts > 0: random_seed = np.random.get_state() self.reset() logging.info('Collecting demo %d' % i) try: demo = 
self._scene.get_demo( callable_each_step=callable_each_step) demo.random_seed = random_seed demos.append(demo) break except Exception as e: attempts -= 1 logging.info('Bad demo. ' + str(e)) if attempts <= 0: raise RuntimeError( 'Could not collect demos. Maybe a problem with the task?') return demos def reset_to_demo(self, demo: Demo) -> (List[str], Observation): demo.restore_state() return self.reset()
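resolve_redundancy_joint_velocities above pushes self-motion into the Jacobian's nullspace via the projector (I - J_plus @ J), so the added joint velocities do not move the end-effector tip. A small, self-contained numpy check of that property follows; the Jacobian here is random and stands in for the 3x7 positional Jacobian extracted in the method above, so the numbers are illustrative only.

import numpy as np

# Verify that the nullspace projector N = I - pinv(J) @ J maps any
# joint-space vector to velocities producing (numerically) zero tip velocity.
rng = np.random.default_rng(0)
J = rng.standard_normal((3, 7))        # task (tip) Jacobian: 3 task dims x 7 joints
J_plus = np.linalg.pinv(J)             # pseudo-inverse, 7x3
N = np.eye(7) - J_plus @ J             # nullspace projector, 7x7

dL = rng.standard_normal(7)            # gradient of some secondary objective
q_dot_null = N @ dL                    # redundant self-motion added to the action
print(np.linalg.norm(J @ q_dot_null))  # ~1e-15: the nullspace term causes no tip motion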
44.833608
124
0.596017
3,931
27,214
3.830323
0.111422
0.029754
0.025503
0.010759
0.460384
0.406588
0.370924
0.326426
0.319918
0.314538
0
0.023105
0.295473
27,214
606
125
44.907591
0.762061
0.128831
0
0.237327
0
0
0.045825
0.003564
0
0
0
0
0.034562
1
0.059908
false
0.004608
0.036866
0.013825
0.147465
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4add672a5d82fff4c573be986ee4381ccf2640c3
11,795
py
Python
tests/generic_relations/test_forms.py
Yoann-Vie/esgi-hearthstone
115d03426c7e8e80d89883b78ac72114c29bed12
[ "PSF-2.0", "BSD-3-Clause" ]
null
null
null
tests/generic_relations/test_forms.py
Yoann-Vie/esgi-hearthstone
115d03426c7e8e80d89883b78ac72114c29bed12
[ "PSF-2.0", "BSD-3-Clause" ]
null
null
null
tests/generic_relations/test_forms.py
Yoann-Vie/esgi-hearthstone
115d03426c7e8e80d89883b78ac72114c29bed12
[ "PSF-2.0", "BSD-3-Clause" ]
null
null
null
from django import forms from django.contrib.contenttypes.forms import generic_inlineformset_factory from django.contrib.contenttypes.models import ContentType from django.db import models from django.test import TestCase from django.test.utils import isolate_apps from .models import ( Animal, ForProxyModelModel, Gecko, Mineral, ProxyRelatedModel, TaggedItem, ) class CustomWidget(forms.TextInput): pass class TaggedItemForm(forms.ModelForm): class Meta: model = TaggedItem fields = '__all__' widgets = {'tag': CustomWidget} class GenericInlineFormsetTests(TestCase): def test_output(self): GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1) formset = GenericFormSet() self.assertHTMLEqual( ''.join(form.as_p() for form in formset.forms), """<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag"> Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text" name="generic_relations-taggeditem-content_type-object_id-0-tag" maxlength="50"></p> <p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label> <input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE" id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE"> <input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id" id="id_generic_relations-taggeditem-content_type-object_id-0-id"></p>""" ) formset = GenericFormSet(instance=Animal()) self.assertHTMLEqual( ''.join(form.as_p() for form in formset.forms), """<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag"> Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text" name="generic_relations-taggeditem-content_type-object_id-0-tag" maxlength="50"></p> <p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label> <input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE" id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE"><input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id" id="id_generic_relations-taggeditem-content_type-object_id-0-id"></p>""" ) platypus = Animal.objects.create( common_name='Platypus', latin_name='Ornithorhynchus anatinus', ) platypus.tags.create(tag='shiny') GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1) formset = GenericFormSet(instance=platypus) tagged_item_id = TaggedItem.objects.get(tag='shiny', object_id=platypus.id).id self.assertHTMLEqual( ''.join(form.as_p() for form in formset.forms), """<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text" name="generic_relations-taggeditem-content_type-object_id-0-tag" value="shiny" maxlength="50"></p> <p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label> <input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE" id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE"> <input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id" value="%s" id="id_generic_relations-taggeditem-content_type-object_id-0-id"></p> <p><label for="id_generic_relations-taggeditem-content_type-object_id-1-tag">Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-1-tag" type="text" 
name="generic_relations-taggeditem-content_type-object_id-1-tag" maxlength="50"></p> <p><label for="id_generic_relations-taggeditem-content_type-object_id-1-DELETE">Delete:</label> <input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-1-DELETE" id="id_generic_relations-taggeditem-content_type-object_id-1-DELETE"> <input type="hidden" name="generic_relations-taggeditem-content_type-object_id-1-id" id="id_generic_relations-taggeditem-content_type-object_id-1-id"></p>""" % tagged_item_id ) lion = Animal.objects.create(common_name='Lion', latin_name='Panthera leo') formset = GenericFormSet(instance=lion, prefix='x') self.assertHTMLEqual( ''.join(form.as_p() for form in formset.forms), """<p><label for="id_x-0-tag">Tag:</label> <input id="id_x-0-tag" type="text" name="x-0-tag" maxlength="50"></p> <p><label for="id_x-0-DELETE">Delete:</label> <input type="checkbox" name="x-0-DELETE" id="id_x-0-DELETE"> <input type="hidden" name="x-0-id" id="id_x-0-id"></p>""" ) def test_options(self): TaggedItemFormSet = generic_inlineformset_factory( TaggedItem, can_delete=False, exclude=['tag'], extra=3, ) platypus = Animal.objects.create(common_name='Platypus', latin_name='Ornithorhynchus anatinus') harmless = platypus.tags.create(tag='harmless') mammal = platypus.tags.create(tag='mammal') # Works without a queryset. formset = TaggedItemFormSet(instance=platypus) self.assertEqual(len(formset.forms), 5) self.assertHTMLEqual( formset.forms[0].as_p(), '<input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id" value="%s" ' 'id="id_generic_relations-taggeditem-content_type-object_id-0-id">' % harmless.pk ) self.assertEqual(formset.forms[0].instance, harmless) self.assertEqual(formset.forms[1].instance, mammal) self.assertIsNone(formset.forms[2].instance.pk) # A queryset can be used to alter display ordering. formset = TaggedItemFormSet(instance=platypus, queryset=TaggedItem.objects.order_by('-tag')) self.assertEqual(len(formset.forms), 5) self.assertEqual(formset.forms[0].instance, mammal) self.assertEqual(formset.forms[1].instance, harmless) self.assertIsNone(formset.forms[2].instance.pk) # A queryset that omits items. formset = TaggedItemFormSet(instance=platypus, queryset=TaggedItem.objects.filter(tag__startswith='harm')) self.assertEqual(len(formset.forms), 4) self.assertEqual(formset.forms[0].instance, harmless) self.assertIsNone(formset.forms[1].instance.pk) def test_get_queryset_ordering(self): """ BaseGenericInlineFormSet.get_queryset() adds default ordering, if needed. 
""" inline_formset = generic_inlineformset_factory(TaggedItem, exclude=('tag',)) formset = inline_formset(instance=Gecko.objects.create()) self.assertIs(formset.get_queryset().ordered, True) def test_initial(self): quartz = Mineral.objects.create(name='Quartz', hardness=7) GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1) ctype = ContentType.objects.get_for_model(quartz) initial_data = [{ 'tag': 'lizard', 'content_type': ctype.pk, 'object_id': quartz.pk, }] formset = GenericFormSet(initial=initial_data) self.assertEqual(formset.forms[0].initial, initial_data[0]) def test_meta_widgets(self): """TaggedItemForm has a widget defined in Meta.""" Formset = generic_inlineformset_factory(TaggedItem, TaggedItemForm) form = Formset().forms[0] self.assertIsInstance(form['tag'].field.widget, CustomWidget) @isolate_apps('generic_relations') def test_incorrect_content_type(self): class BadModel(models.Model): content_type = models.PositiveIntegerField() msg = "fk_name 'generic_relations.BadModel.content_type' is not a ForeignKey to ContentType" with self.assertRaisesMessage(Exception, msg): generic_inlineformset_factory(BadModel, TaggedItemForm) def test_save_new_uses_form_save(self): class SaveTestForm(forms.ModelForm): def save(self, *args, **kwargs): self.instance.saved_by = 'custom method' return super().save(*args, **kwargs) Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', form=SaveTestForm) instance = ProxyRelatedModel.objects.create() data = { 'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '', 'form-0-title': 'foo', } formset = Formset(data, instance=instance, prefix='form') self.assertTrue(formset.is_valid()) new_obj = formset.save()[0] self.assertEqual(new_obj.saved_by, 'custom method') def test_save_new_for_proxy(self): Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', for_concrete_model=False) instance = ProxyRelatedModel.objects.create() data = { 'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '', 'form-0-title': 'foo', } formset = Formset(data, instance=instance, prefix='form') self.assertTrue(formset.is_valid()) new_obj, = formset.save() self.assertEqual(new_obj.obj, instance) def test_save_new_for_concrete(self): Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', for_concrete_model=True) instance = ProxyRelatedModel.objects.create() data = { 'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '', 'form-0-title': 'foo', } formset = Formset(data, instance=instance, prefix='form') self.assertTrue(formset.is_valid()) new_obj, = formset.save() self.assertNotIsInstance(new_obj.obj, ProxyRelatedModel) def test_initial_count(self): GenericFormSet = generic_inlineformset_factory(TaggedItem) data = { 'form-TOTAL_FORMS': '3', 'form-INITIAL_FORMS': '3', 'form-MAX_NUM_FORMS': '', } formset = GenericFormSet(data=data, prefix='form') self.assertEqual(formset.initial_form_count(), 3) formset = GenericFormSet(data=data, prefix='form', save_as_new=True) self.assertEqual(formset.initial_form_count(), 0) def test_save_as_new(self): """ The save_as_new parameter creates new items that are associated with the object. 
""" lion = Animal.objects.create(common_name='Lion', latin_name='Panthera leo') yellow = lion.tags.create(tag='yellow') hairy = lion.tags.create(tag='hairy') GenericFormSet = generic_inlineformset_factory(TaggedItem) data = { 'form-TOTAL_FORMS': '3', 'form-INITIAL_FORMS': '2', 'form-MAX_NUM_FORMS': '', 'form-0-id': str(yellow.pk), 'form-0-tag': 'hunts', 'form-1-id': str(hairy.pk), 'form-1-tag': 'roars', } formset = GenericFormSet(data, instance=lion, prefix='form', save_as_new=True) self.assertTrue(formset.is_valid()) tags = formset.save() self.assertEqual([tag.tag for tag in tags], ['hunts', 'roars']) hunts, roars = tags self.assertSequenceEqual(lion.tags.order_by('tag'), [hairy, hunts, roars, yellow])
49.145833
115
0.667147
1,406
11,795
5.393314
0.130868
0.055123
0.116577
0.147963
0.655545
0.626533
0.583278
0.531452
0.513649
0.476197
0
0.009789
0.203222
11,795
239
116
49.351464
0.797084
0.025858
0
0.319767
0
0.005814
0.110581
0.020673
0
0
0
0
0.180233
1
0.069767
false
0.005814
0.040698
0
0.151163
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4add77a89d96d39ac35506a52c38ceda993b7f43
3,192
py
Python
src/sage/rings/polynomial/pbori/fglm.py
tamnguyen135/sage
2c87dc16f26604033bb1b2d1dc6796d279c88b16
[ "BSL-1.0" ]
1
2020-11-12T04:06:19.000Z
2020-11-12T04:06:19.000Z
src/sage/rings/polynomial/pbori/fglm.py
tamnguyen135/sage
2c87dc16f26604033bb1b2d1dc6796d279c88b16
[ "BSL-1.0" ]
null
null
null
src/sage/rings/polynomial/pbori/fglm.py
tamnguyen135/sage
2c87dc16f26604033bb1b2d1dc6796d279c88b16
[ "BSL-1.0" ]
null
null
null
from .PyPolyBoRi import (BooleSet, Polynomial, BoolePolynomialVector,
                         FGLMStrategy)


def _fglm(I, from_ring, to_ring):
    r"""
    Unchecked variant of fglm.
    """
    vec = BoolePolynomialVector(I)
    return FGLMStrategy(from_ring, to_ring, vec).main()


def fglm(I, from_ring, to_ring):
    r"""
    Convert a *reduced* Groebner basis in from_ring to a Groebner basis in
    to_ring.

    It acts independently of the global ring, which is restored at the end of
    the computation.

    TESTS::

        sage: from sage.rings.polynomial.pbori import *
        sage: from sage.rings.polynomial.pbori.PyPolyBoRi import OrderCode
        sage: dp_asc = OrderCode.dp_asc
        sage: r=declare_ring(['x','y','z'],dict())
        sage: old_ring = r
        sage: new_ring = old_ring.clone(ordering=dp_asc)
        sage: (x,y,z) = [old_ring.variable(i) for i in range(3)]
        sage: ideal=[x+z, y+z]# lp Groebner basis
        sage: from sage.rings.polynomial.pbori.fglm import fglm
        sage: list(fglm(ideal, old_ring, new_ring))
        [y + x, z + x]
    """
    for poly in I:
        if poly.ring().id() != from_ring.id():
            raise ValueError("Ideal I must be from the first ring argument")
    return _fglm(I, from_ring, to_ring)


def vars_real_divisors(monomial, monomial_set):
    r"""
    Return all elements of monomial_set which, multiplied by a variable,
    result in monomial.

    TESTS::

        sage: from sage.rings.polynomial.pbori.pbori import *
        sage: from sage.rings.polynomial.pbori.PyPolyBoRi import OrderCode
        sage: dp_asc = OrderCode.dp_asc
        sage: from sage.rings.polynomial.pbori.PyPolyBoRi import Ring
        sage: r=Ring(1000)
        sage: x = r.variable
        sage: b=BooleSet([x(1)*x(2),x(2)])
        sage: from sage.rings.polynomial.pbori.fglm import vars_real_divisors
        sage: vars_real_divisors(x(1)*x(2)*x(3),b)
        {{x(1),x(2)}}
    """
    return BooleSet(Polynomial(monomial_set.divisors_of(monomial)).
                    graded_part(monomial.deg() - 1))


def m_k_plus_one(completed_elements, variables):
    r"""
    Calculate $m_{k+1}$ from the FGLM algorithm as described in Wichmann's
    diploma thesis.

    It would be nice to be able to efficiently extract the smallest term of a
    polynomial.

    TESTS::

        sage: from sage.rings.polynomial.pbori.pbori import *
        sage: from sage.rings.polynomial.pbori.PyPolyBoRi import OrderCode
        sage: dp_asc = OrderCode.dp_asc
        sage: from sage.rings.polynomial.pbori.PyPolyBoRi import Ring
        sage: r=Ring(1000)
        sage: x = r.variable
        sage: from sage.rings.polynomial.pbori.PyPolyBoRi import Monomial
        sage: s=BooleSet([x(1)*x(2),x(1),x(2),Monomial(r),x(3)])
        sage: from sage.rings.polynomial.pbori.fglm import m_k_plus_one
        sage: variables=BooleSet([x(1),x(2),x(3)])
        sage: m_k_plus_one(s,variables)
        x(2)*x(3)
        sage: r2 = r.clone(ordering=dp_asc)
        sage: m_k_plus_one(r2(s).set(),r2(variables).set())
        x(1)*x(3)
    """
    return sorted(completed_elements.cartesian_product(variables).diff(
        completed_elements))[0]
37.116279
95
0.643797
464
3,192
4.30819
0.24569
0.048024
0.072036
0.102051
0.452226
0.415208
0.382691
0.366683
0.258629
0.258629
0
0.014421
0.239662
3,192
85
96
37.552941
0.80923
0.671679
0
0
0
0
0.055416
0
0
0
0
0
0
1
0.2
false
0
0.05
0
0.45
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4add8cc9f3e45d7c32a6f558ec3d3dca3bae287a
797
py
Python
ferry/embed/umap_reduce.py
coursetable/ferry
f369b9588557c359af8589f2575a03493d6b08b6
[ "MIT" ]
4
2020-11-12T19:37:06.000Z
2021-12-14T01:38:39.000Z
ferry/embed/umap_reduce.py
coursetable/ferry
f369b9588557c359af8589f2575a03493d6b08b6
[ "MIT" ]
96
2020-09-08T05:17:17.000Z
2022-03-31T23:12:51.000Z
ferry/embed/umap_reduce.py
coursetable/ferry
f369b9588557c359af8589f2575a03493d6b08b6
[ "MIT" ]
2
2021-03-03T23:02:40.000Z
2021-06-17T23:33:05.000Z
""" Uses UMAP (https://umap-learn.readthedocs.io/en/latest/index.html) to reduce course embeddings to two dimensions for visualization. """ import pandas as pd import umap from sklearn.preprocessing import StandardScaler from ferry import config courses = pd.read_csv( config.DATA_DIR / "course_embeddings/courses_deduplicated.csv", index_col=0, ) # mypy: ignore-errors embeddings = pd.read_hdf( config.DATA_DIR / "course_embeddings/fasttext_embeddings.h5", key="embeddings", ) embeddings = StandardScaler().fit_transform(embeddings) reducer = umap.UMAP() umap_embeddings = reducer.fit_transform(embeddings) courses["umap1"] = umap_embeddings[:, 0] courses["umap2"] = umap_embeddings[:, 1] courses.to_csv(config.DATA_DIR / "course_embeddings/courses_deduplicated_umap.csv")
25.709677
83
0.771644
103
797
5.786408
0.475728
0.107383
0.065436
0.095638
0.219799
0.171141
0.171141
0.171141
0
0
0
0.008487
0.112923
797
30
84
26.566667
0.834512
0.190715
0
0
0
0
0.233909
0.202512
0
0
0
0
0
1
0
false
0
0.222222
0
0.222222
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4ade3cbddad00f03add91a88139ed29e5accd6ee
1,359
py
Python
flora_fauna.py
zhumakova/ClassProject
b869258706dae7c8e8ab723c61a45fd78e26494f
[ "MIT" ]
null
null
null
flora_fauna.py
zhumakova/ClassProject
b869258706dae7c8e8ab723c61a45fd78e26494f
[ "MIT" ]
null
null
null
flora_fauna.py
zhumakova/ClassProject
b869258706dae7c8e8ab723c61a45fd78e26494f
[ "MIT" ]
null
null
null
import inheritance


class Flora:
    def __init__(self, name, lifespan, habitat, plant_type):
        self.name = name
        self.lifespan = lifespan
        self.habitat = habitat
        self.plant_type = plant_type
        self.plant_size = 0


class Fauna:
    def __init__(self, name):
        self.name = name


class Predator(Fauna):
    def __init__(self, name:str, predator_type:str, what_eats:str, lifespan:int):
        super().__init__(name)
        self.predator_type = predator_type
        self.what_eats = what_eats
        self.lifespan = lifespan

    # def check_planet(self,planet:tsk4.Planet):
    #     if planet.fauna and not planet.humanity:
    #         print('YES')
    #     else:
    #         print('NO')


class Mammal(Fauna):
    def __init__(self, name, mammal_type, lifespan):
        super().__init__(name)
        self.mammal_type = mammal_type
        self.lifespan = lifespan

    def check_planet(self,planet:inheritance.Planet):
        if planet.flora and planet.fauna and not planet.humanity:
            planet.add_fauna(self)


shark = Predator('baby shark','sea','all',20)
giraffe = Mammal('malwan','earth',20)
giraffe.check_planet(inheritance.friendly)
marti = Mammal('marti','earth',20)
marti.check_planet(inheritance.friendly)
print(inheritance.friendly.__dict__)
print(inheritance.Planet.__dict__)
23.033898
81
0.659308
167
1,359
5.065868
0.269461
0.056738
0.052009
0.070922
0.248227
0.177305
0.104019
0.104019
0
0
0
0.007648
0.230316
1,359
58
82
23.431034
0.801147
0.101545
0
0.21875
0
0
0.030503
0
0
0
0
0
0
1
0.15625
false
0
0.03125
0
0.3125
0.0625
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4ae1184aa79f99e44e7d8332e7ab1d618e3d5b6f
16,307
py
Python
search/controllers/simple/tests.py
ID2797370/arxiv-search
889402e8eef9a2faaa8e900978cd27ff2784ce33
[ "MIT" ]
35
2018-12-18T02:51:09.000Z
2022-03-30T04:43:20.000Z
search/controllers/simple/tests.py
ID2797370/arxiv-search
889402e8eef9a2faaa8e900978cd27ff2784ce33
[ "MIT" ]
172
2018-02-02T14:35:11.000Z
2018-12-04T15:35:30.000Z
search/controllers/simple/tests.py
ID2797370/arxiv-search
889402e8eef9a2faaa8e900978cd27ff2784ce33
[ "MIT" ]
13
2019-01-10T22:01:48.000Z
2021-11-05T12:25:08.000Z
"""Tests for simple search controller, :mod:`search.controllers.simple`.""" from http import HTTPStatus from unittest import TestCase, mock from werkzeug.datastructures import MultiDict from werkzeug.exceptions import InternalServerError, NotFound, BadRequest from search.domain import SimpleQuery from search.controllers import simple from search.controllers.simple.forms import SimpleSearchForm from search.services.index import ( IndexConnectionError, QueryError, DocumentNotFound, ) class TestRetrieveDocument(TestCase): """Tests for :func:`.simple.retrieve_document`.""" @mock.patch("search.controllers.simple.SearchSession") def test_encounters_queryerror(self, mock_index): """There is a bug in the index or query.""" def _raiseQueryError(*args, **kwargs): raise QueryError("What now") mock_index.get_document.side_effect = _raiseQueryError with self.assertRaises(InternalServerError): try: response_data, code, headers = simple.retrieve_document(1) except QueryError as ex: self.fail("QueryError should be handled (caught %s)" % ex) self.assertEqual( mock_index.get_document.call_count, 1, "A search should be attempted", ) @mock.patch("search.controllers.simple.SearchSession") def test_index_raises_connection_exception(self, mock_index): """Index service raises a IndexConnectionError.""" mock_index.get_document.side_effect = IndexConnectionError with self.assertRaises(InternalServerError): response_data, code, headers = simple.retrieve_document("124.5678") self.assertEqual( mock_index.get_document.call_count, 1, "A search should be attempted", ) call_args, call_kwargs = mock_index.get_document.call_args self.assertIsInstance(call_args[0], str, "arXiv ID is passed") # self.assertEqual(code, status.HTTP_500_INTERNAL_SERVER_ERROR) @mock.patch("search.controllers.simple.SearchSession") def test_document_not_found(self, mock_index): """The document is not found.""" def _raiseDocumentNotFound(*args, **kwargs): raise DocumentNotFound("What now") mock_index.get_document.side_effect = _raiseDocumentNotFound with self.assertRaises(NotFound): try: response_data, code, headers = simple.retrieve_document(1) except DocumentNotFound as ex: self.fail( "DocumentNotFound should be handled (caught %s)" % ex ) self.assertEqual( mock_index.get_document.call_count, 1, "A search should be attempted", ) class TestSearchController(TestCase): """Tests for :func:`.simple.search`.""" @mock.patch( "search.controllers.simple.url_for", lambda *a, **k: f'https://arxiv.org/{k["paper_id"]}', ) @mock.patch("search.controllers.simple.SearchSession") def test_arxiv_id(self, mock_index): """Query parameter contains an arXiv ID.""" request_data = MultiDict({"query": "1702.00123"}) response_data, code, headers = simple.search(request_data) self.assertEqual( code, HTTPStatus.MOVED_PERMANENTLY, "Response should be a 301 redirect.", ) self.assertIn("Location", headers, "Location header should be set") self.assertEqual( mock_index.search.call_count, 0, "No search should be attempted" ) @mock.patch("search.controllers.simple.SearchSession") def test_no_form_data(self, mock_index): """No form data has been submitted.""" request_data = MultiDict() response_data, code, headers = simple.search(request_data) self.assertEqual(code, HTTPStatus.OK, "Response should be OK.") self.assertIn("form", response_data, "Response should include form.") self.assertEqual( mock_index.search.call_count, 0, "No search should be attempted" ) @mock.patch("search.controllers.simple.SearchSession") def test_single_field_term(self, mock_index): """Form data are 
present.""" mock_index.search.return_value = {"metadata": {}, "results": []} request_data = MultiDict({"searchtype": "title", "query": "foo title"}) response_data, code, headers = simple.search(request_data) self.assertEqual( mock_index.search.call_count, 1, "A search should be attempted" ) call_args, call_kwargs = mock_index.search.call_args self.assertIsInstance( call_args[0], SimpleQuery, "An SimpleQuery is passed to the search index", ) self.assertEqual(code, HTTPStatus.OK, "Response should be OK.") @mock.patch("search.controllers.simple.SearchSession") def test_invalid_data(self, mock_index): """Form data are invalid.""" request_data = MultiDict({"searchtype": "title"}) response_data, code, headers = simple.search(request_data) self.assertEqual(code, HTTPStatus.OK, "Response should be OK.") self.assertIn("form", response_data, "Response should include form.") self.assertEqual( mock_index.search.call_count, 0, "No search should be attempted" ) @mock.patch("search.controllers.simple.SearchSession") def test_index_raises_connection_exception(self, mock_index): """Index service raises a IndexConnectionError.""" def _raiseIndexConnectionError(*args, **kwargs): raise IndexConnectionError("What now") mock_index.search.side_effect = _raiseIndexConnectionError request_data = MultiDict({"searchtype": "title", "query": "foo title"}) with self.assertRaises(InternalServerError): _, _, _ = simple.search(request_data) self.assertEqual( mock_index.search.call_count, 1, "A search should be attempted" ) call_args, call_kwargs = mock_index.search.call_args self.assertIsInstance( call_args[0], SimpleQuery, "An SimpleQuery is passed to the search index", ) @mock.patch("search.controllers.simple.SearchSession") def test_index_raises_query_error(self, mock_index): """Index service raises a QueryError.""" def _raiseQueryError(*args, **kwargs): raise QueryError("What now") mock_index.search.side_effect = _raiseQueryError request_data = MultiDict({"searchtype": "title", "query": "foo title"}) with self.assertRaises(InternalServerError): try: response_data, code, headers = simple.search(request_data) except QueryError as ex: self.fail("QueryError should be handled (caught %s)" % ex) self.assertEqual( mock_index.search.call_count, 1, "A search should be attempted" ) class TestSimpleSearchForm(TestCase): """Tests for :class:`.SimpleSearchForm`.""" def test_searchtype_only(self): """User has entered only a searchtype (field).""" data = MultiDict({"searchtype": "title"}) form = SimpleSearchForm(data) self.assertFalse(form.validate(), "Form should be invalid") def test_query_only(self): """User has entered only a query (value); this should never happen.""" data = MultiDict({"query": "someone monkeyed with the request"}) form = SimpleSearchForm(data) self.assertFalse(form.validate(), "Form should be invalid") def test_query_and_searchtype(self): """User has entered a searchtype (field) and query (value).""" data = MultiDict({"searchtype": "title", "query": "foo title"}) form = SimpleSearchForm(data) self.assertTrue(form.validate(), "Form should be valid") class TestQueryFromForm(TestCase): """Tests for :func:`.simple._query_from_form`.""" def test_multiple_simple(self): """Form data has three simple.""" data = MultiDict({"searchtype": "title", "query": "foo title"}) form = SimpleSearchForm(data) query = simple._query_from_form(form) self.assertIsInstance( query, SimpleQuery, "Should return an instance of SimpleQuery" ) def test_form_data_has_order(self): """Form data includes sort order.""" data = MultiDict( { 
"searchtype": "title", "query": "foo title", "order": "submitted_date", } ) form = SimpleSearchForm(data) query = simple._query_from_form(form) self.assertIsInstance( query, SimpleQuery, "Should return an instance of SimpleQuery" ) self.assertEqual(query.order, "submitted_date") def test_form_data_has_no_order(self): """Form data includes sort order parameter, but it is 'None'.""" data = MultiDict( {"searchtype": "title", "query": "foo title", "order": "None"} # ) form = SimpleSearchForm(data) query = simple._query_from_form(form) self.assertIsInstance( query, SimpleQuery, "Should return an instance of SimpleQuery" ) self.assertIsNone(query.order, "Order should be None") def test_querystring_has_wildcard_at_start(self): """Querystring starts with a wildcard.""" data = MultiDict({"searchtype": "title", "query": "*foo title"}) form = SimpleSearchForm(data) self.assertFalse(form.validate(), "Form should be invalid") def test_input_whitespace_is_stripped(self): """If query has padding whitespace, it should be removed.""" data = MultiDict({"searchtype": "title", "query": " foo title "}) form = SimpleSearchForm(data) self.assertTrue(form.validate(), "Form should be valid.") self.assertEqual(form.query.data, "foo title") def test_querystring_has_unbalanced_quotes(self): """Querystring has an odd number of quote characters.""" data = MultiDict({"searchtype": "title", "query": '"rhubarb'}) form = SimpleSearchForm(data) self.assertFalse(form.validate(), "Form should be invalid") data["query"] = '"rhubarb"' form = SimpleSearchForm(data) self.assertTrue(form.validate(), "Form should be valid") data["query"] = '"rhubarb" "pie' form = SimpleSearchForm(data) self.assertFalse(form.validate(), "Form should be invalid") data["query"] = '"rhubarb" "pie"' form = SimpleSearchForm(data) self.assertTrue(form.validate(), "Form should be valid") class TestPaginationParametersAreFunky(TestCase): """ The user may have monkeyed with the order or sort parameters. Since these are limited to specific values, there is no other reason for them to be invalid. Given that they are passed around among views (to persist users' selection), it's important to break the chain. To do this, we return a 400 Bad Request, with a clean link back to the search form. """ @mock.patch("search.controllers.simple.url_for") def test_order_is_invalid(self, mock_url_for): """The order parameter on the request is invalid.""" request_data = MultiDict( { "searchtype": "title", "query": "foo title", "size": 50, # Valid. "order": "foo", # Invalid } ) with self.assertRaises(BadRequest): simple.search(request_data) @mock.patch("search.controllers.simple.url_for") def test_size_is_invalid(self, mock_url_for): """The order parameter on the request is invalid.""" request_data = MultiDict( { "searchtype": "title", "query": "foo title", "size": 51, # Invalid "order": "", # Valid } ) with self.assertRaises(BadRequest): simple.search(request_data) class TestClassicAuthorSyntaxIsIntercepted(TestCase): """ The user may have entered an author query using `surname_f` syntax. This is an artefact of the classic search system, and not intended to be supported. Nevertheless, users have become accustomed to this syntax. We therefore rewrite the query using a comma, and show the user a warning about the syntax change. 
""" @mock.patch("search.controllers.simple.SearchSession") def test_all_fields_search_contains_classic_syntax(self, mock_index): """User has entered a `surname_f` query in an all-fields search.""" request_data = MultiDict( { "searchtype": "all", "query": "franklin_r", "size": 50, "order": "", } ) mock_index.search.return_value = {"metadata": {}, "results": []} data, code, headers = simple.search(request_data) self.assertEqual( data["query"].value, "franklin, r", "The query should be rewritten.", ) self.assertTrue( data["has_classic_format"], "A flag denoting the syntax interception should be set" " in the response context, so that a message may be" " rendered in the template.", ) @mock.patch("search.controllers.simple.SearchSession") def test_author_search_contains_classic_syntax(self, mock_index): """User has entered a `surname_f` query in an author search.""" request_data = MultiDict( { "searchtype": "author", "query": "franklin_r", "size": 50, "order": "", } ) mock_index.search.return_value = {"metadata": {}, "results": []} data, code, headers = simple.search(request_data) self.assertEqual( data["query"].value, "franklin, r", "The query should be rewritten.", ) self.assertTrue( data["has_classic_format"], "A flag denoting the syntax interception should be set" " in the response context, so that a message may be" " rendered in the template.", ) @mock.patch("search.controllers.simple.SearchSession") def test_all_fields_search_multiple_classic_syntax(self, mock_index): """User has entered a classic query with multiple authors.""" request_data = MultiDict( { "searchtype": "all", "query": "j franklin_r hawking_s", "size": 50, "order": "", } ) mock_index.search.return_value = {"metadata": {}, "results": []} data, code, headers = simple.search(request_data) self.assertEqual( data["query"].value, "j franklin, r; hawking, s", "The query should be rewritten.", ) self.assertTrue( data["has_classic_format"], "A flag denoting the syntax interception should be set" " in the response context, so that a message may be" " rendered in the template.", ) @mock.patch("search.controllers.simple.SearchSession") def test_title_search_contains_classic_syntax(self, mock_index): """User has entered a `surname_f` query in a title search.""" request_data = MultiDict( { "searchtype": "title", "query": "franklin_r", "size": 50, "order": "", } ) mock_index.search.return_value = {"metadata": {}, "results": []} data, code, headers = simple.search(request_data) self.assertEqual( data["query"].value, "franklin_r", "The query should not be rewritten.", ) self.assertFalse( data["has_classic_format"], "Flag should not be set, as no rewrite has occurred.", )
37.145786
79
0.612682
1,745
16,307
5.580516
0.150143
0.032348
0.042514
0.042719
0.711645
0.682789
0.658862
0.621586
0.565517
0.546005
0
0.004333
0.278224
16,307
438
80
37.230594
0.823025
0.125345
0
0.57764
0
0
0.228464
0.043144
0
0
0
0
0.152174
1
0.086957
false
0.009317
0.024845
0
0.130435
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4ae16756e558b0122e3a75646fd26aece7eef166
19,270
py
Python
kuri_wandering_robot/scripts/kuri_wandering_robot_executive_node.py
hcrlab/kuri_wandering_robot
9c747bfe27e3c3450fd4717e26b866af2ef70149
[ "BSD-3-Clause" ]
null
null
null
kuri_wandering_robot/scripts/kuri_wandering_robot_executive_node.py
hcrlab/kuri_wandering_robot
9c747bfe27e3c3450fd4717e26b866af2ef70149
[ "BSD-3-Clause" ]
null
null
null
kuri_wandering_robot/scripts/kuri_wandering_robot_executive_node.py
hcrlab/kuri_wandering_robot
9c747bfe27e3c3450fd4717e26b866af2ef70149
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python # ROS Libraries import actionlib from actionlib_msgs.msg import GoalStatus from control_msgs.msg import JointTrajectoryControllerState, FollowJointTrajectoryAction, FollowJointTrajectoryGoal from kuri_wandering_robot.msg import Power from wandering_behavior.msg import WanderAction, WanderGoal import rospy from sensor_msgs.msg import CompressedImage from std_msgs.msg import Empty from trajectory_msgs.msg import JointTrajectoryPoint # Python Default Libraries import base64 import csv from enum import Enum import os import requests import threading import time import traceback # Custom Libraries from sent_messages_database import SentMessagesDatabase class KuriWanderingRobotState(Enum): """ During NORMAL, the base moves according to wandering_behavior. During CHARGING, the robot's eyes are closed and it is charging. The robot transitions from NORMAL to CHARGING if its battery is below a threshold and it is on the charger. It transitions from CHARGING to NORMAL if it's battery is above a threshold or it is off the charger. """ NORMAL = 1 CHARGING = 2 class KuriWanderingRobot(object): """ The central executive node. This node runs a control loop that manages the robot's state: turning on and monitoring progress of the wandering module in NORMAL, turning off wandering in CHARGING, and switching back to NORMAL when the robot is sufficiently charged. This node also runs anomaly detection to detect low battery; when it detects low battery, it sends a low battery request to the Slackbot, which then sends it to the helpers. This node can be extended with additional anomaly detection and help requests, as needed. This node also subscribes to a dummy `where_am_i_help` topic, which sends helpers the sample `where_am_i` help message. Note that that is only in place to illsutrate the sample `where_am_i` help message, and actually using that would require developing a custom anomaly detection system to trigger the robot asking for that type of help. Finally, this node has a separate thread that continually queries the Slackbot for responses to its help requests. """ def __init__(self): """ Initialize an instance of the KuriWanderingRobot class """ self.has_loaded = False # Get the Slackbot URL self.slackbot_url = rospy.get_param('~slackbot_url') # Initialize the state. 
self.state_lock = threading.Lock() self.state_changed = True self.state = KuriWanderingRobotState.NORMAL # Initialize the wandering module self.wandering_module_action = actionlib.SimpleActionClient('/wandering_behavior/navigate', WanderAction) # Initialize the eye controller self.eyelid_controller_action = actionlib.SimpleActionClient('/eyelids_controller/follow_joint_trajectory', FollowJointTrajectoryAction) self.eye_closed_position = 0.41 self.eye_open_position = 0.0 # Initialize the camera self.img_sub = rospy.Subscriber( '/upward_looking_camera/compressed', CompressedImage, self.image_callback, queue_size=1) self.latest_image = None self.latest_image_lock = threading.Lock() # Initialize low battery anomaly detector self.battery_sub = rospy.Subscriber( "/mobile_base/power", Power, self.power_callback, queue_size=1) self.previous_battery_lock = threading.Lock() self.previous_battery = None self.previous_dock_present = None self.battery_notification_thresholds = rospy.get_param('~battery_notification_thresholds', [40, 20, 10, 5, 4, 3, 2, 1]) # if the battery is less than this and Kuri is docked, charge self.to_charge_threshold = rospy.get_param('~to_charge_threshold', 50) # if the batter is greater than this and Kuri is charging, switch back to NORMAL self.charging_done_threshold = rospy.get_param('~charging_done_threshold', 90) # Whether the low battery message should include Kuri's current camera image self.low_battery_message_include_image = rospy.get_param('~low_battery_message_include_image', True) # Initialize the dummy `where_am_i` anomaly detector self.where_am_i_help_sub = rospy.Subscriber( "/where_am_i_help", Empty, self.where_am_i_help_callback, queue_size=1) # Initialize storing images and message IDs self.sent_messages_database_filepath = rospy.get_param('~send_messages_database_filepath') self.sent_messages_database = SentMessagesDatabase.load( self.sent_messages_database_filepath) self.database_save_interval = 1 self.database_updates_since_last_save = 0 # Initialize the head controller self.head_state_sub = rospy.Subscriber( "/head_controller/state", JointTrajectoryControllerState, self.head_state_callback, queue_size=1) self.head_controller_action = actionlib.SimpleActionClient('/head_controller/follow_joint_trajectory', FollowJointTrajectoryAction) self.head_tilt_speed = 0.2 # head tilt is in [-0.8, 0.3] self.head_pan_speed = 0.2 # head pan is in [-0.75, 0.75] # Initialize the Slackbot updates thread self.slackbot_responses_thread = threading.Thread( target=self.get_slackbot_updates, ) self.slackbot_responses_thread.start() # Initialize the state machine self.state_machine_thread = threading.Thread( target=self.state_machine_control_loop, ) self.state_machine_thread.start() self.has_centered_head = False self.has_loaded = True def database_updated(self, num_updates=1): """ Called everytime the database is updated. 
Saves the database every self.database_save_interval updates """ self.database_updates_since_last_save += num_updates if self.database_updates_since_last_save % self.database_save_interval == 0: self.sent_messages_database.save(self.sent_messages_database_filepath) rospy.logdebug("Saved sent_messages_database!") def open_eyes(self, duration_secs=0.2): """ Open the robot's eyes """ rospy.logdebug("Open Eyes") duration = rospy.Duration.from_sec(duration_secs) goal = FollowJointTrajectoryGoal() goal.trajectory.header.stamp = rospy.Time.now() goal.trajectory.joint_names = ["eyelids_joint"] point = JointTrajectoryPoint() point.positions = [self.eye_open_position] point.velocities = [] point.accelerations = [] point.effort = [] point.time_from_start = duration goal.trajectory.points = [point] # Send the goal self.eyelid_controller_action.wait_for_server() self.eyelid_controller_action.send_goal(goal) self.eyelid_controller_action.wait_for_result(duration) def close_eyes(self, duration_secs=0.2): """ Close the robot's eyes """ rospy.logdebug("Close Eyes") duration = rospy.Duration.from_sec(duration_secs) goal = FollowJointTrajectoryGoal() goal.trajectory.header.stamp = rospy.Time.now() goal.trajectory.joint_names = ["eyelids_joint"] point = JointTrajectoryPoint() point.positions = [self.eye_closed_position] point.velocities = [] point.accelerations = [] point.effort = [] point.time_from_start = duration goal.trajectory.points = [point] # Send the goal self.eyelid_controller_action.wait_for_server() self.eyelid_controller_action.send_goal(goal) self.eyelid_controller_action.wait_for_result(duration) def head_state_callback(self, head_state_msg): """ Get the head's current position """ if not self.has_loaded: return if not self.has_centered_head: self.center_head(head_state_msg.actual.positions[0], head_state_msg.actual.positions[1]) def center_head(self, current_pan, current_tilt): """ Center Kuri's head. This involves moving from the current pan and tilt to the centered values of (0.0, -0.3) """ pan_endpoint = 0.0 tilt_endpoint = -0.3 n_waypoints = 10 # Compute the actual endpoint and duration_secs duration_secs = max( abs(pan_endpoint-current_pan)/self.head_pan_speed, abs(tilt_endpoint-current_tilt)/self.head_tilt_speed) duration = rospy.Duration.from_sec(duration_secs) # Create the goal goal = FollowJointTrajectoryGoal() goal.trajectory.header.stamp = rospy.Time.now() goal.trajectory.joint_names = ["head_1_joint", "head_2_joint"] goal.trajectory.points = [] pan_interval = (pan_endpoint-current_pan)/(n_waypoints-1) tilt_interval = (tilt_endpoint-current_tilt)/(n_waypoints-1) time_interval = duration/n_waypoints for i in range(n_waypoints): point = JointTrajectoryPoint() point.positions = [current_pan + i*pan_interval, current_tilt + i*tilt_interval] point.velocities = [] point.accelerations = [] point.effort = [] point.time_from_start = (i+1)*time_interval goal.trajectory.points.append(point) # Send the goal self.head_controller_action.wait_for_server() self.head_controller_action.send_goal(goal) self.head_controller_action.wait_for_result(duration) self.has_centered_head = True def state_machine_control_loop(self, rate_hz=10): """ The control loop for the state machine. All of the state machine logic is handled in this function and the functions it calls. During NORMAL, the base moves according to wandering_behavior. During CHARGING, the robot's eyes are closed and it is charging. The robot transitions from NORMAL to CHARGING if its battery is below a threshold and it is on the charger. 
It transitions from CHARGING to NORMAL if it's battery is above a threshold or it is off the charger. """ rate = rospy.Rate(rate_hz) while not rospy.is_shutdown(): rate.sleep() with self.state_lock: state_at_start_of_loop = self.state if (self.state == KuriWanderingRobotState.NORMAL): goal_state = self.wandering_module_action.get_state() if (self.state_changed or goal_state == GoalStatus.ABORTED or goal_state == GoalStatus.SUCCEEDED): rospy.logdebug("Waiting for wandering_module_action server") self.wandering_module_action.wait_for_server() rospy.logdebug("Sending goal to wandering_module_action") # Effort -1 means "don't stop unless preempted" self.wandering_module_action.send_goal(WanderGoal(effort=-1)) self.open_eyes() with self.previous_battery_lock: if (self.previous_battery is not None and self.previous_battery < self.to_charge_threshold and self.previous_dock_present): self.close_eyes() self.state = KuriWanderingRobotState.CHARGING self.wandering_module_action.cancel_all_goals() rospy.loginfo("State: NORMAL ==> CHARGING") elif self.state == KuriWanderingRobotState.CHARGING: with self.previous_battery_lock: if (self.previous_battery is None or not self.previous_dock_present or self.previous_battery >= self.charging_done_threshold): self.state = KuriWanderingRobotState.NORMAL rospy.loginfo("State: CHARGING ==> NORMAL") state_at_end_of_loop = self.state self.state_changed = (state_at_start_of_loop != state_at_end_of_loop) def image_callback(self, img_msg): """ Store the latest image. """ if not self.has_loaded: return with self.latest_image_lock: self.latest_image = img_msg def power_callback(self, msg): """ Callback function for Kuri's power update. It Kuri's battery has crossed a battery_notification_threshold, notify the Slackbot. """ if not self.has_loaded: return with self.state_lock: with self.previous_battery_lock: self.previous_dock_present = msg.dock_present if self.state == KuriWanderingRobotState.CHARGING: self.previous_battery = msg.battery.pct else: update_previous_battery = True if msg.battery.pct <= self.battery_notification_thresholds[0]: # Send the low-battery helper notifications when the battery # crosses the thresholds defined in self.battery_notification_thresholds for i in range(len(self.battery_notification_thresholds)): if (self.previous_battery is None or (self.previous_battery > self.battery_notification_thresholds[i]) and msg.battery.pct <= self.battery_notification_thresholds[i]): try: # Send a low_battery_alert dict_to_send = {'battery_pct':msg.battery.pct} if self.low_battery_message_include_image: with self.latest_image_lock: if self.latest_image is not None: image_contents = base64.b64encode(bytearray(self.latest_image.data)).decode('ascii') dict_to_send['image'] = image_contents rospy.loginfo("Sending battery request for pct %s" % msg.battery.pct) res = requests.post( os.path.join(self.slackbot_url, 'low_battery'), json=dict_to_send, ) res_json = res.json() if not res_json['success']: update_previous_battery = False except Exception as e: rospy.logwarn("Error communicating with Slackbot /low_battery at URL %s." % self.slackbot_url) if "res" in locals(): rospy.logwarn("Response text %s." % res.text) rospy.logwarn(traceback.format_exc()) rospy.logwarn("Error %s." 
% e) update_previous_battery = False break if (update_previous_battery and (self.previous_battery is None or msg.battery.pct < self.previous_battery)): self.previous_battery = msg.battery.pct def where_am_i_help_callback(self, msg): """ A dummy callback that triggers sending a where_am_i help message to the Slackbot. This is merely intended to showcase some of the Slackbot's capabilities. Users who want a robot that autonomously asks the human to tell it where it is should implement their own anomaly detection system for triggering this help request. """ with self.latest_image_lock: if self.latest_image is None: rospy.loginfo("Attempted to send where_am_i help request but have no image.") return try: # Send a low_battery_alert rospy.loginfo("Sending where_am_i help request") with self.latest_image_lock: image_contents = base64.b64encode(bytearray(self.latest_image.data)).decode('ascii') res = requests.post( os.path.join(self.slackbot_url, 'where_am_i'), json={'image':image_contents, 'options':['Lounge', "Office#252", "200 Corridoor", "Atrium"]}, ) res_json = res.json() message_id = res_json['message_id'] self.sent_messages_database.add_respondable_message(message_id) self.database_updated() except Exception as e: rospy.logwarn("Error communicating with Slackbot /where_am_i at URL %s." % self.slackbot_url) if "res" in locals(): rospy.logwarn("Response text %s." % res.text) rospy.logwarn(traceback.format_exc()) rospy.logwarn("Error %s." % e) def get_slackbot_updates(self, refresh_secs=5.0): """ Once every refresh_secs seconds, request updates (e.g., human responses) from the Slackbot. Note that you can optionally request updates for partular message_ids (e.g., those that have not received responses yet) """ r = rospy.Rate(1.0/refresh_secs) while not rospy.is_shutdown(): if not self.has_loaded: r.sleep() try: message_ids_and_action_ts = self.sent_messages_database.get_message_ids_and_latest_action_ts() # Request responses for those message_ids res = requests.post( os.path.join(self.slackbot_url, 'get_updates'), json={'message_ids_and_action_ts':message_ids_and_action_ts}, ) res_json = res.json() rospy.logdebug("Got updates from Slackbot %s" % res_json) message_id_to_responses = res_json["message_id_to_responses"] if len(message_id_to_responses) > 0: num_updates = 0 # Insert reactions into the database for message_id in message_id_to_responses: for action_ts, response in message_id_to_responses[message_id]: rospy.loginfo("Got reaction %s from at ts %s" % (response, action_ts)) self.sent_messages_database.add_user_response(message_id, action_ts, response) num_updates += 1 self.database_updated(num_updates) except Exception as e: rospy.logwarn("Error communicating with Slackbot /get_updates at URL %s." % self.slackbot_url) if "res" in locals(): rospy.logwarn("Response text %s." % res.text) rospy.logwarn(traceback.format_exc()) rospy.logwarn("Error %s." % e) r.sleep() if __name__ == "__main__": rospy.init_node("kuri_wandering_robot") kuri_wandering_robot = KuriWanderingRobot() rospy.spin()
47.69802
195
0.631915
2,263
19,270
5.150243
0.173663
0.019562
0.024453
0.010296
0.396654
0.318147
0.250965
0.221965
0.216474
0.206178
0
0.006861
0.296627
19,270
403
196
47.816377
0.853032
0.203788
0
0.30888
0
0
0.082435
0.027298
0
0
0
0
0
1
0.042471
false
0
0.069498
0
0.135135
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4ae27b557f549eb57426e50a39da725dc0fc0caa
2,353
py
Python
Job Portal with Automated Resume Screening/gensim-4.1.2/gensim/test/test_rpmodel.py
Candida18/Job-Portal-with-Automated-Resume-Screening
19d19464ad3d1714da856656753a4afdfe257b31
[ "MIT" ]
3
2021-03-29T19:21:08.000Z
2021-12-31T09:30:11.000Z
Job Portal with Automated Resume Screening/gensim-4.1.2/gensim/test/test_rpmodel.py
Candida18/Job-Portal-with-Automated-Resume-Screening
19d19464ad3d1714da856656753a4afdfe257b31
[ "MIT" ]
1
2021-08-30T08:53:09.000Z
2021-08-30T08:53:09.000Z
venv/Lib/site-packages/gensim/test/test_rpmodel.py
saritmaitra/nlp_ner_topic_modeling
70914b4ae4cd7d3b9cb10776161132216394883c
[ "MIT" ]
2
2022-01-15T05:36:58.000Z
2022-02-08T15:25:50.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html

"""
Automated tests for checking transformation algorithms (the models package).
"""

import logging
import unittest

import numpy as np

from gensim.corpora.mmcorpus import MmCorpus
from gensim.models import rpmodel
from gensim import matutils
from gensim.test.utils import datapath, get_tmpfile


class TestRpModel(unittest.TestCase):
    def setUp(self):
        self.corpus = MmCorpus(datapath('testcorpus.mm'))

    def test_transform(self):
        # create the transformation model
        # HACK; set fixed seed so that we always get the same random matrix (and can compare against expected results)
        np.random.seed(13)
        model = rpmodel.RpModel(self.corpus, num_topics=2)

        # transform one document
        doc = list(self.corpus)[0]
        transformed = model[doc]
        vec = matutils.sparse2full(transformed, 2)  # convert to dense vector, for easier equality tests

        expected = np.array([-0.70710677, 0.70710677])
        self.assertTrue(np.allclose(vec, expected))  # transformed entries must be equal up to sign

    def test_persistence(self):
        fname = get_tmpfile('gensim_models.tst')
        model = rpmodel.RpModel(self.corpus, num_topics=2)
        model.save(fname)
        model2 = rpmodel.RpModel.load(fname)
        self.assertEqual(model.num_topics, model2.num_topics)
        self.assertTrue(np.allclose(model.projection, model2.projection))
        tstvec = []
        self.assertTrue(np.allclose(model[tstvec], model2[tstvec]))  # try projecting an empty vector

    def test_persistence_compressed(self):
        fname = get_tmpfile('gensim_models.tst.gz')
        model = rpmodel.RpModel(self.corpus, num_topics=2)
        model.save(fname)
        model2 = rpmodel.RpModel.load(fname, mmap=None)
        self.assertEqual(model.num_topics, model2.num_topics)
        self.assertTrue(np.allclose(model.projection, model2.projection))
        tstvec = []
        self.assertTrue(np.allclose(model[tstvec], model2[tstvec]))  # try projecting an empty vector


if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
36.2
118
0.694008
299
2,353
5.381271
0.458194
0.039155
0.04972
0.07458
0.389683
0.389683
0.389683
0.347421
0.323182
0.323182
0
0.021693
0.19677
2,353
64
119
36.765625
0.82963
0.243094
0
0.333333
0
0
0.056186
0
0
0
0
0
0.179487
1
0.102564
false
0
0.179487
0
0.307692
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4ae2c9c85b28962ffc9f80c3635fc6bd15adc317
3,306
py
Python
playground/tianhaoz95/gan_getting_started/cgan_model.py
tianhaoz95/mangekyo
fd2b151538d0c15cca60e05a844baffcbe08e68c
[ "MIT" ]
null
null
null
playground/tianhaoz95/gan_getting_started/cgan_model.py
tianhaoz95/mangekyo
fd2b151538d0c15cca60e05a844baffcbe08e68c
[ "MIT" ]
5
2020-09-25T00:43:18.000Z
2020-10-10T03:59:39.000Z
playground/tianhaoz95/gan_getting_started/cgan_model.py
tianhaoz95/mangekyo
fd2b151538d0c15cca60e05a844baffcbe08e68c
[ "MIT" ]
null
null
null
import tensorflow as tf
from tensorflow import keras


class CondGeneratorModel(keras.Model):
    def __init__(self):
        super(CondGeneratorModel, self).__init__()
        # Expand 7*7*128 features into a (7,7,128) tensor
        self.dense_1 = keras.layers.Dense(7*7*256)
        self.reshape_1 = keras.layers.Reshape((7, 7, 256))
        # Expand (10,) to (7,7,1)
        self.embedder = keras.layers.Embedding(10, 100)
        self.dense_2 = keras.layers.Dense(7*7*256)
        # From (7,7,256) to (7,7,128)
        self.convt_1 = keras.layers.Conv2DTranspose(
            128, (5, 5), strides=1, padding='same', use_bias=False)
        self.convt_bn_1 = keras.layers.BatchNormalization()
        self.convt_relu_1 = keras.layers.LeakyReLU()
        # From (7,7,128) to (14,14,64)
        self.convt_2 = keras.layers.Conv2DTranspose(
            64, (5, 5), strides=2, padding='same', use_bias=False)
        self.convt_bn_2 = keras.layers.BatchNormalization()
        self.convt_relu_2 = keras.layers.LeakyReLU()
        # From (14,14,64) to (28,28,1)
        self.convt_out = keras.layers.Conv2DTranspose(
            1, (5, 5), strides=2, padding='same', use_bias=False)

    def call(self, inputs):
        feat_x = inputs[0]
        label = inputs[2]
        # Expand label input to be the same as latent feature
        label_x = self.embedder(label)
        label_x = self.dense_2(label_x)
        label_x = tf.squeeze(label_x, 1)
        # Expand features to image channels
        feat_x = self.dense_1(feat_x)
        # Combine latent feature and label input
        x = tf.math.multiply(feat_x, label_x)
        x = self.reshape_1(x)
        # From (7,7,256) to (7,7,128)
        x = self.convt_1(x)
        x = self.convt_bn_1(x)
        x = self.convt_relu_1(x)
        # From (7,7,128) to (14,14,64)
        x = self.convt_2(x)
        x = self.convt_bn_2(x)
        x = self.convt_relu_2(x)
        # From (14,14,64) to (28,28,1)
        x = self.convt_out(x)
        return [x, None, label]


class CondDiscriminatorModel(keras.Model):
    def __init__(self):
        super(CondDiscriminatorModel, self).__init__()
        self.embedder = keras.layers.Embedding(10, 100)
        self.expand_layer = keras.layers.Dense(28*28*1)
        self.reshape_layer = keras.layers.Reshape((28, 28, 1))
        self.conv_1 = keras.layers.Conv2D(
            64, (5, 5), strides=2, padding='same', input_shape=(28, 28, 1))
        self.relu_1 = keras.layers.LeakyReLU()
        self.drop_1 = keras.layers.Dropout(0.3)
        self.conv_2 = keras.layers.Conv2D(
            128, (5, 5), strides=2, padding='same')
        self.relu_2 = keras.layers.LeakyReLU()
        self.drop_2 = keras.layers.Dropout(0.3)
        self.flatten = keras.layers.Flatten()
        self.out = keras.layers.Dense(1)

    def call(self, inputs):
        images_x = inputs[0]
        labels = inputs[2]
        labels_x = self.embedder(labels)
        labels_x = self.expand_layer(labels_x)
        labels_x = self.reshape_layer(labels_x)
        x = tf.math.multiply(images_x, labels_x)
        x = self.conv_1(x)
        x = self.relu_1(x)
        x = self.drop_1(x)
        x = self.conv_2(x)
        x = self.relu_2(x)
        x = self.drop_2(x)
        x = self.flatten(x)
        x = self.out(x)
        return x
38.44186
75
0.594374
492
3,306
3.827236
0.160569
0.128518
0.041423
0.018587
0.391397
0.300053
0.168879
0.155603
0.03505
0
0
0.079035
0.272837
3,306
85
76
38.894118
0.704243
0.111313
0
0.088235
0
0
0.006835
0
0
0
0
0
0
1
0.058824
false
0
0.029412
0
0.147059
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4ae3be8ccc9773f8672701a5f6e37ff13253c5e3
13,115
py
Python
ahd2fhir/utils/resource_handler.py
miracum/ahd2fhir
0c1bf3e0d86278145f9f1fa5c99a121f8e961d5f
[ "Apache-2.0" ]
3
2021-11-23T16:24:21.000Z
2022-03-30T07:59:03.000Z
ahd2fhir/utils/resource_handler.py
miracum/ahd2fhir
0c1bf3e0d86278145f9f1fa5c99a121f8e961d5f
[ "Apache-2.0" ]
40
2021-05-27T14:26:33.000Z
2022-03-29T14:29:33.000Z
ahd2fhir/utils/resource_handler.py
miracum/ahd2fhir
0c1bf3e0d86278145f9f1fa5c99a121f8e961d5f
[ "Apache-2.0" ]
1
2021-06-30T11:11:01.000Z
2021-06-30T11:11:01.000Z
import base64 import datetime import logging import os import time from typing import List, Tuple import structlog import tenacity from averbis import Pipeline from fhir.resources.bundle import Bundle from fhir.resources.codeableconcept import CodeableConcept from fhir.resources.composition import Composition, CompositionSection from fhir.resources.documentreference import DocumentReference from fhir.resources.fhirtypes import DateTime from fhir.resources.identifier import Identifier from fhir.resources.reference import Reference from fhir.resources.resource import Resource from prometheus_client import Counter, Histogram, Summary from tenacity.after import after_log from ahd2fhir.mappers import ahd_to_condition, ahd_to_medication_statement from ahd2fhir.utils.bundle_builder import BundleBuilder from ahd2fhir.utils.custom_mappers import custom_mappers, mapper_functions from ahd2fhir.utils.device_builder import build_device from ahd2fhir.utils.fhir_utils import sha256_of_identifier MAPPING_FAILURES_COUNTER = Counter("mapping_failures", "Exceptions during mapping") MAPPING_DURATION_SUMMARY = Histogram( "map_duration_seconds", "Time spent mapping", buckets=( 0.05, 0.1, 0.5, 1.0, 2.0, 3.0, 5.0, 8.0, 13.0, 21.0, 34.0, 55.0, "inf", ), ) EXTRACTED_RESOURCES_COUNT_SUMMARY = Summary( "extracted_resources", "Number of extracted resources for each processed document" ) DOCUMENT_LENGTH_SUMMARY = Summary( "document_length", "Length of each processed document's text in charactes", ) DISCHARGE_SUMMARY_CONCEPT_TEXT = ( "Clinical document Kind of document from LOINC Document Ontology" ) DISCHARGE_SUMMARY_CONCEPT = CodeableConcept( **{ "coding": [ { "system": "http://loinc.org", "code": "74477-1", "display": DISCHARGE_SUMMARY_CONCEPT_TEXT, }, ], "text": DISCHARGE_SUMMARY_CONCEPT_TEXT, } ) AHD_TYPE_DOCUMENT_ANNOTATION = "de.averbis.types.health.DocumentAnnotation" AHD_TYPE_MEDICATION = "de.averbis.types.health.Medication" AHD_TYPE_DIAGNOSIS = "de.averbis.types.health.Diagnosis" log = structlog.get_logger() class TransientError(Exception): pass class ResourceHandler: def __init__(self, averbis_pipeline: Pipeline): self.pipeline = averbis_pipeline self.bundle_builder = BundleBuilder() @MAPPING_FAILURES_COUNTER.count_exceptions() @MAPPING_DURATION_SUMMARY.time() def handle_documents(self, document_references: List[DocumentReference]) -> Bundle: """ Process a list of DocumentReferences """ all_resources = [] bundle_id = None for document_reference in document_references: resources_from_document = self._process_documentreference( document_reference ) composition = self._build_composition( document_reference, resources_from_document ) bundle_id = composition.id all_resources.extend(resources_from_document) all_resources.append(composition) EXTRACTED_RESOURCES_COUNT_SUMMARY.observe(len(all_resources)) result_bundle = self.bundle_builder.build_from_resources( all_resources, bundle_id ) return result_bundle def handle_bundle(self, bundle: Bundle): """ Process all FHIR DocumentReference resources from a given bundle """ document_references = [] for entry in bundle.entry: if entry.resource.resource_type == "DocumentReference": document_references.append(entry.resource) return self.handle_documents(document_references) def _build_composition( self, document_reference: DocumentReference, all_resources: List[Resource] ): composition_type = ( document_reference.type if document_reference.type is not None else DISCHARGE_SUMMARY_CONCEPT ) composition_subject = document_reference.subject composition_category = 
document_reference.category composition_encounter = None if document_reference.context is not None: if len(document_reference.context.encounter) > 1: log.warning( "DocumentReference contains more than one encounter. " + "Using the first." ) composition_encounter = document_reference.context.encounter[0] composition_author = None composition_sections = [] for resource in all_resources: resource_type = resource.resource_type if resource_type == "Device": author = Reference.construct() author.reference = f"Device/{resource.id}" author.type = "Device" composition_author = author continue # Check if no resource specific section exists ands adds it, # otherwise select the correct section if not any( section.title == resource_type for section in composition_sections ): resource_section = CompositionSection.construct() resource_section.title = resource_type resource_section.entry = [] composition_sections.append(resource_section) ind = len(composition_sections) - 1 else: ind = [ ind for ind, section in enumerate(composition_sections) if section.title == resource_type ][0] entry_reference = Reference.construct() entry_reference.reference = resource_type + "/" + resource.id composition_sections[ind].entry.append(entry_reference) if composition_author is None: composition_author = Reference(**{"display": "Averbis Health Discovery"}) composition_identifier = ( self._build_composition_identifier_from_documentreference( document_reference ) ) composition = Composition( **{ "title": "NLP FHIR Results " + time.strftime("%Y-%m-%dT%H:%M"), "status": "final", "date": DateTime.validate(datetime.datetime.now(datetime.timezone.utc)), "type": composition_type, "identifier": composition_identifier, "id": sha256_of_identifier(composition_identifier), "subject": composition_subject, "category": composition_category, "encounter": composition_encounter, "author": [composition_author], "section": composition_sections, } ) return composition def _process_documentreference(self, document_reference: DocumentReference): log = structlog.get_logger().bind( document_id=f"{document_reference.get_resource_type()}/" + f"{document_reference.id}" ) # Text extraction and text analysis (text, content_type, lang) = self._extract_text_from_resource( document_reference ) DOCUMENT_LENGTH_SUMMARY.observe(len(text)) averbis_result = None try: averbis_result = self._perform_text_analysis( text=text, mime_type=content_type, lang=lang ) except Exception as exc: log.exception(exc) log.error("Failed to perform text analysis", error=exc) raise TransientError(exc) total_results = [] # Building FHIR resources as results medication_statement_lists = [] for val in averbis_result: if val["type"] == AHD_TYPE_DIAGNOSIS: mapped_condition = ahd_to_condition.get_fhir_condition( val, document_reference ) if mapped_condition is not None: total_results.append(mapped_condition) if val["type"] == AHD_TYPE_DOCUMENT_ANNOTATION: device = build_device(val) if device is not None: total_results.append(device) if val["type"] == AHD_TYPE_MEDICATION: statement = ahd_to_medication_statement.get_fhir_medication_statement( val, document_reference ) if statement is not None: medication_statement_lists.append(statement) # if custom_mappers_enabled if os.getenv("CUSTOM_MAPPERS_ENABLED", "False").lower() in ["true", "1"]: total_results.extend(custom_mappers(val, document_reference)) medication_results = [] medication_statement_results = [] for medication_statement_list in medication_statement_lists: for medication_statement_dict in medication_statement_list: 
medication_results.append(medication_statement_dict["medication"]) medication_statement_results.append( medication_statement_dict["statement"] ) # de-duplicate any Medication and MedicationStatement resources medication_resources_unique = {m.id: m for m in medication_results}.values() medication_statements_unique = { m.id: m for m in medication_statement_results }.values() total_results.extend(medication_resources_unique) total_results.extend(medication_statements_unique) return total_results def _extract_text_from_resource( self, document_reference: DocumentReference, ) -> Tuple[str, str]: valid_content = [ content for content in document_reference.content if content.attachment.data is not None ] if len(valid_content) == 0: raise ValueError( f"Document {document_reference.id} contains no valid content" ) if len(valid_content) > 1: raise ValueError( f"Document {document_reference.id} contains more than one attachment" ) content = valid_content[0] language = None if content.attachment.language: language = content.attachment.language.lower().split("-")[0] return ( base64.b64decode(content.attachment.data).decode("utf8"), content.attachment.contentType, language, ) @tenacity.retry( stop=tenacity.stop.stop_after_attempt(10), wait=tenacity.wait.wait_fixed(5) + tenacity.wait.wait_random_exponential(multiplier=1, max=30), after=after_log(logging.getLogger(), logging.WARNING), reraise=True, ) def _perform_text_analysis( self, text: str, mime_type: str = "text/plain", lang: str = None ): types = ",".join( [ AHD_TYPE_DIAGNOSIS, AHD_TYPE_MEDICATION, AHD_TYPE_DOCUMENT_ANNOTATION, *mapper_functions.keys(), ] ) analyse_args = {"language": lang, "annotation_types": types} try: if mime_type == "text/html": return self.pipeline.analyse_html(text, **analyse_args) else: return self.pipeline.analyse_text(text, **analyse_args) except Exception as exc: log.exception(exc) log.error("Text analysis failed") raise exc def _build_composition_identifier_from_documentreference( self, doc_ref: DocumentReference, ): """ construct a hopefully unqiue identifier for the condition from the document identifier as well as the offset into the text and the unique id of the annotation """ doc_ref_identifier = None if doc_ref.identifier is None or len(doc_ref.identifier) == 0: log.warning( "No identifier specified on the document. " + "Trying to fall-back to the DocumentReference.id" ) doc_ref_identifier = doc_ref.id else: if len(doc_ref.identifier) > 1: log.warning( "More than one identifier specified on the document. " + "Using the first occurrence." ) doc_ref_identifier = doc_ref.identifier[0].value composition_identifier_system = ( "https://fhir.miracum.org/nlp/identifiers/ahd-analysis-result-composition" ) composition_identifier_value = f"{doc_ref_identifier}_ahd-analysis-result" return Identifier( **{ "system": composition_identifier_system, "value": composition_identifier_value, } )
34.24282
88
0.619596
1,295
13,115
6.029344
0.200772
0.050077
0.017418
0.010374
0.08248
0.037654
0.030738
0.030738
0.011014
0
0
0.00781
0.306824
13,115
382
89
34.332461
0.851061
0.039192
0
0.091503
0
0
0.107274
0.022462
0
0
0
0
0
1
0.026144
false
0.003268
0.078431
0
0.137255
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4ae60da63587ab2aea48c92c16464b071dd138fd
828
py
Python
julynter/oldcmd.py
dew-uff/julynter
f4657aba4fa3e17af2cd241f0c3170b76df7c57c
[ "BSD-3-Clause" ]
9
2020-07-13T23:56:04.000Z
2021-11-02T18:42:07.000Z
julynter/oldcmd.py
dew-uff/julynter
f4657aba4fa3e17af2cd241f0c3170b76df7c57c
[ "BSD-3-Clause" ]
8
2021-07-14T15:33:57.000Z
2022-02-27T06:45:57.000Z
julynter/oldcmd.py
dew-uff/julynter
f4657aba4fa3e17af2cd241f0c3170b76df7c57c
[ "BSD-3-Clause" ]
null
null
null
"""Define commands for Python 2.7""" import argparse import traceback from . import util from .cmd import run from .cmd import extractpipenv def main(): """Main function""" print("This version is not supported! It has limitted analysis features") parser = argparse.ArgumentParser(description='Analyze Jupyter Notebooks') subparsers = parser.add_subparsers() run.create_subparsers(subparsers) extractpipenv.create_subparsers(subparsers) args, rest = parser.parse_known_args() try: if not getattr(args, 'func', None): parser.print_help() else: args.func(args, rest) if not util.EXITED: util.do_exit(0) except: # pylint: disable=bare-except if not util.EXITED: traceback.print_exc() util.do_exit(1)
28.551724
77
0.657005
100
828
5.35
0.58
0.028037
0.048598
0.056075
0
0
0
0
0
0
0
0.00641
0.246377
828
28
78
29.571429
0.850962
0.088164
0
0.086957
0
0
0.125
0
0
0
0
0
0
1
0.043478
false
0
0.217391
0
0.26087
0.130435
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4aea193e4b6512fd0f264e141522245728635ebf
1,273
py
Python
test/linux/gyptest-ldflags-from-environment.py
chlorm-forks/gyp
a8921fcaab1a18c8cf7e4ab09ceb940e336918ec
[ "BSD-3-Clause" ]
77
2018-07-01T15:55:34.000Z
2022-03-30T09:16:54.000Z
test/linux/gyptest-ldflags-from-environment.py
chlorm-forks/gyp
a8921fcaab1a18c8cf7e4ab09ceb940e336918ec
[ "BSD-3-Clause" ]
116
2021-05-29T16:32:51.000Z
2021-08-13T16:05:29.000Z
test/linux/gyptest-ldflags-from-environment.py
chlorm-forks/gyp
a8921fcaab1a18c8cf7e4ab09ceb940e336918ec
[ "BSD-3-Clause" ]
53
2018-04-13T12:06:06.000Z
2022-03-25T13:54:38.000Z
#!/usr/bin/env python

# Copyright (c) 2017 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Verifies the use of linker flags in environment variables.
In this test, gyp and build both run in same local environment.
"""

import TestGyp

import re
import subprocess
import sys

FORMATS = ('make', 'ninja')

if sys.platform.startswith('linux'):
  test = TestGyp.TestGyp(formats=FORMATS)

  CHDIR = 'ldflags-from-environment'
  with TestGyp.LocalEnv({'LDFLAGS': '-Wl,--dynamic-linker=/target',
                         'LDFLAGS_host': '-Wl,--dynamic-linker=/host',
                         'GYP_CROSSCOMPILE': '1'}):
    test.run_gyp('test.gyp', chdir=CHDIR)
    test.build('test.gyp', chdir=CHDIR)

  def GetDynamicLinker(p):
    p = test.built_file_path(p, chdir=CHDIR)
    r = re.compile(r'\[Requesting program interpreter: ([^\]]+)\]')
    proc = subprocess.Popen(['readelf', '-l', p], stdout=subprocess.PIPE)
    o = proc.communicate()[0].decode('utf-8')
    assert not proc.returncode
    return r.search(o).group(1)

  if GetDynamicLinker('ldflags') != '/target':
    test.fail_test()

  if GetDynamicLinker('ldflags_host') != '/host':
    test.fail_test()

  test.pass_test()
27.673913
73
0.660644
170
1,273
4.894118
0.570588
0.02524
0.036058
0.040865
0
0
0
0
0
0
0
0.007737
0.187745
1,273
45
74
28.288889
0.796905
0.230951
0
0.08
0
0
0.240702
0.080579
0
0
0
0
0.04
1
0.04
false
0.04
0.16
0
0.24
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4aeb6ef2b04d214ccf1780ce3742b6d40d27fe53
2,572
py
Python
binary_tree/m_post_order_traversal.py
dhrubach/python-code-recipes
14356c6adb1946417482eaaf6f42dde4b8351d2f
[ "MIT" ]
null
null
null
binary_tree/m_post_order_traversal.py
dhrubach/python-code-recipes
14356c6adb1946417482eaaf6f42dde4b8351d2f
[ "MIT" ]
null
null
null
binary_tree/m_post_order_traversal.py
dhrubach/python-code-recipes
14356c6adb1946417482eaaf6f42dde4b8351d2f
[ "MIT" ]
null
null
null
######################################################################
# LeetCode Problem Number : 145
# Difficulty Level : Medium
# URL : https://leetcode.com/problems/binary-tree-postorder-traversal/
######################################################################
from binary_search_tree.tree_node import TreeNode


class BinaryTree:
    # runtime --> 77.59%, memory --> 50.59%
    def postOrderRecursive(self, root: TreeNode) -> [int]:
        if not root:
            return []

        res = []

        """ post-order traversal
            visit left sub-tree
            visit right sub-tree
            visit node
        """
        res += self.postOrderRecursive(root.left)
        res += self.postOrderRecursive(root.right)
        res.append(root.val)

        """ return visited node + child nodes """
        return res

    def postOrderIterative(self, root: TreeNode) -> [int]:
        if not root:
            return []

        ret = []

        """ on visiting a node, push 2 copies to the stack.
            use 1st copy to process the child nodes
            use 2nd copy to insert into result
        """
        st = [root] * 2

        while st:
            cur = st.pop()

            """ if current node is the last node in the stack,
                then visit its child nodes
                if current node is not the last node in the stack,
                then current node is the 2nd copy. Insert node into result list
            """
            if st and st[-1] is cur:
                """ insert right child node followed by left.
                    this ensures processing is done from left to right.
                """
                if cur.right:
                    st += [cur.right] * 2
                if cur.left:
                    st += [cur.left] * 2
            else:
                ret.append(cur.val)

        return ret

    # runtime --> 54.35%, memory --> 5.09%
    def postOrderIterativeReverse(self, root: TreeNode) -> [int]:
        if not root:
            return []

        res, stack = [], [root]

        while stack:
            cur = stack.pop()
            if cur:
                """ visit the nodes in reverse order
                    i.e. node -> right child node -> left child node
                    similar to right-first pre-order traversal
                """
                res.append(cur.val)
                stack.append(cur.left)
                stack.append(cur.right)

        """ reversed result will give post-order traversal """
        return res[::-1]
28.898876
70
0.47395
274
2,572
4.437956
0.368613
0.029605
0.039474
0.046875
0.129934
0.129934
0.129934
0.088816
0.060855
0
0
0.017024
0.383359
2,572
88
71
29.227273
0.749685
0.077372
0
0.166667
0
0
0
0
0
0
0
0
0
1
0.083333
false
0
0.027778
0
0.305556
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4aecb09acc6ad3252011c93a09793cb698638ff1
18,290
py
Python
dokuwiki.py
luminisward/python-dokuwiki
329862e6c91a79b2ad9f0b7616f7591459f2d4fd
[ "MIT" ]
null
null
null
dokuwiki.py
luminisward/python-dokuwiki
329862e6c91a79b2ad9f0b7616f7591459f2d4fd
[ "MIT" ]
null
null
null
dokuwiki.py
luminisward/python-dokuwiki
329862e6c91a79b2ad9f0b7616f7591459f2d4fd
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """This python module aims to manage `DokuWiki <https://www.dokuwiki.org/dokuwiki>`_ wikis by using the provided `XML-RPC API <https://www.dokuwiki.org/devel:xmlrpc>`_. It is compatible with python2.7 and python3+. Installation ------------ It is on `PyPi <https://pypi.python.org/pypi/dokuwiki>`_ so you can use the ``pip`` command to install it:: pip install dokuwiki Otherwise sources are in `github <https://github.com/fmenabe/python-dokuwiki>`_ """ import re import sys import base64 import weakref from xml.parsers.expat import ExpatError if sys.version_info[0] == 3: from xmlrpc.client import ServerProxy, Binary, Fault, Transport from urllib.parse import urlencode else: from xmlrpclib import ServerProxy, Binary, Fault, Transport from urllib import urlencode from datetime import datetime, timedelta ERR = 'XML or text declaration not at start of entity: line 2, column 0' _URL_RE = re.compile(r'(?P<proto>https?)://(?P<host>[^/]*)(?P<uri>/.*)?') def date(date): """DokuWiki returns dates of `xmlrpclib`/`xmlrpc.client` ``DateTime`` type and the format changes between DokuWiki versions ... This function convert *date* to a `datetime` object. """ date = date.value return (datetime.strptime(date[:-5], '%Y-%m-%dT%H:%M:%S') if len(date) == 24 else datetime.strptime(date, '%Y%m%dT%H:%M:%S')) def utc2local(date): """DokuWiki returns date with a +0000 timezone. This function convert *date* to the local time. """ date_offset = (datetime.now() - datetime.utcnow()) # Python < 2.7 don't have the 'total_seconds' method so calculate it by hand! date_offset = (date_offset.microseconds + (date_offset.seconds + date_offset.days * 24 * 3600) * 1e6) / 1e6 date_offset = int(round(date_offset / 60 / 60)) return date + timedelta(hours=date_offset) class DokuWikiError(Exception): """Exception raised by this module when there is an error.""" pass class CookiesTransport(Transport): """A Python3 xmlrpc.client.Transport subclass that retains cookies.""" def __init__(self): Transport.__init__(self) self._cookies = dict() def send_headers(self, connection, headers): if self._cookies: cookies = map(lambda x: x[0] + '=' + x[1], self._cookies.items()) connection.putheader("Cookie", "; ".join(cookies)) Transport.send_headers(self, connection, headers) def parse_response(self, response): """parse and store cookie""" try: for header in response.msg.get_all("Set-Cookie"): cookie = header.split(";", 1)[0] cookieKey, cookieValue = cookie.split("=", 1) self._cookies[cookieKey] = cookieValue finally: return Transport.parse_response(self, response) class CookiesTransport2(Transport): """A Python2 xmlrpclib.Transport subclass that retains cookies.""" def __init__(self): Transport.__init__(self) self._cookies = dict() def send_request(self, connection, handler, request_body): Transport.send_request(self, connection, handler, request_body) # set cookie below handler if self._cookies: cookies = map(lambda x: x[0] + '=' + x[1], self._cookies.items()) connection.putheader("Cookie", "; ".join(cookies)) def parse_response(self, response): """parse and store cookie""" try: for header in response.getheader("set-cookie").split(", "): # filter 'expire' information if not header.startswith("D"): continue cookie = header.split(";", 1)[0] cookieKey, cookieValue = cookie.split("=", 1) self._cookies[cookieKey] = cookieValue finally: return Transport.parse_response(self, response) class DokuWiki(object): """Initialize a connection to a DokuWiki wiki. 
*url*, *user* and *password* are respectively the URL, the login and the password for connecting to the wiki. *kwargs* are `xmlrpclib`/`xmlrpc.client` **ServerProxy** parameters. The exception `DokuWikiError` is raised if the authentification fails but others exceptions (like ``gaierror`` for invalid domain, ``ProtocolError`` for an invalid wiki, ...) are not catched. .. code:: try: wiki = dokuwiki.DokuWiki('URL', 'USER', 'PASSWORD', cookieAuth=False) except (DokuWikiError, Exception) as err: print('unable to connect: %s' % err) """ def __init__(self, url, user, password, cookieAuth=False, **kwargs): """Initialize the object by connecting to the XMLRPC server.""" # Initialize XMLRPC client. try: params = _URL_RE.search(url).groupdict() if cookieAuth == False: url = '%s://%s:%s@%s%s/lib/exe/xmlrpc.php' % ( params['proto'], user, password, params['host'], params['uri'] or '') else: url = '%s://%s%s/lib/exe/xmlrpc.php' % ( params['proto'], params['host'], params['uri'] or '') except AttributeError: raise DokuWikiError("invalid url '%s'" % url) if cookieAuth == False: self.proxy = ServerProxy(url, **kwargs) else: if sys.version_info[0] == 3: self.proxy = ServerProxy(url, CookiesTransport(), **kwargs) else: self.proxy = ServerProxy(url, CookiesTransport2(), **kwargs) # Force login to check the connection. if not self.login(user, password): raise DokuWikiError('invalid login or password!') # Set "namespaces" for pages and medias functions. self.pages = _Pages(weakref.ref(self)()) self.medias = _Medias(weakref.ref(self)()) def send(self, command, *args, **kwargs): """Generic method for executing an XML-RPC *command*. *args* and *kwargs* are the arguments and parameters needed by the command. """ args = list(args) if kwargs: args.append(kwargs) method = self.proxy for elt in command.split('.'): method = getattr(method, elt) try: return method(*args) except Fault as err: if err.faultCode == 121: return {} elif err.faultCode == 321: return [] raise DokuWikiError(err) except ExpatError as err: if str(err) != ERR: raise DokuWikiError(err) @property def version(self): """Property that returns the DokuWiki version of the remote Wiki.""" return self.send('dokuwiki.getVersion') @property def time(self): """Property that returns the current time at the remote wiki server as Unix timestamp. """ return self.send('dokuwiki.getTime') @property def xmlrpc_version(self): """Property that returns the XML RPC interface version of the remote Wiki. This is DokuWiki implementation specific and independent of the supported standard API version returned by ``wiki.getRPCVersionSupported``. """ return self.send('dokuwiki.getXMLRPCAPIVersion') @property def xmlrpc_supported_version(self): """Property that returns *2* with the supported RPC API version.""" return self.send('wiki.getRPCVersionSupported') @property def title(self): """Property that returns the title of the wiki.""" return self.send('dokuwiki.getTitle') def login(self, user, password): """Log to the wiki using *user* and *password* credentials. It returns a boolean that indicates if the user succesfully authenticate.""" return self.send('dokuwiki.login', user, password) def add_acl(self, scope, user, permission): """Add an `ACL <https://www.dokuwiki.org/acl>`_ rule that restricts the page/namespace *scope* to *user* (use *@group* syntax for groups) with *permission* level. It returns a boolean that indicate if the rule was correctly added. 
""" return self.send('plugin.acl.addAcl', scope, user, permission) def del_acl(self, scope, user): """Delete any ACL matching the given *scope* and *user* (or group if *@group* syntax is used). It returns a boolean that indicate if the rule was correctly removed. """ return self.send('plugin.acl.delAcl', scope, user) class _Pages(object): """This object regroup methods for managing pages of a DokuWiki. This object is accessible from the ``pages`` property of an `DokuWiki` instance:: wiki = dokuwiki.DokuWiki('URL', 'User', 'Password') wiki.pages.list() """ def __init__(self, dokuwiki): self._dokuwiki = dokuwiki def list(self, namespace='/', **options): """List all pages of the given *namespace*. Valid *options* are: * *depth*: (int) recursion level, 0 for all * *hash*: (bool) do an md5 sum of content * *skipacl*: (bool) list everything regardless of ACL """ return self._dokuwiki.send('dokuwiki.getPagelist', namespace, options) def changes(self, timestamp): """Returns a list of changes since given *timestamp*. For example, for returning all changes since *2016-01-01*:: from datetime import datetime wiki.pages.changes(datetime(2016, 1, 1).timestamp()) """ return self._dokuwiki.send('wiki.getRecentChanges', timestamp) def search(self, string): """Performs a fulltext search on *string* and returns the first 15 results. """ return self._dokuwiki.send('dokuwiki.search', string) def versions(self, page, offset=0): """Returns the available versions of *page*. *offset* can be used to list earlier versions in the history. """ return self._dokuwiki.send('wiki.getPageVersions', page, offset) def info(self, page, version=None): """Returns informations of *page*. Informations of the last version is returned if *version* is not set. """ return (self._dokuwiki.send('wiki.getPageInfoVersion', page, version) if version is not None else self._dokuwiki.send('wiki.getPageInfo', page)) def get(self, page, version=None): """Returns the content of *page*. The content of the last version is returned if *version* is not set. """ return (self._dokuwiki.send('wiki.getPageVersion', page, version) if version is not None else self._dokuwiki.send('wiki.getPage', page)) def append(self, page, content, **options): """Appends *content* text to *page*. Valid *options* are: * *sum*: (str) change summary * *minor*: (bool) whether this is a minor change """ return self._dokuwiki.send('dokuwiki.appendPage', page, content, options) def html(self, page, version=None): """Returns HTML content of *page*. The HTML content of the last version of the page is returned if *version* is not set. """ return (self._dokuwiki.send('wiki.getPageHTMLVersion', page, version) if version is not None else self._dokuwiki.send('wiki.getPageHTML', page)) def set(self, page, content, **options): """Set/replace the *content* of *page*. Valid *options* are: * *sum*: (str) change summary * *minor*: (bool) whether this is a minor change """ try: return self._dokuwiki.send('wiki.putPage', page, content, options) except ExpatError as err: # Sometime the first line of the XML response is blank which raise # the 'ExpatError' exception although the change has been done. This # allow to ignore the error. 
if str(err) != ERR: raise DokuWikiError(err) def delete(self, page): """Delete *page* by setting an empty content.""" return self.set(page, '') def lock(self, page): """Locks *page*.""" result = self._dokuwiki.send('dokuwiki.setLocks', lock=[page], unlock=[]) if result['lockfail']: raise DokuWikiError('unable to lock page') def unlock(self, page): """Unlocks *page*.""" result = self._dokuwiki.send('dokuwiki.setLocks', lock=[], unlock=[page]) if result['unlockfail']: raise DokuWikiError('unable to unlock page') def permission(self, page): """Returns the permission level of *page*.""" return self._dokuwiki.send('wiki.aclCheck', page) def links(self, page): """Returns a list of all links contained in *page*.""" return self._dokuwiki.send('wiki.listLinks', page) def backlinks(self, page): """Returns a list of all links referencing *page*.""" return self._dokuwiki.send('wiki.getBackLinks', page) class _Medias(object): """This object regroup methods for managing medias of a DokuWiki. This object is accessible from the ``medias`` property of an `DokuWiki` instance:: wiki = dokuwiki.DokuWiki('URL', 'User', 'Password') wiki.medias.list() """ def __init__(self, dokuwiki): self._dokuwiki = dokuwiki def list(self, namespace='/', **options): """Returns all medias of the given *namespace*. Valid *options* are: * *depth*: (int) recursion level, 0 for all * *skipacl*: (bool) skip acl checking * *pattern*: (str) check given pattern * *hash*: (bool) add hashes to result list """ return self._dokuwiki.send('wiki.getAttachments', namespace, options) def changes(self, timestamp): """Returns the list of medias changed since given *timestamp*. For example, for returning all changes since *2016-01-01*:: from datetime import datetime wiki.medias.changes(datetime(2016, 1, 1).timestamp()) """ return self._dokuwiki.send('wiki.getRecentMediaChanges', timestamp) def get(self, media, dirpath=None, filename=None, overwrite=False, b64decode=False): """Returns the binary data of *media* or save it to a file. If *dirpath* is not set the binary data is returned, otherwise the data is saved to a file. By default, the filename is the name of the media but it can be changed with *filename* parameter. *overwrite* parameter allow to overwrite the file if it already exists locally. """ import os data = self._dokuwiki.send('wiki.getAttachment', media) data = base64.b64decode(data) if b64decode else data.data if dirpath is None: return data if filename is None: filename = media.replace('/', ':').split(':')[-1] if not os.path.exists(dirpath): os.makedirs(dirpath) filepath = os.path.join(dirpath, filename) if os.path.exists(filepath) and not overwrite: raise FileExistsError("[Errno 17] File exists: '%s'" % filepath) with open(filepath, 'wb') as fhandler: fhandler.write(data) def info(self, media): """Returns informations of *media*.""" return self._dokuwiki.send('wiki.getAttachmentInfo', media) def add(self, media, filepath, overwrite=True): """Set *media* from local file *filepath*. *overwrite* parameter specify if the media must be overwrite if it exists remotely. """ with open(filepath, 'rb') as fhandler: self._dokuwiki.send('wiki.putAttachment', media, Binary(fhandler.read()), ow=overwrite) def set(self, media, _bytes, overwrite=True, b64encode=False): """Set *media* from *_bytes*. *overwrite* parameter specify if the media must be overwrite if it exists remotely. 
""" data = base64.b64encode(_bytes) if b64encode else Binary(_bytes) self._dokuwiki.send('wiki.putAttachment', media, data, ow=overwrite) def delete(self, media): """Delete *media*.""" return self._dokuwiki.send('wiki.deleteAttachment', media) class Dataentry(object): """Object that manage `data entries <https://www.dokuwiki.org/plugin:data>`_.""" @staticmethod def get(content, keep_order=False): """Get dataentry from *content*. *keep_order* indicates whether to return an ordered dictionnay.""" if keep_order: from collections import OrderedDict dataentry = OrderedDict() else: dataentry = {} found = False for line in content.split('\n'): if line.strip().startswith('---- dataentry'): found = True continue elif line == '----': break elif not found: continue line_split = line.split(':') key = line_split[0].strip() value = re.sub('#.*$', '', ':'.join(line_split[1:])).strip() dataentry.setdefault(key, value) if not found: raise DokuWikiError('no dataentry found') return dataentry @staticmethod def gen(name, data): """Generate dataentry *name* from *data*.""" return '---- dataentry %s ----\n%s\n----' % (name, '\n'.join( '%s:%s' % (attr, value) for attr, value in data.items())) @staticmethod def ignore(content): """Remove dataentry from *content*.""" page_content = [] start = False for line in content.split('\n'): if line == '----' and not start: start = True continue if start: page_content.append(line) return '\n'.join(page_content) if page_content else content
37.326531
89
0.601203
2,132
18,290
5.10272
0.211069
0.030885
0.035297
0.03493
0.37145
0.305819
0.270981
0.232926
0.218954
0.200202
0
0.008495
0.279169
18,290
489
90
37.402863
0.816672
0.346419
0
0.292683
0
0
0.098791
0.027156
0
0
0
0
0
1
0.182927
false
0.028455
0.04878
0
0.406504
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4aed13aa20c6ab391e3ffb7e313d6df343ae7084
1,449
py
Python
setup.py
lvgig/test-aide
60a9420062dd778ce9dad43993dd8ab4f300ac4e
[ "BSD-3-Clause" ]
2
2021-11-08T08:41:08.000Z
2021-11-08T09:11:24.000Z
setup.py
lvgig/test-aide
60a9420062dd778ce9dad43993dd8ab4f300ac4e
[ "BSD-3-Clause" ]
null
null
null
setup.py
lvgig/test-aide
60a9420062dd778ce9dad43993dd8ab4f300ac4e
[ "BSD-3-Clause" ]
null
null
null
import setuptools
import re

with open("README.md", "r") as fh:
    long_description = fh.read()

# get version from _version.py file, from below
# https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package
VERSION_FILE = "test_aide/_version.py"
version_file_str = open(VERSION_FILE, "rt").read()
VERSION_STR_RE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VERSION_STR_RE, version_file_str, re.M)
if mo:
    version = mo.group(1)
else:
    raise RuntimeError("Unable to find version string in %s." % (VERSION_FILE,))


def list_reqs(fname="requirements.txt"):
    with open(fname) as fd:
        return fd.read().splitlines()


setuptools.setup(
    name="test-aide",
    version=version,
    author="LV GI Data Science Team",
    author_email="#DataSciencePackages@lv.co.uk",
    description="Package of helper functions to be used for unit testing",
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=setuptools.find_packages(),
    install_requires=list_reqs(),
    python_requires=">=3.6",
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Operating System :: OS Independent",
        "License :: OSI Approved :: BSD License",
    ],
)
32.2
94
0.673568
183
1,449
5.169399
0.546448
0.05814
0.132135
0.109937
0
0
0
0
0
0
0
0.013502
0.182195
1,449
44
95
32.931818
0.78481
0.095238
0
0
0
0
0.376147
0.038226
0
0
0
0
0
1
0.027778
false
0
0.055556
0
0.111111
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4aed5c4d1497088a992494a4109f38cb6b27e78e
510
py
Python
examples/pylab_examples/matshow.py
jbbrokaw/matplotlib
86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427
[ "MIT", "BSD-3-Clause" ]
16
2016-06-14T19:45:35.000Z
2020-11-30T19:02:58.000Z
lib/mpl_examples/pylab_examples/matshow.py
yingkailiang/matplotlib
255a79b106c98c1904489afe6a754e4d943179d6
[ "MIT", "BSD-3-Clause" ]
7
2015-05-08T19:36:25.000Z
2015-06-30T15:32:17.000Z
lib/mpl_examples/pylab_examples/matshow.py
yingkailiang/matplotlib
255a79b106c98c1904489afe6a754e4d943179d6
[ "MIT", "BSD-3-Clause" ]
14
2015-10-05T04:15:46.000Z
2020-06-11T18:06:02.000Z
"""Simple matshow() example.""" from matplotlib.pylab import * def samplemat(dims): """Make a matrix with all zeros and increasing elements on the diagonal""" aa = zeros(dims) for i in range(min(dims)): aa[i, i] = i return aa # Display 2 matrices of different sizes dimlist = [(12, 12), (15, 35)] for d in dimlist: matshow(samplemat(d)) # Display a random matrix with a specified figure number and a grayscale # colormap matshow(rand(64, 64), fignum=100, cmap=cm.gray) show()
22.173913
78
0.670588
78
510
4.384615
0.692308
0.05848
0
0
0
0
0
0
0
0
0
0.039604
0.207843
510
22
79
23.181818
0.806931
0.417647
0
0
0
0
0
0
0
0
0
0
0
1
0.090909
false
0
0.090909
0
0.272727
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4aee10083d95f61f20711f9c9064a70b35ea7926
4,445
py
Python
setup.py
HeyLifeHD/rp-bp
9c59b1bc0267400747477467c45f96364d5528e1
[ "MIT" ]
6
2016-05-16T18:52:41.000Z
2021-12-31T06:27:29.000Z
setup.py
HeyLifeHD/rp-bp
9c59b1bc0267400747477467c45f96364d5528e1
[ "MIT" ]
110
2016-06-22T13:24:39.000Z
2022-02-07T09:29:14.000Z
setup.py
HeyLifeHD/rp-bp
9c59b1bc0267400747477467c45f96364d5528e1
[ "MIT" ]
5
2017-05-22T12:21:51.000Z
2022-02-06T10:32:56.000Z
#! /usr/bin/env python3 import importlib import logging import os import subprocess from setuptools import setup from setuptools.command.install import install as install from setuptools.command.develop import develop as develop logger = logging.getLogger(__name__) stan_model_files = [ os.path.join("nonperiodic", "no-periodicity.stan"), os.path.join("nonperiodic", "start-high-high-low.stan"), os.path.join("nonperiodic", "start-high-low-high.stan"), os.path.join("periodic", "start-high-low-low.stan"), os.path.join("untranslated", "gaussian-naive-bayes.stan"), os.path.join("translated", "periodic-gaussian-mixture.stan") ] stan_pickle_files = [ os.path.join("nonperiodic", "no-periodicity.pkl"), os.path.join("nonperiodic", "start-high-high-low.pkl"), os.path.join("nonperiodic", "start-high-low-high.pkl"), os.path.join("periodic", "start-high-low-low.pkl"), os.path.join("untranslated", "gaussian-naive-bayes.pkl"), os.path.join("translated", "periodic-gaussian-mixture.pkl") ] def _pickle_it(stan, pickle): import shlex dirname = os.path.dirname(pickle) if not os.path.exists(dirname): os.makedirs(dirname) cmd = "pickle-stan {} {}".format(shlex.quote(stan), shlex.quote(pickle)) logging.info(cmd) subprocess.call(cmd, shell=True) def _post_install(force_recompile): import site importlib.reload(site) import pbio.ribo.ribo_filenames as filenames import pbio.misc.shell_utils as shell_utils smf = [os.path.join("rpbp_models", s) for s in stan_model_files] models_base = filenames.get_default_models_base() spf = [os.path.join(models_base, s) for s in stan_pickle_files] # Compile and pickle the Stan models if force_recompile: for stan, pickle in zip(smf, spf): _pickle_it(stan, pickle) else: # default for stan, pickle in zip(smf, spf): if os.path.exists(pickle): msg = "A model already exists at: {}. 
Skipping.".format(pickle) logging.warning(msg) continue _pickle_it(stan, pickle) # Check for the prerequisite programs programs = ['flexbar'] shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='flexbar', logger=logger) programs = ['STAR'] shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='STAR', logger=logger) programs = ['bowtie2', 'bowtie2-build-s'] shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='bowtie2', logger=logger) programs = ['samtools'] shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='SAMtools', logger=logger) class SetupInstall(install): user_options = install.user_options + [ ('force-recompile', None, 'Set this flag to recompile the Stan models'), ] def initialize_options(self): install.initialize_options(self) self.force_recompile = None def finalize_options(self): install.finalize_options(self) def run(self): force_recompile = self.force_recompile # 0 or 1 level = logging.getLevelName("INFO") logging.basicConfig(level=level, format='%(levelname)-8s : %(message)s') install.run(self) # skip if RTD if not os.environ.get('READTHEDOCS') == 'True': _post_install(force_recompile) class SetupDevelop(develop): user_options = develop.user_options + [ ('force-recompile', None, 'Set this flag to recompile the Stan models'), ] def initialize_options(self): develop.initialize_options(self) self.force_recompile = None def finalize_options(self): develop.finalize_options(self) def run(self): force_recompile = self.force_recompile # 0 or 1 level = logging.getLevelName("INFO") logging.basicConfig(level=level, format='%(levelname)-8s : %(message)s') develop.run(self) # skip if RTD if not os.environ.get('READTHEDOCS') == 'True': _post_install(force_recompile) setup( cmdclass={ 'install': SetupInstall, 'develop': SetupDevelop } )
29.633333
80
0.64027
532
4,445
5.201128
0.240602
0.036863
0.050596
0.045537
0.559812
0.549331
0.549331
0.438742
0.361402
0.361402
0
0.002965
0.24117
4,445
149
81
29.832215
0.817373
0.031271
0
0.294118
0
0
0.17892
0.057469
0
0
0
0
0
1
0.078431
false
0
0.117647
0
0.235294
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4aee208ed627e09e244d2f3b0703567eec906294
9,909
py
Python
utils/data_utils.py
BorisMansencal/quickNAT_pytorch
1853afbe409f2fec6db298c70a3dd0ae088091f0
[ "MIT" ]
null
null
null
utils/data_utils.py
BorisMansencal/quickNAT_pytorch
1853afbe409f2fec6db298c70a3dd0ae088091f0
[ "MIT" ]
null
null
null
utils/data_utils.py
BorisMansencal/quickNAT_pytorch
1853afbe409f2fec6db298c70a3dd0ae088091f0
[ "MIT" ]
null
null
null
import os import h5py import nibabel as nb import numpy as np import torch import torch.utils.data as data from torchvision import transforms import utils.preprocessor as preprocessor # transform_train = transforms.Compose([ # transforms.RandomCrop(200, padding=56), # transforms.ToTensor(), # ]) class ImdbData(data.Dataset): def __init__(self, X, y, w, transforms=None): self.X = X if len(X.shape) == 4 else X[:, np.newaxis, :, :] self.y = y self.w = w self.transforms = transforms def __getitem__(self, index): img = torch.from_numpy(self.X[index]) label = torch.from_numpy(self.y[index]) weight = torch.from_numpy(self.w[index]) return img, label, weight def __len__(self): return len(self.y) def get_imdb_dataset(data_params): data_train = h5py.File(os.path.join(data_params['data_dir'], data_params['train_data_file']), 'r') label_train = h5py.File(os.path.join(data_params['data_dir'], data_params['train_label_file']), 'r') class_weight_train = h5py.File(os.path.join(data_params['data_dir'], data_params['train_class_weights_file']), 'r') weight_train = h5py.File(os.path.join(data_params['data_dir'], data_params['train_weights_file']), 'r') data_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_data_file']), 'r') label_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_label_file']), 'r') class_weight_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_class_weights_file']), 'r') weight_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_weights_file']), 'r') return (ImdbData(data_train['data'][()], label_train['label'][()], class_weight_train['class_weights'][()]), ImdbData(data_test['data'][()], label_test['label'][()], class_weight_test['class_weights'][()])) def load_dataset(file_paths, orientation, remap_config, return_weights=False, reduce_slices=False, remove_black=False): print("Loading and preprocessing data...") volume_list, labelmap_list, headers, class_weights_list, weights_list = [], [], [], [], [] for file_path in file_paths: volume, labelmap, class_weights, weights, header = load_and_preprocess(file_path, orientation, remap_config=remap_config, reduce_slices=reduce_slices, remove_black=remove_black, return_weights=return_weights) volume_list.append(volume) labelmap_list.append(labelmap) if return_weights: class_weights_list.append(class_weights) weights_list.append(weights) headers.append(header) print("#", end='', flush=True) print("100%", flush=True) if return_weights: return volume_list, labelmap_list, class_weights_list, weights_list, headers else: return volume_list, labelmap_list, headers def load_and_preprocess(file_path, orientation, remap_config, reduce_slices=False, remove_black=False, return_weights=False): volume, labelmap, header = load_data(file_path, orientation) volume, labelmap, class_weights, weights = preprocess(volume, labelmap, remap_config=remap_config, reduce_slices=reduce_slices, remove_black=remove_black, return_weights=return_weights) return volume, labelmap, class_weights, weights, header def load_and_preprocess_eval(file_path, orientation, notlabel=True): volume_nifty = nb.load(file_path[0]) header = volume_nifty.header volume = volume_nifty.get_fdata() if notlabel: volume = (volume - np.min(volume)) / (np.max(volume) - np.min(volume)) else: volume = np.round(volume) if orientation == "COR": volume = volume.transpose((2, 0, 1)) elif orientation == "AXI": volume = volume.transpose((1, 2, 0)) return volume, header def load_data(file_path, orientation): volume_nifty, 
labelmap_nifty = nb.load(file_path[0]), nb.load(file_path[1]) volume, labelmap = volume_nifty.get_fdata(), labelmap_nifty.get_fdata() volume = (volume - np.min(volume)) / (np.max(volume) - np.min(volume)) volume, labelmap = preprocessor.rotate_orientation(volume, labelmap, orientation) return volume, labelmap, volume_nifty.header def preprocess(volume, labelmap, remap_config, reduce_slices=False, remove_black=False, return_weights=False): if reduce_slices: volume, labelmap = preprocessor.reduce_slices(volume, labelmap) if remap_config: labelmap = preprocessor.remap_labels(labelmap, remap_config) if remove_black: volume, labelmap = preprocessor.remove_black(volume, labelmap) if return_weights: class_weights, weights = preprocessor.estimate_weights_mfb(labelmap) return volume, labelmap, class_weights, weights else: return volume, labelmap, None, None # def load_file_paths(data_dir, label_dir, volumes_txt_file=None): # """ # This function returns the file paths combined as a list where each element is a 2 element tuple, 0th being data and 1st being label. # It should be modified to suit the need of the project # :param data_dir: Directory which contains the data files # :param label_dir: Directory which contains the label files # :param volumes_txt_file: (Optional) Path to the a csv file, when provided only these data points will be read # :return: list of file paths as string # """ # # volume_exclude_list = ['IXI290', 'IXI423'] # if volumes_txt_file: # with open(volumes_txt_file) as file_handle: # volumes_to_use = file_handle.read().splitlines() # else: # volumes_to_use = [name for name in os.listdir(data_dir) if # name.startswith('IXI') and name not in volume_exclude_list] # # file_paths = [ # [os.path.join(data_dir, vol, 'mri/orig.mgz'), os.path.join(label_dir, vol, 'mri/aseg.auto_noCCseg.mgz')] # for # vol in volumes_to_use] # return file_paths def load_file_paths(data_dir, label_dir, data_id, volumes_txt_file=None): """ This function returns the file paths combined as a list where each element is a 2 element tuple, 0th being data and 1st being label. 
It should be modified to suit the need of the project :param data_dir: Directory which contains the data files :param label_dir: Directory which contains the label files :param data_id: A flag indicates the name of Dataset for proper file reading :param volumes_txt_file: (Optional) Path to the a csv file, when provided only these data points will be read :return: list of file paths as string """ if volumes_txt_file: with open(volumes_txt_file) as file_handle: volumes_to_use = file_handle.read().splitlines() else: volumes_to_use = [name for name in os.listdir(data_dir)] if data_id == "MALC": file_paths = [ [os.path.join(data_dir, vol, 'mri/orig.mgz'), os.path.join(label_dir, vol + '_glm.mgz')] for vol in volumes_to_use] elif data_id == "ADNI": file_paths = [ [os.path.join(data_dir, vol, 'orig.mgz'), os.path.join(label_dir, vol, 'Lab_con.mgz')] for vol in volumes_to_use] elif data_id == "CANDI": file_paths = [ [os.path.join(data_dir, vol + '/' + vol + '_1.mgz'), os.path.join(label_dir, vol + '/' + vol + '_1_seg.mgz')] for vol in volumes_to_use] elif data_id == "IBSR": file_paths = [ [os.path.join(data_dir, vol, 'mri/orig.mgz'), os.path.join(label_dir, vol + '_map.nii.gz')] for vol in volumes_to_use] elif data_id == "BORIS": #BORIS file_paths = [ [os.path.join(data_dir, vol), os.path.join(label_dir, vol.replace('.nii', '_seg.nii'))] for vol in volumes_to_use] else: raise ValueError("Invalid entry, valid options are MALC, ADNI, CANDI and IBSR") return file_paths def load_file_paths_eval(data_dir, volumes_txt_file, dir_struct): """ This function returns the file paths combined as a list where each element is a 2 element tuple, 0th being data and 1st being label. It should be modified to suit the need of the project :param data_dir: Directory which contains the data files :param volumes_txt_file: Path to the a csv file, when provided only these data points will be read :param dir_struct: If the id_list is in FreeSurfer style or normal :return: list of file paths as string """ with open(volumes_txt_file) as file_handle: volumes_to_use = file_handle.read().splitlines() if dir_struct == "FS": file_paths = [ [os.path.join(data_dir, vol, 'mri/orig.mgz')] for vol in volumes_to_use] elif dir_struct == "Linear": file_paths = [ [os.path.join(data_dir, vol)] for vol in volumes_to_use] elif dir_struct == "part_FS": file_paths = [ [os.path.join(data_dir, vol, 'orig.mgz')] for vol in volumes_to_use] else: raise ValueError("Invalid entry, valid options are FS and Linear") return file_paths
41.460251
138
0.631749
1,304
9,909
4.566718
0.148006
0.029387
0.038623
0.039966
0.645676
0.595466
0.536188
0.515701
0.475063
0.45827
0
0.006042
0.265113
9,909
238
139
41.634454
0.811728
0.221617
0
0.305195
0
0
0.073052
0.006175
0
0
0
0
0
1
0.071429
false
0
0.051948
0.006494
0.207792
0.019481
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4af2582d62d2fd8906d1b6bfaa4cb05ec6512096
1,245
py
Python
playground/check_equal.py
INK-USC/hypter
732551e1e717b66ad26ba538593ed184957ecdea
[ "MIT" ]
11
2021-07-16T15:49:39.000Z
2021-12-17T14:46:25.000Z
playground/check_equal.py
INK-USC/hypter
732551e1e717b66ad26ba538593ed184957ecdea
[ "MIT" ]
null
null
null
playground/check_equal.py
INK-USC/hypter
732551e1e717b66ad26ba538593ed184957ecdea
[ "MIT" ]
1
2021-08-04T07:21:02.000Z
2021-08-04T07:21:02.000Z
import json

d1 = {}
with open("/home/qinyuan/zs/out/bart-large-with-description-grouped-1e-5-outerbsz4-innerbsz32-adapterdim4-unfreeze-dec29/test_predictions.jsonl") as fin:
    for line in fin:
        d = json.loads(line)
        d1[d["id"]] = d["output"][0]["answer"]

d2 = {}
dq = {}
with open("/home/qinyuan/zs/out/bart-large-zsre-with-description-LR2e-5-FREQ32-dec27/test_predictions_submitted.jsonl") as fin:
    for line in fin:
        d = json.loads(line)
        d2[d["id"]] = d["output"][0]["answer"]
        dq[d["id"]] = d["input"]

d3 = {}
with open("/home/qinyuan/zs/data/structured_zeroshot-test.jsonl") as fin:
    for line in fin:
        d = json.loads(line)
        d3[d["id"]] = [item["answer"] for item in d["output"]]

count = 0
win1 = 0
win2 = 0
for key in d1.keys():
    if d1[key] != d2[key]:
        print("{}. {}. {}. {}. {}".format(key, dq[key], d1[key], d2[key], d3[key]))
        count += 1
        if d1[key] in d3[key] and d2[key] not in d3[key]:
            win1 += 1
            print(d1[key])
            print(d2[key])
        if d2[key] in d3[key] and d1[key] not in d3[key]:
            win2 += 1
            print(d1[key])
            print(d2[key])

print(count)
print(win1)
print(win2)
27.065217
153
0.553414
192
1,245
3.567708
0.302083
0.043796
0.040876
0.083212
0.471533
0.364964
0.315328
0.254015
0.157664
0.157664
0
0.056156
0.256225
1,245
45
154
27.666667
0.683585
0
0
0.277778
0
0.055556
0.287208
0.233307
0
0
0
0
0
1
0
false
0
0.027778
0
0.027778
0.222222
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4af386aea4c2e177a4c714ca6af54611fc4df7d6
673
py
Python
exercicios-Python/ex083.py
pedrosimoes-programmer/exercicios-python
150de037496d63d76086678d87425a8ccfc74573
[ "MIT" ]
null
null
null
exercicios-Python/ex083.py
pedrosimoes-programmer/exercicios-python
150de037496d63d76086678d87425a8ccfc74573
[ "MIT" ]
null
null
null
exercicios-Python/ex083.py
pedrosimoes-programmer/exercicios-python
150de037496d63d76086678d87425a8ccfc74573
[ "MIT" ]
null
null
null
# Bug-free version ("Forma sem bugs")
expressao = (str(input('Digite a expressão: ')))
pilhaParenteses = []
for v in expressao:
    if v == '(':
        pilhaParenteses.append('(')
    elif v == ')':
        if len(pilhaParenteses) > 0:
            pilhaParenteses.pop()
        else:
            pilhaParenteses.append(')')
            break
if len(pilhaParenteses) == 0:
    print(f'A expressão {expressao} está válida.')
else:
    print(f'A expressão {expressao} está inválida!')

# Buggy version ("Forma com bugs")
#expressao = (str(input('Digite a expressão: ')))
#if expressao.count('(') == expressao.count(')'):
#    print('Sua expressão está válida.')
#else:
#    print('Sua expressão está inválida!')
28.041667
52
0.601783
74
673
5.472973
0.391892
0.098765
0.079012
0.103704
0.325926
0.325926
0.182716
0
0
0
0
0.003891
0.236256
673
23
53
29.26087
0.784047
0.313522
0
0.133333
0
0
0.216336
0
0
0
0
0
0
1
0
false
0
0
0
0
0.133333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4af4341b8d96ec6fde46ee878b92d71af06be79a
1,607
py
Python
src/inspectortodo/todo.py
code-acrobat/InspectorTodo
342bd0840d4f087cf2914f906ebc69bf2b21d9ce
[ "Apache-2.0" ]
8
2018-05-28T08:41:01.000Z
2022-03-02T08:54:54.000Z
src/inspectortodo/todo.py
code-acrobat/InspectorTodo
342bd0840d4f087cf2914f906ebc69bf2b21d9ce
[ "Apache-2.0" ]
9
2018-08-04T20:16:46.000Z
2022-03-08T14:29:47.000Z
src/inspectortodo/todo.py
code-acrobat/InspectorTodo
342bd0840d4f087cf2914f906ebc69bf2b21d9ce
[ "Apache-2.0" ]
3
2018-05-29T08:00:29.000Z
2022-02-23T11:02:58.000Z
# Copyright 2018 TNG Technology Consulting GmbH, Unterföhring, Germany
# Licensed under the Apache License, Version 2.0 - see LICENSE.md in project root directory

import logging

from xml.sax.saxutils import escape

log = logging.getLogger()


class Todo:

    def __init__(self, file_path, line_number, content):
        self.file_path = file_path
        self.line_number = line_number
        self.content = content
        self.is_valid = True
        self.error_reason = None

    def __str__(self):
        return 'Todo in file ' + self.file_path + ':' + str(self.line_number) + ' | ' + self.content

    def mark_as_valid(self):
        self.is_valid = True
        self.error_reason = None

    def mark_as_invalid(self, error_reason):
        self.is_valid = False
        self.error_reason = error_reason

    def print(self, show_valid=False):
        if not show_valid and self.is_valid:
            return

        log.error('[REASON] %s' % self.error_reason)
        log.error('[FILE] %s' % self.file_path)
        log.error('[LINE] %s' % self.line_number)
        log.error('[CONTENT] %s' % self.content)

    def print_xml(self, xml_file):
        if self.is_valid:
            xml_file.write('\t<testcase classname="{}" name="line {}" />\n'.format(self.file_path, self.line_number))
        else:
            xml_file.write('\t<testcase classname="{}" name="line {}" >\n'.format(self.file_path, self.line_number))
            xml_file.write('\t\t<failure message="{}">{}</failure>\n'.format(self.error_reason, escape(self.content)))
            xml_file.write('\t</testcase>\n')
34.934783
118
0.632856
217
1,607
4.474654
0.317972
0.090628
0.07415
0.053553
0.266735
0.222451
0.222451
0.222451
0.222451
0.146241
0
0.00489
0.236465
1,607
45
119
35.711111
0.786471
0.09832
0
0.125
0
0
0.148686
0.018672
0
0
0
0
0
1
0.1875
false
0
0.0625
0.03125
0.34375
0.0625
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4af5891fa135d7fd02c534a37ddba2e1d64a9e74
9,595
py
Python
generators.py
FabLabUTFSM/fusm_usage_report
92b18ad81f97482d6e8428b6c7cbdfc23d0ca440
[ "MIT" ]
null
null
null
generators.py
FabLabUTFSM/fusm_usage_report
92b18ad81f97482d6e8428b6c7cbdfc23d0ca440
[ "MIT" ]
null
null
null
generators.py
FabLabUTFSM/fusm_usage_report
92b18ad81f97482d6e8428b6c7cbdfc23d0ca440
[ "MIT" ]
null
null
null
import dash_core_components as dcc import dash_html_components as html import plotly.graph_objs as go import plotly.express as px from plotly.subplots import make_subplots import pandas as pd import math from datetime import datetime, time from utils import MONTH_NAMES, month_range def section(title, content, gray=False): return html.Section(className=f'hero is-fullheight is-medium {"has-background-grey-lighter" if gray else ""}', children=[ html.Div(className='hero-body', children=[ html.Div(className='container', children=[ html.Div(className='columns is-centered', children=[ html.Div(className='column is-four-fifths is-full-mobile', children=[ html.Div(className='level', children=[ html.H2(title, className='title') ]), ] + content) ]) ]) ]) ]) def quality_index(df): indexes = df.sort_values('Valor', ascending=False).fillna('?').values return html.Div(className='columns is-multiline is-4 is-variable', children=[ html.Div(className=f'column is-one-quarter index-container {"unknown-data" if i[1] == "?" else ""}', children=[ html.H1(i[1], className='title'), html.H2(i[0], className='subtitle') ]) for i in indexes ]) def month_selector(df, first_month=None): current_month = datetime.now().month return html.Div(dcc.RangeSlider( id='month-range-slider', marks={i+1: MONTH_NAMES[i] for i in range(first_month-1, current_month)}, min=first_month, max=current_month, value=[current_month-2,current_month], pushable=1 ), className='slider-frame') def point_list(items): return html.Ul([html.Li(item) for item in items]) def first(): return html.Section(className='hero is-fullheight', children=[ html.Div(className='hero-body', children=[ html.Div(className='container', children=[ html.Div(className='columns is-vcentered is-centered', children=[ html.Div(className='column is-5', children=[ html.Figure(className='image is-4by4', children=[ html.Img(src='/indicadores/assets/logo.png', alt='FabLab UTFSM'), ]), ]), html.Div(className='column is-5 main-title', children=[ html.H1('Informe de Gestión de Operaciones', className='title') ]) ]) ]), ]) ]) def last(): return html.Footer(className='footer has-background-white', children=[ html.Div(className='content has-text-centered', children=[ html.Img(src='/indicadores/assets/footer.png', alt='FabLab UTFSM'), html.P(className='is-size-7', children=[ 'FabLab UTFSM 2019', html.Br(), 'UTFSM Campus San Joaquín, Edificio C', html.Br(), 'Av. 
Vicuña Mackenna 3939, Santiago de Chile', html.Br(), 'Desarrollado bajo licencia MIT' ]) ]) ]) def fig_records(df, months=None, stacked=False): machine_list = df['Tipo Máquina'].unique() months = month_range(months) def create_frame(df, serie_name): count = df['Tipo Máquina'].value_counts() frame = pd.DataFrame({'Tipo de Máquina': machine_list}) frame[serie_name] = [count.get(machine, 0) for machine in machine_list] return frame extras = {'barmode': 'relative' if stacked else 'group'} figure = go.Figure() for m in months: name = MONTH_NAMES[m-1] frame = create_frame(df[df.index.month == m], name) figure.add_trace(go.Bar(x=frame['Tipo de Máquina'], y=frame[name], name=name, hoverinfo='name+y')) if stacked and months: frame = create_frame(df[df.index.month.isin(months)], 'Total') figure.add_trace(go.Scatter( x=frame['Tipo de Máquina'], y=frame['Total'], text=frame['Total'], textposition='top center', mode='text', showlegend=False, hoverinfo='skip' )) figure.update_layout(yaxis={ 'title': 'Número de registros'}, **extras) return figure def fig_hours(df, months=None, stacked=False): machine_list = df['Tipo Máquina'].unique() months=month_range(months) def create_frame(df, serie_name): count = df.groupby('Tipo Máquina').sum()['Tiempo de uso en minutos'].divide(60).round(0) frame = pd.DataFrame({'Tipo de Máquina': machine_list}) frame[serie_name] = [count.get(machine, 0) for machine in machine_list] return frame if months and type(months) == list: df = df[df.index.month.isin(months)] frame = create_frame(df, 'Total') figure = go.Figure() extras = {'barmode': 'relative' if stacked else 'group'} for m in months: name = MONTH_NAMES[m-1] frame = create_frame(df[df.index.month == m], name) figure.add_trace(go.Bar(y=frame['Tipo de Máquina'], x=frame[name], name=name, hoverinfo='name+x', orientation='h')) if stacked and months: frame = create_frame(df[df.index.month.isin(months)], 'Total') figure.add_trace(go.Scatter( y=frame['Tipo de Máquina'], x=frame['Total'], text=frame['Total'], textposition='middle right', mode='text', showlegend=False, hoverinfo='skip' )) figure.update_layout(xaxis={ 'title': f'Horas de uso {"total" if stacked else ""}'}, **extras) return figure def cap_per_machine_per_month(month_caps, machine, month): this_month = month_caps[month_caps['Mes'] == month] machine_count = {'Impresora 3D': 5, 'Cortadora Láser': 2, 'Router CNC': 3, 'Torno': 1, 'Cirqoid': 1} return (this_month['Dias'] * this_month['Horas']).values[0] * 60 * machine_count[machine] def fig_total_capacity_2(df, month_caps, months): machine_list = df['Tipo Máquina'].unique() months = month_range(months) month_names = [MONTH_NAMES[m-1] for m in months] figure = go.Figure() for machine in machine_list: texts = [] caps = [] for month in months: total_cap = cap_per_machine_per_month(month_caps, machine, month) hours = total_cap // 60 used_cap = df[df.index.month==month].groupby('Tipo Máquina')['Tiempo de uso en minutos'].sum().divide(total_cap).multiply(100).round(2).get(machine, 0) caps.append(used_cap) texts.append(f'{used_cap}% utilizado de una capacidad total de {hours} horas.') figure.add_trace(go.Bar(x=month_names, y=caps, name=machine, hovertext=texts)) figure.update_layout(barmode='group', yaxis=dict(type='linear', ticksuffix='%', title='Capacidad Utilizada')) return figure """ TODO: Terminar el heatmap de alguna manera... 
def fig_uses(df, months): dias = ['Lunes', 'Martes', 'Miércoles', 'Jueves', 'Viernes'] days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'] data = df[df.index.month.isin(month_range(months))] figure = go.Figure() times = data.groupby([data.index.weekday_name, pd.Grouper(freq='60min', key='Hora Inicio')]).fillna(0).sum().reset_index() day_times = times[times['Marca temporal'] == 'Monday']['Hora Inicio'].dt.time z_dict = dict() for i, d in enumerate(days): z_dict.update({dias[i]: times[times['Marca temporal'] == d]['Tiempo de uso en minutos'].fillna(0).values}) z_values = pd.DataFrame(z_dict).values figure.add_trace(go.Heatmap( x=dias, y=day_times, z=z_values)) return figure """ def trace_context_use(df, level=None, **kwargs): grouped = None if not level: grouped = df.groupby('Contexto 1') else: grouped = df[df['Contexto 1'] == level].groupby('Contexto 2') context_data = grouped.sum()['Tiempo de uso en minutos'] return go.Pie(labels=context_data.index, values=context_data.values, **kwargs) def fig_contexts_use(df, months, level, **kwargs): col_count = 3 row_count = math.ceil(len(month_range(months))/col_count) figure = make_subplots(row_count, col_count, specs=[[{'type':'domain'} for c in range(col_count)] for r in range(row_count)], subplot_titles=[MONTH_NAMES[m-1] for m in month_range(months)]) def take_month(months): for m in month_range(months): yield trace_context_use(df[df.index.month == m], level, name=MONTH_NAMES[m-1]) pie_factory = take_month(months) try: for r in range(row_count): for c in range(col_count): figure.add_trace(next(pie_factory), r+1, c+1) except StopIteration as stop: pass return figure def records_per_machine(df, months=None, stacked=False): return dcc.Graph(figure=fig_records(df, months=months, stacked=stacked), style={'height': '80vh'}) def time_per_machine(df, months=None, stacked=False): return dcc.Graph(figure=fig_hours(df, months=months, stacked=stacked), style={'height': '80vh'}) def machine_capacity(df, caps, months=None): return dcc.Graph(figure=fig_total_capacity_2(df, caps, months), style={'height': '80vh'}) #def uses(df, months): # return dcc.Graph(figure=fig_uses(df, months), style={'height': '80vh'}) def contexts(df, months, level=None): return dcc.Graph(figure=fig_contexts_use(df, months, level), style={'height': '80vh'})
41.004274
163
0.619906
1,256
9,595
4.623408
0.234873
0.03513
0.035819
0.045462
0.433615
0.382125
0.293095
0.256242
0.242122
0.193215
0
0.009962
0.236269
9,595
234
164
41.004274
0.782478
0.010005
0
0.359116
0
0.005525
0.16219
0.010144
0
0
0
0.004274
0
1
0.104972
false
0.005525
0.049724
0.044199
0.254144
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4af6882f3b0de2bc194a5844807fd94589dcf8e9
119,159
py
Python
Lib/fontTools/designspaceLib/__init__.py
guorenxi/fonttools
cefb41e6c261eeff0062a7b4017061982ed87aa7
[ "Apache-2.0", "MIT" ]
null
null
null
Lib/fontTools/designspaceLib/__init__.py
guorenxi/fonttools
cefb41e6c261eeff0062a7b4017061982ed87aa7
[ "Apache-2.0", "MIT" ]
null
null
null
Lib/fontTools/designspaceLib/__init__.py
guorenxi/fonttools
cefb41e6c261eeff0062a7b4017061982ed87aa7
[ "Apache-2.0", "MIT" ]
null
null
null
from __future__ import annotations import collections import copy import itertools import math import os import posixpath from io import BytesIO, StringIO from textwrap import indent from typing import Any, Dict, List, MutableMapping, Optional, Tuple, Union from fontTools.misc import etree as ET from fontTools.misc import plistlib from fontTools.misc.loggingTools import LogMixin from fontTools.misc.textTools import tobytes, tostr """ designSpaceDocument - read and write designspace files """ __all__ = [ 'DesignSpaceDocumentError', 'DesignSpaceDocument', 'SourceDescriptor', 'InstanceDescriptor', 'AxisDescriptor', 'RuleDescriptor', 'BaseDocReader', 'BaseDocWriter' ] # ElementTree allows to find namespace-prefixed elements, but not attributes # so we have to do it ourselves for 'xml:lang' XML_NS = "{http://www.w3.org/XML/1998/namespace}" XML_LANG = XML_NS + "lang" def posix(path): """Normalize paths using forward slash to work also on Windows.""" new_path = posixpath.join(*path.split(os.path.sep)) if path.startswith('/'): # The above transformation loses absolute paths new_path = '/' + new_path elif path.startswith(r'\\'): # The above transformation loses leading slashes of UNC path mounts new_path = '//' + new_path return new_path def posixpath_property(private_name): """Generate a propery that holds a path always using forward slashes.""" def getter(self): # Normal getter return getattr(self, private_name) def setter(self, value): # The setter rewrites paths using forward slashes if value is not None: value = posix(value) setattr(self, private_name, value) return property(getter, setter) class DesignSpaceDocumentError(Exception): def __init__(self, msg, obj=None): self.msg = msg self.obj = obj def __str__(self): return str(self.msg) + ( ": %r" % self.obj if self.obj is not None else "") class AsDictMixin(object): def asdict(self): d = {} for attr, value in self.__dict__.items(): if attr.startswith("_"): continue if hasattr(value, "asdict"): value = value.asdict() elif isinstance(value, list): value = [ v.asdict() if hasattr(v, "asdict") else v for v in value ] d[attr] = value return d class SimpleDescriptor(AsDictMixin): """ Containers for a bunch of attributes""" # XXX this is ugly. The 'print' is inappropriate here, and instead of # assert, it should simply return True/False def compare(self, other): # test if this object contains the same data as the other for attr in self._attrs: try: assert(getattr(self, attr) == getattr(other, attr)) except AssertionError: print("failed attribute", attr, getattr(self, attr), "!=", getattr(other, attr)) def __repr__(self): attrs = [f"{a}={repr(getattr(self, a))}," for a in self._attrs] attrs = indent('\n'.join(attrs), ' ') return f"{self.__class__.__name__}(\n{attrs}\n)" class SourceDescriptor(SimpleDescriptor): """Simple container for data related to the source .. 
code:: python doc = DesignSpaceDocument() s1 = SourceDescriptor() s1.path = masterPath1 s1.name = "master.ufo1" s1.font = defcon.Font("master.ufo1") s1.location = dict(weight=0) s1.familyName = "MasterFamilyName" s1.styleName = "MasterStyleNameOne" s1.localisedFamilyName = dict(fr="Caractère") s1.mutedGlyphNames.append("A") s1.mutedGlyphNames.append("Z") doc.addSource(s1) """ flavor = "source" _attrs = ['filename', 'path', 'name', 'layerName', 'location', 'copyLib', 'copyGroups', 'copyFeatures', 'muteKerning', 'muteInfo', 'mutedGlyphNames', 'familyName', 'styleName', 'localisedFamilyName'] filename = posixpath_property("_filename") path = posixpath_property("_path") def __init__( self, *, filename=None, path=None, font=None, name=None, location=None, designLocation=None, layerName=None, familyName=None, styleName=None, localisedFamilyName=None, copyLib=False, copyInfo=False, copyGroups=False, copyFeatures=False, muteKerning=False, muteInfo=False, mutedGlyphNames=None, ): self.filename = filename """string. A relative path to the source file, **as it is in the document**. MutatorMath + VarLib. """ self.path = path """The absolute path, calculated from filename.""" self.font = font """Any Python object. Optional. Points to a representation of this source font that is loaded in memory, as a Python object (e.g. a ``defcon.Font`` or a ``fontTools.ttFont.TTFont``). The default document reader will not fill-in this attribute, and the default writer will not use this attribute. It is up to the user of ``designspaceLib`` to either load the resource identified by ``filename`` and store it in this field, or write the contents of this field to the disk and make ```filename`` point to that. """ self.name = name """string. Optional. Unique identifier name for this source. MutatorMath + Varlib. """ self.designLocation = designLocation if designLocation is not None else location or {} """dict. Axis values for this source, in design space coordinates. MutatorMath + Varlib. This may be only part of the full design location. See :meth:`getFullDesignLocation()` .. versionadded:: 5.0 """ self.layerName = layerName """string. The name of the layer in the source to look for outline data. Default ``None`` which means ``foreground``. """ self.familyName = familyName """string. Family name of this source. Though this data can be extracted from the font, it can be efficient to have it right here. Varlib. """ self.styleName = styleName """string. Style name of this source. Though this data can be extracted from the font, it can be efficient to have it right here. Varlib. """ self.localisedFamilyName = localisedFamilyName or {} """dict. A dictionary of localised family name strings, keyed by language code. If present, will be used to build localized names for all instances. .. versionadded:: 5.0 """ self.copyLib = copyLib """bool. Indicates if the contents of the font.lib need to be copied to the instances. MutatorMath. .. deprecated:: 5.0 """ self.copyInfo = copyInfo """bool. Indicates if the non-interpolating font.info needs to be copied to the instances. MutatorMath. .. deprecated:: 5.0 """ self.copyGroups = copyGroups """bool. Indicates if the groups need to be copied to the instances. MutatorMath. .. deprecated:: 5.0 """ self.copyFeatures = copyFeatures """bool. Indicates if the feature text needs to be copied to the instances. MutatorMath. .. deprecated:: 5.0 """ self.muteKerning = muteKerning """bool. Indicates if the kerning data from this source needs to be muted (i.e. not be part of the calculations). 
MutatorMath only. """ self.muteInfo = muteInfo """bool. Indicated if the interpolating font.info data for this source needs to be muted. MutatorMath only. """ self.mutedGlyphNames = mutedGlyphNames or [] """list. Glyphnames that need to be muted in the instances. MutatorMath only. """ @property def location(self): """dict. Axis values for this source, in design space coordinates. MutatorMath + Varlib. .. deprecated:: 5.0 Use the more explicit alias for this property :attr:`designLocation`. """ return self.designLocation @location.setter def location(self, location: Optional[AnisotropicLocationDict]): self.designLocation = location or {} def setFamilyName(self, familyName, languageCode="en"): """Setter for :attr:`localisedFamilyName` .. versionadded:: 5.0 """ self.localisedFamilyName[languageCode] = tostr(familyName) def getFamilyName(self, languageCode="en"): """Getter for :attr:`localisedFamilyName` .. versionadded:: 5.0 """ return self.localisedFamilyName.get(languageCode) def getFullDesignLocation(self, doc: 'DesignSpaceDocument') -> AnisotropicLocationDict: """Get the complete design location of this source, from its :attr:`designLocation` and the document's axis defaults. .. versionadded:: 5.0 """ result: AnisotropicLocationDict = {} for axis in doc.axes: if axis.name in self.designLocation: result[axis.name] = self.designLocation[axis.name] else: result[axis.name] = axis.map_forward(axis.default) return result class RuleDescriptor(SimpleDescriptor): """Represents the rule descriptor element: a set of glyph substitutions to trigger conditionally in some parts of the designspace. .. code:: python r1 = RuleDescriptor() r1.name = "unique.rule.name" r1.conditionSets.append([dict(name="weight", minimum=-10, maximum=10), dict(...)]) r1.conditionSets.append([dict(...), dict(...)]) r1.subs.append(("a", "a.alt")) .. code:: xml <!-- optional: list of substitution rules --> <rules> <rule name="vertical.bars"> <conditionset> <condition minimum="250.000000" maximum="750.000000" name="weight"/> <condition minimum="100" name="width"/> <condition minimum="10" maximum="40" name="optical"/> </conditionset> <sub name="cent" with="cent.alt"/> <sub name="dollar" with="dollar.alt"/> </rule> </rules> """ _attrs = ['name', 'conditionSets', 'subs'] # what do we need here def __init__(self, *, name=None, conditionSets=None, subs=None): self.name = name """string. Unique name for this rule. Can be used to reference this rule data.""" # list of lists of dict(name='aaaa', minimum=0, maximum=1000) self.conditionSets = conditionSets or [] """a list of conditionsets. - Each conditionset is a list of conditions. - Each condition is a dict with ``name``, ``minimum`` and ``maximum`` keys. """ # list of substitutions stored as tuples of glyphnames ("a", "a.alt") self.subs = subs or [] """list of substitutions. - Each substitution is stored as tuples of glyphnames, e.g. ("a", "a.alt"). - Note: By default, rules are applied first, before other text shaping/OpenType layout, as they are part of the `Required Variation Alternates OpenType feature <https://docs.microsoft.com/en-us/typography/opentype/spec/features_pt#-tag-rvrn>`_. See ref:`rules-element` § Attributes. """ def evaluateRule(rule, location): """Return True if any of the rule's conditionsets matches the given location.""" return any(evaluateConditions(c, location) for c in rule.conditionSets) def evaluateConditions(conditions, location): """Return True if all the conditions matches the given location. - If a condition has no minimum, check for < maximum. 
- If a condition has no maximum, check for > minimum. """ for cd in conditions: value = location[cd['name']] if cd.get('minimum') is None: if value > cd['maximum']: return False elif cd.get('maximum') is None: if cd['minimum'] > value: return False elif not cd['minimum'] <= value <= cd['maximum']: return False return True def processRules(rules, location, glyphNames): """Apply these rules at this location to these glyphnames. Return a new list of glyphNames with substitutions applied. - rule order matters """ newNames = [] for rule in rules: if evaluateRule(rule, location): for name in glyphNames: swap = False for a, b in rule.subs: if name == a: swap = True break if swap: newNames.append(b) else: newNames.append(name) glyphNames = newNames newNames = [] return glyphNames AnisotropicLocationDict = Dict[str, Union[float, Tuple[float, float]]] SimpleLocationDict = Dict[str, float] class InstanceDescriptor(SimpleDescriptor): """Simple container for data related to the instance .. code:: python i2 = InstanceDescriptor() i2.path = instancePath2 i2.familyName = "InstanceFamilyName" i2.styleName = "InstanceStyleName" i2.name = "instance.ufo2" # anisotropic location i2.designLocation = dict(weight=500, width=(400,300)) i2.postScriptFontName = "InstancePostscriptName" i2.styleMapFamilyName = "InstanceStyleMapFamilyName" i2.styleMapStyleName = "InstanceStyleMapStyleName" i2.lib['com.coolDesignspaceApp.specimenText'] = 'Hamburgerwhatever' doc.addInstance(i2) """ flavor = "instance" _defaultLanguageCode = "en" _attrs = ['filename', 'path', 'name', 'locationLabel', 'designLocation', 'userLocation', 'familyName', 'styleName', 'postScriptFontName', 'styleMapFamilyName', 'styleMapStyleName', 'localisedFamilyName', 'localisedStyleName', 'localisedStyleMapFamilyName', 'localisedStyleMapStyleName', 'glyphs', 'kerning', 'info', 'lib'] filename = posixpath_property("_filename") path = posixpath_property("_path") def __init__( self, *, filename=None, path=None, font=None, name=None, location=None, locationLabel=None, designLocation=None, userLocation=None, familyName=None, styleName=None, postScriptFontName=None, styleMapFamilyName=None, styleMapStyleName=None, localisedFamilyName=None, localisedStyleName=None, localisedStyleMapFamilyName=None, localisedStyleMapStyleName=None, glyphs=None, kerning=True, info=True, lib=None, ): self.filename = filename """string. Relative path to the instance file, **as it is in the document**. The file may or may not exist. MutatorMath + VarLib. """ self.path = path """string. Absolute path to the instance file, calculated from the document path and the string in the filename attr. The file may or may not exist. MutatorMath. """ self.font = font """Same as :attr:`SourceDescriptor.font` .. seealso:: :attr:`SourceDescriptor.font` """ self.name = name """string. Unique identifier name of the instance, used to identify it if it needs to be referenced from elsewhere in the document. """ self.locationLabel = locationLabel """Name of a :class:`LocationLabelDescriptor`. If provided, the instance should have the same location as the LocationLabel. .. seealso:: :meth:`getFullDesignLocation` :meth:`getFullUserLocation` .. versionadded:: 5.0 """ self.designLocation: AnisotropicLocationDict = designLocation if designLocation is not None else (location or {}) """dict. Axis values for this instance, in design space coordinates. MutatorMath + Varlib. .. seealso:: This may be only part of the full location. See: :meth:`getFullDesignLocation` :meth:`getFullUserLocation` .. 
versionadded:: 5.0 """ self.userLocation: SimpleLocationDict = userLocation or {} """dict. Axis values for this instance, in user space coordinates. MutatorMath + Varlib. .. seealso:: This may be only part of the full location. See: :meth:`getFullDesignLocation` :meth:`getFullUserLocation` .. versionadded:: 5.0 """ self.familyName = familyName """string. Family name of this instance. MutatorMath + Varlib. """ self.styleName = styleName """string. Style name of this instance. MutatorMath + Varlib. """ self.postScriptFontName = postScriptFontName """string. Postscript fontname for this instance. MutatorMath + Varlib. """ self.styleMapFamilyName = styleMapFamilyName """string. StyleMap familyname for this instance. MutatorMath + Varlib. """ self.styleMapStyleName = styleMapStyleName """string. StyleMap stylename for this instance. MutatorMath + Varlib. """ self.localisedFamilyName = localisedFamilyName or {} """dict. A dictionary of localised family name strings, keyed by language code. """ self.localisedStyleName = localisedStyleName or {} """dict. A dictionary of localised stylename strings, keyed by language code. """ self.localisedStyleMapFamilyName = localisedStyleMapFamilyName or {} """A dictionary of localised style map familyname strings, keyed by language code. """ self.localisedStyleMapStyleName = localisedStyleMapStyleName or {} """A dictionary of localised style map stylename strings, keyed by language code. """ self.glyphs = glyphs or {} """dict for special master definitions for glyphs. If glyphs need special masters (to record the results of executed rules for example). MutatorMath. .. deprecated:: 5.0 Use rules or sparse sources instead. """ self.kerning = kerning """ bool. Indicates if this instance needs its kerning calculated. MutatorMath. .. deprecated:: 5.0 """ self.info = info """bool. Indicated if this instance needs the interpolating font.info calculated. .. deprecated:: 5.0 """ self.lib = lib or {} """Custom data associated with this instance.""" @property def location(self): """dict. Axis values for this instance. MutatorMath + Varlib. .. deprecated:: 5.0 Use the more explicit alias for this property :attr:`designLocation`. """ return self.designLocation @location.setter def location(self, location: Optional[AnisotropicLocationDict]): self.designLocation = location or {} def setStyleName(self, styleName, languageCode="en"): """These methods give easier access to the localised names.""" self.localisedStyleName[languageCode] = tostr(styleName) def getStyleName(self, languageCode="en"): return self.localisedStyleName.get(languageCode) def setFamilyName(self, familyName, languageCode="en"): self.localisedFamilyName[languageCode] = tostr(familyName) def getFamilyName(self, languageCode="en"): return self.localisedFamilyName.get(languageCode) def setStyleMapStyleName(self, styleMapStyleName, languageCode="en"): self.localisedStyleMapStyleName[languageCode] = tostr(styleMapStyleName) def getStyleMapStyleName(self, languageCode="en"): return self.localisedStyleMapStyleName.get(languageCode) def setStyleMapFamilyName(self, styleMapFamilyName, languageCode="en"): self.localisedStyleMapFamilyName[languageCode] = tostr(styleMapFamilyName) def getStyleMapFamilyName(self, languageCode="en"): return self.localisedStyleMapFamilyName.get(languageCode) def clearLocation(self, axisName: Optional[str] = None): """Clear all location-related fields. Ensures that :attr:``designLocation`` and :attr:``userLocation`` are dictionaries (possibly empty if clearing everything). 
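# --- Illustrative sketch (not part of designspaceLib): the localised-name
# accessors defined above store per-language strings keyed by xml:lang code.
# The family/style names and French strings are made-up examples.
def _example_localised_instance_names():
    inst = InstanceDescriptor(familyName="MyFamily", styleName="Bold Condensed")
    inst.setStyleName("Gras Condensé", "fr")
    inst.setFamilyName("MaFamille", "fr")
    assert inst.getStyleName("fr") == "Gras Condensé"
    assert inst.getStyleName("de") is None  # nothing stored for that language code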
In order to update the location of this instance wholesale, a user should first clear all the fields, then change the field(s) for which they have data. .. code:: python instance.clearLocation() instance.designLocation = {'Weight': (34, 36.5), 'Width': 100} instance.userLocation = {'Opsz': 16} In order to update a single axis location, the user should only clear that axis, then edit the values: .. code:: python instance.clearLocation('Weight') instance.designLocation['Weight'] = (34, 36.5) Args: axisName: if provided, only clear the location for that axis. .. versionadded:: 5.0 """ self.locationLabel = None if axisName is None: self.designLocation = {} self.userLocation = {} else: if self.designLocation is None: self.designLocation = {} if axisName in self.designLocation: del self.designLocation[axisName] if self.userLocation is None: self.userLocation = {} if axisName in self.userLocation: del self.userLocation[axisName] def getLocationLabelDescriptor(self, doc: 'DesignSpaceDocument') -> Optional[LocationLabelDescriptor]: """Get the :class:`LocationLabelDescriptor` instance that matches this instances's :attr:`locationLabel`. Raises if the named label can't be found. .. versionadded:: 5.0 """ if self.locationLabel is None: return None label = doc.getLocationLabel(self.locationLabel) if label is None: raise DesignSpaceDocumentError( 'InstanceDescriptor.getLocationLabelDescriptor(): ' f'unknown location label `{self.locationLabel}` in instance `{self.name}`.' ) return label def getFullDesignLocation(self, doc: 'DesignSpaceDocument') -> AnisotropicLocationDict: """Get the complete design location of this instance, by combining data from the various location fields, default axis values and mappings, and top-level location labels. The source of truth for this instance's location is determined for each axis independently by taking the first not-None field in this list: - ``locationLabel``: the location along this axis is the same as the matching STAT format 4 label. No anisotropy. - ``designLocation[axisName]``: the explicit design location along this axis, possibly anisotropic. - ``userLocation[axisName]``: the explicit user location along this axis. No anisotropy. - ``axis.default``: default axis value. No anisotropy. .. versionadded:: 5.0 """ label = self.getLocationLabelDescriptor(doc) if label is not None: return doc.map_forward(label.userLocation) # type: ignore result: AnisotropicLocationDict = {} for axis in doc.axes: if axis.name in self.designLocation: result[axis.name] = self.designLocation[axis.name] elif axis.name in self.userLocation: result[axis.name] = axis.map_forward(self.userLocation[axis.name]) else: result[axis.name] = axis.map_forward(axis.default) return result def getFullUserLocation(self, doc: 'DesignSpaceDocument') -> SimpleLocationDict: """Get the complete user location for this instance. .. seealso:: :meth:`getFullDesignLocation` .. 
versionadded:: 5.0 """ return doc.map_backward(self.getFullDesignLocation(doc)) def tagForAxisName(name): # try to find or make a tag name for this axis name names = { 'weight': ('wght', dict(en = 'Weight')), 'width': ('wdth', dict(en = 'Width')), 'optical': ('opsz', dict(en = 'Optical Size')), 'slant': ('slnt', dict(en = 'Slant')), 'italic': ('ital', dict(en = 'Italic')), } if name.lower() in names: return names[name.lower()] if len(name) < 4: tag = name + "*" * (4 - len(name)) else: tag = name[:4] return tag, dict(en=name) class AbstractAxisDescriptor(SimpleDescriptor): flavor = "axis" def __init__( self, *, tag=None, name=None, labelNames=None, hidden=False, map=None, axisOrdering=None, axisLabels=None, ): # opentype tag for this axis self.tag = tag """string. Four letter tag for this axis. Some might be registered at the `OpenType specification <https://www.microsoft.com/typography/otspec/fvar.htm#VAT>`__. Privately-defined axis tags must begin with an uppercase letter and use only uppercase letters or digits. """ # name of the axis used in locations self.name = name """string. Name of the axis as it is used in the location dicts. MutatorMath + Varlib. """ # names for UI purposes, if this is not a standard axis, self.labelNames = labelNames or {} """dict. When defining a non-registered axis, it will be necessary to define user-facing readable names for the axis. Keyed by xml:lang code. Values are required to be ``unicode`` strings, even if they only contain ASCII characters. """ self.hidden = hidden """bool. Whether this axis should be hidden in user interfaces. """ self.map = map or [] """list of input / output values that can describe a warp of user space to design space coordinates. If no map values are present, it is assumed user space is the same as design space, as in [(minimum, minimum), (maximum, maximum)]. Varlib. """ self.axisOrdering = axisOrdering """STAT table field ``axisOrdering``. See: `OTSpec STAT Axis Record <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-records>`_ .. versionadded:: 5.0 """ self.axisLabels: List[AxisLabelDescriptor] = axisLabels or [] """STAT table entries for Axis Value Tables format 1, 2, 3. See: `OTSpec STAT Axis Value Tables <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-tables>`_ .. versionadded:: 5.0 """ class AxisDescriptor(AbstractAxisDescriptor): """ Simple container for the axis data. Add more localisations? .. code:: python a1 = AxisDescriptor() a1.minimum = 1 a1.maximum = 1000 a1.default = 400 a1.name = "weight" a1.tag = "wght" a1.labelNames['fa-IR'] = "قطر" a1.labelNames['en'] = "Wéíght" a1.map = [(1.0, 10.0), (400.0, 66.0), (1000.0, 990.0)] a1.axisOrdering = 1 a1.axisLabels = [ AxisLabelDescriptor(name="Regular", userValue=400, elidable=True) ] doc.addAxis(a1) """ _attrs = ['tag', 'name', 'maximum', 'minimum', 'default', 'map', 'axisOrdering', 'axisLabels'] def __init__( self, *, tag=None, name=None, labelNames=None, minimum=None, default=None, maximum=None, hidden=False, map=None, axisOrdering=None, axisLabels=None, ): super().__init__( tag=tag, name=name, labelNames=labelNames, hidden=hidden, map=map, axisOrdering=axisOrdering, axisLabels=axisLabels, ) self.minimum = minimum """number. The minimum value for this axis in user space. MutatorMath + Varlib. """ self.maximum = maximum """number. The maximum value for this axis in user space. MutatorMath + Varlib. """ self.default = default """number. The default value for this axis, i.e. 
when a new location is created, this is the value this axis will get in user space. MutatorMath + Varlib. """ def serialize(self): # output to a dict, used in testing return dict( tag=self.tag, name=self.name, labelNames=self.labelNames, maximum=self.maximum, minimum=self.minimum, default=self.default, hidden=self.hidden, map=self.map, axisOrdering=self.axisOrdering, axisLabels=self.axisLabels, ) def map_forward(self, v): """Maps value from axis mapping's input (user) to output (design).""" from fontTools.varLib.models import piecewiseLinearMap if not self.map: return v return piecewiseLinearMap(v, {k: v for k, v in self.map}) def map_backward(self, v): """Maps value from axis mapping's output (design) to input (user).""" from fontTools.varLib.models import piecewiseLinearMap if isinstance(v, tuple): v = v[0] if not self.map: return v return piecewiseLinearMap(v, {v: k for k, v in self.map}) class DiscreteAxisDescriptor(AbstractAxisDescriptor): """Container for discrete axis data. Use this for axes that do not interpolate. The main difference from a continuous axis is that a continuous axis has a ``minimum`` and ``maximum``, while a discrete axis has a list of ``values``. Example: an Italic axis with 2 stops, Roman and Italic, that are not compatible. The axis still allows to bind together the full font family, which is useful for the STAT table, however it can't become a variation axis in a VF. .. code:: python a2 = DiscreteAxisDescriptor() a2.values = [0, 1] a2.name = "Italic" a2.tag = "ITAL" a2.labelNames['fr'] = "Italique" a2.map = [(0, 0), (1, -11)] a2.axisOrdering = 2 a2.axisLabels = [ AxisLabelDescriptor(name="Roman", userValue=0, elidable=True) ] doc.addAxis(a2) .. versionadded:: 5.0 """ flavor = "axis" _attrs = ('tag', 'name', 'values', 'default', 'map', 'axisOrdering', 'axisLabels') def __init__( self, *, tag=None, name=None, labelNames=None, values=None, default=None, hidden=False, map=None, axisOrdering=None, axisLabels=None, ): super().__init__( tag=tag, name=name, labelNames=labelNames, hidden=hidden, map=map, axisOrdering=axisOrdering, axisLabels=axisLabels, ) self.default: float = default """The default value for this axis, i.e. when a new location is created, this is the value this axis will get in user space. However, this default value is less important than in continuous axes: - it doesn't define the "neutral" version of outlines from which deltas would apply, as this axis does not interpolate. - it doesn't provide the reference glyph set for the designspace, as fonts at each value can have different glyph sets. """ self.values: List[float] = values or [] """List of possible values for this axis. Contrary to continuous axes, only the values in this list can be taken by the axis, nothing in-between. """ def map_forward(self, value): """Maps value from axis mapping's input to output. Returns value unchanged if no mapping entry is found. Note: for discrete axes, each value must have its mapping entry, if you intend that value to be mapped. """ return next((v for k, v in self.map if k == value), value) def map_backward(self, value): """Maps value from axis mapping's output to input. Returns value unchanged if no mapping entry is found. Note: for discrete axes, each value must have its mapping entry, if you intend that value to be mapped. """ if isinstance(value, tuple): value = value[0] return next((k for k, v in self.map if v == value), value) class AxisLabelDescriptor(SimpleDescriptor): """Container for axis label data. 
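# --- Illustrative sketch (not part of designspaceLib): how the user<->design
# warp defined by AxisDescriptor.map behaves; the map values are made up.
def _example_axis_mapping():
    wght = AxisDescriptor()
    wght.name, wght.tag = "weight", "wght"
    wght.minimum, wght.default, wght.maximum = 1, 400, 1000
    wght.map = [(1.0, 10.0), (400.0, 66.0), (1000.0, 990.0)]
    assert wght.map_forward(400) == 66.0     # user -> design
    assert wght.map_backward(66.0) == 400.0  # design -> user
    assert wght.map_forward(700) == 528.0    # in-between values interpolate piecewise-linearly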
Analogue of OpenType's STAT data for a single axis (formats 1, 2 and 3). All values are user values. See: `OTSpec STAT Axis value table, format 1, 2, 3 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-1>`_ The STAT format of the Axis value depends on which field are filled-in, see :meth:`getFormat` .. versionadded:: 5.0 """ flavor = "label" _attrs = ('userMinimum', 'userValue', 'userMaximum', 'name', 'elidable', 'olderSibling', 'linkedUserValue', 'labelNames') def __init__( self, *, name, userValue, userMinimum=None, userMaximum=None, elidable=False, olderSibling=False, linkedUserValue=None, labelNames=None, ): self.userMinimum: Optional[float] = userMinimum """STAT field ``rangeMinValue`` (format 2).""" self.userValue: float = userValue """STAT field ``value`` (format 1, 3) or ``nominalValue`` (format 2).""" self.userMaximum: Optional[float] = userMaximum """STAT field ``rangeMaxValue`` (format 2).""" self.name: str = name """Label for this axis location, STAT field ``valueNameID``.""" self.elidable: bool = elidable """STAT flag ``ELIDABLE_AXIS_VALUE_NAME``. See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_ """ self.olderSibling: bool = olderSibling """STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``. See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_ """ self.linkedUserValue: Optional[float] = linkedUserValue """STAT field ``linkedValue`` (format 3).""" self.labelNames: MutableMapping[str, str] = labelNames or {} """User-facing translations of this location's label. Keyed by ``xml:lang`` code. """ def getFormat(self) -> int: """Determine which format of STAT Axis value to use to encode this label. =========== ========= =========== =========== =============== STAT Format userValue userMinimum userMaximum linkedUserValue =========== ========= =========== =========== =============== 1 ✅ ❌ ❌ ❌ 2 ✅ ✅ ✅ ❌ 3 ✅ ❌ ❌ ✅ =========== ========= =========== =========== =============== """ if self.linkedUserValue is not None: return 3 if self.userMinimum is not None or self.userMaximum is not None: return 2 return 1 @property def defaultName(self) -> str: """Return the English name from :attr:`labelNames` or the :attr:`name`.""" return self.labelNames.get("en") or self.name class LocationLabelDescriptor(SimpleDescriptor): """Container for location label data. Analogue of OpenType's STAT data for a free-floating location (format 4). All values are user values. See: `OTSpec STAT Axis value table, format 4 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-4>`_ .. versionadded:: 5.0 """ flavor = "label" _attrs = ('name', 'elidable', 'olderSibling', 'userLocation', 'labelNames') def __init__( self, *, name, userLocation, elidable=False, olderSibling=False, labelNames=None, ): self.name: str = name """Label for this named location, STAT field ``valueNameID``.""" self.userLocation: SimpleLocationDict = userLocation or {} """Location in user coordinates along each axis. If an axis is not mentioned, it is assumed to be at its default location. .. seealso:: This may be only part of the full location. See: :meth:`getFullUserLocation` """ self.elidable: bool = elidable """STAT flag ``ELIDABLE_AXIS_VALUE_NAME``. See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_ """ self.olderSibling: bool = olderSibling """STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``. 
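# --- Illustrative sketch (not part of designspaceLib): which STAT Axis Value
# format AxisLabelDescriptor.getFormat() selects, following the table above.
# Names and values are made up.
def _example_axis_label_formats():
    assert AxisLabelDescriptor(name="Regular", userValue=400).getFormat() == 1
    assert AxisLabelDescriptor(
        name="Medium", userValue=500, userMinimum=450, userMaximum=550
    ).getFormat() == 2
    assert AxisLabelDescriptor(
        name="Regular", userValue=400, linkedUserValue=700
    ).getFormat() == 3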
See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_ """ self.labelNames: Dict[str, str] = labelNames or {} """User-facing translations of this location's label. Keyed by xml:lang code. """ @property def defaultName(self) -> str: """Return the English name from :attr:`labelNames` or the :attr:`name`.""" return self.labelNames.get("en") or self.name def getFullUserLocation(self, doc: 'DesignSpaceDocument') -> SimpleLocationDict: """Get the complete user location of this label, by combining data from the explicit user location and default axis values. .. versionadded:: 5.0 """ return { axis.name: self.userLocation.get(axis.name, axis.default) for axis in doc.axes } class VariableFontDescriptor(SimpleDescriptor): """Container for variable fonts, sub-spaces of the Designspace. Use-cases: - From a single DesignSpace with discrete axes, define 1 variable font per value on the discrete axes. Before version 5, you would have needed 1 DesignSpace per such variable font, and a lot of data duplication. - From a big variable font with many axes, define subsets of that variable font that only include some axes and freeze other axes at a given location. .. versionadded:: 5.0 """ flavor = "variable-font" _attrs = ('filename', 'axisSubsets', 'lib') filename = posixpath_property("_filename") def __init__(self, *, name, filename=None, axisSubsets=None, lib=None): self.name: str = name """string, required. Name of this variable to identify it during the build process and from other parts of the document, and also as a filename in case the filename property is empty. VarLib. """ self.filename: str = filename """string, optional. Relative path to the variable font file, **as it is in the document**. The file may or may not exist. If not specified, the :attr:`name` will be used as a basename for the file. """ self.axisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = axisSubsets or [] """Axis subsets to include in this variable font. If an axis is not mentioned, assume that we only want the default location of that axis (same as a :class:`ValueAxisSubsetDescriptor`). """ self.lib: MutableMapping[str, Any] = lib or {} """Custom data associated with this variable font.""" class RangeAxisSubsetDescriptor(SimpleDescriptor): """Subset of a continuous axis to include in a variable font. .. versionadded:: 5.0 """ flavor = "axis-subset" _attrs = ('name', 'userMinimum', 'userDefault', 'userMaximum') def __init__(self, *, name, userMinimum=-math.inf, userDefault=None, userMaximum=math.inf): self.name: str = name """Name of the :class:`AxisDescriptor` to subset.""" self.userMinimum: float = userMinimum """New minimum value of the axis in the target variable font. If not specified, assume the same minimum value as the full axis. (default = ``-math.inf``) """ self.userDefault: Optional[float] = userDefault """New default value of the axis in the target variable font. If not specified, assume the same default value as the full axis. (default = ``None``) """ self.userMaximum: float = userMaximum """New maximum value of the axis in the target variable font. If not specified, assume the same maximum value as the full axis. (default = ``math.inf``) """ class ValueAxisSubsetDescriptor(SimpleDescriptor): """Single value of a discrete or continuous axis to use in a variable font. .. 
versionadded:: 5.0 """ flavor = "axis-subset" _attrs = ('name', 'userValue') def __init__(self, *, name, userValue): self.name: str = name """Name of the :class:`AxisDescriptor` or :class:`DiscreteAxisDescriptor` to "snapshot" or "freeze". """ self.userValue: float = userValue """Value in user coordinates at which to freeze the given axis.""" class BaseDocWriter(object): _whiteSpace = " " axisDescriptorClass = AxisDescriptor discreteAxisDescriptorClass = DiscreteAxisDescriptor axisLabelDescriptorClass = AxisLabelDescriptor locationLabelDescriptorClass = LocationLabelDescriptor ruleDescriptorClass = RuleDescriptor sourceDescriptorClass = SourceDescriptor variableFontDescriptorClass = VariableFontDescriptor valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor instanceDescriptorClass = InstanceDescriptor @classmethod def getAxisDecriptor(cls): return cls.axisDescriptorClass() @classmethod def getSourceDescriptor(cls): return cls.sourceDescriptorClass() @classmethod def getInstanceDescriptor(cls): return cls.instanceDescriptorClass() @classmethod def getRuleDescriptor(cls): return cls.ruleDescriptorClass() def __init__(self, documentPath, documentObject: DesignSpaceDocument): self.path = documentPath self.documentObject = documentObject self.effectiveFormatTuple = self._getEffectiveFormatTuple() self.root = ET.Element("designspace") def write(self, pretty=True, encoding="UTF-8", xml_declaration=True): self.root.attrib['format'] = ".".join(str(i) for i in self.effectiveFormatTuple) if self.documentObject.axes or self.documentObject.elidedFallbackName is not None: axesElement = ET.Element("axes") if self.documentObject.elidedFallbackName is not None: axesElement.attrib['elidedfallbackname'] = self.documentObject.elidedFallbackName self.root.append(axesElement) for axisObject in self.documentObject.axes: self._addAxis(axisObject) if self.documentObject.locationLabels: labelsElement = ET.Element("labels") for labelObject in self.documentObject.locationLabels: self._addLocationLabel(labelsElement, labelObject) self.root.append(labelsElement) if self.documentObject.rules: if getattr(self.documentObject, "rulesProcessingLast", False): attributes = {"processing": "last"} else: attributes = {} self.root.append(ET.Element("rules", attributes)) for ruleObject in self.documentObject.rules: self._addRule(ruleObject) if self.documentObject.sources: self.root.append(ET.Element("sources")) for sourceObject in self.documentObject.sources: self._addSource(sourceObject) if self.documentObject.variableFonts: variableFontsElement = ET.Element("variable-fonts") for variableFont in self.documentObject.variableFonts: self._addVariableFont(variableFontsElement, variableFont) self.root.append(variableFontsElement) if self.documentObject.instances: self.root.append(ET.Element("instances")) for instanceObject in self.documentObject.instances: self._addInstance(instanceObject) if self.documentObject.lib: self._addLib(self.root, self.documentObject.lib, 2) tree = ET.ElementTree(self.root) tree.write( self.path, encoding=encoding, method='xml', xml_declaration=xml_declaration, pretty_print=pretty, ) def _getEffectiveFormatTuple(self): """Try to use the version specified in the document, or a sufficiently recent version to be able to encode what the document contains. 
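# --- Illustrative sketch (not part of designspaceLib): the writer above is
# normally driven through DesignSpaceDocument.write(); the family name, paths
# and axis values below are hypothetical.
def _example_write_minimal_document():
    doc = DesignSpaceDocument()
    weight = AxisDescriptor()
    weight.name, weight.tag = "weight", "wght"
    weight.minimum, weight.default, weight.maximum = 100, 400, 900
    doc.addAxis(weight)
    regular = SourceDescriptor()
    regular.filename = "masters/MyFamily-Regular.ufo"  # hypothetical path
    regular.location = dict(weight=400)
    doc.addSource(regular)
    doc.write("MyFamily.designspace")  # BaseDocWriter picks a format version that can encode the data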
""" minVersion = self.documentObject.formatTuple if ( any( isinstance(axis, DiscreteAxisDescriptor) or axis.axisOrdering is not None or axis.axisLabels for axis in self.documentObject.axes ) or self.documentObject.locationLabels or any( source.localisedFamilyName for source in self.documentObject.sources ) or self.documentObject.variableFonts or any( instance.locationLabel or instance.userLocation for instance in self.documentObject.instances ) ): if minVersion < (5, 0): minVersion = (5, 0) return minVersion def _makeLocationElement(self, locationObject, name=None): """ Convert Location dict to a locationElement.""" locElement = ET.Element("location") if name is not None: locElement.attrib['name'] = name validatedLocation = self.documentObject.newDefaultLocation() for axisName, axisValue in locationObject.items(): if axisName in validatedLocation: # only accept values we know validatedLocation[axisName] = axisValue for dimensionName, dimensionValue in validatedLocation.items(): dimElement = ET.Element('dimension') dimElement.attrib['name'] = dimensionName if type(dimensionValue) == tuple: dimElement.attrib['xvalue'] = self.intOrFloat(dimensionValue[0]) dimElement.attrib['yvalue'] = self.intOrFloat(dimensionValue[1]) else: dimElement.attrib['xvalue'] = self.intOrFloat(dimensionValue) locElement.append(dimElement) return locElement, validatedLocation def intOrFloat(self, num): if int(num) == num: return "%d" % num return ("%f" % num).rstrip('0').rstrip('.') def _addRule(self, ruleObject): # if none of the conditions have minimum or maximum values, do not add the rule. ruleElement = ET.Element('rule') if ruleObject.name is not None: ruleElement.attrib['name'] = ruleObject.name for conditions in ruleObject.conditionSets: conditionsetElement = ET.Element('conditionset') for cond in conditions: if cond.get('minimum') is None and cond.get('maximum') is None: # neither is defined, don't add this condition continue conditionElement = ET.Element('condition') conditionElement.attrib['name'] = cond.get('name') if cond.get('minimum') is not None: conditionElement.attrib['minimum'] = self.intOrFloat(cond.get('minimum')) if cond.get('maximum') is not None: conditionElement.attrib['maximum'] = self.intOrFloat(cond.get('maximum')) conditionsetElement.append(conditionElement) if len(conditionsetElement): ruleElement.append(conditionsetElement) for sub in ruleObject.subs: subElement = ET.Element('sub') subElement.attrib['name'] = sub[0] subElement.attrib['with'] = sub[1] ruleElement.append(subElement) if len(ruleElement): self.root.findall('.rules')[0].append(ruleElement) def _addAxis(self, axisObject): axisElement = ET.Element('axis') axisElement.attrib['tag'] = axisObject.tag axisElement.attrib['name'] = axisObject.name self._addLabelNames(axisElement, axisObject.labelNames) if axisObject.map: for inputValue, outputValue in axisObject.map: mapElement = ET.Element('map') mapElement.attrib['input'] = self.intOrFloat(inputValue) mapElement.attrib['output'] = self.intOrFloat(outputValue) axisElement.append(mapElement) if axisObject.axisOrdering or axisObject.axisLabels: labelsElement = ET.Element('labels') if axisObject.axisOrdering is not None: labelsElement.attrib['ordering'] = str(axisObject.axisOrdering) for label in axisObject.axisLabels: self._addAxisLabel(labelsElement, label) axisElement.append(labelsElement) if isinstance(axisObject, AxisDescriptor): axisElement.attrib['minimum'] = self.intOrFloat(axisObject.minimum) axisElement.attrib['maximum'] = self.intOrFloat(axisObject.maximum) elif 
isinstance(axisObject, DiscreteAxisDescriptor): axisElement.attrib['values'] = " ".join(self.intOrFloat(v) for v in axisObject.values) axisElement.attrib['default'] = self.intOrFloat(axisObject.default) if axisObject.hidden: axisElement.attrib['hidden'] = "1" self.root.findall('.axes')[0].append(axisElement) def _addAxisLabel(self, axisElement: ET.Element, label: AxisLabelDescriptor) -> None: labelElement = ET.Element('label') labelElement.attrib['uservalue'] = self.intOrFloat(label.userValue) if label.userMinimum is not None: labelElement.attrib['userminimum'] = self.intOrFloat(label.userMinimum) if label.userMaximum is not None: labelElement.attrib['usermaximum'] = self.intOrFloat(label.userMaximum) labelElement.attrib['name'] = label.name if label.elidable: labelElement.attrib['elidable'] = "true" if label.olderSibling: labelElement.attrib['oldersibling'] = "true" if label.linkedUserValue is not None: labelElement.attrib['linkeduservalue'] = self.intOrFloat(label.linkedUserValue) self._addLabelNames(labelElement, label.labelNames) axisElement.append(labelElement) def _addLabelNames(self, parentElement, labelNames): for languageCode, labelName in sorted(labelNames.items()): languageElement = ET.Element('labelname') languageElement.attrib[XML_LANG] = languageCode languageElement.text = labelName parentElement.append(languageElement) def _addLocationLabel(self, parentElement: ET.Element, label: LocationLabelDescriptor) -> None: labelElement = ET.Element('label') labelElement.attrib['name'] = label.name if label.elidable: labelElement.attrib['elidable'] = "true" if label.olderSibling: labelElement.attrib['oldersibling'] = "true" self._addLabelNames(labelElement, label.labelNames) self._addLocationElement(labelElement, userLocation=label.userLocation) parentElement.append(labelElement) def _addLocationElement( self, parentElement, *, designLocation: AnisotropicLocationDict = None, userLocation: SimpleLocationDict = None ): locElement = ET.Element("location") for axis in self.documentObject.axes: if designLocation is not None and axis.name in designLocation: dimElement = ET.Element('dimension') dimElement.attrib['name'] = axis.name value = designLocation[axis.name] if isinstance(value, tuple): dimElement.attrib['xvalue'] = self.intOrFloat(value[0]) dimElement.attrib['yvalue'] = self.intOrFloat(value[1]) else: dimElement.attrib['xvalue'] = self.intOrFloat(value) locElement.append(dimElement) elif userLocation is not None and axis.name in userLocation: dimElement = ET.Element('dimension') dimElement.attrib['name'] = axis.name value = userLocation[axis.name] dimElement.attrib['uservalue'] = self.intOrFloat(value) locElement.append(dimElement) if len(locElement) > 0: parentElement.append(locElement) def _addInstance(self, instanceObject): instanceElement = ET.Element('instance') if instanceObject.name is not None: instanceElement.attrib['name'] = instanceObject.name if instanceObject.locationLabel is not None: instanceElement.attrib['location'] = instanceObject.locationLabel if instanceObject.familyName is not None: instanceElement.attrib['familyname'] = instanceObject.familyName if instanceObject.styleName is not None: instanceElement.attrib['stylename'] = instanceObject.styleName # add localisations if instanceObject.localisedStyleName: languageCodes = list(instanceObject.localisedStyleName.keys()) languageCodes.sort() for code in languageCodes: if code == "en": continue # already stored in the element attribute localisedStyleNameElement = ET.Element('stylename') 
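# --- Illustrative sketch (not part of designspaceLib): intOrFloat(), defined
# earlier in this writer, trims trailing zeros when serialising numbers into
# attributes such as minimum/maximum, xvalue or uservalue. Called unbound here
# purely to illustrate the formatting.
def _example_int_or_float_formatting():
    assert BaseDocWriter.intOrFloat(None, 250.0) == "250"
    assert BaseDocWriter.intOrFloat(None, 12.5) == "12.5"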
localisedStyleNameElement.attrib[XML_LANG] = code localisedStyleNameElement.text = instanceObject.getStyleName(code) instanceElement.append(localisedStyleNameElement) if instanceObject.localisedFamilyName: languageCodes = list(instanceObject.localisedFamilyName.keys()) languageCodes.sort() for code in languageCodes: if code == "en": continue # already stored in the element attribute localisedFamilyNameElement = ET.Element('familyname') localisedFamilyNameElement.attrib[XML_LANG] = code localisedFamilyNameElement.text = instanceObject.getFamilyName(code) instanceElement.append(localisedFamilyNameElement) if instanceObject.localisedStyleMapStyleName: languageCodes = list(instanceObject.localisedStyleMapStyleName.keys()) languageCodes.sort() for code in languageCodes: if code == "en": continue localisedStyleMapStyleNameElement = ET.Element('stylemapstylename') localisedStyleMapStyleNameElement.attrib[XML_LANG] = code localisedStyleMapStyleNameElement.text = instanceObject.getStyleMapStyleName(code) instanceElement.append(localisedStyleMapStyleNameElement) if instanceObject.localisedStyleMapFamilyName: languageCodes = list(instanceObject.localisedStyleMapFamilyName.keys()) languageCodes.sort() for code in languageCodes: if code == "en": continue localisedStyleMapFamilyNameElement = ET.Element('stylemapfamilyname') localisedStyleMapFamilyNameElement.attrib[XML_LANG] = code localisedStyleMapFamilyNameElement.text = instanceObject.getStyleMapFamilyName(code) instanceElement.append(localisedStyleMapFamilyNameElement) if self.effectiveFormatTuple >= (5, 0): if instanceObject.locationLabel is None: self._addLocationElement( instanceElement, designLocation=instanceObject.designLocation, userLocation=instanceObject.userLocation ) else: # Pre-version 5.0 code was validating and filling in the location # dict while writing it out, as preserved below. 
if instanceObject.location is not None: locationElement, instanceObject.location = self._makeLocationElement(instanceObject.location) instanceElement.append(locationElement) if instanceObject.filename is not None: instanceElement.attrib['filename'] = instanceObject.filename if instanceObject.postScriptFontName is not None: instanceElement.attrib['postscriptfontname'] = instanceObject.postScriptFontName if instanceObject.styleMapFamilyName is not None: instanceElement.attrib['stylemapfamilyname'] = instanceObject.styleMapFamilyName if instanceObject.styleMapStyleName is not None: instanceElement.attrib['stylemapstylename'] = instanceObject.styleMapStyleName if self.effectiveFormatTuple < (5, 0): # Deprecated members as of version 5.0 if instanceObject.glyphs: if instanceElement.findall('.glyphs') == []: glyphsElement = ET.Element('glyphs') instanceElement.append(glyphsElement) glyphsElement = instanceElement.findall('.glyphs')[0] for glyphName, data in sorted(instanceObject.glyphs.items()): glyphElement = self._writeGlyphElement(instanceElement, instanceObject, glyphName, data) glyphsElement.append(glyphElement) if instanceObject.kerning: kerningElement = ET.Element('kerning') instanceElement.append(kerningElement) if instanceObject.info: infoElement = ET.Element('info') instanceElement.append(infoElement) self._addLib(instanceElement, instanceObject.lib, 4) self.root.findall('.instances')[0].append(instanceElement) def _addSource(self, sourceObject): sourceElement = ET.Element("source") if sourceObject.filename is not None: sourceElement.attrib['filename'] = sourceObject.filename if sourceObject.name is not None: if sourceObject.name.find("temp_master") != 0: # do not save temporary source names sourceElement.attrib['name'] = sourceObject.name if sourceObject.familyName is not None: sourceElement.attrib['familyname'] = sourceObject.familyName if sourceObject.styleName is not None: sourceElement.attrib['stylename'] = sourceObject.styleName if sourceObject.layerName is not None: sourceElement.attrib['layer'] = sourceObject.layerName if sourceObject.localisedFamilyName: languageCodes = list(sourceObject.localisedFamilyName.keys()) languageCodes.sort() for code in languageCodes: if code == "en": continue # already stored in the element attribute localisedFamilyNameElement = ET.Element('familyname') localisedFamilyNameElement.attrib[XML_LANG] = code localisedFamilyNameElement.text = sourceObject.getFamilyName(code) sourceElement.append(localisedFamilyNameElement) if sourceObject.copyLib: libElement = ET.Element('lib') libElement.attrib['copy'] = "1" sourceElement.append(libElement) if sourceObject.copyGroups: groupsElement = ET.Element('groups') groupsElement.attrib['copy'] = "1" sourceElement.append(groupsElement) if sourceObject.copyFeatures: featuresElement = ET.Element('features') featuresElement.attrib['copy'] = "1" sourceElement.append(featuresElement) if sourceObject.copyInfo or sourceObject.muteInfo: infoElement = ET.Element('info') if sourceObject.copyInfo: infoElement.attrib['copy'] = "1" if sourceObject.muteInfo: infoElement.attrib['mute'] = "1" sourceElement.append(infoElement) if sourceObject.muteKerning: kerningElement = ET.Element("kerning") kerningElement.attrib["mute"] = '1' sourceElement.append(kerningElement) if sourceObject.mutedGlyphNames: for name in sourceObject.mutedGlyphNames: glyphElement = ET.Element("glyph") glyphElement.attrib["name"] = name glyphElement.attrib["mute"] = '1' sourceElement.append(glyphElement) if self.effectiveFormatTuple >= (5, 0): 
self._addLocationElement(sourceElement, designLocation=sourceObject.location) else: # Pre-version 5.0 code was validating and filling in the location # dict while writing it out, as preserved below. locationElement, sourceObject.location = self._makeLocationElement(sourceObject.location) sourceElement.append(locationElement) self.root.findall('.sources')[0].append(sourceElement) def _addVariableFont(self, parentElement: ET.Element, vf: VariableFontDescriptor) -> None: vfElement = ET.Element('variable-font') vfElement.attrib['name'] = vf.name if vf.filename is not None: vfElement.attrib['filename'] = vf.filename if vf.axisSubsets: subsetsElement = ET.Element('axis-subsets') for subset in vf.axisSubsets: subsetElement = ET.Element('axis-subset') subsetElement.attrib['name'] = subset.name if isinstance(subset, RangeAxisSubsetDescriptor): if subset.userMinimum != -math.inf: subsetElement.attrib['userminimum'] = self.intOrFloat(subset.userMinimum) if subset.userMaximum != math.inf: subsetElement.attrib['usermaximum'] = self.intOrFloat(subset.userMaximum) if subset.userDefault is not None: subsetElement.attrib['userdefault'] = self.intOrFloat(subset.userDefault) elif isinstance(subset, ValueAxisSubsetDescriptor): subsetElement.attrib['uservalue'] = self.intOrFloat(subset.userValue) subsetsElement.append(subsetElement) vfElement.append(subsetsElement) self._addLib(vfElement, vf.lib, 4) parentElement.append(vfElement) def _addLib(self, parentElement: ET.Element, data: Any, indent_level: int) -> None: if not data: return libElement = ET.Element('lib') libElement.append(plistlib.totree(data, indent_level=indent_level)) parentElement.append(libElement) def _writeGlyphElement(self, instanceElement, instanceObject, glyphName, data): glyphElement = ET.Element('glyph') if data.get('mute'): glyphElement.attrib['mute'] = "1" if data.get('unicodes') is not None: glyphElement.attrib['unicode'] = " ".join([hex(u) for u in data.get('unicodes')]) if data.get('instanceLocation') is not None: locationElement, data['instanceLocation'] = self._makeLocationElement(data.get('instanceLocation')) glyphElement.append(locationElement) if glyphName is not None: glyphElement.attrib['name'] = glyphName if data.get('note') is not None: noteElement = ET.Element('note') noteElement.text = data.get('note') glyphElement.append(noteElement) if data.get('masters') is not None: mastersElement = ET.Element("masters") for m in data.get('masters'): masterElement = ET.Element("master") if m.get('glyphName') is not None: masterElement.attrib['glyphname'] = m.get('glyphName') if m.get('font') is not None: masterElement.attrib['source'] = m.get('font') if m.get('location') is not None: locationElement, m['location'] = self._makeLocationElement(m.get('location')) masterElement.append(locationElement) mastersElement.append(masterElement) glyphElement.append(mastersElement) return glyphElement class BaseDocReader(LogMixin): axisDescriptorClass = AxisDescriptor discreteAxisDescriptorClass = DiscreteAxisDescriptor axisLabelDescriptorClass = AxisLabelDescriptor locationLabelDescriptorClass = LocationLabelDescriptor ruleDescriptorClass = RuleDescriptor sourceDescriptorClass = SourceDescriptor variableFontsDescriptorClass = VariableFontDescriptor valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor instanceDescriptorClass = InstanceDescriptor def __init__(self, documentPath, documentObject): self.path = documentPath self.documentObject = documentObject tree = ET.parse(self.path) 
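# --- Illustrative sketch (not part of designspaceLib): a variable-font record
# like the ones _addVariableFont() above serialises, keeping the full weight
# range and freezing a discrete italic axis at 0. The names are hypothetical.
def _example_variable_font_subset():
    vf = VariableFontDescriptor(name="MyFamilyVF-Upright")
    vf.axisSubsets.append(RangeAxisSubsetDescriptor(name="weight"))
    vf.axisSubsets.append(ValueAxisSubsetDescriptor(name="italic", userValue=0))
    return vf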
self.root = tree.getroot() self.documentObject.formatVersion = self.root.attrib.get("format", "3.0") self._axes = [] self.rules = [] self.sources = [] self.instances = [] self.axisDefaults = {} self._strictAxisNames = True @classmethod def fromstring(cls, string, documentObject): f = BytesIO(tobytes(string, encoding="utf-8")) self = cls(f, documentObject) self.path = None return self def read(self): self.readAxes() self.readLabels() self.readRules() self.readVariableFonts() self.readSources() self.readInstances() self.readLib() def readRules(self): # we also need to read any conditions that are outside of a condition set. rules = [] rulesElement = self.root.find(".rules") if rulesElement is not None: processingValue = rulesElement.attrib.get("processing", "first") if processingValue not in {"first", "last"}: raise DesignSpaceDocumentError( "<rules> processing attribute value is not valid: %r, " "expected 'first' or 'last'" % processingValue) self.documentObject.rulesProcessingLast = processingValue == "last" for ruleElement in self.root.findall(".rules/rule"): ruleObject = self.ruleDescriptorClass() ruleName = ruleObject.name = ruleElement.attrib.get("name") # read any stray conditions outside a condition set externalConditions = self._readConditionElements( ruleElement, ruleName, ) if externalConditions: ruleObject.conditionSets.append(externalConditions) self.log.info( "Found stray rule conditions outside a conditionset. " "Wrapped them in a new conditionset." ) # read the conditionsets for conditionSetElement in ruleElement.findall('.conditionset'): conditionSet = self._readConditionElements( conditionSetElement, ruleName, ) if conditionSet is not None: ruleObject.conditionSets.append(conditionSet) for subElement in ruleElement.findall('.sub'): a = subElement.attrib['name'] b = subElement.attrib['with'] ruleObject.subs.append((a, b)) rules.append(ruleObject) self.documentObject.rules = rules def _readConditionElements(self, parentElement, ruleName=None): cds = [] for conditionElement in parentElement.findall('.condition'): cd = {} cdMin = conditionElement.attrib.get("minimum") if cdMin is not None: cd['minimum'] = float(cdMin) else: # will allow these to be None, assume axis.minimum cd['minimum'] = None cdMax = conditionElement.attrib.get("maximum") if cdMax is not None: cd['maximum'] = float(cdMax) else: # will allow these to be None, assume axis.maximum cd['maximum'] = None cd['name'] = conditionElement.attrib.get("name") # # test for things if cd.get('minimum') is None and cd.get('maximum') is None: raise DesignSpaceDocumentError( "condition missing required minimum or maximum in rule" + (" '%s'" % ruleName if ruleName is not None else "")) cds.append(cd) return cds def readAxes(self): # read the axes elements, including the warp map. 
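# --- Illustrative sketch (not part of designspaceLib): a rule like the ones
# readRules() builds above, evaluated with the module-level helpers
# evaluateRule() and processRules(). Axis names and values are made up.
def _example_rule_evaluation():
    rule = RuleDescriptor(name="bar-to-alt")
    rule.conditionSets.append([
        dict(name="weight", minimum=600, maximum=1000),
        dict(name="width", minimum=None, maximum=80),
    ])
    rule.subs.append(("bar", "bar.alt"))
    assert evaluateRule(rule, {"weight": 700, "width": 60})
    assert not evaluateRule(rule, {"weight": 400, "width": 60})
    assert processRules([rule], {"weight": 700, "width": 60}, ["bar", "A"]) == ["bar.alt", "A"]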
axesElement = self.root.find(".axes") if axesElement is not None and 'elidedfallbackname' in axesElement.attrib: self.documentObject.elidedFallbackName = axesElement.attrib['elidedfallbackname'] axisElements = self.root.findall(".axes/axis") if not axisElements: return for axisElement in axisElements: if self.documentObject.formatTuple >= (5, 0) and "values" in axisElement.attrib: axisObject = self.discreteAxisDescriptorClass() axisObject.values = [float(s) for s in axisElement.attrib["values"].split(" ")] else: axisObject = self.axisDescriptorClass() axisObject.minimum = float(axisElement.attrib.get("minimum")) axisObject.maximum = float(axisElement.attrib.get("maximum")) axisObject.default = float(axisElement.attrib.get("default")) axisObject.name = axisElement.attrib.get("name") if axisElement.attrib.get('hidden', False): axisObject.hidden = True axisObject.tag = axisElement.attrib.get("tag") for mapElement in axisElement.findall('map'): a = float(mapElement.attrib['input']) b = float(mapElement.attrib['output']) axisObject.map.append((a, b)) for labelNameElement in axisElement.findall('labelname'): # Note: elementtree reads the "xml:lang" attribute name as # '{http://www.w3.org/XML/1998/namespace}lang' for key, lang in labelNameElement.items(): if key == XML_LANG: axisObject.labelNames[lang] = tostr(labelNameElement.text) labelElement = axisElement.find(".labels") if labelElement is not None: if "ordering" in labelElement.attrib: axisObject.axisOrdering = int(labelElement.attrib["ordering"]) for label in labelElement.findall(".label"): axisObject.axisLabels.append(self.readAxisLabel(label)) self.documentObject.axes.append(axisObject) self.axisDefaults[axisObject.name] = axisObject.default def readAxisLabel(self, element: ET.Element): xml_attrs = {'userminimum', 'uservalue', 'usermaximum', 'name', 'elidable', 'oldersibling', 'linkeduservalue'} unknown_attrs = set(element.attrib) - xml_attrs if unknown_attrs: raise DesignSpaceDocumentError(f"label element contains unknown attributes: {', '.join(unknown_attrs)}") name = element.get("name") if name is None: raise DesignSpaceDocumentError("label element must have a name attribute.") valueStr = element.get("uservalue") if valueStr is None: raise DesignSpaceDocumentError("label element must have a uservalue attribute.") value = float(valueStr) minimumStr = element.get("userminimum") minimum = float(minimumStr) if minimumStr is not None else None maximumStr = element.get("usermaximum") maximum = float(maximumStr) if maximumStr is not None else None linkedValueStr = element.get("linkeduservalue") linkedValue = float(linkedValueStr) if linkedValueStr is not None else None elidable = True if element.get("elidable") == "true" else False olderSibling = True if element.get("oldersibling") == "true" else False labelNames = { lang: label_name.text or "" for label_name in element.findall("labelname") for attr, lang in label_name.items() if attr == XML_LANG # Note: elementtree reads the "xml:lang" attribute name as # '{http://www.w3.org/XML/1998/namespace}lang' } return self.axisLabelDescriptorClass( name=name, userValue=value, userMinimum=minimum, userMaximum=maximum, elidable=elidable, olderSibling=olderSibling, linkedUserValue=linkedValue, labelNames=labelNames, ) def readLabels(self): if self.documentObject.formatTuple < (5, 0): return xml_attrs = {'name', 'elidable', 'oldersibling'} for labelElement in self.root.findall(".labels/label"): unknown_attrs = set(labelElement.attrib) - xml_attrs if unknown_attrs: raise DesignSpaceDocumentError(f"Label 
element contains unknown attributes: {', '.join(unknown_attrs)}") name = labelElement.get("name") if name is None: raise DesignSpaceDocumentError("label element must have a name attribute.") designLocation, userLocation = self.locationFromElement(labelElement) if designLocation: raise DesignSpaceDocumentError(f'<label> element "{name}" must only have user locations (using uservalue="").') elidable = True if labelElement.get("elidable") == "true" else False olderSibling = True if labelElement.get("oldersibling") == "true" else False labelNames = { lang: label_name.text or "" for label_name in labelElement.findall("labelname") for attr, lang in label_name.items() if attr == XML_LANG # Note: elementtree reads the "xml:lang" attribute name as # '{http://www.w3.org/XML/1998/namespace}lang' } locationLabel = self.locationLabelDescriptorClass( name=name, userLocation=userLocation, elidable=elidable, olderSibling=olderSibling, labelNames=labelNames, ) self.documentObject.locationLabels.append(locationLabel) def readVariableFonts(self): if self.documentObject.formatTuple < (5, 0): return xml_attrs = {'name', 'filename'} for variableFontElement in self.root.findall(".variable-fonts/variable-font"): unknown_attrs = set(variableFontElement.attrib) - xml_attrs if unknown_attrs: raise DesignSpaceDocumentError(f"variable-font element contains unknown attributes: {', '.join(unknown_attrs)}") name = variableFontElement.get("name") if name is None: raise DesignSpaceDocumentError("variable-font element must have a name attribute.") filename = variableFontElement.get("filename") axisSubsetsElement = variableFontElement.find(".axis-subsets") if axisSubsetsElement is None: raise DesignSpaceDocumentError("variable-font element must contain an axis-subsets element.") axisSubsets = [] for axisSubset in axisSubsetsElement.iterfind(".axis-subset"): axisSubsets.append(self.readAxisSubset(axisSubset)) lib = None libElement = variableFontElement.find(".lib") if libElement is not None: lib = plistlib.fromtree(libElement[0]) variableFont = self.variableFontsDescriptorClass( name=name, filename=filename, axisSubsets=axisSubsets, lib=lib, ) self.documentObject.variableFonts.append(variableFont) def readAxisSubset(self, element: ET.Element): if "uservalue" in element.attrib: xml_attrs = {'name', 'uservalue'} unknown_attrs = set(element.attrib) - xml_attrs if unknown_attrs: raise DesignSpaceDocumentError(f"axis-subset element contains unknown attributes: {', '.join(unknown_attrs)}") name = element.get("name") if name is None: raise DesignSpaceDocumentError("axis-subset element must have a name attribute.") userValueStr = element.get("uservalue") if userValueStr is None: raise DesignSpaceDocumentError( "The axis-subset element for a discrete subset must have a uservalue attribute." 
) userValue = float(userValueStr) return self.valueAxisSubsetDescriptorClass(name=name, userValue=userValue) else: xml_attrs = {'name', 'userminimum', 'userdefault', 'usermaximum'} unknown_attrs = set(element.attrib) - xml_attrs if unknown_attrs: raise DesignSpaceDocumentError(f"axis-subset element contains unknown attributes: {', '.join(unknown_attrs)}") name = element.get("name") if name is None: raise DesignSpaceDocumentError("axis-subset element must have a name attribute.") userMinimum = element.get("userminimum") userDefault = element.get("userdefault") userMaximum = element.get("usermaximum") if userMinimum is not None and userDefault is not None and userMaximum is not None: return self.rangeAxisSubsetDescriptorClass( name=name, userMinimum=float(userMinimum), userDefault=float(userDefault), userMaximum=float(userMaximum), ) if all(v is None for v in (userMinimum, userDefault, userMaximum)): return self.rangeAxisSubsetDescriptorClass(name=name) raise DesignSpaceDocumentError( "axis-subset element must have min/max/default values or none at all." ) def readSources(self): for sourceCount, sourceElement in enumerate(self.root.findall(".sources/source")): filename = sourceElement.attrib.get('filename') if filename is not None and self.path is not None: sourcePath = os.path.abspath(os.path.join(os.path.dirname(self.path), filename)) else: sourcePath = None sourceName = sourceElement.attrib.get('name') if sourceName is None: # add a temporary source name sourceName = "temp_master.%d" % (sourceCount) sourceObject = self.sourceDescriptorClass() sourceObject.path = sourcePath # absolute path to the ufo source sourceObject.filename = filename # path as it is stored in the document sourceObject.name = sourceName familyName = sourceElement.attrib.get("familyname") if familyName is not None: sourceObject.familyName = familyName styleName = sourceElement.attrib.get("stylename") if styleName is not None: sourceObject.styleName = styleName for familyNameElement in sourceElement.findall('familyname'): for key, lang in familyNameElement.items(): if key == XML_LANG: familyName = familyNameElement.text sourceObject.setFamilyName(familyName, lang) designLocation, userLocation = self.locationFromElement(sourceElement) if userLocation: raise DesignSpaceDocumentError(f'<source> element "{sourceName}" must only have design locations (using xvalue="").') sourceObject.location = designLocation layerName = sourceElement.attrib.get('layer') if layerName is not None: sourceObject.layerName = layerName for libElement in sourceElement.findall('.lib'): if libElement.attrib.get('copy') == '1': sourceObject.copyLib = True for groupsElement in sourceElement.findall('.groups'): if groupsElement.attrib.get('copy') == '1': sourceObject.copyGroups = True for infoElement in sourceElement.findall(".info"): if infoElement.attrib.get('copy') == '1': sourceObject.copyInfo = True if infoElement.attrib.get('mute') == '1': sourceObject.muteInfo = True for featuresElement in sourceElement.findall(".features"): if featuresElement.attrib.get('copy') == '1': sourceObject.copyFeatures = True for glyphElement in sourceElement.findall(".glyph"): glyphName = glyphElement.attrib.get('name') if glyphName is None: continue if glyphElement.attrib.get('mute') == '1': sourceObject.mutedGlyphNames.append(glyphName) for kerningElement in sourceElement.findall(".kerning"): if kerningElement.attrib.get('mute') == '1': sourceObject.muteKerning = True self.documentObject.sources.append(sourceObject) def locationFromElement(self, element): """Read a 
nested ``<location>`` element inside the given ``element``. .. versionchanged:: 5.0 Return a tuple of (designLocation, userLocation) """ elementLocation = (None, None) for locationElement in element.findall('.location'): elementLocation = self.readLocationElement(locationElement) break return elementLocation def readLocationElement(self, locationElement): """Read a ``<location>`` element. .. versionchanged:: 5.0 Return a tuple of (designLocation, userLocation) """ if self._strictAxisNames and not self.documentObject.axes: raise DesignSpaceDocumentError("No axes defined") userLoc = {} designLoc = {} for dimensionElement in locationElement.findall(".dimension"): dimName = dimensionElement.attrib.get("name") if self._strictAxisNames and dimName not in self.axisDefaults: # In case the document contains no axis definitions, self.log.warning("Location with undefined axis: \"%s\".", dimName) continue userValue = xValue = yValue = None try: userValue = dimensionElement.attrib.get('uservalue') if userValue is not None: userValue = float(userValue) except ValueError: self.log.warning("ValueError in readLocation userValue %3.3f", userValue) try: xValue = dimensionElement.attrib.get('xvalue') if xValue is not None: xValue = float(xValue) except ValueError: self.log.warning("ValueError in readLocation xValue %3.3f", xValue) try: yValue = dimensionElement.attrib.get('yvalue') if yValue is not None: yValue = float(yValue) except ValueError: self.log.warning("ValueError in readLocation yValue %3.3f", yValue) if userValue is None == xValue is None: raise DesignSpaceDocumentError(f'Exactly one of uservalue="" or xvalue="" must be provided for location dimension "{dimName}"') if yValue is not None: if xValue is None: raise DesignSpaceDocumentError(f'Missing xvalue="" for the location dimension "{dimName}"" with yvalue="{yValue}"') designLoc[dimName] = (xValue, yValue) elif xValue is not None: designLoc[dimName] = xValue else: userLoc[dimName] = userValue return designLoc, userLoc def readInstances(self, makeGlyphs=True, makeKerning=True, makeInfo=True): instanceElements = self.root.findall('.instances/instance') for instanceElement in instanceElements: self._readSingleInstanceElement(instanceElement, makeGlyphs=makeGlyphs, makeKerning=makeKerning, makeInfo=makeInfo) def _readSingleInstanceElement(self, instanceElement, makeGlyphs=True, makeKerning=True, makeInfo=True): filename = instanceElement.attrib.get('filename') if filename is not None and self.documentObject.path is not None: instancePath = os.path.join(os.path.dirname(self.documentObject.path), filename) else: instancePath = None instanceObject = self.instanceDescriptorClass() instanceObject.path = instancePath # absolute path to the instance instanceObject.filename = filename # path as it is stored in the document name = instanceElement.attrib.get("name") if name is not None: instanceObject.name = name familyname = instanceElement.attrib.get('familyname') if familyname is not None: instanceObject.familyName = familyname stylename = instanceElement.attrib.get('stylename') if stylename is not None: instanceObject.styleName = stylename postScriptFontName = instanceElement.attrib.get('postscriptfontname') if postScriptFontName is not None: instanceObject.postScriptFontName = postScriptFontName styleMapFamilyName = instanceElement.attrib.get('stylemapfamilyname') if styleMapFamilyName is not None: instanceObject.styleMapFamilyName = styleMapFamilyName styleMapStyleName = instanceElement.attrib.get('stylemapstylename') if styleMapStyleName is not None: 
instanceObject.styleMapStyleName = styleMapStyleName # read localised names for styleNameElement in instanceElement.findall('stylename'): for key, lang in styleNameElement.items(): if key == XML_LANG: styleName = styleNameElement.text instanceObject.setStyleName(styleName, lang) for familyNameElement in instanceElement.findall('familyname'): for key, lang in familyNameElement.items(): if key == XML_LANG: familyName = familyNameElement.text instanceObject.setFamilyName(familyName, lang) for styleMapStyleNameElement in instanceElement.findall('stylemapstylename'): for key, lang in styleMapStyleNameElement.items(): if key == XML_LANG: styleMapStyleName = styleMapStyleNameElement.text instanceObject.setStyleMapStyleName(styleMapStyleName, lang) for styleMapFamilyNameElement in instanceElement.findall('stylemapfamilyname'): for key, lang in styleMapFamilyNameElement.items(): if key == XML_LANG: styleMapFamilyName = styleMapFamilyNameElement.text instanceObject.setStyleMapFamilyName(styleMapFamilyName, lang) designLocation, userLocation = self.locationFromElement(instanceElement) locationLabel = instanceElement.attrib.get('location') if (designLocation or userLocation) and locationLabel is not None: raise DesignSpaceDocumentError('instance element must have at most one of the location="..." attribute or the nested location element') instanceObject.locationLabel = locationLabel instanceObject.userLocation = userLocation or {} instanceObject.designLocation = designLocation or {} for glyphElement in instanceElement.findall('.glyphs/glyph'): self.readGlyphElement(glyphElement, instanceObject) for infoElement in instanceElement.findall("info"): self.readInfoElement(infoElement, instanceObject) for libElement in instanceElement.findall('lib'): self.readLibElement(libElement, instanceObject) self.documentObject.instances.append(instanceObject) def readLibElement(self, libElement, instanceObject): """Read the lib element for the given instance.""" instanceObject.lib = plistlib.fromtree(libElement[0]) def readInfoElement(self, infoElement, instanceObject): """ Read the info element.""" instanceObject.info = True def readGlyphElement(self, glyphElement, instanceObject): """ Read the glyph element, which could look like either one of these: .. code-block:: xml <glyph name="b" unicode="0x62"/> <glyph name="b"/> <glyph name="b"> <master location="location-token-bbb" source="master-token-aaa2"/> <master glyphname="b.alt1" location="location-token-ccc" source="master-token-aaa3"/> <note> This is an instance from an anisotropic interpolation. 
</note> </glyph> """ glyphData = {} glyphName = glyphElement.attrib.get('name') if glyphName is None: raise DesignSpaceDocumentError("Glyph object without name attribute") mute = glyphElement.attrib.get("mute") if mute == "1": glyphData['mute'] = True # unicode unicodes = glyphElement.attrib.get('unicode') if unicodes is not None: try: unicodes = [int(u, 16) for u in unicodes.split(" ")] glyphData['unicodes'] = unicodes except ValueError: raise DesignSpaceDocumentError("unicode values %s are not integers" % unicodes) for noteElement in glyphElement.findall('.note'): glyphData['note'] = noteElement.text break designLocation, userLocation = self.locationFromElement(glyphElement) if userLocation: raise DesignSpaceDocumentError(f'<glyph> element "{glyphName}" must only have design locations (using xvalue="").') if designLocation is not None: glyphData['instanceLocation'] = designLocation glyphSources = None for masterElement in glyphElement.findall('.masters/master'): fontSourceName = masterElement.attrib.get('source') designLocation, userLocation = self.locationFromElement(masterElement) if userLocation: raise DesignSpaceDocumentError(f'<master> element "{fontSourceName}" must only have design locations (using xvalue="").') masterGlyphName = masterElement.attrib.get('glyphname') if masterGlyphName is None: # if we don't read a glyphname, use the one we have masterGlyphName = glyphName d = dict(font=fontSourceName, location=designLocation, glyphName=masterGlyphName) if glyphSources is None: glyphSources = [] glyphSources.append(d) if glyphSources is not None: glyphData['masters'] = glyphSources instanceObject.glyphs[glyphName] = glyphData def readLib(self): """Read the lib element for the whole document.""" for libElement in self.root.findall(".lib"): self.documentObject.lib = plistlib.fromtree(libElement[0]) class DesignSpaceDocument(LogMixin, AsDictMixin): """The DesignSpaceDocument object can read and write ``.designspace`` data. It imports the axes, sources, variable fonts and instances to very basic **descriptor** objects that store the data in attributes. Data is added to the document by creating such descriptor objects, filling them with data and then adding them to the document. This makes it easy to integrate this object in different contexts. The **DesignSpaceDocument** object can be subclassed to work with different objects, as long as they have the same attributes. Reader and Writer objects can be subclassed as well. **Note:** Python attribute names are usually camelCased, the corresponding `XML <document-xml-structure>`_ attributes are usually all lowercase. .. code:: python from fontTools.designspaceLib import DesignSpaceDocument doc = DesignSpaceDocument.fromfile("some/path/to/my.designspace") doc.formatVersion doc.elidedFallbackName doc.axes doc.locationLabels doc.rules doc.rulesProcessingLast doc.sources doc.variableFonts doc.instances doc.lib """ def __init__(self, readerClass=None, writerClass=None): self.path = None """String, optional. When the document is read from the disk, this is the full path that was given to :meth:`read` or :meth:`fromfile`. """ self.filename = None """String, optional. When the document is read from the disk, this is its original file name, i.e. the last part of its path. When the document is produced by a Python script and still only exists in memory, the producing script can write here an indication of a possible "good" filename, in case one wants to save the file somewhere. 
""" self.formatVersion: Optional[str] = None """Format version for this document, as a string. E.g. "4.0" """ self.elidedFallbackName: Optional[str] = None """STAT Style Attributes Header field ``elidedFallbackNameID``. See: `OTSpec STAT Style Attributes Header <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#style-attributes-header>`_ .. versionadded:: 5.0 """ self.axes: List[Union[AxisDescriptor, DiscreteAxisDescriptor]] = [] """List of this document's axes.""" self.locationLabels: List[LocationLabelDescriptor] = [] """List of this document's STAT format 4 labels. .. versionadded:: 5.0""" self.rules: List[RuleDescriptor] = [] """List of this document's rules.""" self.rulesProcessingLast: bool = False """This flag indicates whether the substitution rules should be applied before or after other glyph substitution features. - False: before - True: after. Default is False. For new projects, you probably want True. See the following issues for more information: `fontTools#1371 <https://github.com/fonttools/fonttools/issues/1371#issuecomment-590214572>`__ `fontTools#2050 <https://github.com/fonttools/fonttools/issues/2050#issuecomment-678691020>`__ If you want to use a different feature altogether, e.g. ``calt``, use the lib key ``com.github.fonttools.varLib.featureVarsFeatureTag`` .. code:: xml <lib> <dict> <key>com.github.fonttools.varLib.featureVarsFeatureTag</key> <string>calt</string> </dict> </lib> """ self.sources: List[SourceDescriptor] = [] """List of this document's sources.""" self.variableFonts: List[VariableFontDescriptor] = [] """List of this document's variable fonts. .. versionadded:: 5.0""" self.instances: List[InstanceDescriptor] = [] """List of this document's instances.""" self.lib: Dict = {} """User defined, custom data associated with the whole document. Use reverse-DNS notation to identify your own data. Respect the data stored by others. """ self.default: Optional[str] = None """Name of the default master. This attribute is updated by the :meth:`findDefault` """ if readerClass is not None: self.readerClass = readerClass else: self.readerClass = BaseDocReader if writerClass is not None: self.writerClass = writerClass else: self.writerClass = BaseDocWriter @classmethod def fromfile(cls, path, readerClass=None, writerClass=None): """Read a designspace file from ``path`` and return a new instance of :class:. """ self = cls(readerClass=readerClass, writerClass=writerClass) self.read(path) return self @classmethod def fromstring(cls, string, readerClass=None, writerClass=None): self = cls(readerClass=readerClass, writerClass=writerClass) reader = self.readerClass.fromstring(string, self) reader.read() if self.sources: self.findDefault() return self def tostring(self, encoding=None): """Returns the designspace as a string. Default encoding ``utf-8``.""" if encoding is str or ( encoding is not None and encoding.lower() == "unicode" ): f = StringIO() xml_declaration = False elif encoding is None or encoding == "utf-8": f = BytesIO() encoding = "UTF-8" xml_declaration = True else: raise ValueError("unsupported encoding: '%s'" % encoding) writer = self.writerClass(f, self) writer.write(encoding=encoding, xml_declaration=xml_declaration) return f.getvalue() def read(self, path): """Read a designspace file from ``path`` and populates the fields of ``self`` with the data. 
""" if hasattr(path, "__fspath__"): # support os.PathLike objects path = path.__fspath__() self.path = path self.filename = os.path.basename(path) reader = self.readerClass(path, self) reader.read() if self.sources: self.findDefault() def write(self, path): """Write this designspace to ``path``.""" if hasattr(path, "__fspath__"): # support os.PathLike objects path = path.__fspath__() self.path = path self.filename = os.path.basename(path) self.updatePaths() writer = self.writerClass(path, self) writer.write() def _posixRelativePath(self, otherPath): relative = os.path.relpath(otherPath, os.path.dirname(self.path)) return posix(relative) def updatePaths(self): """ Right before we save we need to identify and respond to the following situations: In each descriptor, we have to do the right thing for the filename attribute. :: case 1. descriptor.filename == None descriptor.path == None -- action: write as is, descriptors will not have a filename attr. useless, but no reason to interfere. case 2. descriptor.filename == "../something" descriptor.path == None -- action: write as is. The filename attr should not be touched. case 3. descriptor.filename == None descriptor.path == "~/absolute/path/there" -- action: calculate the relative path for filename. We're not overwriting some other value for filename, it should be fine case 4. descriptor.filename == '../somewhere' descriptor.path == "~/absolute/path/there" -- action: there is a conflict between the given filename, and the path. So we know where the file is relative to the document. Can't guess why they're different, we just choose for path to be correct and update filename. """ assert self.path is not None for descriptor in self.sources + self.instances: if descriptor.path is not None: # case 3 and 4: filename gets updated and relativized descriptor.filename = self._posixRelativePath(descriptor.path) def addSource(self, sourceDescriptor: SourceDescriptor): """Add the given ``sourceDescriptor`` to ``doc.sources``.""" self.sources.append(sourceDescriptor) def addSourceDescriptor(self, **kwargs): """Instantiate a new :class:`SourceDescriptor` using the given ``kwargs`` and add it to ``doc.sources``. """ source = self.writerClass.sourceDescriptorClass(**kwargs) self.addSource(source) return source def addInstance(self, instanceDescriptor: InstanceDescriptor): """Add the given ``instanceDescriptor`` to :attr:`instances`.""" self.instances.append(instanceDescriptor) def addInstanceDescriptor(self, **kwargs): """Instantiate a new :class:`InstanceDescriptor` using the given ``kwargs`` and add it to :attr:`instances`. """ instance = self.writerClass.instanceDescriptorClass(**kwargs) self.addInstance(instance) return instance def addAxis(self, axisDescriptor: Union[AxisDescriptor, DiscreteAxisDescriptor]): """Add the given ``axisDescriptor`` to :attr:`axes`.""" self.axes.append(axisDescriptor) def addAxisDescriptor(self, **kwargs): """Instantiate a new :class:`AxisDescriptor` using the given ``kwargs`` and add it to :attr:`axes`. The axis will be and instance of :class:`DiscreteAxisDescriptor` if the ``kwargs`` provide a ``value``, or a :class:`AxisDescriptor` otherwise. 
""" if "values" in kwargs: axis = self.writerClass.discreteAxisDescriptorClass(**kwargs) else: axis = self.writerClass.axisDescriptorClass(**kwargs) self.addAxis(axis) return axis def addRule(self, ruleDescriptor: RuleDescriptor): """Add the given ``ruleDescriptor`` to :attr:`rules`.""" self.rules.append(ruleDescriptor) def addRuleDescriptor(self, **kwargs): """Instantiate a new :class:`RuleDescriptor` using the given ``kwargs`` and add it to :attr:`rules`. """ rule = self.writerClass.ruleDescriptorClass(**kwargs) self.addRule(rule) return rule def addVariableFont(self, variableFontDescriptor: VariableFontDescriptor): """Add the given ``variableFontDescriptor`` to :attr:`variableFonts`. .. versionadded:: 5.0 """ self.variableFonts.append(variableFontDescriptor) def addVariableFontDescriptor(self, **kwargs): """Instantiate a new :class:`VariableFontDescriptor` using the given ``kwargs`` and add it to :attr:`variableFonts`. .. versionadded:: 5.0 """ variableFont = self.writerClass.variableFontDescriptorClass(**kwargs) self.addVariableFont(variableFont) return variableFont def addLocationLabel(self, locationLabelDescriptor: LocationLabelDescriptor): """Add the given ``locationLabelDescriptor`` to :attr:`locationLabels`. .. versionadded:: 5.0 """ self.locationLabels.append(locationLabelDescriptor) def addLocationLabelDescriptor(self, **kwargs): """Instantiate a new :class:`LocationLabelDescriptor` using the given ``kwargs`` and add it to :attr:`locationLabels`. .. versionadded:: 5.0 """ locationLabel = self.writerClass.locationLabelDescriptorClass(**kwargs) self.addLocationLabel(locationLabel) return locationLabel def newDefaultLocation(self): """Return a dict with the default location in design space coordinates.""" # Without OrderedDict, output XML would be non-deterministic. # https://github.com/LettError/designSpaceDocument/issues/10 loc = collections.OrderedDict() for axisDescriptor in self.axes: loc[axisDescriptor.name] = axisDescriptor.map_forward( axisDescriptor.default ) return loc def labelForUserLocation(self, userLocation: SimpleLocationDict) -> Optional[LocationLabelDescriptor]: """Return the :class:`LocationLabel` that matches the given ``userLocation``, or ``None`` if no such label exists. .. versionadded:: 5.0 """ return next( (label for label in self.locationLabels if label.userLocation == userLocation), None ) def updateFilenameFromPath(self, masters=True, instances=True, force=False): """Set a descriptor filename attr from the path and this document path. If the filename attribute is not None: skip it. 
""" if masters: for descriptor in self.sources: if descriptor.filename is not None and not force: continue if self.path is not None: descriptor.filename = self._posixRelativePath(descriptor.path) if instances: for descriptor in self.instances: if descriptor.filename is not None and not force: continue if self.path is not None: descriptor.filename = self._posixRelativePath(descriptor.path) def newAxisDescriptor(self): """Ask the writer class to make us a new axisDescriptor.""" return self.writerClass.getAxisDecriptor() def newSourceDescriptor(self): """Ask the writer class to make us a new sourceDescriptor.""" return self.writerClass.getSourceDescriptor() def newInstanceDescriptor(self): """Ask the writer class to make us a new instanceDescriptor.""" return self.writerClass.getInstanceDescriptor() def getAxisOrder(self): """Return a list of axis names, in the same order as defined in the document.""" names = [] for axisDescriptor in self.axes: names.append(axisDescriptor.name) return names def getAxis(self, name): """Return the axis with the given ``name``, or ``None`` if no such axis exists.""" for axisDescriptor in self.axes: if axisDescriptor.name == name: return axisDescriptor return None def getLocationLabel(self, name: str) -> Optional[LocationLabelDescriptor]: """Return the top-level location label with the given ``name``, or ``None`` if no such label exists. .. versionadded:: 5.0 """ for label in self.locationLabels: if label.name == name: return label return None def map_forward(self, userLocation: SimpleLocationDict) -> SimpleLocationDict: """Map a user location to a design location. Assume that missing coordinates are at the default location for that axis. Note: the output won't be anisotropic, only the xvalue is set. .. versionadded:: 5.0 """ return { axis.name: axis.map_forward(userLocation.get(axis.name, axis.default)) for axis in self.axes } def map_backward(self, designLocation: AnisotropicLocationDict) -> SimpleLocationDict: """Map a design location to a user location. Assume that missing coordinates are at the default location for that axis. When the input has anisotropic locations, only the xvalue is used. .. versionadded:: 5.0 """ return { axis.name: ( axis.map_backward(designLocation[axis.name]) if axis.name in designLocation else axis.default ) for axis in self.axes } def findDefault(self): """Set and return SourceDescriptor at the default location or None. The default location is the set of all `default` values in user space of all axes. This function updates the document's :attr:`default` value. .. versionchanged:: 5.0 Allow the default source to not specify some of the axis values, and they are assumed to be the default. See :meth:`SourceDescriptor.getFullDesignLocation()` """ self.default = None # Convert the default location from user space to design space before comparing # it against the SourceDescriptor locations (always in design space). 
defaultDesignLocation = self.newDefaultLocation() for sourceDescriptor in self.sources: if sourceDescriptor.getFullDesignLocation(self) == defaultDesignLocation: self.default = sourceDescriptor return sourceDescriptor return None def normalizeLocation(self, location): """Return a dict with normalized axis values.""" from fontTools.varLib.models import normalizeValue new = {} for axis in self.axes: if axis.name not in location: # skipping this dimension it seems continue value = location[axis.name] # 'anisotropic' location, take first coord only if isinstance(value, tuple): value = value[0] triple = [ axis.map_forward(v) for v in (axis.minimum, axis.default, axis.maximum) ] new[axis.name] = normalizeValue(value, triple) return new def normalize(self): """ Normalise the geometry of this designspace: - scale all the locations of all masters and instances to the -1 - 0 - 1 value. - we need the axis data to do the scaling, so we do those last. """ # masters for item in self.sources: item.location = self.normalizeLocation(item.location) # instances for item in self.instances: # glyph masters for this instance for _, glyphData in item.glyphs.items(): glyphData['instanceLocation'] = self.normalizeLocation(glyphData['instanceLocation']) for glyphMaster in glyphData['masters']: glyphMaster['location'] = self.normalizeLocation(glyphMaster['location']) item.location = self.normalizeLocation(item.location) # the axes for axis in self.axes: # scale the map first newMap = [] for inputValue, outputValue in axis.map: newOutputValue = self.normalizeLocation({axis.name: outputValue}).get(axis.name) newMap.append((inputValue, newOutputValue)) if newMap: axis.map = newMap # finally the axis values minimum = self.normalizeLocation({axis.name: axis.minimum}).get(axis.name) maximum = self.normalizeLocation({axis.name: axis.maximum}).get(axis.name) default = self.normalizeLocation({axis.name: axis.default}).get(axis.name) # and set them in the axis.minimum axis.minimum = minimum axis.maximum = maximum axis.default = default # now the rules for rule in self.rules: newConditionSets = [] for conditions in rule.conditionSets: newConditions = [] for cond in conditions: if cond.get('minimum') is not None: minimum = self.normalizeLocation({cond['name']: cond['minimum']}).get(cond['name']) else: minimum = None if cond.get('maximum') is not None: maximum = self.normalizeLocation({cond['name']: cond['maximum']}).get(cond['name']) else: maximum = None newConditions.append(dict(name=cond['name'], minimum=minimum, maximum=maximum)) newConditionSets.append(newConditions) rule.conditionSets = newConditionSets def loadSourceFonts(self, opener, **kwargs): """Ensure SourceDescriptor.font attributes are loaded, and return list of fonts. Takes a callable which initializes a new font object (e.g. TTFont, or defcon.Font, etc.) from the SourceDescriptor.path, and sets the SourceDescriptor.font attribute. If the font attribute is already not None, it is not loaded again. Fonts with the same path are only loaded once and shared among SourceDescriptors. For example, to load UFO sources using defcon: designspace = DesignSpaceDocument.fromfile("path/to/my.designspace") designspace.loadSourceFonts(defcon.Font) Or to load masters as FontTools binary fonts, including extra options: designspace.loadSourceFonts(ttLib.TTFont, recalcBBoxes=False) Args: opener (Callable): takes one required positional argument, the source.path, and an optional list of keyword arguments, and returns a new font object loaded from the path. 
**kwargs: extra options passed on to the opener function. Returns: List of font objects in the order they appear in the sources list. """ # we load fonts with the same source.path only once loaded = {} fonts = [] for source in self.sources: if source.font is not None: # font already loaded fonts.append(source.font) continue if source.path in loaded: source.font = loaded[source.path] else: if source.path is None: raise DesignSpaceDocumentError( "Designspace source '%s' has no 'path' attribute" % (source.name or "<Unknown>") ) source.font = opener(source.path, **kwargs) loaded[source.path] = source.font fonts.append(source.font) return fonts @property def formatTuple(self): """Return the formatVersion as a tuple of (major, minor). .. versionadded:: 5.0 """ if self.formatVersion is None: return (5, 0) numbers = (int(i) for i in self.formatVersion.split(".")) major = next(numbers) minor = next(numbers, 0) return (major, minor) def getVariableFonts(self) -> List[VariableFontDescriptor]: """Return all variable fonts defined in this document, or implicit variable fonts that can be built from the document's continuous axes. In the case of Designspace documents before version 5, the whole document was implicitly describing a variable font that covers the whole space. In version 5 and above documents, there can be as many variable fonts as there are locations on discrete axes. .. seealso:: :func:`splitInterpolable` .. versionadded:: 5.0 """ if self.variableFonts: return self.variableFonts variableFonts = [] discreteAxes = [] rangeAxisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = [] for axis in self.axes: if isinstance(axis, DiscreteAxisDescriptor): discreteAxes.append(axis) else: rangeAxisSubsets.append(RangeAxisSubsetDescriptor(name=axis.name)) valueCombinations = itertools.product(*[axis.values for axis in discreteAxes]) for values in valueCombinations: basename = None if self.filename is not None: basename = os.path.splitext(self.filename)[0] + "-VF" if self.path is not None: basename = os.path.splitext(os.path.basename(self.path))[0] + "-VF" if basename is None: basename = "VF" axisNames = "".join([f"-{axis.tag}{value}" for axis, value in zip(discreteAxes, values)]) variableFonts.append(VariableFontDescriptor( name=f"{basename}{axisNames}", axisSubsets=rangeAxisSubsets + [ ValueAxisSubsetDescriptor(name=axis.name, userValue=value) for axis, value in zip(discreteAxes, values) ] )) return variableFonts def deepcopyExceptFonts(self): """Allow deep-copying a DesignSpace document without deep-copying attached UFO fonts or TTFont objects. The :attr:`font` attribute is shared by reference between the original and the copy. .. versionadded:: 5.0 """ fonts = [source.font for source in self.sources] try: for source in self.sources: source.font = None res = copy.deepcopy(self) for source, font in zip(res.sources, fonts): res.font = font return res finally: for source, font in zip(self.sources, fonts): source.font = font
40.406579
147
0.612266
11,854
119,159
6.130251
0.096676
0.006949
0.01189
0.006743
0.295729
0.237092
0.203845
0.177066
0.155213
0.139897
0
0.005225
0.294934
119,159
2,948
148
40.420285
0.859561
0.168514
0
0.272568
0
0
0.073511
0.004484
0
0
0
0
0.001747
1
0.075131
false
0
0.009901
0.006407
0.175306
0.001165
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
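The fontTools.designspaceLib record above documents a descriptor-based API (DesignSpaceDocument plus addAxisDescriptor / addSourceDescriptor / addInstanceDescriptor and write). As an illustrative aside, a minimal sketch of driving that API from a script might look like the following; the font file names, axis range and locations are hypothetical and not taken from the record.

from fontTools.designspaceLib import DesignSpaceDocument

doc = DesignSpaceDocument()

# One continuous weight axis; addAxisDescriptor() builds an AxisDescriptor here
# because no discrete "values" keyword is passed.
doc.addAxisDescriptor(name="Weight", tag="wght", minimum=100, default=400, maximum=900)

# Two masters and one interpolated instance, located in design coordinates.
doc.addSourceDescriptor(filename="MyFont-Light.ufo", name="light", location={"Weight": 100})
doc.addSourceDescriptor(filename="MyFont-Bold.ufo", name="bold", location={"Weight": 900})
doc.addInstanceDescriptor(filename="instances/MyFont-Regular.ufo", styleName="Regular",
                          location={"Weight": 400})

# write() calls updatePaths() before serializing; with only filenames set (no absolute
# paths), the filename attributes are written out as given.
doc.write("MyFont.designspace")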
4af82655248e89ae648896a2197ee327a71bd7a6
3,230
py
Python
ax/models/torch/posterior_mean.py
dme65/Ax
c460eab90d464df87e6478b5765fd02fb5126adb
[ "MIT" ]
1
2021-01-11T18:16:28.000Z
2021-01-11T18:16:28.000Z
ax/models/torch/posterior_mean.py
dme65/Ax
c460eab90d464df87e6478b5765fd02fb5126adb
[ "MIT" ]
null
null
null
ax/models/torch/posterior_mean.py
dme65/Ax
c460eab90d464df87e6478b5765fd02fb5126adb
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any, Optional, Tuple import torch from botorch.acquisition.acquisition import AcquisitionFunction from botorch.acquisition.monte_carlo import qSimpleRegret from botorch.acquisition.objective import ConstrainedMCObjective, GenericMCObjective from botorch.acquisition.utils import get_infeasible_cost from botorch.models.model import Model from botorch.utils import ( get_objective_weights_transform, get_outcome_constraint_transforms, ) from botorch.utils.multi_objective.scalarization import get_chebyshev_scalarization from botorch.utils.transforms import squeeze_last_dim from torch import Tensor def get_PosteriorMean( model: Model, objective_weights: Tensor, outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None, X_observed: Optional[Tensor] = None, X_pending: Optional[Tensor] = None, **kwargs: Any, ) -> AcquisitionFunction: r"""Instantiates a PosteriorMean acquisition function. Note: If no OutcomeConstraints given, return an analytic acquisition function. This requires {optimizer_kwargs: {joint_optimization: True}} or an optimizer that does not assume pending point support. Args: objective_weights: The objective is to maximize a weighted sum of the columns of f(x). These are the weights. outcome_constraints: A tuple of (A, b). For k outcome constraints and m outputs at f(x), A is (k x m) and b is (k x 1) such that A f(x) <= b. (Not used by single task models) X_observed: A tensor containing points observed for all objective outcomes and outcomes that appear in the outcome constraints (if there are any). X_pending: A tensor containing points whose evaluation is pending (i.e. that have been submitted for evaluation) present for all objective outcomes and outcomes that appear in the outcome constraints (if there are any). Returns: PosteriorMean: The instantiated acquisition function. """ if X_observed is None: raise ValueError("There are no feasible observed points.") # construct Objective module if kwargs.get("chebyshev_scalarization", False): obj_tf = get_chebyshev_scalarization( weights=objective_weights, Y=squeeze_last_dim(torch.stack(kwargs.get("Ys")).transpose(0, 1)), ) else: obj_tf = get_objective_weights_transform(objective_weights) if outcome_constraints is None: objective = GenericMCObjective(objective=obj_tf) else: con_tfs = get_outcome_constraint_transforms(outcome_constraints) inf_cost = get_infeasible_cost(X=X_observed, model=model, objective=obj_tf) objective = ConstrainedMCObjective( objective=obj_tf, constraints=con_tfs or [], infeasible_cost=inf_cost ) # Use qSimpleRegret, not analytic posterior, to handle arbitrary objective fns. acq_func = qSimpleRegret(model, objective=objective) return acq_func
41.948052
84
0.728793
410
3,230
5.604878
0.37561
0.038294
0.038294
0.024369
0.069626
0.069626
0.069626
0.069626
0.069626
0.069626
0
0.001566
0.208978
3,230
76
85
42.5
0.897847
0.419195
0
0.047619
0
0
0.035255
0.012871
0
0
0
0
0
1
0.02381
false
0
0.261905
0
0.309524
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
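The Ax record above (ax/models/torch/posterior_mean.py) wraps BoTorch's qSimpleRegret behind a get_PosteriorMean factory. A rough usage sketch follows; the SingleTaskGP surrogate, the synthetic training data, and the import path (inferred from the file path) are assumptions rather than anything stated in the record.

import torch
from botorch.models import SingleTaskGP
from ax.models.torch.posterior_mean import get_PosteriorMean  # module path assumed from the file path

# Made-up single-objective training data: 20 observations in 3 dimensions.
train_X = torch.rand(20, 3, dtype=torch.double)
train_Y = train_X.sum(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y)

acqf = get_PosteriorMean(
    model=model,
    objective_weights=torch.tensor([1.0], dtype=torch.double),  # maximize the single output
    X_observed=train_X,  # required; passing None raises ValueError
)

# Evaluate the acquisition value of one q=1 candidate batch of shape (1, 1, 3).
value = acqf(train_X[:1].unsqueeze(1))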
4af8cc653e14393ff950e095171d139b4a633baf
2,240
py
Python
src/drivers/velodyne_nodes/test/velodyne_node.test.py
fanyu2021/fyAutowareAuto
073661c0634de671ff01bda8a316a5ce10c96ca9
[ "Apache-2.0" ]
null
null
null
src/drivers/velodyne_nodes/test/velodyne_node.test.py
fanyu2021/fyAutowareAuto
073661c0634de671ff01bda8a316a5ce10c96ca9
[ "Apache-2.0" ]
null
null
null
src/drivers/velodyne_nodes/test/velodyne_node.test.py
fanyu2021/fyAutowareAuto
073661c0634de671ff01bda8a316a5ce10c96ca9
[ "Apache-2.0" ]
null
null
null
# Copyright 2018 the Autoware Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # #    http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Co-developed by Tier IV, Inc. and Apex.AI, Inc. import ament_index_python import launch import launch.actions import launch_ros.actions import lidar_integration def generate_test_description(ready_fn): PORT = lidar_integration.get_open_port() # The node under test and the checker node that will pass/fail our tests: test_topic = "veloyne_cloud_node_test_topic" velodyne_cloud_node = launch_ros.actions.Node( package="velodyne_nodes", node_executable="velodyne_cloud_node_exe", node_name="vlp16_driver_node", node_namespace="lidar_front", parameters=[ "{}/param/vlp16_test.param.yaml".format( ament_index_python.get_package_share_directory("velodyne_nodes") ), { "port": PORT, "expected_num_subscribers": 1, } ], remappings=[("points_raw", test_topic)], arguments=["--model", "vlp16"] ) pcl_checker = lidar_integration.make_pcl_checker( topic=test_topic, size=55000, period=100, period_tolerance=2.2, size_tolerance=1.4, ) return lidar_integration.get_lidar_launch_description( test_nodes=[velodyne_cloud_node], checkers=[pcl_checker], other_actions=[ launch.actions.OpaqueFunction(function=lambda context: ready_fn()) ], port=PORT ) # Test cases are created automatically by the lidar_integration package. We just need to # instantiate them active = lidar_integration.make_active_tests() after_shutdown = lidar_integration.make_post_shutdown_tests()
32
89
0.692857
284
2,240
5.235915
0.535211
0.075319
0.034297
0.02152
0
0
0
0
0
0
0
0.015571
0.225893
2,240
69
90
32.463768
0.841984
0.350446
0
0.047619
0
0
0.130919
0.073816
0
0
0
0
0
1
0.02381
false
0
0.119048
0
0.166667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
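The Autoware record above builds an integration test around a Velodyne driver node via the project's lidar_integration helpers. For orientation only, a stripped-down (non-test) launch sketch for the same node is shown below; it reuses the Dashing-era launch_ros keyword names that appear in the record, and the inline parameter values and remapped topic name are placeholders.

import launch
import launch_ros.actions


def generate_launch_description():
    # Driver node configured inline instead of via the package's .param.yaml file.
    driver = launch_ros.actions.Node(
        package="velodyne_nodes",
        node_executable="velodyne_cloud_node_exe",
        node_name="vlp16_driver_node",
        node_namespace="lidar_front",
        parameters=[{"port": 2368, "expected_num_subscribers": 1}],
        remappings=[("points_raw", "points_filtered")],
        arguments=["--model", "vlp16"],
    )
    return launch.LaunchDescription([driver])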
4af90b86c50a3ccef625f31f883eb2072c6ed40c
1,425
py
Python
example.py
manhcuogntin4/Color-transfer
14b139efa86bb49a07a118c905d9d82cd7ad10d3
[ "MIT" ]
null
null
null
example.py
manhcuogntin4/Color-transfer
14b139efa86bb49a07a118c905d9d82cd7ad10d3
[ "MIT" ]
null
null
null
example.py
manhcuogntin4/Color-transfer
14b139efa86bb49a07a118c905d9d82cd7ad10d3
[ "MIT" ]
1
2020-04-13T13:17:58.000Z
2020-04-13T13:17:58.000Z
# USAGE # python example.py --source images/ocean_sunset.jpg --target images/ocean_day.jpg # import the necessary packages from color_transfer import color_transfer import numpy as np import argparse import cv2 def show_image(title, image, width = 300): # resize the image to have a constant width, just to # make displaying the images take up less screen real # estate r = width / float(image.shape[1]) dim = (width, int(image.shape[0] * r)) resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA) # show the resized image cv2.imshow(title, resized) # construct the argument parser and parse the arguments ap = argparse.ArgumentParser() ap.add_argument("-s", "--source", required = True, help = "Path to the source image") ap.add_argument("-t", "--target", required = True, help = "Path to the target image") ap.add_argument("-o", "--output", help = "Path to the output image (optional)") args = vars(ap.parse_args()) # load the images source = cv2.imread(args["source"]) target = cv2.imread(args["target"]) # transfer the color distribution from the source image # to the target image transfer = color_transfer(source, target) # check to see if the output image should be saved if args["output"] is not None: cv2.imwrite(args["output"], transfer) # show the images and wait for a key press show_image("Source", source) show_image("Target", target) show_image("Transfer", transfer) cv2.waitKey(0)
30.978261
82
0.733333
218
1,425
4.729358
0.426606
0.034918
0.037827
0.037827
0.048497
0.048497
0
0
0
0
0
0.011523
0.147368
1,425
46
83
30.978261
0.837037
0.338947
0
0
0
0
0.168999
0
0
0
0
0
0
1
0.04
false
0
0.16
0
0.2
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
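The color-transfer record above is already a complete command-line example; the library call it wraps can also be used directly, as in this short sketch (the image paths reuse the ones named in the record's usage comment, and the output filename is a placeholder).

import cv2
from color_transfer import color_transfer

source = cv2.imread("images/ocean_sunset.jpg")
target = cv2.imread("images/ocean_day.jpg")

# Move the source image's color statistics onto the target image.
result = color_transfer(source, target)
cv2.imwrite("transfer.jpg", result)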
4afa3809e5300d1250cfab7d62f27391e130c231
9,060
py
Python
scripts/registration_pipeline.py
heethesh/Argoverse-HDMap-Update
61e9bf965a1fa7a0c74a2671457a2778d849bfe5
[ "Apache-2.0" ]
null
null
null
scripts/registration_pipeline.py
heethesh/Argoverse-HDMap-Update
61e9bf965a1fa7a0c74a2671457a2778d849bfe5
[ "Apache-2.0" ]
null
null
null
scripts/registration_pipeline.py
heethesh/Argoverse-HDMap-Update
61e9bf965a1fa7a0c74a2671457a2778d849bfe5
[ "Apache-2.0" ]
1
2020-09-08T04:32:21.000Z
2020-09-08T04:32:21.000Z
import copy import numpy as np import open3d as o3d from tqdm import tqdm from scipy import stats import utils_o3d as utils def remove_ground_plane(pcd, z_thresh=-2.7): cropped = copy.deepcopy(pcd) cropped_points = np.array(cropped.points) cropped_points = cropped_points[cropped_points[:, -1] > z_thresh] pcd_final = o3d.geometry.PointCloud() pcd_final.points = o3d.utility.Vector3dVector(cropped_points) return pcd_final def remove_y_plane(pcd, y_thresh=5): cropped = copy.deepcopy(pcd) cropped_points = np.array(cropped.points) cropped_points = cropped_points[cropped_points[:, 0] < y_thresh] cropped_points[:, -1] = -cropped_points[:, -1] pcd_final = o3d.geometry.PointCloud() pcd_final.points = o3d.utility.Vector3dVector(cropped_points) return pcd_final def compute_features(pcd, voxel_size, normals_nn=100, features_nn=120, downsample=True): normals_radius = voxel_size * 2 features_radius = voxel_size * 4 # Downsample the point cloud using Voxel grids if downsample: print(':: Input size:', np.array(pcd.points).shape) pcd_down = utils.downsample_point_cloud(pcd, voxel_size) print(':: Downsample with a voxel size %.3f' % voxel_size) print(':: Downsample size', np.array(pcd_down.points).shape) else: pcd_down = copy.deepcopy(pcd) # Estimate normals print(':: Estimate normal with search radius %.3f' % normals_radius) pcd_down.estimate_normals( o3d.geometry.KDTreeSearchParamHybrid(radius=normals_radius, max_nn=normals_nn)) # Compute FPFH features print(':: Compute FPFH feature with search radius %.3f' % features_radius) features = o3d.registration.compute_fpfh_feature(pcd_down, o3d.geometry.KDTreeSearchParamHybrid(radius=features_radius, max_nn=features_nn)) return pcd_down, features def match_features(pcd0, pcd1, feature0, feature1, thresh=None, display=False): pcd0, pcd1 = copy.deepcopy(pcd0), copy.deepcopy(pcd1) print(':: Input size 0:', np.array(pcd0.points).shape) print(':: Input size 1:', np.array(pcd1.points).shape) print(':: Features size 0:', np.array(feature0.data).shape) print(':: Features size 1:', np.array(feature1.data).shape) utils.paint_uniform_color(pcd0, color=[1, 0.706, 0]) utils.paint_uniform_color(pcd1, color=[0, 0.651, 0.929]) scores, indices = [], [] fpfh_tree = o3d.geometry.KDTreeFlann(feature1) for i in tqdm(range(len(pcd0.points)), desc=':: Feature Matching'): [_, idx, _] = fpfh_tree.search_knn_vector_xd(feature0.data[:, i], 1) scores.append(np.linalg.norm(pcd0.points[i] - pcd1.points[idx[0]])) indices.append([i, idx[0]]) scores, indices = np.array(scores), np.array(indices) median = np.median(scores) if thresh is None: thresh = median inliers_idx = np.where(scores <= thresh)[0] pcd0_idx = indices[inliers_idx, 0] pcd1_idx = indices[inliers_idx, 1] print(':: Score stats: Min=%0.3f, Max=%0.3f, Median=%0.3f, N<Thresh=%d' % ( np.min(scores), np.max(scores), median, len(inliers_idx))) if display: for i, j in zip(pcd0_idx, pcd1_idx): pcd0.colors[i] = [1, 0, 0] pcd1.colors[j] = [1, 0, 0] utils.display([pcd0, pcd1]) return pcd0_idx, pcd1_idx def estimate_scale(pcd0, pcd1, pcd0_idx, pcd1_idx, top_percent=1.0, ransac_iters=5000, sample_size=50): points0 = np.asarray(pcd0.points)[pcd0_idx] points1 = np.asarray(pcd1.points)[pcd1_idx] mean0 = np.mean(points0, axis=0) mean1 = np.mean(points1, axis=0) top_count = int(top_percent * len(pcd0_idx)) assert top_count > sample_size, 'top_count <= sample_size' scales = [] for i in tqdm(range(ransac_iters), desc=':: Scale Estimation RANSAC'): args = np.random.choice(top_count, sample_size, replace=False) points0_r = points0[args] points1_r = 
points1[args] score0 = np.sum((points0_r - mean0) ** 2, axis=1) score1 = np.sum((points1_r - mean1) ** 2, axis=1) scale = np.sqrt(np.mean(score1) / np.mean(score0)) scales.append(scale) best_scale = stats.mode(scales)[0][0] print(':: Estimated scale:', best_scale) return best_scale def global_registration(source_down, target_down, source_fpfh, target_fpfh, voxel_size, distance_threshold=1.0, num_iters=4000000, num_val_iters=500): print(':: Distance threshold %.3f' % distance_threshold) result = o3d.registration.registration_ransac_based_on_feature_matching( source_down, target_down, source_fpfh, target_fpfh, distance_threshold, o3d.registration.TransformationEstimationPointToPoint(False), 4, [ o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9), o3d.registration.CorrespondenceCheckerBasedOnDistance( distance_threshold) ], o3d.registration.RANSACConvergenceCriteria(num_iters, num_val_iters)) return result def fast_global_registration(source_down, target_down, source_fpfh, target_fpfh, voxel_size): distance_threshold = 1.0 result = o3d.registration.registration_fast_based_on_feature_matching( source_down, target_down, source_fpfh, target_fpfh, o3d.registration.FastGlobalRegistrationOption( maximum_correspondence_distance=distance_threshold)) return result def refine_registration(source, target, source_fpfh, target_fpfh, initial_result, voxel_size): distance_threshold = 0.1 print(':: Distance threshold %.3f' % distance_threshold) result = o3d.registration.registration_icp( source, target, distance_threshold, initial_result.transformation, o3d.registration.TransformationEstimationPointToPlane()) return result def registration(pcd0, pcd1, feature1, feature2, voxel_size, method='global'): if method == 'global': print('\nRANSAC global registration on scaled point clouds...') initial_result = global_registration(pcd0, pcd1, feature1, feature2, voxel_size) elif method == 'fast_global': print('\nFast global registration on scaled point clouds...') initial_result = fast_global_registration(pcd0, pcd1, feature1, feature2, voxel_size) else: print(':: Registration method not supported') return print(':: Initial registration results:') print(initial_result) print('\nDisplaying initial result...') draw_registration_result(pcd0, pcd1, initial_result.transformation) print('\nRefine registration...') result = refine_registration(pcd0, pcd1, feature1, feature2, initial_result, voxel_size) print(':: Final registration results:') print(result) return result def draw_registration_result(source, target, transformation): source_temp = copy.deepcopy(source) target_temp = copy.deepcopy(target) source_temp.paint_uniform_color([1, 0.706, 0]) target_temp.paint_uniform_color([0, 0.651, 0.929]) source_temp.transform(transformation) o3d.visualization.draw_geometries([source_temp, target_temp]) def run(): voxel_size = 0.2 dso_scale = 0.03 pcd_lidar = o3d.io.read_point_cloud('../maps/scans/scan_050.pcd') pcd_lidar = remove_ground_plane(pcd_lidar) pcd_dso = o3d.io.read_point_cloud('../maps/dso_map_cleaned.pcd') pcd_dso = remove_ground_plane(pcd_dso, z_thresh=4.5) pcd_dso = remove_y_plane(pcd_dso, y_thresh=0.2) # pcd_dso = utils.scale_point_cloud(pcd_dso, dso_scale).rotate([0.5, 0.5, 0.5]).translate([10, 20, 30]) # Ground plane removal results # utils.display(pcds=[pcd_lidar, pcd_dso], colors=[[1, 0.706, 0], [0, 0.651, 0.929]]) # utils.display(pcds=[pcd_dso], colors=[[0, 0.651, 0.929]]) # return print('\nComputing FPFH features for lidar point cloud...') pcd_lidar_down, features_lidar = compute_features(pcd_lidar, 
voxel_size=voxel_size) print('\nComputing FPFH features for DSO point cloud...') pcd_dso_down, features_dso = compute_features(pcd_dso, voxel_size=voxel_size * (dso_scale if dso_scale < 1 else 1)) print('\nMatching FPFH features...') pcd_lidar_idx, pcd_dso_idx = match_features(pcd_lidar_down, pcd_dso_down, features_lidar, features_dso, thresh=None) print('\nEstimating scale using matches...') scale = estimate_scale(pcd_lidar_down, pcd_dso_down, pcd_lidar_idx, pcd_dso_idx) scale = 0.06 print('\nCorrecting scale...') pcd_dso_scaled = utils.scale_point_cloud(pcd_dso, 1.0 / scale) utils.display(pcds=[pcd_lidar, pcd_dso_scaled], colors=[[1, 0.706, 0], [0, 0.651, 0.929]]) # return # Registration pcd_dso_scaled_down, features_dso_scaled = compute_features( pcd_dso_scaled, voxel_size=voxel_size) result = registration(pcd_lidar_down, pcd_dso_scaled_down, features_lidar, features_dso_scaled, voxel_size, method='global') print('\nDisplaying result...') draw_registration_result(pcd_lidar, pcd_dso_scaled, result.transformation) if __name__ == '__main__': run()
37.90795
119
0.703422
1,213
9,060
5.009068
0.177247
0.021725
0.01975
0.025675
0.276498
0.231402
0.193384
0.176103
0.142857
0.142857
0
0.037027
0.177263
9,060
238
120
38.067227
0.778106
0.042715
0
0.096386
0
0.006024
0.113253
0.006119
0
0
0
0
0.006024
1
0.066265
false
0
0.036145
0
0.162651
0.168675
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
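The registration_pipeline.py record above chains FPFH feature computation, feature matching, scale estimation and RANSAC alignment. The essential Open3D calls it relies on are condensed in the sketch below, written against the same o3d.registration namespace the record uses (newer Open3D releases expose it as o3d.pipelines.registration); the point-cloud paths and tuning constants are placeholders.

import open3d as o3d


def preprocess(pcd, voxel_size):
    # Voxel downsample, then estimate normals and FPFH features, mirroring compute_features() above.
    down = pcd.voxel_down_sample(voxel_size)
    down.estimate_normals(
        o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 2, max_nn=30))
    fpfh = o3d.registration.compute_fpfh_feature(
        down, o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 5, max_nn=100))
    return down, fpfh


voxel_size = 0.2
source = o3d.io.read_point_cloud("scan_a.pcd")
target = o3d.io.read_point_cloud("scan_b.pcd")
source_down, source_fpfh = preprocess(source, voxel_size)
target_down, target_fpfh = preprocess(target, voxel_size)

# Arguments passed positionally, in the same order the record uses.
result = o3d.registration.registration_ransac_based_on_feature_matching(
    source_down, target_down, source_fpfh, target_fpfh,
    voxel_size * 1.5,                                                   # max correspondence distance
    o3d.registration.TransformationEstimationPointToPoint(False),
    4,                                                                  # ransac_n
    [o3d.registration.CorrespondenceCheckerBasedOnDistance(voxel_size * 1.5)],
    o3d.registration.RANSACConvergenceCriteria(4000000, 500))
print(result.transformation)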
4afb20e82e1f9cc5d13cde9492b76ec1886669d1
36,825
py
Python
neo4j/aio/__init__.py
michaelcraige/neo4j-python-driver
27d0ce3f1941c4b29d0f050c6186a4f48ae4d30a
[ "Apache-2.0" ]
1
2021-05-18T14:11:39.000Z
2021-05-18T14:11:39.000Z
neo4j/aio/__init__.py
michaelcraige/neo4j-python-driver
27d0ce3f1941c4b29d0f050c6186a4f48ae4d30a
[ "Apache-2.0" ]
null
null
null
neo4j/aio/__init__.py
michaelcraige/neo4j-python-driver
27d0ce3f1941c4b29d0f050c6186a4f48ae4d30a
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # -*- encoding: utf-8 -*- # Copyright (c) 2002-2019 "Neo4j," # Neo4j Sweden AB [http://neo4j.com] # # This file is part of Neo4j. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from asyncio import ( IncompleteReadError, Lock, StreamReader, StreamReaderProtocol, StreamWriter, get_event_loop, wait, ) from collections import deque from logging import getLogger from os import strerror from random import choice from ssl import SSLError from sys import platform, version_info from time import perf_counter from neo4j.addressing import Address from neo4j.aio._collections import WaitingList from neo4j.aio._mixins import Addressable, Breakable from neo4j.errors import ( BoltError, BoltConnectionError, BoltSecurityError, BoltConnectionBroken, BoltHandshakeError, Neo4jAvailabilityError, ) from neo4j.api import Version from neo4j.conf import Config, PoolConfig from neo4j.meta import version as neo4j_version from neo4j.routing import RoutingTable log = getLogger(__name__) MAGIC = b"\x60\x60\xB0\x17" class Bolt(Addressable, object): #: True if this instance uses secure communication, false #: otherwise. secure = None #: As a class attribute, this denotes the version of Bolt handled #: by that subclass. As an instance attribute, this represents the #: version of the protocol in use. protocol_version = () # Record of the time at which this connection was opened. __t_opened = None # Handle to the StreamReader object. __reader = None # Handle to the StreamWriter object, which can be used on close. __writer = None # Flag to indicate that the connection is closed __closed = False @classmethod def default_user_agent(cls): """ Return the default user agent string for a connection. """ template = "neo4j-python/{} Python/{}.{}.{}-{}-{} ({})" fields = (neo4j_version,) + tuple(version_info) + (platform,) return template.format(*fields) @classmethod def protocol_handlers(cls, protocol_version=None): """ Return a dictionary of available Bolt protocol handlers, keyed by version tuple. If an explicit protocol version is provided, the dictionary will contain either zero or one items, depending on whether that version is supported. If no protocol version is provided, all available versions will be returned. :param protocol_version: tuple identifying a specific protocol version (e.g. (3, 5)) or None :return: dictionary of version tuple to handler class for all relevant and supported protocol versions :raise TypeError: if protocol version is not passed in a tuple """ # Carry out subclass imports locally to avoid circular # dependency issues. from neo4j.aio.bolt3 import Bolt3 handlers = {bolt.protocol_version: bolt for bolt in [ # This list can be updated as protocol # versions are added and removed. 
Bolt3, ]} if protocol_version is None: return handlers if not isinstance(protocol_version, tuple): raise TypeError("Protocol version must be specified as a tuple") return {version: handler for version, handler in handlers.items() if version == protocol_version} @classmethod def opener(cls, auth=None, **config): """ Create and return an opener function for a given set of configuration parameters. This is useful when multiple servers share the same configuration details, such as within a connection pool. """ async def f(address, *, loop=None): return await Bolt.open(address, auth=auth, loop=loop, **config) return f @classmethod async def open(cls, address, *, auth=None, loop=None, **config): """ Open a socket connection and perform protocol version negotiation, in order to construct and return a Bolt client instance for a supported Bolt protocol version. :param address: tuples of host and port, such as ("127.0.0.1", 7687) :param auth: :param loop: :param config: :return: instance of a Bolt subclass :raise BoltConnectionError: if a connection could not be established :raise BoltConnectionLost: if an I/O error occurs on the underlying socket connection :raise BoltHandshakeError: if handshake completes without a successful negotiation :raise TypeError: if any of the arguments provided are passed as incompatible types :raise ValueError: if any of the arguments provided are passed with unsupported values """ # Args address = Address(address) if loop is None: loop = get_event_loop() config = PoolConfig.consume(config) # Connect reader, writer = await cls._connect(address, loop, config) try: # Handshake subclass = await cls._handshake(reader, writer, config.protocol_version) # Instantiation obj = subclass(reader, writer) obj.secure = bool(config.secure) assert hasattr(obj, "__ainit__") await obj.__ainit__(auth) return obj except BoltError: writer.write_eof() writer.close() raise @classmethod async def _connect(cls, address, loop, config): """ Attempt to establish a TCP connection to the address provided. 
:param address: :param loop: :param config: :return: a 3-tuple of reader, writer and security settings for the new connection :raise BoltConnectionError: if a connection could not be established """ assert isinstance(address, Address) assert loop is not None assert isinstance(config, Config) connection_args = { "host": address.host, "port": address.port, "family": address.family, # TODO: other args } ssl_context = config.get_ssl_context() if ssl_context: connection_args["ssl"] = ssl_context connection_args["server_hostname"] = address.host log.debug("[#0000] C: <DIAL> %s", address) try: reader = BoltStreamReader(loop=loop) protocol = StreamReaderProtocol(reader, loop=loop) transport, _ = await loop.create_connection(lambda: protocol, **connection_args) writer = BoltStreamWriter(transport, protocol, reader, loop) except SSLError as err: log.debug("[#%04X] S: <REJECT> %s (%d %s)", 0, address, err.errno, strerror(err.errno)) raise BoltSecurityError("Failed to establish a secure connection", address) from err except OSError as err: log.debug("[#%04X] S: <REJECT> %s (%d %s)", 0, address, err.errno, strerror(err.errno)) raise BoltConnectionError("Failed to establish a connection", address) from err else: local_address = Address(transport.get_extra_info("sockname")) remote_address = Address(transport.get_extra_info("peername")) log.debug("[#%04X] S: <ACCEPT> %s -> %s", local_address.port_number, local_address, remote_address) return reader, writer @classmethod async def _handshake(cls, reader, writer, protocol_version): """ Carry out a Bolt handshake, optionally requesting a specific protocol version. :param reader: :param writer: :param protocol_version: :return: :raise BoltConnectionLost: if an I/O error occurs on the underlying socket connection :raise BoltHandshakeError: if handshake completes without a successful negotiation """ local_address = Address(writer.transport.get_extra_info("sockname")) remote_address = Address(writer.transport.get_extra_info("peername")) handlers = cls.protocol_handlers(protocol_version) if not handlers: raise ValueError("No protocol handlers available (requested Bolt %r)", protocol_version) offered_versions = sorted(handlers.keys(), reverse=True)[:4] request_data = MAGIC + b"".join( v.to_bytes() for v in offered_versions).ljust(16, b"\x00") log.debug("[#%04X] C: <HANDSHAKE> %r", local_address.port_number, request_data) writer.write(request_data) await writer.drain() response_data = await reader.readexactly(4) log.debug("[#%04X] S: <HANDSHAKE> %r", local_address.port_number, response_data) try: agreed_version = Version.from_bytes(response_data) except ValueError as err: writer.close() raise BoltHandshakeError("Unexpected handshake response %r" % response_data, remote_address, request_data, response_data) from err try: subclass = handlers[agreed_version] except KeyError: log.debug("Unsupported Bolt protocol version %s", agreed_version) raise BoltHandshakeError("Unsupported Bolt protocol version", remote_address, request_data, response_data) else: return subclass def __new__(cls, reader, writer): obj = super().__new__(cls) obj.__t_opened = perf_counter() obj.__reader = reader obj.__writer = writer Addressable.set_transport(obj, writer.transport) return obj def __repr__(self): return "<Bolt address=%r protocol_version=%r>" % (self.remote_address, self.protocol_version) async def __ainit__(self, auth): """ Asynchronous initializer for implementation by subclasses. :param auth: """ @property def age(self): """ The age of this connection in seconds. 
""" return perf_counter() - self.__t_opened @property def broken(self): """ Flag to indicate whether this connection has been broken by the network or remote peer. """ return self.__reader.broken or self.__writer.broken @property def closed(self): """ Flag to indicate whether this connection has been closed locally.""" return self.__closed async def close(self): """ Close the connection. """ if self.closed: return if not self.broken: log.debug("[#%04X] S: <HANGUP>", self.local_address.port_number) self.__writer.write_eof() self.__writer.close() try: await self.__writer.wait_closed() except BoltConnectionBroken: pass self.__closed = True async def reset(self, force=False): """ Reset the connection to a clean state. By default, a RESET message will only be sent if required, i.e. if the connection is not already in a clean state. If forced, this check will be overridden and a RESET will be sent regardless. """ async def run(self, cypher, parameters=None, discard=False, readonly=False, bookmarks=None, timeout=None, metadata=None): """ Run an auto-commit transaction. :param cypher: :param parameters: :param discard: :param readonly: :param bookmarks: :param timeout: :param metadata: :raise BoltTransactionError: if a transaction cannot be carried out at this time """ async def begin(self, readonly=False, bookmarks=None, timeout=None, metadata=None): """ Begin an explicit transaction. :param readonly: :param bookmarks: :param timeout: :param metadata: :return: """ async def run_tx(self, f, args=None, kwargs=None, readonly=False, bookmarks=None, timeout=None, metadata=None): """ Run a transaction function and return the return value from that function. """ async def get_routing_table(self, context=None): """ Fetch a new routing table. :param context: the routing context to use for this call :return: a new RoutingTable instance or None if the given router is currently unable to provide routing information :raise ServiceUnavailable: if no writers are available :raise ProtocolError: if the routing information received is unusable """ class BoltStreamReader(Addressable, Breakable, StreamReader): """ Wrapper for asyncio.streams.StreamReader """ def set_transport(self, transport): Addressable.set_transport(self, transport) StreamReader.set_transport(self, transport) async def readuntil(self, separator=b'\n'): # pragma: no cover assert False # not used by current implementation async def read(self, n=-1): # pragma: no cover assert False # not used by current implementation async def readexactly(self, n): try: return await super().readexactly(n) except IncompleteReadError as err: message = ("Network read incomplete (received {} of {} " "bytes)".format(len(err.partial), err.expected)) log.debug("[#%04X] S: <CLOSE>", self.local_address.port_number) Breakable.set_broken(self) raise BoltConnectionBroken(message, self.remote_address) from err except OSError as err: log.debug("[#%04X] S: <CLOSE> %d %s", err.errno, strerror(err.errno)) Breakable.set_broken(self) raise BoltConnectionBroken("Network read failed", self.remote_address) from err class BoltStreamWriter(Addressable, Breakable, StreamWriter): """ Wrapper for asyncio.streams.StreamWriter """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) Addressable.set_transport(self, self.transport) async def drain(self): try: await super().drain() except OSError as err: log.debug("[#%04X] S: <CLOSE> (%s)", self.local_address.port_number, err) Breakable.set_broken(self) raise BoltConnectionBroken("Network write failed", self.remote_address) from 
err async def wait_closed(self): try: await super().wait_closed() except AttributeError: # pragma: no cover # This is a dirty hack for Python 3.6, which didn't include # 'wait_closed'. The code polls waiting for the stream # reader inside the protocol to go away which, by the # implementation of 3.6, occurs on 'connection_lost'. This # hack is likely safe unless the implementation of 3.6 # changes in a subsequent patch, and can be removed when # Python 3.6 support is no longer required. # from asyncio import sleep try: while self._protocol._stream_reader is not None: await sleep(0.1) except AttributeError: pass class Pool: def acquire(self, *, force_reset=False, timeout=None): raise NotImplementedError def release(self, *connections, force_reset=False): raise NotImplementedError def close(self, *, force=False): raise NotImplementedError class BoltPool: """ A pool of connections to a single address. :param opener: a function to which an address can be passed that returns an open and ready Bolt connection :param address: the remote address for which this pool operates :param max_size: the maximum permitted number of simultaneous connections that may be owned by this pool, both in-use and free :param max_age: the maximum permitted age, in seconds, for connections to be retained in this pool """ @classmethod async def open(cls, address, *, auth=None, loop=None, **config): """ Create a new connection pool, with an option to seed one or more initial connections. """ pool_config = PoolConfig.consume(config) def opener(addr): return Bolt.open(addr, auth=auth, loop=loop, **pool_config) pool = cls(loop, opener, pool_config, address) seeds = [await pool.acquire() for _ in range(pool_config.init_size)] for seed in seeds: await pool.release(seed) return pool def __init__(self, loop, opener, config, address): if loop is None: self._loop = get_event_loop() else: self._loop = loop self._opener = opener self._address = Address(address) self._max_size = config.max_size self._max_age = config.max_age self._in_use_list = deque() self._free_list = deque() self._waiting_list = WaitingList(loop=self._loop) def __repr__(self): return "<{} addr'{}' [{}{}{}]>".format( self.__class__.__name__, self.address, "|" * len(self._in_use_list), "." * len(self._free_list), " " * (self.max_size - self.size), ) def __contains__(self, cx): return cx in self._in_use_list or cx in self._free_list def __len__(self): return self.size @property def address(self): """ The remote address for which this pool operates. """ return self._address @property def max_size(self): """ The maximum permitted number of simultaneous connections that may be owned by this pool, both in-use and free. """ return self._max_size @max_size.setter def max_size(self, value): old_value = self._max_size self._max_size = value if value > old_value: # The maximum size has grown, so new slots have become # available. Notify any waiting acquirers of this extra # capacity. self._waiting_list.notify() @property def max_age(self): """ The maximum permitted age, in seconds, for connections to be retained in this pool. """ return self._max_age @property def in_use(self): """ The number of connections in this pool that are currently in use. """ return len(self._in_use_list) @property def size(self): """ The total number of connections (both in-use and free) currently owned by this connection pool. """ return len(self._in_use_list) + len(self._free_list) async def _sanitize(self, cx, *, force_reset): """ Attempt to clean up a connection, such that it can be reused. 
If the connection is broken or closed, it can be discarded. Otherwise, the age of the connection is checked against the maximum age permitted by this pool, consequently closing it on expiry. Should the connection be neither broken, closed nor expired, it will be reset (optionally forcibly so) and the connection object will be returned, indicating success. """ if cx.broken or cx.closed: return None expired = self.max_age is not None and cx.age > self.max_age if expired: await cx.close() return None await cx.reset(force=force_reset) return cx async def acquire(self, *, force_reset=False): """ Acquire a connection from the pool. In the simplest case, this will return an existing open connection, if one is free. If not, and the pool is not full, a new connection will be created. If the pool is full and no free connections are available, this will block until a connection is released, or until the acquire call is cancelled. :param force_reset: if true, the connection will be forcibly reset before being returned; if false, this will only occur if the connection is not already in a clean state :return: a Bolt connection object """ log.debug("Acquiring connection from pool %r", self) cx = None while cx is None or cx.broken or cx.closed: try: # Plan A: select a free connection from the pool cx = self._free_list.popleft() except IndexError: if self.size < self.max_size: # Plan B: if the pool isn't full, open # a new connection cx = await self._opener(self.address) else: # Plan C: wait for more capacity to become # available, then try again log.debug("Joining waiting list") await self._waiting_list.join() else: cx = await self._sanitize(cx, force_reset=force_reset) self._in_use_list.append(cx) return cx async def release(self, cx, *, force_reset=False): """ Release a Bolt connection, putting it back into the pool if the connection is healthy and the pool is not already at capacity. :param cx: the connection to release :param force_reset: if true, the connection will be forcibly reset before being released back into the pool; if false, this will only occur if the connection is not already in a clean state :raise ValueError: if the connection is not currently in use, or if it does not belong to this pool """ log.debug("Releasing connection %r", cx) if cx in self._in_use_list: self._in_use_list.remove(cx) if self.size < self.max_size: # If there is spare capacity in the pool, attempt to # sanitize the connection and return it to the pool. cx = await self._sanitize(cx, force_reset=force_reset) if cx: # Carry on only if sanitation succeeded. if self.size < self.max_size: # Check again if there is still capacity. self._free_list.append(cx) self._waiting_list.notify() else: # Otherwise, close the connection. await cx.close() else: # If the pool is full, simply close the connection. await cx.close() elif cx in self._free_list: raise ValueError("Connection is not in use") else: raise ValueError("Connection does not belong to this pool") async def prune(self): """ Close all free connections. """ await self.__close(self._free_list) async def close(self): """ Close all connections immediately. This does not permanently disable the connection pool, it merely shuts down all open connections, including those in use. Depending on the applications, it may be perfectly acceptable to re-acquire connections after pool closure, which will have the implicit affect of reopening the pool. 
To close gracefully, allowing work in progress to continue until connections are released, use the following sequence instead: pool.max_size = 0 pool.prune() This will force all future connection acquisitions onto the waiting list, and released connections will be closed instead of being returned to the pool. """ await self.prune() await self.__close(self._in_use_list) async def __close(self, connections): """ Close all connections in the given list. """ closers = deque() while True: try: cx = connections.popleft() except IndexError: break else: closers.append(cx.close()) if closers: await wait(closers, loop=self._loop) class Neo4jPool: """ Connection pool with routing table. """ @classmethod async def open(cls, *addresses, auth=None, routing_context=None, loop=None, **config): pool_config = PoolConfig.consume(config) def opener(addr): return Bolt.open(addr, auth=auth, **pool_config) obj = cls(loop, opener, config, addresses, routing_context) # TODO: get initial routing table and construct await obj._ensure_routing_table_is_fresh() return obj def __init__(self, loop, opener, config, addresses, routing_context): if loop is None: self._loop = get_event_loop() else: self._loop = loop self._opener = opener self._config = config self._pools = {} self._missing_writer = False self._refresh_lock = Lock(loop=self._loop) self._routing_context = routing_context self._max_size_per_host = config.max_size self._initial_routers = addresses self._routing_table = RoutingTable(addresses) self._activate_new_pools_in(self._routing_table) def _activate_new_pools_in(self, routing_table): """ Add pools for addresses that exist in the given routing table but which don't already have pools. """ for address in routing_table.servers(): if address not in self._pools: self._pools[address] = BoltPool(self._loop, self._opener, self._config, address) async def _deactivate_pools_not_in(self, routing_table): """ Deactivate any pools that aren't represented in the given routing table. """ for address in self._pools: if address not in routing_table: await self._deactivate(address) async def _get_routing_table_from(self, *routers): """ Try to update routing tables with the given routers. :return: True if the routing table is successfully updated, otherwise False """ log.debug("Attempting to update routing table from " "{}".format(", ".join(map(repr, routers)))) for router in routers: pool = self._pools[router] cx = await pool.acquire() try: new_routing_table = await cx.get_routing_table(self._routing_context) except BoltError: await self._deactivate(router) else: num_routers = len(new_routing_table.routers) num_readers = len(new_routing_table.readers) num_writers = len(new_routing_table.writers) # No writers are available. This likely indicates a temporary state, # such as leader switching, so we should not signal an error. # When no writers available, then we flag we are reading in absence of writer self._missing_writer = (num_writers == 0) # No routers if num_routers == 0: continue # No readers if num_readers == 0: continue log.debug("Successfully updated routing table from " "{!r} ({!r})".format(router, self._routing_table)) return new_routing_table finally: await pool.release(cx) return None async def _get_routing_table(self): """ Update the routing table from the first router able to provide valid routing information. 
""" # copied because it can be modified existing_routers = list(self._routing_table.routers) has_tried_initial_routers = False if self._missing_writer: has_tried_initial_routers = True rt = await self._get_routing_table_from(self._initial_routers) if rt: return rt rt = await self._get_routing_table_from(*existing_routers) if rt: return rt if not has_tried_initial_routers and self._initial_routers not in existing_routers: rt = await self._get_routing_table_from(self._initial_routers) if rt: return rt # None of the routers have been successful, so just fail log.error("Unable to retrieve routing information") raise Neo4jAvailabilityError("Unable to retrieve routing information") async def _ensure_routing_table_is_fresh(self, readonly=False): """ Update the routing table if stale. This method performs two freshness checks, before and after acquiring the refresh lock. If the routing table is already fresh on entry, the method exits immediately; otherwise, the refresh lock is acquired and the second freshness check that follows determines whether an update is still required. """ if self._routing_table.is_fresh(readonly=readonly): return async with self._refresh_lock: if self._routing_table.is_fresh(readonly=readonly): if readonly: # if reader is fresh but writers are not, then # we are reading in absence of writer self._missing_writer = not self._routing_table.is_fresh(readonly=False) else: rt = await self._get_routing_table() self._activate_new_pools_in(rt) self._routing_table.update(rt) await self._deactivate_pools_not_in(rt) async def _select_pool(self, readonly=False): """ Selects the pool with the fewest in-use connections. """ await self._ensure_routing_table_is_fresh(readonly=readonly) if readonly: addresses = self._routing_table.readers else: addresses = self._routing_table.writers pools = [pool for address, pool in self._pools.items() if address in addresses] pools_by_usage = {} for pool in pools: pools_by_usage.setdefault(pool.in_use, []).append(pool) if not pools_by_usage: raise Neo4jAvailabilityError("No {} service currently " "available".format("read" if readonly else "write")) return choice(pools_by_usage[min(pools_by_usage)]) async def acquire(self, *, readonly=False, force_reset=False): """ Acquire a connection to a server that can satisfy a set of parameters. :param readonly: true if a readonly connection is required, otherwise false :param force_reset: """ while True: pool = await self._select_pool(readonly=readonly) try: cx = await pool.acquire(force_reset=force_reset) except BoltError: await self._deactivate(pool.address) else: if not readonly: # If we're not acquiring a connection as # readonly, then intercept NotALeader and # ForbiddenOnReadOnlyDatabase errors to # invalidate the routing table. from neo4j.errors import ( NotALeader, ForbiddenOnReadOnlyDatabase, ) def handler(failure): """ Invalidate the routing table before raising the failure. """ log.debug("[#0000] C: <ROUTING> Invalidating routing table") self._routing_table.ttl = 0 raise failure cx.set_failure_handler(NotALeader, handler) cx.set_failure_handler(ForbiddenOnReadOnlyDatabase, handler) return cx async def release(self, connection, *, force_reset=False): """ Release a connection back into the pool. This method is thread safe. """ for pool in self._pools.values(): try: await pool.release(connection, force_reset=force_reset) except ValueError: pass else: # Unhook any custom error handling and exit. 
from neo4j.errors import ( NotALeader, ForbiddenOnReadOnlyDatabase, ) connection.del_failure_handler(NotALeader) connection.del_failure_handler(ForbiddenOnReadOnlyDatabase) break else: raise ValueError("Connection does not belong to this pool") async def _deactivate(self, address): """ Deactivate an address from the connection pool, if present, remove from the routing table and also closing all idle connections to that address. """ log.debug("[#0000] C: <ROUTING> Deactivating address %r", address) # We use `discard` instead of `remove` here since the former # will not fail if the address has already been removed. self._routing_table.routers.discard(address) self._routing_table.readers.discard(address) self._routing_table.writers.discard(address) log.debug("[#0000] C: <ROUTING> table=%r", self._routing_table) try: pool = self._pools.pop(address) except KeyError: pass # assume the address has already been removed else: pool.max_size = 0 await pool.prune() async def close(self, force=False): """ Close all connections and empty the pool. If forced, in-use connections will be closed immediately; if not, they will remain open until released. """ pools = dict(self._pools) self._pools.clear() for address, pool in pools.items(): if force: await pool.close() else: pool.max_size = 0 await pool.prune() class Neo4j: # The default router address list to use if no addresses are specified. default_router_addresses = Address.parse_list(":7687 :17601 :17687") # TODO # @classmethod # async def open(cls, *addresses, auth=None, security=False, protocol_version=None, loop=None): # opener = Bolt.opener(auth=auth, security=security, protocol_version=protocol_version) # router_addresses = Address.parse_list(" ".join(addresses), default_port=7687) # return cls(opener, router_addresses, loop=loop) # # def __init__(self, opener, router_addresses, loop=None): # self._routers = Neo4jPool(opener, router_addresses or self.default_router_addresses) # self._writers = Neo4jPool(opener) # self._readers = Neo4jPool(opener) # self._routing_table = None # # @property # def routing_table(self): # return self._routing_table # # async def update_routing_table(self): # cx = await self._routers.acquire() # try: # result = await cx.run("CALL dbms.cluster.routing.getRoutingTable($context)", {"context": {}}) # record = await result.single() # self._routing_table = RoutingTable.parse_routing_info([record]) # TODO: handle ValueError? # return self._routing_table # finally: # self._routers.release(cx) # async def main(): # from neo4j.debug import watch; watch("neo4j") # neo4j = await Neo4j.open(":17601 :17602 :17603", auth=("neo4j", "password")) # await neo4j.update_routing_table() # print(neo4j.routing_table) # # # if __name__ == "__main__": # run(main())
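The BoltPool record above implements a bounded acquire/release cycle: reuse a free connection (Plan A), open a new one if the pool is under capacity (Plan B), otherwise wait for a release (Plan C). Below is a minimal sketch of that pattern using only standard-library asyncio; TinyPool, open_connection and the pool size are illustrative stand-ins, not part of the driver code in the record.

import asyncio
from collections import deque


class TinyPool:
    def __init__(self, opener, max_size):
        self._opener = opener                          # coroutine function that opens one connection
        self._free = deque()                           # idle connections
        self._in_use = set()                           # connections currently handed out
        self._capacity = asyncio.Semaphore(max_size)   # caps the total number of connections

    async def acquire(self):
        await self._capacity.acquire()        # Plan C: wait until a slot is available
        try:
            cx = self._free.popleft()         # Plan A: reuse an idle connection
        except IndexError:
            cx = await self._opener()         # Plan B: open a new connection
        self._in_use.add(cx)
        return cx

    async def release(self, cx):
        self._in_use.discard(cx)
        self._free.append(cx)
        self._capacity.release()              # wake one waiting acquirer


async def demo():
    counter = 0

    async def open_connection():
        nonlocal counter
        counter += 1
        return "cx-%d" % counter              # stand-in for a real Bolt connection

    pool = TinyPool(open_connection, max_size=2)
    a = await pool.acquire()
    b = await pool.acquire()
    await pool.release(a)
    c = await pool.acquire()                  # reuses the connection released above
    print(a, b, c)                            # cx-1 cx-2 cx-1


asyncio.run(demo())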
37.34787
107
0.609993
4,289
36,825
5.098624
0.155514
0.032376
0.015365
0.00535
0.258643
0.204957
0.154289
0.14272
0.11135
0.088668
0
0.006414
0.314134
36,825
985
108
37.385787
0.859405
0.182675
0
0.299213
0
0
0.059288
0.000937
0
0
0
0.003046
0.011811
1
0.055118
false
0.007874
0.03937
0.011811
0.198819
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4afb4cc5b7dcd90ef9395d1a97095b2b0c885c49
1,831
py
Python
python/setup.py
bubriks/feature-store-api
fa286f257b87a09c081e86811b853b3e564ce197
[ "Apache-2.0" ]
49
2020-09-07T17:43:11.000Z
2021-12-28T10:41:03.000Z
python/setup.py
bubriks/feature-store-api
fa286f257b87a09c081e86811b853b3e564ce197
[ "Apache-2.0" ]
132
2020-08-06T12:12:09.000Z
2022-03-29T16:28:25.000Z
python/setup.py
bubriks/feature-store-api
fa286f257b87a09c081e86811b853b3e564ce197
[ "Apache-2.0" ]
35
2020-08-06T12:09:02.000Z
2022-01-10T08:50:45.000Z
import os
import imp

from setuptools import setup, find_packages

__version__ = imp.load_source(
    "hsfs.version", os.path.join("hsfs", "version.py")
).__version__


def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()


setup(
    name="hsfs",
    version=__version__,
    install_requires=[
        "pyhumps==1.6.1",
        "requests",
        "furl",
        "boto3",
        "pandas",
        "numpy",
        "pyjks",
        "mock",
        "avro==1.10.2",
        "sqlalchemy",
        "PyMySQL",
    ],
    extras_require={
        "dev": ["pytest", "flake8", "black"],
        "docs": [
            "mkdocs==1.1.2",
            "mkdocs-material==6.2.2",
            "mike==0.5.5",
            "sphinx==3.5.4",
            "keras_autodoc @ git+https://git@github.com/moritzmeister/keras-autodoc@split-tags-properties",
            "markdown-include",
        ],
        "hive": ["pyhopshive[thrift]"],
    },
    author="Logical Clocks AB",
    author_email="moritz@logicalclocks.com",
    description="HSFS: An environment independent client to interact with the Hopsworks Featurestore",
    license="Apache License 2.0",
    keywords="Hopsworks, Feature Store, Spark, Machine Learning, MLOps, DataOps",
    url="https://github.com/logicalclocks/feature-store-api",
    download_url="https://github.com/logicalclocks/feature-store-api/releases/tag/"
    + __version__,
    packages=find_packages(),
    long_description=read("../README.md"),
    long_description_content_type="text/markdown",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Topic :: Utilities",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3",
        "Intended Audience :: Developers",
    ],
)
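The setup.py above reads __version__ with imp.load_source so the package itself never has to be importable at install time. A hedged sketch of the same idea using importlib (imp is deprecated in Python 3) follows; load_version and the commented-out path are illustrative, not part of the original file.

import importlib.util
import os


def load_version(path):
    # Execute version.py in isolation and return its __version__ attribute,
    # without importing the surrounding package.
    spec = importlib.util.spec_from_file_location("pkg.version", path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module.__version__


# __version__ = load_version(os.path.join("hsfs", "version.py"))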
28.609375
107
0.589842
193
1,831
5.430052
0.647668
0.031489
0.019084
0.032443
0.085878
0.085878
0.085878
0.085878
0
0
0
0.018355
0.256144
1,831
63
108
29.063492
0.751101
0
0
0.035088
0
0.017544
0.462043
0.025123
0
0
0
0
0
1
0.017544
false
0
0.052632
0.017544
0.087719
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4aff2d34953f2e2be532801520dad5c0dc9065e8
15,609
py
Python
autotest/ogr/ogr_gpx.py
HongqiangWei/gdal
f7c427926438cc39d31e4459fa6401321f8e62f0
[ "MIT" ]
3
2016-07-25T16:30:13.000Z
2022-02-11T11:09:08.000Z
autotest/ogr/ogr_gpx.py
HongqiangWei/gdal
f7c427926438cc39d31e4459fa6401321f8e62f0
[ "MIT" ]
null
null
null
autotest/ogr/ogr_gpx.py
HongqiangWei/gdal
f7c427926438cc39d31e4459fa6401321f8e62f0
[ "MIT" ]
null
null
null
#!/usr/bin/env python ############################################################################### # $Id$ # # Project: GDAL/OGR Test Suite # Purpose: Test GPX driver functionality. # Author: Even Rouault <even dot rouault at mines dash paris dot org> # ############################################################################### # Copyright (c) 2007, Even Rouault <even dot rouault at mines dash paris dot org> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. ############################################################################### import os import sys import string sys.path.append( '../pymod' ) import gdaltest import ogrtest import ogr import osr import gdal def ogr_gpx_init(): gdaltest.gpx_ds = None try: gdaltest.gpx_ds = ogr.Open( 'data/test.gpx' ) except: gdaltest.gpx_ds = None if gdaltest.gpx_ds is None: gdaltest.have_gpx = 0 else: gdaltest.have_gpx = 1 if not gdaltest.have_gpx: return 'skip' if gdaltest.gpx_ds.GetLayerCount() != 5: gdaltest.post_reason( 'wrong number of layers' ) return 'fail' return 'success' ############################################################################### # Test waypoints gpx layer. 
def ogr_gpx_1(): if not gdaltest.have_gpx: return 'skip' if gdaltest.gpx_ds is None: return 'fail' lyr = gdaltest.gpx_ds.GetLayerByName( 'waypoints' ) expect = [2, None] tr = ogrtest.check_features_against_list( lyr, 'ele', expect ) if not tr: return 'fail' lyr.ResetReading() expect = ['waypoint name', None] tr = ogrtest.check_features_against_list( lyr, 'name', expect ) if not tr: return 'fail' lyr.ResetReading() expect = ['href', None] tr = ogrtest.check_features_against_list( lyr, 'link1_href', expect ) if not tr: return 'fail' lyr.ResetReading() expect = ['text', None] tr = ogrtest.check_features_against_list( lyr, 'link1_text', expect ) if not tr: return 'fail' lyr.ResetReading() expect = ['type', None] tr = ogrtest.check_features_against_list( lyr, 'link1_type', expect ) if not tr: return 'fail' lyr.ResetReading() expect = ['href2', None] tr = ogrtest.check_features_against_list( lyr, 'link2_href', expect ) if not tr: return 'fail' lyr.ResetReading() expect = ['text2', None] tr = ogrtest.check_features_against_list( lyr, 'link2_text', expect ) if not tr: return 'fail' lyr.ResetReading() expect = ['type2', None] tr = ogrtest.check_features_against_list( lyr, 'link2_type', expect ) if not tr: return 'fail' lyr.ResetReading() expect = ['2007/11/25 17:58:00+01', None] tr = ogrtest.check_features_against_list( lyr, 'time', expect ) if not tr: return 'fail' lyr.ResetReading() feat = lyr.GetNextFeature() if ogrtest.check_feature_geometry( feat, 'POINT (1 0)', max_error = 0.0001 ) != 0: return 'fail' feat.Destroy() feat = lyr.GetNextFeature() if ogrtest.check_feature_geometry( feat, 'POINT (4 3)', max_error = 0.0001 ) != 0: return 'fail' feat.Destroy() return 'success' ############################################################################### # Test routes gpx layer. def ogr_gpx_2(): if not gdaltest.have_gpx: return 'skip' if gdaltest.gpx_ds is None: return 'fail' lyr = gdaltest.gpx_ds.GetLayerByName( 'routes' ) lyr.ResetReading() feat = lyr.GetNextFeature() if ogrtest.check_feature_geometry( feat, 'LINESTRING (6 5,9 8,12 11)', max_error = 0.0001 ) != 0: return 'fail' feat.Destroy() feat = lyr.GetNextFeature() if ogrtest.check_feature_geometry( feat, 'LINESTRING EMPTY', max_error = 0.0001 ) != 0: return 'fail' feat.Destroy() return 'success' ############################################################################### # Test route_points gpx layer. def ogr_gpx_3(): if not gdaltest.have_gpx: return 'skip' if gdaltest.gpx_ds is None: return 'fail' lyr = gdaltest.gpx_ds.GetLayerByName( 'route_points' ) expect = ['route point name', None, None] tr = ogrtest.check_features_against_list( lyr, 'name', expect ) lyr.ResetReading() feat = lyr.GetNextFeature() if ogrtest.check_feature_geometry( feat, 'POINT (6 5)', max_error = 0.0001 ) != 0: return 'fail' feat.Destroy() return 'success' ############################################################################### # Test tracks gpx layer. 
def ogr_gpx_4(): if not gdaltest.have_gpx: return 'skip' if gdaltest.gpx_ds is None: return 'fail' lyr = gdaltest.gpx_ds.GetLayerByName( 'tracks' ) lyr.ResetReading() feat = lyr.GetNextFeature() if ogrtest.check_feature_geometry( feat, 'MULTILINESTRING ((15 14,18 17),(21 20,24 23))', max_error = 0.0001 ) != 0: return 'fail' feat.Destroy() feat = lyr.GetNextFeature() if ogrtest.check_feature_geometry( feat, 'MULTILINESTRING EMPTY', max_error = 0.0001 ) != 0: return 'fail' feat.Destroy() feat = lyr.GetNextFeature() f_geom = feat.GetGeometryRef() if f_geom.ExportToWkt()!= 'MULTILINESTRING EMPTY': return 'fail' feat.Destroy() return 'success' ############################################################################### # Test route_points gpx layer. def ogr_gpx_5(): if not gdaltest.have_gpx: return 'skip' if gdaltest.gpx_ds is None: return 'fail' lyr = gdaltest.gpx_ds.GetLayerByName( 'track_points' ) expect = ['track point name', None, None, None] tr = ogrtest.check_features_against_list( lyr, 'name', expect ) lyr.ResetReading() feat = lyr.GetNextFeature() if ogrtest.check_feature_geometry( feat, 'POINT (15 14)', max_error = 0.0001 ) != 0: return 'fail' feat.Destroy() return 'success' ############################################################################### # Copy our small gpx file to a new gpx file. def ogr_gpx_6(): if not gdaltest.have_gpx: return 'skip' if gdaltest.gpx_ds is None: return 'skip' try: gdal.PushErrorHandler( 'CPLQuietErrorHandler' ) ogr.GetDriverByName('CSV').DeleteDataSource( 'tmp/gpx.gpx' ) gdal.PopErrorHandler() except: pass co_opts = [ ] # Duplicate waypoints gpx_lyr = gdaltest.gpx_ds.GetLayerByName( 'waypoints' ) gpx2_ds = ogr.GetDriverByName('GPX').CreateDataSource('tmp/gpx.gpx', options = co_opts ) gpx2_lyr = gpx2_ds.CreateLayer( 'waypoints', geom_type = ogr.wkbPoint ) gpx_lyr.ResetReading() dst_feat = ogr.Feature( feature_def = gpx2_lyr.GetLayerDefn() ) feat = gpx_lyr.GetNextFeature() while feat is not None: dst_feat.SetFrom( feat ) if gpx2_lyr.CreateFeature( dst_feat ) != 0: gdaltest.post_reason('CreateFeature failed.') return 'fail' feat = gpx_lyr.GetNextFeature() dst_feat.Destroy() # Duplicate routes gpx_lyr = gdaltest.gpx_ds.GetLayerByName( 'routes' ) gpx2_lyr = gpx2_ds.CreateLayer( 'routes', geom_type = ogr.wkbLineString ) gpx_lyr.ResetReading() dst_feat = ogr.Feature( feature_def = gpx2_lyr.GetLayerDefn() ) feat = gpx_lyr.GetNextFeature() while feat is not None: dst_feat.SetFrom( feat ) if gpx2_lyr.CreateFeature( dst_feat ) != 0: gdaltest.post_reason('CreateFeature failed.') return 'fail' feat = gpx_lyr.GetNextFeature() dst_feat.Destroy() # Duplicate tracks gpx_lyr = gdaltest.gpx_ds.GetLayerByName( 'tracks' ) gpx2_lyr = gpx2_ds.CreateLayer( 'tracks', geom_type = ogr.wkbMultiLineString ) gpx_lyr.ResetReading() dst_feat = ogr.Feature( feature_def = gpx2_lyr.GetLayerDefn() ) feat = gpx_lyr.GetNextFeature() while feat is not None: dst_feat.SetFrom( feat ) if gpx2_lyr.CreateFeature( dst_feat ) != 0: gdaltest.post_reason('CreateFeature failed.') return 'fail' feat = gpx_lyr.GetNextFeature() dst_feat.Destroy() gpx_lyr = None gpx2_lyr = None # Explicit destroy is required for old-gen python bindings gpx2_ds.Destroy() gdaltest.gpx_ds.Destroy() gdaltest.gpx_ds = ogr.Open( 'tmp/gpx.gpx' ) return 'success' ############################################################################### # Output extra fields as <extensions>. 
def ogr_gpx_7(): if not gdaltest.have_gpx: return 'skip' if gdaltest.gpx_ds is not None: gdaltest.gpx_ds.Destroy() gdaltest.gpx_ds = None bna_ds = ogr.Open( 'data/bna_for_gpx.bna' ) try: os.remove ('tmp/gpx.gpx') except: pass co_opts = [ 'GPX_USE_EXTENSIONS=yes' ] # Duplicate waypoints bna_lyr = bna_ds.GetLayerByName( 'bna_for_gpx_points' ) gdaltest.gpx_ds = ogr.GetDriverByName('GPX').CreateDataSource('tmp/gpx.gpx', options = co_opts ) gpx_lyr = gdaltest.gpx_ds.CreateLayer( 'waypoints', geom_type = ogr.wkbPoint ) bna_lyr.ResetReading() for i in range(bna_lyr.GetLayerDefn().GetFieldCount()): field_defn = bna_lyr.GetLayerDefn().GetFieldDefn(i) gpx_lyr.CreateField( field_defn ) dst_feat = ogr.Feature( feature_def = gpx_lyr.GetLayerDefn() ) feat = bna_lyr.GetNextFeature() while feat is not None: dst_feat.SetFrom( feat ) if gpx_lyr.CreateFeature( dst_feat ) != 0: gdaltest.post_reason('CreateFeature failed.') return 'fail' feat = bna_lyr.GetNextFeature() dst_feat.Destroy() bna_ds.Destroy() gdaltest.gpx_ds.Destroy() gdaltest.gpx_ds = None #Now check that the extensions fields have been well written gdaltest.gpx_ds = ogr.Open('tmp/gpx.gpx') gpx_lyr = gdaltest.gpx_ds.GetLayerByName( 'waypoints' ) expect = ['PID1', 'PID2'] tr = ogrtest.check_features_against_list( gpx_lyr, 'ogr_Primary_ID', expect ) if not tr: return 'fail' gpx_lyr.ResetReading() expect = ['SID1', 'SID2'] tr = ogrtest.check_features_against_list( gpx_lyr, 'ogr_Secondary_ID', expect ) if not tr: return 'fail' gpx_lyr.ResetReading() expect = ['TID1', None] tr = ogrtest.check_features_against_list( gpx_lyr, 'ogr_Third_ID', expect ) if not tr: return 'fail' return 'success' ############################################################################### # Output extra fields as <extensions>. def ogr_gpx_8(): if not gdaltest.have_gpx: return 'skip' if gdaltest.gpx_ds is not None: gdaltest.gpx_ds.Destroy() gdaltest.gpx_ds = None try: os.remove ('tmp/gpx.gpx') except: pass gdaltest.gpx_ds = ogr.GetDriverByName('GPX').CreateDataSource('tmp/gpx.gpx', options = ['LINEFORMAT=LF']) lyr = gdaltest.gpx_ds.CreateLayer( 'route_points', geom_type = ogr.wkbPoint ) feat = ogr.Feature(lyr.GetLayerDefn()) geom = ogr.CreateGeometryFromWkt('POINT(2 49)') feat.SetField('route_name', 'ROUTE_NAME') feat.SetField('route_fid', 0) feat.SetGeometry(geom) lyr.CreateFeature(feat) feat = ogr.Feature(lyr.GetLayerDefn()) geom = ogr.CreateGeometryFromWkt('POINT(3 50)') feat.SetField('route_name', '--ignored--') feat.SetField('route_fid', 0) feat.SetGeometry(geom) lyr.CreateFeature(feat) feat = ogr.Feature(lyr.GetLayerDefn()) geom = ogr.CreateGeometryFromWkt('POINT(3 51)') feat.SetField('route_name', 'ROUTE_NAME2') feat.SetField('route_fid', 1) feat.SetGeometry(geom) lyr.CreateFeature(feat) feat = ogr.Feature(lyr.GetLayerDefn()) geom = ogr.CreateGeometryFromWkt('POINT(3 49)') feat.SetField('route_fid', 1) feat.SetGeometry(geom) lyr.CreateFeature(feat) lyr = gdaltest.gpx_ds.CreateLayer( 'track_points', geom_type = ogr.wkbPoint ) feat = ogr.Feature(lyr.GetLayerDefn()) geom = ogr.CreateGeometryFromWkt('POINT(2 49)') feat.SetField('track_name', 'TRACK_NAME') feat.SetField('track_fid', 0) feat.SetField('track_seg_id', 0) feat.SetGeometry(geom) lyr.CreateFeature(feat) feat = ogr.Feature(lyr.GetLayerDefn()) geom = ogr.CreateGeometryFromWkt('POINT(3 50)') feat.SetField('track_name', '--ignored--') feat.SetField('track_fid', 0) feat.SetField('track_seg_id', 0) feat.SetGeometry(geom) lyr.CreateFeature(feat) feat = ogr.Feature(lyr.GetLayerDefn()) geom = 
ogr.CreateGeometryFromWkt('POINT(3 51)') feat.SetField('track_fid', 0) feat.SetField('track_seg_id', 1) feat.SetGeometry(geom) lyr.CreateFeature(feat) feat = ogr.Feature(lyr.GetLayerDefn()) geom = ogr.CreateGeometryFromWkt('POINT(3 49)') feat.SetField('track_name', 'TRACK_NAME2') feat.SetField('track_fid', 1) feat.SetField('track_seg_id', 0) feat.SetGeometry(geom) lyr.CreateFeature(feat) gdaltest.gpx_ds.Destroy() gdaltest.gpx_ds = None f = open('tmp/gpx.gpx','rb') f_ref = open('data/ogr_gpx_8_ref.txt','rb') f_content = f.read() f_ref_content = f_ref.read() f.close() f_ref.close() if f_content.find(f_ref_content) == -1: gdaltest.post_reason('did not get expected result') print(f_content) return 'fail' return 'success' ############################################################################### # def ogr_gpx_cleanup(): if gdaltest.gpx_ds is not None: gdaltest.gpx_ds.Destroy() gdaltest.gpx_ds = None try: os.remove ('tmp/gpx.gpx') except: pass return 'success' gdaltest_list = [ ogr_gpx_init, ogr_gpx_1, ogr_gpx_2, ogr_gpx_3, ogr_gpx_4, ogr_gpx_5, ogr_gpx_6, # Rerun test 1, 2 and 4 with generated tmp/tmp.gpx ogr_gpx_1, ogr_gpx_2, ogr_gpx_4, ogr_gpx_7, ogr_gpx_8, ogr_gpx_cleanup ] if __name__ == '__main__': gdaltest.setup_run( 'ogr_gpx' ) gdaltest.run_tests( gdaltest_list ) gdaltest.summarize()
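The tests above exercise the OGR GPX driver, which exposes a GPX file as five layers (waypoints, routes, route_points, tracks, track_points). A minimal read-only sketch of that API is shown here, assuming GDAL's Python bindings are installed; 'my.gpx' is a placeholder path, not a file from the test data.

from osgeo import ogr

# Open a GPX data source and walk the waypoint features.
ds = ogr.Open("my.gpx")
lyr = ds.GetLayerByName("waypoints")
feat = lyr.GetNextFeature()
while feat is not None:
    # Each waypoint carries attribute fields (name, ele, time, ...) and a POINT geometry.
    print(feat.GetField("name"), feat.GetGeometryRef().ExportToWkt())
    feat = lyr.GetNextFeature()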
27.19338
120
0.599206
1,870
15,609
4.826203
0.163636
0.049972
0.059058
0.034127
0.71036
0.677452
0.659058
0.635125
0.606427
0.544155
0
0.016851
0.235825
15,609
573
121
27.240838
0.739772
0.112627
0
0.661111
0
0
0.118118
0.003421
0
0
0
0
0
1
0.027778
false
0.011111
0.022222
0
0.194444
0.002778
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4aff7e3e13035260de4953a62861c9d0ec4fffb5
22,377
py
Python
mwp_solver/models/sausolver.py
max-stack/MWP-SS-Metrics
01268f2d6da716596216b04de4197e345b96c219
[ "MIT" ]
null
null
null
mwp_solver/models/sausolver.py
max-stack/MWP-SS-Metrics
01268f2d6da716596216b04de4197e345b96c219
[ "MIT" ]
null
null
null
mwp_solver/models/sausolver.py
max-stack/MWP-SS-Metrics
01268f2d6da716596216b04de4197e345b96c219
[ "MIT" ]
null
null
null
# Code Taken from https://github.com/LYH-YF/MWPToolkit # -*- encoding: utf-8 -*- # @Author: Yihuai Lan # @Time: 2021/08/21 04:59:55 # @File: sausolver.py import random import torch from torch import nn import copy from module.Encoder.rnn_encoder import BasicRNNEncoder from module.Embedder.basic_embedder import BasicEmbedder from module.Decoder.tree_decoder import SARTreeDecoder from module.Layer.tree_layers import NodeGenerater, SubTreeMerger, TreeNode, TreeEmbedding from module.Layer.tree_layers import Prediction, GenerateNode, Merge, SemanticAlignmentModule from module.Strategy.beam_search import TreeBeam from loss.masked_cross_entropy_loss import MaskedCrossEntropyLoss, masked_cross_entropy from loss.mse_loss import MSELoss from utils.utils import copy_list from utils.enum_type import NumMask, SpecialTokens class SAUSolver(nn.Module): """ Reference: Qin et al. "Semantically-Aligned Universal Tree-Structured Solver for Math Word Problems" in EMNLP 2020. """ def __init__(self, config, dataset): super(SAUSolver, self).__init__() # parameter self.hidden_size = config["hidden_size"] self.device = config["device"] self.USE_CUDA = True if self.device == torch.device('cuda') else False self.beam_size = config['beam_size'] self.max_out_len = config['max_output_len'] self.embedding_size = config["embedding_size"] self.dropout_ratio = config["dropout_ratio"] self.num_layers = config["num_layers"] self.rnn_cell_type = config["rnn_cell_type"] self.loss_weight = config['loss_weight'] self.vocab_size = len(dataset.in_idx2word) self.out_symbol2idx = dataset.out_symbol2idx self.out_idx2symbol = dataset.out_idx2symbol generate_list = dataset.generate_list self.generate_nums = [self.out_symbol2idx[symbol] for symbol in generate_list] self.mask_list = NumMask.number self.num_start = dataset.num_start self.operator_nums = dataset.operator_nums self.generate_size = len(generate_list) self.unk_token = self.out_symbol2idx[SpecialTokens.UNK_TOKEN] try: self.out_sos_token = self.out_symbol2idx[SpecialTokens.SOS_TOKEN] except: self.out_sos_token = None try: self.out_eos_token = self.out_symbol2idx[SpecialTokens.EOS_TOKEN] except: self.out_eos_token = None try: self.out_pad_token = self.out_symbol2idx[SpecialTokens.PAD_TOKEN] except: self.out_pad_token = None # module self.embedder = BasicEmbedder(self.vocab_size, self.embedding_size, self.dropout_ratio) # self.t_encoder = BasicRNNEncoder(self.embedding_size, self.hidden_size, self.num_layers, self.rnn_cell_type, self.dropout_ratio) self.encoder = BasicRNNEncoder(self.embedding_size, self.hidden_size, self.num_layers, self.rnn_cell_type, self.dropout_ratio, batch_first=False) #self.decoder = SARTreeDecoder(self.hidden_size, self.operator_nums, self.generate_size, self.dropout_ratio) self.decoder = Prediction(self.hidden_size,self.operator_nums,self.generate_size,self.dropout_ratio) self.node_generater = GenerateNode(self.hidden_size, self.operator_nums, self.embedding_size, self.dropout_ratio) self.merge = Merge(self.hidden_size, self.embedding_size, self.dropout_ratio) self.sa = SemanticAlignmentModule(self.hidden_size,self.hidden_size,self.hidden_size) self.loss1 = MaskedCrossEntropyLoss() # def calculate_loss(self, batch_data:dict) -> float: """Finish forward-propagating, calculating loss and back-propagation. :param batch_data: one batch data. :return: loss value. 
batch_data should include keywords 'question', 'ques len', 'equation', 'equ len', 'num stack', 'num size', 'num pos' """ seq = torch.tensor(batch_data["question"]).to(self.device) seq_length = torch.tensor(batch_data["ques len"]).long() target = torch.tensor(batch_data["equation"]).to(self.device) target_length = torch.LongTensor(batch_data["equ len"]).to(self.device) nums_stack = copy.deepcopy(batch_data["num stack"]) num_size = batch_data["num size"] num_pos = batch_data["num pos"] generate_nums = self.generate_nums num_start = self.num_start # sequence mask for attention unk = self.unk_token loss = self.train_tree(seq, seq_length, target, target_length, nums_stack, num_size, generate_nums, num_pos, unk, num_start) return loss def model_test(self, batch_data:dict) -> tuple: """Model test. :param batch_data: one batch data. :return: predicted equation, target equation. batch_data should include keywords 'question', 'ques len', 'equation', 'num stack', 'num pos', 'num list' """ seq = torch.tensor(batch_data["question"]).to(self.device) seq_length = torch.tensor(batch_data["ques len"]).long() target = torch.tensor(batch_data["equation"]).to(self.device) nums_stack = copy.deepcopy(batch_data["num stack"]) num_pos = batch_data["num pos"] num_list = batch_data['num list'] generate_nums = self.generate_nums num_start = self.num_start # sequence mask for attention all_node_output = self.evaluate_tree(seq, seq_length, generate_nums, num_pos, num_start, self.beam_size, self.max_out_len) all_output = self.convert_idx2symbol(all_node_output, num_list[0], copy_list(nums_stack[0])) targets = self.convert_idx2symbol(target[0], num_list[0], copy_list(nums_stack[0])) return all_output, targets def train_tree(self,input_batch, input_length, target_batch, target_length, nums_stack_batch, num_size_batch, generate_nums, num_pos, unk, num_start, english=False,var_nums=[], batch_first=False): # sequence mask for attention seq_mask = [] max_len = max(input_length) for i in input_length: seq_mask.append([0 for _ in range(i)] + [1 for _ in range(i, max_len)]) seq_mask = torch.ByteTensor(seq_mask) num_mask = [] max_num_size = max(num_size_batch) + len(generate_nums) + len(var_nums) # 最大的位置列表数目+常识数字数目+未知数列表 for i in num_size_batch: d = i + len(generate_nums) + len(var_nums) num_mask.append([0] * d + [1] * (max_num_size - d)) num_mask = torch.ByteTensor(num_mask) # 用于屏蔽无关数字,防止生成错误的Nx #unk = output_lang.word2index["UNK"] # Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size) input_var = input_batch.transpose(0, 1) target = target_batch.transpose(0, 1) padding_hidden = torch.FloatTensor([0.0 for _ in range(self.decoder.hidden_size)]).unsqueeze(0) batch_size = len(input_length) if self.USE_CUDA: input_var = input_var.cuda() seq_mask = seq_mask.cuda() padding_hidden = padding_hidden.cuda() num_mask = num_mask.cuda() # Zero gradients of both optimizers # Run words through encoder #encoder_outputs, problem_output = self.encoder(input_var, input_length) seq_emb = self.embedder(input_var) pade_outputs, _ = self.encoder(seq_emb, input_length) problem_output = pade_outputs[-1, :, :self.hidden_size] + pade_outputs[0, :, self.hidden_size:] encoder_outputs = pade_outputs[:, :, :self.hidden_size] + pade_outputs[:, :, self.hidden_size:] # Prepare input and output variables node_stacks = [[TreeNode(_)] for _ in problem_output.split(1, dim=0)] # root embedding B x 1 max_target_length = max(target_length) all_node_outputs = [] all_sa_outputs = [] # all_leafs = [] copy_num_len = [len(_) for _ 
in num_pos] num_size = max(copy_num_len) # 提取与问题相关的数字embedding all_nums_encoder_outputs = self.get_all_number_encoder_outputs(encoder_outputs, num_pos, batch_size, num_size, self.encoder.hidden_size) embeddings_stacks = [[] for _ in range(batch_size)] # B x 1 当前的tree state/ subtree embedding / output left_childs = [None for _ in range(batch_size)] # B x 1 for t in range(max_target_length): num_score, op, current_embeddings, current_context, current_nums_embeddings = self.decoder( node_stacks, left_childs, encoder_outputs, all_nums_encoder_outputs, padding_hidden, seq_mask, num_mask) # all_leafs.append(p_leaf) outputs = torch.cat((op, num_score), 1) all_node_outputs.append(outputs) target_t, generate_input = self.generate_tree_input(target[t].tolist(), outputs, nums_stack_batch, num_start, unk) target[t] = target_t if self.USE_CUDA: generate_input = generate_input.cuda() left_child, right_child, node_label = self.node_generater(current_embeddings, generate_input, current_context) left_childs = [] for idx, l, r, node_stack, i, o in zip(range(batch_size), left_child.split(1), right_child.split(1), node_stacks, target[t].tolist(), embeddings_stacks): if len(node_stack) != 0: node = node_stack.pop() else: left_childs.append(None) continue # 未知数当数字处理,SEP当操作符处理 if i < num_start: # 非数字 node_stack.append(TreeNode(r)) node_stack.append(TreeNode(l, left_flag=True)) o.append(TreeEmbedding(node_label[idx].unsqueeze(0), terminal=False)) # print(o[-1].embedding.size()) # print(encoder_outputs[idx].size()) else: # 数字 current_num = current_nums_embeddings[idx, i - num_start].unsqueeze(0) while len(o) > 0 and o[-1].terminal: sub_stree = o.pop() op = o.pop() current_num = self.merge(op.embedding, sub_stree.embedding, current_num) # Subtree embedding if batch_first: encoder_mapping, decoder_mapping = self.sa(current_num, encoder_outputs[idx]) else: temp_encoder_outputs = encoder_outputs.transpose(0, 1) encoder_mapping, decoder_mapping = self.sa(current_num,temp_encoder_outputs[idx]) all_sa_outputs.append((encoder_mapping, decoder_mapping)) o.append(TreeEmbedding(current_num, terminal=True)) if len(o) > 0 and o[-1].terminal: left_childs.append(o[-1].embedding) else: left_childs.append(None) # all_leafs = torch.stack(all_leafs, dim=1) # B x S x 2 all_node_outputs = torch.stack(all_node_outputs, dim=1) # B x S x N target = target.transpose(0, 1).contiguous() # B x S if self.USE_CUDA: # all_leafs = all_leafs.cuda() all_node_outputs = all_node_outputs.cuda() target = target.cuda() new_all_sa_outputs = [] for sa_pair in all_sa_outputs: new_all_sa_outputs.append((sa_pair[0].cuda(), sa_pair[1].cuda())) all_sa_outputs = new_all_sa_outputs # target_length = torch.LongTensor(target_length).cuda() else: pass # target_length = torch.LongTensor(target_length) semantic_alignment_loss = nn.MSELoss() total_semanti_alognment_loss = 0 sa_len = len(all_sa_outputs) for sa_pair in all_sa_outputs: total_semanti_alognment_loss += semantic_alignment_loss(sa_pair[0], sa_pair[1]) # print(total_semanti_alognment_loss) total_semanti_alognment_loss = total_semanti_alognment_loss / sa_len # print(total_semanti_alognment_loss) # op_target = target < num_start # loss_0 = masked_cross_entropy_without_logit(all_leafs, op_target.long(), target_length) loss = masked_cross_entropy(all_node_outputs, target,target_length) + 0.01 * total_semanti_alognment_loss # loss = loss_0 + loss_1 loss.backward() # clip the grad # torch.nn.utils.clip_grad_norm_(encoder.parameters(), 5) # torch.nn.utils.clip_grad_norm_(predict.parameters(), 5) # 
torch.nn.utils.clip_grad_norm_(generate.parameters(), 5) # Update parameters with optimizers return loss.item() # , loss_0.item(), loss_1.item() def evaluate_tree(self, input_batch, input_length, generate_nums, num_pos, num_start, beam_size=5, max_length=30): seq_mask = torch.BoolTensor(1, input_length).fill_(0) # Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size) input_var = input_batch.transpose(0, 1) num_mask = torch.BoolTensor(1, len(num_pos[0]) + len(generate_nums)).fill_(0) padding_hidden = torch.FloatTensor([0.0 for _ in range(self.hidden_size)]).unsqueeze(0) batch_size = 1 if self.USE_CUDA: input_var = input_var.cuda() seq_mask = seq_mask.cuda() padding_hidden = padding_hidden.cuda() num_mask = num_mask.cuda() # Run words through encoder seq_emb = self.embedder(input_var) pade_outputs, _ = self.encoder(seq_emb, input_length) problem_output = pade_outputs[-1, :, :self.hidden_size] + pade_outputs[0, :, self.hidden_size:] encoder_outputs = pade_outputs[:, :, :self.hidden_size] + pade_outputs[:, :, self.hidden_size:] # Prepare input and output variables node_stacks = [[TreeNode(_)] for _ in problem_output.split(1, dim=0)] num_size = len(num_pos[0]) all_nums_encoder_outputs = self.get_all_number_encoder_outputs(encoder_outputs, num_pos, batch_size, num_size, self.hidden_size) # B x P x N embeddings_stacks = [[] for _ in range(batch_size)] left_childs = [None for _ in range(batch_size)] beams = [TreeBeam(0.0, node_stacks, embeddings_stacks, left_childs, [])] for t in range(max_length): current_beams = [] while len(beams) > 0: b = beams.pop() if len(b.node_stack[0]) == 0: current_beams.append(b) continue # left_childs = torch.stack(b.left_childs) left_childs = b.left_childs num_score, op, current_embeddings, current_context, current_nums_embeddings = self.decoder(b.node_stack, left_childs, encoder_outputs, all_nums_encoder_outputs, padding_hidden, seq_mask, num_mask) out_score = nn.functional.log_softmax(torch.cat((op, num_score), dim=1), dim=1) # out_score = p_leaf * out_score topv, topi = out_score.topk(beam_size) for tv, ti in zip(topv.split(1, dim=1), topi.split(1, dim=1)): current_node_stack = copy_list(b.node_stack) current_left_childs = [] current_embeddings_stacks = copy_list(b.embedding_stack) current_out = copy.deepcopy(b.out) out_token = int(ti) current_out.append(out_token) node = current_node_stack[0].pop() if out_token < num_start: generate_input = torch.LongTensor([out_token]) if self.USE_CUDA: generate_input = generate_input.cuda() left_child, right_child, node_label = self.node_generater(current_embeddings, generate_input, current_context) current_node_stack[0].append(TreeNode(right_child)) current_node_stack[0].append(TreeNode(left_child, left_flag=True)) current_embeddings_stacks[0].append(TreeEmbedding(node_label[0].unsqueeze(0), False)) else: current_num = current_nums_embeddings[0, out_token - num_start].unsqueeze(0) while len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal: sub_stree = current_embeddings_stacks[0].pop() op = current_embeddings_stacks[0].pop() current_num = self.merge(op.embedding, sub_stree.embedding, current_num) current_embeddings_stacks[0].append(TreeEmbedding(current_num, True)) if len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal: current_left_childs.append(current_embeddings_stacks[0][-1].embedding) else: current_left_childs.append(None) current_beams.append(TreeBeam(b.score + float(tv), current_node_stack, current_embeddings_stacks, 
current_left_childs, current_out)) beams = sorted(current_beams, key=lambda x: x.score, reverse=True) beams = beams[:beam_size] flag = True for b in beams: if len(b.node_stack[0]) != 0: flag = False if flag: break return beams[0].out def get_all_number_encoder_outputs(self, encoder_outputs, num_pos, batch_size, num_size, hidden_size): indices = list() sen_len = encoder_outputs.size(0) masked_index = [] temp_1 = [1 for _ in range(hidden_size)] temp_0 = [0 for _ in range(hidden_size)] for b in range(batch_size): for i in num_pos[b]: indices.append(i + b * sen_len) masked_index.append(temp_0) indices += [0 for _ in range(len(num_pos[b]), num_size)] masked_index += [temp_1 for _ in range(len(num_pos[b]), num_size)] indices = torch.LongTensor(indices) masked_index = torch.BoolTensor(masked_index) masked_index = masked_index.view(batch_size, num_size, hidden_size) if self.USE_CUDA: indices = indices.cuda() masked_index = masked_index.cuda() all_outputs = encoder_outputs.transpose(0, 1).contiguous() all_embedding = all_outputs.view(-1, encoder_outputs.size(2)) # S x B x H -> (B x S) x H all_num = all_embedding.index_select(0, indices) all_num = all_num.view(batch_size, num_size, hidden_size) return all_num.masked_fill_(masked_index, 0.0) def generate_tree_input(self, target, decoder_output, nums_stack_batch, num_start, unk): # when the decoder input is copied num but the num has two pos, chose the max target_input = copy.deepcopy(target) for i in range(len(target)): if target[i] == unk: num_stack = nums_stack_batch[i].pop() max_score = -float("1e12") for num in num_stack: if decoder_output[i, num_start + num] > max_score: target[i] = num + num_start max_score = decoder_output[i, num_start + num] if target_input[i] >= num_start: target_input[i] = 0 return torch.LongTensor(target), torch.LongTensor(target_input) def mse_loss(self, outputs, targets, mask=None): # outputs : [batch_size,output_len,hidden_size] # targets : [batch_size,output_len,hidden_size] # mask : [batch_size,output_len] mask = mask.to(self.device) x = torch.sqrt(torch.sum(torch.square((outputs - targets)), dim=-1)) # [batch_size,output_len] y = torch.sum(x * mask, dim=-1) / torch.sum(mask, dim=-1) # [batch_size] return torch.sum(y) def convert_idx2symbol(self, output, num_list, num_stack): # batch_size=output.size(0) '''batch_size=1''' seq_len = len(output) num_len = len(num_list) output_list = [] res = [] for s_i in range(seq_len): idx = output[s_i] if idx in [self.out_sos_token, self.out_eos_token, self.out_pad_token]: break symbol = self.out_idx2symbol[idx] if "NUM" in symbol: num_idx = self.mask_list.index(symbol) if num_idx >= num_len: res = [] break res.append(num_list[num_idx]) elif symbol == SpecialTokens.UNK_TOKEN: try: pos_list = num_stack.pop() c = num_list[pos_list[0]] res.append(c) except: return None else: res.append(symbol) output_list.append(res) return output_list
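The solver above trains with masked_cross_entropy over padded equation sequences, so positions beyond each target's true length do not contribute to the loss. A minimal PyTorch sketch of that masking idea follows; masked_ce and the tensor shapes are illustrative, not the toolkit's exact implementation.

import torch
import torch.nn.functional as F


def masked_ce(logits, target, lengths):
    # logits: [batch, seq_len, vocab]; target: [batch, seq_len]; lengths: [batch]
    batch, seq_len, vocab = logits.size()
    log_probs = F.log_softmax(logits, dim=-1)
    # Negative log-likelihood of the gold token at every position.
    nll = -log_probs.gather(2, target.unsqueeze(2)).squeeze(2)        # [batch, seq_len]
    # 1 for real positions, 0 for padding past each target length.
    mask = (torch.arange(seq_len).unsqueeze(0) < lengths.unsqueeze(1)).float()
    return (nll * mask).sum() / mask.sum()


logits = torch.randn(2, 5, 10)
target = torch.randint(0, 10, (2, 5))
lengths = torch.tensor([5, 3])      # second sequence has 2 padded positions
print(masked_ce(logits, target, lengths))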
48.751634
154
0.591634
2,696
22,377
4.614614
0.122033
0.024114
0.022506
0.013021
0.44924
0.404469
0.328109
0.286151
0.246443
0.233583
0
0.011242
0.316262
22,377
458
155
48.858079
0.801895
0.126693
0
0.235474
0
0
0.011105
0
0
0
0
0
0
1
0.027523
false
0.003058
0.042813
0
0.100917
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4affd0e7b393c14db6c40989539fbd205424aa8e
8,128
py
Python
rosetta/tests/test_parallel.py
rafacarrascosa/rosetta
d5a964756b4f51e1032df40ee24f18398e3193b7
[ "BSD-3-Clause" ]
1
2015-01-21T06:00:46.000Z
2015-01-21T06:00:46.000Z
rosetta/tests/test_parallel.py
rafacarrascosa/rosetta
d5a964756b4f51e1032df40ee24f18398e3193b7
[ "BSD-3-Clause" ]
null
null
null
rosetta/tests/test_parallel.py
rafacarrascosa/rosetta
d5a964756b4f51e1032df40ee24f18398e3193b7
[ "BSD-3-Clause" ]
null
null
null
import unittest from functools import partial import pandas as pd from pandas.util.testing import assert_frame_equal, assert_series_equal import numpy as np import threading from StringIO import StringIO from rosetta.parallel import parallel_easy, pandas_easy from rosetta.parallel.threading_easy import threading_easy, LockIterateApply # A couple functions for testing parallel easy # Must be defined outside of the test class for some reason. def _abfunc(x, a, b=1): return x * a * b abfunc = partial(_abfunc, 2, 3) def frame_to_series(frame): x = frame.iloc[0, 0] return pd.Series([x] * len(frame.columns), index=frame.columns) def rightmax(mylist): return [max(mylist[i: i+2]) for i in range(len(mylist))] def leftmax(mylist): for i in range(len(mylist)): if i == 0: result = [mylist[0]] else: result.append(max(mylist[i - 1: i+1])) return result class TestBase(unittest.TestCase): """ Tests the parallel_easy module. """ def setUp(self): self.numbers = range(5) self.benchmark = [0, 6, 12, 18, 24] def test_map_easy_1job(self): result = parallel_easy.map_easy(abfunc, self.numbers, 1) self.assertEqual(result, self.benchmark) def test_map_easy_3job(self): result = parallel_easy.map_easy(abfunc, self.numbers, 3) self.assertEqual(result, self.benchmark) def test_imap_easy_1job(self): result_iterator = parallel_easy.imap_easy(abfunc, self.numbers, 1, 1) result = [] for number in result_iterator: result.append(number) self.assertEqual(result, self.benchmark) def test_imap_easy_3job(self): result_iterator = parallel_easy.imap_easy(abfunc, self.numbers, 3, 1) result = [] for number in result_iterator: result.append(number) self.assertEqual(result, self.benchmark) def test_n_jobs_wrap_positive(self): """ For n_jobs positive, the wrap should return n_jobs. """ for n_jobs in range(1, 5): result = parallel_easy._n_jobs_wrap(n_jobs) self.assertEqual(result, n_jobs) def test_n_jobs_wrap_zero(self): """ For n_jobs zero, the wrap should raise a ValueError """ self.assertRaises(ValueError, parallel_easy._n_jobs_wrap, 0) class TestMapEasyPaddedBlock(unittest.TestCase): """ Tests the parallel_easy.map_easy_padded_blocks function. 
""" def setUp(self): #self.numbers_1 = [ # 0, 0, 2, -1, 4, 2, 6, 7, 6, 9, 12, 11, 11, 14, 55, 55, 44, 33, 33] self.numbers_10 = np.random.randint(0, 5, 10) self.numbers_101 = np.random.randint(0, 5, 101) self.numbers_51 = np.random.randint(0, 5, 101) #self.numbers_1 = [0, 1, 2, 0, 3, 2, 4, 3, 2, 3, 3] self.n_jobs = 1 def lefttest(self, numbers, buffer_len, blocksize): result = parallel_easy.map_easy_padded_blocks( leftmax, numbers, self.n_jobs, buffer_len, blocksize=blocksize) benchmark = leftmax(numbers) self.assertEqual(result, benchmark) def righttest(self, numbers, buffer_len, blocksize): result = parallel_easy.map_easy_padded_blocks( rightmax, numbers, self.n_jobs, buffer_len, blocksize=blocksize) benchmark = rightmax(numbers) self.assertEqual(result, benchmark) def test_map_easy_padded_blocks_14(self): buffer_len = 1 blocksize = 4 self.lefttest(self.numbers_10, buffer_len, blocksize) self.lefttest(self.numbers_101, buffer_len, blocksize) self.lefttest(self.numbers_51, buffer_len, blocksize) self.righttest(self.numbers_10, buffer_len, blocksize) self.righttest(self.numbers_101, buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) def test_map_easy_padded_blocks_24(self): buffer_len = 2 blocksize = 4 self.lefttest(self.numbers_10, buffer_len, blocksize) self.lefttest(self.numbers_101, buffer_len, blocksize) self.lefttest(self.numbers_51, buffer_len, blocksize) self.righttest(self.numbers_10, buffer_len, blocksize) self.righttest(self.numbers_101, buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) def test_map_easy_padded_blocks_37(self): buffer_len = 3 blocksize = 7 self.lefttest(self.numbers_101, buffer_len, blocksize) self.lefttest(self.numbers_51, buffer_len, blocksize) self.righttest(self.numbers_101, buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) def test_map_easy_padded_blocks_17(self): buffer_len = 1 blocksize = 7 self.lefttest(self.numbers_10, buffer_len, blocksize) self.lefttest(self.numbers_101, buffer_len, blocksize) self.lefttest(self.numbers_51, buffer_len, blocksize) self.righttest(self.numbers_10, buffer_len, blocksize) self.righttest(self.numbers_101, buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) class TestPandasEasy(unittest.TestCase): """ Tests the pandas_easy module. 
""" def setUp(self): pass def test_groupby_to_scalar_to_series_1(self): df = pd.DataFrame({'a': [6, 2, 2], 'b': [4, 5, 6]}) benchmark = df.groupby('a').apply(max) result = pandas_easy.groupby_to_scalar_to_series(df, max, 1, by='a') assert_series_equal(result, benchmark) def test_groupby_to_scalar_to_series_2(self): s = pd.Series([1, 2, 3, 4]) labels = ['a', 'a', 'b', 'b'] benchmark = s.groupby(labels).apply(max) result = pandas_easy.groupby_to_scalar_to_series( s, max, 1, by=labels) assert_series_equal(result, benchmark) def test_groupby_to_series_to_frame_1(self): df = pd.DataFrame({'a': [6, 2, 2], 'b': [4, 5, 6]}) labels = ['g1', 'g1', 'g2'] benchmark = df.groupby(labels).mean() result = pandas_easy.groupby_to_series_to_frame( df, np.mean, 1, use_apply=True, by=labels) assert_frame_equal(result, benchmark) def test_groupby_to_series_to_frame_2(self): df = pd.DataFrame({'a': [6, 2, 2], 'b': [4, 5, 6]}) labels = ['g1', 'g1', 'g2'] benchmark = df.groupby(labels).apply(frame_to_series) result = pandas_easy.groupby_to_series_to_frame( df, frame_to_series, 1, use_apply=False, by=labels) assert_frame_equal(result, benchmark) class TestLockIterateApply(unittest.TestCase): """ Test the Locked Iterator Class """ def setUp(self): self.data = ['my', 'name', 'is', 'daniel'] self.num_threads = 4 def bytwo(x): return 2 * x self.func = bytwo def it(): for i in self.data: yield i self.myiter = it() def test_locked_iterator(self): threads = [] lock = threading.Lock() out = StringIO() for i in range(self.num_threads): t = LockIterateApply(self.func, self.myiter, lock, ',', out) threads.append(t) for t in threads: t.start() for t in threads: t.join() benchmark = set(['mymy', 'namename', 'danieldaniel', 'isis', '']) results = set(out.getvalue().split(',')) self.assertEqual(results, benchmark) def test_threading_easy(self): out = StringIO() threading_easy(self.func, self.myiter, self.num_threads, ',', out) benchmark = set(['mymy', 'namename', 'danieldaniel', 'isis', '']) results = set(out.getvalue().split(',')) self.assertEqual(results, benchmark) def test_threading_easy_single(self): out = StringIO() threading_easy(self.func, self.myiter, 1, ',', out) benchmark = set(['mymy', 'namename', 'danieldaniel', 'isis', '']) results = set(out.getvalue().split(',')) self.assertEqual(results, benchmark)
33.866667
79
0.637549
1,071
8,128
4.631186
0.146592
0.075403
0.094355
0.079839
0.672177
0.626815
0.575605
0.546774
0.534274
0.436492
0
0.032963
0.246063
8,128
239
80
34.008368
0.776436
0.061024
0
0.422619
0
0
0.017021
0
0
0
0
0
0.095238
1
0.172619
false
0.005952
0.053571
0.017857
0.279762
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab0084518a26b1bf65b7efbbe0be36485aedb9e2
1,165
py
Python
thecsvparser.py
rbago/CEBD1160_Class4_hwk
1012c81663dc60ea9d139d96f368f8289d4b363e
[ "MIT" ]
null
null
null
thecsvparser.py
rbago/CEBD1160_Class4_hwk
1012c81663dc60ea9d139d96f368f8289d4b363e
[ "MIT" ]
null
null
null
thecsvparser.py
rbago/CEBD1160_Class4_hwk
1012c81663dc60ea9d139d96f368f8289d4b363e
[ "MIT" ]
null
null
null
#!/usr/bin/env python

import os

import numpy as np
import pandas as pd

os.getcwd()

# Request the filename
# Current version of this script works only with TSV type files
mainFilename = input('Input your file name (diabetes.tab.txt or housing.data.txt): ')
print()

# To create a proper dataframe, transform it with numpy
# then convert it with pandas
filenameData = np.genfromtxt(mainFilename, dtype='str')
filenameData = pd.DataFrame(filenameData)

# Obtain the first row to identify whether the header is string or numeric
headers = filenameData.iloc[0]
try:
    pd.to_numeric(headers)
except ValueError:
    filenameData = pd.DataFrame(filenameData.values[1:], columns=headers)

# Change strings to numbers (self identifies as float or integer)
filenameData = filenameData.apply(pd.to_numeric)

# Obtain the mean and standard deviation of the columns
listMean = filenameData.mean()
listStd = filenameData.std()
print(filenameData)

# Print out the results
print('Mean for each column:')
for idx in filenameData.columns:
    print(idx, ':', listMean[idx])
print()

print('Standard deviation for each column:')
for idx in filenameData.columns:
    print(idx, ':', listStd[idx])
25.326087
85
0.758798
163
1,165
5.411043
0.546012
0.013605
0.052154
0.079365
0.108844
0.108844
0.108844
0.108844
0.108844
0.108844
0
0.002006
0.144206
1,165
45
86
25.888889
0.882648
0.337339
0
0.166667
0
0
0.159895
0
0
0
0
0
0
1
0
false
0
0.125
0
0.125
0.291667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab0219367d5f8fd8173529e4b59eaffa00517b4a
3,057
py
Python
donkeycar/tests/test_web_socket.py
wenxichen/donkeycar
d70ee60d35d7e0e004b885e6f6062fb51916dad1
[ "MIT" ]
12
2019-06-28T21:58:01.000Z
2021-01-08T14:25:12.000Z
donkeycar/tests/test_web_socket.py
wenxichen/donkeycar
d70ee60d35d7e0e004b885e6f6062fb51916dad1
[ "MIT" ]
6
2020-11-07T19:27:10.000Z
2021-01-23T22:47:37.000Z
donkeycar/tests/test_web_socket.py
Heavy02011/donkeycar
5a23b0fee170596e29c80826c3db0d3a4c4c5392
[ "MIT" ]
9
2019-07-13T10:12:31.000Z
2020-07-27T10:27:03.000Z
from donkeycar.parts.web_controller.web import WebSocketCalibrateAPI
from functools import partial
from tornado import testing
import tornado.websocket
import tornado.web
import tornado.ioloop
import json
from unittest.mock import Mock

from donkeycar.parts.actuator import PWMSteering, PWMThrottle


class WebSocketCalibrateTest(testing.AsyncHTTPTestCase):
    """
    Example of WebSocket usage as a client
    in AsyncHTTPTestCase-based unit tests.
    """

    def get_app(self):
        app = tornado.web.Application([('/', WebSocketCalibrateAPI)])
        self.app = app
        return app

    def get_ws_url(self):
        return "ws://localhost:" + str(self.get_http_port()) + "/"

    @tornado.testing.gen_test
    def test_calibrate_servo_esc_1(self):
        ws_client = yield tornado.websocket.websocket_connect(self.get_ws_url())
        # Now we can run a test on the WebSocket.
        self.app.drive_train = dict()
        self.app.drive_train['steering'] = Mock()
        self.app.drive_train_type = "SERVO_ESC"
        data = {"config": {"STEERING_LEFT_PWM": 444}}
        yield ws_client.write_message(json.dumps(data))
        yield ws_client.close()
        assert self.app.drive_train['steering'].left_pulse == 444
        assert isinstance(self.app.drive_train['steering'].right_pulse, Mock)

    @tornado.testing.gen_test
    def test_calibrate_servo_esc_2(self):
        ws_client = yield tornado.websocket.websocket_connect(self.get_ws_url())
        # Now we can run a test on the WebSocket.
        self.app.drive_train = dict()
        self.app.drive_train['steering'] = Mock()
        self.app.drive_train_type = "SERVO_ESC"
        data = {"config": {"STEERING_RIGHT_PWM": 555}}
        yield ws_client.write_message(json.dumps(data))
        yield ws_client.close()
        assert self.app.drive_train['steering'].right_pulse == 555
        assert isinstance(self.app.drive_train['steering'].left_pulse, Mock)

    @tornado.testing.gen_test
    def test_calibrate_servo_esc_3(self):
        ws_client = yield tornado.websocket.websocket_connect(self.get_ws_url())
        # Now we can run a test on the WebSocket.
        self.app.drive_train = dict()
        self.app.drive_train['throttle'] = Mock()
        self.app.drive_train_type = "SERVO_ESC"
        data = {"config": {"THROTTLE_FORWARD_PWM": 666}}
        yield ws_client.write_message(json.dumps(data))
        yield ws_client.close()
        assert self.app.drive_train['throttle'].max_pulse == 666
        assert isinstance(self.app.drive_train['throttle'].min_pulse, Mock)

    @tornado.testing.gen_test
    def test_calibrate_mm1(self):
        ws_client = yield tornado.websocket.websocket_connect(self.get_ws_url())
        # Now we can run a test on the WebSocket.
        self.app.drive_train = Mock()
        self.app.drive_train_type = "MM1"
        data = {"config": {"MM1_STEERING_MID": 1234}}
        yield ws_client.write_message(json.dumps(data))
        yield ws_client.close()
        assert self.app.drive_train.STEERING_MID == 1234
34.348315
80
0.682368
406
3,057
4.91133
0.214286
0.070211
0.108325
0.15346
0.695587
0.683551
0.654463
0.599298
0.599298
0.553661
0
0.013234
0.209028
3,057
88
81
34.738636
0.811414
0.077854
0
0.413793
0
0
0.076565
0
0
0
0
0
0.12069
1
0.103448
false
0
0.155172
0.017241
0.310345
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab02c90f464edb9291e3105cd07e5c1bd2aaec14
12,497
py
Python
packages/google/cloud/logging/client.py
rjcuevas/Email-Frontend-AngularJS-
753dbd190582ed953058c9e15c2be920716c7985
[ "MIT" ]
null
null
null
packages/google/cloud/logging/client.py
rjcuevas/Email-Frontend-AngularJS-
753dbd190582ed953058c9e15c2be920716c7985
[ "MIT" ]
null
null
null
packages/google/cloud/logging/client.py
rjcuevas/Email-Frontend-AngularJS-
753dbd190582ed953058c9e15c2be920716c7985
[ "MIT" ]
null
null
null
# Copyright 2016 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Client for interacting with the Google Stackdriver Logging API.""" import os try: from google.cloud.gapic.logging.v2.config_service_v2_api import ( ConfigServiceV2Api as GeneratedSinksAPI) from google.cloud.gapic.logging.v2.logging_service_v2_api import ( LoggingServiceV2Api as GeneratedLoggingAPI) from google.cloud.gapic.logging.v2.metrics_service_v2_api import ( MetricsServiceV2Api as GeneratedMetricsAPI) from google.cloud.logging._gax import _LoggingAPI as GAXLoggingAPI from google.cloud.logging._gax import _MetricsAPI as GAXMetricsAPI from google.cloud.logging._gax import _SinksAPI as GAXSinksAPI except ImportError: # pragma: NO COVER _HAVE_GAX = False GeneratedLoggingAPI = GAXLoggingAPI = None GeneratedMetricsAPI = GAXMetricsAPI = None GeneratedSinksAPI = GAXSinksAPI = None else: _HAVE_GAX = True from google.cloud.client import JSONClient from google.cloud.environment_vars import DISABLE_GRPC from google.cloud.logging.connection import Connection from google.cloud.logging.connection import _LoggingAPI as JSONLoggingAPI from google.cloud.logging.connection import _MetricsAPI as JSONMetricsAPI from google.cloud.logging.connection import _SinksAPI as JSONSinksAPI from google.cloud.logging.entries import ProtobufEntry from google.cloud.logging.entries import StructEntry from google.cloud.logging.entries import TextEntry from google.cloud.logging.logger import Logger from google.cloud.logging.metric import Metric from google.cloud.logging.sink import Sink _DISABLE_GAX = os.getenv(DISABLE_GRPC, False) _USE_GAX = _HAVE_GAX and not _DISABLE_GAX class Client(JSONClient): """Client to bundle configuration needed for API requests. :type project: str :param project: the project which the client acts on behalf of. If not passed, falls back to the default inferred from the environment. :type credentials: :class:`oauth2client.client.OAuth2Credentials` or :class:`NoneType` :param credentials: The OAuth2 Credentials to use for the connection owned by this client. If not passed (and if no ``http`` object is passed), falls back to the default inferred from the environment. :type http: :class:`httplib2.Http` or class that defines ``request()``. :param http: An optional HTTP object to make requests. If not passed, an ``http`` object is created that is bound to the ``credentials`` for the current object. """ _connection_class = Connection _logging_api = _sinks_api = _metrics_api = None @property def logging_api(self): """Helper for logging-related API calls. See: https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.logs """ if self._logging_api is None: if _USE_GAX: generated = GeneratedLoggingAPI() self._logging_api = GAXLoggingAPI(generated) else: self._logging_api = JSONLoggingAPI(self.connection) return self._logging_api @property def sinks_api(self): """Helper for log sink-related API calls. 
See: https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks """ if self._sinks_api is None: if _USE_GAX: generated = GeneratedSinksAPI() self._sinks_api = GAXSinksAPI(generated) else: self._sinks_api = JSONSinksAPI(self.connection) return self._sinks_api @property def metrics_api(self): """Helper for log metric-related API calls. See: https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics """ if self._metrics_api is None: if _USE_GAX: generated = GeneratedMetricsAPI() self._metrics_api = GAXMetricsAPI(generated) else: self._metrics_api = JSONMetricsAPI(self.connection) return self._metrics_api def logger(self, name): """Creates a logger bound to the current client. :type name: str :param name: the name of the logger to be constructed. :rtype: :class:`google.cloud.logging.logger.Logger` :returns: Logger created with the current client. """ return Logger(name, client=self) def _entry_from_resource(self, resource, loggers): """Detect correct entry type from resource and instantiate. :type resource: dict :param resource: one entry resource from API response :type loggers: dict or None :param loggers: A mapping of logger fullnames -> loggers. If not passed, the entry will have a newly-created logger. :rtype: One of: :class:`google.cloud.logging.entries.TextEntry`, :class:`google.cloud.logging.entries.StructEntry`, :class:`google.cloud.logging.entries.ProtobufEntry` :returns: the entry instance, constructed via the resource """ if 'textPayload' in resource: return TextEntry.from_api_repr(resource, self, loggers) elif 'jsonPayload' in resource: return StructEntry.from_api_repr(resource, self, loggers) elif 'protoPayload' in resource: return ProtobufEntry.from_api_repr(resource, self, loggers) raise ValueError('Cannot parse log entry resource') def list_entries(self, projects=None, filter_=None, order_by=None, page_size=None, page_token=None): """Return a page of log entries. See: https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/list :type projects: list of strings :param projects: project IDs to include. If not passed, defaults to the project bound to the client. :type filter_: str :param filter_: a filter expression. See: https://cloud.google.com/logging/docs/view/advanced_filters :type order_by: str :param order_by: One of :data:`~google.cloud.logging.ASCENDING` or :data:`~google.cloud.logging.DESCENDING`. :type page_size: int :param page_size: maximum number of entries to return, If not passed, defaults to a value set by the API. :type page_token: str :param page_token: opaque marker for the next "page" of entries. If not passed, the API will return the first page of entries. :rtype: tuple, (list, str) :returns: list of :class:`google.cloud.logging.entry.TextEntry`, plus a "next page token" string: if not None, indicates that more entries can be retrieved with another call (pass that value as ``page_token``). """ if projects is None: projects = [self.project] resources, token = self.logging_api.list_entries( projects=projects, filter_=filter_, order_by=order_by, page_size=page_size, page_token=page_token) loggers = {} entries = [self._entry_from_resource(resource, loggers) for resource in resources] return entries, token def sink(self, name, filter_=None, destination=None): """Creates a sink bound to the current client. :type name: str :param name: the name of the sink to be constructed. :type filter_: str :param filter_: (optional) the advanced logs filter expression defining the entries exported by the sink. 
If not passed, the instance should already exist, to be refreshed via :meth:`Sink.reload`. :type destination: str :param destination: destination URI for the entries exported by the sink. If not passed, the instance should already exist, to be refreshed via :meth:`Sink.reload`. :rtype: :class:`google.cloud.logging.sink.Sink` :returns: Sink created with the current client. """ return Sink(name, filter_, destination, client=self) def list_sinks(self, page_size=None, page_token=None): """List sinks for the project associated with this client. See: https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/list :type page_size: int :param page_size: maximum number of sinks to return, If not passed, defaults to a value set by the API. :type page_token: str :param page_token: opaque marker for the next "page" of sinks. If not passed, the API will return the first page of sinks. :rtype: tuple, (list, str) :returns: list of :class:`google.cloud.logging.sink.Sink`, plus a "next page token" string: if not None, indicates that more sinks can be retrieved with another call (pass that value as ``page_token``). """ resources, token = self.sinks_api.list_sinks( self.project, page_size, page_token) sinks = [Sink.from_api_repr(resource, self) for resource in resources] return sinks, token def metric(self, name, filter_=None, description=''): """Creates a metric bound to the current client. :type name: str :param name: the name of the metric to be constructed. :type filter_: str :param filter_: the advanced logs filter expression defining the entries tracked by the metric. If not passed, the instance should already exist, to be refreshed via :meth:`Metric.reload`. :type description: str :param description: the description of the metric to be constructed. If not passed, the instance should already exist, to be refreshed via :meth:`Metric.reload`. :rtype: :class:`google.cloud.logging.metric.Metric` :returns: Metric created with the current client. """ return Metric(name, filter_, client=self, description=description) def list_metrics(self, page_size=None, page_token=None): """List metrics for the project associated with this client. See: https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/list :type page_size: int :param page_size: maximum number of metrics to return, If not passed, defaults to a value set by the API. :type page_token: str :param page_token: opaque marker for the next "page" of metrics. If not passed, the API will return the first page of metrics. :rtype: tuple, (list, str) :returns: list of :class:`google.cloud.logging.metric.Metric`, plus a "next page token" string: if not None, indicates that more metrics can be retrieved with another call (pass that value as ``page_token``). """ resources, token = self.metrics_api.list_metrics( self.project, page_size, page_token) metrics = [Metric.from_api_repr(resource, self) for resource in resources] return metrics, token
41.244224
96
0.634632
1,512
12,497
5.143519
0.164683
0.041018
0.055548
0.036775
0.489263
0.45956
0.359907
0.336891
0.307059
0.307059
0
0.005532
0.29127
12,497
302
97
41.380795
0.87253
0.545251
0
0.151515
0
0
0.013931
0
0
0
0
0
0
1
0.10101
false
0
0.20202
0
0.474747
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab046a08c26c0e97b20f9dd2cde86b39dde408b7
1,468
py
Python
tests/test_core/test_graph_objs/test_instantiate_hierarchy.py
wwwidonja/changed_plotly
1bda35a438539a97c84a3ab3952e95e8848467bd
[ "MIT" ]
null
null
null
tests/test_core/test_graph_objs/test_instantiate_hierarchy.py
wwwidonja/changed_plotly
1bda35a438539a97c84a3ab3952e95e8848467bd
[ "MIT" ]
null
null
null
tests/test_core/test_graph_objs/test_instantiate_hierarchy.py
wwwidonja/changed_plotly
1bda35a438539a97c84a3ab3952e95e8848467bd
[ "MIT" ]
null
null
null
from __future__ import absolute_import

from unittest import TestCase
import os
import importlib
import inspect

from plotly.basedatatypes import BasePlotlyType, BaseFigure

datatypes_root = "new_plotly/graph_objs"
datatype_modules = [
    dirpath.replace("/", ".")
    for dirpath, _, _ in os.walk(datatypes_root)
    if not dirpath.endswith("__pycache__")
]


class HierarchyTest(TestCase):
    def test_construct_datatypes(self):
        for datatypes_module in datatype_modules:
            module = importlib.import_module(datatypes_module)
            for name in getattr(module, "__all__", []):
                if name.startswith("_") or name[0].islower() or name == "FigureWidget":
                    continue
                obj = getattr(module, name)
                try:
                    v = obj()
                except Exception:
                    print(
                        "Failed to construct {obj} in module {module}".format(
                            obj=obj, module=datatypes_module
                        )
                    )
                    raise
                if obj.__module__ == "new_plotly.graph_objs._deprecations":
                    self.assertTrue(isinstance(v, list) or isinstance(v, dict))
                    obj()
                elif name in ("Figure", "FigureWidget"):
                    self.assertIsInstance(v, BaseFigure)
                else:
                    self.assertIsInstance(v, BasePlotlyType)
34.952381
87
0.557221
136
1,468
5.757353
0.470588
0.057471
0.03576
0.045977
0
0
0
0
0
0
0
0.001063
0.358992
1,468
41
88
35.804878
0.831031
0
0
0
0
0
0.102861
0.038147
0
0
0
0
0.083333
1
0.027778
false
0
0.194444
0
0.25
0.027778
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab04f30d858425d5d5583ebc3b9cb9eb5ad46681
4,184
py
Python
mycli/packages/special/main.py
lyrl/mycli
d62eefdc819a11ecdb97d93dd7ad1922d28a3795
[ "BSD-3-Clause" ]
10,997
2015-07-27T06:59:04.000Z
2022-03-31T07:49:26.000Z
mycli/packages/special/main.py
lyrl/mycli
d62eefdc819a11ecdb97d93dd7ad1922d28a3795
[ "BSD-3-Clause" ]
937
2015-07-29T09:25:30.000Z
2022-03-30T23:54:03.000Z
mycli/packages/special/main.py
lyrl/mycli
d62eefdc819a11ecdb97d93dd7ad1922d28a3795
[ "BSD-3-Clause" ]
799
2015-07-27T13:13:49.000Z
2022-03-29T21:24:39.000Z
import logging from collections import namedtuple from . import export log = logging.getLogger(__name__) NO_QUERY = 0 PARSED_QUERY = 1 RAW_QUERY = 2 SpecialCommand = namedtuple('SpecialCommand', ['handler', 'command', 'shortcut', 'description', 'arg_type', 'hidden', 'case_sensitive']) COMMANDS = {} @export class CommandNotFound(Exception): pass @export def parse_special_command(sql): command, _, arg = sql.partition(' ') verbose = '+' in command command = command.strip().replace('+', '') return (command, verbose, arg.strip()) @export def special_command(command, shortcut, description, arg_type=PARSED_QUERY, hidden=False, case_sensitive=False, aliases=()): def wrapper(wrapped): register_special_command(wrapped, command, shortcut, description, arg_type, hidden, case_sensitive, aliases) return wrapped return wrapper @export def register_special_command(handler, command, shortcut, description, arg_type=PARSED_QUERY, hidden=False, case_sensitive=False, aliases=()): cmd = command.lower() if not case_sensitive else command COMMANDS[cmd] = SpecialCommand(handler, command, shortcut, description, arg_type, hidden, case_sensitive) for alias in aliases: cmd = alias.lower() if not case_sensitive else alias COMMANDS[cmd] = SpecialCommand(handler, command, shortcut, description, arg_type, case_sensitive=case_sensitive, hidden=True) @export def execute(cur, sql): """Execute a special command and return the results. If the special command is not supported a KeyError will be raised. """ command, verbose, arg = parse_special_command(sql) if (command not in COMMANDS) and (command.lower() not in COMMANDS): raise CommandNotFound try: special_cmd = COMMANDS[command] except KeyError: special_cmd = COMMANDS[command.lower()] if special_cmd.case_sensitive: raise CommandNotFound('Command not found: %s' % command) # "help <SQL KEYWORD> is a special case. We want built-in help, not # mycli help here. if command == 'help' and arg: return show_keyword_help(cur=cur, arg=arg) if special_cmd.arg_type == NO_QUERY: return special_cmd.handler() elif special_cmd.arg_type == PARSED_QUERY: return special_cmd.handler(cur=cur, arg=arg, verbose=verbose) elif special_cmd.arg_type == RAW_QUERY: return special_cmd.handler(cur=cur, query=sql) @special_command('help', '\\?', 'Show this help.', arg_type=NO_QUERY, aliases=('\\?', '?')) def show_help(): # All the parameters are ignored. headers = ['Command', 'Shortcut', 'Description'] result = [] for _, value in sorted(COMMANDS.items()): if not value.hidden: result.append((value.command, value.shortcut, value.description)) return [(None, result, headers, None)] def show_keyword_help(cur, arg): """ Call the built-in "show <command>", to display help for an SQL keyword. 
:param cur: cursor :param arg: string :return: list """ keyword = arg.strip('"').strip("'") query = "help '{0}'".format(keyword) log.debug(query) cur.execute(query) if cur.description and cur.rowcount > 0: headers = [x[0] for x in cur.description] return [(None, cur, headers, '')] else: return [(None, None, None, 'No help found for {0}.'.format(keyword))] @special_command('exit', '\\q', 'Exit.', arg_type=NO_QUERY, aliases=('\\q', )) @special_command('quit', '\\q', 'Quit.', arg_type=NO_QUERY) def quit(*_args): raise EOFError @special_command('\\e', '\\e', 'Edit command with editor (uses $EDITOR).', arg_type=NO_QUERY, case_sensitive=True) @special_command('\\clip', '\\clip', 'Copy query to the system clipboard.', arg_type=NO_QUERY, case_sensitive=True) @special_command('\\G', '\\G', 'Display current query results vertically.', arg_type=NO_QUERY, case_sensitive=True) def stub(): raise NotImplementedError
34.578512
91
0.649618
507
4,184
5.207101
0.242604
0.039773
0.068939
0.037121
0.301515
0.259091
0.235985
0.198485
0.178788
0.115909
0
0.002159
0.225143
4,184
120
92
34.866667
0.812153
0.085086
0
0.113636
0
0
0.09408
0
0
0
0
0
0
1
0.102273
false
0.011364
0.034091
0
0.261364
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab073bf68fc63959db0a0aa37e1caf26b750286a
466
py
Python
Mon_08_06/convert2.py
TungTNg/itc110_python
589ca1398f26d39b05a0b798100df0b05e556e3c
[ "Apache-2.0" ]
null
null
null
Mon_08_06/convert2.py
TungTNg/itc110_python
589ca1398f26d39b05a0b798100df0b05e556e3c
[ "Apache-2.0" ]
null
null
null
Mon_08_06/convert2.py
TungTNg/itc110_python
589ca1398f26d39b05a0b798100df0b05e556e3c
[ "Apache-2.0" ]
null
null
null
# convert2.py
# A program to convert Celsius temps to Fahrenheit.
# This version issues heat and cold warnings.

def main():
    celsius = float(input("What is the Celsius temperature? "))
    fahrenheit = 9 / 5 * celsius + 32
    print("The temperature is", fahrenheit, "degrees fahrenheit.")
    if fahrenheit >= 90:
        print("It's really hot out there, be careful!")
    if fahrenheit <= 30:
        print("Brrrrr. Be sure to dress warmly")

main()
33.285714
66
0.650215
62
466
4.887097
0.709677
0.079208
0
0
0
0
0
0
0
0
0
0.025496
0.242489
466
14
67
33.285714
0.832861
0.246781
0
0
0
0
0.399425
0
0
0
0
0
0
1
0.111111
false
0
0
0
0.111111
0.333333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab078e438c6b69f3703aa8808d1800eb956179af
5,082
py
Python
homeassistant/components/wolflink/__init__.py
basicpail/core
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
[ "Apache-2.0" ]
11
2018-02-16T15:35:47.000Z
2020-01-14T15:20:00.000Z
homeassistant/components/wolflink/__init__.py
basicpail/core
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
[ "Apache-2.0" ]
77
2020-07-16T16:43:09.000Z
2022-03-31T06:14:37.000Z
homeassistant/components/wolflink/__init__.py
Vaarlion/core
f3de8b9f28de01abf72c0f5bb0b457eb1841f201
[ "Apache-2.0" ]
11
2020-12-16T13:48:14.000Z
2022-02-01T00:28:05.000Z
"""The Wolf SmartSet Service integration.""" from datetime import timedelta import logging from httpx import ConnectError, ConnectTimeout from wolf_smartset.token_auth import InvalidAuth from wolf_smartset.wolf_client import FetchFailed, ParameterReadError, WolfClient from homeassistant.config_entries import ConfigEntry from homeassistant.const import CONF_PASSWORD, CONF_USERNAME from homeassistant.core import HomeAssistant from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed from .const import ( COORDINATOR, DEVICE_GATEWAY, DEVICE_ID, DEVICE_NAME, DOMAIN, PARAMETERS, ) _LOGGER = logging.getLogger(__name__) PLATFORMS = ["sensor"] async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Set up Wolf SmartSet Service from a config entry.""" username = entry.data[CONF_USERNAME] password = entry.data[CONF_PASSWORD] device_name = entry.data[DEVICE_NAME] device_id = entry.data[DEVICE_ID] gateway_id = entry.data[DEVICE_GATEWAY] refetch_parameters = False _LOGGER.debug( "Setting up wolflink integration for device: %s (ID: %s, gateway: %s)", device_name, device_id, gateway_id, ) wolf_client = WolfClient(username, password) parameters = await fetch_parameters_init(wolf_client, gateway_id, device_id) async def async_update_data(): """Update all stored entities for Wolf SmartSet.""" try: nonlocal refetch_parameters nonlocal parameters await wolf_client.update_session() if not wolf_client.fetch_system_state_list(device_id, gateway_id): refetch_parameters = True raise UpdateFailed( "Could not fetch values from server because device is Offline." ) if refetch_parameters: parameters = await fetch_parameters(wolf_client, gateway_id, device_id) hass.data[DOMAIN][entry.entry_id][PARAMETERS] = parameters refetch_parameters = False values = { v.value_id: v.value for v in await wolf_client.fetch_value( gateway_id, device_id, parameters ) } return { parameter.parameter_id: ( parameter.value_id, values[parameter.value_id], ) for parameter in parameters if parameter.value_id in values } except ConnectError as exception: raise UpdateFailed( f"Error communicating with API: {exception}" ) from exception except FetchFailed as exception: raise UpdateFailed( f"Could not fetch values from server due to: {exception}" ) from exception except ParameterReadError as exception: refetch_parameters = True raise UpdateFailed( "Could not fetch values for parameter. Refreshing value IDs." ) from exception except InvalidAuth as exception: raise UpdateFailed("Invalid authentication during update.") from exception coordinator = DataUpdateCoordinator( hass, _LOGGER, name=DOMAIN, update_method=async_update_data, update_interval=timedelta(minutes=1), ) await coordinator.async_refresh() hass.data.setdefault(DOMAIN, {}) hass.data[DOMAIN][entry.entry_id] = {} hass.data[DOMAIN][entry.entry_id][PARAMETERS] = parameters hass.data[DOMAIN][entry.entry_id][COORDINATOR] = coordinator hass.data[DOMAIN][entry.entry_id][DEVICE_ID] = device_id hass.config_entries.async_setup_platforms(entry, PLATFORMS) return True async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry): """Unload a config entry.""" unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS) if unload_ok: hass.data[DOMAIN].pop(entry.entry_id) return unload_ok async def fetch_parameters(client: WolfClient, gateway_id: int, device_id: int): """ Fetch all available parameters with usage of WolfClient. 
By default Reglertyp entity is removed because API will not provide value for this parameter. """ fetched_parameters = await client.fetch_parameters(gateway_id, device_id) return [param for param in fetched_parameters if param.name != "Reglertyp"] async def fetch_parameters_init(client: WolfClient, gateway_id: int, device_id: int): """Fetch all available parameters with usage of WolfClient but handles all exceptions and results in ConfigEntryNotReady.""" try: return await fetch_parameters(client, gateway_id, device_id) except (ConnectError, ConnectTimeout, FetchFailed) as exception: raise ConfigEntryNotReady( f"Error communicating with API: {exception}" ) from exception
36.042553
128
0.674538
559
5,082
5.944544
0.236136
0.033704
0.021065
0.025579
0.241649
0.194704
0.144448
0.144448
0.115558
0.052362
0
0.000265
0.256985
5,082
140
129
36.3
0.879767
0.007477
0
0.166667
0
0
0.08158
0
0
0
0
0
0
1
0
false
0.027778
0.101852
0
0.148148
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab088360fae7f84bdf36c27b8f0ab99458367940
932
py
Python
src/levenshtein_distance.py
chunribu/python-algorithms
0483df09b5b4f93bd96712d78e3ad34bcb7e57cc
[ "MIT" ]
null
null
null
src/levenshtein_distance.py
chunribu/python-algorithms
0483df09b5b4f93bd96712d78e3ad34bcb7e57cc
[ "MIT" ]
null
null
null
src/levenshtein_distance.py
chunribu/python-algorithms
0483df09b5b4f93bd96712d78e3ad34bcb7e57cc
[ "MIT" ]
null
null
null
class LevenshteinDistance:
    def solve(self, str_a, str_b):
        a, b = str_a, str_b
        # Distance table; index -1 represents the empty prefix of either string.
        dist = {(x, y): 0 for x in range(len(a)) for y in range(len(b))}
        for x in range(len(a)):
            dist[(x, -1)] = x + 1
        for y in range(len(b)):
            dist[(-1, y)] = y + 1
        dist[(-1, -1)] = 0
        for i in range(len(a)):
            for j in range(len(b)):
                need_edit = a[i] != b[j]
                last_edits = min(dist[(i, j - 1)], dist[(i - 1, j)], dist[(i - 1, j - 1)])
                dist[(i, j)] = last_edits + int(need_edit)
        # Keep the inputs so show() can print the table later.
        self.a, self.b = a, b
        self.distance = dist
        return dist[(len(a) - 1, len(b) - 1)]

    def show(self):
        if hasattr(self, 'distance'):
            dist = self.distance
            for x in range(-1, len(self.a)):
                row = []
                for y in range(-1, len(self.b)):
                    row.append(dist[(x, y)])
                print(row)


# test
ld = LevenshteinDistance()
ld.solve('kitten', 'sitting')
ld.show()
35.846154
79
0.47103
147
932
2.931973
0.251701
0.12993
0.139211
0.076566
0.171694
0.139211
0
0
0
0
0
0.023064
0.348712
932
26
80
35.846154
0.686985
0.004292
0
0
0
0
0.022654
0
0
0
0
0
0
1
0.08
false
0
0
0
0.16
0.04
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab09f37cf048afa31bbd4f9b957124d830dcd972
24,156
py
Python
pyapprox/benchmarks/test_spectral_diffusion.py
ConnectedSystems/pyapprox
4f405654c707cba83d211f327c0f0fdbc95efa29
[ "MIT" ]
26
2019-12-16T02:21:15.000Z
2022-03-17T09:59:18.000Z
pyapprox/benchmarks/test_spectral_diffusion.py
ConnectedSystems/pyapprox
4f405654c707cba83d211f327c0f0fdbc95efa29
[ "MIT" ]
9
2020-03-03T03:04:55.000Z
2021-08-19T22:50:42.000Z
pyapprox/benchmarks/test_spectral_diffusion.py
ConnectedSystems/pyapprox
4f405654c707cba83d211f327c0f0fdbc95efa29
[ "MIT" ]
7
2020-03-02T03:49:17.000Z
2021-02-17T02:07:53.000Z
import numpy as np import unittest from pyapprox.benchmarks.spectral_diffusion import ( kronecker_product_2d, chebyshev_derivative_matrix, SteadyStateDiffusionEquation2D, SteadyStateDiffusionEquation1D ) from pyapprox.univariate_polynomials.quadrature import gauss_jacobi_pts_wts_1D import pyapprox as pya class TestSpectralDiffusion2D(unittest.TestCase): def setUp(self): np.random.seed(1) self.eps = 2 * np.finfo(np.float).eps def test_derivative_matrix(self): order = 4 model = SteadyStateDiffusionEquation1D() bndry_cond = [0., 0.0] xlim = [-1, 1] model.initialize(order, bndry_cond, xlim) derivative_matrix = model.get_derivative_matrix() true_matrix = \ [[5.5, -6.82842712, 2., -1.17157288, 0.5], [1.70710678, -0.70710678, -1.41421356, 0.70710678, -0.29289322], [-0.5, 1.41421356, -0., -1.41421356, 0.5], [0.29289322, -0.70710678, 1.41421356, 0.70710678, -1.70710678], [-0.5, 1.17157288, -2., 6.82842712, -5.5]] # I return points and calculate derivatives using reverse order of # points compared to what is used by Matlab cheb function thus the # derivative matrix I return will be the negative of the matlab version assert np.allclose(-derivative_matrix, true_matrix) def test_homogeneous_possion_equation(self): """ solve u(x)'' = 0, u(0) = 0, u(1) = 0.5 """ order = 4 model = SteadyStateDiffusionEquation1D() bndry_cond = [0.0, 0.5] xlim = [0, 1] model.initialize(order, bndry_cond, xlim) mesh_pts = model.get_collocation_points() diff_vals = 0*mesh_pts.squeeze()+1 forcing_vals = 0*mesh_pts.squeeze() solution = model.solve(diff_vals, forcing_vals) def exact_sol(x): return 0.5*x assert np.linalg.norm(exact_sol(mesh_pts.squeeze())-solution) < 20*self.eps def test_inhomogeneous_possion_equation(self): """ solve u(x)'' = -1, u(0) = 0, u(1) = 1 solution u(x) = -0.5*(x-3.)*x """ order = 4 model = SteadyStateDiffusionEquation1D() bndry_cond = [0.0, 1.0] xlim = [0, 1] model.initialize(order, bndry_cond, xlim) mesh_pts = model.get_collocation_points() diff_vals = 0*mesh_pts.squeeze()+1 forcing_vals = 0*mesh_pts.squeeze()-1 solution = model.solve(diff_vals, forcing_vals) def exact_sol(x): return -0.5*(x-3.)*x assert np.linalg.norm( exact_sol(mesh_pts.squeeze())-solution) < 30*self.eps def test_inhomogeneous_diffusion_equation_with_variable_coefficient(self): """ solve ((1+x)*u(x)')' = -1, u(0) = 0, u(1) = 0 solution u(x) = log(x+1)/log(2) - x """ order = 20 model = SteadyStateDiffusionEquation1D() bndry_cond = [0.0, 0.0] xlim = [0, 1] model.initialize(order, bndry_cond, xlim) mesh_pts = model.get_collocation_points() def diffusivity_function(x): return x + 1 diff_vals = diffusivity_function(mesh_pts.squeeze()) forcing_vals = 0*mesh_pts.squeeze()-1 solution = model.solve(diff_vals, forcing_vals) def exact_sol(x): return np.log(x+1.) / np.log(2.) - x assert np.linalg.norm(exact_sol(mesh_pts.squeeze())-solution) < 3e-13 def test_integrate_1d(self): order = 4 model = SteadyStateDiffusionEquation1D() bndry_cond = [0.0, 0.0] xlim = [0, 1] model.initialize(order, bndry_cond, xlim) mesh_pts = model.get_collocation_points() assert np.allclose(model.integrate(mesh_pts.T**2), 1./3.) assert np.allclose(model.integrate(mesh_pts.T**3), 1./4.) order = 4 model = SteadyStateDiffusionEquation1D() bndry_cond = [0.0, 0.0] xlim = [-1, 1] model.initialize(order, bndry_cond, xlim) mesh_pts = model.get_collocation_points() assert np.allclose(model.integrate(mesh_pts.T**2), 2./3.) assert np.allclose(model.integrate(mesh_pts.T**3), 0.) 
def test_evaluate(self): """ for the PDE ((1+z*x)*u(x)')' = -1, u(0) = 0, u(1) = 0 use model.evaluate to extract QoI """ order = 20 model = SteadyStateDiffusionEquation1D() bndry_cond = [0.0, 0.0] xlim = [0, 1] model.initialize(order, bndry_cond, xlim) model.diffusivity_function = lambda x, z: z*x + 1. model.forcing_function = lambda x, z: 0*x-1 qoi_coords = np.array([0.05, 0.5, 0.95]) model.qoi_functional = lambda x: model.interpolate(x, qoi_coords)[:, 0] sample = np.ones((1, 1), float) qoi = model(sample) assert np.allclose(np.log(qoi_coords+1.)/np.log(2.)-qoi_coords, qoi) sample = 0.5*np.ones((1, 1), float) qoi = model(sample) assert np.allclose( -(qoi_coords*np.log(9./4.)-2.*np.log(qoi_coords+2.) + np.log(4.))/np.log(3./2.), qoi) def test_evaluate_gradient_1d(self): """ for the PDE ((1+sum(z^2)*x)*u(x)')' = -2, u(0) = 0, u(1) = 1 use model.evaluate_gradient to evaluate the gradient of the QoI with respect to the random parameter vector z. The QoI is the intergral of the solution over the entire domain The adjoint rhs is then just 1. """ order = 20 model = SteadyStateDiffusionEquation1D() bndry_cond = [0.0, 0.0] xlim = [0, 1] model.initialize(order, bndry_cond, xlim) model.diffusivity_function = lambda x, z: (z[0]**2+z[1]**2)*x + 1. model.forcing_function = lambda x, z: 0*x-2 sample = np.random.RandomState(2).uniform(-1, 1, (2, 1)) model.diffusivity_derivs_function = \ lambda x, z, i: np.array([2.*x*z[i]]).T model.forcing_derivs_function = \ lambda x, z, i: np.array([0.*x]).T model(sample) # evaluate_gradient has to be called before any more calls to # model.solve with different parameters, because we need to # access self.fwd_solution, which will change with any subsuquent calls errors = pya.check_gradients( model, lambda x: model.evaluate_gradient(x[:, 0]), sample) errors = errors[np.isfinite(errors)] assert errors.max() > 0.1 and errors.min() <= 6e-7 @unittest.skip("Not fully implemented") def test_compute_error_estimate(self): """ for the PDE ((1+z*x)*u(x)')' = -1, u(0) = 0, u(1) = 0 use model.compute_error_estomate to compute an error estimate of the deterministic error in the foward solution. The QoI is the intergral of the solution over the entire domain The adjoint rhs is then just 1. """ order = 5 model = SteadyStateDiffusionEquation1D() bndry_cond = [0.0, 0.0] xlim = [0, 1] model.initialize(order, bndry_cond, xlim) model.diffusivity_function = lambda x, z: z[0]*x + 1. model.forcing_function = lambda x, z: 0.*x-1. sample = np.ones((1, 1), float) qoi = model(sample) error_estimate = model.compute_error_estimate(sample) solution = model.run(sample[:, 0]) def exact_solution(x): return np.log(x+1.)/np.log(2.)-x gl_pts, gl_wts = gauss_jacobi_pts_wts_1D(50, 0, 0) x_range = model.xlim[1]-model.xlim[0] gl_pts = x_range*(gl_pts+1.)/2.+model.xlim[0] gl_wts *= x_range gl_vals = exact_solution(gl_pts) exact_qoi = np.dot(gl_vals, gl_wts) exact_error = abs(exact_qoi-qoi) print('err estimate', error_estimate) print('exact err', exact_error) print('effectivity ratio', error_estimate / exact_error) # should be very close to 1. As adjoint order is increased # it will converge to 1 sample = 0.5*np.ones((1), float) qoi = model.evaluate(sample) exact_solution = -(model.mesh_pts*np.log(9./4.) - 2.*np.log(model.mesh_pts+2.) + np.log(4.))/np.log(3./2.) 
exact_qoi = model.qoi_functional(exact_solution) error = abs(exact_qoi-qoi) error_estimate = model.compute_error_estimate(sample) print(error_estimate, error) # print model.integrate( (exact_solution - solution )**2 ) assert np.allclose(error_estimate, error) def test_timestepping_without_forcing(self): r""" solve u_t(x,t) = u_xx(x,t), u(-1,t) = 0, u(1,t) = 0, u(x,0) = \sin(\pi*x) Exact solution u(x,t) = \exp(-\pi^2t)*sin(\pi*x) """ order = 16 model = SteadyStateDiffusionEquation1D() bndry_cond = [0.0, 0.0] xlim = [-1, 1] model.initialize(order, bndry_cond, xlim) model.diffusivity_function = lambda x, z: 0*x + 1. model.forcing_function = lambda x, t, z: 0*x sample = np.ones((1), float) # dummy argument for this example model.num_time_steps = 1000 model.initial_sol = np.sin(np.pi*model.mesh_pts) model.time_step_size = 1e-4 model.time_step_method = 'adams-moulton-3' # model.time_step_method = 'crank-nicholson' model.time_step_method = 'backward-euler' model.num_stored_timesteps = 100 solution = model.transient_solve(sample) def exact_sol(x, t): return np.exp(-np.pi**2*t)*np.sin(np.pi*x) test_mesh_pts = np.linspace(xlim[0], xlim[1], 100) plot = False # True for i, t in enumerate(model.times): if plot: exact_sol_t = exact_sol(test_mesh_pts, t) model_sol_t = model.interpolate(solution[:, i], test_mesh_pts) pya.plt.plot(test_mesh_pts, model_sol_t, 'k', label='collocation', linewidth=2) pya.plt.plot(test_mesh_pts, exact_sol_t, 'r--', label='exact', linewidth=2) pya.plt.legend(loc=0) pya.plt.title('$t=%1.2f$' % t) pya.plt.show() L2_error = np.sqrt(model.integrate( (exact_sol(model.mesh_pts, t)-solution[:, i])**2)) factor = np.sqrt( model.integrate(exact_sol(model.mesh_pts, t)**2)) # print L2_error, 1e-3*factor assert L2_error < 1e-3*factor def test_timestepping_with_time_independent_forcing(self): r""" solve u_t(x,t) = u_xx(x,t)+sin(3\pi x), u(0,t) = 0, u(1,t) = 0, u(x,0) = 5\sin(2\pi x)+2\sin(3\pi x) Exact solution u(x,t) = 5\exp(-4\pi^2t)*sin(2\pi*x)+(2\exp(-9\pi^2t)+(1-\exp(-9\pi^2t))/(9\pi^2))*\sin(3\pi x) """ order = 32 model = SteadyStateDiffusionEquation1D() bndry_cond = [0.0, 0.0] xlim = [0, 1] model.initialize(order, bndry_cond, xlim) model.diffusivity_function = lambda x, z: 0*x + 1. 
model.forcing_function = lambda x, t, z: np.sin(3*np.pi*x) sample = np.ones((1), float) # dummy argument for this example model.num_time_steps = 10000 model.initial_sol = 5*np.sin(2*np.pi*model.mesh_pts) + \ 2*np.sin(3*np.pi*model.mesh_pts) model.time_step_size = 1e-4 # model.time_step_method = 'adams-moulton-3' model.time_step_method = 'crank-nicholson' # model.time_step_method = 'backward-euler' model.num_stored_timesteps = 100 solution = model.transient_solve(sample) def exact_sol(x, t): return 5.*np.exp(-4.*np.pi**2*t)*np.sin(2.*np.pi*x) + \ (2.*np.exp(-9.*np.pi**2*t)+(1.-np.exp(-9.*np.pi**2*t))/(9.*np.pi**2))*np.sin(3.*np.pi*x) # test_mesh_pts = np.linspace(xlim[0], xlim[1], 100) for i, t in enumerate(model.times): # exact_sol_t = exact_sol(test_mesh_pts,t) # model_sol_t = model.interpolate(solution[:,i],test_mesh_pts) # pya.plt.plot(test_mesh_pts,model_sol_t,'k',label='collocation',linewidth=2) # pya.plt.plot(test_mesh_pts,exact_sol_t,'r--',label='exact',linewidth=2) # pya.plt.legend(loc=0) # pya.plt.title('$t=%1.2f$'%t) # pya.plt.show() L2_error = np.sqrt(model.integrate( (exact_sol(model.mesh_pts, t)-solution[:, i])**2)) factor = np.sqrt( model.integrate(exact_sol(model.mesh_pts, t)**2)) # print(L2_error, 1e-4*factor) assert L2_error < 1e-4*factor def test_timestepping_with_time_dependent_forcing(self): r""" solve u_t(x,t) = u_xx(x,t)+np.sin(3\pi x)*np.sin(t), u(0,t) = 0, u(1,t) = 0, u(x,0) = 5sin(2\pi x)+2sin(3\pi x) Exact solution u(x,t) = 5\exp(-4\pi^2t)*np.sin(2\pi*x)+(2\exp(-9\pi^2t)+\exp(-9\pi^2t)(9\pi^2sin(t)-cos(t)+\exp(-9\pi^2t))/(1+81\pi^4))*sin(3\pi x) """ order = 32 model = SteadyStateDiffusionEquation1D() bndry_cond = [0.0, 0.0] xlim = [0, 1] model.initialize(order, bndry_cond, xlim) model.diffusivity_function = lambda x, z: 0*x + 1. model.forcing_function = lambda x, t, z: np.sin(3*np.pi*x)*np.sin(t) sample = np.ones((1), float) # dummy argument for this example model.num_time_steps = int(1e4) model.initial_sol = 5*np.sin(2*np.pi*model.mesh_pts) + \ 2*np.sin(3*np.pi*model.mesh_pts) model.time_step_size = 1e-4 model.num_stored_timesteps = 100 # model.time_step_method = 'adams-moulton-3' model.time_step_method = 'crank-nicholson' # model.time_step_method = 'backward-euler' # model.time_step_method = 'RK4' solution = model.transient_solve(sample) def exact_sol(x, t): return 5.*np.exp( -4.*np.pi**2*t)*np.sin(2.*np.pi*x)+( 2.*np.exp(-9.*np.pi**2*t)+( 9.*np.pi**2*np.sin(t)-np.cos(t) + np.exp(-9.*np.pi**2*t))/(1+81.*np.pi**4))*np.sin( 3.*np.pi*x) test_mesh_pts = np.linspace(xlim[0], xlim[1], 100) plot = False for i, t in enumerate(model.times): if plot: exact_sol_t = exact_sol(test_mesh_pts, t) model_sol_t = model.interpolate(solution[:, i], test_mesh_pts) pya.plt.plot(test_mesh_pts, model_sol_t, 'k', label='collocation', linewidth=2) pya.plt.plot(test_mesh_pts, exact_sol_t, 'r--', label='exact', linewidth=2) pya.plt.legend(loc=0) pya.plt.title('$t=%1.3f$' % t) pya.plt.show() L2_error = np.sqrt(model.integrate( (exact_sol(model.mesh_pts, t)-solution[:, i])**2)) factor = np.sqrt( model.integrate(exact_sol(model.mesh_pts, t)**2)) # print(L2_error, 1e-4*factor) assert L2_error < 1e-4*factor # print('time %1.2e: L2 error %1.2e' % (t, L2_error)) def test_convergence(self): order = 8 # 1e-5 # order = 16 #1e-11 order = 20 # 2e-15 model = SteadyStateDiffusionEquation1D() bndry_cond = [0.0, 0.0] xlim = [0, 1] model.initialize(order, bndry_cond, xlim) model.diffusivity_function = lambda x, z: 0*x + 1. 
model.forcing_function = lambda x, t, z: np.sin(3*np.pi*x)*np.sin(t) sample = np.ones((1), float) # dummy argument for this example model.initial_sol = 5*np.sin(2*np.pi*model.mesh_pts) + \ 2*np.sin(3*np.pi*model.mesh_pts) final_time = 1. model.time_step_size = 1e-2 model.num_stored_timesteps = 1 # model.time_step_method = 'crank-nicholson' # model.time_step_method = 'backward-euler' # model.time_step_method = 'RK4' needs bug fixes and testing def exact_sol(x, t): return 5.*np.exp( -4.*np.pi**2*t)*np.sin(2.*np.pi*x)+(2.*np.exp(-9.*np.pi**2*t) + ( 9.*np.pi**2*np.sin(t)-np.cos(t)+np.exp(-9.*np.pi**2*t))/(1+81.*np.pi**4))*np.sin(3.*np.pi*x) # test_mesh_pts = np.linspace(xlim[0], xlim[1], 1000) num_convergence_steps = 4 errors = np.empty((num_convergence_steps), float) time_step_sizes = np.empty((num_convergence_steps), float) num_time_steps = np.empty((num_convergence_steps), float) for i in range(num_convergence_steps): model.num_time_steps = int( np.ceil(final_time/model.time_step_size)) solution = model.transient_solve(sample) assert np.allclose(model.times[0], final_time, atol=1e-15) L2_error = np.sqrt(model.integrate( (exact_sol(model.mesh_pts, final_time)-solution[:, 0])**2)) # interpolated_sol = model.interpolate(exact_sol(model.mesh_pts,final_time),test_mesh_pts) # print(np.linalg.norm(exact_sol(test_mesh_pts,final_time)-interpolated_sol)/np.sqrt(interpolated_sol.shape[0])) # print(model.num_time_steps, L2_error) errors[i] = L2_error time_step_sizes[i] = model.time_step_size num_time_steps[i] = model.num_time_steps model.time_step_size /= 2 # print(errors) conv_rate = -np.log10(errors[-1]/errors[0])/np.log10( num_time_steps[-1]/num_time_steps[0]) assert np.allclose(conv_rate, 2, atol=1e-4) # pya.plt.loglog( # num_time_steps, errors, 'o-r', # label=r'$\lVert u(x,T)-\hat{u}(x,T)\\rVert_{\ell_2(D)}$', # linewidth=2) # # print errors[0]*num_time_steps[0]/num_time_steps # order = 1 # pya.plt.loglog( # num_time_steps, # errors[0]*num_time_steps[0]**order/num_time_steps**order, # 'o--', label=r'$(\Delta t)^{-%d}$' % order, linewidth=2) # order = 2 # pya.plt.loglog( # num_time_steps, # errors[0]*num_time_steps[0]**order/num_time_steps**order, # 'o--', label=r'$(\Delta t)^{-%d}$' % order, linewidth=2) # pya.plt.legend(loc=0) # pya.plt.show() def test_inhomogeneous_diffusion_equation_2d_variable_coefficient(self): """ wolfram alpha z random variable x and w are spatial dimension d/dx 16*exp(-z^2)*(x^2-1/4)*(w^2-1/4) d/dx (1+t/pi^2*z*cos(pi/2*(x^2+w^2)))*32*(w^2-1/4)*x*exp(-z^2) Peter zaspels thesis is wrong it is 1 = sigma * not 1 + sigma + """ sigma = 1 num_dims = 1 order = 16 model = SteadyStateDiffusionEquation2D() lims = [-0.5, 0.5, -0.5, 0.5] bndry_cond = [0., 0.] 
model.initialize(order, bndry_cond, lims) def forcing_function(x, y): return \ 32.*(1.+sigma*y[0]*sigma*np.cos(np.pi/2.*(x[0, :]**2+x[1, :]**2))/np.pi**2) * \ np.exp(-y[0]**2)*(x[0, :]**2+x[1, :]**2-0.5) -\ 32./np.pi*y[0]*sigma*np.sin(np.pi/2.*(x[0, :]**2+x[1, :]**2)) *\ (x[0, :]**2 * np.exp(-y[0]**2)*(x[1, :]**2-0.25)+x[1, :]**2 * np.exp(-y[0]**2)*(x[0, :]**2-0.25)) def diffusivity_function(x, y): return 1.+sigma/np.pi**2*y[0]*np.cos( np.pi/2.*(x[0, :]**2+x[1, :]**2)) # only well posed if |y| < pi^2/sigma def exact_sol(x, y): return 16.*np.exp(-y**2) * \ (x[0, :]**2-0.25)*(x[1, :]**2-0.25) rng = np.random.RandomState(1) sample = rng.uniform(-np.sqrt(3), np.sqrt(3), (num_dims)) mesh_pts = model.get_collocation_points() diff_vals = diffusivity_function(mesh_pts, sample) forcing_vals = forcing_function(mesh_pts, sample) solution = model.solve(diff_vals, forcing_vals) # print np.linalg.norm(exact_sol( mesh_pts, sample )- solution ) assert np.linalg.norm(exact_sol(mesh_pts, sample) - solution) < 2.e-12 def test_2d_matlab_example(self): """ Example from Spectral methods in Matlab. Specifically program 16 on page 70 (90 PDF page number) Solve Poisson eq. on [-1,1]x[-1,1] with u=0 on boundary and forcing 10*np.sin(8*xx.*(yy-1)) true_solution at (xx,yy)=(1/np.sqrt(2),1/np.sqrt(2))= 0.32071594511 """ num_dims = 10 order = 24 model = SteadyStateDiffusionEquation2D() lims = [-1, 1, -1, 1] bndry_cond = [0., 0.] model.initialize(order, bndry_cond, lims) def diffusivity(x, y): return np.ones(x.shape[1]) def forcing(x, y): return 10.*np.sin(8.*(x[0, :])*(x[1, :]-1)) rng = np.random.RandomState(1) sample = rng.uniform(-1, 1., (num_dims)) mesh_pts = model.get_collocation_points() diff_vals = diffusivity(mesh_pts, sample) forcing_vals = forcing(mesh_pts, sample) solution = model.solve(diff_vals, forcing_vals) # because I used reverse order of chebyshev points # and thus negative sign # of derivative matrix the solution returned here will have different # order to matlab which can be obtained by applying flipud(fliplr(x)), # e.g. we can obtain the correct coordinates used in the example with # index = np.arange((order+1)**2).reshape( # (order+1, order+1))[3*order//4, 3*order//4] # print(mesh_pts[:, index]) eval_samples = np.array([[1./np.sqrt(2), 1./np.sqrt(2)]]).T qoi = model.interpolate(solution, eval_samples) assert np.allclose(qoi, 0.32071594511) def test_integrate_2d(self): order = 4 model = SteadyStateDiffusionEquation2D() bndry_cond = [0.0, 0.0] lims = [0., 1., 0., 1.] model.initialize(order, bndry_cond, lims) mesh_pts = model.get_collocation_points() assert np.allclose( model.integrate(np.sum(mesh_pts**2, axis=0)[:, None]), 2./3.) order = 4 model = SteadyStateDiffusionEquation2D() bndry_cond = [0.0, 0.0] lims = [-1., 1., -1., 1.] model.initialize(order, bndry_cond, lims) mesh_pts = model.get_collocation_points() assert np.allclose( model.integrate(np.sum(mesh_pts**2, axis=0)[:, None]), 8./3.) def test_evaluate_gradient_2d(self): """ for the PDE ((1+sum(z^2)*x)*u(x)')' = -2, u(0) = 0, u(1) = 1 use model.evaluate_gradient to evaluate the gradient of the QoI with respect to the random parameter vector z. The QoI is the intergral of the solution over the entire domain The adjoint rhs is then just 1. """ order = 20 model = SteadyStateDiffusionEquation2D() lims = [0., 1., 0., 1.] bndry_cond = [0., 0.] model.initialize(order, bndry_cond, lims) model.diffusivity_function = \ lambda x, z: (z[0]**2+z[1]**2)*(x[0]+x[1]) + 1. 
model.forcing_function = lambda x, z: 0*x[0]-2 sample = np.random.RandomState(2).uniform(-1, 1, (2, 1)) model.diffusivity_derivs_function = \ lambda x, z, i: np.array([2.*(x[0]+x[1])*z[i]]).T model.forcing_derivs_function = \ lambda x, z, i: np.array([0.*x[0]]).T model(sample) # evaluate_gradient has to be called before any more calls to # model.solve with different parameters, because we need to # access self.fwd_solution, which will change with any subsuquent calls errors = pya.check_gradients( model, lambda x: model.evaluate_gradient(x[:, 0]), sample) errors = errors[np.isfinite(errors)] assert errors.max() > 0.1 and errors.min() <= 4e-6 if __name__ == "__main__": spectral_diffusion_test_suite = \ unittest.TestLoader().loadTestsFromTestCase(TestSpectralDiffusion2D) unittest.TextTestRunner(verbosity=2).run(spectral_diffusion_test_suite)
43.368043
140
0.573729
3,533
24,156
3.774696
0.100481
0.034643
0.005849
0.014847
0.705009
0.674565
0.636848
0.618701
0.603329
0.580609
0
0.056033
0.278192
24,156
556
141
43.446043
0.708821
0.218455
0
0.514745
0
0
0.010066
0
0
0
0
0
0.058981
1
0.08311
false
0
0.013405
0.037534
0.101877
0.010724
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab0b27f4e0cbd65087dec9065d3e682653bf37df
2,145
py
Python
torchdrug/layers/flow.py
wconnell/torchdrug
a710097cb4ad4c48e0de0d18fbb77ef0e806cdc8
[ "Apache-2.0" ]
772
2021-08-10T05:03:46.000Z
2022-03-31T12:48:31.000Z
torchdrug/layers/flow.py
wconnell/torchdrug
a710097cb4ad4c48e0de0d18fbb77ef0e806cdc8
[ "Apache-2.0" ]
77
2021-08-12T16:19:15.000Z
2022-03-30T14:32:14.000Z
torchdrug/layers/flow.py
wconnell/torchdrug
a710097cb4ad4c48e0de0d18fbb77ef0e806cdc8
[ "Apache-2.0" ]
90
2021-08-11T16:27:13.000Z
2022-03-28T11:41:53.000Z
import torch
from torch import nn
from torch.nn import functional as F

from torchdrug import layers


class ConditionalFlow(nn.Module):
    """
    Conditional flow transformation from `Masked Autoregressive Flow for Density Estimation`_.

    .. _Masked Autoregressive Flow for Density Estimation:
        https://arxiv.org/pdf/1705.07057.pdf

    Parameters:
        input_dim (int): input & output dimension
        condition_dim (int): condition dimension
        hidden_dims (list of int, optional): hidden dimensions
        activation (str or function, optional): activation function
    """

    def __init__(self, input_dim, condition_dim, hidden_dims=None, activation="relu"):
        super(ConditionalFlow, self).__init__()
        self.input_dim = input_dim
        self.output_dim = input_dim

        if hidden_dims is None:
            hidden_dims = []
        self.mlp = layers.MLP(condition_dim, list(hidden_dims) + [input_dim * 2], activation)
        self.rescale = nn.Parameter(torch.zeros(1))

    def forward(self, input, condition):
        """
        Transform data into latent representations.

        Parameters:
            input (Tensor): input representations
            condition (Tensor): conditional representations

        Returns:
            (Tensor, Tensor): latent representations, log-likelihood of the transformation
        """
        scale, bias = self.mlp(condition).chunk(2, dim=-1)
        scale = (F.tanh(scale) * self.rescale)
        output = (input + bias) * scale.exp()
        log_det = scale
        return output, log_det

    def reverse(self, latent, condition):
        """
        Transform latent representations into data.

        Parameters:
            latent (Tensor): latent representations
            condition (Tensor): conditional representations

        Returns:
            (Tensor, Tensor): input representations, log-likelihood of the transformation
        """
        scale, bias = self.mlp(condition).chunk(2, dim=-1)
        scale = (F.tanh(scale) * self.rescale)
        output = latent / scale.exp() - bias
        log_det = scale
        return output, log_det
33.515625
94
0.640559
236
2,145
5.699153
0.322034
0.035688
0.035688
0.040149
0.389591
0.389591
0.324164
0.281041
0.169517
0.169517
0
0.00956
0.268531
2,145
64
95
33.515625
0.847674
0.420979
0
0.32
0
0
0.003728
0
0
0
0
0
0
1
0.12
false
0
0.16
0
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab0b550c21847a65b30237096b5b109cb3b79405
1,531
py
Python
olctools/accessoryFunctions/metadataprinter.py
lowandrew/OLCTools
c74e9d18e2ebe0159aa824e095091045ed227e95
[ "MIT" ]
1
2020-02-29T19:12:48.000Z
2020-02-29T19:12:48.000Z
olctools/accessoryFunctions/metadataprinter.py
lowandrew/OLCTools
c74e9d18e2ebe0159aa824e095091045ed227e95
[ "MIT" ]
3
2017-09-11T18:33:00.000Z
2019-02-01T18:03:29.000Z
olctools/accessoryFunctions/metadataprinter.py
lowandrew/OLCTools
c74e9d18e2ebe0159aa824e095091045ed227e95
[ "MIT" ]
1
2017-07-25T15:40:36.000Z
2017-07-25T15:40:36.000Z
#!/usr/bin/env python3
import logging
import json
import os

__author__ = 'adamkoziol'


class MetadataPrinter(object):

    def printmetadata(self):
        # Iterate through each sample in the analysis
        for sample in self.metadata:
            # Set the name of the json file
            jsonfile = os.path.join(sample.general.outputdirectory,
                                    '{}_metadata.json'.format(sample.name))
            try:
                # Open the metadata file to write
                with open(jsonfile, 'w') as metadatafile:
                    # Write the json dump of the object dump to the metadata file
                    json.dump(sample.dump(), metadatafile, sort_keys=True, indent=4,
                              separators=(',', ': '))
            except IOError:
                # Print useful information in case of an error
                logging.warning('Error creating .json file for {sample}'.format(sample=sample.name))
                raise
            except TypeError as e:
                logging.debug(f'Encountered TypeError writing metadata to file with the following details: {e}')

    def __init__(self, inputobject):
        try:
            self.metadata = inputobject.runmetadata.samples
        except AttributeError:
            try:
                self.metadata = inputobject.metadata.samples
            except AttributeError:
                try:
                    self.metadata = inputobject.metadata
                except AttributeError:
                    self.metadata = inputobject.runmetadata
        self.printmetadata()
39.25641
112
0.595689
159
1,531
5.672956
0.465409
0.066519
0.101996
0.086475
0.135255
0.135255
0.135255
0.135255
0
0
0
0.001944
0.32789
1,531
38
113
40.289474
0.874636
0.151535
0
0.25
0
0
0.112916
0
0
0
0
0
0
1
0.071429
false
0
0.107143
0
0.214286
0.071429
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab0be1bc504d57d2eb757539f99f93b6066eb5bb
6,424
py
Python
mindware/estimators.py
aman-gupta-1995/Machine-Learning-Mindware
8b3050720711730520683c89949e3dbdfb168961
[ "MIT" ]
27
2021-07-19T09:03:34.000Z
2022-03-31T06:19:23.000Z
mindware/estimators.py
aman-gupta-1995/Machine-Learning-Mindware
8b3050720711730520683c89949e3dbdfb168961
[ "MIT" ]
4
2021-07-15T12:17:10.000Z
2022-01-26T17:16:58.000Z
mindware/estimators.py
aman-gupta-1995/Machine-Learning-Mindware
8b3050720711730520683c89949e3dbdfb168961
[ "MIT" ]
17
2020-05-12T20:24:50.000Z
2021-07-11T03:31:38.000Z
import numpy as np
from sklearn.utils.multiclass import type_of_target
from mindware.base_estimator import BaseEstimator
from mindware.components.utils.constants import type_dict, MULTILABEL_CLS, IMG_CLS, TEXT_CLS, OBJECT_DET
from mindware.components.feature_engineering.transformation_graph import DataNode


class Classifier(BaseEstimator):
    """This class implements the classification task. """

    def initialize(self, data: DataNode, **kwargs):
        if self.metric is None:
            self.metric = 'acc'

        # Check the task type: {binary, multiclass}
        task_type = type_of_target(data.data[1])
        if task_type in type_dict:
            task_type = type_dict[task_type]
        else:
            raise ValueError("Invalid Task Type: %s!" % task_type)
        self.task_type = task_type
        super().initialize(data=data, **kwargs)

    def fit(self, data: DataNode, **kwargs):
        """
        Fit the classifier to given training data.
        :param data: instance of DataNode
        :return: self
        """
        if self._ml_engine is None:
            self.initialize(data=data, **kwargs)
        super().fit(data, **kwargs)
        return self

    def predict(self, X, batch_size=None, n_jobs=1):
        """
        Predict classes for X.
        :param X: Datanode
        :param batch_size: int
        :param n_jobs: int
        :return: y : array of shape = [n_samples]
            The predicted classes.
        """
        if not isinstance(X, DataNode):
            raise ValueError("X is supposed to be a Data Node, but get %s" % type(X))
        return super().predict(X, batch_size=batch_size, n_jobs=n_jobs)

    def refit(self):
        return super().refit()

    def predict_proba(self, X, batch_size=None, n_jobs=1):
        """
        Predict probabilities of classes for all samples X.
        :param X: Datanode
        :param batch_size: int
        :param n_jobs: int
        :return: y : array of shape = [n_samples, n_classes]
            The predicted class probabilities.
        """
        if not isinstance(X, DataNode):
            raise ValueError("X is supposed to be a Data Node, but get %s" % type(X))
        pred_proba = super().predict_proba(X, batch_size=batch_size, n_jobs=n_jobs)

        if self.task_type != MULTILABEL_CLS:
            assert (
                np.allclose(
                    np.sum(pred_proba, axis=1),
                    np.ones_like(pred_proba[:, 0]))
            ), "Prediction probability does not sum up to 1!"

        # Check that all probability values lie between 0 and 1.
        assert (
            (pred_proba >= 0).all() and (pred_proba <= 1).all()
        ), "Found prediction probability value outside of [0, 1]!"

        return pred_proba

    def get_tree_importance(self, data: DataNode):
        from lightgbm import LGBMClassifier
        import pandas as pd
        X, y = self.data_transformer(data).data
        lgb = LGBMClassifier(random_state=1)
        lgb.fit(X, y)
        _importance = lgb.feature_importances_
        h = {}
        h['feature_id'] = np.array(range(len(_importance)))
        h['feature_importance'] = _importance
        return pd.DataFrame(h)

    def get_linear_importance(self, data: DataNode):
        from sklearn.linear_model import LogisticRegression
        import pandas as pd
        X, y = self.data_transformer(data).data
        clf = LogisticRegression(random_state=1)
        clf.fit(X, y)
        _ef = clf.coef_
        std_array = np.std(_ef, ddof=1, axis=0)
        abs_array = abs(_ef)
        mean_array = np.mean(abs_array, axis=0)
        _importance = std_array / mean_array
        h = {}
        h['feature_id'] = np.array(range(len(_importance)))
        h['feature_importance'] = _importance
        return pd.DataFrame(h)

    def get_linear_impact(self, data: DataNode):
        from sklearn.linear_model import LogisticRegression
        import pandas as pd
        if (len(set(data.data[1]))) > 2:
            print('ERROR! Only binary classification is supported!')
            return 0
        X, y = self.data_transformer(data).data
        clf = LogisticRegression(random_state=1)
        clf.fit(X, y)
        _ef = clf.coef_
        _impact = _ef[0]
        h = {}
        h['feature_id'] = np.array(range(len(_impact)))
        h['feature_impact'] = _impact
        return pd.DataFrame(h)


class Regressor(BaseEstimator):
    """This class implements the regression task. """

    def initialize(self, data: DataNode, **kwargs):
        self.metric = 'mse' if self.metric is None else self.metric

        # Check the task type: {continuous}
        task_type = type_dict['continuous']
        self.task_type = task_type
        super().initialize(data=data, **kwargs)

    def fit(self, data, **kwargs):
        """
        Fit the regressor to given training data.
        :param data: DataNode
        :return: self
        """
        if self._ml_engine is None:
            self.initialize(data=data, **kwargs)
        super().fit(data, **kwargs)
        return self

    def predict(self, X, batch_size=None, n_jobs=1):
        """
        Make predictions for X.
        :param X: DataNode
        :param batch_size: int
        :param n_jobs: int
        :return: y : array of shape = [n_samples] or [n_samples, n_labels]
            The predicted classes.
        """
        if not isinstance(X, DataNode):
            raise ValueError("X is supposed to be a Data Node, but get %s" % type(X))
        return super().predict(X, batch_size=batch_size, n_jobs=n_jobs)

    def get_tree_importance(self, data: DataNode):
        from lightgbm import LGBMRegressor
        import pandas as pd
        X, y = self.data_transformer(data).data
        lgb = LGBMRegressor(random_state=1)
        lgb.fit(X, y)
        _importance = lgb.feature_importances_
        h = {}
        h['feature_id'] = np.array(range(len(_importance)))
        h['feature_importance'] = _importance
        return pd.DataFrame(h)

    def get_linear_impact(self, data: DataNode):
        from sklearn.linear_model import LinearRegression
        import pandas as pd
        X, y = self.data_transformer(data).data
        reg = LinearRegression()
        reg.fit(X, y)
        _impact = reg.coef_
        h = {}
        h['feature_id'] = np.array(range(len(_impact)))
        h['feature_impact'] = _impact
        return pd.DataFrame(h)
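Classifier and Regressor expose a scikit-learn-style fit/predict workflow over DataNode objects. The snippet below is a minimal usage sketch, not taken from the repository: the DataNode instances (train_node, test_node) are assumed to be prepared elsewhere by the library's data-handling code, and the constructor argument shown is an assumption.

from mindware.estimators import Classifier

# train_node / test_node are assumed to be DataNode instances built elsewhere;
# their construction is not shown in this file.
clf = Classifier(metric='acc')          # 'acc' matches the default set in initialize()
clf.fit(train_node)                     # lazily initializes the engine, then fits
labels = clf.predict(test_node)         # array of shape [n_samples]
proba = clf.predict_proba(test_node)    # rows sum to 1 for single-label tasks

# Post-hoc interpretability helpers defined above:
importance_df = clf.get_tree_importance(train_node)   # LightGBM-based feature importances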
35.103825
104
0.60523
815
6,424
4.604908
0.190184
0.029843
0.034106
0.026645
0.645883
0.614975
0.600053
0.57927
0.57927
0.561418
0
0.005275
0.291719
6,424
182
105
35.296703
0.81956
0.144614
0
0.605042
0
0
0.084947
0
0
0
0
0
0.016807
1
0.109244
false
0
0.226891
0.008403
0.453782
0.008403
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab0e2a7ca0afb7293dad4730992d135dc62fe897
2,271
py
Python
examples/industrial_quality_inspection/train_yolov3.py
petr-kalinin/PaddleX
e4f08b50dab01f3720570702a071188d1efd4042
[ "Apache-2.0" ]
1
2021-09-26T16:00:54.000Z
2021-09-26T16:00:54.000Z
examples/industrial_quality_inspection/train_yolov3.py
gq5227246/PaddleX
80b97ae4c9d7a290f9e7908d5cd54b7b053c2072
[ "Apache-2.0" ]
null
null
null
examples/industrial_quality_inspection/train_yolov3.py
gq5227246/PaddleX
80b97ae4c9d7a290f9e7908d5cd54b7b053c2072
[ "Apache-2.0" ]
1
2021-06-04T19:57:53.000Z
2021-06-04T19:57:53.000Z
# Environment variable configuration, used to control whether the GPU is used
# Documentation: https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

from paddlex.det import transforms
import paddlex as pdx

# Download and extract the aluminum defect-inspection dataset
aluminum_dataset = 'https://bj.bcebos.com/paddlex/examples/industrial_quality_inspection/datasets/aluminum_inspection.tar.gz'
pdx.utils.download_and_decompress(aluminum_dataset, path='./')

# Define the transforms used during training and validation
# API reference: https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/det_transforms.html
train_transforms = transforms.Compose([
    transforms.MixupImage(mixup_epoch=250),
    transforms.RandomDistort(),
    transforms.RandomExpand(),
    transforms.RandomCrop(),
    transforms.Resize(target_size=608, interp='RANDOM'),
    transforms.RandomHorizontalFlip(),
    transforms.Normalize()
])

eval_transforms = transforms.Compose([
    transforms.Resize(target_size=608, interp='CUBIC'),
    transforms.Normalize()
])

# Define the datasets used for training and validation
# API reference: https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-vocdetection
train_dataset = pdx.datasets.VOCDetection(
    data_dir='aluminum_inspection',
    file_list='aluminum_inspection/train_list.txt',
    label_list='aluminum_inspection/labels.txt',
    transforms=train_transforms,
    shuffle=True)
eval_dataset = pdx.datasets.VOCDetection(
    data_dir='aluminum_inspection',
    file_list='aluminum_inspection/val_list.txt',
    label_list='aluminum_inspection/labels.txt',
    transforms=eval_transforms)

# Initialize the model and start training
# Training metrics can be inspected with VisualDL, see https://paddlex.readthedocs.io/zh_CN/develop/train/visualdl.html
num_classes = len(train_dataset.labels)

# API reference: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#paddlex-det-yolov3
model = pdx.det.YOLOv3(num_classes=num_classes, backbone='MobileNetV3_large')

# API reference: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#train
# Parameter descriptions and tuning notes: https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html
model.train(
    num_epochs=400,
    train_dataset=train_dataset,
    train_batch_size=8,
    eval_dataset=eval_dataset,
    warmup_steps=4000,
    learning_rate=0.000125,
    lr_decay_epochs=[240, 320],
    save_dir='output/yolov3_mobilenetv3',
    use_vdl=True)
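Once training finishes, the checkpoints written under save_dir can be loaded back for inference. The short sketch below is not part of the training script: load_model and predict follow PaddleX's documented 1.x API, the best_model sub-directory name follows PaddleX's usual layout, and the image path is a placeholder.

import paddlex as pdx

# Load the best checkpoint produced by model.train() above
# (sub-directory name assumed from PaddleX's usual output layout).
model = pdx.load_model('output/yolov3_mobilenetv3/best_model')

# Run detection on a single image; 'sample.jpg' is a placeholder path.
result = model.predict('sample.jpg')
print(result)  # list of dicts with category, bbox and score fields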
37.85
125
0.784236
282
2,271
6.106383
0.397163
0.073171
0.081301
0.089431
0.408246
0.408246
0.349594
0.349594
0.349594
0.235772
0
0.017476
0.092911
2,271
59
126
38.491525
0.818447
0.303831
0
0.153846
0
0.025641
0.219949
0.096547
0
0
0
0
0
1
0
false
0
0.076923
0
0.076923
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab0fb9e929f14279551c419b287e78a48d3a92f4
1,882
py
Python
wooey/migrations/0009_script_versioning.py
macdaliot/Wooey
3a0f40e3b3ab4d905f9acc72f5cd5d6453e14834
[ "BSD-3-Clause" ]
1
2020-11-05T15:04:33.000Z
2020-11-05T15:04:33.000Z
wooey/migrations/0009_script_versioning.py
macdaliot/Wooey
3a0f40e3b3ab4d905f9acc72f5cd5d6453e14834
[ "BSD-3-Clause" ]
null
null
null
wooey/migrations/0009_script_versioning.py
macdaliot/Wooey
3a0f40e3b3ab4d905f9acc72f5cd5d6453e14834
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import wooey.models.mixins


class Migration(migrations.Migration):

    dependencies = [
        ('wooey', '0008_short_param_admin'),
    ]

    operations = [
        migrations.CreateModel(
            name='ScriptVersion',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('script_version', models.CharField(default='1', help_text='The script version.', max_length=50, blank=True)),
                ('script_iteration', models.PositiveSmallIntegerField(default=1)),
                ('script_path', models.FileField(upload_to=b'')),
                ('default_version', models.BooleanField(default=False)),
                ('created_date', models.DateTimeField(auto_now_add=True)),
                ('modified_date', models.DateTimeField(auto_now=True)),
                ('script', models.ForeignKey(related_name='script_version_new', to='wooey.Script')),
            ],
            bases=(wooey.models.mixins.ModelDiffMixin, wooey.models.mixins.WooeyPy2Mixin, models.Model),
        ),
        migrations.AddField(
            model_name='scriptparameter',
            name='script_version',
            field=models.ForeignKey(null=True, to='wooey.ScriptVersion'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='scriptparametergroup',
            name='script_version',
            field=models.ForeignKey(null=True, to='wooey.ScriptVersion'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='wooeyjob',
            name='script_version',
            field=models.ForeignKey(null=True, to='wooey.ScriptVersion'),
            preserve_default=False,
        ),
    ]
39.208333
126
0.609989
177
1,882
6.288136
0.423729
0.070081
0.061096
0.072776
0.334232
0.280323
0.280323
0.280323
0.280323
0.280323
0
0.00721
0.263018
1,882
47
127
40.042553
0.795242
0.011158
0
0.390244
0
0
0.173749
0.011834
0
0
0
0
0
1
0
false
0
0.073171
0
0.146341
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab10a7d42774f492876454acc8afc34598c448bf
15,849
py
Python
bicycleparameters/period.py
sandertyu/Simple-Geometry-Plot
6fa4dfb50aebc4215818f75ff56f916fc32f8cfa
[ "BSD-2-Clause-FreeBSD" ]
20
2015-07-06T06:25:07.000Z
2021-12-10T19:36:33.000Z
bicycleparameters/period.py
sandertyu/Simple-Geometry-Plot
6fa4dfb50aebc4215818f75ff56f916fc32f8cfa
[ "BSD-2-Clause-FreeBSD" ]
52
2015-11-10T16:21:02.000Z
2022-03-03T11:46:52.000Z
bicycleparameters/period.py
sandertyu/Simple-Geometry-Plot
6fa4dfb50aebc4215818f75ff56f916fc32f8cfa
[ "BSD-2-Clause-FreeBSD" ]
12
2015-07-13T23:32:58.000Z
2021-12-09T18:42:16.000Z
#!/usr/bin/env/ python import os from math import pi import numpy as np from numpy import ma from scipy.optimize import leastsq import matplotlib.pyplot as plt from uncertainties import ufloat # local modules from .io import load_pendulum_mat_file def average_rectified_sections(data): '''Returns a slice of an oscillating data vector based on the max and min of the mean of the sections created by retifiying the data. Parameters ---------- data : ndarray, shape(n,) Returns ------- data : ndarray, shape(m,) A slice where m is typically less than n. Notes ----- This is a function to try to handle the fact that some of the data from the torsional pendulum had a beating like phenomena and we only want to select a section of the data that doesn't seem to exhibit the phenomena. ''' # subtract the mean so that there are zero crossings meanSubData = data - np.mean(data) # find the zero crossings zeroCrossings = np.where(np.diff(np.sign(meanSubData)))[0] # add a zero to the beginning crossings = np.concatenate((np.array([0]), zeroCrossings)) # find the mean value of the rectified sections and the local indice secMean = [] localMeanInd = [] for sec in np.split(np.abs(meanSubData), zeroCrossings): localMeanInd.append(np.argmax(sec)) secMean.append(np.mean(sec)) meanInd = [] # make the global indices for i, val in enumerate(crossings): meanInd.append(val + localMeanInd[i]) # only take the top part of the data because some the zero crossings can be # a lot at one point mainly due to the resolution of the daq box threshold = np.mean(secMean) secMeanOverThresh = [] indice = [] for i, val in enumerate(secMean): if val > threshold: secMeanOverThresh.append(val) indice.append(meanInd[i]) # now return the data based on the max value and the min value maxInd = indice[np.argmax(secMeanOverThresh)] minInd = indice[np.argmin(secMeanOverThresh)] return data[maxInd:minInd] def calc_periods_for_files(directory, filenames, forkIsSplit): '''Calculates the period for all filenames in directory. Parameters ---------- directory : string This is the path to the RawData directory. filenames : list List of all the mat file names in the RawData directory. forkIsSplit : boolean True if the fork is broken into a handlebar and fork and false if the fork and handlebar was measured together. Returns ------- periods : dictionary Contains all the periods for the mat files in the RawData directory. 
''' periods = {} def pathParts(path): '''Splits a path into a list of its parts.''' components = [] while True: (path,tail) = os.path.split(path) if tail == "": components.reverse() return components components.append(tail) pathToRawDataParts = pathParts(directory) pathToRawDataParts.pop() pathToBicycleDir = os.path.join(pathToRawDataParts[0], pathToRawDataParts[1], pathToRawDataParts[2]) pathToPlotDir = os.path.join(pathToBicycleDir, 'Plots', 'PendulumFit') # make sure there is a place to save the plots if not os.path.exists(pathToPlotDir): os.makedirs(pathToPlotDir) for f in filenames: print("Calculating the period for:", f) # load the pendulum data pathToMatFile = os.path.join(directory, f) matData = load_pendulum_mat_file(pathToMatFile) # generate a variable name for this period periodKey = get_period_key(matData, forkIsSplit) # calculate the period sampleRate = get_sample_rate(matData) pathToPlotFile = os.path.join(pathToPlotDir, os.path.splitext(f)[0] + '.png') period = get_period_from_truncated(matData['data'], sampleRate, pathToPlotFile) print("The period is:", period, "\n") # either append the the period or if it isn't there yet, then # make a new list try: periods[periodKey].append(period) except KeyError: periods[periodKey] = [period] # now average all the periods for k, v in periods.items(): if k.startswith('T'): periods[k] = np.mean(v) return periods def check_for_period(mp, forkIsSplit): '''Returns whether the fork is split into two pieces and whether the period calculations need to happen again. Parameters ---------- mp : dictionary Dictionary the measured parameters. forkIsSplit : boolean True if the fork is broken into a handlebar and fork and false if the fork and handlebar was measured together. Returns ------- forcePeriodCalc : boolean True if there wasn't enough period data in mp, false if there was. forkIsSplit : boolean True if the fork is broken into a handlebar and fork and false if the fork and handlebar was measured together. ''' forcePeriodCalc = False #Check to see if mp contains at enough periods to not need # recalculation ncTSum = 0 ntTSum = 0 for key in mp.keys(): # check for any periods in the keys if key[:2] == 'Tc': ncTSum += 1 elif key[:2] == 'Tt': ntTSum += 1 # if there isn't enough data then force the period cals again if forkIsSplit: if ncTSum < 5 or ntTSum < 11: forcePeriodCalc = True else: if ncTSum < 4 or ntTSum < 8: forcePeriodCalc = True return forcePeriodCalc def fit_goodness(ym, yp): ''' Calculate the goodness of fit. Parameters ---------- ym : ndarray, shape(n,) The vector of measured values. yp : ndarry, shape(n,) The vector of predicted values. Returns ------- rsq : float The r squared value of the fit. SSE : float The error sum of squares. SST : float The total sum of squares. SSR : float The regression sum of squares. ''' SSR = np.sum((yp - np.mean(ym))**2) SST = np.sum((ym - np.mean(ym))**2) SSE = SST - SSR rsq = SSR / SST return rsq, SSE, SST, SSR def get_period(data, sampleRate, pathToPlotFile): '''Returns the period and uncertainty for data resembling a decaying oscillation. Parameters ---------- data : ndarray, shape(n,) A time series that resembles a decaying oscillation. sampleRate : int The frequency that data was sampled at. pathToPlotFile : string A path to the file to print the plots. Returns ------- T : ufloat The period of oscillation and its uncertainty. 
''' y = data x = np.linspace(0., (len(y) - 1) / float(sampleRate), num=len(y)) def fitfunc(p, t): '''Decaying oscillation function.''' a = p[0] b = np.exp(-p[3] * p[4] * t) c = p[1] * np.sin(p[4] * np.sqrt(1 - p[3]**2) * t) d = p[2] * np.cos(p[4] * np.sqrt(1 - p[3]**2) * t) return a + b * (c + d) # initial guesses #p0 = np.array([1.35, -.5, -.75, 0.01, 3.93]) # guess from delft #p0 = np.array([2.5, -.75, -.75, 0.001, 4.3]) # guess from ucd p0 = make_guess(data, sampleRate) # tries to make a good guess # create the error function errfunc = lambda p, t, y: fitfunc(p, t) - y # minimize the error function p1, success = leastsq(errfunc, p0[:], args=(x, y)) lscurve = fitfunc(p1, x) # find the uncertainty in the fit parameters rsq, SSE, SST, SSR = fit_goodness(y, lscurve) sigma = np.sqrt(SSE / (len(y) - len(p0))) # calculate the jacobian L = jac_fitfunc(p1, x) # the Hessian H = np.dot(L.T, L) # the covariance matrix U = sigma**2. * np.linalg.inv(H) # the standard deviations sigp = np.sqrt(U.diagonal()) # natural frequency wo = ufloat(p1[4], sigp[4]) # damping ratio zeta = ufloat(p1[3], sigp[3]) # damped natural frequency wd = (1. - zeta**2.)**(1. / 2.) * wo # damped natural frequency (hz) fd = wd / 2. / pi # period T = 1. / fd # plot the data and save it to file fig = plt.figure() plot_osfit(x, y, lscurve, p1, rsq, T, m=np.max(x), fig=fig) plt.savefig(pathToPlotFile) plt.close() # return the period return T def get_period_from_truncated(data, sampleRate, pathToPlotFile): #dataRec = average_rectified_sections(data) dataRec = data dataGood = select_good_data(dataRec, 0.1) return get_period(dataGood, sampleRate, pathToPlotFile) def get_period_key(matData, forkIsSplit): '''Returns a dictionary key for the period entries. Parameters ---------- matData : dictionary The data imported from a pendulum mat file. forkIsSplit : boolean True if the fork is broken into a handlebar and fork and false if the fork and handlebar was measured together. Returns ------- key : string A key of the form 'T[pendulum][part][orientation]'. For example, if it is the frame that was hung as a torsional pendulum at the second orientation angle then the key would be 'TtB2'. ''' # set up the subscripting for the period key subscripts = {'Fwheel': 'F', 'Rwheel': 'R', 'Frame': 'B', 'Flywheel': 'D'} # the Flywheel is for the gyro bike and it actually represents the front # wheel and the flywheel as one rigid body. It was easier to measure the # the inertia this way. So...the to get the actual flywheel inertia, one # must subtract the inertia of the Fwheel, F, from the Flywheel, D. if forkIsSplit: subscripts['Fork'] = 'S' subscripts['Handlebar'] = 'G' else: subscripts['Fork'] = 'H' try: subscripts[matData['rod']] = 'P' except KeyError: subscripts['Rod'] = 'P' # used to convert word ordinals to numbers ordinal = {'First' : '1', 'Second' : '2', 'Third' : '3', 'Fourth' : '4', 'Fifth' : '5', 'Sixth' : '6'} try: orienWord = matData['angleOrder'] except: orienWord = matData['angle'] pend = matData['pendulum'][0].lower() part = subscripts[matData['part']] orienNum = ordinal[orienWord] return 'T' + pend + part + orienNum def get_sample_rate(matData): '''Returns the sample rate for the data.''' if 'ActualRate' in matData.keys(): sampleRate = matData['ActualRate'] else: sampleRate = matData['sampleRate'] return sampleRate def jac_fitfunc(p, t): ''' Calculate the Jacobian of a decaying oscillation function. Uses the analytical formulations of the partial derivatives. 
Parameters ---------- p : the five parameters of the equation t : time vector Returns ------- jac : The jacobian, the partial of the vector function with respect to the parameters vector. A 5 x N matrix where N is the number of time steps. ''' jac = np.zeros((len(p), len(t))) e = np.exp(-p[3] * p[4] * t) dampsq = np.sqrt(1 - p[3]**2) s = np.sin(dampsq * p[4] * t) c = np.cos(dampsq * p[4] * t) jac[0] = np.ones_like(t) jac[1] = e * s jac[2] = e * c jac[3] = (-p[4] * t * e * (p[1] * s + p[2] * c) + e * (-p[1] * p[3] * p[4] * t / dampsq * c + p[2] * p[3] * p[4] * t / dampsq * s)) jac[4] = (-p[3] * t * e * (p[1] * s + p[2] * c) + e * dampsq * t * (p[1] * c - p[2] * s)) return jac.T def make_guess(data, sampleRate): '''Returns a decent starting point for fitting the decaying oscillation function. ''' p = np.zeros(5) # the first unknown is the shift along the y axis p[0] = np.mean(data) # work with the mean subtracted data from now on data = data - p[0] # what is the initial slope of the curve if data[10] > data[0]: slope = 1 else: slope = -1 # the second is the amplitude for the sin function p[1] = slope * np.max(data) / 2 # the third is the amplitude for the cos function p[2] = slope * np.max(data) # the fourth is the damping ratio and is typically small, 0.001 < zeta < 0.02 p[3] = 0.001 # the fifth is the undamped natural frequency # first remove the data around zero dataMasked = ma.masked_inside(data, -0.1, 0.1) # find the zero crossings zeroCrossings = np.where(np.diff(np.sign(dataMasked)))[0] # remove redundant crossings zero = [] for i, v in enumerate(zeroCrossings): if abs(v - zeroCrossings[i - 1]) > 20: zero.append(v) # get the samples per period samplesPerPeriod = 2*np.mean(np.diff(zero)) # now the frequency p[4] = (samplesPerPeriod / float(sampleRate) /2. / pi)**-1 if np.isnan(p[4]): p[4] = 4. return p def plot_osfit(t, ym, yf, p, rsq, T, m=None, fig=None): '''Plot fitted data over the measured Parameters ---------- t : ndarray (n,) Measurement time in seconds ym : ndarray (n,) The measured voltage yf : ndarray (n,) p : ndarray (5,) The fit parameters for the decaying osicallation fucntion rsq : float The r squared value of y (the fit) T : float The period m : float The maximum value to plot Returns ------- fig : the figure ''' # figure properties figwidth = 4. # in inches goldenMean = (np.sqrt(5) - 1.0) / 2.0 figsize = [figwidth, figwidth * goldenMean] params = {#'backend': 'ps', 'axes.labelsize': 8, 'axes.titlesize': 8, 'text.fontsize': 8, 'legend.fontsize': 8, 'xtick.labelsize': 6, 'ytick.labelsize': 6, 'text.usetex': True, #'figure.figsize': figsize } if fig: fig = fig else: fig = plt.figure(2) fig.set_size_inches(figsize) plt.rcParams.update(params) ax1 = plt.axes([0.125, 0.125, 0.9-0.125, 0.65]) #if m == None: #end = len(t) #else: #end = t[round(m/t[-1]*len(t))] ax1.plot(t, ym, '.', markersize=2) plt.plot(t, yf, 'k-') plt.xlabel('Time [s]') plt.ylabel('Amplitude [V]') equation = r'$f(t)={0:1.2f}+e^{{-({3:1.3f})({4:1.1f})t}}\left[{1:1.2f}\sin{{\sqrt{{1-{3:1.3f}^2}}{4:1.1f}t}}+{2:1.2f}\cos{{\sqrt{{1-{3:1.3f}^2}}{4:1.1f}t}}\right]$'.format(p[0], p[1], p[2], p[3], p[4]) rsquare = '$r^2={0:1.3f}$'.format(rsq) period = '$T={0} s$'.format(T) plt.title(equation + '\n' + rsquare + ', ' + period) plt.legend(['Measured', 'Fit']) if m is not None: plt.xlim((0, m)) else: pass return fig def select_good_data(data, percent): '''Returns a slice of the data from the index at maximum value to the index at a percent of the maximum value. 
Parameters ---------- data : ndarray, shape(1,) This should be a decaying function. percent : float The percent of the maximum to clip. This basically snips of the beginning and end of the data so that the super damped tails are gone and also any weirdness at the beginning. ''' meanSub = data - np.mean(data) maxVal = np.max(np.abs(meanSub)) maxInd = np.argmax(np.abs(meanSub)) for i, v in reversed(list(enumerate(meanSub))): if v > percent * maxVal: minInd = i break return data[maxInd:minInd]
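The core of get_period above is a least-squares fit of a decaying oscillation, f(t) = p0 + exp(-p3*p4*t) * (p1*sin(p4*sqrt(1-p3^2)*t) + p2*cos(p4*sqrt(1-p3^2)*t)), from which the damped natural frequency and hence the period are recovered. The following self-contained sketch uses synthetic data (it is not part of the original module) to illustrate that fit and period recovery with plain NumPy/SciPy:

import numpy as np
from scipy.optimize import leastsq

# True parameters: offset, sin amplitude, cos amplitude, damping ratio, natural frequency
p_true = np.array([1.0, -0.5, 0.8, 0.01, 4.0])
t = np.linspace(0.0, 20.0, 2000)

def fitfunc(p, t):
    """Decaying oscillation, same functional form as in get_period."""
    damped = p[4] * np.sqrt(1 - p[3] ** 2)
    return p[0] + np.exp(-p[3] * p[4] * t) * (p[1] * np.sin(damped * t) + p[2] * np.cos(damped * t))

rng = np.random.default_rng(0)
y = fitfunc(p_true, t) + 0.01 * rng.standard_normal(t.shape)   # noisy measurement

errfunc = lambda p, t, y: fitfunc(p, t) - y
p0 = np.array([0.9, -0.4, 0.7, 0.005, 3.5])                    # rough initial guess
p1, _ = leastsq(errfunc, p0, args=(t, y))

wd = p1[4] * np.sqrt(1 - p1[3] ** 2)   # damped natural frequency (rad/s)
T = 2 * np.pi / wd                     # period in seconds, equivalent to 1/fd in get_period
print(f"estimated period: {T:.4f} s")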
30.188571
205
0.588618
2,185
15,849
4.248055
0.232037
0.010235
0.007757
0.002155
0.108274
0.080694
0.075738
0.067981
0.067981
0.063241
0
0.020828
0.294151
15,849
524
206
30.246183
0.808885
0.409994
0
0.072034
0
0.004237
0.061606
0.017305
0
0
0
0
0
1
0.059322
false
0.004237
0.033898
0
0.152542
0.008475
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab11420721f9d57dfd242653355836e981c854b9
11,597
py
Python
tectosaur2/analyze.py
tbenthompson/BIE_tutorials
02cd56ab7e63e36afc4a10db17072076541aab77
[ "MIT" ]
1
2021-06-18T18:02:55.000Z
2021-06-18T18:02:55.000Z
tectosaur2/analyze.py
tbenthompson/BIE_tutorials
02cd56ab7e63e36afc4a10db17072076541aab77
[ "MIT" ]
null
null
null
tectosaur2/analyze.py
tbenthompson/BIE_tutorials
02cd56ab7e63e36afc4a10db17072076541aab77
[ "MIT" ]
1
2021-07-14T19:47:00.000Z
2021-07-14T19:47:00.000Z
import time import warnings import matplotlib.pyplot as plt import numpy as np import sympy as sp from .global_qbx import global_qbx_self from .mesh import apply_interp_mat, gauss_rule, panelize_symbolic_surface, upsample def find_dcutoff_refine(kernel, src, tol, plot=False): # prep step 1: find d_cutoff and d_refine # The goal is to estimate the error due to the QBX local patch # The local surface will have singularities at the tips where it is cut off # These singularities will cause error in the QBX expansion. We want to make # the local patch large enough that these singularities are irrelevant. # To isolate the QBX patch cutoff error, we will use a very high upsampling. # We'll also choose p to be the minimum allowed value since that will result in # the largest cutoff error. Increasing p will reduce the cutoff error guaranteeing that # we never need to worry about cutoff error. density = np.ones_like(src.pts[:, 0]) # np.cos(src.pts[:,0] * src.pts[:,1]) if plot: plt.figure(figsize=(9, 13)) params = [] d_cutoffs = [1.1, 1.3, 1.6, 2.0] ps = np.arange(1, 55, 3) for di, direction in enumerate([-1.0, 1.0]): baseline = global_qbx_self(kernel, src, p=30, kappa=8, direction=direction) baseline_v = baseline.dot(density) # Check that the local qbx method matches the simple global qbx approach when d_cutoff is very large d_refine_high = 8.0 with warnings.catch_warnings(): warnings.simplefilter("ignore") local_baseline = kernel.integrate( src.pts, src, d_cutoff=3.0, tol=1e-20, max_p=50, d_refine=d_refine_high, on_src_direction=direction, ) local_baseline_v = local_baseline.dot(density) err = np.max(np.abs(baseline_v - local_baseline_v)) print(err) assert err < tol / 2 n_qbx_panels = [] drefine_optimal = [] p_for_full_accuracy = [] if plot: plt.subplot(3, 2, 1 + di) for i_d, d_cutoff in enumerate(d_cutoffs): errs = [] for i_p, p in enumerate(ps): # print(p, d_cutoff) with warnings.catch_warnings(): warnings.simplefilter("ignore") test, report = kernel.integrate( src.pts, src, d_cutoff=d_cutoff, tol=1e-15, max_p=p, on_src_direction=direction, d_refine=d_refine_high, return_report=True, ) testv = test.dot(density) err = np.max(np.abs(baseline_v - testv)) errs.append(err) # print(p, err) if err < tol: for d_refine_decrease in np.arange(1.0, d_refine_high, 0.25): refine_test, refine_report = kernel.integrate( src.pts, src, d_cutoff=d_cutoff, tol=1e-15, max_p=p + 10, # Increase p here to have a refinement safety margin on_src_direction=direction, d_refine=d_refine_decrease, return_report=True, ) refine_testv = refine_test.dot(density) refine_err = np.max(np.abs(baseline_v - refine_testv)) if refine_err < tol: drefine_optimal.append(d_refine_decrease) n_qbx_panels.append(refine_report["n_qbx_panels"]) p_for_full_accuracy.append(p) break if len(n_qbx_panels) <= i_d: print(f"Failed to find parameters for {d_cutoff}") drefine_optimal.append(1000) n_qbx_panels.append(1e6) p_for_full_accuracy.append(1e3) break if plot: print(d_cutoff, errs) plt.plot(ps[: i_p + 1], np.log10(errs), label=str(d_cutoff)) params.append((direction, n_qbx_panels, drefine_optimal, p_for_full_accuracy)) if plot: plt.legend() plt.title("interior" if direction > 0 else "exterior") plt.xlabel(r"$p_{\textrm{max}}$") if di == 0: plt.ylabel(r"$\log_{10}(\textrm{error})$") plt.yticks(-np.arange(0, 16, 3)) plt.xticks(np.arange(0, 61, 10)) plt.ylim([-15, 0]) plt.subplot(3, 2, 3 + di) plt.plot(d_cutoffs, np.array(n_qbx_panels) / src.n_pts, "k-*") plt.xlabel(r"$d_{\textrm{cutoff}}$") plt.ylim([0, 8]) if di == 0: plt.ylabel("QBX panels per 
point") plt.subplot(3, 2, 5 + di) plt.plot(d_cutoffs, np.array(drefine_optimal), "k-*") plt.xlabel(r"$d_{\textrm{cutoff}}$") plt.ylim([0, 6]) if di == 0: plt.ylabel(r"$d_{\textrm{refine}}$") if plot: plt.tight_layout() plt.show() total_cost = 0 for i in [0, 1]: direction, n_qbx_panels, drefine_optimal, p_for_full_accuracy = params[i] appx_cost = ( np.array(p_for_full_accuracy) * np.array(n_qbx_panels) * np.array(drefine_optimal) ) if plot: print(direction, appx_cost) total_cost += appx_cost if plot: plt.plot(d_cutoffs, total_cost, "k-o") plt.show() best_idx = np.argmin(total_cost) d_cutoff = d_cutoffs[best_idx] d_refine = drefine_optimal[best_idx] return d_cutoff, d_refine # prep step 2: find the minimum distance at which integrals are computed # to the required tolerance def _find_d_up_helper(kernel, nq, max_curvature, start_d, tol, kappa): t = sp.var("t") n_panels = 2 while True: panel_edges = np.linspace(-1, 1, n_panels + 1) panel_bounds = np.stack((panel_edges[:-1], panel_edges[1:]), axis=1) circle = panelize_symbolic_surface( t, sp.cos(sp.pi * t), sp.sin(sp.pi * t), panel_bounds, *gauss_rule(nq) ) n_panels_new = np.max(circle.panel_length / max_curvature * circle.panel_radius) if n_panels_new <= n_panels: break n_panels = np.ceil(n_panels_new).astype(int) # print(f"\nusing {n_panels} panels with max_curvature={max_curvature}") circle_kappa, _ = upsample(circle, kappa) circle_upsample, interp_mat_upsample = upsample(circle_kappa, 2) # TODO: Write more about the underlying regularity assumptions!! # Why is it acceptable to use this test_density here? Empirically, any # well-resolved density has approximately the same error as integrating sin(x). # For example, integrating: 1, cos(x)^2. # If we integrate a poorly resolved density, we do see higher errors. # # How poorly resolved does the density need to be in order to see higher error? # It seems like an interpolation Linfinity error of around 1e-5 causes the d_up value to start to drift upwards. # # As a simple heuristic that seems to perform very well, we compute the # error when integrating a constant and then double the required distance # in order to account for integrands that are not quite so perfectly # resolved. # if assume_regularity: # omega = 1.0 # else: # omega = 999.0# / max_curvature # f = lambda x: np.sin(omega * x) # test_density = interp_mat_upsample.dot(f(circle.pts[:,0])) # test_density_upsampled = f(circle_upsample.pts[:,0]) # print('l2 err', np.linalg.norm(test_density - test_density_upsampled) / np.linalg.norm(test_density_upsampled)) # print('linf err', np.max(np.abs(test_density - test_density_upsampled))) # test_density = f(circle.pts[:,0]) # test_density = np.sin(999 * circle.pts[:,0]) test_density = np.ones(circle_kappa.n_pts) d_up = 0 for direction in [-1.0, 1.0]: d = start_d for i in range(50): # In actuality, we only need to test interior points because the curvature # of the surface ensures that more source panels are near the observation # points and, as a result, the error will be higher for any given value of d. L = np.repeat(circle_kappa.panel_length, circle_kappa.panel_order) dist = L * d test_pts = ( circle_kappa.pts + direction * circle_kappa.normals * dist[:, None] ) # Check to make sure that the closest distance to a source point is # truly `dist`. This check might fail if the interior test_pts are # crossing over into the other half of the circle. 
min_src_dist = np.min( np.linalg.norm((test_pts[:, None] - circle_kappa.pts[None, :]), axis=2), axis=1, ) if not np.allclose(min_src_dist, dist): return False, d upsample_mat = np.transpose( apply_interp_mat( kernel._direct(test_pts, circle_upsample), interp_mat_upsample ), (0, 2, 1), ) est_mat = np.transpose(kernel._direct(test_pts, circle_kappa), (0, 2, 1)) # err = np.max(np.abs(upsample_mat - est_mat).sum(axis=2)) err = np.max( np.abs(upsample_mat.dot(test_density) - est_mat.dot(test_density)) ) # print(d, err) if err < tol: d_up = max(d, d_up) break d *= 1.2 return True, d_up def find_d_up(kernel, nq, max_curvature, start_d, tol, kappa): d = start_d for i in range(10): d_up = _find_d_up_helper(kernel, nq, max_curvature * (0.8) ** i, d, tol, kappa) if d_up[0]: return d_up[1] d = d_up[1] def final_check(kernel, src): density = np.ones_like(src.pts[:, 0]) # np.cos(source.pts[:,0] * src.pts[:,1]) baseline = global_qbx_self(kernel, src, p=50, kappa=10, direction=1.0) baseline_v = baseline.dot(density) tols = 10.0 ** np.arange(0, -15, -1) errs = [] runtimes = [] for tol in tols: runs = [] for i in range(10): start = time.time() local_baseline, report = kernel.integrate( src.pts, src, tol=tol, on_src_direction=1.0, return_report=True, ) runs.append(time.time() - start) runtimes.append(np.min(runs)) local_baseline_v = local_baseline.dot(density) errs.append(np.max(np.abs(baseline_v - local_baseline_v))) # print(tol, errs[-1], runtime) # assert(np.max(np.abs(baseline_v-local_baseline_v)) < 5e-14) plt.figure(figsize=(9, 5)) plt.subplot(1, 2, 1) plt.plot(-np.log10(tols), np.log10(errs)) plt.subplot(1, 2, 2) plt.plot(-np.log10(tols), runtimes) plt.tight_layout() plt.show()
40.407666
117
0.553419
1,538
11,597
3.992198
0.216515
0.015961
0.014658
0.013029
0.287622
0.207492
0.186482
0.119218
0.088599
0.06645
0
0.024303
0.347159
11,597
286
118
40.548951
0.786686
0.241528
0
0.280952
0
0
0.024937
0.010295
0
0
0
0.003497
0.004762
1
0.019048
false
0
0.033333
0
0.071429
0.019048
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab132bee6e66bf3b92342ce521bb86ee76d01876
7,308
py
Python
people/losses-bkp.py
dluvizon/3d-pose-consensus
7a829d5713d2c45c6b265c9886add0b69e0050a8
[ "MIT" ]
5
2020-05-11T14:18:12.000Z
2022-03-10T12:10:17.000Z
people/losses-bkp.py
dluvizon/3d-pose-consensus
7a829d5713d2c45c6b265c9886add0b69e0050a8
[ "MIT" ]
null
null
null
people/losses-bkp.py
dluvizon/3d-pose-consensus
7a829d5713d2c45c6b265c9886add0b69e0050a8
[ "MIT" ]
null
null
null
def structural_loss_dst68j3d(p_pred, v_pred): v_pred = K.stop_gradient(v_pred) def getlength(v): return K.sqrt(K.sum(K.square(v), axis=-1)) """Arms segments""" joints_arms = p_pred[:, :, 16:37+1, :] conf_arms = v_pred[:, :, 16:37+1] diff_arms_r = joints_arms[:, :, 2:-1:2, :] - joints_arms[:, :, 0:-3:2, :] diff_arms_l = joints_arms[:, :, 3::2, :] - joints_arms[:, :, 1:-2:2, :] c2_arms_r = conf_arms[:, :, 2:-1:2] * conf_arms[:, :, 0:-3:2] c2_arms_l = conf_arms[:, :, 3::2] * conf_arms[:, :, 1:-2:2] """Legs segments""" joints_legs = p_pred[:, :, 48:67+1, :] conf_legs = v_pred[:, :, 48:67+1] diff_legs_r = joints_legs[:, :, 2:-1:2, :] - joints_legs[:, :, 0:-3:2, :] diff_legs_l = joints_legs[:, :, 3::2, :] - joints_legs[:, :, 1:-2:2, :] c2_legs_r = conf_legs[:, :, 2:-1:2] * conf_legs[:, :, 0:-3:2] c2_legs_l = conf_legs[:, :, 3::2] * conf_legs[:, :, 1:-2:2] """Limbs segments""" segs_limbs_r = getlength(K.concatenate([diff_arms_r, diff_legs_r], axis=-2)) segs_limbs_l = getlength(K.concatenate([diff_arms_l, diff_legs_l], axis=-2)) c2_limbs_r = K.concatenate([c2_arms_r, c2_legs_r], axis=-1) c2_limbs_l = K.concatenate([c2_arms_l, c2_legs_l], axis=-1) len_upperarm_r = K.sum(segs_limbs_r[:, :, 2:5], axis=-1, keepdims=True) len_upperarm_l = K.sum(segs_limbs_l[:, :, 2:5], axis=-1, keepdims=True) len_forearm_r = K.sum(segs_limbs_r[:, :, 5:8], axis=-1, keepdims=True) len_forearm_l = K.sum(segs_limbs_l[:, :, 5:8], axis=-1, keepdims=True) len_hand_r = K.sum(segs_limbs_r[:, :, 8:10], axis=-1, keepdims=True) len_hand_l = K.sum(segs_limbs_r[:, :, 8:10], axis=-1, keepdims=True) c2_upperarm_r = K.sum(c2_limbs_r[:, :, 2:5], axis=-1, keepdims=True) c2_upperarm_l = K.sum(c2_limbs_l[:, :, 2:5], axis=-1, keepdims=True) c2_forearm_r = K.sum(c2_limbs_r[:, :, 5:8], axis=-1, keepdims=True) c2_forearm_l = K.sum(c2_limbs_l[:, :, 5:8], axis=-1, keepdims=True) c2_hand_r = K.sum(c2_limbs_r[:, :, 8:10], axis=-1, keepdims=True) c2_hand_l = K.sum(c2_limbs_r[:, :, 8:10], axis=-1, keepdims=True) len_femur_r = K.sum(K.concatenate([ segs_limbs_r[:, :, 10:11], segs_limbs_r[:, :, 12:14], ], axis=-1), axis=-1, keepdims=True) len_femur_l = K.sum(K.concatenate([ segs_limbs_l[:, :, 10:11], segs_limbs_l[:, :, 12:14], ], axis=-1), axis=-1, keepdims=True) c2_femur_r = K.sum(K.concatenate([ c2_limbs_r[:, :, 10:11], c2_limbs_r[:, :, 12:14], ], axis=-1), axis=-1, keepdims=True) c2_femur_l = K.sum(K.concatenate([ c2_limbs_l[:, :, 10:11], c2_limbs_l[:, :, 12:14], ], axis=-1), axis=-1, keepdims=True) len_shin_r = K.sum(segs_limbs_r[:, :, 14:17], axis=-1, keepdims=True) len_shin_l = K.sum(segs_limbs_l[:, :, 14:17], axis=-1, keepdims=True) len_feet_r = K.sum(segs_limbs_r[:, :, 17:19], axis=-1, keepdims=True) len_feet_l = K.sum(segs_limbs_l[:, :, 17:19], axis=-1, keepdims=True) c2_shin_r = K.sum(c2_limbs_r[:, :, 14:17], axis=-1, keepdims=True) c2_shin_l = K.sum(c2_limbs_l[:, :, 14:17], axis=-1, keepdims=True) c2_feet_r = K.sum(c2_limbs_r[:, :, 17:19], axis=-1, keepdims=True) c2_feet_l = K.sum(c2_limbs_l[:, :, 17:19], axis=-1, keepdims=True) joints_head = K.concatenate([ p_pred[:, :, 11:11+1, :], p_pred[:, :, 11:11+1, :], p_pred[:, :, 12:15+1, :], p_pred[:, :, 8:8+1, :], p_pred[:, :, 8:8+1, :], p_pred[:, :, 14:15+1, :], ], axis=-2) conf_head = K.concatenate([ v_pred[:, :, 11:11+1], v_pred[:, :, 11:11+1], v_pred[:, :, 12:15+1], v_pred[:, :, 8:8+1], v_pred[:, :, 8:8+1], v_pred[:, :, 14:15+1], ], axis=-1) diff_head_r = joints_head[:, :, 2:-1:2, :] - joints_head[:, :, 0:-3:2, :] diff_head_l = joints_head[:, :, 3::2, :] - joints_head[:, :, 1:-2:2, :] 
c2_head_r = conf_head[:, :, 2:-1:2] * conf_head[:, :, 0:-3:2] c2_head_l = conf_head[:, :, 3::2] * conf_head[:, :, 1:-2:2] diff_cross_r = K.concatenate([ p_pred[:, :, 3:3+1, :] - p_pred[:, :, 20:20+1, :], p_pred[:, :, 49:49+1, :] - p_pred[:, :, 3:3+1, :], ], axis=-2) diff_cross_l = K.concatenate([ p_pred[:, :, 3:3+1, :] - p_pred[:, :, 21:21+1, :], p_pred[:, :, 48:48+1, :] - p_pred[:, :, 3:3+1, :], ], axis=-2) diff_spine = K.concatenate([ p_pred[:, :, 0:0+1, :] - p_pred[:, :, 7:7+1, :], # euclidean p_pred[:, :, 1:7+1, :] - p_pred[:, :, 0:6+1, :], # geodesic ], axis=-2) segs_spine = getlength(diff_spine) spine_euclidian = K.stop_gradient(segs_spine[:, :, :1]) len_spine = K.sum(segs_spine[:, :, 1:], axis=-1, keepdims=True) segs_midhead = getlength(p_pred[:, :, 9:11+1, :] - p_pred[:, :, 8:10+1, :]) len_midhead = K.sum(segs_midhead, axis=-1, keepdims=True) segs_ears = getlength(K.concatenate([ p_pred[:, :, 12:12+1, :] - p_pred[:, :, 14:14+1, :], p_pred[:, :, 9:9+1, :] - p_pred[:, :, 12:12+1, :], p_pred[:, :, 13:13+1, :] - p_pred[:, :, 9:9+1, :], p_pred[:, :, 15:15+1, :] - p_pred[:, :, 13:13+1, :] ], axis=-2)) len_ears = K.sum(segs_ears, axis=-1, keepdims=True) len_cross_r = K.sum(getlength(diff_cross_r), axis=-1, keepdims=True) len_cross_l = K.sum(getlength(diff_cross_l), axis=-1, keepdims=True) ref_length = K.stop_gradient( K.clip((len_cross_r + len_cross_l) / 2., 0.1, 1.)) """Reference lengths based on ground truth poses from Human3.6M: Spine wrt. ref: 0.715 (0.032 std.) Spine wrt. euclidean: 1.430 (maximum) (0.046 std.) MidHead wrt. ref: 0.266 (0.019 std.) Shoulder wrt. ref: 0.150 (?? std.) Upper arms wrt. ref: 0.364 (0.019 std.) Fore arms wrt. ref: 0.326 (0.025 std.) Hands wrt. ref: 0.155 (0.014 std.) Femur wrt. ref: 0.721 (0.040 std.) Shin wrt. ref: 0.549 (0.063 std.) Feet wrt. ref: 0.294 (0.060 std.) """ rules_loss = K.concatenate([ c2_limbs_r * c2_limbs_l * (segs_limbs_r - segs_limbs_l), len_spine - 0.715 * ref_length, len_midhead - 0.266 * ref_length, c2_upperarm_r * (len_upperarm_r - 0.364 * ref_length), c2_upperarm_l * (len_upperarm_l - 0.364 * ref_length), c2_forearm_r * (len_forearm_r - 0.326 * ref_length), c2_forearm_l * (len_forearm_l - 0.326 * ref_length), c2_hand_r * (len_hand_r - 0.155 * ref_length), c2_hand_l * (len_hand_l - 0.155 * ref_length), c2_femur_r * (len_femur_r - 0.721 * ref_length), c2_femur_l * (len_femur_l - 0.721 * ref_length), c2_shin_r * (len_shin_r - 0.549 * ref_length), c2_shin_l * (len_shin_l - 0.549 * ref_length), c2_feet_r * (len_feet_r - 0.294 * ref_length), c2_feet_l * (len_feet_l - 0.294 * ref_length), len_ears - 0.213 * ref_length, ], axis=-1) rules = K.sum(K.square(rules_loss), axis=-1) spine_bent = K.squeeze(K.maximum(0., len_spine - 1.430 * spine_euclidian), axis=-1) return K.mean(spine_bent + rules, axis=-1)
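The structural loss above is essentially a set of soft constraints: mirrored left/right segment lengths should agree, and each segment length should stay close to a fixed fraction of a reference length (the ratios quoted in the docstring, e.g. 0.364 for the upper arm and 0.326 for the forearm). The small NumPy illustration below conveys that idea only; it is not the Keras-backend, confidence-weighted implementation used in the file, and the joint coordinates are made up.

import numpy as np

def seg_lengths(joints):
    """Euclidean lengths of consecutive joint-to-joint segments; joints: (n_joints, 3)."""
    return np.linalg.norm(np.diff(joints, axis=0), axis=-1)

# Toy right/left arm chains (shoulder -> elbow -> wrist), arbitrary units.
right_arm = np.array([[0.0, 0.0, 0.0], [0.30, 0.0, 0.0], [0.58, 0.0, 0.0]])
left_arm = np.array([[0.0, 0.2, 0.0], [0.32, 0.2, 0.0], [0.55, 0.2, 0.0]])

len_r = seg_lengths(right_arm)
len_l = seg_lengths(left_arm)

ref_length = 1.0                              # stand-in for the shoulder/hip "cross" reference
expected_fraction = np.array([0.364, 0.326])  # upper-arm / forearm ratios quoted in the docstring

# Symmetry term: squared difference between right and left segment lengths.
symmetry_penalty = np.sum((len_r - len_l) ** 2)
# Proportion term: squared deviation from the reference-length fractions.
proportion_penalty = np.sum((len_r - expected_fraction * ref_length) ** 2)

print(symmetry_penalty + proportion_penalty)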
44.024096
80
0.545293
1,218
7,308
2.993432
0.091954
0.056226
0.103401
0.135217
0.468733
0.376577
0.266868
0.236972
0.103127
0.075151
0
0.102684
0.240421
7,308
165
81
44.290909
0.554134
0.002463
0
0.081967
0
0
0
0
0
0
0
0
0
1
0.016393
false
0
0
0.008197
0.032787
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab1a1e11ddf7bd7dae943e8668ed1a5ba0c14a72
3,848
py
Python
applications/cli/commands/model/tests/test_export.py
nparkstar/nauta
1bda575a01f782d1dc2cd5221122651f184f7167
[ "Apache-2.0" ]
390
2019-01-23T09:07:00.000Z
2022-02-20T04:03:34.000Z
applications/cli/commands/model/tests/test_export.py
nparkstar/nauta
1bda575a01f782d1dc2cd5221122651f184f7167
[ "Apache-2.0" ]
52
2019-01-31T12:17:30.000Z
2022-02-10T00:01:39.000Z
applications/cli/commands/model/tests/test_export.py
nparkstar/nauta
1bda575a01f782d1dc2cd5221122651f184f7167
[ "Apache-2.0" ]
66
2019-01-23T18:59:39.000Z
2020-10-18T15:24:00.000Z
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from click.testing import CliRunner

from cli_text_consts import ModelExportCmdTexts as Texts
from commands.model.common import workflow_description
from commands.model.export import export
from platform_resources.workflow import ArgoWorkflow, QUEUED_PHASE

FEM_NAME = "EXPORT_1"
SEM_NAME = "EXPORT_2"
FEM_PARAMETERS = "PARAMS_1"
SEM_PARAMETERS = "PARAMS_2"

FEM_START_DATE = '2000-01-01'
FEM_NAMESPACE = 'test-namespace'

TEST_AGROWORKFLOW = ArgoWorkflow(name=FEM_NAME, started_at=FEM_START_DATE, finished_at=None,
                                 namespace=FEM_NAMESPACE, phase=None)

TWO_MODEL_OUTPUT = [workflow_description(name=FEM_NAME, parameters=FEM_PARAMETERS),
                    workflow_description(name=SEM_NAME, parameters=SEM_PARAMETERS)]


def setup_mocks(mocker):
    mocker.patch('commands.model.export.get_kubectl_current_context_namespace',
                 return_value='fake-namespace')
    mocker.patch('platform_resources.workflow.ArgoWorkflow.from_yaml',
                 return_value=mocker.MagicMock())
    mocker.patch('platform_resources.workflow.ArgoWorkflow.get', return_value=TEST_AGROWORKFLOW)
    mocker.patch('os.listdir', return_value=['openvino.yaml', 'tensorflow.yaml', 'some_other_file'])
    mocker.patch('commands.model.export.NAUTAConfigMap',
                 return_value=mocker.MagicMock(registry='fake-addr'))
    mocker.patch('commands.model.export.Config')
    mocker.patch('os.path.isdir', return_value=True)


def test_export(mocker):
    setup_mocks(mocker)

    result = CliRunner().invoke(export, ["/fake/path", "openvino"])

    assert result.exit_code == 0
    assert "Successfully created export workflow" in result.output
    assert QUEUED_PHASE in result.output
    assert FEM_NAME in result.output
    assert FEM_START_DATE in result.output
    assert FEM_NAMESPACE in result.output


def test_export_inexistent_format(mocker):
    setup_mocks(mocker)

    result = CliRunner().invoke(export, ["/fake/path", "bad"])

    assert result.exit_code == 2
    assert "Format: bad does not exist. Choose from:" in result.output


def test_export_failure(mocker):
    setup_mocks(mocker)
    mocker.patch('platform_resources.workflow.ArgoWorkflow.from_yaml',
                 return_value=mocker.MagicMock(create=lambda: RuntimeError))

    result = CliRunner().invoke(export, ["/fake/path", "openvino"])

    assert result.exit_code == 1
    assert "Failed to create export workflow" in result.output


def test_export_list(mocker):
    mocker.patch("commands.model.export.get_list_of_workflows", return_value=TWO_MODEL_OUTPUT)

    result = CliRunner().invoke(export, ["formats"])

    assert FEM_NAME in result.output
    assert SEM_NAME in result.output
    assert FEM_PARAMETERS in result.output
    assert SEM_PARAMETERS in result.output


def test_export_list_error(mocker):
    mocker.patch("commands.model.export.get_list_of_workflows", side_effect=RuntimeError)

    result = CliRunner().invoke(export, ["formats"])

    assert Texts.EXPORT_LIST_ERROR_MSG in result.output


def test_export_missing_format(mocker):
    setup_mocks(mocker)

    result = CliRunner().invoke(export, ["wrong-option"])

    assert Texts.MISSING_EXPORT_FORMAT.format(formats=["openvino", "tensorflow"]) in result.output
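All of these tests follow one pattern: patch the command's collaborators with pytest-mock's mocker fixture, invoke the click command through CliRunner, then assert on exit_code and output. A self-contained toy example of that pattern (the export_demo command below is invented for illustration and is unrelated to nauta's real export command) looks like:

import click
from click.testing import CliRunner


@click.command()
@click.argument('fmt')
def export_demo(fmt):
    """Toy command standing in for a real CLI entry point."""
    if fmt not in ('openvino', 'tensorflow'):
        click.echo(f"Format: {fmt} does not exist.")
        raise SystemExit(2)
    click.echo("Successfully created export workflow")


def test_export_demo_ok():
    result = CliRunner().invoke(export_demo, ["openvino"])
    assert result.exit_code == 0
    assert "Successfully created export workflow" in result.output


def test_export_demo_bad_format():
    result = CliRunner().invoke(export_demo, ["bad"])
    assert result.exit_code == 2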
35.62963
109
0.747401
505
3,848
5.504951
0.312871
0.03741
0.065468
0.05036
0.414748
0.33741
0.253597
0.193525
0.193525
0.171583
0
0.007075
0.155146
3,848
107
110
35.962617
0.848047
0.14527
0
0.193548
0
0
0.213566
0.107852
0
0
0
0
0.258065
1
0.112903
false
0
0.080645
0
0.193548
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab1a9a9be99684f3bafd7d5cd35569aa18f68f49
786
py
Python
lesley-byte/graphpressure.py
lesley-byte/enviroplus-python
df08c238c8b550c9041ff06a0b6bef6b330af611
[ "MIT" ]
null
null
null
lesley-byte/graphpressure.py
lesley-byte/enviroplus-python
df08c238c8b550c9041ff06a0b6bef6b330af611
[ "MIT" ]
null
null
null
lesley-byte/graphpressure.py
lesley-byte/enviroplus-python
df08c238c8b550c9041ff06a0b6bef6b330af611
[ "MIT" ]
null
null
null
from requests import get
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import datetime as dt
from bme280 import BME280

try:
    from smbus2 import SMBus
except ImportError:
    from smbus import SMBus

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
xs = []
ys = []

bus = SMBus(1)
bme280 = BME280(i2c_dev=bus)


def animate(i, xs, ys):
    pressure = bme280.get_pressure()
    xs.append(dt.datetime.now().strftime('%H:%M:%S'))
    ys.append(pressure)
    xs = xs[-20:]
    ys = ys[-20:]
    ax.clear()
    ax.plot(xs, ys)
    plt.xticks(rotation=45, ha='right')
    plt.subplots_adjust(bottom=0.30)
    plt.title('Pressure over time')
    plt.ylabel("pressure")


ani = animation.FuncAnimation(fig, animate, fargs=(xs, ys), interval=60000)
plt.show()
20.153846
75
0.670483
118
786
4.432203
0.516949
0.030593
0
0
0
0
0
0
0
0
0
0.054859
0.188295
786
38
76
20.684211
0.76489
0
0
0
0
0
0.049808
0
0
0
0
0
0
1
0.034483
false
0
0.275862
0
0.310345
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab1ab02c6fe0df3ffafd8d3c0b4bb24aea453027
5,912
py
Python
bootstrapvz/plugins/ova/tasks.py
brett-smith/bootstrap-vz
2eaa98db684b85186f3ecd6e5d1304aaceca6b73
[ "Apache-2.0" ]
null
null
null
bootstrapvz/plugins/ova/tasks.py
brett-smith/bootstrap-vz
2eaa98db684b85186f3ecd6e5d1304aaceca6b73
[ "Apache-2.0" ]
null
null
null
bootstrapvz/plugins/ova/tasks.py
brett-smith/bootstrap-vz
2eaa98db684b85186f3ecd6e5d1304aaceca6b73
[ "Apache-2.0" ]
null
null
null
from bootstrapvz.base import Task from bootstrapvz.common import phases from bootstrapvz.common.tasks import workspace import os import shutil assets = os.path.normpath(os.path.join(os.path.dirname(__file__), 'assets')) class CheckOVAPath(Task): description = 'Checking if the OVA file already exists' phase = phases.preparation @classmethod def run(cls, info): ova_basename = info.manifest.name.format(**info.manifest_vars) ova_name = ova_basename + '.ova' ova_path = os.path.join(info.manifest.bootstrapper['workspace'], ova_name) if os.path.exists(ova_path): from bootstrapvz.common.exceptions import TaskError msg = 'The OVA `{name}\' already exists at `{path}\''.format(name=ova_name, path=ova_path) raise TaskError(msg) info._ova['ova_basename'] = ova_basename info._ova['ova_name'] = ova_name info._ova['ova_path'] = ova_path class CreateOVADir(Task): description = 'Creating directory for the OVA' phase = phases.preparation predecessors = [workspace.CreateWorkspace, CheckOVAPath] @classmethod def run(cls, info): info._ova['folder'] = os.path.join(info.workspace, 'ova') os.mkdir(info._ova['folder']) class PackageOVA(Task): description = 'Packaging the volume as an OVA' phase = phases.image_registration @classmethod def run(cls, info): import random mac_address = '080027{mac:06X}'.format(mac=random.randrange(16 ** 6)) from bootstrapvz.common.tools import log_check_call disk_name = info._ova['ova_basename'] + '.' + info.volume.extension disk_link = os.path.join(info._ova['folder'], disk_name) log_check_call(['ln', '-s', info.volume.image_path, disk_link]) ovf_path = os.path.join(info._ova['folder'], info._ova['ova_basename'] + '.ovf') cls.write_ovf(info, ovf_path, mac_address, disk_name) ova_files = os.listdir(info._ova['folder']) log_check_call(['ovftool', ovf_path, info._ova['ova_path']] ) import logging logging.getLogger(__name__).info('The OVA has been placed at ' + info._ova['ova_path']) @classmethod def write_ovf(cls, info, destination, mac_address, disk_name): namespaces = {'ovf': 'http://schemas.dmtf.org/ovf/envelope/1', 'rasd': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData', 'vssd': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData', 'xsi': 'http://www.w3.org/2001/XMLSchema-instance', 'vbox': 'http://www.virtualbox.org/ovf/machine', } def attr(element, name, value=None): for prefix, ns in namespaces.iteritems(): name = name.replace(prefix + ':', '{' + ns + '}') if value is None: return element.attrib[name] else: element.attrib[name] = str(value) template_path = os.path.join(assets, 'default.ovf') if 'ovf' in info.manifest.plugins['ova']: template_path = info.manifest.plugins['ova']['ovf'] import xml.etree.ElementTree as ET template = ET.parse(template_path) root = template.getroot() [disk_ref] = root.findall('./ovf:References/ovf:File', namespaces) attr(disk_ref, 'ovf:href', disk_name) # List of OVF disk format URIs # Snatched from VBox source (src/VBox/Main/src-server/ApplianceImpl.cpp:47) # ISOURI = "http://www.ecma-international.org/publications/standards/Ecma-119.htm" # VMDKStreamURI = "http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" # VMDKSparseURI = "http://www.vmware.com/specifications/vmdk.html#sparse" # VMDKCompressedURI = "http://www.vmware.com/specifications/vmdk.html#compressed" # VMDKCompressedURI2 = "http://www.vmware.com/interfaces/specifications/vmdk.html#compressed" # VHDURI = "http://go.microsoft.com/fwlink/?LinkId=137171" volume_uuid = info.volume.get_uuid() [disk] = 
root.findall('./ovf:DiskSection/ovf:Disk', namespaces) attr(disk, 'ovf:capacity', info.volume.size.bytes.get_qty_in('B')) attr(disk, 'ovf:format', info.volume.ovf_uri) attr(disk, 'vbox:uuid', volume_uuid) [system] = root.findall('./ovf:VirtualSystem', namespaces) attr(system, 'ovf:id', info._ova['ova_basename']) # Set the operating system [os_section] = system.findall('./ovf:OperatingSystemSection', namespaces) os_info = {'i386': {'id': 96, 'name': 'Debian'}, 'amd64': {'id': 96, 'name': 'Debian_64'} }.get(info.manifest.system['architecture']) attr(os_section, 'ovf:id', os_info['id']) [os_desc] = os_section.findall('./ovf:Description', namespaces) os_desc.text = os_info['name'] [os_type] = os_section.findall('./vbox:OSType', namespaces) os_type.text = os_info['name'] [sysid] = system.findall('./ovf:VirtualHardwareSection/ovf:System/' 'vssd:VirtualSystemIdentifier', namespaces) sysid.text = info._ova['ova_basename'] [machine] = system.findall('./vbox:Machine', namespaces) import uuid del machine.attrib['uuid'] attr(machine, 'uuid', uuid.uuid4()) del machine.attrib['name'] attr(machine, 'name', info._ova['ova_basename']) from datetime import datetime del machine.attrib['lastStateChange'] attr(machine, 'lastStateChange', datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')) [nic] = machine.findall('./ovf:Hardware/ovf:Network/ovf:Adapter', namespaces) attr(machine, 'MACAddress', mac_address) [device_img] = machine.findall('./ovf:StorageControllers' '/ovf:StorageController[1]' '/ovf:AttachedDevice/ovf:Image', namespaces) attr(device_img, 'uuid', '{' + str(volume_uuid) + '}') template.write(destination, xml_declaration=True) # , default_namespace=namespaces['ovf'] class RemoveOVADir(Task): description = 'Removing the OVA directory' phase = phases.cleaning successors = [workspace.DeleteWorkspace] @classmethod def run(cls, info): shutil.rmtree(info._ova['folder']) del info._ova['folder']
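PackageOVA.write_ovf edits the OVF template by rewriting namespace-qualified attributes: the local attr helper expands prefixes such as ovf: into ElementTree's {namespace-URI} form before reading or writing (the original helper uses Python 2's iteritems). The following small, self-contained illustration of that technique uses a toy XML snippet rather than the real OVF template:

import xml.etree.ElementTree as ET

namespaces = {'ovf': 'http://schemas.dmtf.org/ovf/envelope/1'}

def attr(element, name, value=None):
    """Read or write an attribute whose name uses a prefix from `namespaces`."""
    for prefix, ns in namespaces.items():
        name = name.replace(prefix + ':', '{' + ns + '}')
    if value is None:
        return element.attrib[name]
    element.attrib[name] = str(value)

xml = '<root xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><File ovf:href="old.vmdk"/></root>'
root = ET.fromstring(xml)
[file_ref] = root.findall('./File')

attr(file_ref, 'ovf:href', 'disk1.vmdk')   # rewrite the qualified attribute
print(attr(file_ref, 'ovf:href'))          # -> disk1.vmdk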
39.152318
113
0.692321
761
5,912
5.243101
0.310118
0.031579
0.025063
0.027068
0.115789
0.075188
0.063659
0.044612
0.020551
0.020551
0
0.009158
0.150372
5,912
150
114
39.413333
0.785188
0.107409
0
0.097345
0
0.017699
0.22881
0.049981
0
0
0
0
0
1
0.053097
false
0
0.106195
0
0.292035
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab1ab3780950be34d6065669fa02273afffb05ab
3,498
py
Python
docs/conf.py
PhilippJunk/homelette
d6e585a215d7eef75ef6c837d1faf2d0ad8025c1
[ "MIT" ]
null
null
null
docs/conf.py
PhilippJunk/homelette
d6e585a215d7eef75ef6c837d1faf2d0ad8025c1
[ "MIT" ]
null
null
null
docs/conf.py
PhilippJunk/homelette
d6e585a215d7eef75ef6c837d1faf2d0ad8025c1
[ "MIT" ]
null
null
null
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import shutil
import sys
sys.path.insert(0, os.path.abspath('..'))

# -- Project information -----------------------------------------------------

project = 'homelette'
copyright = '2021, Philipp Junk, Christina Kiel'
author = 'Philipp Junk, Christina Kiel'

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon',
    'nbsphinx',
    'sphinx_rtd_theme',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_logo = 'logo.png'
html_theme_options = {
    'logo_only': False,
    'style_nav_header_background': '#000000',
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#
html_static_path = ['_static']

# -- Options for LaTex output ------------------------------------------------

latex_elements = {
    'preamble': r'''
\setcounter{tocdepth}{1}
\renewcommand{\hyperref}[2][]{#2}
'''
}

# -- Extension configuration: autodoc ----------------------------------------

autodoc_default_options = {
    'member-order': 'bysource',
}
autoclass_content = 'class'
autodoc_mock_imports = ['altmod', 'modeller', 'ost', 'promod3', 'qmean', 'pandas']

# -- Extension configuration: napoleon ---------------------------------------

napoleon_use_ivar = True

# -- Copy notebooks to include in the documentation --------------------------

notebooks = [
    '../examples/Tutorial1_Basics.ipynb',
    '../examples/Tutorial2_Modelling.ipynb',
    '../examples/Tutorial3_Evaluation.ipynb',
    '../examples/Tutorial4_ExtendingHomelette.ipynb',
    '../examples/Tutorial5_Parallelization.ipynb',
    '../examples/Tutorial6_ComplexModelling.ipynb',
    '../examples/Tutorial7_AssemblingPipelines.ipynb',
    '../examples/Tutorial8_AlignmentGeneration.ipynb',
]

for notebook in notebooks:
    if os.path.exists(notebook):
        shutil.copy(notebook, '.')

# -- Copy logo ---------------------------------------------------------------

if os.path.exists('../logo/logo.png'):
    shutil.copy('../logo/logo.png', '.')
33.314286
79
0.616638
384
3,498
5.513021
0.481771
0.042985
0.015588
0.022674
0.046292
0
0
0
0
0
0
0.007797
0.156661
3,498
104
80
33.634615
0.709831
0.543168
0
0
0
0
0.46901
0.268371
0
0
0
0.009615
0
1
0
false
0
0.081633
0
0.081633
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab1c5aded9a853b37a00d0b031cb2cff207d2b22
15,055
py
Python
netbox/extras/forms.py
orphanedgamboa/netbox
5cdc38ec3adb5278480b267a6c8e674e9d3fca39
[ "Apache-2.0" ]
1
2021-05-01T18:16:37.000Z
2021-05-01T18:16:37.000Z
netbox/extras/forms.py
orphanedgamboa/netbox
5cdc38ec3adb5278480b267a6c8e674e9d3fca39
[ "Apache-2.0" ]
null
null
null
netbox/extras/forms.py
orphanedgamboa/netbox
5cdc38ec3adb5278480b267a6c8e674e9d3fca39
[ "Apache-2.0" ]
null
null
null
from django import forms from django.contrib.auth.models import User from django.contrib.contenttypes.models import ContentType from django.utils.safestring import mark_safe from django.utils.translation import gettext as _ from dcim.models import DeviceRole, DeviceType, Platform, Region, Site, SiteGroup from tenancy.models import Tenant, TenantGroup from utilities.forms import ( add_blank_choice, APISelectMultiple, BootstrapMixin, BulkEditForm, BulkEditNullBooleanSelect, ColorSelect, CommentField, ContentTypeMultipleChoiceField, CSVModelForm, DateTimePicker, DynamicModelMultipleChoiceField, JSONField, SlugField, StaticSelect2, BOOLEAN_WITH_BLANK_CHOICES, ) from virtualization.models import Cluster, ClusterGroup from .choices import * from .models import ConfigContext, CustomField, ImageAttachment, JournalEntry, ObjectChange, Tag from .utils import FeatureQuery # # Custom fields # class CustomFieldForm(forms.Form): """ Extend Form to include custom field support. """ model = None def __init__(self, *args, **kwargs): if self.model is None: raise NotImplementedError("CustomFieldForm must specify a model class.") self.custom_fields = [] super().__init__(*args, **kwargs) # Append relevant custom fields to the form instance obj_type = ContentType.objects.get_for_model(self.model) for cf in CustomField.objects.filter(content_types=obj_type): field_name = 'cf_{}'.format(cf.name) self.fields[field_name] = cf.to_form_field() # Annotate the field in the list of CustomField form fields self.custom_fields.append(field_name) class CustomFieldModelForm(forms.ModelForm): """ Extend ModelForm to include custom field support. """ def __init__(self, *args, **kwargs): self.obj_type = ContentType.objects.get_for_model(self._meta.model) self.custom_fields = [] super().__init__(*args, **kwargs) self._append_customfield_fields() def _append_customfield_fields(self): """ Append form fields for all CustomFields assigned to this model. 
""" # Append form fields; assign initial values if modifying and existing object for cf in CustomField.objects.filter(content_types=self.obj_type): field_name = 'cf_{}'.format(cf.name) if self.instance.pk: self.fields[field_name] = cf.to_form_field(set_initial=False) self.fields[field_name].initial = self.instance.custom_field_data.get(cf.name) else: self.fields[field_name] = cf.to_form_field() # Annotate the field in the list of CustomField form fields self.custom_fields.append(field_name) def clean(self): # Save custom field data on instance for cf_name in self.custom_fields: self.instance.custom_field_data[cf_name[3:]] = self.cleaned_data.get(cf_name) return super().clean() class CustomFieldModelCSVForm(CSVModelForm, CustomFieldModelForm): def _append_customfield_fields(self): # Append form fields for cf in CustomField.objects.filter(content_types=self.obj_type): field_name = 'cf_{}'.format(cf.name) self.fields[field_name] = cf.to_form_field(for_csv_import=True) # Annotate the field in the list of CustomField form fields self.custom_fields.append(field_name) class CustomFieldBulkEditForm(BulkEditForm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.custom_fields = [] self.obj_type = ContentType.objects.get_for_model(self.model) # Add all applicable CustomFields to the form custom_fields = CustomField.objects.filter(content_types=self.obj_type) for cf in custom_fields: # Annotate non-required custom fields as nullable if not cf.required: self.nullable_fields.append(cf.name) self.fields[cf.name] = cf.to_form_field(set_initial=False, enforce_required=False) # Annotate this as a custom field self.custom_fields.append(cf.name) class CustomFieldFilterForm(forms.Form): def __init__(self, *args, **kwargs): self.obj_type = ContentType.objects.get_for_model(self.model) super().__init__(*args, **kwargs) # Add all applicable CustomFields to the form custom_fields = CustomField.objects.filter(content_types=self.obj_type).exclude( filter_logic=CustomFieldFilterLogicChoices.FILTER_DISABLED ) for cf in custom_fields: field_name = 'cf_{}'.format(cf.name) self.fields[field_name] = cf.to_form_field(set_initial=True, enforce_required=False) # # Tags # class TagForm(BootstrapMixin, forms.ModelForm): slug = SlugField() class Meta: model = Tag fields = [ 'name', 'slug', 'color', 'description' ] fieldsets = ( ('Tag', ('name', 'slug', 'color', 'description')), ) class TagCSVForm(CSVModelForm): slug = SlugField() class Meta: model = Tag fields = Tag.csv_headers help_texts = { 'color': mark_safe('RGB color in hexadecimal (e.g. 
<code>00ff00</code>)'), } class AddRemoveTagsForm(forms.Form): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Add add/remove tags fields self.fields['add_tags'] = DynamicModelMultipleChoiceField( queryset=Tag.objects.all(), required=False ) self.fields['remove_tags'] = DynamicModelMultipleChoiceField( queryset=Tag.objects.all(), required=False ) class TagFilterForm(BootstrapMixin, forms.Form): model = Tag q = forms.CharField( required=False, label=_('Search') ) content_type_id = ContentTypeMultipleChoiceField( queryset=ContentType.objects.filter(FeatureQuery('tags').get_query()), required=False, label=_('Tagged object type') ) class TagBulkEditForm(BootstrapMixin, BulkEditForm): pk = forms.ModelMultipleChoiceField( queryset=Tag.objects.all(), widget=forms.MultipleHiddenInput ) color = forms.CharField( max_length=6, required=False, widget=ColorSelect() ) description = forms.CharField( max_length=200, required=False ) class Meta: nullable_fields = ['description'] # # Config contexts # class ConfigContextForm(BootstrapMixin, forms.ModelForm): regions = DynamicModelMultipleChoiceField( queryset=Region.objects.all(), required=False ) site_groups = DynamicModelMultipleChoiceField( queryset=SiteGroup.objects.all(), required=False ) sites = DynamicModelMultipleChoiceField( queryset=Site.objects.all(), required=False ) device_types = DynamicModelMultipleChoiceField( queryset=DeviceType.objects.all(), required=False ) roles = DynamicModelMultipleChoiceField( queryset=DeviceRole.objects.all(), required=False ) platforms = DynamicModelMultipleChoiceField( queryset=Platform.objects.all(), required=False ) cluster_groups = DynamicModelMultipleChoiceField( queryset=ClusterGroup.objects.all(), required=False ) clusters = DynamicModelMultipleChoiceField( queryset=Cluster.objects.all(), required=False ) tenant_groups = DynamicModelMultipleChoiceField( queryset=TenantGroup.objects.all(), required=False ) tenants = DynamicModelMultipleChoiceField( queryset=Tenant.objects.all(), required=False ) tags = DynamicModelMultipleChoiceField( queryset=Tag.objects.all(), required=False ) data = JSONField( label='' ) class Meta: model = ConfigContext fields = ( 'name', 'weight', 'description', 'is_active', 'regions', 'site_groups', 'sites', 'roles', 'device_types', 'platforms', 'cluster_groups', 'clusters', 'tenant_groups', 'tenants', 'tags', 'data', ) class ConfigContextBulkEditForm(BootstrapMixin, BulkEditForm): pk = forms.ModelMultipleChoiceField( queryset=ConfigContext.objects.all(), widget=forms.MultipleHiddenInput ) weight = forms.IntegerField( required=False, min_value=0 ) is_active = forms.NullBooleanField( required=False, widget=BulkEditNullBooleanSelect() ) description = forms.CharField( required=False, max_length=100 ) class Meta: nullable_fields = [ 'description', ] class ConfigContextFilterForm(BootstrapMixin, forms.Form): field_order = [ 'q', 'region_id', 'site_group_id', 'site_id', 'role_id', 'platform_id', 'cluster_group_id', 'cluster_id', 'tenant_group_id', 'tenant_id', ] q = forms.CharField( required=False, label=_('Search') ) region_id = DynamicModelMultipleChoiceField( queryset=Region.objects.all(), required=False, label=_('Regions') ) site_group_id = DynamicModelMultipleChoiceField( queryset=SiteGroup.objects.all(), required=False, label=_('Site groups') ) site_id = DynamicModelMultipleChoiceField( queryset=Site.objects.all(), required=False, label=_('Sites') ) device_type_id = DynamicModelMultipleChoiceField( queryset=DeviceType.objects.all(), required=False, 
label=_('Device types') ) role_id = DynamicModelMultipleChoiceField( queryset=DeviceRole.objects.all(), required=False, label=_('Roles') ) platform_id = DynamicModelMultipleChoiceField( queryset=Platform.objects.all(), required=False, label=_('Platforms') ) cluster_group_id = DynamicModelMultipleChoiceField( queryset=ClusterGroup.objects.all(), required=False, label=_('Cluster groups') ) cluster_id = DynamicModelMultipleChoiceField( queryset=Cluster.objects.all(), required=False, label=_('Clusters') ) tenant_group_id = DynamicModelMultipleChoiceField( queryset=TenantGroup.objects.all(), required=False, label=_('Tenant groups') ) tenant_id = DynamicModelMultipleChoiceField( queryset=Tenant.objects.all(), required=False, label=_('Tenant') ) tag = DynamicModelMultipleChoiceField( queryset=Tag.objects.all(), to_field_name='slug', required=False, label=_('Tags') ) # # Filter form for local config context data # class LocalConfigContextFilterForm(forms.Form): local_context_data = forms.NullBooleanField( required=False, label=_('Has local config context data'), widget=StaticSelect2( choices=BOOLEAN_WITH_BLANK_CHOICES ) ) # # Image attachments # class ImageAttachmentForm(BootstrapMixin, forms.ModelForm): class Meta: model = ImageAttachment fields = [ 'name', 'image', ] # # Journal entries # class JournalEntryForm(BootstrapMixin, forms.ModelForm): comments = CommentField() class Meta: model = JournalEntry fields = ['assigned_object_type', 'assigned_object_id', 'kind', 'comments'] widgets = { 'assigned_object_type': forms.HiddenInput, 'assigned_object_id': forms.HiddenInput, } class JournalEntryBulkEditForm(BootstrapMixin, BulkEditForm): pk = forms.ModelMultipleChoiceField( queryset=JournalEntry.objects.all(), widget=forms.MultipleHiddenInput ) kind = forms.ChoiceField( choices=JournalEntryKindChoices, required=False ) comments = forms.CharField( required=False, widget=forms.Textarea() ) class Meta: nullable_fields = [] class JournalEntryFilterForm(BootstrapMixin, forms.Form): model = JournalEntry q = forms.CharField( required=False, label=_('Search') ) created_after = forms.DateTimeField( required=False, label=_('After'), widget=DateTimePicker() ) created_before = forms.DateTimeField( required=False, label=_('Before'), widget=DateTimePicker() ) created_by_id = DynamicModelMultipleChoiceField( queryset=User.objects.all(), required=False, label=_('User'), widget=APISelectMultiple( api_url='/api/users/users/', ) ) assigned_object_type_id = DynamicModelMultipleChoiceField( queryset=ContentType.objects.all(), required=False, label=_('Object Type'), widget=APISelectMultiple( api_url='/api/extras/content-types/', ) ) kind = forms.ChoiceField( choices=add_blank_choice(JournalEntryKindChoices), required=False, widget=StaticSelect2() ) # # Change logging # class ObjectChangeFilterForm(BootstrapMixin, forms.Form): model = ObjectChange q = forms.CharField( required=False, label=_('Search') ) time_after = forms.DateTimeField( required=False, label=_('After'), widget=DateTimePicker() ) time_before = forms.DateTimeField( required=False, label=_('Before'), widget=DateTimePicker() ) action = forms.ChoiceField( choices=add_blank_choice(ObjectChangeActionChoices), required=False, widget=StaticSelect2() ) user_id = DynamicModelMultipleChoiceField( queryset=User.objects.all(), required=False, label=_('User'), widget=APISelectMultiple( api_url='/api/users/users/', ) ) changed_object_type_id = DynamicModelMultipleChoiceField( queryset=ContentType.objects.all(), required=False, label=_('Object Type'), 
widget=APISelectMultiple( api_url='/api/extras/content-types/', ) ) # # Scripts # class ScriptForm(BootstrapMixin, forms.Form): _commit = forms.BooleanField( required=False, initial=True, label="Commit changes", help_text="Commit changes to the database (uncheck for a dry-run)" ) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Move _commit to the end of the form commit = self.fields.pop('_commit') self.fields['_commit'] = commit @property def requires_input(self): """ A boolean indicating whether the form requires user input (ignore the _commit field). """ return bool(len(self.fields) > 1)
28.40566
117
0.643374
1,438
15,055
6.550765
0.17872
0.069002
0.051592
0.065924
0.51518
0.464544
0.433652
0.250849
0.220382
0.164331
0
0.001607
0.256128
15,055
529
118
28.459357
0.839539
0.064032
0
0.419192
0
0
0.06639
0.00372
0
0
0
0
0
1
0.025253
false
0
0.032828
0
0.280303
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab1c9d3f78e7e9ff6cc93d1c78aab266fbaf43fb
3,122
py
Python
unwarp_models.py
zgjslc/Film-Recovery-master1
4497a9930398c9e826ac364056a79e5bcbf6c953
[ "Apache-2.0" ]
null
null
null
unwarp_models.py
zgjslc/Film-Recovery-master1
4497a9930398c9e826ac364056a79e5bcbf6c953
[ "Apache-2.0" ]
null
null
null
unwarp_models.py
zgjslc/Film-Recovery-master1
4497a9930398c9e826ac364056a79e5bcbf6c953
[ "Apache-2.0" ]
null
null
null
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.misc import modules

constrain_path = {
    ('threeD', 'normal'): (True, True, ''),
    ('threeD', 'depth'): (True, True, ''),
    ('normal', 'depth'): (True, True, ''),
    ('depth', 'normal'): (True, True, ''),
}


class UnwarpNet(nn.Module):
    def __init__(self, use_simple=False, combine_num=3, use_constrain=True, constrain_configure=None):
        super(UnwarpNet, self).__init__()
        self.combine_num = combine_num
        self.use_simple = use_simple
        self.use_constrain = use_constrain
        self.constrain_configure = constrain_configure
        self.geo_encoder = modules.Encoder(downsample=6, in_channels=3)
        self.threeD_decoder = modules.Decoder(downsample=6, out_channels=3, combine_num=self.combine_num)
        self.normal_decoder = modules.Decoder(downsample=6, out_channels=3, combine_num=self.combine_num)
        self.depth_decoder = modules.Decoder(downsample=6, out_channels=1, combine_num=self.combine_num)
        self.mask_decoder = modules.Decoder(downsample=6, out_channels=1, combine_num=0)
        bottle_neck = sum([2 ** (i + 4) for i in range(self.combine_num)])
        self.second_encoder = modules.Encoder(downsample=6, in_channels=bottle_neck * 3 + 3)
        self.uv_decoder = modules.Decoder(downsample=6, out_channels=2, combine_num=0)
        # self.albedo_decoder = modules.AlbedoDecoder(downsample=6, out_channels=1)
        self.albedo_decoder = modules.Decoder(downsample=6, out_channels=1, combine_num=0)
        self.deform_decoder = modules.Decoder(downsample=6, out_channels=2, combine_num=0)
        self.dep2nor = None
        self.threeD_to_nor2dep = None
        self.nor2dep = None

    def forward(self, x):
        gxvals, gx_encode = self.geo_encoder(x)
        threeD_map, threeD_feature = self.threeD_decoder(gxvals, gx_encode)
        threeD_map = nn.functional.tanh(threeD_map)
        dep_map, dep_feature = self.depth_decoder(gxvals, gx_encode)
        dep_map = nn.functional.tanh(dep_map)
        nor_map, nor_feature = self.normal_decoder(gxvals, gx_encode)
        nor_map = nn.functional.tanh(nor_map)
        mask_map, mask_feature = self.mask_decoder(gxvals, gx_encode)
        mask_map = torch.nn.functional.sigmoid(mask_map)
        # geo_feature = torch.cat([threeD_feature, nor_feature, dep_feature], dim=1)
        geo_feature = torch.cat([threeD_feature, nor_feature, dep_feature, x], dim=1)
        b, c, h, w = geo_feature.size()
        geo_feature_mask = geo_feature.mul(mask_map.expand(b, c, h, w))
        secvals, sec_encode = self.second_encoder(geo_feature_mask)
        uv_map, _ = self.uv_decoder(secvals, sec_encode)
        uv_map = nn.functional.tanh(uv_map)
        alb_map, _ = self.albedo_decoder(secvals, sec_encode)
        alb_map = nn.functional.tanh(alb_map)
        deform_map, _ = self.deform_decoder(secvals, sec_encode)
        deform_map = nn.functional.tanh(deform_map)
        return uv_map, threeD_map, nor_map, alb_map, dep_map, mask_map, \
               None, None, None, None, None, deform_map
51.180328
105
0.686739
432
3,122
4.675926
0.189815
0.069307
0.055446
0.087129
0.323267
0.311881
0.30297
0.261386
0.261386
0.261386
0
0.013189
0.198591
3,122
60
106
52.033333
0.794165
0.047406
0
0
0
0
0.015146
0
0
0
0
0
0
1
0.038462
false
0
0.076923
0
0.153846
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab1d101c4bcfc97cfc157f818f4f8698285ba31c
12,768
py
Python
endpoint/test_endpoint/update.py
pansila/Auto-Test-System
bfe51a277466939a32daa08f27a89cf3c1900def
[ "MIT" ]
14
2019-02-19T01:31:08.000Z
2021-12-12T12:56:08.000Z
endpoint/test_endpoint/update.py
pansila/Auto-Test-System
bfe51a277466939a32daa08f27a89cf3c1900def
[ "MIT" ]
2
2020-03-10T12:12:10.000Z
2020-03-10T12:12:10.000Z
endpoint/test_endpoint/update.py
pansila/Auto-Test-System
bfe51a277466939a32daa08f27a89cf3c1900def
[ "MIT" ]
4
2019-07-09T02:00:13.000Z
2020-08-18T14:04:24.000Z
import configparser import os import hashlib import json import shutil import sys import tempfile import subprocess import tarfile import re import stat from functools import cmp_to_key from contextlib import closing from gzip import GzipFile from pathlib import Path from urllib.error import HTTPError from urllib.request import Request from urllib.request import urlopen WINDOWS = sys.platform == "win32" BOOTSTRAP = """\ import os, sys import re import subprocess def _which_python(): allowed_executables = ["python3", "python"] if sys.platform == 'win32': # in favor of 32 bit python to be compatible with the 32bit dlls of test libraries allowed_executables[:0] = ["py.exe -3-32", "py.exe -2-32", "py.exe -3-64", "py.exe -2-64"] # \d in regex ensures we can convert to int later version_matcher = re.compile(r"^Python (?P<major>\d+)\.(?P<minor>\d+)\..+$") fallback = None for executable in allowed_executables: try: raw_version = subprocess.check_output( executable + " --version", stderr=subprocess.STDOUT, shell=True ).decode("utf-8") except subprocess.CalledProcessError: continue match = version_matcher.match(raw_version.strip()) if match and tuple(map(int, match.groups())) >= (3, 0): # favor the first py3 executable we can find. return executable if fallback is None: # keep this one as the fallback; it was the first valid executable we found. fallback = executable if fallback is None: # Avoid breaking existing scripts fallback = "python" return fallback if __name__ == '__main__': py_executable = _which_python() subprocess.run(py_executable + r' {collie_bin} ' + ' '.join(sys.argv[1:]), shell=True) """ BIN = """#!/usr/bin/env python # -*- coding: utf-8 -*- import sys import os import argparse lib = os.path.normpath(os.path.join(os.path.realpath(__file__), "..", "..", "lib", "collie")) sys.path.insert(0, lib) from test_endpoint.app import main if __name__ == "__main__": sys.exit(main()) """ BAT = '@echo off\r\n{python_executable} "{collie_bootstrap}" %*\r\n' SH = '#!/bin/sh\npython3 "{collie_bootstrap}" $*\n' def expanduser(path): """ Expand ~ and ~user constructions. Includes a workaround for http://bugs.python.org/issue14768 """ expanded = os.path.expanduser(path) if path.startswith("~/") and expanded.startswith("//"): expanded = expanded[1:] return expanded class SelfUpdate: VERSION_REGEX = re.compile( r"v?(\d+)(?:\.(\d+))?(?:\.(\d+))?(?:\.(\d+))?" "(" "[._-]?" r"(?:(stable|beta|b|RC|alpha|a|patch|pl|p)((?:[.-]?\d+)*)?)?" "([.-]?dev)?" ")?" r"(?:\+[^\s]+)?" 
) def __init__(self, version=None, force=False): config = configparser.ConfigParser() config.read(self.config) self.server_host = config['tool.collie.settings']['server_host'] self.server_port = config['tool.collie.settings']['server_port'] self.join_id = config['tool.collie.settings']['join_id'] self.uuid = config['tool.collie.settings']['uuid'] server_host = self.server_host.strip('"') server_port = self.server_port.strip('"') self.SERVER_URL = f'http://{server_host}:{server_port}/api_v1' self.METADATA_URL = self.SERVER_URL + "/setting/get-endpoint/json" self.BASE_URL = self.SERVER_URL + "/setting/download" self._version = None if isinstance(version, bool) else version self._force = force @property def home(self): if os.environ.get("COLLIE_HOME"): return Path(expanduser(os.environ["COLLIE_HOME"])) home = Path(expanduser("~")) return home / ".collie" @property def bin(self): return self.home / "bin" @property def lib(self): return self.home / "lib" @property def lib_backup(self): return self.home / "lib-backup" @property def config(self): return self.home / "lib" / 'collie' / 'pyproject.toml' def get_version(self): from .__version__ import __version__ metadata = json.loads(self._get(self.METADATA_URL).decode()) def _compare_versions(x, y): mx = self.VERSION_REGEX.match(x) my = self.VERSION_REGEX.match(y) vx = tuple(int(p) for p in mx.groups()[:3]) + (mx.group(5),) vy = tuple(int(p) for p in my.groups()[:3]) + (my.group(5),) if vx < vy: return -1 elif vx > vy: return 1 return 0 releases = sorted( metadata["releases"], key=cmp_to_key(_compare_versions) ) if self._version and self._version not in releases: print("Version {} does not exist.".format(self._version)) return None, None version = self._version if not version: for release in reversed(releases): m = self.VERSION_REGEX.match(release) if m.group(5) and not self.allows_prereleases(): continue version = release break current_version = __version__ if current_version == version and not self._force: print("Latest version already installed.") return None, current_version return version, current_version def run(self): version, current_version = self.get_version() if not version: return self.update(version) self.restore_config() print(f'Succeeded to update collie to version {version}') def update(self, version): if self.lib_backup.exists(): shutil.rmtree(str(self.lib_backup)) # Backup the current installation if self.lib.exists(): shutil.copytree(str(self.lib), str(self.lib_backup)) shutil.rmtree(str(self.lib)) try: self._update(version) except Exception: if not self.lib_backup.exists(): raise shutil.copytree(str(self.lib_backup), str(self.lib)) shutil.rmtree(str(self.lib_backup)) raise finally: if self.lib_backup.exists(): shutil.rmtree(str(self.lib_backup)) self.make_bin() def _update(self, version): release_name = self._get_release_name(version) base_url = self.BASE_URL + '?' 
name = f"{release_name}.tar.gz" checksum = f"{release_name}.sha256sum" try: r = urlopen(base_url + "file={}".format(checksum)) except HTTPError as e: if e.code == 404: raise RuntimeError("Could not find {} file".format(checksum)) raise checksum = r.read().decode().strip() try: r = urlopen(base_url + "file={}".format(name)) except HTTPError as e: if e.code == 404: raise RuntimeError("Could not find {} file".format(name)) raise meta = r.info() size = int(meta["Content-Length"]) current = 0 block_size = 8192 sha = hashlib.sha256() with tempfile.TemporaryDirectory(prefix="collie-updater-") as dir_: tar = os.path.join(dir_, name) with open(tar, "wb") as f: while True: buffer = r.read(block_size) if not buffer: break current += len(buffer) f.write(buffer) sha.update(buffer) # Checking hashes if checksum != sha.hexdigest(): raise RuntimeError( "Hashes for {} do not match: {} != {}".format( name, checksum, sha.hexdigest() ) ) gz = GzipFile(tar, mode="rb") try: with tarfile.TarFile(tar, fileobj=gz, format=tarfile.PAX_FORMAT) as f: f.extractall(str(self.lib)) finally: gz.close() def restore_config(self): config = configparser.ConfigParser() config.read(self.config) config['tool.collie.settings']['server_host'] = self.server_host config['tool.collie.settings']['server_port'] = self.server_port config['tool.collie.settings']['join_id'] = self.join_id config['tool.collie.settings']['uuid'] = self.uuid with open(self.config, 'w') as config_file: config.write(config_file) def process(self, *args): return subprocess.check_output(list(args), stderr=subprocess.STDOUT) def _check_recommended_installation(self): current = Path(__file__) try: current.relative_to(self.home) except ValueError: raise RuntimeError( "Collie was not installed with the recommended installer. " "Cannot update automatically." ) def _get_release_name(self, version): platform = sys.platform if platform == "linux2": platform = "linux" return "collie-{}-{}".format(version, platform) def _bin_path(self, base_path, bin): if WINDOWS: return (base_path / "Scripts" / bin).with_suffix(".exe") return base_path / "bin" / bin def make_bin(self): self.bin.mkdir(0o755, parents=True, exist_ok=True) python_executable = self._which_python() with self.bin.joinpath("bootstrap.py").open("w", newline="") as f: f.write(BOOTSTRAP.format(collie_bin=str(self.bin / "collie.py"))) if WINDOWS: with self.bin.joinpath("collie.bat").open("w", newline="") as f: f.write( BAT.format( python_executable=python_executable, collie_bootstrap=str(self.bin / "bootstrap.py").replace( os.environ["USERPROFILE"], "%USERPROFILE%" ), ) ) else: with self.bin.joinpath("collie").open("w", newline="") as f: f.write( SH.format( collie_bootstrap=str(self.bin / "bootstrap.py").replace( os.getenv("HOME", ""), "$HOME" ), ) ) bin_content = BIN if not WINDOWS: bin_content = "#!/usr/bin/env {}\n".format(python_executable) + bin_content self.bin.joinpath("collie.py").write_text(bin_content, encoding="utf-8") if not WINDOWS: # Making the file executable st = os.stat(str(self.bin.joinpath("collie"))) os.chmod(str(self.bin.joinpath("collie")), st.st_mode | stat.S_IEXEC) def _which_python(self): """ Decides which python executable we'll embed in the launcher script. 
""" allowed_executables = ["python", "python3"] if WINDOWS: allowed_executables += ["py.exe -3", "py.exe -2"] # \d in regex ensures we can convert to int later version_matcher = re.compile(r"^Python (?P<major>\d+)\.(?P<minor>\d+)\..+$") fallback = None for executable in allowed_executables: try: raw_version = subprocess.check_output( executable + " --version", stderr=subprocess.STDOUT, shell=True ).decode("utf-8") except subprocess.CalledProcessError: continue match = version_matcher.match(raw_version.strip()) if match and tuple(map(int, match.groups())) >= (3, 0): # favor the first py3 executable we can find. return executable if fallback is None: # keep this one as the fallback; it was the first valid executable we found. fallback = executable if fallback is None: # Avoid breaking existing scripts fallback = "python" return fallback def _get(self, url): request = Request(url, headers={"User-Agent": "Python Robotest"}) with closing(urlopen(request)) as r: return r.read() def update_join_id(self, join_id): config = configparser.ConfigParser() config.read(self.config) config['tool.collie.settings']['join_id'] = f'"{join_id}"' with open(self.config, 'w') as config_file: config.write(config_file)
31.60396
98
0.56344
1,459
12,768
4.796436
0.21316
0.014004
0.020577
0.030866
0.364961
0.313947
0.302515
0.25493
0.238925
0.226065
0
0.00854
0.312187
12,768
403
99
31.682382
0.788317
0.034226
0
0.316993
0
0.01634
0.247517
0.042338
0
0
0
0
0
1
0.068627
false
0
0.084967
0.01634
0.235294
0.009804
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab202528012b6880e43938d0db79af54bf805f9b
1,145
py
Python
2021/day-12/solve.py
amochtar/adventofcode
292e7f00a1e19d2149d00246b0a77fedfcd3bd08
[ "MIT" ]
1
2019-12-27T22:36:30.000Z
2019-12-27T22:36:30.000Z
2021/day-12/solve.py
amochtar/adventofcode
292e7f00a1e19d2149d00246b0a77fedfcd3bd08
[ "MIT" ]
null
null
null
2021/day-12/solve.py
amochtar/adventofcode
292e7f00a1e19d2149d00246b0a77fedfcd3bd08
[ "MIT" ]
null
null
null
#!/usr/bin/env python

from typing import List

import aoc
from collections import defaultdict


@aoc.timing
def solve(inp: str, part2=False):
    def find_path(current: str, path: List[str] = []):
        if current == 'end':
            yield path
            return
        for nxt in caves[current]:
            if nxt == 'start':
                continue
            if nxt.islower() and nxt in path:
                if not part2:
                    continue
                elif any(path.count(c) > 1 for c in path if c.islower()):
                    continue
            yield from find_path(nxt, path + [nxt])

    caves = defaultdict(list)
    for line in inp.splitlines():
        parts = line.split('-')
        caves[parts[0]].append(parts[1])
        caves[parts[1]].append(parts[0])

    return len(list(find_path('start')))


@aoc.timing
def part2(inp: str):
    return inp


with open('test2.txt', 'r') as f:
    inp = f.read()

print("Part 1:", solve(inp))
print("Part 2:", solve(inp, True))

with open('input.txt', 'r') as f:
    inp = f.read()

print("Part 1:", solve(inp))
print("Part 2:", solve(inp, True))
23.367347
73
0.541485
156
1,145
3.955128
0.378205
0.06483
0.038898
0.02269
0.178282
0.178282
0.178282
0.178282
0.178282
0.178282
0
0.016582
0.315284
1,145
48
74
23.854167
0.770408
0.017467
0
0.314286
0
0
0.05516
0
0
0
0
0
0
1
0.085714
false
0
0.085714
0.028571
0.257143
0.114286
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab207da0020d38ce47419c0053bab12a37bcf81b
11,387
py
Python
PaddleCV/tracking/ltr/data/processing.py
suytingwan/models
ccdbfe77d071cc19b55fb9f4b738912e35d982ef
[ "Apache-2.0" ]
5
2021-09-28T13:28:01.000Z
2021-12-21T07:25:44.000Z
PaddleCV/tracking/ltr/data/processing.py
suytingwan/models
ccdbfe77d071cc19b55fb9f4b738912e35d982ef
[ "Apache-2.0" ]
1
2020-07-02T03:05:00.000Z
2020-07-02T03:05:00.000Z
PaddleCV/tracking/ltr/data/processing.py
suytingwan/models
ccdbfe77d071cc19b55fb9f4b738912e35d982ef
[ "Apache-2.0" ]
3
2021-09-28T15:33:45.000Z
2021-09-29T01:44:32.000Z
import numpy as np from ltr.data import transforms import ltr.data.processing_utils as prutils from pytracking.libs import TensorDict class BaseProcessing: """ Base class for Processing. Processing class is used to process the data returned by a dataset, before passing it through the network. For example, it can be used to crop a search region around the object, apply various data augmentations, etc.""" def __init__(self, transform=transforms.ToArray(), train_transform=None, test_transform=None, joint_transform=None): """ args: transform - The set of transformations to be applied on the images. Used only if train_transform or test_transform is None. train_transform - The set of transformations to be applied on the train images. If None, the 'transform' argument is used instead. test_transform - The set of transformations to be applied on the test images. If None, the 'transform' argument is used instead. joint_transform - The set of transformations to be applied 'jointly' on the train and test images. For example, it can be used to convert both test and train images to grayscale. """ self.transform = { 'train': transform if train_transform is None else train_transform, 'test': transform if test_transform is None else test_transform, 'joint': joint_transform } def __call__(self, data: TensorDict): raise NotImplementedError class SiamFCProcessing(BaseProcessing): def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, mode='pair', scale_type='context', border_type='meanpad', *args, **kwargs): super().__init__(*args, **kwargs) self.search_area_factor = search_area_factor self.output_sz = output_sz self.center_jitter_factor = center_jitter_factor self.scale_jitter_factor = scale_jitter_factor self.mode = mode self.scale_type = scale_type self.border_type = border_type def _get_jittered_box(self, box, mode, rng): jittered_size = box[2:4] * np.exp( rng.randn(2) * self.scale_jitter_factor[mode]) max_offset = (np.sqrt(jittered_size.prod()) * self.center_jitter_factor[mode]) jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (rng.rand(2) - 0.5) return np.concatenate( (jittered_center - 0.5 * jittered_size, jittered_size), axis=0) def __call__(self, data: TensorDict, rng=None): # Apply joint transforms if self.transform['joint'] is not None: num_train_images = len(data['train_images']) all_images = data['train_images'] + data['test_images'] all_images_trans = self.transform['joint'](*all_images) data['train_images'] = all_images_trans[:num_train_images] data['test_images'] = all_images_trans[num_train_images:] for s in ['train', 'test']: assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \ "In pair mode, num train/test frames must be 1" # Add a uniform noise to the center pos jittered_anno = [ self._get_jittered_box(a, s, rng) for a in data[s + '_anno'] ] # Crop image region centered at jittered_anno box try: crops, boxes = prutils.jittered_center_crop( data[s + '_images'], jittered_anno, data[s + '_anno'], self.search_area_factor[s], self.output_sz[s], scale_type=self.scale_type, border_type=self.border_type) except Exception as e: print('{}, anno: {}'.format(data['dataset'], data[s + '_anno'])) raise e # Apply transforms data[s + '_images'] = [self.transform[s](x) for x in crops] data[s + '_anno'] = boxes # Prepare output if self.mode == 'sequence': data = data.apply(prutils.stack_tensors) else: data = data.apply(lambda x: x[0] if isinstance(x, list) else x) return data class ATOMProcessing(BaseProcessing): """ The processing class used 
for training ATOM. The images are processed in the following way. First, the target bounding box is jittered by adding some noise. Next, a square region (called search region ) centered at the jittered target center, and of area search_area_factor^2 times the area of the jittered box is cropped from the image. The reason for jittering the target box is to avoid learning the bias that the target is always at the center of the search region. The search region is then resized to a fixed size given by the argument output_sz. A set of proposals are then generated for the test images by jittering the ground truth box. """ def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, proposal_params, mode='pair', *args, **kwargs): """ args: search_area_factor - The size of the search region relative to the target size. output_sz - An integer, denoting the size to which the search region is resized. The search region is always square. center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before extracting the search region. See _get_jittered_box for how the jittering is done. scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before extracting the search region. See _get_jittered_box for how the jittering is done. proposal_params - Arguments for the proposal generation process. See _generate_proposals for details. mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames """ super().__init__(*args, **kwargs) self.search_area_factor = search_area_factor self.output_sz = output_sz self.center_jitter_factor = center_jitter_factor self.scale_jitter_factor = scale_jitter_factor self.proposal_params = proposal_params self.mode = mode def _get_jittered_box(self, box, mode, rng): """ Jitter the input box args: box - input bounding box mode - string 'train' or 'test' indicating train or test data returns: Variable - jittered box """ jittered_size = box[2:4] * np.exp( rng.randn(2) * self.scale_jitter_factor[mode]) max_offset = (np.sqrt(jittered_size.prod()) * self.center_jitter_factor[mode]) jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (rng.rand(2) - 0.5) return np.concatenate( (jittered_center - 0.5 * jittered_size, jittered_size), axis=0) def _generate_proposals(self, box, rng): """ Generates proposals by adding noise to the input box args: box - input box returns: array - Array of shape (num_proposals, 4) containing proposals array - Array of shape (num_proposals,) containing IoU overlap of each proposal with the input box. 
The IoU is mapped to [-1, 1] """ # Generate proposals num_proposals = self.proposal_params['boxes_per_frame'] proposals = np.zeros((num_proposals, 4)) gt_iou = np.zeros(num_proposals) for i in range(num_proposals): proposals[i, :], gt_iou[i] = prutils.perturb_box( box, min_iou=self.proposal_params['min_iou'], sigma_factor=self.proposal_params['sigma_factor'], rng=rng) # Map to [-1, 1] gt_iou = gt_iou * 2 - 1 return proposals, gt_iou def __call__(self, data: TensorDict, rng=None): """ args: data - The input data, should contain the following fields: 'train_images' - 'test_images' - 'train_anno' - 'test_anno' - returns: TensorDict - output data block with following fields: 'train_images' - 'test_images' - 'train_anno' - 'test_anno' - 'test_proposals'- 'proposal_iou' - """ # Apply joint transforms if self.transform['joint'] is not None: num_train_images = len(data['train_images']) all_images = data['train_images'] + data['test_images'] all_images_trans = self.transform['joint'](*all_images) data['train_images'] = all_images_trans[:num_train_images] data['test_images'] = all_images_trans[num_train_images:] for s in ['train', 'test']: assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \ "In pair mode, num train/test frames must be 1" # Add a uniform noise to the center pos jittered_anno = [ self._get_jittered_box(a, s, rng) for a in data[s + '_anno'] ] # Crop image region centered at jittered_anno box try: crops, boxes = prutils.jittered_center_crop( data[s + '_images'], jittered_anno, data[s + '_anno'], self.search_area_factor, self.output_sz) except Exception as e: print('{}, anno: {}'.format(data['dataset'], data[s + '_anno'])) raise e # Apply transforms data[s + '_images'] = [self.transform[s](x) for x in crops] data[s + '_anno'] = boxes # Generate proposals frame2_proposals, gt_iou = zip( * [self._generate_proposals(a, rng) for a in data['test_anno']]) data['test_proposals'] = list(frame2_proposals) data['proposal_iou'] = list(gt_iou) # Prepare output if self.mode == 'sequence': data = data.apply(prutils.stack_tensors) else: data = data.apply(lambda x: x[0] if isinstance(x, list) else x) return data
43.296578
121
0.558971
1,323
11,387
4.608466
0.17158
0.035427
0.026242
0.019682
0.56946
0.563228
0.544202
0.526652
0.509431
0.49467
0
0.006462
0.361289
11,387
262
122
43.461832
0.831844
0.321244
0
0.650685
0
0
0.071787
0
0
0
0
0
0.013699
1
0.061644
false
0
0.027397
0
0.143836
0.013699
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab21105c56263980d75d2b1bb1e9d7beba919be5
884
py
Python
tqcli/config/config.py
Tranquant/tqcli
0cc12e0d80129a14cec8117cd73e2ca69fb25214
[ "Apache-2.0" ]
null
null
null
tqcli/config/config.py
Tranquant/tqcli
0cc12e0d80129a14cec8117cd73e2ca69fb25214
[ "Apache-2.0" ]
null
null
null
tqcli/config/config.py
Tranquant/tqcli
0cc12e0d80129a14cec8117cd73e2ca69fb25214
[ "Apache-2.0" ]
1
2016-08-16T03:43:36.000Z
2016-08-16T03:43:36.000Z
import logging
from os.path import expanduser

#TQ_API_ROOT_URL = 'http://127.0.1.1:8090/dataset'
TQ_API_ROOT_URL = 'http://elb-tranquant-ecs-cluster-tqapi-1919110681.us-west-2.elb.amazonaws.com/dataset'
LOG_PATH = expanduser('~/tqcli.log')
# the chunk size must be at least 5MB for multipart upload
DEFAULT_CHUNK_SIZE = 1024 * 1024 * 5  # 5MB

logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    filename=LOG_PATH,
    filemode='w'
)

# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
30.482759
105
0.745475
133
884
4.879699
0.616541
0.015408
0.027735
0.03698
0.049307
0
0
0
0
0
0
0.041775
0.133484
884
28
106
31.571429
0.805483
0.338235
0
0
0
0.0625
0.277778
0
0
0
0
0
0
1
0
false
0
0.125
0
0.125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab21d266138fcacadbe38aeb0f70a2986ce949b2
8,564
py
Python
fqf_iqn_qrdqn/agent/base_agent.py
rainwangphy/fqf-iqn-qrdqn.pytorch
351e9c4722c8b1ed411cd8c1bbf46c93c07f0893
[ "MIT" ]
null
null
null
fqf_iqn_qrdqn/agent/base_agent.py
rainwangphy/fqf-iqn-qrdqn.pytorch
351e9c4722c8b1ed411cd8c1bbf46c93c07f0893
[ "MIT" ]
null
null
null
fqf_iqn_qrdqn/agent/base_agent.py
rainwangphy/fqf-iqn-qrdqn.pytorch
351e9c4722c8b1ed411cd8c1bbf46c93c07f0893
[ "MIT" ]
1
2022-02-14T02:55:01.000Z
2022-02-14T02:55:01.000Z
from abc import ABC, abstractmethod import os import numpy as np import torch from torch.utils.tensorboard import SummaryWriter from fqf_iqn_qrdqn.memory import LazyMultiStepMemory, \ LazyPrioritizedMultiStepMemory from fqf_iqn_qrdqn.utils import RunningMeanStats, LinearAnneaer class BaseAgent(ABC): def __init__(self, env, test_env, log_dir, num_steps=5 * (10 ** 7), batch_size=32, memory_size=10 ** 6, gamma=0.99, multi_step=1, update_interval=4, target_update_interval=10000, start_steps=50000, epsilon_train=0.01, epsilon_eval=0.001, epsilon_decay_steps=250000, double_q_learning=False, dueling_net=False, noisy_net=False, use_per=False, log_interval=100, eval_interval=250000, num_eval_steps=125000, max_episode_steps=27000, grad_cliping=5.0, cuda=True, seed=0): self.env = env self.test_env = test_env torch.manual_seed(seed) np.random.seed(seed) self.env.seed(seed) self.test_env.seed(2 ** 31 - 1 - seed) # torch.backends.cudnn.deterministic = True # It harms a performance. # torch.backends.cudnn.benchmark = False # It harms a performance. self.device = torch.device( "cuda" if cuda and torch.cuda.is_available() else "cpu") self.online_net = None self.target_net = None # Replay memory which is memory-efficient to store stacked frames. if use_per: beta_steps = (num_steps - start_steps) / update_interval self.memory = LazyPrioritizedMultiStepMemory( memory_size, self.env.observation_space.shape, self.device, gamma, multi_step, beta_steps=beta_steps) else: self.memory = LazyMultiStepMemory( memory_size, self.env.observation_space.shape, self.device, gamma, multi_step) self.log_dir = log_dir self.model_dir = os.path.join(log_dir, 'model') self.summary_dir = os.path.join(log_dir, 'summary') if not os.path.exists(self.model_dir): os.makedirs(self.model_dir) if not os.path.exists(self.summary_dir): os.makedirs(self.summary_dir) self.writer = SummaryWriter(log_dir=self.summary_dir) self.train_return = RunningMeanStats(log_interval) self.steps = 0 self.learning_steps = 0 self.episodes = 0 self.best_eval_score = -np.inf self.num_actions = self.env.action_space.n self.num_steps = num_steps self.batch_size = batch_size self.double_q_learning = double_q_learning self.dueling_net = dueling_net self.noisy_net = noisy_net self.use_per = use_per self.log_interval = log_interval self.eval_interval = eval_interval self.num_eval_steps = num_eval_steps self.gamma_n = gamma ** multi_step self.start_steps = start_steps self.epsilon_train = LinearAnneaer( 1.0, epsilon_train, epsilon_decay_steps) self.epsilon_eval = epsilon_eval self.update_interval = update_interval self.target_update_interval = target_update_interval self.max_episode_steps = max_episode_steps self.grad_cliping = grad_cliping def run(self): while True: self.train_episode() if self.steps > self.num_steps: break def is_update(self): return self.steps % self.update_interval == 0 \ and self.steps >= self.start_steps def is_random(self, eval=False): # Use e-greedy for evaluation. if self.steps < self.start_steps: return True if eval: return np.random.rand() < self.epsilon_eval if self.noisy_net: return False return np.random.rand() < self.epsilon_train.get() def update_target(self): self.target_net.load_state_dict( self.online_net.state_dict()) def explore(self): # Act with randomness. action = self.env.action_space.sample() return action def exploit(self, state): # Act without randomness. state = torch.ByteTensor( state).unsqueeze(0).to(self.device).float() / 255. 
with torch.no_grad(): action = self.online_net.calculate_q(states=state).argmax().item() return action @abstractmethod def learn(self): pass def save_models(self, save_dir): if not os.path.exists(save_dir): os.makedirs(save_dir) torch.save( self.online_net.state_dict(), os.path.join(save_dir, 'online_net.pth')) torch.save( self.target_net.state_dict(), os.path.join(save_dir, 'target_net.pth')) def load_models(self, save_dir): self.online_net.load_state_dict(torch.load( os.path.join(save_dir, 'online_net.pth'))) self.target_net.load_state_dict(torch.load( os.path.join(save_dir, 'target_net.pth'))) def train_episode(self): self.online_net.train() self.target_net.train() self.episodes += 1 episode_return = 0. episode_steps = 0 done = False state = self.env.reset() while (not done) and episode_steps <= self.max_episode_steps: # NOTE: Noises can be sampled only after self.learn(). However, I # sample noises before every action, which seems to lead better # performances. self.online_net.sample_noise() if self.is_random(eval=False): action = self.explore() else: action = self.exploit(state) next_state, reward, done, _ = self.env.step(action) # To calculate efficiently, I just set priority=max_priority here. self.memory.append(state, action, reward, next_state, done) self.steps += 1 episode_steps += 1 episode_return += reward state = next_state self.train_step_interval() # We log running mean of stats. self.train_return.append(episode_return) # We log evaluation results along with training frames = 4 * steps. if self.episodes % self.log_interval == 0: self.writer.add_scalar( 'return/train', self.train_return.get(), 4 * self.steps) print(f'Episode: {self.episodes:<4} ' f'episode steps: {episode_steps:<4} ' f'return: {episode_return:<5.1f}') def train_step_interval(self): self.epsilon_train.step() if self.steps % self.target_update_interval == 0: self.update_target() if self.is_update(): self.learn() if self.steps % self.eval_interval == 0: self.evaluate() self.save_models(os.path.join(self.model_dir, 'final')) self.online_net.train() def evaluate(self): self.online_net.eval() num_episodes = 0 num_steps = 0 total_return = 0.0 while True: state = self.test_env.reset() episode_steps = 0 episode_return = 0.0 done = False while (not done) and episode_steps <= self.max_episode_steps: if self.is_random(eval=True): action = self.explore() else: action = self.exploit(state) next_state, reward, done, _ = self.test_env.step(action) num_steps += 1 episode_steps += 1 episode_return += reward state = next_state num_episodes += 1 total_return += episode_return if num_steps > self.num_eval_steps: break mean_return = total_return / num_episodes if mean_return > self.best_eval_score: self.best_eval_score = mean_return self.save_models(os.path.join(self.model_dir, 'best')) # We log evaluation results along with training frames = 4 * steps. self.writer.add_scalar( 'return/test', mean_return, 4 * self.steps) print('-' * 60) print(f'Num steps: {self.steps:<5} ' f'return: {mean_return:<5.1f}') print('-' * 60) def __del__(self): self.env.close() self.test_env.close() self.writer.close()
34.119522
79
0.599603
1,062
8,564
4.607345
0.191149
0.02943
0.023912
0.012262
0.241774
0.208257
0.167995
0.167995
0.154711
0.13039
0
0.018038
0.307333
8,564
250
80
34.256
0.806811
0.074498
0
0.185185
0
0
0.032617
0.002781
0
0
0
0
0
1
0.074074
false
0.005291
0.037037
0.005291
0.153439
0.021164
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab2324a100ecb32532716cd76301eba78659a0c1
3,012
py
Python
quartet_condor.py
BotanyHunter/QuartetAnalysis
c9b21aac267718be5ea8a8a76632fc0a3feb8403
[ "MIT" ]
null
null
null
quartet_condor.py
BotanyHunter/QuartetAnalysis
c9b21aac267718be5ea8a8a76632fc0a3feb8403
[ "MIT" ]
null
null
null
quartet_condor.py
BotanyHunter/QuartetAnalysis
c9b21aac267718be5ea8a8a76632fc0a3feb8403
[ "MIT" ]
null
null
null
#quartet_condor.py
#version 2.0.2

import random, sys

def addToDict(d):
    '''
    Ensures each quartet has three concordance factors (CFs)
    a dictionary d has less than three CFs, add CFs with the value 0 until there are three
    Input: a dictionary containing CFs, a counter of how many CFs are in the dictionary
    '''
    if ("{1,2|3,4}" not in d):
        d["{1,2|3,4}"] = 0.0
    if ("{1,3|2,4}" not in d):
        d["{1,3|2,4}"] = 0.0
    if ("{1,4|2,3}" not in d):
        d["{1,4|2,3}"] = 0.0


class quartet:
    '''
    Picks individual quartets and isolates concordance factors
    '''

    def __init__(self):
        #length of a split in *.concordance file
        self.length_of_splits = 10
        self.quartet_length = 4
        #list to hold the 4 taxa
        self.taxa = []
        #dictionaries to hold cfs with splits
        self.d = {}
        self.d2 = {}

    def pick_random_quartet(self, ntax):
        '''
        Randomly select the 4 taxa to be included in the quartet analysis
        :Input: The total number of taxa in an analysis
        :Return: A sorted list of 4 unique taxa
        '''
        self.taxa = []
        while len(self.taxa) < self.quartet_length:
            num = random.randint(0, ntax-1)
            if num not in self.taxa:
                self.taxa.append(num)
        self.taxa = sorted(self.taxa)
        #return a sorted list of 4 random taxa
        return self.taxa

    def isolateCFs(self, file, num_genes):
        '''
        Isolates the CFs within a *.concordance file, and sorts the three from largest to smallest
        :Input: A *.concordance file
        :Return: A sorted dictionary of three CFs
        '''
        self.d = {}
        self.ciLow = {}
        self.ciHigh = {}
        split = ""
        cf = 0
        #counter to ensure 3 entries
        counter = 0
        for line in file:
            #finds all splits, which have CFs associated with them
            if (line[0] == '{' and len(line) == self.length_of_splits):
                split = line
            #find CF associated with the split found above
            if (line.startswith('mean')):
                words = line.split()
                #CF guarenteed by BUCKy to be the 4th "word"
                cf = float(words[3])
                #add split/CF pair to dictionary
                self.d[split.strip()] = cf
                counter += 1
            if( line.startswith('95% CI for CF')):
                useline = line.translate(None,"()")
                useline = useline.replace(","," ")
                words = useline.split()
                self.ciLow[split.strip()] = float(words[5]) / num_genes
                self.ciHigh[split.strip()] = float(words[6]) / num_genes
        #fill out dictionary if there were less than 3 splits
        if (counter < 3):
            addToDict(self.d)
            addToDict(self.ciLow)
            addToDict(self.ciHigh)

        return self.d, self.ciLow, self.ciHigh
33.466667
98
0.537849
399
3,012
4.02005
0.310777
0.0399
0.011222
0.013092
0.078554
0.066085
0
0
0
0
0
0.029366
0.355578
3,012
89
99
33.842697
0.797012
0.338645
0
0.081633
0
0
0.040773
0
0
0
0
0
0
1
0.081633
false
0
0.020408
0
0.163265
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab2601bcecd2c5b5f36345a106f14a3b9c2ff88d
5,668
py
Python
main.py
scottjr632/trump-twitter-bot
484b1324d752395338b0a9e5850acf294089b26f
[ "MIT" ]
null
null
null
main.py
scottjr632/trump-twitter-bot
484b1324d752395338b0a9e5850acf294089b26f
[ "MIT" ]
null
null
null
main.py
scottjr632/trump-twitter-bot
484b1324d752395338b0a9e5850acf294089b26f
[ "MIT" ]
null
null
null
import os import logging import argparse import sys import signal import subprocess from functools import wraps from dotenv import load_dotenv load_dotenv(verbose=True) from app.config import configure_app from app.bot import TrumpBotScheduler from app.sentimentbot import SentimentBot parser = argparse.ArgumentParser(description=r""" """) ROOT = os.getcwd() PID_FILE_PATH = os.path.join(ROOT, 'var/run-dev.pid') CMDS = [] FNCS = [] try: os.setpgrp() if not os.path.exists(os.path.dirname(PID_FILE_PATH)): os.makedirs(os.path.dirname(PID_FILE_PATH)) with open(PID_FILE_PATH, 'w+') as file: file.write(str(os.getpgrp()) + '\n') except Exception as e: logging.error(e) def _file_path_sanity_check(*args): for path in args: if not os.path.exists(path): raise Exception('Unable to find file %s' % path) def _start_client_server(*args, **kwargs): cmd = [ 'npm', '--prefix', '%s/client' % ROOT, 'run', 'start' ] CMDS.append(cmd) def inject_file_paths(fn): requests_path = os.environ.get('REQUESTS_FILE_PATH', 'requests/request.json') auth_path = os.environ.get('AUTH_FILE_PATH', 'requests/auth.json') _file_path_sanity_check(requests_path, auth_path) @wraps(fn) def wrapper(*args, **kwargs): return fn(requests_path=requests_path, auth_path=auth_path, *args, **kwargs) return wrapper @inject_file_paths def _initialize_trump_bot(auth_path, requests_path, send_posts: bool=True, *args, **kwargs) -> TrumpBotScheduler: trump_bot: TrumpBotScheduler = None if send_posts: logging.info('Post requests are not being sent.') class PostOverride(TrumpBotScheduler): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def __send_tweet_msg__(self, content, headers=None): return 200 trump_bot = PostOverride(file_path=requests_path, auth_file_path=auth_path) else: trump_bot = TrumpBotScheduler(file_path=requests_path, auth_file_path=auth_path) # this functions initialize the trump bot by getting the latest tweets # and trying to send any tweets that contained errors trump_bot.send_latest_tweets() trump_bot.resend_bad_tweets() logging.info('Trump bot initialization finished... 
please press ctrl-c to close program if finished.') return trump_bot @inject_file_paths def _start_sentiment_bot(auth_path: str, requests_path: str, trump_bot: TrumpBotScheduler, send_posts: bool=True) -> SentimentBot: bot: SentimentBot = None if send_posts: logging.info('Sentiment bot is not running') class PostOverride(SentimentBot): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def __send_tweet_msg__(self, content) -> int: return 200 bot = PostOverride(file_path=requests_path, auth_file_path=auth_path) else: bot = SentimentBot(auth_file_path=auth_path, file_path=requests_path) trump_bot.add_job(bot.send_todays_tone, 'interval', hours=24, max_instances=1) return bot def _start_flask_server(*args, **kwargs): from app import app logging.info('Starting the flask server...') level = os.environ.get('CONFIG_LEVEL') configure_app(app, status='production' if level is None else level) port = app.config.get('PORT') app.run(host='0.0.0.0', port=port) def _start_dev_server(*args, **kwargs): _start_client_server() FNCS.append(_start_flask_server) def _start_prod_server(*args, **kwargs): _start_trump_bot(*args, **kwargs) _start_flask_server(*args, **kwargs) def _start_trump_bot(send_posts=True, start_sentiment_bot=False, *args, **kwargs): logging.info('Starting the trump bot...') # requests_path = os.environ.get('REQUESTS_FILE_PATH', 'requests/request.json') # auth_path = os.environ.get('AUTH_FILE_PATH', 'requests/auth.json') # _file_path_sanity_check(requests_path, auth_path) bot = _initialize_trump_bot(send_posts=send_posts) if not start_sentiment_bot: _start_sentiment_bot(trump_bot=bot, send_posts=send_posts) bot.start() ACTIONS = { "initialize": _initialize_trump_bot, "client": _start_client_server, "trumpbot": _start_trump_bot, "flask": _start_flask_server, "dev": _start_dev_server, "prod": _start_prod_server, } parser.add_argument('action', help='start the Flask app', type=str, choices=[key for key, v in ACTIONS.items()]) parser.add_argument('-np', '--no-post', dest='send_posts', action='store_true', help='Do not send post requests') parser.add_argument('-nsb', '--no-sentiment-bot', dest='start_sentiment_bot', action='store_true', help='Do not to start the sentiment bot') def signal_handler(sig, frame): os.killpg(0, signal.SIGTERM) os.remove(PID_FILE_PATH) sys.exit(0) def main(): options = parser.parse_args() for s in (signal.SIGINT, signal.SIGTERM): signal.signal(s, signal_handler) ACTIONS.get(options.action)(**options.__dict__) env = os.environ.copy() for cmd in CMDS: subprocess.Popen(cmd, env=env) for fn in FNCS: subprocess.Popen(fn(), env=env) signal.pause() if __name__ == "__main__": logging.basicConfig(level=logging.INFO) main()
27.64878
106
0.657728
733
5,668
4.792633
0.255116
0.045545
0.036436
0.018218
0.249929
0.2078
0.16567
0.16567
0.16567
0.154284
0
0.003433
0.229181
5,668
204
107
27.784314
0.800641
0.055575
0
0.102941
0
0
0.110716
0.003927
0
0
0
0
0
1
0.117647
false
0
0.088235
0.022059
0.264706
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab27a7af29a41d40eec1afd58d05fca20bfc3c8b
691
py
Python
010-summation-of-primes.py
dendi239/euler
71fcdca4a80f9e586aab05eb8acadf1a296dda90
[ "MIT" ]
null
null
null
010-summation-of-primes.py
dendi239/euler
71fcdca4a80f9e586aab05eb8acadf1a296dda90
[ "MIT" ]
null
null
null
010-summation-of-primes.py
dendi239/euler
71fcdca4a80f9e586aab05eb8acadf1a296dda90
[ "MIT" ]
null
null
null
#! /usr/bin/env python3

import itertools
import typing as tp


def primes() -> tp.Generator[int, None, None]:
    primes_ = []
    d = 2
    while True:
        is_prime = True
        for p in primes_:
            if p * p > d:
                break
            if d % p == 0:
                is_prime = False
                break

        if is_prime:
            primes_.append(d)
            yield d

        d += 1


def sum_primes_below(n: int) -> int:
    return sum(itertools.takewhile(lambda x: x < n, primes()))


def test_ten() -> None:
    assert sum_primes_below(10) == 17


def main() -> None:
    print(sum_primes_below(2_000_000))


if __name__ == '__main__':
    main()
16.452381
62
0.51809
92
691
3.641304
0.5
0.062687
0.125373
0
0
0
0
0
0
0
0
0.034483
0.370478
691
41
63
16.853659
0.735632
0.031838
0
0.08
0
0
0.011976
0
0
0
0
0
0.04
1
0.16
false
0
0.08
0.04
0.28
0.04
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab27ed4a158779f6beba16216ad31870fa98bf95
11,368
py
Python
setup.py
letmaik/lensfunpy
ddadb6bfd5f3acde5640210aa9f575501e5c0914
[ "MIT" ]
94
2016-08-24T21:52:40.000Z
2022-03-05T07:17:21.000Z
setup.py
letmaik/lensfunpy
ddadb6bfd5f3acde5640210aa9f575501e5c0914
[ "MIT" ]
22
2016-10-21T07:15:21.000Z
2021-09-20T13:41:02.000Z
setup.py
letmaik/lensfunpy
ddadb6bfd5f3acde5640210aa9f575501e5c0914
[ "MIT" ]
11
2016-12-12T03:14:07.000Z
2021-05-06T17:47:30.000Z
from setuptools import setup, Extension, find_packages import subprocess import errno import re import os import shutil import sys import zipfile from urllib.request import urlretrieve import numpy from Cython.Build import cythonize isWindows = os.name == 'nt' isMac = sys.platform == 'darwin' is64Bit = sys.maxsize > 2**32 # adapted from cffi's setup.py # the following may be overridden if pkg-config exists libraries = ['lensfun'] include_dirs = [] library_dirs = [] extra_compile_args = [] extra_link_args = [] def _ask_pkg_config(resultlist, option, result_prefix='', sysroot=False): pkg_config = os.environ.get('PKG_CONFIG','pkg-config') try: p = subprocess.Popen([pkg_config, option, 'lensfun'], stdout=subprocess.PIPE) except OSError as e: if e.errno != errno.ENOENT: raise else: t = p.stdout.read().decode().strip() if p.wait() == 0: res = t.split() # '-I/usr/...' -> '/usr/...' for x in res: assert x.startswith(result_prefix) res = [x[len(result_prefix):] for x in res] sysroot = sysroot and os.environ.get('PKG_CONFIG_SYSROOT_DIR', '') if sysroot: # old versions of pkg-config don't support this env var, # so here we emulate its effect if needed res = [path if path.startswith(sysroot) else sysroot + path for path in res] resultlist[:] = res def use_pkg_config(): _ask_pkg_config(include_dirs, '--cflags-only-I', '-I', sysroot=True) _ask_pkg_config(extra_compile_args, '--cflags-only-other') _ask_pkg_config(library_dirs, '--libs-only-L', '-L', sysroot=True) _ask_pkg_config(extra_link_args, '--libs-only-other') _ask_pkg_config(libraries, '--libs-only-l', '-l') if isWindows or isMac: cmake_build = os.path.abspath('external/lensfun/build') install_dir = os.path.join(cmake_build, 'install') include_dirs += [os.path.join(install_dir, 'include', 'lensfun')] library_dirs += [os.path.join(install_dir, 'lib')] else: use_pkg_config() # this must be after use_pkg_config()! 
include_dirs += [numpy.get_include()] # for version_helper.h include_dirs += [os.path.abspath('lensfunpy')] def clone_submodules(): if not os.path.exists('external/lensfun/README.md'): print('lensfun git submodule not cloned yet, will invoke "git submodule update --init" now') if os.system('git submodule update --init') != 0: raise Exception('git failed') def windows_lensfun_compile(): clone_submodules() cwd = os.getcwd() # Download cmake to build lensfun cmake_version = '3.13.4' cmake_url = 'https://github.com/Kitware/CMake/releases/download/v{v}/cmake-{v}-win32-x86.zip'.format(v=cmake_version) cmake = os.path.abspath('external/cmake-{}-win32-x86/bin/cmake.exe'.format(cmake_version)) # Download vcpkg to build dependencies of lensfun vcpkg_commit = '2021.05.12' vcpkg_url = 'https://github.com/Microsoft/vcpkg/archive/{}.zip'.format(vcpkg_commit) vcpkg_dir = os.path.abspath('external/vcpkg-{}'.format(vcpkg_commit)) vcpkg_bootstrap = os.path.join(vcpkg_dir, 'bootstrap-vcpkg.bat') vcpkg = os.path.join(vcpkg_dir, 'vcpkg.exe') files = [(cmake_url, 'external', cmake), (vcpkg_url, 'external', vcpkg_bootstrap)] for url, extractdir, extractcheck in files: if not os.path.exists(extractcheck): path = 'external/' + os.path.basename(url) if not os.path.exists(path): print('Downloading', url) try: urlretrieve(url, path) except: # repeat once in case of network issues urlretrieve(url, path) with zipfile.ZipFile(path) as z: print('Extracting', path, 'into', extractdir) z.extractall(extractdir) if not os.path.exists(path): raise RuntimeError(path + ' not found!') # Bootstrap vcpkg os.chdir(vcpkg_dir) if not os.path.exists(vcpkg): code = os.system(vcpkg_bootstrap) if code != 0: sys.exit(code) # lensfun depends on glib2, so let's build it with vcpkg vcpkg_arch = 'x64' if is64Bit else 'x86' vcpkg_triplet = '{}-windows'.format(vcpkg_arch) code = os.system(vcpkg + ' install glib:' + vcpkg_triplet) if code != 0: sys.exit(code) vcpkg_install_dir = os.path.join(vcpkg_dir, 'installed', vcpkg_triplet) # bundle runtime dlls vcpkg_bin_dir = os.path.join(vcpkg_install_dir, 'bin') glib2_dll = os.path.join(vcpkg_bin_dir, 'glib-2.0-0.dll') # configure and compile lensfun if not os.path.exists(cmake_build): os.mkdir(cmake_build) os.chdir(cmake_build) # temporary hack to avoid https://stackoverflow.com/a/53547931 # (python module not needed here anyway) patch_path = '../apps/CMakeLists.txt' with open(patch_path) as f: content = f.read() content = content.replace('IF(PYTHON)', 'IF(FALSE)') with open(patch_path, 'w') as f: f.write(content) cmds = [cmake + ' .. -G "NMake Makefiles" -DCMAKE_BUILD_TYPE=Release ' +\ '-DBUILD_TESTS=off -DINSTALL_HELPER_SCRIPTS=off ' +\ '-DCMAKE_TOOLCHAIN_FILE={}/scripts/buildsystems/vcpkg.cmake '.format(vcpkg_dir) +\ '-DGLIB2_BASE_DIR={} -DGLIB2_DLL={} -DCMAKE_INSTALL_PREFIX=install'.format(vcpkg_install_dir, glib2_dll), cmake + ' --build .', cmake + ' --build . 
--target install', ] for cmd in cmds: print(cmd) code = os.system(cmd) if code != 0: sys.exit(code) os.chdir(cwd) dll_runtime_libs = [('lensfun.dll', os.path.join(install_dir, 'bin')), ('glib-2.0-0.dll', vcpkg_bin_dir), # dependencies of glib ('pcre.dll', vcpkg_bin_dir), ('iconv-2.dll', vcpkg_bin_dir), ('charset-1.dll', vcpkg_bin_dir), ('intl-8.dll', vcpkg_bin_dir), ] for filename, folder in dll_runtime_libs: src = os.path.join(folder, filename) dest = 'lensfunpy/' + filename print('copying', src, '->', dest) shutil.copyfile(src, dest) def mac_lensfun_compile(): clone_submodules() # configure and compile lensfun cwd = os.getcwd() if not os.path.exists(cmake_build): os.mkdir(cmake_build) os.chdir(cmake_build) install_name_dir = os.path.join(install_dir, 'lib') cmds = ['cmake .. -DCMAKE_BUILD_TYPE=Release ' +\ '-DBUILD_TESTS=off -DINSTALL_HELPER_SCRIPTS=off ' +\ '-DCMAKE_INSTALL_PREFIX=install ' +\ '-DCMAKE_INSTALL_NAME_DIR=' + install_name_dir, 'cmake --build .', 'cmake --build . --target install', ] for cmd in cmds: print(cmd) code = os.system(cmd) if code != 0: sys.exit(code) os.chdir(cwd) def bundle_db_files(): import glob db_files = 'lensfunpy/db_files' if not os.path.exists(db_files): os.makedirs(db_files) for path in glob.glob('external/lensfun/data/db/*.xml'): dest = os.path.join(db_files, os.path.basename(path)) print('copying', path, '->', dest) shutil.copyfile(path, dest) package_data = {'lensfunpy': []} # evil hack, check cmd line for relevant commands # custom cmdclasses didn't work out in this case cmdline = ''.join(sys.argv[1:]) needsCompile = any(s in cmdline for s in ['install', 'bdist', 'build_ext', 'wheel', 'nosetests']) if isWindows and needsCompile: windows_lensfun_compile() package_data['lensfunpy'].append('*.dll') elif isMac and needsCompile: mac_lensfun_compile() if any(s in cmdline for s in ['clean', 'sdist']): # When running sdist after a previous run of bdist or build_ext # then even with the 'clean' command the .egg-info folder stays. # This folder contains SOURCES.txt which in turn is used by sdist # to include package data files, but we don't want .dll's and .xml # files in our source distribution. Therefore, to prevent accidents, # we help a little... egg_info = 'lensfunpy.egg-info' print('removing', egg_info) shutil.rmtree(egg_info, ignore_errors=True) if 'sdist' not in cmdline: # This assumes that the lensfun version from external/lensfun was used. # If that's not the case, the bundled files may fail to load, for example, # if lensfunpy was linked against an older lensfun version already on # the system (Linux mostly) and the database format changed in an incompatible way. # In that case, loading of bundled files can still be disabled # with Database(load_bundled=False). 
package_data['lensfunpy'].append('db_files/*.xml') bundle_db_files() # Support for optional Cython line tracing # run the following to generate a test coverage report: # $ export LINETRACE=1 # $ python setup.py build_ext --inplace # $ nosetests --with-coverage --cover-html --cover-package=lensfunpy compdirectives = {} macros = [] if (os.environ.get('LINETRACE', False)): compdirectives['linetrace'] = True macros.append(('CYTHON_TRACE', '1')) extensions = cythonize([Extension("lensfunpy._lensfun", include_dirs=include_dirs, sources=[os.path.join('lensfunpy', '_lensfun.pyx')], libraries=libraries, library_dirs=library_dirs, extra_compile_args=extra_compile_args, extra_link_args=extra_link_args, define_macros=macros )], compiler_directives=compdirectives) # make __version__ available (https://stackoverflow.com/a/16084844) exec(open('lensfunpy/_version.py').read()) setup( name = 'lensfunpy', version = __version__, description = 'Lens distortion correction for Python, a wrapper for lensfun', long_description = open('README.rst').read(), author = 'Maik Riechert', author_email = 'maik.riechert@arcor.de', url = 'https://github.com/letmaik/lensfunpy', classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Natural Language :: English', 'License :: OSI Approved :: MIT License', 'Programming Language :: Cython', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Operating System :: MacOS', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX', 'Operating System :: Unix', 'Topic :: Multimedia :: Graphics', 'Topic :: Software Development :: Libraries', ], packages = find_packages(), ext_modules = extensions, package_data = package_data, install_requires=['numpy'] )
37.518152
125
0.616555
1,407
11,368
4.832978
0.292111
0.023824
0.019118
0.012941
0.161765
0.123529
0.081765
0.065588
0.065588
0.065588
0
0.009507
0.259764
11,368
302
126
37.642384
0.798574
0.155084
0
0.159292
0
0.004425
0.235835
0.047773
0
0
0
0
0.004425
1
0.026549
false
0
0.053097
0
0.079646
0.035398
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab2c89bde44269f1533806cfa45910e25d77ed66
2,771
py
Python
services/postprocess/src/postprocess.py
hadarohana/myCosmos
6e4682a2af822eb828180658aaa6d3e304cc85bf
[ "Apache-2.0" ]
null
null
null
services/postprocess/src/postprocess.py
hadarohana/myCosmos
6e4682a2af822eb828180658aaa6d3e304cc85bf
[ "Apache-2.0" ]
5
2020-01-28T23:13:10.000Z
2022-02-10T00:28:15.000Z
services/postprocess/src/postprocess.py
hadarohana/myCosmos
6e4682a2af822eb828180658aaa6d3e304cc85bf
[ "Apache-2.0" ]
1
2021-03-10T19:25:44.000Z
2021-03-10T19:25:44.000Z
""" Post processing on detected objects """ import pymongo from pymongo import MongoClient import time import logging logging.basicConfig(format='%(levelname)s :: %(asctime)s :: %(message)s', level=logging.DEBUG) from joblib import Parallel, delayed import click from xgboost_model.inference import run_inference, PostprocessException import os def load_detected_pages(db, buffer_size): """ """ current_docs = [] for doc in db.propose_pages.find({'postprocess': None, 'ocr': True}, no_cursor_timeout=True): current_docs.append(doc) if len(current_docs) == buffer_size: yield current_docs current_docs = [] yield current_docs def do_skip(page, client): db = client.pdfs coll = db.postprocess_pages return coll.count_documents({'pdf_name': page['pdf_name'], 'page_num': page['page_num']}, limit=1) != 0 def postprocess(db_insert_fn, num_processes, weights_pth, skip): logging.info('Starting post-processing over detected objects') start_time = time.time() client = MongoClient(os.environ["DBCONNECT"]) logging.info(f'Connected to client: {client}.') db = client.pdfs for batch in load_detected_pages(db, 100): logging.info('Loaded next batch. Running postprocessing') try: pages = Parallel(n_jobs=num_processes)(delayed(run_inference)(page, weights_pth) for page in batch) except PostprocessException as e: logging.error(f'Postprocessing error in referenced page: {e.page}') logging.error(f'Original Exception: {e.original_exception}') continue db_insert_fn(pages, client) end_time = time.time() logging.info(f'Exiting post-processing. Time up: {end_time - start_time}') def mongo_insert_fn(objs, client): db = client.pdfs for obj in objs: try: result = db.propose_pages.update_one({'_id': obj['_id']}, {'$set': { 'pp_detected_objs': obj['pp_detected_objs'], 'postprocess': True } }, upsert=False) logging.info(f'Updated result: {result}') except pymongo.errors.WriterError as e: logging.error(f'Document write error: {e}\n Document id: obj["_id"]') @click.command() @click.argument("num_processes") @click.argument("weights_pth") @click.option('--skip/--no-skip') def click_wrapper(num_processes, weights_pth, skip): postprocess(mongo_insert_fn, int(num_processes), weights_pth, skip) if __name__ == '__main__': click_wrapper()
37.445946
111
0.616745
325
2,771
5.049231
0.381538
0.040219
0.025594
0.032907
0.092626
0
0
0
0
0
0
0.002473
0.2703
2,771
73
112
37.958904
0.8091
0.012631
0
0.15
0
0
0.198381
0.008097
0
0
0
0
0
1
0.083333
false
0
0.133333
0
0.233333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab2d00e90fa00656e5b245ed372443c5a0686b39
2,619
py
Python
model-optimizer/mo/front/common/partial_infer/multi_box_prior_test.py
calvinfeng/openvino
11f591c16852637506b1b40d083b450e56d0c8ac
[ "Apache-2.0" ]
null
null
null
model-optimizer/mo/front/common/partial_infer/multi_box_prior_test.py
calvinfeng/openvino
11f591c16852637506b1b40d083b450e56d0c8ac
[ "Apache-2.0" ]
19
2021-03-26T08:11:00.000Z
2022-02-21T13:06:26.000Z
model-optimizer/mo/front/common/partial_infer/multi_box_prior_test.py
calvinfeng/openvino
11f591c16852637506b1b40d083b450e56d0c8ac
[ "Apache-2.0" ]
1
2021-07-28T17:30:46.000Z
2021-07-28T17:30:46.000Z
""" Copyright (C) 2018-2021 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest import numpy as np from mo.front.common.partial_infer.multi_box_prior import multi_box_prior_infer_mxnet from mo.graph.graph import Node from mo.utils.unittest.graph import build_graph nodes_attributes = {'node_1': {'value': None, 'kind': 'data'}, 'node_2': {'value': None, 'kind': 'data'}, 'prior_box_1': {'type': 'PriorBox', 'kind': 'op'}, 'node_3': {'type': 'Identity', 'value': None, 'kind': 'data'} } class TestMultiBoxPriorInfer(unittest.TestCase): def test_prior_box_infer_ideal(self): graph = build_graph(nodes_attributes, [('node_1', 'prior_box_1'), ('node_2', 'prior_box_1'), ('prior_box_1', 'node_3')], {'node_1': {'shape': np.array([1, 1024, 19, 19])}, 'node_2': {'shape': np.array([1, 3, 300, 300])}, 'prior_box_1': {'aspect_ratio': [1.0, 2.0, 0.5, 3.0, 0.333333333333], 'min_size': [0.2, 0.272], 'max_size': '', 'offset': 0.5, 'step': 0.2, 'sizes': [0.2, 0.272]}, 'node_3': {'shape': np.array([1, 2, 3])}, }) multi_box_prior_node = Node(graph, 'prior_box_1') multi_box_prior_infer_mxnet(multi_box_prior_node) exp_shape = np.array([1, 2, 8664]) res_shape = graph.node['node_3']['shape'] for i in range(0, len(exp_shape)): self.assertEqual(exp_shape[i], res_shape[i]) self.assertEqual(multi_box_prior_node.min_size, [0.2, 0.272]) self.assertEqual(multi_box_prior_node.max_size, '') self.assertEqual(multi_box_prior_node.aspect_ratio, [1.0, 2.0, 0.5, 3.0, 0.333333333333]) self.assertEqual(round(multi_box_prior_node.step, 1), 0.2) self.assertEqual(round(multi_box_prior_node.offset, 1), 0.5)
44.389831
112
0.583047
355
2,619
4.095775
0.352113
0.055021
0.089409
0.081843
0.289546
0.220083
0.094911
0.044017
0.044017
0.044017
0
0.066738
0.284842
2,619
58
113
45.155172
0.709557
0.216495
0
0
0
0
0.126844
0
0
0
0
0
0.176471
1
0.029412
false
0
0.147059
0
0.205882
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab2dd4e23245d0ab9d1e255dfa3fc732936ba5f1
4,557
py
Python
cmake/utils/gen-ninja-deps.py
stamhe/bitcoin-abc
a1ba303c6b4f164ae94612e83b824e564405a96e
[ "MIT" ]
1,266
2017-05-02T07:02:29.000Z
2022-03-31T17:15:44.000Z
cmake/utils/gen-ninja-deps.py
EGYVOICE/bitcoin-abc-avalanche
e0f1fe857e1fc85f01903f1c323c2d5c54aecc1c
[ "MIT" ]
426
2017-05-07T12:40:52.000Z
2022-03-29T18:12:01.000Z
cmake/utils/gen-ninja-deps.py
EGYVOICE/bitcoin-abc-avalanche
e0f1fe857e1fc85f01903f1c323c2d5c54aecc1c
[ "MIT" ]
721
2017-05-07T10:36:11.000Z
2022-03-15T09:07:48.000Z
#!/usr/bin/env python3 import argparse import os import subprocess parser = argparse.ArgumentParser(description='Produce a dep file from ninja.') parser.add_argument( '--build-dir', help='The build directory.', required=True) parser.add_argument( '--base-dir', help='The directory for which dependencies are rewriten.', required=True) parser.add_argument('--ninja', help='The ninja executable to use.') parser.add_argument( 'base_target', help="The target from the base's perspective.") parser.add_argument( 'targets', nargs='+', help='The target for which dependencies are extracted.') parser.add_argument( '--extra-deps', nargs='+', help='Extra dependencies.') args = parser.parse_args() build_dir = os.path.abspath(args.build_dir) base_dir = os.path.abspath(args.base_dir) ninja = args.ninja base_target = args.base_target targets = args.targets extra_deps = args.extra_deps # Make sure we operate in the right folder. os.chdir(build_dir) if ninja is None: ninja = subprocess.check_output(['command', '-v', 'ninja'])[:-1] # Construct the set of all targets all_targets = set() doto_targets = set() for t in subprocess.check_output([ninja, '-t', 'targets', 'all']).splitlines(): t, r = t.split(b':') all_targets.add(t) if r[:13] == b' C_COMPILER__' or r[:15] == b' CXX_COMPILER__': doto_targets.add(t) def parse_ninja_query(query): deps = dict() lines = query.splitlines() while len(lines): line = lines.pop(0) if line[0] == ord(' '): continue # We have a new target target = line.split(b':')[0] assert lines.pop(0)[:8] == b' input:' inputs = set() while True: i = lines.pop(0) if i[:4] != b' ': break ''' ninja has 3 types of input: 1. Explicit dependencies, no prefix; 2. Implicit dependencies, | prefix. 3. Order only dependencies, || prefix. Order only dependency do not require the target to be rebuilt and so we ignore them. ''' i = i[4:] if i[0] == ord('|'): if i[1] == ord('|'): # We reached the order only dependencies. break i = i[2:] inputs.add(i) deps[target] = inputs return deps def extract_deps(workset): # Recursively extract the dependencies of the target. deps = dict() while len(workset) > 0: query = subprocess.check_output([ninja, '-t', 'query'] + list(workset)) target_deps = parse_ninja_query(query) deps.update(target_deps) workset = set() for d in target_deps.values(): workset.update(t for t in d if t in all_targets and t not in deps) # Extract build time dependencies. bt_targets = [t for t in deps if t in doto_targets] if len(bt_targets) == 0: return deps ndeps = subprocess.check_output( [ninja, '-t', 'deps'] + bt_targets, stderr=subprocess.DEVNULL) lines = ndeps.splitlines() while len(lines) > 0: line = lines.pop(0) t, m = line.split(b':') if m == b' deps not found': continue inputs = set() while True: i = lines.pop(0) if i == b'': break assert i[:4] == b' ' inputs.add(i[4:]) deps[t] = inputs return deps base_dir = base_dir.encode() def rebase_deps(deps): rebased = dict() cache = dict() def rebase(path): if path in cache: return cache[path] abspath = os.path.abspath(path) newpath = path if path == abspath else os.path.relpath( abspath, base_dir) cache[path] = newpath return newpath for t, s in deps.items(): rebased[rebase(t)] = set(rebase(d) for d in s) return rebased deps = extract_deps(set(targets)) deps = rebase_deps(deps) def dump(deps): for t, d in deps.items(): if len(d) == 0: continue str = t.decode() + ": \\\n " str += " \\\n ".join(sorted(map((lambda x: x.decode()), d))) print(str) # Collapse everything under the base target. 
basedeps = set() if extra_deps is None else set(d.encode() for d in extra_deps) for d in deps.values(): basedeps.update(d) base_target = base_target.encode() basedeps.discard(base_target) dump({base_target: basedeps})
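The dump() helper above emits Make-style dependency syntax: the target, a backslash continuation, then one prerequisite per line. For a hypothetical target with two hypothetical inputs, the emitted text would look like:

base_target: \
 src/foo.cpp \
 src/foo.h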
25.038462
79
0.577354
605
4,557
4.256198
0.261157
0.031068
0.039612
0.030291
0.112233
0.024078
0.024078
0.024078
0.024078
0.024078
0
0.009031
0.29537
4,557
181
80
25.176796
0.792899
0.062541
0
0.214876
0
0
0.104796
0
0
0
0
0
0.016529
1
0.041322
false
0
0.024793
0
0.115702
0.008264
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab30b98300e549b0e8401f690d6ee36c03180fdb
2,493
py
Python
sysinv/sysinv/sysinv/sysinv/helm/garbd.py
Wind-River/starlingx-config
96b92e5179d54dde10cb84c943eb239adf26b958
[ "Apache-2.0" ]
null
null
null
sysinv/sysinv/sysinv/sysinv/helm/garbd.py
Wind-River/starlingx-config
96b92e5179d54dde10cb84c943eb239adf26b958
[ "Apache-2.0" ]
null
null
null
sysinv/sysinv/sysinv/sysinv/helm/garbd.py
Wind-River/starlingx-config
96b92e5179d54dde10cb84c943eb239adf26b958
[ "Apache-2.0" ]
null
null
null
#
# Copyright (c) 2018-2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import utils
from sysinv.helm import common
from sysinv.helm import base


class GarbdHelm(base.BaseHelm):
    """Class to encapsulate helm operations for the galera arbitrator chart"""

    # The service name is used to build the standard docker image location.
    # It is intentionally "mariadb" and not "garbd" as they both use the
    # same docker image.
    SERVICE_NAME = common.HELM_CHART_MARIADB

    CHART = common.HELM_CHART_GARBD
    SUPPORTED_NAMESPACES = \
        base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_OPENSTACK]
    SUPPORTED_APP_NAMESPACES = {
        constants.HELM_APP_OPENSTACK:
            base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_OPENSTACK]
    }

    def _is_enabled(self, app_name, chart_name, namespace):
        # First, see if this chart is enabled by the user then adjust based on
        # system conditions
        enabled = super(GarbdHelm, self)._is_enabled(
            app_name, chart_name, namespace)

        # If there are fewer than 2 controllers or we're on AIO-DX or we are on
        # distributed cloud system controller, we'll use a single mariadb server
        # and so we don't want to run garbd.
        if enabled and (self._num_controllers() < 2
                        or utils.is_aio_duplex_system(self.dbapi)
                        or (self._distributed_cloud_role() ==
                            constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER)):
            enabled = False
        return enabled

    def execute_manifest_updates(self, operator):
        # On application load this chart is enabled in the mariadb chart group
        if not self._is_enabled(operator.APP,
                                self.CHART, common.HELM_NS_OPENSTACK):
            operator.chart_group_chart_delete(
                operator.CHART_GROUPS_LUT[self.CHART],
                operator.CHARTS_LUT[self.CHART])

    def get_overrides(self, namespace=None):
        overrides = {
            common.HELM_NS_OPENSTACK: {
            }
        }

        if namespace in self.SUPPORTED_NAMESPACES:
            return overrides[namespace]
        elif namespace:
            raise exception.InvalidHelmNamespace(chart=self.CHART,
                                                 namespace=namespace)
        else:
            return overrides
37.208955
80
0.649017
298
2,493
5.258389
0.416107
0.03829
0.030632
0.053606
0.098277
0.066369
0.066369
0.066369
0
0
0
0.006749
0.286803
2,493
66
81
37.772727
0.874578
0.257521
0
0.04878
0
0
0
0
0
0
0
0
0
1
0.073171
false
0
0.121951
0
0.390244
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab33de96dbc34b33ac4aed99648c2c63749addef
8,913
py
Python
armi/physics/fuelCycle/settings.py
celikten/armi
4e100dd514a59caa9c502bd5a0967fd77fdaf00e
[ "Apache-2.0" ]
1
2021-05-29T16:02:31.000Z
2021-05-29T16:02:31.000Z
armi/physics/fuelCycle/settings.py
celikten/armi
4e100dd514a59caa9c502bd5a0967fd77fdaf00e
[ "Apache-2.0" ]
null
null
null
armi/physics/fuelCycle/settings.py
celikten/armi
4e100dd514a59caa9c502bd5a0967fd77fdaf00e
[ "Apache-2.0" ]
null
null
null
"""Settings for generic fuel cycle code.""" import re import os from armi.settings import setting from armi.operators import settingsValidation CONF_ASSEMBLY_ROTATION_ALG = "assemblyRotationAlgorithm" CONF_ASSEM_ROTATION_STATIONARY = "assemblyRotationStationary" CONF_CIRCULAR_RING_MODE = "circularRingMode" CONF_CIRCULAR_RING_ORDER = "circularRingOrder" CONF_CUSTOM_FUEL_MANAGEMENT_INDEX = "customFuelManagementIndex" CONF_RUN_LATTICE_BEFORE_SHUFFLING = "runLatticePhysicsBeforeShuffling" CONF_SHUFFLE_LOGIC = "shuffleLogic" CONF_PLOT_SHUFFLE_ARROWS = "plotShuffleArrows" CONF_FUEL_HANDLER_NAME = "fuelHandlerName" CONF_JUMP_RING_NUM = "jumpRingNum" CONF_LEVELS_PER_CASCADE = "levelsPerCascade" def getFuelCycleSettings(): """Define settings for fuel cycle.""" settings = [ setting.Setting( CONF_ASSEMBLY_ROTATION_ALG, default="", label="Assembly Rotation Algorithm", description="The algorithm to use to rotate the detail assemblies while shuffling", options=["", "buReducingAssemblyRotation", "simpleAssemblyRotation"], enforcedOptions=True, ), setting.Setting( CONF_ASSEM_ROTATION_STATIONARY, default=False, label="Rotate stationary assems", description=( "Whether or not to rotate assemblies that are not shuffled." "This can only be True if 'rotation' is true." ), ), setting.Setting( CONF_CIRCULAR_RING_MODE, default=False, description="Toggle between circular ring definitions to hexagonal ring definitions", label="Use Circular Rings", ), setting.Setting( CONF_CIRCULAR_RING_ORDER, default="angle", description="Order by which locations are sorted in circular rings for equilibrium shuffling", label="Eq. circular sort type", options=["angle", "distance", "distanceSmart"], ), setting.Setting( CONF_CUSTOM_FUEL_MANAGEMENT_INDEX, default=0, description=( "An index that determines which of various options is used in management. " "Useful for optimization sweeps. " ), label="Custom Shuffling Index", ), setting.Setting( CONF_RUN_LATTICE_BEFORE_SHUFFLING, default=False, description=( "Forces the Generation of Cross Sections Prior to Shuffling the Fuel Assemblies. " "Note: This is recommended when performing equilibrium shuffling branching searches." ), label="Generate XS Prior to Fuel Shuffling", ), setting.Setting( CONF_SHUFFLE_LOGIC, default="", label="Shuffle Logic", description=( "Python script written to handle the fuel shuffling for this case. " "This is user-defined per run as a dynamic input." ), # schema here could check if file exists, but this is a bit constraining in testing. # For example, some tests have relative paths for this but aren't running in # the right directory, and IsFile doesn't seem to work well with relative paths. # This is left here as an FYI about how we could check existence of files if we get # around these problem. 
# schema=vol.All( # vol.IsFile(), # pylint: disable=no-value-for-parameter # msg="Shuffle logic input must be an existing file", # ), ), setting.Setting( CONF_FUEL_HANDLER_NAME, default="", label="Fuel Handler Name", description="The name of the FuelHandler class in the shuffle logic module to activate", ), setting.Setting( CONF_PLOT_SHUFFLE_ARROWS, default=False, description="Make plots with arrows showing each move.", label="Plot shuffle arrows", ), setting.Setting( CONF_JUMP_RING_NUM, default=8, label="Jump Ring Number", description="None" ), setting.Setting( CONF_LEVELS_PER_CASCADE, default=14, label="Move per cascade", description="None", ), ] return settings def getFuelCycleSettingValidators(inspector): queries = [] queries.append( settingsValidation.Query( lambda: bool(inspector.cs["shuffleLogic"]) ^ bool(inspector.cs["fuelHandlerName"]), "A value was provided for `fuelHandlerName` or `shuffleLogic`, but not " "the other. Either both `fuelHandlerName` and `shuffleLogic` should be " "defined, or neither of them.", "", inspector.NO_ACTION, ) ) # Check for code fixes for input code on the fuel shuffling outside the version control of ARMI # These are basically auto-migrations for untracked code using # the ARMI API. (This may make sense at a higher level) regex_solutions = [ ( r"(#{0,20}?)[^\s#]*output\s*?\((.*?)(,\s*[1-3]{1}\s*)\)", r"\1runLog.important(\2)", ), ( r"(#{0,20}?)[^\s#]*output\s*?\((.*?)(,\s*[4-5]{1,2}\s*)\)", r"\1runLog.info(\2)", ), ( r"(#{0,20}?)[^\s#]*output\s*?\((.*?)(,\s*[6-8]{1,2}\s*)\)", r"\1runLog.extra(\2)", ), ( r"(#{0,20}?)[^\s#]*output\s*?\((.*?)(,\s*\d{1,2}\s*)\)", r"\1runLog.debug(\2)", ), (r"(#{0,20}?)[^\s#]*output\s*?\((.*?)\)", r"\1runLog.important(\2)"), (r"output = self.cs.output", r""), (r"cs\.getSetting\(\s*([^\)]+)\s*\)", r"cs[\1]"), (r"cs\.setSetting\(\s*([^\)]+)\s*,\s*([^\)]+)\s*\)", r"cs[\1] = \2"), ( r"import\s*armi\.components\s*as\s*components", r"from armi.reactor import components", ), (r"\[['\"]caseTitle['\"]\]", r".caseTitle"), ( r"self.r.core.bolAssems\['(.*?)'\]", r"self.r.blueprints.assemblies['\1']", ), (r"copyAssembly", r"duplicate"), ] def _locateRegexOccurences(): with open(inspector._csRelativePath(inspector.cs["shuffleLogic"])) as src: src = src.read() matches = [] for pattern, _sub in regex_solutions: matches += re.findall(pattern, src) return matches def _applyRegexSolutions(): srcFile = inspector._csRelativePath(inspector.cs["shuffleLogic"]) destFile = os.path.splitext(srcFile)[0] + "migrated.py" with open(srcFile) as src, open(destFile, "w") as dest: srcContent = src.read() # get the buffer content regexContent = srcContent # keep the before and after changes separate for pattern, sub in regex_solutions: regexContent = re.sub(pattern, sub, regexContent) if regexContent != srcContent: dest.write("from armi import runLog\n") dest.write(regexContent) inspector.cs["shuffleLogic"] = destFile queries.append( settingsValidation.Query( lambda: " " in inspector.cs["shuffleLogic"], "Spaces are not allowed in shuffleLogic file location. You have specified {0}. " "Shuffling will not occur.".format(inspector.cs["shuffleLogic"]), "", inspector.NO_ACTION, ) ) def _clearShufflingInput(): inspector._assignCS("shuffleLogic", "") inspector._assignCS("fuelHandlerName", "") queries.append( settingsValidation.Query( lambda: inspector.cs["shuffleLogic"] and not inspector._csRelativePathExists(inspector.cs["shuffleLogic"]), "The specified shuffle logic file '{0}' cannot be found. 
" "Shuffling will not occur.".format(inspector.cs["shuffleLogic"]), "Clear specified file value?", _clearShufflingInput, ) ) queries.append( settingsValidation.Query( lambda: inspector.cs["shuffleLogic"] and inspector._csRelativePathExists(inspector.cs["shuffleLogic"]) and _locateRegexOccurences(), "The shuffle logic file {} uses deprecated code." " It will not work unless you permit some automated changes to occur." " The logic file will be backed up to the current directory under a timestamped name" "".format(inspector.cs["shuffleLogic"]), "Proceed?", _applyRegexSolutions, ) ) return queries
39.092105
106
0.574554
903
8,913
5.575858
0.349945
0.028401
0.054816
0.004965
0.178749
0.079245
0.060377
0.055214
0.027011
0
0
0.007961
0.309436
8,913
227
107
39.264317
0.810073
0.099181
0
0.299492
0
0.020305
0.3502
0.079835
0
0
0
0
0
1
0.025381
false
0
0.045685
0
0.086294
0.005076
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab38ae2d8c17a7a5df07314f47034bda8a636085
3,845
py
Python
tests/test_color_background.py
erykoff/redmapper
23fb66c7369de784c67ce6c41ada2f1f51a84acb
[ "Apache-2.0" ]
17
2016-03-06T07:51:02.000Z
2022-02-03T15:17:26.000Z
tests/test_color_background.py
erykoff/redmapper
23fb66c7369de784c67ce6c41ada2f1f51a84acb
[ "Apache-2.0" ]
42
2016-07-27T20:48:20.000Z
2022-01-31T20:47:51.000Z
tests/test_color_background.py
erykoff/redmapper
23fb66c7369de784c67ce6c41ada2f1f51a84acb
[ "Apache-2.0" ]
8
2017-01-26T01:38:41.000Z
2020-11-14T07:41:53.000Z
import unittest
import numpy.testing as testing
import numpy as np
import fitsio
import tempfile
import os

from redmapper import ColorBackground
from redmapper import ColorBackgroundGenerator
from redmapper import Configuration


class ColorBackgroundTestCase(unittest.TestCase):
    """
    Tests for the redmapper.ColorBackground and
    redmapper.ColorBackgroundGenerator classes.
    """
    def runTest(self):
        """
        Run the ColorBackground and ColorBackgroundGenerator tests.
        """
        file_name = 'test_dr8_col_bkg.fit'
        file_path = 'data_for_tests'

        cbkg = ColorBackground('%s/%s' % (file_path, file_name))

        col1 = np.array([0.572300, 1.39560])
        col2 = np.array([0.7894, 0.9564])
        refmags = np.array([17.587, 18.956])

        refmagindex = np.array([258, 395])
        col1index = np.array([1, 17])
        col2index = np.array([15, 19])

        # These are new values that are based on improvements in the binning.
        idl_bkg1 = np.array([0.76778, 0.80049])
        idl_bkg2 = np.array([0.04012, 0.10077])
        idl_bkg12 = np.array([0.01085, 0.081])

        # Test color1
        py_outputs = cbkg.lookup_diagonal(1, col1, refmags)
        testing.assert_almost_equal(py_outputs, idl_bkg1, decimal=5)

        # Test color2
        py_outputs = cbkg.lookup_diagonal(2, col2, refmags)
        testing.assert_almost_equal(py_outputs, idl_bkg2, decimal=5)

        # Test off-diagonal
        py_outputs = cbkg.lookup_offdiag(1, 2, col1, col2, refmags)
        testing.assert_almost_equal(py_outputs, idl_bkg12, decimal=5)

        # And a test sigma_g with the usehdrarea=True
        cbkg2 = ColorBackground('%s/%s' % (file_path, file_name), usehdrarea=True)

        col1 = np.array([0.572300, 1.39560, 1.0])
        col2 = np.array([0.7894, 0.9564, 1.0])
        refmags = np.array([17.587, 18.956, 25.0])

        idl_sigma_g1 = np.array([127.698, 591.112, np.inf])
        idl_sigma_g2 = np.array([7.569, 82.8938, np.inf])

        # Test color1
        py_outputs = cbkg2.sigma_g_diagonal(1, col1, refmags)
        testing.assert_almost_equal(py_outputs, idl_sigma_g1, decimal=3)

        # Test color2
        py_outputs = cbkg2.sigma_g_diagonal(2, col2, refmags)
        testing.assert_almost_equal(py_outputs, idl_sigma_g2, decimal=3)

        #####################################################
        # Now a test of the generation of a color background

        conf_filename = 'testconfig.yaml'
        config = Configuration(file_path + "/" + conf_filename)

        tfile = tempfile.mkstemp()
        os.close(tfile[0])
        config.bkgfile_color = tfile[1]
        config.d.nside = 128
        config.d.hpix = [8421]
        config.border = 0.0

        cbg = ColorBackgroundGenerator(config, minrangecheck=5)
        # Need to set clobber=True because the tempfile was created
        cbg.run(clobber=True)

        fits = fitsio.FITS(config.bkgfile_color)

        # Make sure we have 11 extensions
        testing.assert_equal(len(fits), 11)

        # These tests are obsolete, but could be refactored
        # Check the 01_01 and 01_02
        # bkg11 = fits['01_01_REF'].read()
        # bkg11_compare = fitsio.read(file_path + "/test_dr8_bkg_zredc_sub.fits", ext='01_01_REF')
        # testing.assert_almost_equal(bkg11['BC'], bkg11_compare['BC'], 3)
        # testing.assert_almost_equal(bkg11['N'], bkg11_compare['N'], 3)

        # bkg12 = fits['01_02_REF'].read()
        # bkg12_compare = fitsio.read(file_path + "/test_dr8_bkg_zredc_sub.fits", ext='01_02_REF')
        # testing.assert_almost_equal(bkg12['BC'], bkg12_compare['BC'], 2)
        # testing.assert_almost_equal(bkg12['N'], bkg12_compare['N'], 4)

        # And delete the tempfile
        os.remove(config.bkgfile_color)


if __name__ == '__main__':
    unittest.main()
34.954545
98
0.635371
512
3,845
4.572266
0.333984
0.041862
0.073046
0.092268
0.337035
0.268689
0.251602
0.16446
0.16446
0.140111
0
0.086942
0.237191
3,845
109
99
35.275229
0.711217
0.277503
0
0
0
0
0.025545
0
0
0
0
0
0.111111
1
0.018519
false
0
0.166667
0
0.203704
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ab392fd8e80c256d42ff5f34b47b1e8775e1c4cb
33,538
py
Python
src/metpy/calc/basic.py
Exi666/MetPy
c3cf8b9855e0ce7c14347e9d000fc3d531a18e1c
[ "BSD-3-Clause" ]
null
null
null
src/metpy/calc/basic.py
Exi666/MetPy
c3cf8b9855e0ce7c14347e9d000fc3d531a18e1c
[ "BSD-3-Clause" ]
null
null
null
src/metpy/calc/basic.py
Exi666/MetPy
c3cf8b9855e0ce7c14347e9d000fc3d531a18e1c
[ "BSD-3-Clause" ]
null
null
null
# Copyright (c) 2008,2015,2016,2017,2018,2019 MetPy Developers. # Distributed under the terms of the BSD 3-Clause License. # SPDX-License-Identifier: BSD-3-Clause """Contains a collection of basic calculations. These include: * wind components * heat index * windchill """ import warnings import numpy as np from scipy.ndimage import gaussian_filter from .. import constants as mpconsts from ..package_tools import Exporter from ..units import atleast_1d, check_units, masked_array, units from ..xarray import preprocess_xarray exporter = Exporter(globals()) # The following variables are constants for a standard atmosphere t0 = 288. * units.kelvin p0 = 1013.25 * units.hPa @exporter.export @preprocess_xarray @check_units('[speed]', '[speed]') def wind_speed(u, v): r"""Compute the wind speed from u and v-components. Parameters ---------- u : `pint.Quantity` Wind component in the X (East-West) direction v : `pint.Quantity` Wind component in the Y (North-South) direction Returns ------- wind speed: `pint.Quantity` The speed of the wind See Also -------- wind_components """ speed = np.sqrt(u * u + v * v) return speed @exporter.export @preprocess_xarray @check_units('[speed]', '[speed]') def wind_direction(u, v, convention='from'): r"""Compute the wind direction from u and v-components. Parameters ---------- u : `pint.Quantity` Wind component in the X (East-West) direction v : `pint.Quantity` Wind component in the Y (North-South) direction convention : str Convention to return direction. 'from' returns the direction the wind is coming from (meteorological convention). 'to' returns the direction the wind is going towards (oceanographic convention). Default is 'from'. Returns ------- direction: `pint.Quantity` The direction of the wind in interval [0, 360] degrees, with 360 being North, with the direction defined by the convention kwarg. See Also -------- wind_components Notes ----- In the case of calm winds (where `u` and `v` are zero), this function returns a direction of 0. """ wdir = 90. * units.deg - np.arctan2(-v, -u) origshape = wdir.shape wdir = atleast_1d(wdir) # Handle oceanographic convection if convention == 'to': wdir -= 180 * units.deg elif convention not in ('to', 'from'): raise ValueError('Invalid kwarg for "convention". Valid options are "from" or "to".') wdir[wdir <= 0] += 360. * units.deg # avoid unintended modification of `pint.Quantity` by direct use of magnitude calm_mask = (np.asarray(u.magnitude) == 0.) & (np.asarray(v.magnitude) == 0.) # np.any check required for legacy numpy which treats 0-d False boolean index as zero if np.any(calm_mask): wdir[calm_mask] = 0. * units.deg return wdir.reshape(origshape).to('degrees') @exporter.export @preprocess_xarray @check_units('[speed]') def wind_components(speed, wdir): r"""Calculate the U, V wind vector components from the speed and direction. Parameters ---------- speed : `pint.Quantity` The wind speed (magnitude) wdir : `pint.Quantity` The wind direction, specified as the direction from which the wind is blowing (0-2 pi radians or 0-360 degrees), with 360 degrees being North. Returns ------- u, v : tuple of `pint.Quantity` The wind components in the X (East-West) and Y (North-South) directions, respectively. See Also -------- wind_speed wind_direction Examples -------- >>> from metpy.units import units >>> metpy.calc.wind_components(10. * units('m/s'), 225. 
* units.deg) (<Quantity(7.071067811865475, 'meter / second')>, <Quantity(7.071067811865477, 'meter / second')>) """ wdir = _check_radians(wdir, max_radians=4 * np.pi) u = -speed * np.sin(wdir) v = -speed * np.cos(wdir) return u, v @exporter.export @preprocess_xarray @check_units(temperature='[temperature]', speed='[speed]') def windchill(temperature, speed, face_level_winds=False, mask_undefined=True): r"""Calculate the Wind Chill Temperature Index (WCTI). Calculates WCTI from the current temperature and wind speed using the formula outlined by the FCM [FCMR192003]_. Specifically, these formulas assume that wind speed is measured at 10m. If, instead, the speeds are measured at face level, the winds need to be multiplied by a factor of 1.5 (this can be done by specifying `face_level_winds` as `True`.) Parameters ---------- temperature : `pint.Quantity` The air temperature speed : `pint.Quantity` The wind speed at 10m. If instead the winds are at face level, `face_level_winds` should be set to `True` and the 1.5 multiplicative correction will be applied automatically. face_level_winds : bool, optional A flag indicating whether the wind speeds were measured at facial level instead of 10m, thus requiring a correction. Defaults to `False`. mask_undefined : bool, optional A flag indicating whether a masked array should be returned with values where wind chill is undefined masked. These are values where the temperature > 50F or wind speed <= 3 miles per hour. Defaults to `True`. Returns ------- `pint.Quantity` The corresponding Wind Chill Temperature Index value(s) See Also -------- heat_index """ # Correct for lower height measurement of winds if necessary if face_level_winds: # No in-place so that we copy # noinspection PyAugmentAssignment speed = speed * 1.5 temp_limit, speed_limit = 10. * units.degC, 3 * units.mph speed_factor = speed.to('km/hr').magnitude ** 0.16 wcti = units.Quantity((0.6215 + 0.3965 * speed_factor) * temperature.to('degC').magnitude - 11.37 * speed_factor + 13.12, units.degC).to(temperature.units) # See if we need to mask any undefined values if mask_undefined: mask = np.array((temperature > temp_limit) | (speed <= speed_limit)) if mask.any(): wcti = masked_array(wcti, mask=mask) return wcti @exporter.export @preprocess_xarray @check_units('[temperature]') def heat_index(temperature, rh, mask_undefined=True): r"""Calculate the Heat Index from the current temperature and relative humidity. The implementation uses the formula outlined in [Rothfusz1990]_, which is a multi-variable least-squares regression of the values obtained in [Steadman1979]_. Additional conditional corrections are applied to match what the National Weather Service operationally uses. See Figure 3 of [Anderson2013]_ for a depiction of this algorithm and further discussion. Parameters ---------- temperature : `pint.Quantity` Air temperature rh : `pint.Quantity` The relative humidity expressed as a unitless ratio in the range [0, 1]. Can also pass a percentage if proper units are attached. Returns ------- `pint.Quantity` The corresponding Heat Index value(s) Other Parameters ---------------- mask_undefined : bool, optional A flag indicating whether a masked array should be returned with values masked where the temperature < 80F. Defaults to `True`. See Also -------- windchill """ temperature = atleast_1d(temperature) rh = atleast_1d(rh) # assign units to rh if they currently are not present if not hasattr(rh, 'units'): rh = rh * units.dimensionless delta = temperature.to(units.degF) - 0. 
* units.degF rh2 = rh * rh delta2 = delta * delta # Simplifed Heat Index -- constants converted for RH in [0, 1] a = -10.3 * units.degF + 1.1 * delta + 4.7 * units.delta_degF * rh # More refined Heat Index -- constants converted for RH in [0, 1] b = (-42.379 * units.degF + 2.04901523 * delta + 1014.333127 * units.delta_degF * rh - 22.475541 * delta * rh - 6.83783e-3 / units.delta_degF * delta2 - 5.481717e2 * units.delta_degF * rh2 + 1.22874e-1 / units.delta_degF * delta2 * rh + 8.5282 * delta * rh2 - 1.99e-2 / units.delta_degF * delta2 * rh2) # Create return heat index hi = np.full(np.shape(temperature), np.nan) * units.degF # Retain masked status of temperature with resulting heat index if hasattr(temperature, 'mask'): hi = masked_array(hi) # If T <= 40F, Heat Index is T sel = (temperature <= 40. * units.degF) if np.any(sel): hi[sel] = temperature[sel].to(units.degF) # If a < 79F and hi is unset, Heat Index is a sel = (a < 79. * units.degF) & np.isnan(hi) if np.any(sel): hi[sel] = a[sel] # Use b now for anywhere hi has yet to be set sel = np.isnan(hi) if np.any(sel): hi[sel] = b[sel] # Adjustment for RH <= 13% and 80F <= T <= 112F sel = ((rh <= 13. * units.percent) & (temperature >= 80. * units.degF) & (temperature <= 112. * units.degF)) if np.any(sel): rh15adj = ((13. - rh * 100.) / 4. * ((17. * units.delta_degF - np.abs(delta - 95. * units.delta_degF)) / 17. * units.delta_degF) ** 0.5) hi[sel] = hi[sel] - rh15adj[sel] # Adjustment for RH > 85% and 80F <= T <= 87F sel = ((rh > 85. * units.percent) & (temperature >= 80. * units.degF) & (temperature <= 87. * units.degF)) if np.any(sel): rh85adj = 0.02 * (rh * 100. - 85.) * (87. * units.delta_degF - delta) hi[sel] = hi[sel] + rh85adj[sel] # See if we need to mask any undefined values if mask_undefined: mask = np.array(temperature < 80. * units.degF) if mask.any(): hi = masked_array(hi, mask=mask) return hi @exporter.export @preprocess_xarray @check_units(temperature='[temperature]', speed='[speed]') def apparent_temperature(temperature, rh, speed, face_level_winds=False, mask_undefined=True): r"""Calculate the current apparent temperature. Calculates the current apparent temperature based on the wind chill or heat index as appropriate for the current conditions. Follows [NWS10201]_. Parameters ---------- temperature : `pint.Quantity` The air temperature rh : `pint.Quantity` The relative humidity expressed as a unitless ratio in the range [0, 1]. Can also pass a percentage if proper units are attached. speed : `pint.Quantity` The wind speed at 10m. If instead the winds are at face level, `face_level_winds` should be set to `True` and the 1.5 multiplicative correction will be applied automatically. face_level_winds : bool, optional A flag indicating whether the wind speeds were measured at facial level instead of 10m, thus requiring a correction. Defaults to `False`. mask_undefined : bool, optional A flag indicating whether a masked array should be returned with values where wind chill or heat_index is undefined masked. For wind chill, these are values where the temperature > 50F or wind speed <= 3 miles per hour. For heat index, these are values where the temperature < 80F. Defaults to `True`. 
Returns ------- `pint.Quantity` The corresponding apparent temperature value(s) See Also -------- heat_index, windchill """ is_not_scalar = isinstance(temperature.m, (list, tuple, np.ndarray)) temperature = atleast_1d(temperature) rh = atleast_1d(rh) speed = atleast_1d(speed) # NB: mask_defined=True is needed to know where computed values exist wind_chill_temperature = windchill(temperature, speed, face_level_winds=face_level_winds, mask_undefined=True).to(temperature.units) heat_index_temperature = heat_index(temperature, rh, mask_undefined=True).to(temperature.units) # Combine the heat index and wind chill arrays (no point has a value in both) # NB: older numpy.ma.where does not return a masked array app_temperature = masked_array( np.ma.where(masked_array(wind_chill_temperature).mask, heat_index_temperature.to(temperature.units), wind_chill_temperature.to(temperature.units) ), temperature.units) # If mask_undefined is False, then set any masked values to the temperature if not mask_undefined: app_temperature[app_temperature.mask] = temperature[app_temperature.mask] # If no values are masked and provided temperature does not have a mask # we should return a non-masked array if not np.any(app_temperature.mask) and not hasattr(temperature, 'mask'): app_temperature = np.array(app_temperature.m) * temperature.units if is_not_scalar: return app_temperature else: return atleast_1d(app_temperature)[0] @exporter.export @preprocess_xarray @check_units('[pressure]') def pressure_to_height_std(pressure): r"""Convert pressure data to heights using the U.S. standard atmosphere [NOAA1976]_. The implementation uses the formula outlined in [Hobbs1977]_ pg.60-61. Parameters ---------- pressure : `pint.Quantity` Atmospheric pressure Returns ------- `pint.Quantity` The corresponding height value(s) Notes ----- .. math:: Z = \frac{T_0}{\Gamma}[1-\frac{p}{p_0}^\frac{R\Gamma}{g}] """ gamma = 6.5 * units('K/km') return (t0 / gamma) * (1 - (pressure / p0).to('dimensionless')**( mpconsts.Rd * gamma / mpconsts.g)) @exporter.export @preprocess_xarray @check_units('[length]') def height_to_geopotential(height): r"""Compute geopotential for a given height. Calculates the geopotential from height using the following formula, which is derived from the definition of geopotential as given in [Hobbs2006]_ Pg. 69 Eq 3.21: .. math:: \Phi = G m_e \left( \frac{1}{R_e} - \frac{1}{R_e + z}\right) (where :math:`\Phi` is geopotential, :math:`z` is height, :math:`R_e` is average Earth radius, :math:`G` is the (universal) gravitational constant, and :math:`m_e` is the approximate mass of Earth.) Parameters ---------- height : `pint.Quantity` Height above sea level Returns ------- `pint.Quantity` The corresponding geopotential value(s) Examples -------- >>> import metpy.calc >>> from metpy.units import units >>> height = np.linspace(0, 10000, num=11) * units.m >>> geopot = metpy.calc.height_to_geopotential(height) >>> geopot <Quantity([ 0. 9817.46806283 19631.85526579 29443.16305887 39251.39289118 49056.54621087 58858.62446524 68657.62910064 78453.56156252 88246.42329544 98036.21574305], 'meter ** 2 / second ** 2')> """ # Direct implementation of formula from Hobbs yields poor numerical results (see # gh-1075), so was replaced with algebraic equivalent. return (mpconsts.G * mpconsts.me / mpconsts.Re) * (height / (mpconsts.Re + height)) @exporter.export @preprocess_xarray def geopotential_to_height(geopot): r"""Compute height from a given geopotential. 
Calculates the height from geopotential using the following formula, which is derived from the definition of geopotential as given in [Hobbs2006]_ Pg. 69 Eq 3.21: .. math:: z = \frac{1}{\frac{1}{R_e} - \frac{\Phi}{G m_e}} - R_e (where :math:`\Phi` is geopotential, :math:`z` is height, :math:`R_e` is average Earth radius, :math:`G` is the (universal) gravitational constant, and :math:`m_e` is the approximate mass of Earth.) Parameters ---------- geopotential : `pint.Quantity` Geopotential Returns ------- `pint.Quantity` The corresponding height value(s) Examples -------- >>> import metpy.calc >>> from metpy.units import units >>> height = np.linspace(0, 10000, num=11) * units.m >>> geopot = metpy.calc.height_to_geopotential(height) >>> geopot <Quantity([ 0. 9817.46806283 19631.85526579 29443.16305887 39251.39289118 49056.54621087 58858.62446524 68657.62910064 78453.56156252 88246.42329544 98036.21574305], 'meter ** 2 / second ** 2')> >>> height = metpy.calc.geopotential_to_height(geopot) >>> height <Quantity([ 0. 1000. 2000. 3000. 4000. 5000. 6000. 7000. 8000. 9000. 10000.], 'meter')> """ # Direct implementation of formula from Hobbs yields poor numerical results (see # gh-1075), so was replaced with algebraic equivalent. scaled = geopot * mpconsts.Re return scaled * mpconsts.Re / (mpconsts.G * mpconsts.me - scaled) @exporter.export @preprocess_xarray @check_units('[length]') def height_to_pressure_std(height): r"""Convert height data to pressures using the U.S. standard atmosphere [NOAA1976]_. The implementation inverts the formula outlined in [Hobbs1977]_ pg.60-61. Parameters ---------- height : `pint.Quantity` Atmospheric height Returns ------- `pint.Quantity` The corresponding pressure value(s) Notes ----- .. math:: p = p_0 e^{\frac{g}{R \Gamma} \text{ln}(1-\frac{Z \Gamma}{T_0})} """ gamma = 6.5 * units('K/km') return p0 * (1 - (gamma / t0) * height) ** (mpconsts.g / (mpconsts.Rd * gamma)) @exporter.export @preprocess_xarray def coriolis_parameter(latitude): r"""Calculate the coriolis parameter at each point. The implementation uses the formula outlined in [Hobbs1977]_ pg.370-371. Parameters ---------- latitude : array_like Latitude at each point Returns ------- `pint.Quantity` The corresponding coriolis force at each point """ latitude = _check_radians(latitude, max_radians=np.pi / 2) return (2. * mpconsts.omega * np.sin(latitude)).to('1/s') @exporter.export @preprocess_xarray @check_units('[pressure]', '[length]') def add_height_to_pressure(pressure, height): r"""Calculate the pressure at a certain height above another pressure level. This assumes a standard atmosphere [NOAA1976]_. Parameters ---------- pressure : `pint.Quantity` Pressure level height : `pint.Quantity` Height above a pressure level Returns ------- `pint.Quantity` The corresponding pressure value for the height above the pressure level See Also -------- pressure_to_height_std, height_to_pressure_std, add_pressure_to_height """ pressure_level_height = pressure_to_height_std(pressure) return height_to_pressure_std(pressure_level_height + height) @exporter.export @preprocess_xarray @check_units('[length]', '[pressure]') def add_pressure_to_height(height, pressure): r"""Calculate the height at a certain pressure above another height. This assumes a standard atmosphere [NOAA1976]_. 
Parameters ---------- height : `pint.Quantity` Height level pressure : `pint.Quantity` Pressure above height level Returns ------- `pint.Quantity` The corresponding height value for the pressure above the height level See Also -------- pressure_to_height_std, height_to_pressure_std, add_height_to_pressure """ pressure_at_height = height_to_pressure_std(height) return pressure_to_height_std(pressure_at_height - pressure) @exporter.export @preprocess_xarray @check_units('[dimensionless]', '[pressure]', '[pressure]') def sigma_to_pressure(sigma, psfc, ptop): r"""Calculate pressure from sigma values. Parameters ---------- sigma : ndarray The sigma levels to be converted to pressure levels. psfc : `pint.Quantity` The surface pressure value. ptop : `pint.Quantity` The pressure value at the top of the model domain. Returns ------- `pint.Quantity` The pressure values at the given sigma levels. Notes ----- Sigma definition adapted from [Philips1957]_. .. math:: p = \sigma * (p_{sfc} - p_{top}) + p_{top} * :math:`p` is pressure at a given `\sigma` level * :math:`\sigma` is non-dimensional, scaled pressure * :math:`p_{sfc}` is pressure at the surface or model floor * :math:`p_{top}` is pressure at the top of the model domain """ if np.any(sigma < 0) or np.any(sigma > 1): raise ValueError('Sigma values should be bounded by 0 and 1') if psfc.magnitude < 0 or ptop.magnitude < 0: raise ValueError('Pressure input should be non-negative') return sigma * (psfc - ptop) + ptop @exporter.export @preprocess_xarray def smooth_gaussian(scalar_grid, n): """Filter with normal distribution of weights. Parameters ---------- scalar_grid : `pint.Quantity` Some n-dimensional scalar grid. If more than two axes, smoothing is only done across the last two. n : int Degree of filtering Returns ------- `pint.Quantity` The filtered 2D scalar grid Notes ----- This function is a close replication of the GEMPAK function GWFS, but is not identical. The following notes are incorporated from the GEMPAK source code: This function smoothes a scalar grid using a moving average low-pass filter whose weights are determined by the normal (Gaussian) probability distribution function for two dimensions. The weight given to any grid point within the area covered by the moving average for a target grid point is proportional to EXP [ -( D ** 2 ) ], where D is the distance from that point to the target point divided by the standard deviation of the normal distribution. The value of the standard deviation is determined by the degree of filtering requested. The degree of filtering is specified by an integer. This integer is the number of grid increments from crest to crest of the wave for which the theoretical response is 1/e = .3679. If the grid increment is called delta_x, and the value of this integer is represented by N, then the theoretical filter response function value for the N * delta_x wave will be 1/e. The actual response function will be greater than the theoretical value. The larger N is, the more severe the filtering will be, because the response function for all wavelengths shorter than N * delta_x will be less than 1/e. Furthermore, as N is increased, the slope of the filter response function becomes more shallow; so, the response at all wavelengths decreases, but the amount of decrease lessens with increasing wavelength. (The theoretical response function can be obtained easily--it is the Fourier transform of the weight function described above.) The area of the patch covered by the moving average varies with N. 
As N gets bigger, the smoothing gets stronger, and weight values farther from the target grid point are larger because the standard deviation of the normal distribution is bigger. Thus, increasing N has the effect of expanding the moving average window as well as changing the values of weights. The patch is a square covering all points whose weight values are within two standard deviations of the mean of the two dimensional normal distribution. The key difference between GEMPAK's GWFS and this function is that, in GEMPAK, the leftover weight values representing the fringe of the distribution are applied to the target grid point. In this function, the leftover weights are not used. When this function is invoked, the first argument is the grid to be smoothed, the second is the value of N as described above: GWFS ( S, N ) where N > 1. If N <= 1, N = 2 is assumed. For example, if N = 4, then the 4 delta x wave length is passed with approximate response 1/e. """ # Compute standard deviation in a manner consistent with GEMPAK n = int(round(n)) if n < 2: n = 2 sgma = n / (2 * np.pi) # Construct sigma sequence so smoothing occurs only in horizontal direction nax = len(scalar_grid.shape) # Assume the last two axes represent the horizontal directions sgma_seq = [sgma if i > nax - 3 else 0 for i in range(nax)] # Compute smoothed field and reattach units res = gaussian_filter(scalar_grid, sgma_seq, truncate=2 * np.sqrt(2)) if hasattr(scalar_grid, 'units'): res = res * scalar_grid.units return res @exporter.export @preprocess_xarray def smooth_n_point(scalar_grid, n=5, passes=1): """Filter with normal distribution of weights. Parameters ---------- scalar_grid : array-like or `pint.Quantity` Some 2D scalar grid to be smoothed. n: int The number of points to use in smoothing, only valid inputs are 5 and 9. Defaults to 5. passes : int The number of times to apply the filter to the grid. Defaults to 1. Returns ------- array-like or `pint.Quantity` The filtered 2D scalar grid. Notes ----- This function is a close replication of the GEMPAK function SM5S and SM9S depending on the choice of the number of points to use for smoothing. This function can be applied multiple times to create a more smoothed field and will only smooth the interior points, leaving the end points with their original values. If a masked value or NaN values exists in the array, it will propagate to any point that uses that particular grid point in the smoothing calculation. Applying the smoothing function multiple times will propogate NaNs further throughout the domain. """ if n == 9: p = 0.25 q = 0.125 r = 0.0625 elif n == 5: p = 0.5 q = 0.125 r = 0.0 else: raise ValueError('The number of points to use in the smoothing ' 'calculation must be either 5 or 9.') smooth_grid = scalar_grid[:].copy() for _i in range(passes): smooth_grid[1:-1, 1:-1] = (p * smooth_grid[1:-1, 1:-1] + q * (smooth_grid[2:, 1:-1] + smooth_grid[1:-1, 2:] + smooth_grid[:-2, 1:-1] + smooth_grid[1:-1, :-2]) + r * (smooth_grid[2:, 2:] + smooth_grid[2:, :-2] + + smooth_grid[:-2, 2:] + smooth_grid[:-2, :-2])) return smooth_grid @exporter.export @preprocess_xarray @check_units('[pressure]', '[length]') def altimeter_to_station_pressure(altimeter_value, height): r"""Convert the altimeter measurement to station pressure. This function is useful for working with METARs since they do not provide altimeter values, but not sea-level pressure or station pressure. 


@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[length]')
def altimeter_to_station_pressure(altimeter_value, height):
    r"""Convert the altimeter measurement to station pressure.

    This function is useful for working with METARs since they do provide altimeter
    values, but not sea-level pressure or station pressure.

    The following definitions of altimeter setting and station pressure are taken
    from [Smithsonian1951]_.

    Altimeter setting is the pressure value to which an aircraft altimeter scale is
    set so that it will indicate the altitude above mean sea-level of an aircraft on
    the ground at the location for which the value is determined. It assumes a
    standard atmosphere [NOAA1976]_. Station pressure is the atmospheric pressure at
    the designated station elevation.

    Finding the station pressure can be helpful for calculating sea-level pressure
    or other parameters.

    Parameters
    ----------
    altimeter_value : `pint.Quantity`
        The altimeter setting value as defined by the METAR or other observation,
        which can be measured in either inches of mercury (in. Hg) or millibars (mb)
    height : `pint.Quantity`
        Elevation of the station measuring pressure.

    Returns
    -------
    `pint.Quantity`
        The station pressure in hPa or in. Hg, which can be used to calculate
        sea-level pressure

    See Also
    --------
    altimeter_to_sea_level_pressure

    Notes
    -----
    This function is implemented using the following equations from the
    Smithsonian Handbook (1951) p. 269

    Equation 1:

    .. math:: A_{mb} = (p_{mb} - 0.3)F

    Equation 3:

    .. math:: F = \left [1 + \left(\frac{p_{0}^n \gamma}{T_{0}} \right)
              \frac{H_{b}}{p_{1}^n} \right ] ^ \frac{1}{n}

    where

    :math:`p_{0}` = standard sea-level pressure = 1013.25 mb

    :math:`p_{1} = p_{mb} - 0.3` when :math:`p_{0} = 1013.25 mb`

    :math:`\gamma` = lapse rate in the [NOAA1976]_ standard atmosphere below the
    isothermal layer, :math:`6.5^{\circ}C\ km^{-1}`

    :math:`T_{0}` = standard sea-level temperature, 288 K

    :math:`H_{b}` = station elevation in meters (elevation for which station
    pressure is given)

    :math:`n = \frac{\gamma R_{d}}{g} = 0.190284`, where :math:`R_{d}` is the gas
    constant for dry air

    Solving for :math:`p_{mb}` results in the equation below, which is used to
    calculate station pressure :math:`(p_{mb})`:

    .. math:: p_{mb} = \left [A_{mb} ^ n - \left (\frac{p_{0}^n \gamma H_{b}}{T_0}
              \right) \right] ^ \frac{1}{n} + 0.3

    """
    # Gamma Value for this case
    gamma = 0.0065 * units('K/m')

    # N-Value
    n = (mpconsts.Rd * gamma / mpconsts.g).to_base_units()

    return ((altimeter_value ** n
             - ((p0.to(altimeter_value.units) ** n * gamma * height) / t0)) ** (1 / n)
            + 0.3 * units.hPa)
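
# Illustrative usage sketch for altimeter_to_station_pressure (station values assumed
# for demonstration, not taken from the source):
#
#     altimeter = 29.92 * units.inHg
#     elevation = 500. * units.m
#     altimeter_to_station_pressure(altimeter, elevation)
#
# For a station roughly 500 m above sea level this yields a station pressure on the
# order of 55-60 hPa below the (sea-level-referenced) altimeter setting.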


@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[length]', '[temperature]')
def altimeter_to_sea_level_pressure(altimeter_value, height, temperature):
    r"""Convert the altimeter setting to sea-level pressure.

    This function is useful for working with METARs since most provide altimeter
    values, but not sea-level pressure, which is often plotted on surface maps.

    The following definitions of altimeter setting, station pressure, and sea-level
    pressure are taken from [Smithsonian1951]_.

    Altimeter setting is the pressure value to which an aircraft altimeter scale is
    set so that it will indicate the altitude above mean sea-level of an aircraft on
    the ground at the location for which the value is determined. It assumes a
    standard atmosphere. Station pressure is the atmospheric pressure at the
    designated station elevation.

    Sea-level pressure is a pressure value obtained by the theoretical reduction of
    barometric pressure to sea level. It is assumed that the atmosphere extends to
    sea level below the station and that the properties of the atmosphere are related
    to conditions observed at the station. This value is recorded by some surface
    observation stations, but not all. If the value is recorded, it can be found in
    the remarks section.

    Finding the sea-level pressure is helpful for plotting purposes and different
    calculations.

    Parameters
    ----------
    altimeter_value : `pint.Quantity`
        The altimeter setting value as defined by the METAR or other observation,
        with units of inches of mercury (in Hg) or millibars (hPa)
    height : `pint.Quantity`
        Elevation of the station measuring pressure. Often measured in meters
    temperature : `pint.Quantity`
        Temperature at the station

    Returns
    -------
    `pint.Quantity`
        The sea-level pressure in hPa, which makes pressure values easier to compare
        between different stations

    See Also
    --------
    altimeter_to_station_pressure

    Notes
    -----
    This function is implemented using the following equations from
    Wallace and Hobbs (1977).

    Equation 2.29:

    .. math:: \Delta z = Z_{2} - Z_{1}
              = \frac{R_{d} \bar T_{v}}{g_0}\ln\left(\frac{p_{1}}{p_{2}}\right)
              = \bar H \ln \left (\frac {p_{1}}{p_{2}} \right)

    Equation 2.31:

    .. math:: p_{0} = p_{g}\exp \left(\frac{Z_{g}}{\bar H} \right) \\
              = p_{g}\exp \left(\frac{g_{0}Z_{g}}{R_{d}\bar T_{v}} \right)

    Then by substituting :math:`\Delta z` for :math:`Z_{g}` in Equation 2.31:

    .. math:: p_{sea\,level} = p_{station} \exp\left(\frac{\Delta z}{H}\right)

    where :math:`\Delta z` is the elevation in meters and :math:`H = \frac{R_{d}T}{g}`.

    """
    # Calculate the station pressure using function altimeter_to_station_pressure()
    psfc = altimeter_to_station_pressure(altimeter_value, height)

    # Calculate the scale height
    h = mpconsts.Rd * temperature / mpconsts.g

    return psfc * np.exp(height / h)


def _check_radians(value, max_radians=2 * np.pi):
    """Input validation of values that could be in degrees instead of radians.

    Parameters
    ----------
    value : `pint.Quantity`
        The input value to check.
    max_radians : float
        Maximum absolute value of radians before warning.

    Returns
    -------
    `pint.Quantity`
        The input value

    """
    try:
        value = value.to('radians').m
    except AttributeError:
        pass
    if np.greater(np.nanmax(np.abs(value)), max_radians):
        warnings.warn('Input over {} radians. '
                      'Ensure proper units are given.'.format(max_radians))
    return value
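
# Illustrative end-to-end sketch for altimeter_to_sea_level_pressure (station values
# assumed, not from the source):
#
#     altimeter = 30.01 * units.inHg
#     elevation = 350. * units.m
#     temperature = 288. * units.kelvin
#     altimeter_to_sea_level_pressure(altimeter, elevation, temperature)
#
# The reduction uses the scale height H = Rd * T / g, so for the same elevation a
# warmer station is corrected to sea level slightly less than a colder one.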