hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f722240a67244d196847090cdf5dd01aa951f23d | 61,618 | py | Python | cumulusci/robotframework/Salesforce.py | leboff/CumulusCI | 81edbb1d64f2cc215a951c570052a1e423821cc1 | [
"BSD-3-Clause"
] | null | null | null | cumulusci/robotframework/Salesforce.py | leboff/CumulusCI | 81edbb1d64f2cc215a951c570052a1e423821cc1 | [
"BSD-3-Clause"
] | null | null | null | cumulusci/robotframework/Salesforce.py | leboff/CumulusCI | 81edbb1d64f2cc215a951c570052a1e423821cc1 | [
"BSD-3-Clause"
] | null | null | null | import importlib
import logging
import re
import time
from datetime import datetime
from dateutil.parser import parse as parse_date, ParserError
from pprint import pformat
from robot.libraries.BuiltIn import BuiltIn, RobotNotRunningError
from robot.utils import timestr_to_secs
from cumulusci.robotframework.utils import get_locator_module_name
from cumulusci.robotframework.form_handlers import get_form_handler
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import (
StaleElementReferenceException,
NoSuchElementException,
JavascriptException,
WebDriverException,
)
import faker
from simple_salesforce import SalesforceResourceNotFound
from cumulusci.robotframework.utils import selenium_retry, capture_screenshot_on_error
from SeleniumLibrary.errors import ElementNotFound, NoOpenBrowser
from urllib3.exceptions import ProtocolError
from cumulusci.core.template_utils import format_str
from cumulusci.robotframework import locator_manager
# Regex matching a 15- or 18-character Salesforce record id path segment,
# optionally prefixed with a URL-encoded slash ("%2F") as seen in record URLs.
OID_REGEX = r"^(%2F)?([a-zA-Z0-9]{15,18})$"

# Key used to stash the raw API status record on inserted objects; a tuple
# can never collide with a real (string) sObject field name.
STATUS_KEY = ("status",)

lex_locators = {}  # will be initialized when Salesforce is instantiated

# https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_composite_sobjects_collections_create.htm
SF_COLLECTION_INSERTION_LIMIT = 200
@selenium_retry
class Salesforce(object):
    """A keyword library for working with Salesforce Lightning pages

    While you can import this directly into any suite, the recommended way
    to include this in a test suite is to import the ``Salesforce.robot``
    resource file.
    """

    # GLOBAL scope: one shared instance (and one session-record list) for
    # the whole test run.
    ROBOT_LIBRARY_SCOPE = "GLOBAL"
def __init__(self, debug=False, locators=None):
    """Initialize the keyword library.

    :param debug: enables extra debug behavior when truthy.
    :param locators: optional locator dict; when given it is merged into
        the module-level ``lex_locators`` instead of loading the
        version-specific locator module.
    """
    self.debug = debug
    self._session_records = []
    # Turn off info logging of all http requests
    logging.getLogger("requests.packages.urllib3.connectionpool").setLevel(
        logging.WARN
    )
    if locators:
        lex_locators.update(locators)
    else:
        self._init_locators()
    # Default fake-data locale; can be changed with `Set Faker Locale`.
    self._faker = faker.Faker("en_US")
    try:
        self.builtin.set_global_variable("${faker}", self._faker)
    except RobotNotRunningError:
        # this only happens during unit tests, and we don't care.
        pass
def _init_locators(self):
    """Load the appropriate locator file for the current version

    If no version can be determined, we'll use the highest numbered
    locator file name.
    """
    try:
        # e.g. "51.0" -> 51
        version = int(float(self.get_latest_api_version()))
    except RobotNotRunningError:
        # Likely this means we are running in the context of
        # documentation generation. Setting the version to
        # None will result in using the latest version of
        # locators.
        version = None
    locator_module_name = get_locator_module_name(version)
    self.locators_module = importlib.import_module(locator_module_name)
    # Merge into the shared module-level dict so the "sf:" location
    # strategy and get_locator see the loaded locators.
    lex_locators.update(self.locators_module.lex_locators)
@property
def builtin(self):
    """Robot Framework's BuiltIn library, fetched fresh on each access."""
    builtin_library = BuiltIn()
    return builtin_library
@property
def cumulusci(self):
    """The CumulusCI robot library instance registered with Robot Framework."""
    library_name = "cumulusci.robotframework.CumulusCI"
    return self.builtin.get_library_instance(library_name)
def initialize_location_strategies(self):
    """Initialize the Salesforce custom location strategies

    Registers the "sf", "text", "title", and "label" location strategies
    with SeleniumLibrary. Guarded by a suite variable so registration
    happens only once.

    Note: This keyword is called automatically from *Open Test Browser*
    """
    if not self.builtin.get_variable_value(
        "${LOCATION STRATEGIES INITIALIZED}", False
    ):
        # this manages strategies based on locators in a dictionary
        locator_manager.register_locators("sf", lex_locators)
        locator_manager.add_location_strategies()
        # these are more traditional location strategies based on keywords
        # or functions
        self.selenium.add_location_strategy(
            "text", "Salesforce.Locate Element by Text"
        )
        self.selenium.add_location_strategy(
            "title", "Salesforce.Locate Element by Title"
        )
        self.selenium.add_location_strategy("label", self.locate_element_by_label)
        self.builtin.set_suite_variable("${LOCATION STRATEGIES INITIALIZED}", True)
@selenium_retry(False)
def _jsclick(self, locator):
    """Use javascript to click an element on the page

    Waits for the element to be present and enabled, then focuses and
    clicks it via ``execute_script``. Retries once (after a 1 second
    pause) on StaleElementReferenceException.

    See https://help.salesforce.com/articleView?id=000352057&language=en_US&mode=1&type=1
    """
    self.selenium.wait_until_page_contains_element(locator)
    self.selenium.wait_until_element_is_enabled(locator)
    for should_retry in (True, False):
        try:
            # Setting the focus first seems to be required as of Spring'20
            # (read: without it, tests started failing in that release). I
            # suspect it's because there is a focusOut handler on form
            # fields which need to be triggered for data to be accepted.
            element = self.selenium.get_webelement(locator)
            self.selenium.driver.execute_script(
                "arguments[0].focus(); arguments[0].click()", element
            )
            return
        except StaleElementReferenceException:
            if should_retry:
                time.sleep(1)
            else:
                raise
def set_faker_locale(self, locale):
    """Set the locale for fake data

    This sets the locale for all calls to the ``Faker`` keyword
    and ``${faker}`` variable. The default is en_US

    For a list of supported locales see
    [https://faker.readthedocs.io/en/master/locales.html|Localized Providers]
    in the Faker documentation.

    Example

    | Set Faker Locale    fr_FR
    | ${french_address}=  Faker  address
    """
    try:
        new_faker = faker.Faker(locale)
    except AttributeError:
        raise Exception(f"Unknown locale for fake data: '{locale}'")
    self._faker = new_faker
def get_fake_data(self, fake: str, *args, **kwargs):
    """Return fake data

    This uses the [https://faker.readthedocs.io/en/master/|Faker]
    library to provide fake data in a variety of formats (names,
    addresses, credit card numbers, dates, phone numbers, etc) and
    locales (en_US, fr_FR, etc).

    The _fake_ argument is the name of a faker property such as
    ``first_name``, ``address``, ``lorem``, etc. Additional
    arguments depend on type of data requested. For a
    comprehensive list of the types of fake data that can be
    generated see
    [https://faker.readthedocs.io/en/master/providers.html|Faker
    providers] in the Faker documentation.

    The return value is typically a string, though in some cases
    some other type of object will be returned. For example, the
    ``date_between`` fake returns a
    [https://docs.python.org/3/library/datetime.html#date-objects|datetime.date
    object]. Each time a piece of fake data is requested it will
    be regenerated, so that multiple calls will usually return
    different data.

    This keyword can also be called using robot's extended variable
    syntax using the variable ``${faker}``. In such a case, the
    data being asked for is a method call and arguments must be
    enclosed in parentheses and be quoted. Arguments should not be
    quoted when using the keyword.

    To generate fake data for a locale other than en_US, use
    the keyword ``Set Faker Locale`` prior to calling this keyword.

    Examples

    | # Generate a fake first name
    | ${first_name}= Get fake data  first_name

    | # Generate a fake date in the default format
    | ${date}= Get fake data  date

    | # Generate a fake date with an explicit format
    | ${date}= Get fake data  date  pattern=%Y-%m-%d

    | # Generate a fake date using extended variable syntax
    | Input text  //input  ${faker.date(pattern='%Y-%m-%d')}
    """
    try:
        return self._faker.format(fake, *args, **kwargs)
    except AttributeError:
        # Faker raises AttributeError for unknown formatter names;
        # re-raise with a keyword-friendly message.
        raise Exception(f"Unknown fake data request: '{fake}'")
def get_latest_api_version(self):
    """Return the latest Salesforce API version reported by the org."""
    org = self.cumulusci.org
    return org.latest_api_version
def create_webdriver_with_retry(self, *args, **kwargs):
    """Call the Create Webdriver keyword.

    Retry on connection resets which can happen if custom domain propagation
    is slow. Tries up to 12 times, sleeping 5 seconds between attempts.

    Raises ``Exception`` if no attempt succeeds.
    """
    # Get selenium without referencing selenium.driver which doesn't exist yet
    selenium = self.builtin.get_library_instance("SeleniumLibrary")
    attempts = 12
    for attempt in range(attempts):
        try:
            return selenium.create_webdriver(*args, **kwargs)
        except ProtocolError:
            # Give browser some more time to start up, but don't sleep
            # after the final attempt -- we're about to raise anyway.
            if attempt < attempts - 1:
                time.sleep(5)
    raise Exception("Could not connect to remote webdriver after 1 minute")
@capture_screenshot_on_error
def click_modal_button(self, title):
    """Clicks a button in a Lightning modal."""
    button = lex_locators["modal"]["button"].format(title)
    self.selenium.wait_until_page_contains_element(button)
    self.selenium.wait_until_element_is_enabled(button)
    self._jsclick(button)
@capture_screenshot_on_error
def click_object_button(self, title):
    """Clicks a button in an object's actions."""
    button = lex_locators["object"]["button"].format(title=title)
    self._jsclick(button)
    # The action buttons open a modal; wait for it before returning.
    self.wait_until_modal_is_open()
@capture_screenshot_on_error
def scroll_element_into_view(self, locator):
    """Scroll the element identified by 'locator'

    This is a replacement for the keyword of the same name in
    SeleniumLibrary. The SeleniumLibrary implementation uses
    an unreliable method on Firefox. This keyword uses
    a more reliable technique.

    For more info see https://stackoverflow.com/a/52045231/7432
    """
    target = self.selenium.get_webelement(locator)
    driver = self.selenium.driver
    driver.execute_script("arguments[0].scrollIntoView()", target)
@capture_screenshot_on_error
def load_related_list(self, heading, tries=10):
    """Scrolls down until the specified related list loads.

    If the related list isn't found, the keyword will scroll down
    in 100 pixel increments to trigger lightning into loading the
    list. This process of scrolling will be repeated until the
    related list has been loaded or we've tried several times
    (the default is 10 tries)

    Raises ``AssertionError`` if the list never loads.
    """
    locator = lex_locators["record"]["related"]["card"].format(heading)
    # idiom fix: the loop counter was unused, so use "_"
    for _ in range(tries):
        try:
            self.selenium.scroll_element_into_view(locator)
            return
        except (ElementNotFound, JavascriptException, WebDriverException):
            self.builtin.log(
                f"related list '{heading}' not found; scrolling...", "DEBUG"
            )
            # Nudge the page to make lightning lazy-load the list.
            self.selenium.execute_javascript("window.scrollBy(0, 100)")
            self.wait_for_aura()
    raise AssertionError(f"Timed out waiting for related list '{heading}' to load.")
def click_related_list_button(self, heading, button_title):
    """Clicks a button in the heading of a related list.

    Waits for a modal to open after clicking the button.
    """
    self.load_related_list(heading)
    button = lex_locators["record"]["related"]["button"].format(
        heading, button_title
    )
    self._jsclick(button)
    self.wait_until_modal_is_open()
@capture_screenshot_on_error
def click_related_item_link(self, heading, title):
    """Clicks a link in the related list with the specified heading.

    This keyword will automatically call *Wait until loading is complete*.

    Raises ``Exception`` if the link cannot be found or clicked.
    """
    self.load_related_list(heading)
    locator = lex_locators["record"]["related"]["link"].format(heading, title)
    try:
        self._jsclick(locator)
    except Exception as e:
        self.builtin.log(f"Exception: {e}", "DEBUG")
        # Chain the original exception so the root cause is preserved
        # in the traceback instead of being swallowed.
        raise Exception(
            f"Unable to find related link under heading '{heading}' with the text '{title}'"
        ) from e
    self.wait_until_loading_is_complete()
def click_related_item_popup_link(self, heading, title, link):
    """Clicks a link in the popup menu for a related list item.

    heading specifies the name of the list,
    title specifies the name of the item,
    and link specifies the name of the link
    """
    self.load_related_list(heading)
    trigger = lex_locators["record"]["related"]["popup_trigger"].format(
        heading, title
    )
    self.selenium.wait_until_page_contains_element(trigger)
    self._jsclick(trigger)
    popup_link = lex_locators["popup"]["link"].format(link)
    self._jsclick(popup_link)
    self.wait_until_loading_is_complete()
def close_modal(self):
    """Closes the open modal"""
    self._jsclick(lex_locators["modal"]["close"])
def current_app_should_be(self, app_name):
    """Validates the currently selected Salesforce App"""
    locator = lex_locators["app_launcher"]["current_app"].format(app_name)
    actual = self.selenium.get_webelement(locator).text
    message = "Expected app to be {} but found {}".format(app_name, actual)
    assert app_name == actual, message
def delete_session_records(self):
    """Deletes records that were created while running this test case.

    (Only records specifically recorded using the Store Session Record
    keyword are deleted.)
    """
    # Delete in reverse creation order so dependent records go first.
    self._session_records.reverse()
    self.builtin.log("Deleting {} records".format(len(self._session_records)))
    # Iterate over a copy: salesforce_delete calls remove_session_record,
    # which mutates self._session_records while we loop.
    for record in self._session_records[:]:
        self.builtin.log("  Deleting {type} {id}".format(**record))
        try:
            self.salesforce_delete(record["type"], record["id"])
        except SalesforceResourceNotFound:
            self.builtin.log("    {type} {id} is already deleted".format(**record))
        except Exception as e:
            # Best-effort cleanup: log and keep deleting the rest.
            self.builtin.log(
                "    {type} {id} could not be deleted:".format(**record),
                level="WARN",
            )
            self.builtin.log("      {}".format(e), level="WARN")
def get_active_browser_ids(self):
    """Return the id of all open browser ids

    Returns an empty list when no browser is open.
    """
    # This relies on some private data structures, but presently
    # there is no other way. There's been a discussion in the
    # robot slack channels about adding a new keyword that does
    # what this keyword does. When that happens, we can remove
    # this keyword.
    driver_ids = []
    try:
        driver_cache = self.selenium._drivers
    except NoOpenBrowser:
        return []
    for index, driver in enumerate(driver_cache._connections):
        if driver not in driver_cache._closed:
            # SeleniumLibrary driver ids start at one rather than zero
            driver_ids.append(index + 1)
    return driver_ids
def get_current_record_id(self):
    """Parses the current url to get the object id of the current record.

    Expects url format like: [a-zA-Z0-9]{15,18}
    """
    url = self.selenium.get_location()
    for segment in url.split("/"):
        match = re.match(OID_REGEX, segment)
        if match:
            return match.group(2)
    raise AssertionError("Could not parse record id from url: {}".format(url))
def field_value_should_be(self, label, expected_value):
    """Verify that the form field for the given label is the expected value

    Example:

    | Field value should be    Account Name    ACME Labs
    """
    actual_value = self.get_field_value(label)
    self.builtin.should_be_equal(actual_value, expected_value)
def get_field_value(self, label):
    """Return the current value of a form field based on the field label

    The lookup strategy depends on the org's API version: 51+ can read
    the value directly off the input element; older releases need
    different locators depending on the widget type.
    """
    api_version = int(float(self.get_latest_api_version()))
    locator = self._get_input_field_locator(label)
    if api_version >= 51:
        # this works for both First Name (input) and Account Name (picklist)
        value = self.selenium.get_value(locator)
    else:
        # older releases it's a bit more complex
        element = self.selenium.get_webelement(locator)
        if element.get_attribute("role") == "combobox":
            value = self.selenium.get_text(f"sf:object.field_lookup_value:{label}")
        else:
            value = self.selenium.get_value(f"sf:object.field:{label}")
    return value
def get_locator(self, path, *args, **kwargs):
    """Returns a rendered locator string from the Salesforce lex_locators
    dictionary. This can be useful if you want to use an element in
    a different way than the built in keywords allow.
    """
    template = lex_locators
    for segment in path.split("."):
        template = template[segment]
    return template.format(*args, **kwargs)
def get_record_type_id(self, obj_type, developer_name):
    """Returns the Record Type Id for a record type name"""
    soql = (
        "SELECT Id FROM RecordType "
        "WHERE SObjectType='{}' and DeveloperName='{}'"
    ).format(obj_type, developer_name)
    result = self.cumulusci.sf.query_all(soql)
    first_record = result["records"][0]
    return first_record["Id"]
def get_related_list_count(self, heading):
    """Returns the number of items indicated for a related list."""
    locator = lex_locators["record"]["related"]["count"].format(heading)
    count_text = self.selenium.get_webelement(locator).text
    # The count is rendered wrapped in parentheses, e.g. "(3)".
    for paren in "()":
        count_text = count_text.replace(paren, "")
    return int(count_text)
def go_to_object_home(self, obj_name):
    """Navigates to the Home view of a Salesforce Object"""
    base = self.cumulusci.org.lightning_base_url
    self.selenium.go_to("{}/lightning/o/{}/home".format(base, obj_name))
    self.wait_until_loading_is_complete(lex_locators["actions"])
def go_to_object_list(self, obj_name, filter_name=None):
    """Navigates to the Home view of a Salesforce Object"""
    base = self.cumulusci.org.lightning_base_url
    url = f"{base}/lightning/o/{obj_name}/list"
    if filter_name:
        url = f"{url}?filterName={filter_name}"
    self.selenium.go_to(url)
    self.wait_until_loading_is_complete(lex_locators["actions"])
def go_to_record_home(self, obj_id):
    """Navigates to the Home view of a Salesforce Object"""
    base = self.cumulusci.org.lightning_base_url
    self.selenium.go_to("{}/lightning/r/{}/view".format(base, obj_id))
    self.wait_until_loading_is_complete(lex_locators["actions"])
def go_to_setup_home(self):
    """Navigates to the Home tab of Salesforce Setup"""
    base = self.cumulusci.org.lightning_base_url
    self.selenium.go_to(base + "/lightning/setup/SetupOneHome/home")
    self.wait_until_loading_is_complete()
def go_to_setup_object_manager(self):
    """Navigates to the Object Manager tab of Salesforce Setup"""
    base = self.cumulusci.org.lightning_base_url
    self.selenium.go_to(base + "/lightning/setup/ObjectManager/home")
    self.wait_until_loading_is_complete()
def header_field_should_have_value(self, label):
    """Validates that a field in the record header has a text value.

    NOTE: Use other keywords for non-string value types
    """
    field = lex_locators["record"]["header"]["field_value"].format(label)
    self.selenium.page_should_contain_element(field)
def header_field_should_not_have_value(self, label):
    """Validates that a field in the record header does not have a value.

    NOTE: Use other keywords for non-string value types
    """
    field = lex_locators["record"]["header"]["field_value"].format(label)
    self.selenium.page_should_not_contain_element(field)
def header_field_should_have_link(self, label):
    """Validates that a field in the record header has a link as its value"""
    link = lex_locators["record"]["header"]["field_value_link"].format(label)
    self.selenium.page_should_contain_element(link)
def header_field_should_not_have_link(self, label):
    """Validates that a field in the record header does not have a link as its value"""
    link = lex_locators["record"]["header"]["field_value_link"].format(label)
    self.selenium.page_should_not_contain_element(link)
def click_header_field_link(self, label):
    """Clicks a link in record header."""
    link = lex_locators["record"]["header"]["field_value_link"].format(label)
    self._jsclick(link)
def header_field_should_be_checked(self, label):
    """Validates that a checkbox field in the record header is checked"""
    checkbox = lex_locators["record"]["header"]["field_value_checked"].format(label)
    self.selenium.page_should_contain_element(checkbox)
def header_field_should_be_unchecked(self, label):
    """Validates that a checkbox field in the record header is unchecked"""
    checkbox = lex_locators["record"]["header"]["field_value_unchecked"].format(
        label
    )
    self.selenium.page_should_contain_element(checkbox)
def log_browser_capabilities(self, loglevel="INFO"):
    """Logs all of the browser capabilities as reported by selenium"""
    capabilities = pformat(self.selenium.driver.capabilities, indent=4)
    message = "selenium browser capabilities:\n" + capabilities
    self.builtin.log(message, level=loglevel)
@capture_screenshot_on_error
def open_app_launcher(self, retry=True):
    """Opens the Saleforce App Launcher Modal

    Note: starting with Spring '20 the app launcher button opens a
    menu rather than a modal. To maintain backwards compatibility,
    this keyword will continue to open the modal rather than the
    menu. If you need to interact with the app launcher menu, you
    will need to create a custom keyword.

    If the retry parameter is true, the keyword will
    close and then re-open the app launcher if it times out
    while waiting for the dialog to open.
    """
    self._jsclick("sf:app_launcher.button")
    self.selenium.wait_until_element_is_visible("sf:app_launcher.view_all")
    self._jsclick("sf:app_launcher.view_all")
    self.wait_until_modal_is_open()
    try:
        # the modal may be open, but not yet fully rendered
        # wait until at least one link appears. We've seen that sometimes
        # the dialog hangs prior to any links showing up
        self.selenium.wait_until_element_is_visible(
            "xpath://ul[contains(@class, 'al-modal-list')]//li"
        )
    except Exception as e:
        # This should never happen, yet it does. Experience has
        # shown that sometimes (at least in spring '20) the modal
        # never renders. Refreshing the modal seems to fix it.
        if retry:
            self.builtin.log(
                f"caught exception {e} waiting for app launcher; retrying", "DEBUG"
            )
            # Close the stuck modal and recurse exactly once
            # (retry=False on the second attempt).
            self.selenium.press_keys("sf:modal.is_open", "ESCAPE")
            self.wait_until_modal_is_closed()
            self.open_app_launcher(retry=False)
        else:
            self.builtin.log(
                "caught exception waiting for app launcher; not retrying", "DEBUG"
            )
            raise
def populate_field(self, name, value):
    """Enters a value into an input or textarea field.

    'name' represents the label on the page (eg: "First Name"),
    and 'value' is the new value.

    Any existing value will be replaced.
    """
    input_locator = self._get_input_field_locator(name)
    self._populate_field(input_locator, value)
def populate_lookup_field(self, name, value):
    """Enters a value into a lookup field.

    Types the value, waits for the matching menu item to appear
    (retrying a few times to give server-side indexing a chance to
    catch up), then clicks the menu item.
    """
    input_locator = self._get_input_field_locator(name)
    menu_locator = lex_locators["object"]["field_lookup_link"].format(value)
    self._populate_field(input_locator, value)
    # Up to 3 attempts to make the lookup menu item appear.
    for x in range(3):
        self.wait_for_aura()
        try:
            self.selenium.get_webelement(menu_locator)
        except ElementNotFound:
            # Give indexing a chance to catch up
            time.sleep(2)
            # A backspace keystroke re-triggers the lookup search.
            field = self.selenium.get_webelement(input_locator)
            field.send_keys(Keys.BACK_SPACE)
        else:
            break
    self.selenium.set_focus_to_element(menu_locator)
    self._jsclick(menu_locator)
    self.wait_for_aura()
def _get_input_field_locator(self, name):
    """Given an input field label, return a locator for the related input field

    This looks for a <label> element with the given text, or
    a label with a span with the given text. The value of the
    'for' attribute is then extracted from the label and used
    to create a new locator with that id.

    For example, the locator 'abc123' will be returned
    for the following html:

    <label for='abc123'>First Name</label>
    -or-
    <label for='abc123'><span>First Name</span>
    """
    try:
        # we need to make sure that if a modal is open, we only find
        # the input element inside the modal. Otherwise it's possible
        # that the xpath could pick the wrong element.
        self.selenium.get_webelement(lex_locators["modal"]["is_open"])
        modal_prefix = "//div[contains(@class, 'modal-container')]"
    except ElementNotFound:
        # No modal open; search the whole page.
        modal_prefix = ""
    # The label locator template takes the name twice: once for the
    # <label> text and once for the <span> variant.
    locator = modal_prefix + lex_locators["object"]["field_label"].format(
        name, name
    )
    input_element_id = self.selenium.get_element_attribute(locator, "for")
    return input_element_id
def _populate_field(self, locator, value):
self.builtin.log(f"value: {value}' locator: '{locator}'", "DEBUG")
field = self.selenium.get_webelement(locator)
self._focus(field)
if field.get_attribute("value"):
self._clear(field)
field.send_keys(value)
def _focus(self, element):
    """Set focus to an element

    In addition to merely setting the focus, we click the mouse
    to the field in case there are functions tied to that event.
    """
    chain = ActionChains(self.selenium.driver)
    chain.move_to_element(element)
    chain.click()
    chain.perform()
    self.selenium.set_focus_to_element(element)
def _clear(self, element):
    """Clear the field, using any means necessary

    This is surprisingly hard to do with a generic solution. Some
    methods work for some components and/or on some browsers but
    not others. Therefore, several techniques are employed.
    """
    # Technique 1: the standard webdriver clear()
    element.clear()
    # Technique 2: set the value directly via javascript
    self.selenium.driver.execute_script("arguments[0].value = '';", element)
    # Select all and delete just in case the element didn't get cleared
    element.send_keys(Keys.HOME + Keys.SHIFT + Keys.END)
    element.send_keys(Keys.BACKSPACE)
    if element.get_attribute("value"):
        # Give the UI a chance to settle down. The sleep appears
        # necessary. Without it, this keyword sometimes fails to work
        # properly. With it, I was able to run 700+ tests without a single
        # failure.
        time.sleep(0.25)
    # Even after all that, some elements refuse to be cleared out.
    # I'm looking at you, currency fields on Firefox.
    if element.get_attribute("value"):
        self._force_clear(element)
def _force_clear(self, element):
    """Use brute-force to clear an element

    This moves the cursor to the end of the input field and
    then issues a series of backspace keys to delete the data
    in the field.
    """
    current_value = element.get_attribute("value")
    chain = ActionChains(self.selenium.driver)
    chain.move_to_element(element).click().send_keys(Keys.END)
    # one backspace per character currently in the field
    for _ in current_value:
        chain.send_keys(Keys.BACKSPACE)
    chain.perform()
def populate_form(self, **kwargs):
    """Enters multiple values from a mapping into form fields."""
    for label, value in kwargs.items():
        self.populate_field(label, value)
def remove_session_record(self, obj_type, obj_id):
    """Remove a record from the list of records that should be automatically removed."""
    target = {"type": obj_type, "id": obj_id}
    try:
        self._session_records.remove(target)
    except ValueError:
        message = "Did not find record {} {} in the session records list".format(
            obj_type, obj_id
        )
        self.builtin.log(message)
def select_record_type(self, label):
    """Selects a record type while adding an object."""
    self.wait_until_modal_is_open()
    option = lex_locators["object"]["record_type_option"].format(label)
    self._jsclick(option)
    self.selenium.click_button("Next")
@capture_screenshot_on_error
def select_app_launcher_app(self, app_name):
    """Navigates to a Salesforce App via the App Launcher

    Opens the app launcher modal, waits for the app link to
    appear, then clicks the enclosing anchor.
    """
    locator = lex_locators["app_launcher"]["app_link"].format(app_name)
    self.open_app_launcher()
    self.selenium.wait_until_page_contains_element(locator, timeout=30)
    self.selenium.set_focus_to_element(locator)
    elem = self.selenium.get_webelement(locator)
    # The clickable anchor is three levels above the located element.
    # NOTE(review): find_element_by_xpath was removed in Selenium 4;
    # presumably the pinned selenium version still provides it -- confirm.
    link = elem.find_element_by_xpath("../../..")
    self.selenium.set_focus_to_element(link)
    link.click()
    self.wait_until_modal_is_closed()
@capture_screenshot_on_error
def select_app_launcher_tab(self, tab_name):
    """Navigates to a tab via the App Launcher"""
    tab_link = lex_locators["app_launcher"]["tab_link"].format(tab_name)
    self.open_app_launcher()
    self.selenium.wait_until_page_contains_element(tab_link)
    self.selenium.set_focus_to_element(tab_link)
    self._jsclick(tab_link)
    self.wait_until_modal_is_closed()
def salesforce_delete(self, obj_name, obj_id):
    """Deletes a Salesforce object by object name and Id.

    Example:

    The following example assumes that ``${contact id}`` has been
    previously set. The example deletes the Contact with that Id.

    | Salesforce Delete  Contact  ${contact id}
    """
    self.builtin.log("Deleting {} with Id {}".format(obj_name, obj_id))
    sobject = getattr(self.cumulusci.sf, obj_name)
    sobject.delete(obj_id)
    self.remove_session_record(obj_name, obj_id)
def salesforce_get(self, obj_name, obj_id):
    """Gets a Salesforce object by Id and returns the result as a dict.

    Example:

    The following example assumes that ``${contact id}`` has been
    previously set. The example retrieves the Contact object with
    that Id and then logs the Name field.

    | &{contact}=  Salesforce Get  Contact  ${contact id}
    | log  Contact name:  ${contact['Name']}
    """
    self.builtin.log(f"Getting {obj_name} with Id {obj_id}")
    sobject = getattr(self.cumulusci.sf, obj_name)
    return sobject.get(obj_id)
def salesforce_insert(self, obj_name, **kwargs):
    """Creates a new Salesforce object and returns the Id.

    The fields of the object may be defined with keyword arguments
    where the keyword name is the same as the field name.

    The object name and Id is passed to the *Store Session
    Record* keyword, and will be deleted when the keyword
    *Delete Session Records* is called.

    As a best practice, either *Delete Session Records* or
    *Delete Records and Close Browser* from Salesforce.robot
    should be called as a suite teardown.

    Example:

    The following example creates a new Contact with the
    first name of "Eleanor" and the last name of "Rigby".

    | ${contact id}=  Salesforce Insert  Contact
    | ...  FirstName=Eleanor
    | ...  LastName=Rigby
    """
    self.builtin.log("Inserting {} with values {}".format(obj_name, kwargs))
    sobject = getattr(self.cumulusci.sf, obj_name)
    result = sobject.create(kwargs)
    new_id = result["id"]
    self.store_session_record(obj_name, new_id)
    return new_id
def _salesforce_generate_object(self, obj_name, **fields):
    """Build a collection-API-ready record dict for ``obj_name``."""
    record = {"attributes": {"type": obj_name}}  # Object type to create
    record.update(fields)
    return record
def generate_test_data(self, obj_name, number_to_create, **fields):
    """Generate bulk test data

    This returns an array of dictionaries with template-formatted
    arguments which can be passed to the *Salesforce Collection Insert*
    keyword.

    You can use ``{{number}}`` to represent the unique index of
    the row in the list of rows. If the entire string consists of
    a number, Salesforce API will treat the value as a number.

    Example:

    The following example creates three new Contacts:

    | @{objects} =  Generate Test Data  Contact  3
    | ...  Name=User {{number}}
    | ...  Age={{number}}

    The example code will generate Contact objects with these fields:

    | [{'Name': 'User 0', 'Age': '0'},
    |  {'Name': 'User 1', 'Age': '1'},
    |  {'Name': 'User 2', 'Age': '2'}]

    Python Expression Syntax is allowed so computed templates like this are also allowed: ``{{1000 + number}}``

    Python operators can be used, but no functions or variables are provided, so mostly you just
    have access to mathematical and logical operators. The Python operators are described here:

    https://www.digitalocean.com/community/tutorials/how-to-do-math-in-python-3-with-operators

    Contact the CCI team if you have a use-case that
    could benefit from more expression language power.

    Templates can also be based on faker patterns like those described here:

    https://faker.readthedocs.io/en/master/providers.html

    Most examples can be pasted into templates verbatim:

    | @{objects}=  Generate Test Data  Contact  200
    | ...  Name={{fake.first_name}} {{fake.last_name}}
    | ...  MailingStreet={{fake.street_address}}
    | ...  MailingCity=New York
    | ...  MailingState=NY
    | ...  MailingPostalCode=12345
    | ...  Email={{fake.email(domain="salesforce.com")}}
    """
    objs = []
    for i in range(int(number_to_create)):
        # Render each field template with the row index bound to "number".
        formatted_fields = {
            name: format_str(value, {"number": i}) for name, value in fields.items()
        }
        newobj = self._salesforce_generate_object(obj_name, **formatted_fields)
        objs.append(newobj)
    return objs
def salesforce_collection_insert(self, objects):
"""Inserts records that were created with *Generate Test Data*.
_objects_ is a list of data, typically generated by the
*Generate Test Data* keyword.
A 200 record limit is enforced by the Salesforce APIs.
The object name and Id is passed to the *Store Session
Record* keyword, and will be deleted when the keyword *Delete
Session Records* is called.
As a best practice, either *Delete Session Records* or
**Delete Records and Close Browser* from Salesforce.robot
should be called as a suite teardown.
Example:
| @{objects}= Generate Test Data Contact 200
| ... FirstName=User {{number}}
| ... LastName={{fake.last_name}}
| Salesforce Collection Insert ${objects}
"""
assert (
not obj.get("id", None) for obj in objects
), "Insertable objects should not have IDs"
assert len(objects) <= SF_COLLECTION_INSERTION_LIMIT, (
"Cannot insert more than %s objects with this keyword"
% SF_COLLECTION_INSERTION_LIMIT
)
records = self.cumulusci.sf.restful(
"composite/sobjects",
method="POST",
json={"allOrNone": True, "records": objects},
)
for idx, (record, obj) in enumerate(zip(records, objects)):
if record["errors"]:
raise AssertionError(
"Error on Object {idx}: {record} : {obj}".format(**vars())
)
self.store_session_record(obj["attributes"]["type"], record["id"])
obj["id"] = record["id"]
obj[STATUS_KEY] = record
return objects
def salesforce_collection_update(self, objects):
"""Updates records described as Robot/Python dictionaries.
_objects_ is a dictionary of data in the format returned
by the *Salesforce Collection Insert* keyword.
A 200 record limit is enforced by the Salesforce APIs.
Example:
The following example creates ten accounts and then updates
the Rating from "Cold" to "Hot"
| ${data}= Generate Test Data Account 10
| ... Name=Account #{{number}}
| ... Rating=Cold
| ${accounts}= Salesforce Collection Insert ${data}
|
| FOR ${account} IN @{accounts}
| Set to dictionary ${account} Rating Hot
| END
| Salesforce Collection Update ${accounts}
"""
for obj in objects:
assert obj[
"id"
], "Should be a list of objects with Ids returned by Salesforce Collection Insert"
if STATUS_KEY in obj:
del obj[STATUS_KEY]
assert len(objects) <= SF_COLLECTION_INSERTION_LIMIT, (
"Cannot update more than %s objects with this keyword"
% SF_COLLECTION_INSERTION_LIMIT
)
records = self.cumulusci.sf.restful(
"composite/sobjects",
method="PATCH",
json={"allOrNone": True, "records": objects},
)
for record, obj in zip(records, objects):
obj[STATUS_KEY] = record
for idx, (record, obj) in enumerate(zip(records, objects)):
if record["errors"]:
raise AssertionError(
"Error on Object {idx}: {record} : {obj}".format(**vars())
)
def salesforce_query(self, obj_name, **kwargs):
"""Constructs and runs a simple SOQL query and returns a list of dictionaries.
By default the results will only contain object Ids. You can
specify a SOQL SELECT clause via keyword arguments by passing
a comma-separated list of fields with the ``select`` keyword
argument.
You can supply keys and values to match against
in keyword arguments, or a full SOQL where-clause
in a keyword argument named ``where``. If you supply
both, they will be combined with a SOQL "AND".
``order_by`` and ``limit`` keyword arguments are also
supported as shown below.
Examples:
The following example searches for all Contacts where the
first name is "Eleanor". It returns the "Name" and "Id"
fields and logs them to the robot report:
| @{records}= Salesforce Query Contact select=Id,Name
| ... FirstName=Eleanor
| FOR ${record} IN @{records}
| log Name: ${record['Name']} Id: ${record['Id']}
| END
Or with a WHERE-clause, we can look for the last contact where
the first name is NOT Eleanor.
| @{records}= Salesforce Query Contact select=Id,Name
| ... where=FirstName!='Eleanor'
| ... order_by=LastName desc
| ... limit=1
"""
query = self._soql_query_builder(obj_name, **kwargs)
self.builtin.log("Running SOQL Query: {}".format(query))
return self.cumulusci.sf.query_all(query).get("records", [])
def _soql_query_builder(
self, obj_name, select=None, order_by=None, limit=None, where=None, **kwargs
):
query = "SELECT "
if select:
query += select
else:
query += "Id"
query += " FROM {}".format(obj_name)
where_clauses = []
if where:
where_clauses = [where]
for key, value in kwargs.items():
where_clauses.append("{} = '{}'".format(key, value))
if where_clauses:
query += " WHERE " + " AND ".join(where_clauses)
if order_by:
query += " ORDER BY " + order_by
if limit:
assert int(limit), "Limit should be an integer"
query += f" LIMIT {limit}"
return query
def salesforce_update(self, obj_name, obj_id, **kwargs):
"""Updates a Salesforce object by Id.
The keyword returns the result from the underlying
simple_salesforce ``insert`` method, which is an HTTP
status code. As with `Salesforce Insert`, field values
are specified as keyword arguments.
The following example assumes that ${contact id} has been
previously set, and adds a Description to the given
contact.
| &{contact}= Salesforce Update Contact ${contact id}
| ... Description=This Contact created during a test
| Should be equal as numbers ${result} 204
"""
self.builtin.log(
"Updating {} {} with values {}".format(obj_name, obj_id, kwargs)
)
obj_class = getattr(self.cumulusci.sf, obj_name)
return obj_class.update(obj_id, kwargs)
def soql_query(self, query):
"""Runs a simple SOQL query and returns the dict results
The _query_ parameter must be a properly quoted SOQL query statement. The
return value is a dictionary. The dictionary contains the keys
as documented for the raw API call. The most useful key is ``records``,
which contains a list of records which were matched by the query.
Example
The following example searches for all Contacts with a first
name of "Eleanor" and a last name of "Rigby", and then prints
the name of the first record found.
| ${result}= SOQL Query
| ... SELECT Name, Id FROM Contact WHERE FirstName='Eleanor' AND LastName='Rigby'
| Run keyword if len($result['records']) == 0 Fail No records found
|
| ${contact}= Get from list ${result['records']} 0
| Should be equal ${contact['Name']} Eleanor Rigby
"""
self.builtin.log("Running SOQL Query: {}".format(query))
return self.cumulusci.sf.query_all(query)
def store_session_record(self, obj_type, obj_id):
"""Stores a Salesforce record's Id for use in the *Delete Session Records* keyword.
This keyword is automatically called by *Salesforce Insert*.
"""
self.builtin.log("Storing {} {} to session records".format(obj_type, obj_id))
self._session_records.append({"type": obj_type, "id": obj_id})
@capture_screenshot_on_error
def wait_until_modal_is_open(self):
"""Wait for modal to open"""
self.selenium.wait_until_page_contains_element(
lex_locators["modal"]["is_open"],
timeout=15,
error="Expected to see a modal window, but didn't",
)
def wait_until_modal_is_closed(self):
"""Wait for modal to close"""
self.selenium.wait_until_page_does_not_contain_element(
lex_locators["modal"]["is_open"], timeout=15
)
def wait_until_loading_is_complete(self, locator=None):
"""Wait for LEX page to load.
(We're actually waiting for the actions ribbon to appear.)
"""
locator = lex_locators["body"] if locator is None else locator
try:
self.selenium.wait_until_page_contains_element(locator)
self.wait_for_aura()
# this knowledge article recommends waiting a second. I don't
# like it, but it seems to help. We should do a wait instead,
# but I can't figure out what to wait on.
# https://help.salesforce.com/articleView?id=000352057&language=en_US&mode=1&type=1
time.sleep(1)
except Exception:
try:
self.selenium.capture_page_screenshot()
except Exception as e:
self.builtin.warn("unable to capture screenshot: {}".format(str(e)))
raise
@capture_screenshot_on_error
def wait_until_salesforce_is_ready(self, locator=None, timeout=None, interval=5):
"""Waits until we are able to render the initial salesforce landing page
It will continue to refresh the page until we land on a
lightning page or until a timeout has been reached. The
timeout can be specified in any time string supported by robot
(eg: number of seconds, "3 minutes", etc.). If not specified,
the default selenium timeout will be used.
This keyword will wait a few seconds between each refresh, as
well as wait after each refresh for the page to fully render
(ie: it calls wait_for_aura())
"""
# Note: we can't just ask selenium to wait for an element,
# because the org might not be availble due to infrastructure
# issues (eg: the domain not being propagated). In such a case
# the element will never come. Instead, what we need to do is
# repeatedly refresh the page until the org responds.
#
# This assumes that any lightning page is a valid stopping
# point. If salesforce starts rendering error pages with
# lightning, or an org's default home page is not a lightning
# page, we may have to rethink that strategy.
interval = 5 # seconds between each refresh.
timeout = timeout if timeout else self.selenium.get_selenium_timeout()
timeout_seconds = timestr_to_secs(timeout)
start_time = time.time()
login_url = self.cumulusci.login_url()
locator = lex_locators["body"] if locator is None else locator
while True:
try:
self.selenium.wait_for_condition(
"return (document.readyState == 'complete')"
)
self.wait_for_aura()
# If the following doesn't throw an error, we're good to go.
self.selenium.get_webelement(locator)
break
except Exception as e:
self.builtin.log(
"caught exception while waiting: {}".format(str(e)), "DEBUG"
)
if time.time() - start_time > timeout_seconds:
self.selenium.log_location()
raise Exception("Timed out waiting for a lightning page")
# known edge cases that can be worked around
if self._check_for_login_failure():
continue
elif self._check_for_classic():
continue
# not a known edge case; take a deep breath and
# try again.
time.sleep(interval)
self.selenium.go_to(login_url)
def breakpoint(self):
"""Serves as a breakpoint for the robot debugger
Note: this keyword is a no-op unless the debug option for
the task has been set to True. Unless the option has been
set, this keyword will have no effect on a running test.
"""
return None
def _check_for_classic(self):
"""Switch to lightning if we land on a classic page
This seems to happen randomly, causing tests to fail
catastrophically. The idea is to detect such a case and
auto-click the "switch to lightning" link
"""
try:
# we don't actually want to wait here, but if we don't
# explicitly wait, we'll implicitly wait longer than
# necessary. This needs to be a quick-ish check.
self.selenium.wait_until_element_is_visible(
"class:switch-to-lightning", timeout=2
)
self.builtin.log(
"It appears we are on a classic page; attempting to switch to lightning",
"WARN",
)
# this screenshot should be removed at some point,
# but for now I want to make sure we see what the
# page looks like if we get here.
self.selenium.capture_page_screenshot()
# just in case there's a modal present we'll try simulating
# the escape key. Then, click on the switch-to-lightning link
self.selenium.press_keys(None, "ESC")
self.builtin.sleep("1 second")
self.selenium.click_link("class:switch-to-lightning")
return True
except (NoSuchElementException, AssertionError):
return False
def _check_for_login_failure(self):
"""Handle the case where we land on a login screen
Sometimes we get redirected to a login URL rather than
being logged in, and we've yet to figure out precisely why
that happens. Experimentation shows that authentication has
already happened, so in this case we'll try going back to
the instance url rather than the front door servlet.
Admittedly, this is a bit of a hack, but it's better than
never getting past this redirect.
"""
location = self.selenium.get_location()
if "//test.salesforce.com" in location or "//login.salesforce.com" in location:
login_url = self.cumulusci.org.config["instance_url"]
self.builtin.log(f"setting login_url temporarily to {login_url}", "DEBUG")
self.selenium.go_to(login_url)
return True
return False
def elapsed_time_for_last_record(
self, obj_name, start_field, end_field, order_by, **kwargs
):
"""For records representing jobs or processes, compare the record's start-time to its end-time to see how long a process took.
Arguments:
obj_name: SObject to look for last record
start_field: Name of the datetime field that represents the process start
end_field: Name of the datetime field that represents the process end
order_by: Field name to order by. Should be a datetime field, and usually is just the same as end_field.
where: Optional Where-clause to use for filtering
Other keywords are used for filtering as in the Salesforce Query keywordf
The last matching record queried and summarized.
Example:
${time_in_seconds} = Elapsed Time For Last Record
... obj_name=AsyncApexJob
... where=ApexClass.Name='BlahBlah'
... start_field=CreatedDate
... end_field=CompletedDate
... order_by=CompletedDate
"""
if len(order_by.split()) != 1:
raise Exception("order_by should be a simple field name")
query = self._soql_query_builder(
obj_name,
select=f"{start_field}, {end_field}",
order_by=order_by + " DESC NULLS LAST",
limit=1,
**kwargs,
)
response = self.soql_query(query)
results = response["records"]
if results:
record = results[0]
return _duration(record[start_field], record[end_field], record)
else:
raise Exception(f"Matching record not found: {query}")
def start_performance_timer(self):
"""Start an elapsed time stopwatch for performance tests.
See the docummentation for **Stop Performance Timer** for more
information.
Example:
Start Performance Timer
Do Something
Stop Performance Timer
"""
BuiltIn().set_test_variable("${__start_time}", datetime.now())
def stop_performance_timer(self):
"""Record the results of a stopwatch. For perf testing.
This keyword uses Set Test Elapsed Time internally and therefore
outputs in all of the ways described there.
Example:
Start Performance Timer
Do Something
Stop Performance Timer
"""
builtins = BuiltIn()
start_time = builtins.get_variable_value("${__start_time}")
if start_time:
seconds = (datetime.now() - start_time).seconds
assert seconds is not None
self.set_test_elapsed_time(seconds)
else:
raise Exception(
"Elapsed time clock was not started. "
"Use the Start Elapsed Time keyword to do so."
)
def set_test_elapsed_time(self, elapsedtime):
"""This keyword captures a computed rather than measured elapsed time for performance tests.
For example, if you were performance testing a Salesforce batch process, you might want to
store the Salesforce-measured elapsed time of the batch process instead of the time measured
in the CCI client process.
The keyword takes a single argument which is either a number of seconds or a Robot time string
(https://robotframework.org/robotframework/latest/libraries/DateTime.html#Time%20formats).
Using this keyword will automatically add the tag cci_metric_elapsed_time to the test case
and ${cci_metric_elapsed_time} to the test's variables. cci_metric_elapsed_time is not
included in Robot's html statistical roll-ups.
Example:
Set Test Elapsed Time 11655.9
Performance test times are output in the CCI logs and are captured in MetaCI instead of the
"total elapsed time" measured by Robot Framework. The Robot "test message" is also updated."""
builtins = BuiltIn()
try:
seconds = float(elapsedtime)
except ValueError:
seconds = timestr_to_secs(elapsedtime)
assert seconds is not None
builtins.set_test_message(f"Elapsed time set by test : {seconds}")
builtins.set_tags("cci_metric_elapsed_time")
builtins.set_test_variable("${cci_metric_elapsed_time}", seconds)
def set_test_metric(self, metric: str, value=None):
"""This keyword captures any metric for performance monitoring.
For example: number of queries, rows processed, CPU usage, etc.
The keyword takes a metric name, which can be any string, and a value, which
can be any number.
Using this keyword will automatically add the tag cci_metric to the test case
and ${cci_metric_<metric_name>} to the test's variables. These permit downstream
processing in tools like CCI and MetaCI.
cci_metric is not included in Robot's html statistical roll-ups.
Example:
| Set Test Metric Max_CPU_Percent 30
Performance test metrics are output in the CCI logs, log.html and output.xml.
MetaCI captures them but does not currently have a user interface for displaying
them."""
builtins = BuiltIn()
value = float(value)
builtins.set_tags("cci_metric")
builtins.set_test_variable("${cci_metric_%s}" % metric, value)
@capture_screenshot_on_error
def input_form_data(self, *args):
"""Fill in one or more labeled input fields fields with data
Arguments should be pairs of field labels and values. Labels
for required fields should not include the asterisk. Labels
must be exact, including case.
This keyword uses the keyword *Locate Element by Label* to
locate elements. More details about how elements are found are
in the documentation for that keyword.
For most input form fields the actual value string will be
used. For a checkbox, passing the value "checked" will check
the checkbox and any other value will uncheck it. Using
"unchecked" is recommended for clarity.
Example:
| Input form data
| ... Opportunity Name The big one # required text field
| ... Amount 1b # currency field
| ... Close Date 4/01/2022 # date field
| ... Private checked # checkbox
| ... Type New Customer # combobox
| ... Primary Campaign Source The Big Campaign # picklist
This keyword will eventually replace the "populate form"
keyword once it has been more thoroughly tested in production.
"""
it = iter(args)
errors = []
for label, value in list(zip(it, it)):
# this uses our custom "label" locator strategy
locator = f"label:{label}"
# FIXME: we should probably only wait for the first label;
# after that we can assume the fields have been rendered
# so that we fail quickly if we can't find the element
element = self.selenium.get_webelement(locator)
handler = get_form_handler(element, locator)
try:
if handler:
handler.set(value)
else:
raise Exception(
f"No form handler found for tag '{element.tag_name}'"
)
except Exception as e:
errors.append(f"{label}: {str(e)}")
if errors:
message = "There were errors with the following fields:\n"
message += "\n".join(errors)
raise Exception(message)
# FIXME: maybe we should automatically set the focus to some
# other element to trigger any event handlers on the last
# element? But what should we set the focus to?
def locate_element_by_label(self, browser, locator, tag, constraints):
"""Find a lightning component, input, or textarea based on a label
If the component is inside a fieldset, the fieldset label can
be prefixed to the label with a double colon in order to
disambiguate the label. (eg: Other address::First Name)
If the label is inside nested ligntning components (eg:
``<lightning-input>...<lightning-combobox>...<label>``), the
lightning component closest to the label will be
returned (in this case, ``lightning-combobox``).
If a lightning component cannot be found for the label, an
attempt will be made to find an input or textarea associated
with the label.
This is registered as a custom locator strategy named "label"
Example:
The following example is for a form with a formset named
"Expected Delivery Date", and inside of that a date input field
with a label of "Date".
These examples produce identical results:
| ${element}= Locate element by label Expected Delivery Date::Date
| ${element}= Get webelement label:Expected Delivery Date::Date
"""
if "::" in locator:
fieldset, label = [x.strip() for x in locator.split("::", 1)]
fieldset_prefix = f'//fieldset[.//*[.="{fieldset}"]]'
else:
label = locator
fieldset_prefix = ""
xpath = fieldset_prefix + (
# a label with the given text, optionally with a leading
# or trailing "*" (ie: required field)
f'//label[.="{label}" or .="*{label}" or .="{label}*"]'
# then find the nearest ancestor lightning component
'/ancestor::*[starts-with(local-name(), "lightning-")][1]'
)
elements = browser.find_elements_by_xpath(xpath)
if not elements:
# fall back to finding an input or textarea based on the 'for'
# attribute of a label
xpath = fieldset_prefix + (
"//*[self::input or self::textarea]"
f'[@id=string(//label[.="{label}" or .="*{label}" or .="{label}*"]/@for)]'
)
elements = browser.find_elements_by_xpath(xpath)
return elements
def _duration(start_date: str, end_date: str, record: dict):
    """Return the elapsed seconds between two datetime strings.

    ``record`` is only used to provide context in the error message
    when one of the dates fails to parse.
    """
    try:
        started = parse_date(start_date)
        ended = parse_date(end_date)
    except (ParserError, TypeError) as e:
        # TypeError covers None values (eg: a job with no CompletedDate).
        raise Exception(f"Date parse error: {e} in record {record}")
    return (ended - started).total_seconds()
| 40.115885 | 134 | 0.628242 | import importlib
import logging
import re
import time
from datetime import datetime
from dateutil.parser import parse as parse_date, ParserError
from pprint import pformat
from robot.libraries.BuiltIn import BuiltIn, RobotNotRunningError
from robot.utils import timestr_to_secs
from cumulusci.robotframework.utils import get_locator_module_name
from cumulusci.robotframework.form_handlers import get_form_handler
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import (
StaleElementReferenceException,
NoSuchElementException,
JavascriptException,
WebDriverException,
)
import faker
from simple_salesforce import SalesforceResourceNotFound
from cumulusci.robotframework.utils import selenium_retry, capture_screenshot_on_error
from SeleniumLibrary.errors import ElementNotFound, NoOpenBrowser
from urllib3.exceptions import ProtocolError
from cumulusci.core.template_utils import format_str
from cumulusci.robotframework import locator_manager
OID_REGEX = r"^(%2F)?([a-zA-Z0-9]{15,18})$"
STATUS_KEY = ("status",)
lex_locators = {}
SF_COLLECTION_INSERTION_LIMIT = 200
@selenium_retry
class Salesforce(object):
ROBOT_LIBRARY_SCOPE = "GLOBAL"
def __init__(self, debug=False, locators=None):
self.debug = debug
self._session_records = []
logging.getLogger("requests.packages.urllib3.connectionpool").setLevel(
logging.WARN
)
if locators:
lex_locators.update(locators)
else:
self._init_locators()
self._faker = faker.Faker("en_US")
try:
self.builtin.set_global_variable("${faker}", self._faker)
except RobotNotRunningError:
pass
def _init_locators(self):
try:
version = int(float(self.get_latest_api_version()))
except RobotNotRunningError:
# Likely this means we are running in the context of
# documentation generation. Setting the version to
# None will result in using the latest version of
# locators.
version = None
locator_module_name = get_locator_module_name(version)
self.locators_module = importlib.import_module(locator_module_name)
lex_locators.update(self.locators_module.lex_locators)
@property
def builtin(self):
return BuiltIn()
@property
def cumulusci(self):
return self.builtin.get_library_instance("cumulusci.robotframework.CumulusCI")
def initialize_location_strategies(self):
if not self.builtin.get_variable_value(
"${LOCATION STRATEGIES INITIALIZED}", False
):
# this manages strategies based on locators in a dictionary
locator_manager.register_locators("sf", lex_locators)
locator_manager.add_location_strategies()
# these are more traditional location strategies based on keywords
# or functions
self.selenium.add_location_strategy(
"text", "Salesforce.Locate Element by Text"
)
self.selenium.add_location_strategy(
"title", "Salesforce.Locate Element by Title"
)
self.selenium.add_location_strategy("label", self.locate_element_by_label)
self.builtin.set_suite_variable("${LOCATION STRATEGIES INITIALIZED}", True)
@selenium_retry(False)
def _jsclick(self, locator):
self.selenium.wait_until_page_contains_element(locator)
self.selenium.wait_until_element_is_enabled(locator)
for should_retry in (True, False):
try:
# Setting the focus first seems to be required as of Spring'20
# fields which need to be triggered for data to be accepted.
element = self.selenium.get_webelement(locator)
self.selenium.driver.execute_script(
"arguments[0].focus(); arguments[0].click()", element
)
return
except StaleElementReferenceException:
if should_retry:
time.sleep(1)
else:
raise
def set_faker_locale(self, locale):
try:
self._faker = faker.Faker(locale)
except AttributeError:
raise Exception(f"Unknown locale for fake data: '{locale}'")
def get_fake_data(self, fake, *args, **kwargs):
try:
return self._faker.format(fake, *args, **kwargs)
except AttributeError:
raise Exception(f"Unknown fake data request: '{fake}'")
def get_latest_api_version(self):
return self.cumulusci.org.latest_api_version
def create_webdriver_with_retry(self, *args, **kwargs):
# Get selenium without referencing selenium.driver which doesn't exist yet
selenium = self.builtin.get_library_instance("SeleniumLibrary")
for _ in range(12):
try:
return selenium.create_webdriver(*args, **kwargs)
except ProtocolError:
time.sleep(5)
raise Exception("Could not connect to remote webdriver after 1 minute")
@capture_screenshot_on_error
def click_modal_button(self, title):
locator = lex_locators["modal"]["button"].format(title)
self.selenium.wait_until_page_contains_element(locator)
self.selenium.wait_until_element_is_enabled(locator)
self._jsclick(locator)
@capture_screenshot_on_error
def click_object_button(self, title):
locator = lex_locators["object"]["button"].format(title=title)
self._jsclick(locator)
self.wait_until_modal_is_open()
@capture_screenshot_on_error
def scroll_element_into_view(self, locator):
element = self.selenium.get_webelement(locator)
self.selenium.driver.execute_script("arguments[0].scrollIntoView()", element)
@capture_screenshot_on_error
def load_related_list(self, heading, tries=10):
locator = lex_locators["record"]["related"]["card"].format(heading)
for i in range(tries):
try:
self.selenium.scroll_element_into_view(locator)
return
except (ElementNotFound, JavascriptException, WebDriverException):
self.builtin.log(
f"related list '{heading}' not found; scrolling...", "DEBUG"
)
self.selenium.execute_javascript("window.scrollBy(0, 100)")
self.wait_for_aura()
raise AssertionError(f"Timed out waiting for related list '{heading}' to load.")
def click_related_list_button(self, heading, button_title):
self.load_related_list(heading)
locator = lex_locators["record"]["related"]["button"].format(
heading, button_title
)
self._jsclick(locator)
self.wait_until_modal_is_open()
@capture_screenshot_on_error
def click_related_item_link(self, heading, title):
self.load_related_list(heading)
locator = lex_locators["record"]["related"]["link"].format(heading, title)
try:
self._jsclick(locator)
except Exception as e:
self.builtin.log(f"Exception: {e}", "DEBUG")
raise Exception(
f"Unable to find related link under heading '{heading}' with the text '{title}'"
)
self.wait_until_loading_is_complete()
def click_related_item_popup_link(self, heading, title, link):
self.load_related_list(heading)
locator = lex_locators["record"]["related"]["popup_trigger"].format(
heading, title
)
self.selenium.wait_until_page_contains_element(locator)
self._jsclick(locator)
locator = lex_locators["popup"]["link"].format(link)
self._jsclick(locator)
self.wait_until_loading_is_complete()
def close_modal(self):
locator = lex_locators["modal"]["close"]
self._jsclick(locator)
def current_app_should_be(self, app_name):
locator = lex_locators["app_launcher"]["current_app"].format(app_name)
elem = self.selenium.get_webelement(locator)
assert app_name == elem.text, "Expected app to be {} but found {}".format(
app_name, elem.text
)
def delete_session_records(self):
self._session_records.reverse()
self.builtin.log("Deleting {} records".format(len(self._session_records)))
for record in self._session_records[:]:
self.builtin.log(" Deleting {type} {id}".format(**record))
try:
self.salesforce_delete(record["type"], record["id"])
except SalesforceResourceNotFound:
self.builtin.log(" {type} {id} is already deleted".format(**record))
except Exception as e:
self.builtin.log(
" {type} {id} could not be deleted:".format(**record),
level="WARN",
)
self.builtin.log(" {}".format(e), level="WARN")
def get_active_browser_ids(self):
# robot slack channels about adding a new keyword that does
# what this keyword does. When that happens, we can remove
# this keyword.
driver_ids = []
try:
driver_cache = self.selenium._drivers
except NoOpenBrowser:
return []
for index, driver in enumerate(driver_cache._connections):
if driver not in driver_cache._closed:
# SeleniumLibrary driver ids start at one rather than zero
driver_ids.append(index + 1)
return driver_ids
def get_current_record_id(self):
url = self.selenium.get_location()
for part in url.split("/"):
oid_match = re.match(OID_REGEX, part)
if oid_match is not None:
return oid_match.group(2)
raise AssertionError("Could not parse record id from url: {}".format(url))
def field_value_should_be(self, label, expected_value):
value = self.get_field_value(label)
self.builtin.should_be_equal(value, expected_value)
def get_field_value(self, label):
api_version = int(float(self.get_latest_api_version()))
locator = self._get_input_field_locator(label)
if api_version >= 51:
# this works for both First Name (input) and Account Name (picklist)
value = self.selenium.get_value(locator)
else:
# older releases it's a bit more complex
element = self.selenium.get_webelement(locator)
if element.get_attribute("role") == "combobox":
value = self.selenium.get_text(f"sf:object.field_lookup_value:{label}")
else:
value = self.selenium.get_value(f"sf:object.field:{label}")
return value
def get_locator(self, path, *args, **kwargs):
locator = lex_locators
for key in path.split("."):
locator = locator[key]
return locator.format(*args, **kwargs)
def get_record_type_id(self, obj_type, developer_name):
soql = "SELECT Id FROM RecordType WHERE SObjectType='{}' and DeveloperName='{}'".format(
obj_type, developer_name
)
res = self.cumulusci.sf.query_all(soql)
return res["records"][0]["Id"]
def get_related_list_count(self, heading):
locator = lex_locators["record"]["related"]["count"].format(heading)
count = self.selenium.get_webelement(locator).text
count = count.replace("(", "").replace(")", "")
return int(count)
def go_to_object_home(self, obj_name):
url = self.cumulusci.org.lightning_base_url
url = "{}/lightning/o/{}/home".format(url, obj_name)
self.selenium.go_to(url)
self.wait_until_loading_is_complete(lex_locators["actions"])
def go_to_object_list(self, obj_name, filter_name=None):
url = self.cumulusci.org.lightning_base_url
url = "{}/lightning/o/{}/list".format(url, obj_name)
if filter_name:
url += "?filterName={}".format(filter_name)
self.selenium.go_to(url)
self.wait_until_loading_is_complete(lex_locators["actions"])
def go_to_record_home(self, obj_id):
url = self.cumulusci.org.lightning_base_url
url = "{}/lightning/r/{}/view".format(url, obj_id)
self.selenium.go_to(url)
self.wait_until_loading_is_complete(lex_locators["actions"])
def go_to_setup_home(self):
url = self.cumulusci.org.lightning_base_url
self.selenium.go_to(url + "/lightning/setup/SetupOneHome/home")
self.wait_until_loading_is_complete()
def go_to_setup_object_manager(self):
url = self.cumulusci.org.lightning_base_url
self.selenium.go_to(url + "/lightning/setup/ObjectManager/home")
self.wait_until_loading_is_complete()
def header_field_should_have_value(self, label):
locator = lex_locators["record"]["header"]["field_value"].format(label)
self.selenium.page_should_contain_element(locator)
def header_field_should_not_have_value(self, label):
locator = lex_locators["record"]["header"]["field_value"].format(label)
self.selenium.page_should_not_contain_element(locator)
def header_field_should_have_link(self, label):
locator = lex_locators["record"]["header"]["field_value_link"].format(label)
self.selenium.page_should_contain_element(locator)
def header_field_should_not_have_link(self, label):
locator = lex_locators["record"]["header"]["field_value_link"].format(label)
self.selenium.page_should_not_contain_element(locator)
def click_header_field_link(self, label):
locator = lex_locators["record"]["header"]["field_value_link"].format(label)
self._jsclick(locator)
def header_field_should_be_checked(self, label):
locator = lex_locators["record"]["header"]["field_value_checked"].format(label)
self.selenium.page_should_contain_element(locator)
def header_field_should_be_unchecked(self, label):
locator = lex_locators["record"]["header"]["field_value_unchecked"].format(
label
)
self.selenium.page_should_contain_element(locator)
def log_browser_capabilities(self, loglevel="INFO"):
output = "selenium browser capabilities:\n"
output += pformat(self.selenium.driver.capabilities, indent=4)
self.builtin.log(output, level=loglevel)
    @capture_screenshot_on_error
    def open_app_launcher(self, retry=True):
        """Open the app launcher modal and wait for it to render.

        If the modal fails to finish rendering, close and reopen it
        once (controlled by *retry*) before giving up.
        """
        self._jsclick("sf:app_launcher.button")
        self.selenium.wait_until_element_is_visible("sf:app_launcher.view_all")
        self._jsclick("sf:app_launcher.view_all")
        self.wait_until_modal_is_open()
        try:
            # the dialog hangs prior to any links showing up
            self.selenium.wait_until_element_is_visible(
                "xpath://ul[contains(@class, 'al-modal-list')]//li"
            )
        except Exception as e:
            # This should never happen, yet it does. Experience has
            # shown that sometimes (at least in spring '20) the modal
            # never finishes rendering; closing and reopening it
            # usually recovers.
            if retry:
                self.builtin.log(
                    f"caught exception {e} waiting for app launcher; retrying", "DEBUG"
                )
                self.selenium.press_keys("sf:modal.is_open", "ESCAPE")
                self.wait_until_modal_is_closed()
                self.open_app_launcher(retry=False)
            else:
                self.builtin.log(
                    "caught exception waiting for app launcher; not retrying", "DEBUG"
                )
                raise
def populate_field(self, name, value):
locator = self._get_input_field_locator(name)
self._populate_field(locator, value)
    def populate_lookup_field(self, name, value):
        """Type a value into a lookup field and pick the matching dropdown item.

        Retries up to three times if the dropdown item doesn't appear,
        backspacing one character to re-trigger the lookup search.
        """
        input_locator = self._get_input_field_locator(name)
        menu_locator = lex_locators["object"]["field_lookup_link"].format(value)
        self._populate_field(input_locator, value)
        for x in range(3):
            self.wait_for_aura()
            try:
                self.selenium.get_webelement(menu_locator)
            except ElementNotFound:
                # menu item hasn't rendered yet; nudge the field with a
                # backspace to re-trigger the search, then try again
                time.sleep(2)
                field = self.selenium.get_webelement(input_locator)
                field.send_keys(Keys.BACK_SPACE)
            else:
                break
        self.selenium.set_focus_to_element(menu_locator)
        self._jsclick(menu_locator)
        self.wait_for_aura()
    def _get_input_field_locator(self, name):
        """Return the id of the input element associated with the label *name*.

        The id comes from the label's ``for`` attribute. If a modal is
        open, the search is scoped to the modal container.
        """
        try:
            # If a modal is open, prefix the locator with the modal
            # container so that the xpath can't pick a matching label
            # on the page behind the modal.
            self.selenium.get_webelement(lex_locators["modal"]["is_open"])
            modal_prefix = "//div[contains(@class, 'modal-container')]"
        except ElementNotFound:
            modal_prefix = ""
        locator = modal_prefix + lex_locators["object"]["field_label"].format(
            name, name
        )
        input_element_id = self.selenium.get_element_attribute(locator, "for")
        return input_element_id
def _populate_field(self, locator, value):
self.builtin.log(f"value: {value}' locator: '{locator}'", "DEBUG")
field = self.selenium.get_webelement(locator)
self._focus(field)
if field.get_attribute("value"):
self._clear(field)
field.send_keys(value)
    def _focus(self, element):
        """Set focus to an element.

        In addition to setting focus we also click the element;
        presumably setting focus alone is not sufficient for some
        components (NOTE(review): inferred from the implementation —
        confirm before simplifying).
        """
        actions = ActionChains(self.selenium.driver)
        actions.move_to_element(element).click().perform()
        self.selenium.set_focus_to_element(element)
    def _clear(self, element):
        """Clear the value of an input element.

        Several clearing strategies are applied in sequence, since
        different components respond to different techniques: the
        webdriver clear(), a javascript value reset, and select-all
        plus backspace. Falls back to _force_clear as a last resort.
        """
        element.clear()
        self.selenium.driver.execute_script("arguments[0].value = '';", element)
        # select from the start to the end of the value, then delete it
        element.send_keys(Keys.HOME + Keys.SHIFT + Keys.END)
        element.send_keys(Keys.BACKSPACE)
        if element.get_attribute("value"):
            # Give the UI a chance to settle down. The sleep appears
            # necessary. Without it, this keyword sometimes fails to work
            # properly. With it, I was able to run 700+ tests without a single
            # failure.
            time.sleep(0.25)
        # Even after all that, some elements refuse to be cleared out.
        # I'm looking at you, currency fields on Firefox.
        if element.get_attribute("value"):
            self._force_clear(element)
    def _force_clear(self, element):
        """Remove an element's value one backspace keypress per character.

        Used as a last resort when the techniques in _clear fail.
        """
        value = element.get_attribute("value")
        actions = ActionChains(self.selenium.driver)
        # move the cursor to the end of the value, then backspace
        # once per character
        actions.move_to_element(element).click().send_keys(Keys.END)
        for character in value:
            actions.send_keys(Keys.BACKSPACE)
        actions.perform()
def populate_form(self, **kwargs):
for name, value in kwargs.items():
self.populate_field(name, value)
def remove_session_record(self, obj_type, obj_id):
try:
self._session_records.remove({"type": obj_type, "id": obj_id})
except ValueError:
self.builtin.log(
"Did not find record {} {} in the session records list".format(
obj_type, obj_id
)
)
def select_record_type(self, label):
self.wait_until_modal_is_open()
locator = lex_locators["object"]["record_type_option"].format(label)
self._jsclick(locator)
self.selenium.click_button("Next")
    @capture_screenshot_on_error
    def select_app_launcher_app(self, app_name):
        """Open the app launcher and click the app with the given name."""
        locator = lex_locators["app_launcher"]["app_link"].format(app_name)
        self.open_app_launcher()
        self.selenium.wait_until_page_contains_element(locator, timeout=30)
        self.selenium.set_focus_to_element(locator)
        elem = self.selenium.get_webelement(locator)
        # The element the locator finds isn't what receives the click;
        # the clickable element is three levels up in the DOM.
        link = elem.find_element_by_xpath("../../..")
        self.selenium.set_focus_to_element(link)
        link.click()
        self.wait_until_modal_is_closed()
@capture_screenshot_on_error
def select_app_launcher_tab(self, tab_name):
locator = lex_locators["app_launcher"]["tab_link"].format(tab_name)
self.open_app_launcher()
self.selenium.wait_until_page_contains_element(locator)
self.selenium.set_focus_to_element(locator)
self._jsclick(locator)
self.wait_until_modal_is_closed()
def salesforce_delete(self, obj_name, obj_id):
self.builtin.log("Deleting {} with Id {}".format(obj_name, obj_id))
obj_class = getattr(self.cumulusci.sf, obj_name)
obj_class.delete(obj_id)
self.remove_session_record(obj_name, obj_id)
def salesforce_get(self, obj_name, obj_id):
self.builtin.log(f"Getting {obj_name} with Id {obj_id}")
obj_class = getattr(self.cumulusci.sf, obj_name)
return obj_class.get(obj_id)
def salesforce_insert(self, obj_name, **kwargs):
self.builtin.log("Inserting {} with values {}".format(obj_name, kwargs))
obj_class = getattr(self.cumulusci.sf, obj_name)
res = obj_class.create(kwargs)
self.store_session_record(obj_name, res["id"])
return res["id"]
def _salesforce_generate_object(self, obj_name, **fields):
obj = {"attributes": {"type": obj_name}}
obj.update(fields)
return obj
def generate_test_data(self, obj_name, number_to_create, **fields):
objs = []
for i in range(int(number_to_create)):
formatted_fields = {
name: format_str(value, {"number": i}) for name, value in fields.items()
}
newobj = self._salesforce_generate_object(obj_name, **formatted_fields)
objs.append(newobj)
return objs
def salesforce_collection_insert(self, objects):
assert (
not obj.get("id", None) for obj in objects
), "Insertable objects should not have IDs"
assert len(objects) <= SF_COLLECTION_INSERTION_LIMIT, (
"Cannot insert more than %s objects with this keyword"
% SF_COLLECTION_INSERTION_LIMIT
)
records = self.cumulusci.sf.restful(
"composite/sobjects",
method="POST",
json={"allOrNone": True, "records": objects},
)
for idx, (record, obj) in enumerate(zip(records, objects)):
if record["errors"]:
raise AssertionError(
"Error on Object {idx}: {record} : {obj}".format(**vars())
)
self.store_session_record(obj["attributes"]["type"], record["id"])
obj["id"] = record["id"]
obj[STATUS_KEY] = record
return objects
    def salesforce_collection_update(self, objects):
        """Update a list of record dicts using the composite sObject API.

        The objects must have ids (e.g. as returned by Salesforce
        Collection Insert). Each object is updated in place with a
        status record; an AssertionError is raised if any record
        reports an error.
        """
        for obj in objects:
            assert obj[
                "id"
            ], "Should be a list of objects with Ids returned by Salesforce Collection Insert"
            # drop any stale status from a previous call before sending
            if STATUS_KEY in obj:
                del obj[STATUS_KEY]
        assert len(objects) <= SF_COLLECTION_INSERTION_LIMIT, (
            "Cannot update more than %s objects with this keyword"
            % SF_COLLECTION_INSERTION_LIMIT
        )
        records = self.cumulusci.sf.restful(
            "composite/sobjects",
            method="PATCH",
            json={"allOrNone": True, "records": objects},
        )
        # first record the status on every object, then check for errors,
        # so all statuses are populated even if we raise
        for record, obj in zip(records, objects):
            obj[STATUS_KEY] = record
        for idx, (record, obj) in enumerate(zip(records, objects)):
            if record["errors"]:
                raise AssertionError(
                    "Error on Object {idx}: {record} : {obj}".format(**vars())
                )
def salesforce_query(self, obj_name, **kwargs):
query = self._soql_query_builder(obj_name, **kwargs)
self.builtin.log("Running SOQL Query: {}".format(query))
return self.cumulusci.sf.query_all(query).get("records", [])
def _soql_query_builder(
self, obj_name, select=None, order_by=None, limit=None, where=None, **kwargs
):
query = "SELECT "
if select:
query += select
else:
query += "Id"
query += " FROM {}".format(obj_name)
where_clauses = []
if where:
where_clauses = [where]
for key, value in kwargs.items():
where_clauses.append("{} = '{}'".format(key, value))
if where_clauses:
query += " WHERE " + " AND ".join(where_clauses)
if order_by:
query += " ORDER BY " + order_by
if limit:
assert int(limit), "Limit should be an integer"
query += f" LIMIT {limit}"
return query
def salesforce_update(self, obj_name, obj_id, **kwargs):
self.builtin.log(
"Updating {} {} with values {}".format(obj_name, obj_id, kwargs)
)
obj_class = getattr(self.cumulusci.sf, obj_name)
return obj_class.update(obj_id, kwargs)
def soql_query(self, query):
self.builtin.log("Running SOQL Query: {}".format(query))
return self.cumulusci.sf.query_all(query)
def store_session_record(self, obj_type, obj_id):
self.builtin.log("Storing {} {} to session records".format(obj_type, obj_id))
self._session_records.append({"type": obj_type, "id": obj_id})
@capture_screenshot_on_error
def wait_until_modal_is_open(self):
self.selenium.wait_until_page_contains_element(
lex_locators["modal"]["is_open"],
timeout=15,
error="Expected to see a modal window, but didn't",
)
def wait_until_modal_is_closed(self):
self.selenium.wait_until_page_does_not_contain_element(
lex_locators["modal"]["is_open"], timeout=15
)
    def wait_until_loading_is_complete(self, locator=None):
        """Wait for a lightning page to finish loading.

        Waits for *locator* (default: the page body) and for aura to
        settle. On failure, a screenshot is captured (best effort)
        before re-raising.
        """
        locator = lex_locators["body"] if locator is None else locator
        try:
            self.selenium.wait_until_page_contains_element(locator)
            self.wait_for_aura()
            # this knowledge article recommends waiting a second after
            # the page appears to be ready:
            # https://help.salesforce.com/articleView?id=000352057&language=en_US&mode=1&type=1
            time.sleep(1)
        except Exception:
            try:
                self.selenium.capture_page_screenshot()
            except Exception as e:
                self.builtin.warn("unable to capture screenshot: {}".format(str(e)))
            raise
@capture_screenshot_on_error
def wait_until_salesforce_is_ready(self, locator=None, timeout=None, interval=5):
# Note: we can't just ask selenium to wait for an element,
# page, we may have to rethink that strategy.
interval = 5 # seconds between each refresh.
timeout = timeout if timeout else self.selenium.get_selenium_timeout()
timeout_seconds = timestr_to_secs(timeout)
start_time = time.time()
login_url = self.cumulusci.login_url()
locator = lex_locators["body"] if locator is None else locator
while True:
try:
self.selenium.wait_for_condition(
"return (document.readyState == 'complete')"
)
self.wait_for_aura()
# If the following doesn't throw an error, we're good to go.
self.selenium.get_webelement(locator)
break
except Exception as e:
self.builtin.log(
"caught exception while waiting: {}".format(str(e)), "DEBUG"
)
if time.time() - start_time > timeout_seconds:
self.selenium.log_location()
raise Exception("Timed out waiting for a lightning page")
# known edge cases that can be worked around
if self._check_for_login_failure():
continue
elif self._check_for_classic():
continue
# not a known edge case; take a deep breath and
# try again.
time.sleep(interval)
self.selenium.go_to(login_url)
    def breakpoint(self):
        # Intentionally a no-op; presumably exists so a debugger or
        # listener can hook this keyword (NOTE(review): confirm with
        # callers before removing).
        return None
    def _check_for_classic(self):
        """Attempt to switch to lightning if we appear to be on a classic page.

        Returns True if a switch was attempted, False otherwise.
        """
        try:
            # we don't actually want to wait here, but if we don't
            # explicitly wait, we'll implicitly wait longer than
            # necessary using the default selenium timeout.
            self.selenium.wait_until_element_is_visible(
                "class:switch-to-lightning", timeout=2
            )
            self.builtin.log(
                "It appears we are on a classic page; attempting to switch to lightning",
                "WARN",
            )
            self.selenium.capture_page_screenshot()
            # press ESC first, presumably to dismiss anything that could
            # obscure the link (NOTE(review): confirm)
            self.selenium.press_keys(None, "ESC")
            self.builtin.sleep("1 second")
            self.selenium.click_link("class:switch-to-lightning")
            return True
        except (NoSuchElementException, AssertionError):
            return False
def _check_for_login_failure(self):
location = self.selenium.get_location()
if "//test.salesforce.com" in location or "//login.salesforce.com" in location:
login_url = self.cumulusci.org.config["instance_url"]
self.builtin.log(f"setting login_url temporarily to {login_url}", "DEBUG")
self.selenium.go_to(login_url)
return True
return False
def elapsed_time_for_last_record(
self, obj_name, start_field, end_field, order_by, **kwargs
):
if len(order_by.split()) != 1:
raise Exception("order_by should be a simple field name")
query = self._soql_query_builder(
obj_name,
select=f"{start_field}, {end_field}",
order_by=order_by + " DESC NULLS LAST",
limit=1,
**kwargs,
)
response = self.soql_query(query)
results = response["records"]
if results:
record = results[0]
return _duration(record[start_field], record[end_field], record)
else:
raise Exception(f"Matching record not found: {query}")
def start_performance_timer(self):
BuiltIn().set_test_variable("${__start_time}", datetime.now())
def stop_performance_timer(self):
builtins = BuiltIn()
start_time = builtins.get_variable_value("${__start_time}")
if start_time:
seconds = (datetime.now() - start_time).seconds
assert seconds is not None
self.set_test_elapsed_time(seconds)
else:
raise Exception(
"Elapsed time clock was not started. "
"Use the Start Elapsed Time keyword to do so."
)
def set_test_elapsed_time(self, elapsedtime):
builtins = BuiltIn()
try:
seconds = float(elapsedtime)
except ValueError:
seconds = timestr_to_secs(elapsedtime)
assert seconds is not None
builtins.set_test_message(f"Elapsed time set by test : {seconds}")
builtins.set_tags("cci_metric_elapsed_time")
builtins.set_test_variable("${cci_metric_elapsed_time}", seconds)
    def set_test_metric(self, metric: str, value=None):
        """Tag the current test with a named numeric metric.

        Note: *value* must be convertible to float; calling this
        without a value raises TypeError (float(None)).
        """
        builtins = BuiltIn()
        value = float(value)
        builtins.set_tags("cci_metric")
        builtins.set_test_variable("${cci_metric_%s}" % metric, value)
    @capture_screenshot_on_error
    def input_form_data(self, *args):
        """Fill in multiple form fields given as alternating label/value arguments.

        Each label is located with the custom ``label:`` locator
        strategy and set via the form handler matching the element's
        tag. Errors for all fields are collected and raised together.
        """
        it = iter(args)
        errors = []
        # zip(it, it) pairs up consecutive args as (label, value)
        for label, value in list(zip(it, it)):
            locator = f"label:{label}"
            element = self.selenium.get_webelement(locator)
            handler = get_form_handler(element, locator)
            try:
                if handler:
                    handler.set(value)
                else:
                    raise Exception(
                        f"No form handler found for tag '{element.tag_name}'"
                    )
            except Exception as e:
                errors.append(f"{label}: {str(e)}")
        if errors:
            message = "There were errors with the following fields:\n"
            message += "\n".join(errors)
            raise Exception(message)
        # FIXME: maybe we should automatically set the focus to some
        # other element to trigger any event handlers on the last
        # element? But what should we set the focus to?
    def locate_element_by_label(self, browser, locator, tag, constraints):
        """Custom locator strategy: find elements by their label text.

        The locator may be prefixed with a fieldset name separated by
        "::" to restrict the search to that fieldset. Labels with a
        leading or trailing "*" (required-field markers) also match.
        Returns the list of matching webelements.
        """
        if "::" in locator:
            fieldset, label = [x.strip() for x in locator.split("::", 1)]
            fieldset_prefix = f'//fieldset[.//*[.="{fieldset}"]]'
        else:
            label = locator
            fieldset_prefix = ""
        xpath = fieldset_prefix + (
            # a label with the given text, optionally with a leading
            # or trailing "*" (ie: required field)
            f'//label[.="{label}" or .="*{label}" or .="{label}*"]'
            # then find the nearest ancestor lightning component
            '/ancestor::*[starts-with(local-name(), "lightning-")][1]'
        )
        elements = browser.find_elements_by_xpath(xpath)
        if not elements:
            # fall back to finding an input or textarea based on the 'for'
            # attribute of a label
            xpath = fieldset_prefix + (
                "//*[self::input or self::textarea]"
                f'[@id=string(//label[.="{label}" or .="*{label}" or .="{label}*"]/@for)]'
            )
            elements = browser.find_elements_by_xpath(xpath)
        return elements
def _duration(start_date: str, end_date: str, record: dict) -> float:
    """Return the number of seconds between two date strings.

    *record* is only used to build a helpful error message when a
    date fails to parse.
    """
    try:
        parsed_start = parse_date(start_date)
        parsed_end = parse_date(end_date)
    except (ParserError, TypeError) as e:
        raise Exception(f"Date parse error: {e} in record {record}")
    return (parsed_end - parsed_start).total_seconds()
| true | true |
f72224100c9bc8d6e1fa39209af147abe4365ee7 | 2,044 | py | Python | learning/alembic/env.py | koichiro8/learning | ffc0785dfeeeee4240aed0a1742a64b86ff28369 | [
"MIT"
] | null | null | null | learning/alembic/env.py | koichiro8/learning | ffc0785dfeeeee4240aed0a1742a64b86ff28369 | [
"MIT"
] | null | null | null | learning/alembic/env.py | koichiro8/learning | ffc0785dfeeeee4240aed0a1742a64b86ff28369 | [
"MIT"
] | null | null | null | # type: ignore
import os
from logging.config import fileConfig
from alembic import context
from sqlalchemy import engine_from_config, pool
from learning.entities import Base
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
target_metadata = Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well. By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.

    The database URL is read from the DB_URL environment variable
    (raises KeyError if unset).
    """
    url = os.environ["DB_URL"]
    context.configure(
        url=url,
        target_metadata=target_metadata,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
    )
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    The database URL is read from the ALEMBIC_DB_URL environment
    variable (raises KeyError if unset), overriding any url in the
    ini file section.
    """
    connectable = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
        url=os.environ["ALEMBIC_DB_URL"],
    )
    with connectable.connect() as connection:
        context.configure(connection=connection, target_metadata=target_metadata)
        with context.begin_transaction():
            context.run_migrations()
# dispatch to the proper migration mode based on how alembic invoked us
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
| 26.205128 | 81 | 0.716732 |
import os
from logging.config import fileConfig
from alembic import context
from sqlalchemy import engine_from_config, pool
from learning.entities import Base
config = context.config
fileConfig(config.config_file_name)
# for 'autogenerate' support
# from myapp import mymodel
target_metadata = Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
url = os.environ["DB_URL"]
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
url=os.environ["ALEMBIC_DB_URL"],
)
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| true | true |
f72225b6395da1cc12e591bea2de48caa9c295f3 | 19,777 | py | Python | example/VCU118/fpga/tb/test_fpga_core.py | bmindur/xfcp | dd2eef2cb23a17209180b278b1e2640fd09fda28 | [
"MIT"
] | 35 | 2018-07-06T04:59:29.000Z | 2022-03-08T22:45:11.000Z | example/VCU118/fpga/tb/test_fpga_core.py | bmindur/xfcp | dd2eef2cb23a17209180b278b1e2640fd09fda28 | [
"MIT"
] | 4 | 2021-04-23T15:24:32.000Z | 2022-03-04T10:31:12.000Z | example/VCU118/fpga/tb/test_fpga_core.py | bmindur/xfcp | dd2eef2cb23a17209180b278b1e2640fd09fda28 | [
"MIT"
] | 15 | 2017-03-19T08:04:58.000Z | 2022-02-11T18:38:59.000Z | #!/usr/bin/env python
"""
Copyright (c) 2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import struct
import xfcp
import uart_ep
import i2c
import eth_ep
import arp_ep
import udp_ep
import gmii_ep
module = 'fpga_core'
testbench = 'test_%s' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("../lib/xfcp/rtl/xfcp_interface_uart.v")
srcs.append("../lib/xfcp/rtl/xfcp_interface_udp.v")
srcs.append("../lib/xfcp/rtl/xfcp_mod_i2c_master.v")
srcs.append("../lib/xfcp/rtl/xfcp_mod_wb.v")
srcs.append("../lib/xfcp/rtl/xfcp_arb.v")
srcs.append("../lib/xfcp/rtl/xfcp_switch.v")
srcs.append("../lib/i2c/rtl/i2c_master.v")
srcs.append("../lib/eth/rtl/eth_mac_1g_fifo.v")
srcs.append("../lib/eth/rtl/eth_mac_1g.v")
srcs.append("../lib/eth/rtl/axis_gmii_rx.v")
srcs.append("../lib/eth/rtl/axis_gmii_tx.v")
srcs.append("../lib/eth/rtl/lfsr.v")
srcs.append("../lib/eth/rtl/eth_axis_rx.v")
srcs.append("../lib/eth/rtl/eth_axis_tx.v")
srcs.append("../lib/eth/rtl/udp_complete.v")
srcs.append("../lib/eth/rtl/udp_checksum_gen.v")
srcs.append("../lib/eth/rtl/udp.v")
srcs.append("../lib/eth/rtl/udp_ip_rx.v")
srcs.append("../lib/eth/rtl/udp_ip_tx.v")
srcs.append("../lib/eth/rtl/ip_complete.v")
srcs.append("../lib/eth/rtl/ip.v")
srcs.append("../lib/eth/rtl/ip_eth_rx.v")
srcs.append("../lib/eth/rtl/ip_eth_tx.v")
srcs.append("../lib/eth/rtl/ip_arb_mux.v")
srcs.append("../lib/eth/rtl/arp.v")
srcs.append("../lib/eth/rtl/arp_cache.v")
srcs.append("../lib/eth/rtl/arp_eth_rx.v")
srcs.append("../lib/eth/rtl/arp_eth_tx.v")
srcs.append("../lib/eth/rtl/eth_arb_mux.v")
srcs.append("../lib/uart/rtl/uart.v")
srcs.append("../lib/uart/rtl/uart_rx.v")
srcs.append("../lib/uart/rtl/uart_tx.v")
srcs.append("../lib/wb/rtl/wb_ram.v")
srcs.append("../lib/axis/rtl/arbiter.v")
srcs.append("../lib/axis/rtl/priority_encoder.v")
srcs.append("../lib/axis/rtl/axis_cobs_encode.v")
srcs.append("../lib/axis/rtl/axis_cobs_decode.v")
srcs.append("../lib/axis/rtl/axis_fifo.v")
srcs.append("../lib/axis/rtl/axis_async_fifo.v")
srcs.append("../lib/axis/rtl/axis_async_fifo_adapter.v")
srcs.append("test_%s.v" % module)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Parameters
TARGET = "SIM"
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
btnu = Signal(bool(0))
btnl = Signal(bool(0))
btnd = Signal(bool(0))
btnr = Signal(bool(0))
btnc = Signal(bool(0))
sw = Signal(intbv(0)[4:])
i2c_scl_i = Signal(bool(1))
i2c_sda_i = Signal(bool(1))
phy_gmii_clk = Signal(bool(0))
phy_gmii_rst = Signal(bool(0))
phy_gmii_clk_en = Signal(bool(0))
phy_gmii_rxd = Signal(intbv(0)[8:])
phy_gmii_rx_dv = Signal(bool(0))
phy_gmii_rx_er = Signal(bool(0))
phy_int_n = Signal(bool(1))
uart_rxd = Signal(bool(1))
uart_cts = Signal(bool(1))
s1_scl_i = Signal(bool(1))
s1_sda_i = Signal(bool(1))
s2_scl_i = Signal(bool(1))
s2_sda_i = Signal(bool(1))
# Outputs
led = Signal(intbv(0)[8:])
i2c_scl_o = Signal(bool(1))
i2c_scl_t = Signal(bool(1))
i2c_sda_o = Signal(bool(1))
i2c_sda_t = Signal(bool(1))
phy_gmii_txd = Signal(intbv(0)[8:])
phy_gmii_tx_en = Signal(bool(0))
phy_gmii_tx_er = Signal(bool(0))
phy_reset_n = Signal(bool(0))
uart_txd = Signal(bool(1))
uart_rts = Signal(bool(1))
s1_scl_o = Signal(bool(1))
s1_scl_t = Signal(bool(1))
s1_sda_o = Signal(bool(1))
s1_sda_t = Signal(bool(1))
s2_scl_o = Signal(bool(1))
s2_scl_t = Signal(bool(1))
s2_sda_o = Signal(bool(1))
s2_sda_t = Signal(bool(1))
# sources and sinks
gmii_source = gmii_ep.GMIISource()
gmii_source_logic = gmii_source.create_logic(
phy_gmii_clk,
phy_gmii_rst,
txd=phy_gmii_rxd,
tx_en=phy_gmii_rx_dv,
tx_er=phy_gmii_rx_er,
clk_enable=phy_gmii_clk_en,
name='gmii_source'
)
gmii_sink = gmii_ep.GMIISink()
gmii_sink_logic = gmii_sink.create_logic(
phy_gmii_clk,
phy_gmii_rst,
rxd=phy_gmii_txd,
rx_dv=phy_gmii_tx_en,
rx_er=phy_gmii_tx_er,
clk_enable=phy_gmii_clk_en,
name='gmii_sink'
)
uart_source = uart_ep.UARTSource()
uart_source_logic = uart_source.create_logic(
clk,
rst,
txd=uart_rxd,
prescale=int(125000000/(115200*8)),
name='uart_source'
)
uart_sink = uart_ep.UARTSink()
uart_sink_logic = uart_sink.create_logic(
clk,
rst,
rxd=uart_txd,
prescale=int(125000000/(115200*8)),
name='uart_sink'
)
# I2C memory model 1
i2c_mem1 = i2c.I2CMem(1024)
i2c_mem_logic1 = i2c_mem1.create_logic(
scl_i=s1_scl_i,
scl_o=s1_scl_o,
scl_t=s1_scl_t,
sda_i=s1_sda_i,
sda_o=s1_sda_o,
sda_t=s1_sda_t,
abw=2,
address=0x50,
latency=0,
name='slave1'
)
# I2C memory model 2
i2c_mem2 = i2c.I2CMem(1024)
i2c_mem_logic2 = i2c_mem2.create_logic(
scl_i=s2_scl_i,
scl_o=s2_scl_o,
scl_t=s2_scl_t,
sda_i=s2_sda_i,
sda_o=s2_sda_o,
sda_t=s2_sda_t,
abw=2,
address=0x51,
latency=1000,
name='slave2'
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
btnu=btnu,
btnl=btnl,
btnd=btnd,
btnr=btnr,
btnc=btnc,
sw=sw,
led=led,
i2c_scl_i=i2c_scl_i,
i2c_scl_o=i2c_scl_o,
i2c_scl_t=i2c_scl_t,
i2c_sda_i=i2c_sda_i,
i2c_sda_o=i2c_sda_o,
i2c_sda_t=i2c_sda_t,
phy_gmii_clk=phy_gmii_clk,
phy_gmii_rst=phy_gmii_rst,
phy_gmii_clk_en=phy_gmii_clk_en,
phy_gmii_rxd=phy_gmii_rxd,
phy_gmii_rx_dv=phy_gmii_rx_dv,
phy_gmii_rx_er=phy_gmii_rx_er,
phy_gmii_txd=phy_gmii_txd,
phy_gmii_tx_en=phy_gmii_tx_en,
phy_gmii_tx_er=phy_gmii_tx_er,
phy_reset_n=phy_reset_n,
phy_int_n=phy_int_n,
uart_rxd=uart_rxd,
uart_txd=uart_txd,
uart_rts=uart_rts,
uart_cts=uart_cts
)
@always_comb
def bus():
# emulate I2C wired AND
i2c_scl_i.next = i2c_scl_o & s1_scl_o & s2_scl_o
i2c_sda_i.next = i2c_sda_o & s1_sda_o & s2_sda_o
s1_scl_i.next = i2c_scl_o & s1_scl_o & s2_scl_o
s1_sda_i.next = i2c_sda_o & s1_sda_o & s2_sda_o
s2_scl_i.next = i2c_scl_o & s1_scl_o & s2_scl_o
s2_sda_i.next = i2c_sda_o & s1_sda_o & s2_sda_o
@always(delay(4))
def clkgen():
clk.next = not clk
phy_gmii_clk.next = not phy_gmii_clk
clk_enable_rate = Signal(int(0))
clk_enable_div = Signal(int(0))
@always(clk.posedge)
def clk_enable_gen():
if clk_enable_div.next > 0:
phy_gmii_clk_en.next = 0
clk_enable_div.next = clk_enable_div - 1
else:
phy_gmii_clk_en.next = 1
clk_enable_div.next = clk_enable_rate - 1
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
phy_gmii_rst.next = 1
yield clk.posedge
rst.next = 0
phy_gmii_rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
# testbench stimulus
yield clk.posedge
print("test 1: enumerate via UDP")
current_test.next = 1
pkt = xfcp.XFCPFrame()
pkt.path = []
pkt.rpath = []
pkt.ptype = 0xfe
pkt.payload = b''
test_frame = udp_ep.UDPFrame()
test_frame.eth_dest_mac = 0x020000000000
test_frame.eth_src_mac = 0xDAD1D2D3D4D5
test_frame.eth_type = 0x0800
test_frame.ip_version = 4
test_frame.ip_ihl = 5
test_frame.ip_dscp = 0
test_frame.ip_ecn = 0
test_frame.ip_length = None
test_frame.ip_identification = 0
test_frame.ip_flags = 2
test_frame.ip_fragment_offset = 0
test_frame.ip_ttl = 64
test_frame.ip_protocol = 0x11
test_frame.ip_header_checksum = None
test_frame.ip_source_ip = 0xc0a80181
test_frame.ip_dest_ip = 0xc0a80180
test_frame.udp_source_port = 1234
test_frame.udp_dest_port = 14000
test_frame.payload = pkt.build_axis()
test_frame.build()
gmii_source.send(b'\x55\x55\x55\x55\x55\x55\x55\xD5'+test_frame.build_eth().build_axis_fcs().data)
# wait for ARP request packet
rx_frame = None
while rx_frame is None:
yield clk.posedge
rx_frame = gmii_sink.recv()
check_eth_frame = eth_ep.EthFrame()
check_eth_frame.parse_axis_fcs(rx_frame.data[8:])
check_frame = arp_ep.ARPFrame()
check_frame.parse_eth(check_eth_frame)
print(check_frame)
assert check_frame.eth_dest_mac == 0xFFFFFFFFFFFF
assert check_frame.eth_src_mac == 0x020000000000
assert check_frame.eth_type == 0x0806
assert check_frame.arp_htype == 0x0001
assert check_frame.arp_ptype == 0x0800
assert check_frame.arp_hlen == 6
assert check_frame.arp_plen == 4
assert check_frame.arp_oper == 1
assert check_frame.arp_sha == 0x020000000000
assert check_frame.arp_spa == 0xc0a80180
assert check_frame.arp_tha == 0x000000000000
assert check_frame.arp_tpa == 0xc0a80181
# generate response
arp_frame = arp_ep.ARPFrame()
arp_frame.eth_dest_mac = 0x020000000000
arp_frame.eth_src_mac = 0xDAD1D2D3D4D5
arp_frame.eth_type = 0x0806
arp_frame.arp_htype = 0x0001
arp_frame.arp_ptype = 0x0800
arp_frame.arp_hlen = 6
arp_frame.arp_plen = 4
arp_frame.arp_oper = 2
arp_frame.arp_sha = 0xDAD1D2D3D4D5
arp_frame.arp_spa = 0xc0a80181
arp_frame.arp_tha = 0x020000000000
arp_frame.arp_tpa = 0xc0a80180
gmii_source.send(b'\x55\x55\x55\x55\x55\x55\x55\xD5'+arp_frame.build_eth().build_axis_fcs().data)
rx_frame = None
while rx_frame is None:
yield clk.posedge
rx_frame = gmii_sink.recv()
check_eth_frame = eth_ep.EthFrame()
check_eth_frame.parse_axis_fcs(rx_frame.data[8:])
check_frame = udp_ep.UDPFrame()
check_frame.parse_eth(check_eth_frame)
print(check_frame)
assert check_frame.eth_dest_mac == 0xDAD1D2D3D4D5
assert check_frame.eth_src_mac == 0x020000000000
assert check_frame.eth_type == 0x0800
assert check_frame.ip_version == 4
assert check_frame.ip_ihl == 5
assert check_frame.ip_dscp == 0
assert check_frame.ip_ecn == 0
assert check_frame.ip_identification == 0
assert check_frame.ip_flags == 2
assert check_frame.ip_fragment_offset == 0
assert check_frame.ip_ttl == 64
assert check_frame.ip_protocol == 0x11
assert check_frame.ip_source_ip == 0xc0a80180
assert check_frame.ip_dest_ip == 0xc0a80181
assert check_frame.udp_source_port == 14000
assert check_frame.udp_dest_port == 1234
rx_pkt = xfcp.XFCPFrame()
rx_pkt.parse_axis(check_frame.payload.data)
print(rx_pkt)
assert rx_pkt.ptype == 0xff
assert rx_pkt.path == []
assert rx_pkt.rpath == []
assert len(rx_pkt.payload.data) == 64
pkt = xfcp.XFCPFrame()
pkt.path = [0]
pkt.rpath = []
pkt.ptype = 0xfe
pkt.payload = b''
test_frame = udp_ep.UDPFrame()
test_frame.eth_dest_mac = 0x020000000000
test_frame.eth_src_mac = 0xDAD1D2D3D4D5
test_frame.eth_type = 0x0800
test_frame.ip_version = 4
test_frame.ip_ihl = 5
test_frame.ip_dscp = 0
test_frame.ip_ecn = 0
test_frame.ip_length = None
test_frame.ip_identification = 0
test_frame.ip_flags = 2
test_frame.ip_fragment_offset = 0
test_frame.ip_ttl = 64
test_frame.ip_protocol = 0x11
test_frame.ip_header_checksum = None
test_frame.ip_source_ip = 0xc0a80181
test_frame.ip_dest_ip = 0xc0a80180
test_frame.udp_source_port = 1234
test_frame.udp_dest_port = 14000
test_frame.payload = pkt.build_axis()
test_frame.build()
gmii_source.send(b'\x55\x55\x55\x55\x55\x55\x55\xD5'+test_frame.build_eth().build_axis_fcs().data)
rx_frame = None
while rx_frame is None:
yield clk.posedge
rx_frame = gmii_sink.recv()
check_eth_frame = eth_ep.EthFrame()
check_eth_frame.parse_axis_fcs(rx_frame.data[8:])
check_frame = udp_ep.UDPFrame()
check_frame.parse_eth(check_eth_frame)
print(check_frame)
assert check_frame.eth_dest_mac == 0xDAD1D2D3D4D5
assert check_frame.eth_src_mac == 0x020000000000
assert check_frame.eth_type == 0x0800
assert check_frame.ip_version == 4
assert check_frame.ip_ihl == 5
assert check_frame.ip_dscp == 0
assert check_frame.ip_ecn == 0
assert check_frame.ip_identification == 0
assert check_frame.ip_flags == 2
assert check_frame.ip_fragment_offset == 0
assert check_frame.ip_ttl == 64
assert check_frame.ip_protocol == 0x11
assert check_frame.ip_source_ip == 0xc0a80180
assert check_frame.ip_dest_ip == 0xc0a80181
assert check_frame.udp_source_port == 14000
assert check_frame.udp_dest_port == 1234
rx_pkt = xfcp.XFCPFrame()
rx_pkt.parse_axis(check_frame.payload.data)
print(rx_pkt)
assert rx_pkt.ptype == 0xff
assert rx_pkt.path == [0]
assert rx_pkt.rpath == []
assert len(rx_pkt.payload.data) == 32
assert gmii_source.empty()
assert gmii_sink.empty()
yield delay(100)
yield clk.posedge
print("test 1: test write and read RAM 0")
current_test.next = 1
pkt1 = xfcp.XFCPFrame()
pkt1.path = [0]
pkt1.ptype = 0x12
pkt1.payload = bytearray(struct.pack('<BH', 0, 4)+b'\x11\x22\x33\x44')
pkt2 = xfcp.XFCPFrame()
pkt2.path = [0]
pkt2.ptype = 0x10
pkt2.payload = bytearray(struct.pack('<BH', 0, 4))
test_frame1 = udp_ep.UDPFrame()
test_frame1.eth_dest_mac = 0x020000000000
test_frame1.eth_src_mac = 0xDAD1D2D3D4D5
test_frame1.eth_type = 0x0800
test_frame1.ip_version = 4
test_frame1.ip_ihl = 5
test_frame1.ip_dscp = 0
test_frame1.ip_ecn = 0
test_frame1.ip_length = None
test_frame1.ip_identification = 0
test_frame1.ip_flags = 2
test_frame1.ip_fragment_offset = 0
test_frame1.ip_ttl = 64
test_frame1.ip_protocol = 0x11
test_frame1.ip_header_checksum = None
test_frame1.ip_source_ip = 0xc0a80181
test_frame1.ip_dest_ip = 0xc0a80180
test_frame1.udp_source_port = 1234
test_frame1.udp_dest_port = 14000
test_frame1.payload = pkt1.build_axis()
test_frame1.build()
test_frame2 = udp_ep.UDPFrame(test_frame1)
test_frame2.payload = pkt2.build_axis()
test_frame2.build()
gmii_source.send(b'\x55\x55\x55\x55\x55\x55\x55\xD5'+test_frame1.build_eth().build_axis_fcs().data)
gmii_source.send(b'\x55\x55\x55\x55\x55\x55\x55\xD5'+test_frame2.build_eth().build_axis_fcs().data)
rx_frame = None
while rx_frame is None:
yield clk.posedge
rx_frame = gmii_sink.recv()
check_eth_frame = eth_ep.EthFrame()
check_eth_frame.parse_axis_fcs(rx_frame.data[8:])
check_frame = udp_ep.UDPFrame()
check_frame.parse_eth(check_eth_frame)
print(check_frame)
rx_pkt = xfcp.XFCPFrame()
rx_pkt.parse_axis(check_frame.payload.data)
print(rx_pkt)
assert rx_pkt.ptype == 0x13
assert rx_pkt.payload.data == struct.pack('<BH', 0, 4)
rx_frame = None
while rx_frame is None:
yield clk.posedge
rx_frame = gmii_sink.recv()
check_eth_frame = eth_ep.EthFrame()
check_eth_frame.parse_axis_fcs(rx_frame.data[8:])
check_frame = udp_ep.UDPFrame()
check_frame.parse_eth(check_eth_frame)
print(check_frame)
rx_pkt = xfcp.XFCPFrame()
rx_pkt.parse_axis(check_frame.payload.data)
print(rx_pkt)
assert rx_pkt.ptype == 0x11
assert rx_pkt.payload.data == struct.pack('<BH', 0, 4)+b'\x11\x22\x33\x44'
assert gmii_source.empty()
assert gmii_sink.empty()
yield delay(100)
yield clk.posedge
print("test 3: enumerate via UART")
current_test.next = 3
pkt = xfcp.XFCPFrame()
pkt.path = []
pkt.rpath = []
pkt.ptype = 0xfe
pkt.payload = b''
uart_source.write(pkt.build_axis_cobs().data+b'\x00')
yield clk.posedge
rx_data = b''
while True:
if not uart_sink.empty():
b = bytearray(uart_sink.read(1))
rx_data += b
if b[0] == 0:
break
yield clk.posedge
rx_pkt = xfcp.XFCPFrame()
rx_pkt.parse_axis_cobs(rx_data[:-1])
print(rx_pkt)
assert rx_pkt.ptype == 0xff
assert rx_pkt.path == []
assert rx_pkt.rpath == []
assert len(rx_pkt.payload.data) == 64
pkt = xfcp.XFCPFrame()
pkt.path = [0]
pkt.rpath = []
pkt.ptype = 0xfe
pkt.payload = b''
uart_source.write(pkt.build_axis_cobs().data+b'\x00')
yield clk.posedge
rx_data = b''
while True:
if not uart_sink.empty():
b = bytearray(uart_sink.read(1))
rx_data += b
if b[0] == 0:
break
yield clk.posedge
rx_pkt = xfcp.XFCPFrame()
rx_pkt.parse_axis_cobs(rx_data[:-1])
print(rx_pkt)
assert rx_pkt.ptype == 0xff
assert rx_pkt.path == [0]
assert rx_pkt.rpath == []
assert len(rx_pkt.payload.data) == 32
yield delay(100)
raise StopSimulation
return instances()
def test_bench():
    """Instantiate the bench and run the MyHDL simulation to completion."""
    Simulation(bench()).run()
# Script entry point: run the cosimulation testbench when invoked directly.
if __name__ == '__main__':
    print("Running test...")
    test_bench()
| 29.874622 | 107 | 0.630682 |
from myhdl import *
import os
import struct
import xfcp
import uart_ep
import i2c
import eth_ep
import arp_ep
import udp_ep
import gmii_ep
# Names of the DUT module and its generated testbench wrapper.
module = 'fpga_core'
testbench = 'test_%s' % module

# Every HDL source handed to iverilog: the DUT, its library dependencies,
# and the Verilog testbench shim.
srcs = [
    "../rtl/%s.v" % module,
    "../lib/xfcp/rtl/xfcp_interface_uart.v",
    "../lib/xfcp/rtl/xfcp_interface_udp.v",
    "../lib/xfcp/rtl/xfcp_mod_i2c_master.v",
    "../lib/xfcp/rtl/xfcp_mod_wb.v",
    "../lib/xfcp/rtl/xfcp_arb.v",
    "../lib/xfcp/rtl/xfcp_switch.v",
    "../lib/i2c/rtl/i2c_master.v",
    "../lib/eth/rtl/eth_mac_1g_fifo.v",
    "../lib/eth/rtl/eth_mac_1g.v",
    "../lib/eth/rtl/axis_gmii_rx.v",
    "../lib/eth/rtl/axis_gmii_tx.v",
    "../lib/eth/rtl/lfsr.v",
    "../lib/eth/rtl/eth_axis_rx.v",
    "../lib/eth/rtl/eth_axis_tx.v",
    "../lib/eth/rtl/udp_complete.v",
    "../lib/eth/rtl/udp_checksum_gen.v",
    "../lib/eth/rtl/udp.v",
    "../lib/eth/rtl/udp_ip_rx.v",
    "../lib/eth/rtl/udp_ip_tx.v",
    "../lib/eth/rtl/ip_complete.v",
    "../lib/eth/rtl/ip.v",
    "../lib/eth/rtl/ip_eth_rx.v",
    "../lib/eth/rtl/ip_eth_tx.v",
    "../lib/eth/rtl/ip_arb_mux.v",
    "../lib/eth/rtl/arp.v",
    "../lib/eth/rtl/arp_cache.v",
    "../lib/eth/rtl/arp_eth_rx.v",
    "../lib/eth/rtl/arp_eth_tx.v",
    "../lib/eth/rtl/eth_arb_mux.v",
    "../lib/uart/rtl/uart.v",
    "../lib/uart/rtl/uart_rx.v",
    "../lib/uart/rtl/uart_tx.v",
    "../lib/wb/rtl/wb_ram.v",
    "../lib/axis/rtl/arbiter.v",
    "../lib/axis/rtl/priority_encoder.v",
    "../lib/axis/rtl/axis_cobs_encode.v",
    "../lib/axis/rtl/axis_cobs_decode.v",
    "../lib/axis/rtl/axis_fifo.v",
    "../lib/axis/rtl/axis_async_fifo.v",
    "../lib/axis/rtl/axis_async_fifo_adapter.v",
    "test_%s.v" % module,
]

src = ' '.join(srcs)

# Command used by bench() to compile the simulation executable.
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
    """MyHDL/Icarus cosimulation testbench for the fpga_core DUT.

    Drives the XFCP stack through two transports and checks the responses:
      test 1 - XFCP enumeration over UDP (including the initial ARP exchange)
      test 2 - XFCP Wishbone RAM write/read over UDP
      test 3 - XFCP enumeration over UART (COBS framed)

    Fix vs. original: the second test sequence was mislabeled "test 1" and
    re-used current_test = 1; it is now numbered 2, consistent with test 3.
    """

    # Parameters
    TARGET = "SIM"

    # Inputs
    clk = Signal(bool(0))
    rst = Signal(bool(0))
    current_test = Signal(intbv(0)[8:])

    btnu = Signal(bool(0))
    btnl = Signal(bool(0))
    btnd = Signal(bool(0))
    btnr = Signal(bool(0))
    btnc = Signal(bool(0))
    sw = Signal(intbv(0)[4:])
    i2c_scl_i = Signal(bool(1))
    i2c_sda_i = Signal(bool(1))
    phy_gmii_clk = Signal(bool(0))
    phy_gmii_rst = Signal(bool(0))
    phy_gmii_clk_en = Signal(bool(0))
    phy_gmii_rxd = Signal(intbv(0)[8:])
    phy_gmii_rx_dv = Signal(bool(0))
    phy_gmii_rx_er = Signal(bool(0))
    phy_int_n = Signal(bool(1))
    uart_rxd = Signal(bool(1))
    uart_cts = Signal(bool(1))
    s1_scl_i = Signal(bool(1))
    s1_sda_i = Signal(bool(1))
    s2_scl_i = Signal(bool(1))
    s2_sda_i = Signal(bool(1))

    # Outputs
    led = Signal(intbv(0)[8:])
    i2c_scl_o = Signal(bool(1))
    i2c_scl_t = Signal(bool(1))
    i2c_sda_o = Signal(bool(1))
    i2c_sda_t = Signal(bool(1))
    phy_gmii_txd = Signal(intbv(0)[8:])
    phy_gmii_tx_en = Signal(bool(0))
    phy_gmii_tx_er = Signal(bool(0))
    phy_reset_n = Signal(bool(0))
    uart_txd = Signal(bool(1))
    uart_rts = Signal(bool(1))
    s1_scl_o = Signal(bool(1))
    s1_scl_t = Signal(bool(1))
    s1_sda_o = Signal(bool(1))
    s1_sda_t = Signal(bool(1))
    s2_scl_o = Signal(bool(1))
    s2_scl_t = Signal(bool(1))
    s2_sda_o = Signal(bool(1))
    s2_sda_t = Signal(bool(1))

    # GMII endpoints driving/observing the PHY-side interface of the DUT.
    gmii_source = gmii_ep.GMIISource()

    gmii_source_logic = gmii_source.create_logic(
        phy_gmii_clk,
        phy_gmii_rst,
        txd=phy_gmii_rxd,
        tx_en=phy_gmii_rx_dv,
        tx_er=phy_gmii_rx_er,
        clk_enable=phy_gmii_clk_en,
        name='gmii_source'
    )

    gmii_sink = gmii_ep.GMIISink()

    gmii_sink_logic = gmii_sink.create_logic(
        phy_gmii_clk,
        phy_gmii_rst,
        rxd=phy_gmii_txd,
        rx_dv=phy_gmii_tx_en,
        rx_er=phy_gmii_tx_er,
        clk_enable=phy_gmii_clk_en,
        name='gmii_sink'
    )

    # UART endpoints for the serial XFCP interface (115200 baud model).
    uart_source = uart_ep.UARTSource()

    uart_source_logic = uart_source.create_logic(
        clk,
        rst,
        txd=uart_rxd,
        prescale=int(125000000/(115200*8)),
        name='uart_source'
    )

    uart_sink = uart_ep.UARTSink()

    uart_sink_logic = uart_sink.create_logic(
        clk,
        rst,
        rxd=uart_txd,
        prescale=int(125000000/(115200*8)),
        name='uart_sink'
    )

    # Two I2C slave memory models on separate buses (slave2 adds latency).
    i2c_mem1 = i2c.I2CMem(1024)

    i2c_mem_logic1 = i2c_mem1.create_logic(
        scl_i=s1_scl_i,
        scl_o=s1_scl_o,
        scl_t=s1_scl_t,
        sda_i=s1_sda_i,
        sda_o=s1_sda_o,
        sda_t=s1_sda_t,
        abw=2,
        address=0x50,
        latency=0,
        name='slave1'
    )

    i2c_mem2 = i2c.I2CMem(1024)

    i2c_mem_logic2 = i2c_mem2.create_logic(
        scl_i=s2_scl_i,
        scl_o=s2_scl_o,
        scl_t=s2_scl_t,
        sda_i=s2_sda_i,
        sda_o=s2_sda_o,
        sda_t=s2_sda_t,
        abw=2,
        address=0x51,
        latency=1000,
        name='slave2'
    )

    # Compile the simulation executable, then attach the cosimulation DUT.
    if os.system(build_cmd):
        raise Exception("Error running build command")

    dut = Cosimulation(
        "vvp -m myhdl %s.vvp -lxt2" % testbench,
        clk=clk,
        rst=rst,
        current_test=current_test,
        btnu=btnu,
        btnl=btnl,
        btnd=btnd,
        btnr=btnr,
        btnc=btnc,
        sw=sw,
        led=led,
        i2c_scl_i=i2c_scl_i,
        i2c_scl_o=i2c_scl_o,
        i2c_scl_t=i2c_scl_t,
        i2c_sda_i=i2c_sda_i,
        i2c_sda_o=i2c_sda_o,
        i2c_sda_t=i2c_sda_t,
        phy_gmii_clk=phy_gmii_clk,
        phy_gmii_rst=phy_gmii_rst,
        phy_gmii_clk_en=phy_gmii_clk_en,
        phy_gmii_rxd=phy_gmii_rxd,
        phy_gmii_rx_dv=phy_gmii_rx_dv,
        phy_gmii_rx_er=phy_gmii_rx_er,
        phy_gmii_txd=phy_gmii_txd,
        phy_gmii_tx_en=phy_gmii_tx_en,
        phy_gmii_tx_er=phy_gmii_tx_er,
        phy_reset_n=phy_reset_n,
        phy_int_n=phy_int_n,
        uart_rxd=uart_rxd,
        uart_txd=uart_txd,
        uart_rts=uart_rts,
        uart_cts=uart_cts
    )

    @always_comb
    def bus():
        # Open-drain I2C bus model: wired-AND of all drivers on each line.
        i2c_scl_i.next = i2c_scl_o & s1_scl_o & s2_scl_o
        i2c_sda_i.next = i2c_sda_o & s1_sda_o & s2_sda_o
        s1_scl_i.next = i2c_scl_o & s1_scl_o & s2_scl_o
        s1_sda_i.next = i2c_sda_o & s1_sda_o & s2_sda_o
        s2_scl_i.next = i2c_scl_o & s1_scl_o & s2_scl_o
        s2_sda_i.next = i2c_sda_o & s1_sda_o & s2_sda_o

    @always(delay(4))
    def clkgen():
        clk.next = not clk
        phy_gmii_clk.next = not phy_gmii_clk

    # Optional GMII clock-enable divider (rate 0 keeps enable asserted).
    clk_enable_rate = Signal(int(0))
    clk_enable_div = Signal(int(0))

    @always(clk.posedge)
    def clk_enable_gen():
        # NOTE(review): reads clk_enable_div.next rather than the current
        # value; kept as-is since behavior depends on MyHDL semantics here.
        if clk_enable_div.next > 0:
            phy_gmii_clk_en.next = 0
            clk_enable_div.next = clk_enable_div - 1
        else:
            phy_gmii_clk_en.next = 1
            clk_enable_div.next = clk_enable_rate - 1

    @instance
    def check():
        # Reset sequence.
        yield delay(100)
        yield clk.posedge
        rst.next = 1
        phy_gmii_rst.next = 1
        yield clk.posedge
        rst.next = 0
        phy_gmii_rst.next = 0
        yield clk.posedge
        yield delay(100)
        yield clk.posedge

        yield clk.posedge
        print("test 1: enumerate via UDP")
        current_test.next = 1

        # XFCP ID request (ptype 0xfe) addressed to the root switch.
        pkt = xfcp.XFCPFrame()
        pkt.path = []
        pkt.rpath = []
        pkt.ptype = 0xfe
        pkt.payload = b''

        test_frame = udp_ep.UDPFrame()
        test_frame.eth_dest_mac = 0x020000000000
        test_frame.eth_src_mac = 0xDAD1D2D3D4D5
        test_frame.eth_type = 0x0800
        test_frame.ip_version = 4
        test_frame.ip_ihl = 5
        test_frame.ip_dscp = 0
        test_frame.ip_ecn = 0
        test_frame.ip_length = None
        test_frame.ip_identification = 0
        test_frame.ip_flags = 2
        test_frame.ip_fragment_offset = 0
        test_frame.ip_ttl = 64
        test_frame.ip_protocol = 0x11
        test_frame.ip_header_checksum = None
        test_frame.ip_source_ip = 0xc0a80181
        test_frame.ip_dest_ip = 0xc0a80180
        test_frame.udp_source_port = 1234
        test_frame.udp_dest_port = 14000
        test_frame.payload = pkt.build_axis()
        test_frame.build()

        gmii_source.send(b'\x55\x55\x55\x55\x55\x55\x55\xD5'+test_frame.build_eth().build_axis_fcs().data)

        # DUT does not know the source MAC yet: expect an ARP request first.
        rx_frame = None
        while rx_frame is None:
            yield clk.posedge
            rx_frame = gmii_sink.recv()

        check_eth_frame = eth_ep.EthFrame()
        check_eth_frame.parse_axis_fcs(rx_frame.data[8:])

        check_frame = arp_ep.ARPFrame()
        check_frame.parse_eth(check_eth_frame)

        print(check_frame)

        assert check_frame.eth_dest_mac == 0xFFFFFFFFFFFF
        assert check_frame.eth_src_mac == 0x020000000000
        assert check_frame.eth_type == 0x0806
        assert check_frame.arp_htype == 0x0001
        assert check_frame.arp_ptype == 0x0800
        assert check_frame.arp_hlen == 6
        assert check_frame.arp_plen == 4
        assert check_frame.arp_oper == 1
        assert check_frame.arp_sha == 0x020000000000
        assert check_frame.arp_spa == 0xc0a80180
        assert check_frame.arp_tha == 0x000000000000
        assert check_frame.arp_tpa == 0xc0a80181

        # Answer the ARP request so the DUT can send the UDP response.
        arp_frame = arp_ep.ARPFrame()
        arp_frame.eth_dest_mac = 0x020000000000
        arp_frame.eth_src_mac = 0xDAD1D2D3D4D5
        arp_frame.eth_type = 0x0806
        arp_frame.arp_htype = 0x0001
        arp_frame.arp_ptype = 0x0800
        arp_frame.arp_hlen = 6
        arp_frame.arp_plen = 4
        arp_frame.arp_oper = 2
        arp_frame.arp_sha = 0xDAD1D2D3D4D5
        arp_frame.arp_spa = 0xc0a80181
        arp_frame.arp_tha = 0x020000000000
        arp_frame.arp_tpa = 0xc0a80180

        gmii_source.send(b'\x55\x55\x55\x55\x55\x55\x55\xD5'+arp_frame.build_eth().build_axis_fcs().data)

        rx_frame = None
        while rx_frame is None:
            yield clk.posedge
            rx_frame = gmii_sink.recv()

        check_eth_frame = eth_ep.EthFrame()
        check_eth_frame.parse_axis_fcs(rx_frame.data[8:])

        check_frame = udp_ep.UDPFrame()
        check_frame.parse_eth(check_eth_frame)

        print(check_frame)

        assert check_frame.eth_dest_mac == 0xDAD1D2D3D4D5
        assert check_frame.eth_src_mac == 0x020000000000
        assert check_frame.eth_type == 0x0800
        assert check_frame.ip_version == 4
        assert check_frame.ip_ihl == 5
        assert check_frame.ip_dscp == 0
        assert check_frame.ip_ecn == 0
        assert check_frame.ip_identification == 0
        assert check_frame.ip_flags == 2
        assert check_frame.ip_fragment_offset == 0
        assert check_frame.ip_ttl == 64
        assert check_frame.ip_protocol == 0x11
        assert check_frame.ip_source_ip == 0xc0a80180
        assert check_frame.ip_dest_ip == 0xc0a80181
        assert check_frame.udp_source_port == 14000
        assert check_frame.udp_dest_port == 1234

        rx_pkt = xfcp.XFCPFrame()
        rx_pkt.parse_axis(check_frame.payload.data)

        print(rx_pkt)

        assert rx_pkt.ptype == 0xff
        assert rx_pkt.path == []
        assert rx_pkt.rpath == []
        assert len(rx_pkt.payload.data) == 64

        # Enumerate the first downstream node (path [0]).
        pkt = xfcp.XFCPFrame()
        pkt.path = [0]
        pkt.rpath = []
        pkt.ptype = 0xfe
        pkt.payload = b''

        test_frame = udp_ep.UDPFrame()
        test_frame.eth_dest_mac = 0x020000000000
        test_frame.eth_src_mac = 0xDAD1D2D3D4D5
        test_frame.eth_type = 0x0800
        test_frame.ip_version = 4
        test_frame.ip_ihl = 5
        test_frame.ip_dscp = 0
        test_frame.ip_ecn = 0
        test_frame.ip_length = None
        test_frame.ip_identification = 0
        test_frame.ip_flags = 2
        test_frame.ip_fragment_offset = 0
        test_frame.ip_ttl = 64
        test_frame.ip_protocol = 0x11
        test_frame.ip_header_checksum = None
        test_frame.ip_source_ip = 0xc0a80181
        test_frame.ip_dest_ip = 0xc0a80180
        test_frame.udp_source_port = 1234
        test_frame.udp_dest_port = 14000
        test_frame.payload = pkt.build_axis()
        test_frame.build()

        gmii_source.send(b'\x55\x55\x55\x55\x55\x55\x55\xD5'+test_frame.build_eth().build_axis_fcs().data)

        rx_frame = None
        while rx_frame is None:
            yield clk.posedge
            rx_frame = gmii_sink.recv()

        check_eth_frame = eth_ep.EthFrame()
        check_eth_frame.parse_axis_fcs(rx_frame.data[8:])

        check_frame = udp_ep.UDPFrame()
        check_frame.parse_eth(check_eth_frame)

        print(check_frame)

        assert check_frame.eth_dest_mac == 0xDAD1D2D3D4D5
        assert check_frame.eth_src_mac == 0x020000000000
        assert check_frame.eth_type == 0x0800
        assert check_frame.ip_version == 4
        assert check_frame.ip_ihl == 5
        assert check_frame.ip_dscp == 0
        assert check_frame.ip_ecn == 0
        assert check_frame.ip_identification == 0
        assert check_frame.ip_flags == 2
        assert check_frame.ip_fragment_offset == 0
        assert check_frame.ip_ttl == 64
        assert check_frame.ip_protocol == 0x11
        assert check_frame.ip_source_ip == 0xc0a80180
        assert check_frame.ip_dest_ip == 0xc0a80181
        assert check_frame.udp_source_port == 14000
        assert check_frame.udp_dest_port == 1234

        rx_pkt = xfcp.XFCPFrame()
        rx_pkt.parse_axis(check_frame.payload.data)

        print(rx_pkt)

        assert rx_pkt.ptype == 0xff
        assert rx_pkt.path == [0]
        assert rx_pkt.rpath == []
        assert len(rx_pkt.payload.data) == 32

        assert gmii_source.empty()
        assert gmii_sink.empty()

        yield delay(100)

        yield clk.posedge
        # Renumbered from the original's duplicated "test 1" label.
        print("test 2: test write and read RAM 0")
        current_test.next = 2

        # Wishbone write (ptype 0x12) followed by read (ptype 0x10) of 4
        # bytes at address 0 on node [0].
        pkt1 = xfcp.XFCPFrame()
        pkt1.path = [0]
        pkt1.ptype = 0x12
        pkt1.payload = bytearray(struct.pack('<BH', 0, 4)+b'\x11\x22\x33\x44')

        pkt2 = xfcp.XFCPFrame()
        pkt2.path = [0]
        pkt2.ptype = 0x10
        pkt2.payload = bytearray(struct.pack('<BH', 0, 4))

        test_frame1 = udp_ep.UDPFrame()
        test_frame1.eth_dest_mac = 0x020000000000
        test_frame1.eth_src_mac = 0xDAD1D2D3D4D5
        test_frame1.eth_type = 0x0800
        test_frame1.ip_version = 4
        test_frame1.ip_ihl = 5
        test_frame1.ip_dscp = 0
        test_frame1.ip_ecn = 0
        test_frame1.ip_length = None
        test_frame1.ip_identification = 0
        test_frame1.ip_flags = 2
        test_frame1.ip_fragment_offset = 0
        test_frame1.ip_ttl = 64
        test_frame1.ip_protocol = 0x11
        test_frame1.ip_header_checksum = None
        test_frame1.ip_source_ip = 0xc0a80181
        test_frame1.ip_dest_ip = 0xc0a80180
        test_frame1.udp_source_port = 1234
        test_frame1.udp_dest_port = 14000
        test_frame1.payload = pkt1.build_axis()
        test_frame1.build()

        test_frame2 = udp_ep.UDPFrame(test_frame1)
        test_frame2.payload = pkt2.build_axis()
        test_frame2.build()

        gmii_source.send(b'\x55\x55\x55\x55\x55\x55\x55\xD5'+test_frame1.build_eth().build_axis_fcs().data)
        gmii_source.send(b'\x55\x55\x55\x55\x55\x55\x55\xD5'+test_frame2.build_eth().build_axis_fcs().data)

        # Write acknowledge (ptype 0x13).
        rx_frame = None
        while rx_frame is None:
            yield clk.posedge
            rx_frame = gmii_sink.recv()

        check_eth_frame = eth_ep.EthFrame()
        check_eth_frame.parse_axis_fcs(rx_frame.data[8:])

        check_frame = udp_ep.UDPFrame()
        check_frame.parse_eth(check_eth_frame)

        print(check_frame)

        rx_pkt = xfcp.XFCPFrame()
        rx_pkt.parse_axis(check_frame.payload.data)

        print(rx_pkt)

        assert rx_pkt.ptype == 0x13
        assert rx_pkt.payload.data == struct.pack('<BH', 0, 4)

        # Read response (ptype 0x11) echoing the data just written.
        rx_frame = None
        while rx_frame is None:
            yield clk.posedge
            rx_frame = gmii_sink.recv()

        check_eth_frame = eth_ep.EthFrame()
        check_eth_frame.parse_axis_fcs(rx_frame.data[8:])

        check_frame = udp_ep.UDPFrame()
        check_frame.parse_eth(check_eth_frame)

        print(check_frame)

        rx_pkt = xfcp.XFCPFrame()
        rx_pkt.parse_axis(check_frame.payload.data)

        print(rx_pkt)

        assert rx_pkt.ptype == 0x11
        assert rx_pkt.payload.data == struct.pack('<BH', 0, 4)+b'\x11\x22\x33\x44'

        assert gmii_source.empty()
        assert gmii_sink.empty()

        yield delay(100)

        yield clk.posedge
        print("test 3: enumerate via UART")
        current_test.next = 3

        pkt = xfcp.XFCPFrame()
        pkt.path = []
        pkt.rpath = []
        pkt.ptype = 0xfe
        pkt.payload = b''

        # COBS-framed over the UART; frames are zero-terminated.
        uart_source.write(pkt.build_axis_cobs().data+b'\x00')

        yield clk.posedge

        rx_data = b''
        while True:
            if not uart_sink.empty():
                b = bytearray(uart_sink.read(1))
                rx_data += b
                if b[0] == 0:
                    break
            yield clk.posedge

        rx_pkt = xfcp.XFCPFrame()
        rx_pkt.parse_axis_cobs(rx_data[:-1])

        print(rx_pkt)

        assert rx_pkt.ptype == 0xff
        assert rx_pkt.path == []
        assert rx_pkt.rpath == []
        assert len(rx_pkt.payload.data) == 64

        pkt = xfcp.XFCPFrame()
        pkt.path = [0]
        pkt.rpath = []
        pkt.ptype = 0xfe
        pkt.payload = b''

        uart_source.write(pkt.build_axis_cobs().data+b'\x00')

        yield clk.posedge

        rx_data = b''
        while True:
            if not uart_sink.empty():
                b = bytearray(uart_sink.read(1))
                rx_data += b
                if b[0] == 0:
                    break
            yield clk.posedge

        rx_pkt = xfcp.XFCPFrame()
        rx_pkt.parse_axis_cobs(rx_data[:-1])

        print(rx_pkt)

        assert rx_pkt.ptype == 0xff
        assert rx_pkt.path == [0]
        assert rx_pkt.rpath == []
        assert len(rx_pkt.payload.data) == 32

        yield delay(100)

        raise StopSimulation

    return instances()
def test_bench():
    """Instantiate the bench and run the MyHDL simulation to completion."""
    Simulation(bench()).run()
# Script entry point: run the cosimulation testbench when invoked directly.
if __name__ == '__main__':
    print("Running test...")
    test_bench()
| true | true |
f72225b988b94880019a35eb225acab6e4b68b8a | 604 | py | Python | {{ cookiecutter.package_name }}/src/{{ cookiecutter.module_name }}/runner/decorators.py | toxinu/cookiecutter-django | 687ae5fe93f46aa57ef8d8f7315e60ca66391382 | [
"BSD-3-Clause"
] | 3 | 2016-12-17T01:05:35.000Z | 2016-12-23T10:03:34.000Z | {{ cookiecutter.package_name }}/src/{{ cookiecutter.module_name }}/runner/decorators.py | toxinu/cookiecutter-django | 687ae5fe93f46aa57ef8d8f7315e60ca66391382 | [
"BSD-3-Clause"
] | null | null | null | {{ cookiecutter.package_name }}/src/{{ cookiecutter.module_name }}/runner/decorators.py | toxinu/cookiecutter-django | 687ae5fe93f46aa57ef8d8f7315e60ca66391382 | [
"BSD-3-Clause"
] | null | null | null | import os
# NOTE: this file is a cookiecutter template; the `{{ ... }}` markers are
# substituted at project generation time, so it is not valid Python as-is.
def configuration(f):
    """Click decorator ensuring runner configuration is loaded before *f*."""
    import click
    from functools import update_wrapper

    @click.pass_context
    def inner(ctx, *args, **kwargs):
        # HACK: We can't call `configure()` from within tests
        # since we don't load config files from disk, so we
        # need a way to bypass this initialization step
        if os.environ.get('_{{ cookiecutter.module_name|upper }}_SKIP_CONFIGURATION') != '1':
            from {{ cookiecutter.module_name }}.runner import configure
            configure()
        return ctx.invoke(f, *args, **kwargs)
    return update_wrapper(inner, f)
| 33.555556 | 93 | 0.650662 | import os
def configuration(f):
    """Click decorator ensuring runner configuration is loaded before *f*."""
    import click
    from functools import update_wrapper
    @click.pass_context
    def inner(ctx, *args, **kwargs):
        # Tests set this env var to skip configure(), which would otherwise
        # try to load config files from disk.
        if os.environ.get('_{{ cookiecutter.module_name|upper }}_SKIP_CONFIGURATION') != '1':
            from {{ cookiecutter.module_name }}.runner import configure
            configure()
        return ctx.invoke(f, *args, **kwargs)
    return update_wrapper(inner, f)
| false | true |
f7222660525d88a831eb01f371001c0033a38be7 | 29,003 | py | Python | neutron/plugins/openvswitch/ovs_neutron_plugin.py | petrutlucian94/neutron | 44976d12bbe72331e536d92bb46e35a8835a75ce | [
"Apache-2.0"
] | null | null | null | neutron/plugins/openvswitch/ovs_neutron_plugin.py | petrutlucian94/neutron | 44976d12bbe72331e536d92bb46e35a8835a75ce | [
"Apache-2.0"
] | null | null | null | neutron/plugins/openvswitch/ovs_neutron_plugin.py | petrutlucian94/neutron | 44976d12bbe72331e536d92bb46e35a8835a75ce | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Somik Behera, Nicira Networks, Inc.
# @author: Brad Hall, Nicira Networks, Inc.
# @author: Dan Wendlandt, Nicira Networks, Inc.
# @author: Dave Lapsley, Nicira Networks, Inc.
# @author: Aaron Rosen, Nicira Networks, Inc.
# @author: Bob Kukura, Red Hat, Inc.
# @author: Seetharama Ayyadevara, Freescale Semiconductor, Inc.
import sys
from oslo.config import cfg
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.common import constants as q_const
from neutron.common import exceptions as q_exc
from neutron.common import rpc as q_rpc
from neutron.common import topics
from neutron.common import utils
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import db_base_plugin_v2
from neutron.db import dhcp_rpc_base
from neutron.db import external_net_db
from neutron.db import extradhcpopt_db
from neutron.db import extraroute_db
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_gwmode_db
from neutron.db import l3_rpc_base
from neutron.db import portbindings_db
from neutron.db import quota_db # noqa
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.extensions import portbindings
from neutron.extensions import providernet as provider
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
from neutron.openstack.common.rpc import proxy
from neutron.plugins.common import constants as svc_constants
from neutron.plugins.common import utils as plugin_utils
from neutron.plugins.openvswitch.common import config # noqa
from neutron.plugins.openvswitch.common import constants
from neutron.plugins.openvswitch import ovs_db_v2
LOG = logging.getLogger(__name__)
class OVSRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin,
                      l3_rpc_base.L3RpcCallbackMixin,
                      sg_db_rpc.SecurityGroupServerRpcCallbackMixin):
    """Plugin-side RPC callbacks invoked by OVS L2 agents."""
    # history
    # 1.0 Initial version
    # 1.1 Support Security Group RPC
    RPC_API_VERSION = '1.1'
    def __init__(self, notifier, tunnel_type):
        # notifier: AgentNotifierApi used to fan out events to agents.
        # tunnel_type: configured tunnel type, echoed back in tunnel_update.
        self.notifier = notifier
        self.tunnel_type = tunnel_type
    def create_rpc_dispatcher(self):
        '''Get the rpc dispatcher for this manager.
        If a manager would like to set an rpc API version, or support more than
        one class as the target of rpc messages, override this method.
        '''
        return q_rpc.PluginRpcDispatcher([self,
                                          agents_db.AgentExtRpcCallback()])
    @classmethod
    def get_port_from_device(cls, device):
        """Return the DB port for *device* annotated with the device id."""
        port = ovs_db_v2.get_port_from_device(device)
        if port:
            port['device'] = device
        return port
    def get_device_details(self, rpc_context, **kwargs):
        """Agent requests device details."""
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        LOG.debug(_("Device %(device)s details requested from %(agent_id)s"),
                  {'device': device, 'agent_id': agent_id})
        port = ovs_db_v2.get_port(device)
        if port:
            binding = ovs_db_v2.get_network_binding(None, port['network_id'])
            entry = {'device': device,
                     'network_id': port['network_id'],
                     'port_id': port['id'],
                     'admin_state_up': port['admin_state_up'],
                     'network_type': binding.network_type,
                     'segmentation_id': binding.segmentation_id,
                     'physical_network': binding.physical_network}
            # Bring the stored port status in line with its admin state.
            new_status = (q_const.PORT_STATUS_ACTIVE if port['admin_state_up']
                          else q_const.PORT_STATUS_DOWN)
            if port['status'] != new_status:
                ovs_db_v2.set_port_status(port['id'], new_status)
        else:
            entry = {'device': device}
            LOG.debug(_("%s can not be found in database"), device)
        return entry
    def update_device_down(self, rpc_context, **kwargs):
        """Device no longer exists on agent."""
        # TODO(garyk) - live migration and port status
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        LOG.debug(_("Device %(device)s no longer exists on %(agent_id)s"),
                  {'device': device, 'agent_id': agent_id})
        port = ovs_db_v2.get_port(device)
        if port:
            entry = {'device': device,
                     'exists': True}
            if port['status'] != q_const.PORT_STATUS_DOWN:
                # Set port status to DOWN
                ovs_db_v2.set_port_status(port['id'], q_const.PORT_STATUS_DOWN)
        else:
            entry = {'device': device,
                     'exists': False}
            LOG.debug(_("%s can not be found in database"), device)
        return entry
    def update_device_up(self, rpc_context, **kwargs):
        """Device is up on agent."""
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        LOG.debug(_("Device %(device)s up on %(agent_id)s"),
                  {'device': device, 'agent_id': agent_id})
        port = ovs_db_v2.get_port(device)
        if port:
            if port['status'] != q_const.PORT_STATUS_ACTIVE:
                ovs_db_v2.set_port_status(port['id'],
                                          q_const.PORT_STATUS_ACTIVE)
        else:
            LOG.debug(_("%s can not be found in database"), device)
    def tunnel_sync(self, rpc_context, **kwargs):
        """Update new tunnel.
        Updates the database with the tunnel IP. All listening agents will
        also be notified about the new tunnel IP.
        """
        tunnel_ip = kwargs.get('tunnel_ip')
        # Update the database with the IP
        tunnel = ovs_db_v2.add_tunnel_endpoint(tunnel_ip)
        tunnels = ovs_db_v2.get_tunnel_endpoints()
        entry = dict()
        entry['tunnels'] = tunnels
        # Notify all other listening agents
        self.notifier.tunnel_update(rpc_context, tunnel.ip_address,
                                    tunnel.id, self.tunnel_type)
        # Return the list of tunnels IP's to the agent
        return entry
class AgentNotifierApi(proxy.RpcProxy,
                       sg_rpc.SecurityGroupAgentRpcApiMixin):
    '''Agent side of the openvswitch rpc API.
    API version history:
        1.0 - Initial version.
    '''
    BASE_RPC_API_VERSION = '1.0'
    def __init__(self, topic):
        # Pre-compute the fanout topics for each notification type.
        super(AgentNotifierApi, self).__init__(
            topic=topic, default_version=self.BASE_RPC_API_VERSION)
        self.topic_network_delete = topics.get_topic_name(topic,
                                                          topics.NETWORK,
                                                          topics.DELETE)
        self.topic_port_update = topics.get_topic_name(topic,
                                                       topics.PORT,
                                                       topics.UPDATE)
        self.topic_tunnel_update = topics.get_topic_name(topic,
                                                         constants.TUNNEL,
                                                         topics.UPDATE)
    def network_delete(self, context, network_id):
        """Fan out a network deletion notification to all agents."""
        self.fanout_cast(context,
                         self.make_msg('network_delete',
                                       network_id=network_id),
                         topic=self.topic_network_delete)
    def port_update(self, context, port, network_type, segmentation_id,
                    physical_network):
        """Fan out a port update, including its network binding details."""
        self.fanout_cast(context,
                         self.make_msg('port_update',
                                       port=port,
                                       network_type=network_type,
                                       segmentation_id=segmentation_id,
                                       physical_network=physical_network),
                         topic=self.topic_port_update)
    def tunnel_update(self, context, tunnel_ip, tunnel_id, tunnel_type):
        """Fan out a new/changed tunnel endpoint to all agents."""
        self.fanout_cast(context,
                         self.make_msg('tunnel_update',
                                       tunnel_ip=tunnel_ip,
                                       tunnel_id=tunnel_id,
                                       tunnel_type=tunnel_type),
                         topic=self.topic_tunnel_update)
class OVSNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin,
extraroute_db.ExtraRoute_db_mixin,
l3_gwmode_db.L3_NAT_db_mixin,
sg_db_rpc.SecurityGroupServerRpcMixin,
l3_agentschedulers_db.L3AgentSchedulerDbMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
portbindings_db.PortBindingMixin,
extradhcpopt_db.ExtraDhcpOptMixin,
addr_pair_db.AllowedAddressPairsMixin):
"""Implement the Neutron abstractions using Open vSwitch.
Depending on whether tunneling is enabled, either a GRE, VXLAN tunnel or
a new VLAN is created for each network. An agent is relied upon to
perform the actual OVS configuration on each host.
The provider extension is also supported. As discussed in
https://bugs.launchpad.net/neutron/+bug/1023156, this class could
be simplified, and filtering on extended attributes could be
handled, by adding support for extended attributes to the
NeutronDbPluginV2 base class. When that occurs, this class should
be updated to take advantage of it.
The port binding extension enables an external application relay
information to and from the plugin.
"""
# This attribute specifies whether the plugin supports or not
# bulk/pagination/sorting operations. Name mangling is used in
# order to ensure it is qualified by class
__native_bulk_support = True
__native_pagination_support = True
__native_sorting_support = True
_supported_extension_aliases = ["provider", "external-net", "router",
"ext-gw-mode", "binding", "quotas",
"security-group", "agent", "extraroute",
"l3_agent_scheduler",
"dhcp_agent_scheduler",
"extra_dhcp_opt",
"allowed-address-pairs"]
    @property
    def supported_extension_aliases(self):
        # Computed once and cached on the instance; the security-group
        # alias is removed when the noop firewall driver is configured.
        if not hasattr(self, '_aliases'):
            aliases = self._supported_extension_aliases[:]
            sg_rpc.disable_security_group_extension_if_noop_driver(aliases)
            self._aliases = aliases
        return self._aliases
    def __init__(self, configfile=None):
        """Initialize DB state, VLAN/tunnel allocations, RPC and schedulers."""
        self.base_binding_dict = {
            portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS,
            portbindings.CAPABILITIES: {
                portbindings.CAP_PORT_FILTER:
                'security-group' in self.supported_extension_aliases}}
        ovs_db_v2.initialize()
        self._parse_network_vlan_ranges()
        ovs_db_v2.sync_vlan_allocations(self.network_vlan_ranges)
        self.tenant_network_type = cfg.CONF.OVS.tenant_network_type
        if self.tenant_network_type not in [constants.TYPE_LOCAL,
                                            constants.TYPE_VLAN,
                                            constants.TYPE_GRE,
                                            constants.TYPE_VXLAN,
                                            constants.TYPE_NONE]:
            LOG.error(_("Invalid tenant_network_type: %s. "
                      "Server terminated!"),
                      self.tenant_network_type)
            sys.exit(1)
        # Tunneling is enabled either explicitly or implicitly by setting
        # a tunnel_type; in the latter case enable_tunneling is forced on.
        self.enable_tunneling = cfg.CONF.OVS.enable_tunneling
        self.tunnel_type = None
        if self.enable_tunneling:
            self.tunnel_type = cfg.CONF.OVS.tunnel_type or constants.TYPE_GRE
        elif cfg.CONF.OVS.tunnel_type:
            self.tunnel_type = cfg.CONF.OVS.tunnel_type
            self.enable_tunneling = True
        self.tunnel_id_ranges = []
        if self.enable_tunneling:
            self._parse_tunnel_id_ranges()
            ovs_db_v2.sync_tunnel_allocations(self.tunnel_id_ranges)
        elif self.tenant_network_type in constants.TUNNEL_NETWORK_TYPES:
            LOG.error(_("Tunneling disabled but tenant_network_type is '%s'. "
                        "Server terminated!"), self.tenant_network_type)
            sys.exit(1)
        self.setup_rpc()
        self.network_scheduler = importutils.import_object(
            cfg.CONF.network_scheduler_driver
        )
        self.router_scheduler = importutils.import_object(
            cfg.CONF.router_scheduler_driver
        )
    def setup_rpc(self):
        """Create the RPC connection, notifiers, and message consumers."""
        # RPC support
        self.service_topics = {svc_constants.CORE: topics.PLUGIN,
                               svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN}
        self.conn = rpc.create_connection(new=True)
        self.notifier = AgentNotifierApi(topics.AGENT)
        self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = (
            dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
        )
        self.agent_notifiers[q_const.AGENT_TYPE_L3] = (
            l3_rpc_agent_api.L3AgentNotify
        )
        self.callbacks = OVSRpcCallbacks(self.notifier, self.tunnel_type)
        self.dispatcher = self.callbacks.create_rpc_dispatcher()
        for svc_topic in self.service_topics.values():
            self.conn.create_consumer(svc_topic, self.dispatcher, fanout=False)
        # Consume from all consumers in a thread
        self.conn.consume_in_thread()
    def _parse_network_vlan_ranges(self):
        """Parse the configured VLAN ranges; terminate the server on error."""
        try:
            self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges(
                cfg.CONF.OVS.network_vlan_ranges)
        except Exception as ex:
            LOG.error(_("%s. Server terminated!"), ex)
            sys.exit(1)
        LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges)
def _parse_tunnel_id_ranges(self):
for entry in cfg.CONF.OVS.tunnel_id_ranges:
entry = entry.strip()
try:
tun_min, tun_max = entry.split(':')
self.tunnel_id_ranges.append((int(tun_min), int(tun_max)))
except ValueError as ex:
LOG.error(_("Invalid tunnel ID range: "
"'%(range)s' - %(e)s. Server terminated!"),
{'range': entry, 'e': ex})
sys.exit(1)
LOG.info(_("Tunnel ID ranges: %s"), self.tunnel_id_ranges)
def _extend_network_dict_provider(self, context, network):
binding = ovs_db_v2.get_network_binding(context.session,
network['id'])
network[provider.NETWORK_TYPE] = binding.network_type
if binding.network_type in constants.TUNNEL_NETWORK_TYPES:
network[provider.PHYSICAL_NETWORK] = None
network[provider.SEGMENTATION_ID] = binding.segmentation_id
elif binding.network_type == constants.TYPE_FLAT:
network[provider.PHYSICAL_NETWORK] = binding.physical_network
network[provider.SEGMENTATION_ID] = None
elif binding.network_type == constants.TYPE_VLAN:
network[provider.PHYSICAL_NETWORK] = binding.physical_network
network[provider.SEGMENTATION_ID] = binding.segmentation_id
elif binding.network_type == constants.TYPE_LOCAL:
network[provider.PHYSICAL_NETWORK] = None
network[provider.SEGMENTATION_ID] = None
    def _process_provider_create(self, context, attrs):
        """Validate provider attributes on a network create request.

        Returns (network_type, physical_network, segmentation_id), or
        (None, None, None) when no provider attribute was supplied (tenant
        network). Raises InvalidInput on any inconsistent combination.
        """
        network_type = attrs.get(provider.NETWORK_TYPE)
        physical_network = attrs.get(provider.PHYSICAL_NETWORK)
        segmentation_id = attrs.get(provider.SEGMENTATION_ID)
        network_type_set = attributes.is_attr_set(network_type)
        physical_network_set = attributes.is_attr_set(physical_network)
        segmentation_id_set = attributes.is_attr_set(segmentation_id)
        if not (network_type_set or physical_network_set or
                segmentation_id_set):
            # Tenant network: nothing provider-specific to validate.
            return (None, None, None)
        if not network_type_set:
            msg = _("provider:network_type required")
            raise q_exc.InvalidInput(error_message=msg)
        elif network_type == constants.TYPE_FLAT:
            # Flat networks carry no segmentation ID.
            if segmentation_id_set:
                msg = _("provider:segmentation_id specified for flat network")
                raise q_exc.InvalidInput(error_message=msg)
            else:
                segmentation_id = constants.FLAT_VLAN_ID
        elif network_type == constants.TYPE_VLAN:
            # VLAN networks need a segmentation ID within the legal tag range.
            if not segmentation_id_set:
                msg = _("provider:segmentation_id required")
                raise q_exc.InvalidInput(error_message=msg)
            if not utils.is_valid_vlan_tag(segmentation_id):
                msg = (_("provider:segmentation_id out of range "
                         "(%(min_id)s through %(max_id)s)") %
                       {'min_id': q_const.MIN_VLAN_TAG,
                        'max_id': q_const.MAX_VLAN_TAG})
                raise q_exc.InvalidInput(error_message=msg)
        elif network_type in constants.TUNNEL_NETWORK_TYPES:
            # Tunnel networks require tunneling enabled, no physical network,
            # and a mandatory segmentation (tunnel) ID.
            if not self.enable_tunneling:
                msg = _("%s networks are not enabled") % network_type
                raise q_exc.InvalidInput(error_message=msg)
            if physical_network_set:
                msg = _("provider:physical_network specified for %s "
                        "network") % network_type
                raise q_exc.InvalidInput(error_message=msg)
            else:
                physical_network = None
            if not segmentation_id_set:
                msg = _("provider:segmentation_id required")
                raise q_exc.InvalidInput(error_message=msg)
        elif network_type == constants.TYPE_LOCAL:
            # Local networks carry neither physical network nor segment ID.
            if physical_network_set:
                msg = _("provider:physical_network specified for local "
                        "network")
                raise q_exc.InvalidInput(error_message=msg)
            else:
                physical_network = None
            if segmentation_id_set:
                msg = _("provider:segmentation_id specified for local "
                        "network")
                raise q_exc.InvalidInput(error_message=msg)
            else:
                segmentation_id = None
        else:
            msg = _("provider:network_type %s not supported") % network_type
            raise q_exc.InvalidInput(error_message=msg)
        if network_type in [constants.TYPE_VLAN, constants.TYPE_FLAT]:
            # Resolve/validate the physical network against configured ranges.
            if physical_network_set:
                if physical_network not in self.network_vlan_ranges:
                    msg = _("Unknown provider:physical_network "
                            "%s") % physical_network
                    raise q_exc.InvalidInput(error_message=msg)
            elif 'default' in self.network_vlan_ranges:
                physical_network = 'default'
            else:
                msg = _("provider:physical_network required")
                raise q_exc.InvalidInput(error_message=msg)
        return (network_type, physical_network, segmentation_id)
    def create_network(self, context, network):
        """Create a network, reserving its segment inside one transaction.

        Provider attributes (network_type/physical_network/segmentation_id)
        are validated first; if absent, a tenant segment is allocated from
        the configured pools. The binding row, L3 processing, and provider
        dict extension all happen in the same subtransaction so any failure
        rolls everything back.
        """
        (network_type, physical_network,
         segmentation_id) = self._process_provider_create(context,
                                                          network['network'])
        session = context.session
        #set up default security groups
        tenant_id = self._get_tenant_id_for_create(
            context, network['network'])
        self._ensure_default_security_group(context, tenant_id)
        with session.begin(subtransactions=True):
            if not network_type:
                # tenant network
                network_type = self.tenant_network_type
                if network_type == constants.TYPE_NONE:
                    raise q_exc.TenantNetworksDisabled()
                elif network_type == constants.TYPE_VLAN:
                    (physical_network,
                     segmentation_id) = ovs_db_v2.reserve_vlan(session)
                elif network_type in constants.TUNNEL_NETWORK_TYPES:
                    segmentation_id = ovs_db_v2.reserve_tunnel(session)
                # no reservation needed for TYPE_LOCAL
            else:
                # provider network
                if network_type in [constants.TYPE_VLAN, constants.TYPE_FLAT]:
                    ovs_db_v2.reserve_specific_vlan(session, physical_network,
                                                    segmentation_id)
                elif network_type in constants.TUNNEL_NETWORK_TYPES:
                    ovs_db_v2.reserve_specific_tunnel(session, segmentation_id)
                # no reservation needed for TYPE_LOCAL
            net = super(OVSNeutronPluginV2, self).create_network(context,
                                                                 network)
            ovs_db_v2.add_network_binding(session, net['id'], network_type,
                                          physical_network, segmentation_id)
            self._process_l3_create(context, net, network['network'])
            self._extend_network_dict_provider(context, net)
            # note - exception will rollback entire transaction
        LOG.debug(_("Created network: %s"), net['id'])
        return net
def update_network(self, context, id, network):
provider._raise_if_updates_provider_attributes(network['network'])
session = context.session
with session.begin(subtransactions=True):
net = super(OVSNeutronPluginV2, self).update_network(context, id,
network)
self._process_l3_update(context, net, network['network'])
self._extend_network_dict_provider(context, net)
return net
    def delete_network(self, context, id):
        """Delete a network and return its segment to the free pool.

        The binding row must be read before the parent delete (cascade
        removes it); the tunnel/VLAN id is released in the same
        transaction, and agents are notified only after commit.
        """
        session = context.session
        with session.begin(subtransactions=True):
            binding = ovs_db_v2.get_network_binding(session, id)
            super(OVSNeutronPluginV2, self).delete_network(context, id)
            if binding.network_type in constants.TUNNEL_NETWORK_TYPES:
                ovs_db_v2.release_tunnel(session, binding.segmentation_id,
                                         self.tunnel_id_ranges)
            elif binding.network_type in [constants.TYPE_VLAN,
                                          constants.TYPE_FLAT]:
                ovs_db_v2.release_vlan(session, binding.physical_network,
                                       binding.segmentation_id,
                                       self.network_vlan_ranges)
            # the network_binding record is deleted via cascade from
            # the network record, so explicit removal is not necessary
        self.notifier.network_delete(context, id)
def get_network(self, context, id, fields=None):
session = context.session
with session.begin(subtransactions=True):
net = super(OVSNeutronPluginV2, self).get_network(context,
id, None)
self._extend_network_dict_provider(context, net)
return self._fields(net, fields)
def get_networks(self, context, filters=None, fields=None,
sorts=None,
limit=None, marker=None, page_reverse=False):
session = context.session
with session.begin(subtransactions=True):
nets = super(OVSNeutronPluginV2,
self).get_networks(context, filters, None, sorts,
limit, marker, page_reverse)
for net in nets:
self._extend_network_dict_provider(context, net)
return [self._fields(net, fields) for net in nets]
    def create_port(self, context, port):
        """Create a port with security groups, bindings and DHCP options.

        Status starts as DOWN and is flipped by the agent once wired.
        The security-group membership notification is sent only after the
        transaction commits.
        """
        # Set port status as 'DOWN'. This will be updated by agent
        port['port']['status'] = q_const.PORT_STATUS_DOWN
        port_data = port['port']
        session = context.session
        with session.begin(subtransactions=True):
            self._ensure_default_security_group_on_port(context, port)
            sgids = self._get_security_groups_on_port(context, port)
            dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, [])
            # NOTE: 'port' is rebound here to the created port dict; the
            # original request body survives as 'port_data'.
            port = super(OVSNeutronPluginV2, self).create_port(context, port)
            self._process_portbindings_create_and_update(context,
                                                         port_data, port)
            self._process_port_create_security_group(context, port, sgids)
            self._process_port_create_extra_dhcp_opts(context, port,
                                                      dhcp_opts)
            port[addr_pair.ADDRESS_PAIRS] = (
                self._process_create_allowed_address_pairs(
                    context, port,
                    port_data.get(addr_pair.ADDRESS_PAIRS)))
        self.notify_security_groups_member_updated(context, port)
        return port
    def update_port(self, context, id, port):
        """Update a port, notifying the agent only when required.

        need_port_update_notify accumulates (via |=) whether any change
        the agent must react to occurred: address pairs, security groups,
        extra DHCP options, SG membership, or admin_state_up.
        """
        session = context.session
        need_port_update_notify = False
        changed_fixed_ips = 'fixed_ips' in port['port']
        with session.begin(subtransactions=True):
            original_port = super(OVSNeutronPluginV2, self).get_port(
                context, id)
            updated_port = super(OVSNeutronPluginV2, self).update_port(
                context, id, port)
            if addr_pair.ADDRESS_PAIRS in port['port']:
                # Replace-all semantics for allowed address pairs.
                self._delete_allowed_address_pairs(context, id)
                self._process_create_allowed_address_pairs(
                    context, updated_port,
                    port['port'][addr_pair.ADDRESS_PAIRS])
                need_port_update_notify = True
            elif changed_fixed_ips:
                self._check_fixed_ips_and_address_pairs_no_overlap(
                    context, updated_port)
            need_port_update_notify |= self.update_security_group_on_port(
                context, id, port, original_port, updated_port)
            self._process_portbindings_create_and_update(context,
                                                         port['port'],
                                                         updated_port)
            need_port_update_notify |= self._update_extra_dhcp_opts_on_port(
                context, id, port, updated_port)
        # Checks below run outside the transaction, on committed state.
        need_port_update_notify |= self.is_security_group_member_updated(
            context, original_port, updated_port)
        if original_port['admin_state_up'] != updated_port['admin_state_up']:
            need_port_update_notify = True
        if need_port_update_notify:
            binding = ovs_db_v2.get_network_binding(None,
                                                    updated_port['network_id'])
            self.notifier.port_update(context, updated_port,
                                      binding.network_type,
                                      binding.segmentation_id,
                                      binding.physical_network)
        return updated_port
    def delete_port(self, context, id, l3_port_check=True):
        """Delete a port; optionally guard router-owned ports.

        The port dict is captured before deletion so the security-group
        membership notification (sent post-commit) has the old data.
        """
        # if needed, check to see if this is a port owned by
        # an l3-router. If so, we should prevent deletion.
        if l3_port_check:
            self.prevent_l3_port_deletion(context, id)
        session = context.session
        with session.begin(subtransactions=True):
            self.disassociate_floatingips(context, id)
            port = self.get_port(context, id)
            self._delete_port_security_group_bindings(context, id)
            super(OVSNeutronPluginV2, self).delete_port(context, id)
        self.notify_security_groups_member_updated(context, port)
| 46.703704 | 79 | 0.607937 |
import sys
from oslo.config import cfg
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.common import constants as q_const
from neutron.common import exceptions as q_exc
from neutron.common import rpc as q_rpc
from neutron.common import topics
from neutron.common import utils
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import db_base_plugin_v2
from neutron.db import dhcp_rpc_base
from neutron.db import external_net_db
from neutron.db import extradhcpopt_db
from neutron.db import extraroute_db
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_gwmode_db
from neutron.db import l3_rpc_base
from neutron.db import portbindings_db
from neutron.db import quota_db
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.extensions import portbindings
from neutron.extensions import providernet as provider
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
from neutron.openstack.common.rpc import proxy
from neutron.plugins.common import constants as svc_constants
from neutron.plugins.common import utils as plugin_utils
from neutron.plugins.openvswitch.common import config
from neutron.plugins.openvswitch.common import constants
from neutron.plugins.openvswitch import ovs_db_v2
LOG = logging.getLogger(__name__)
class OVSRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin,
                      l3_rpc_base.L3RpcCallbackMixin,
                      sg_db_rpc.SecurityGroupServerRpcCallbackMixin):
    """Server-side RPC handlers invoked by OVS agents.

    Resolves device identifiers to ports/bindings in the DB and keeps
    port status in sync with what agents report.
    """
    # history
    # 1.0 Initial version
    # 1.1 Support Security Group RPC
    RPC_API_VERSION = '1.1'
    def __init__(self, notifier, tunnel_type):
        self.notifier = notifier
        self.tunnel_type = tunnel_type
    def create_rpc_dispatcher(self):
        """Bundle these callbacks with the agent-state callback."""
        return q_rpc.PluginRpcDispatcher([self,
                                          agents_db.AgentExtRpcCallback()])
    @classmethod
    def get_port_from_device(cls, device):
        # Returns the port dict (with 'device' key added) or None.
        port = ovs_db_v2.get_port_from_device(device)
        if port:
            port['device'] = device
        return port
    def get_device_details(self, rpc_context, **kwargs):
        """Return wiring details for a device; side effect: sets status
        ACTIVE/DOWN from the port's admin_state_up."""
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        LOG.debug(_("Device %(device)s details requested from %(agent_id)s"),
                  {'device': device, 'agent_id': agent_id})
        port = ovs_db_v2.get_port(device)
        if port:
            binding = ovs_db_v2.get_network_binding(None, port['network_id'])
            entry = {'device': device,
                     'network_id': port['network_id'],
                     'port_id': port['id'],
                     'admin_state_up': port['admin_state_up'],
                     'network_type': binding.network_type,
                     'segmentation_id': binding.segmentation_id,
                     'physical_network': binding.physical_network}
            new_status = (q_const.PORT_STATUS_ACTIVE if port['admin_state_up']
                          else q_const.PORT_STATUS_DOWN)
            if port['status'] != new_status:
                ovs_db_v2.set_port_status(port['id'], new_status)
        else:
            entry = {'device': device}
            LOG.debug(_("%s can not be found in database"), device)
        return entry
    def update_device_down(self, rpc_context, **kwargs):
        """Mark a device's port DOWN; returns whether the port exists."""
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        LOG.debug(_("Device %(device)s no longer exists on %(agent_id)s"),
                  {'device': device, 'agent_id': agent_id})
        port = ovs_db_v2.get_port(device)
        if port:
            entry = {'device': device,
                     'exists': True}
            if port['status'] != q_const.PORT_STATUS_DOWN:
                ovs_db_v2.set_port_status(port['id'], q_const.PORT_STATUS_DOWN)
        else:
            entry = {'device': device,
                     'exists': False}
            LOG.debug(_("%s can not be found in database"), device)
        return entry
    def update_device_up(self, rpc_context, **kwargs):
        """Mark a device's port ACTIVE (no return value)."""
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        LOG.debug(_("Device %(device)s up on %(agent_id)s"),
                  {'device': device, 'agent_id': agent_id})
        port = ovs_db_v2.get_port(device)
        if port:
            if port['status'] != q_const.PORT_STATUS_ACTIVE:
                ovs_db_v2.set_port_status(port['id'],
                                          q_const.PORT_STATUS_ACTIVE)
        else:
            LOG.debug(_("%s can not be found in database"), device)
    def tunnel_sync(self, rpc_context, **kwargs):
        """Register an agent's tunnel IP and fan out the full endpoint
        list to all agents; returns {'tunnels': [...]}."""
        tunnel_ip = kwargs.get('tunnel_ip')
        # Update the database with the IP
        tunnel = ovs_db_v2.add_tunnel_endpoint(tunnel_ip)
        tunnels = ovs_db_v2.get_tunnel_endpoints()
        entry = dict()
        entry['tunnels'] = tunnels
        # Notify all other listening agents
        self.notifier.tunnel_update(rpc_context, tunnel.ip_address,
                                    tunnel.id, self.tunnel_type)
        return entry
class AgentNotifierApi(proxy.RpcProxy,
                       sg_rpc.SecurityGroupAgentRpcApiMixin):
    """Plugin-side RPC client: fans out events to all OVS agents.

    Each notification is a fanout cast on a per-event topic derived
    from the base agent topic.
    """
    BASE_RPC_API_VERSION = '1.0'
    def __init__(self, topic):
        super(AgentNotifierApi, self).__init__(
            topic=topic, default_version=self.BASE_RPC_API_VERSION)
        self.topic_network_delete = topics.get_topic_name(topic,
                                                          topics.NETWORK,
                                                          topics.DELETE)
        self.topic_port_update = topics.get_topic_name(topic,
                                                       topics.PORT,
                                                       topics.UPDATE)
        self.topic_tunnel_update = topics.get_topic_name(topic,
                                                         constants.TUNNEL,
                                                         topics.UPDATE)
    def network_delete(self, context, network_id):
        # Tell every agent to tear down local state for the network.
        self.fanout_cast(context,
                         self.make_msg('network_delete',
                                       network_id=network_id),
                         topic=self.topic_network_delete)
    def port_update(self, context, port, network_type, segmentation_id,
                    physical_network):
        # Broadcast the new port state plus its segment binding.
        self.fanout_cast(context,
                         self.make_msg('port_update',
                                       port=port,
                                       network_type=network_type,
                                       segmentation_id=segmentation_id,
                                       physical_network=physical_network),
                         topic=self.topic_port_update)
    def tunnel_update(self, context, tunnel_ip, tunnel_id, tunnel_type):
        # Announce a new/updated tunnel endpoint to all agents.
        self.fanout_cast(context,
                         self.make_msg('tunnel_update',
                                       tunnel_ip=tunnel_ip,
                                       tunnel_id=tunnel_id,
                                       tunnel_type=tunnel_type),
                         topic=self.topic_tunnel_update)
class OVSNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin,
extraroute_db.ExtraRoute_db_mixin,
l3_gwmode_db.L3_NAT_db_mixin,
sg_db_rpc.SecurityGroupServerRpcMixin,
l3_agentschedulers_db.L3AgentSchedulerDbMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
portbindings_db.PortBindingMixin,
extradhcpopt_db.ExtraDhcpOptMixin,
addr_pair_db.AllowedAddressPairsMixin):
    # This attribute specifies whether the plugin supports or not
    # bulk/pagination/sorting operations. Name mangling is used in
    # order to ensure it is qualified by class
    __native_bulk_support = True
    __native_pagination_support = True
    __native_sorting_support = True
    _supported_extension_aliases = ["provider", "external-net", "router",
                                    "ext-gw-mode", "binding", "quotas",
                                    "security-group", "agent", "extraroute",
                                    "l3_agent_scheduler",
                                    "dhcp_agent_scheduler",
                                    "extra_dhcp_opt",
                                    "allowed-address-pairs"]
    @property
    def supported_extension_aliases(self):
        # Computed once and cached in self._aliases; the security-group
        # alias is dropped when the noop firewall driver is configured.
        if not hasattr(self, '_aliases'):
            aliases = self._supported_extension_aliases[:]
            sg_rpc.disable_security_group_extension_if_noop_driver(aliases)
            self._aliases = aliases
        return self._aliases
    def __init__(self, configfile=None):
        """Initialize the plugin: DB, segment pools, tunneling, RPC,
        and schedulers. Invalid configuration terminates the server
        via sys.exit(1).
        """
        self.base_binding_dict = {
            portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS,
            portbindings.CAPABILITIES: {
                portbindings.CAP_PORT_FILTER:
                'security-group' in self.supported_extension_aliases}}
        ovs_db_v2.initialize()
        self._parse_network_vlan_ranges()
        ovs_db_v2.sync_vlan_allocations(self.network_vlan_ranges)
        self.tenant_network_type = cfg.CONF.OVS.tenant_network_type
        if self.tenant_network_type not in [constants.TYPE_LOCAL,
                                            constants.TYPE_VLAN,
                                            constants.TYPE_GRE,
                                            constants.TYPE_VXLAN,
                                            constants.TYPE_NONE]:
            LOG.error(_("Invalid tenant_network_type: %s. "
                        "Server terminated!"),
                      self.tenant_network_type)
            sys.exit(1)
        self.enable_tunneling = cfg.CONF.OVS.enable_tunneling
        self.tunnel_type = None
        if self.enable_tunneling:
            # Default tunnel type to GRE when tunneling is on but no
            # explicit type was configured.
            self.tunnel_type = cfg.CONF.OVS.tunnel_type or constants.TYPE_GRE
        elif cfg.CONF.OVS.tunnel_type:
            # A configured tunnel type implicitly enables tunneling.
            self.tunnel_type = cfg.CONF.OVS.tunnel_type
            self.enable_tunneling = True
        self.tunnel_id_ranges = []
        if self.enable_tunneling:
            self._parse_tunnel_id_ranges()
            ovs_db_v2.sync_tunnel_allocations(self.tunnel_id_ranges)
        elif self.tenant_network_type in constants.TUNNEL_NETWORK_TYPES:
            LOG.error(_("Tunneling disabled but tenant_network_type is '%s'. "
                        "Server terminated!"), self.tenant_network_type)
            sys.exit(1)
        self.setup_rpc()
        self.network_scheduler = importutils.import_object(
            cfg.CONF.network_scheduler_driver
        )
        self.router_scheduler = importutils.import_object(
            cfg.CONF.router_scheduler_driver
        )
    def setup_rpc(self):
        """Wire up RPC: consumers for core/L3 topics, agent notifiers,
        and the callback dispatcher. Starts a consumer thread."""
        # RPC support
        self.service_topics = {svc_constants.CORE: topics.PLUGIN,
                               svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN}
        self.conn = rpc.create_connection(new=True)
        self.notifier = AgentNotifierApi(topics.AGENT)
        self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = (
            dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
        )
        self.agent_notifiers[q_const.AGENT_TYPE_L3] = (
            l3_rpc_agent_api.L3AgentNotify
        )
        self.callbacks = OVSRpcCallbacks(self.notifier, self.tunnel_type)
        self.dispatcher = self.callbacks.create_rpc_dispatcher()
        for svc_topic in self.service_topics.values():
            self.conn.create_consumer(svc_topic, self.dispatcher, fanout=False)
        # Consume from all consumers in a thread
        self.conn.consume_in_thread()
def _parse_network_vlan_ranges(self):
try:
self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges(
cfg.CONF.OVS.network_vlan_ranges)
except Exception as ex:
LOG.error(_("%s. Server terminated!"), ex)
sys.exit(1)
LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges)
def _parse_tunnel_id_ranges(self):
for entry in cfg.CONF.OVS.tunnel_id_ranges:
entry = entry.strip()
try:
tun_min, tun_max = entry.split(':')
self.tunnel_id_ranges.append((int(tun_min), int(tun_max)))
except ValueError as ex:
LOG.error(_("Invalid tunnel ID range: "
"'%(range)s' - %(e)s. Server terminated!"),
{'range': entry, 'e': ex})
sys.exit(1)
LOG.info(_("Tunnel ID ranges: %s"), self.tunnel_id_ranges)
def _extend_network_dict_provider(self, context, network):
binding = ovs_db_v2.get_network_binding(context.session,
network['id'])
network[provider.NETWORK_TYPE] = binding.network_type
if binding.network_type in constants.TUNNEL_NETWORK_TYPES:
network[provider.PHYSICAL_NETWORK] = None
network[provider.SEGMENTATION_ID] = binding.segmentation_id
elif binding.network_type == constants.TYPE_FLAT:
network[provider.PHYSICAL_NETWORK] = binding.physical_network
network[provider.SEGMENTATION_ID] = None
elif binding.network_type == constants.TYPE_VLAN:
network[provider.PHYSICAL_NETWORK] = binding.physical_network
network[provider.SEGMENTATION_ID] = binding.segmentation_id
elif binding.network_type == constants.TYPE_LOCAL:
network[provider.PHYSICAL_NETWORK] = None
network[provider.SEGMENTATION_ID] = None
    def _process_provider_create(self, context, attrs):
        """Validate provider:* attributes on network create.

        Returns (network_type, physical_network, segmentation_id), or
        (None, None, None) when no provider attribute was supplied (a
        plain tenant network). Raises InvalidInput for any inconsistent
        combination.
        """
        network_type = attrs.get(provider.NETWORK_TYPE)
        physical_network = attrs.get(provider.PHYSICAL_NETWORK)
        segmentation_id = attrs.get(provider.SEGMENTATION_ID)
        network_type_set = attributes.is_attr_set(network_type)
        physical_network_set = attributes.is_attr_set(physical_network)
        segmentation_id_set = attributes.is_attr_set(segmentation_id)
        if not (network_type_set or physical_network_set or
                segmentation_id_set):
            # No provider attributes at all: tenant network.
            return (None, None, None)
        if not network_type_set:
            msg = _("provider:network_type required")
            raise q_exc.InvalidInput(error_message=msg)
        elif network_type == constants.TYPE_FLAT:
            if segmentation_id_set:
                msg = _("provider:segmentation_id specified for flat network")
                raise q_exc.InvalidInput(error_message=msg)
            else:
                segmentation_id = constants.FLAT_VLAN_ID
        elif network_type == constants.TYPE_VLAN:
            if not segmentation_id_set:
                msg = _("provider:segmentation_id required")
                raise q_exc.InvalidInput(error_message=msg)
            if not utils.is_valid_vlan_tag(segmentation_id):
                msg = (_("provider:segmentation_id out of range "
                         "(%(min_id)s through %(max_id)s)") %
                       {'min_id': q_const.MIN_VLAN_TAG,
                        'max_id': q_const.MAX_VLAN_TAG})
                raise q_exc.InvalidInput(error_message=msg)
        elif network_type in constants.TUNNEL_NETWORK_TYPES:
            if not self.enable_tunneling:
                msg = _("%s networks are not enabled") % network_type
                raise q_exc.InvalidInput(error_message=msg)
            if physical_network_set:
                msg = _("provider:physical_network specified for %s "
                        "network") % network_type
                raise q_exc.InvalidInput(error_message=msg)
            else:
                physical_network = None
            if not segmentation_id_set:
                msg = _("provider:segmentation_id required")
                raise q_exc.InvalidInput(error_message=msg)
        elif network_type == constants.TYPE_LOCAL:
            if physical_network_set:
                msg = _("provider:physical_network specified for local "
                        "network")
                raise q_exc.InvalidInput(error_message=msg)
            else:
                physical_network = None
            if segmentation_id_set:
                msg = _("provider:segmentation_id specified for local "
                        "network")
                raise q_exc.InvalidInput(error_message=msg)
            else:
                segmentation_id = None
        else:
            msg = _("provider:network_type %s not supported") % network_type
            raise q_exc.InvalidInput(error_message=msg)
        if network_type in [constants.TYPE_VLAN, constants.TYPE_FLAT]:
            # physical_network must be a configured range, defaulting to
            # 'default' when one exists.
            if physical_network_set:
                if physical_network not in self.network_vlan_ranges:
                    msg = _("Unknown provider:physical_network "
                            "%s") % physical_network
                    raise q_exc.InvalidInput(error_message=msg)
            elif 'default' in self.network_vlan_ranges:
                physical_network = 'default'
            else:
                msg = _("provider:physical_network required")
                raise q_exc.InvalidInput(error_message=msg)
        return (network_type, physical_network, segmentation_id)
def create_network(self, context, network):
(network_type, physical_network,
segmentation_id) = self._process_provider_create(context,
network['network'])
session = context.session
#set up default security groups
tenant_id = self._get_tenant_id_for_create(
context, network['network'])
self._ensure_default_security_group(context, tenant_id)
with session.begin(subtransactions=True):
if not network_type:
# tenant network
network_type = self.tenant_network_type
if network_type == constants.TYPE_NONE:
raise q_exc.TenantNetworksDisabled()
elif network_type == constants.TYPE_VLAN:
(physical_network,
segmentation_id) = ovs_db_v2.reserve_vlan(session)
elif network_type in constants.TUNNEL_NETWORK_TYPES:
segmentation_id = ovs_db_v2.reserve_tunnel(session)
# no reservation needed for TYPE_LOCAL
else:
# provider network
if network_type in [constants.TYPE_VLAN, constants.TYPE_FLAT]:
ovs_db_v2.reserve_specific_vlan(session, physical_network,
segmentation_id)
elif network_type in constants.TUNNEL_NETWORK_TYPES:
ovs_db_v2.reserve_specific_tunnel(session, segmentation_id)
# no reservation needed for TYPE_LOCAL
net = super(OVSNeutronPluginV2, self).create_network(context,
network)
ovs_db_v2.add_network_binding(session, net['id'], network_type,
physical_network, segmentation_id)
self._process_l3_create(context, net, network['network'])
self._extend_network_dict_provider(context, net)
# note - exception will rollback entire transaction
LOG.debug(_("Created network: %s"), net['id'])
return net
def update_network(self, context, id, network):
provider._raise_if_updates_provider_attributes(network['network'])
session = context.session
with session.begin(subtransactions=True):
net = super(OVSNeutronPluginV2, self).update_network(context, id,
network)
self._process_l3_update(context, net, network['network'])
self._extend_network_dict_provider(context, net)
return net
def delete_network(self, context, id):
session = context.session
with session.begin(subtransactions=True):
binding = ovs_db_v2.get_network_binding(session, id)
super(OVSNeutronPluginV2, self).delete_network(context, id)
if binding.network_type in constants.TUNNEL_NETWORK_TYPES:
ovs_db_v2.release_tunnel(session, binding.segmentation_id,
self.tunnel_id_ranges)
elif binding.network_type in [constants.TYPE_VLAN,
constants.TYPE_FLAT]:
ovs_db_v2.release_vlan(session, binding.physical_network,
binding.segmentation_id,
self.network_vlan_ranges)
# the network_binding record is deleted via cascade from
# the network record, so explicit removal is not necessary
self.notifier.network_delete(context, id)
def get_network(self, context, id, fields=None):
session = context.session
with session.begin(subtransactions=True):
net = super(OVSNeutronPluginV2, self).get_network(context,
id, None)
self._extend_network_dict_provider(context, net)
return self._fields(net, fields)
def get_networks(self, context, filters=None, fields=None,
sorts=None,
limit=None, marker=None, page_reverse=False):
session = context.session
with session.begin(subtransactions=True):
nets = super(OVSNeutronPluginV2,
self).get_networks(context, filters, None, sorts,
limit, marker, page_reverse)
for net in nets:
self._extend_network_dict_provider(context, net)
return [self._fields(net, fields) for net in nets]
def create_port(self, context, port):
# Set port status as 'DOWN'. This will be updated by agent
port['port']['status'] = q_const.PORT_STATUS_DOWN
port_data = port['port']
session = context.session
with session.begin(subtransactions=True):
self._ensure_default_security_group_on_port(context, port)
sgids = self._get_security_groups_on_port(context, port)
dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, [])
port = super(OVSNeutronPluginV2, self).create_port(context, port)
self._process_portbindings_create_and_update(context,
port_data, port)
self._process_port_create_security_group(context, port, sgids)
self._process_port_create_extra_dhcp_opts(context, port,
dhcp_opts)
port[addr_pair.ADDRESS_PAIRS] = (
self._process_create_allowed_address_pairs(
context, port,
port_data.get(addr_pair.ADDRESS_PAIRS)))
self.notify_security_groups_member_updated(context, port)
return port
def update_port(self, context, id, port):
session = context.session
need_port_update_notify = False
changed_fixed_ips = 'fixed_ips' in port['port']
with session.begin(subtransactions=True):
original_port = super(OVSNeutronPluginV2, self).get_port(
context, id)
updated_port = super(OVSNeutronPluginV2, self).update_port(
context, id, port)
if addr_pair.ADDRESS_PAIRS in port['port']:
self._delete_allowed_address_pairs(context, id)
self._process_create_allowed_address_pairs(
context, updated_port,
port['port'][addr_pair.ADDRESS_PAIRS])
need_port_update_notify = True
elif changed_fixed_ips:
self._check_fixed_ips_and_address_pairs_no_overlap(
context, updated_port)
need_port_update_notify |= self.update_security_group_on_port(
context, id, port, original_port, updated_port)
self._process_portbindings_create_and_update(context,
port['port'],
updated_port)
need_port_update_notify |= self._update_extra_dhcp_opts_on_port(
context, id, port, updated_port)
need_port_update_notify |= self.is_security_group_member_updated(
context, original_port, updated_port)
if original_port['admin_state_up'] != updated_port['admin_state_up']:
need_port_update_notify = True
if need_port_update_notify:
binding = ovs_db_v2.get_network_binding(None,
updated_port['network_id'])
self.notifier.port_update(context, updated_port,
binding.network_type,
binding.segmentation_id,
binding.physical_network)
return updated_port
def delete_port(self, context, id, l3_port_check=True):
# if needed, check to see if this is a port owned by
# and l3-router. If so, we should prevent deletion.
if l3_port_check:
self.prevent_l3_port_deletion(context, id)
session = context.session
with session.begin(subtransactions=True):
self.disassociate_floatingips(context, id)
port = self.get_port(context, id)
self._delete_port_security_group_bindings(context, id)
super(OVSNeutronPluginV2, self).delete_port(context, id)
self.notify_security_groups_member_updated(context, port)
| true | true |
f722268be4ed5165edfde83085c14b1b8daef1d5 | 10,704 | py | Python | args/ft_args.py | humanlab/HaRT | ab5da16cb1bfdadc3fb77a99f0fda8123ea1fe02 | [
"Apache-2.0"
] | null | null | null | args/ft_args.py | humanlab/HaRT | ab5da16cb1bfdadc3fb77a99f0fda8123ea1fe02 | [
"Apache-2.0"
] | null | null | null | args/ft_args.py | humanlab/HaRT | ab5da16cb1bfdadc3fb77a99f0fda8123ea1fe02 | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass, field
from typing import Optional
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    Using `HfArgumentParser` we can turn this class
    into argparse arguments to be able to specify them on
    the command line.
    """
    # --- task selection -------------------------------------------------
    task_type: Optional[str] = field(
        default=None,
        metadata={"help": "The type of task to train on: 'document' or 'user' -level"},
    )
    task_name: Optional[str] = field(
        default=None,
        metadata={"help": "The name of the task to train on: 'stance', 'sentiment', 'age', 'ope', or 'ner'"},
    )
    # --- tokenization / padding ----------------------------------------
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    return_entity_level_metrics: bool = field(
        default=False, metadata={"help": "NER return entity level metrics or not"}
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": "Whether to pad all samples to `max_seq_length`. "
            "If False, will pad the samples dynamically when batching to the maximum length in the batch."
        },
    )
    # --- debugging truncation knobs ------------------------------------
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
            "value if set."
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
            "value if set."
        },
    )
    use_history_output: bool = field(
        default=False, metadata={"help": "Should use the history output from Ar_HuLM for FT tasks predictions (regression/user-level tasks mainly) or not."}
    )
    save_preds_labels: bool = field(
        default=False, metadata={"help": "Should save the predictions and labels into text files or not."}
    )
    num_labels: Optional[int] = field(
        default=None,
        metadata={
            "help": "Number of classification labels when fine tuning a 'document' type task."
        },
    )
    # --- data sources: database tables or pickle files ------------------
    train_table: Optional[str] = field(
        default=None,
        metadata={"help": "The input training data table in a csv or pickle file (path to the file)."})
    dev_table: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data table in a csv or pickle file (path to the file) to validate the model during training."},
    )
    test_table: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input test data table in a csv or pickle file (path to the file) to evaluate the trained model for perplexity."},
    )
    db: Optional[str] = field(
        default=None,
        metadata={"help": "The database where input training data table resides. (a mysql database)."}
    )
    hostname: Optional[str] = field(
        default=None,
        metadata={"help": "The host name or IP where the (mysql) database resides."}
    )
    # --- human-aware (HuLM) block batching -------------------------------
    max_train_blocks: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training blocks to this "
            "value if set."
        },
    )
    max_val_blocks: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of validation blocks to this "
            "value if set."
        },
    )
    block_size: Optional[int] = field(
        default=1024,
        metadata={
            "help": "Optional input block sequence length after tokenization "
            "(batched into instances of max_train_blocks/max_val_blocks , each of size block_size"
            "The training dataset will be truncated in block of this size for training."
            "Default to the model max input length for single sentence inputs (take into account special tokens)."
        },
    )
    disable_hulm_batching: bool = field(
        default=False, metadata={"help": "Batch the dataset as a flat list ([users, blocks * block_size]) instead of hulm style batching, i.e., [users, blocks, block_size] dimensions."}
    )
    agg_type: Optional[str] = field(
        default=None,
        metadata={
            "help": "One of 'last', 'sum', 'avg', 'masked_last', 'masked_avg', 'masked_sum'"
            "When using user_states/history for downstream tasks, what kind of "
            "user_states/history aggregation to use. Currently, used only when saving states for users."
        }
    )
    # --- pickled splits and per-user history ----------------------------
    train_pkl: Optional[str] = field(
        default=None,
        metadata={"help": "The input training data pickle file."})
    train_hist_pkl: Optional[str] = field(
        default=None,
        metadata={"help": "The input training users' historical data pickle file."})
    dev_pkl: Optional[str] = field(
        default=None,
        metadata={"help": "The input dev data pickle file."})
    dev_hist_pkl: Optional[str] = field(
        default=None,
        metadata={"help": "The input dev users' historical data pickle file."})
    test_pkl: Optional[str] = field(
        default=None,
        metadata={"help": "The input test data pickle file."})
    test_hist_pkl: Optional[str] = field(
        default=None,
        metadata={"help": "The input test users' historical data pickle file."})
    def __post_init__(self) -> None:
        # Fail fast on combinations the training scripts cannot handle.
        if self.task_type is None or (self.task_type != 'user' and self.task_type != 'document'):
            raise ValueError("Need to define task type as one of 'document' or 'user'")
        if self.num_labels is None:
            raise ValueError('num_labels required to fine-tune downstream tasks!')
        if self.train_table is None and (self.dev_table is None and self.test_table is None):
            raise ValueError("Need a training/validation (dev or test) table.")
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    init_seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of model initialization."})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
            "with private models)."
        },
    )
    use_qh05_wts: bool = field(
        default=False,
        metadata={
            # NOTE: adjacent string literals concatenate; each part keeps a
            # trailing space so the rendered help text stays readable.
            "help": "Whether to use (at 'layer_ins') pretrained query, key, value weights followed by "
            "query weights (for concatenated query and history) initialized with 0.5 mean, instead of, "
            "newly initialized query (for concatenated hidden states and history) and key weights"
        }
    )
    use_hart_no_hist: bool = field(
        default=False,
        metadata={"help": "Whether to use HaRT model with no available historical context."},
    )
    freeze_model: bool = field(
        default=False, metadata={"help": "Freeze the transformer module of the model. Train only classification layer."}
    )
    load_non_PT_hulm_model: bool = field(
        default=False, metadata={"help": "Whether to use a non-pretrained hulm model or not"}
    )
    add_history: bool = field(
        default=False, metadata={"help": "Whether to use history (and history recurrence) or not."}
    )
    initial_history: Optional[str] = field(
        default=None, metadata={"help": "A .pt file containing a reasonable initial history embedding as a pytorch tensor."}
    )
    #TODO: following args should ideally be a part of training_args
    metric_for_early_stopping: Optional[str] = field(
        default=None, metadata={"help": "The metric to use to compare two different models."}
    )
    early_stopping_patience: int = field(
        default=3,
        metadata={
            "help": "To be used with `metric_for_early_stopping`. "
            "To stop training when the specified `metric_for_early_stopping` worsens for "
            "`early_stopping_patience` evaluation calls."
        }
    )
    early_stopping_threshold: Optional[float] = field(
        default=0.0,
        metadata={
            "help": "Use with `metric_for_early_stopping` and `early_stopping_patience` to denote how "
            "much the specified metric must improve to satisfy early stopping conditions."
        }
    )
    search_params: bool = field(
        default=False, metadata={"help": "To enable Hyperparameters search using ``optuna`` or ``Ray Tune``"}
    )
    use_ray: bool = field(
        default=False, metadata={"help": "To enable Hyperparameters search using ``Ray Tune``"}
    )
    use_optuna: bool = field(
        default=False, metadata={"help": "To enable Hyperparameters search using ``optuna``"}
    )
    num_trials: Optional[int] = field(
        default=10,
        metadata={
            "help": "Number of trials to run when 'search_params' is true."
        },
    )
| 42.816 | 185 | 0.634529 | from dataclasses import dataclass, field
from typing import Optional
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    task_type: Optional[str] = field(
        default=None,
        metadata={"help": "The type of task to train on: 'document' or 'user' -level"},
    )
    task_name: Optional[str] = field(
        default=None,
        metadata={"help": "The name of the task to train on: 'stance', 'sentiment', 'age', 'ope', or 'ner'"},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    return_entity_level_metrics: bool = field(
        default=False, metadata={"help": "NER return entity level metrics or not"}
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": "Whether to pad all samples to `max_seq_length`. "
            "If False, will pad the samples dynamically when batching to the maximum length in the batch."
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
            "value if set."
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
            "value if set."
        },
    )
    use_history_output: bool = field(
        default=False, metadata={"help": "Should use the history output from Ar_HuLM for FT tasks predictions (regression/user-level tasks mainly) or not."}
    )
    save_preds_labels: bool = field(
        default=False, metadata={"help": "Should save the predictions and labels into text files or not."}
    )
    num_labels: Optional[int] = field(
        default=None,
        metadata={
            "help": "Number of classification labels when fine tuning a 'document' type task."
        },
    )
    train_table: Optional[str] = field(
        default=None,
        metadata={"help": "The input training data table in a csv or pickle file (path to the file)."})
    dev_table: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data table in a csv or pickle file (path to the file) to validate the model during training."},
    )
    test_table: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input test data table in a csv or pickle file (path to the file) to evaluate the trained model for perplexity."},
    )
    db: Optional[str] = field(
        default=None,
        metadata={"help": "The database where input training data table resides. (a mysql database)."}
    )
    hostname: Optional[str] = field(
        default=None,
        metadata={"help": "The host name or IP where the (mysql) database resides."}
    )
    max_train_blocks: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training blocks to this "
            "value if set."
        },
    )
    max_val_blocks: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of validation blocks to this "
            "value if set."
        },
    )
    block_size: Optional[int] = field(
        default=1024,
        metadata={
            # NOTE: adjacent string literals concatenate; each part keeps a
            # trailing space (and the parenthesis is closed) so the rendered
            # help text stays readable.
            "help": "Optional input block sequence length after tokenization "
            "(batched into instances of max_train_blocks/max_val_blocks, each of size block_size). "
            "The training dataset will be truncated in block of this size for training. "
            "Default to the model max input length for single sentence inputs (take into account special tokens)."
        },
    )
    disable_hulm_batching: bool = field(
        default=False, metadata={"help": "Batch the dataset as a flat list ([users, blocks * block_size]) instead of hulm style batching, i.e., [users, blocks, block_size] dimensions."}
    )
    agg_type: Optional[str] = field(
        default=None,
        metadata={
            "help": "One of 'last', 'sum', 'avg', 'masked_last', 'masked_avg', 'masked_sum'. "
            "When using user_states/history for downstream tasks, what kind of "
            "user_states/history aggregation to use. Currently, used only when saving states for users."
        }
    )
    train_pkl: Optional[str] = field(
        default=None,
        metadata={"help": "The input training data pickle file."})
    train_hist_pkl: Optional[str] = field(
        default=None,
        metadata={"help": "The input training users' historical data pickle file."})
    dev_pkl: Optional[str] = field(
        default=None,
        metadata={"help": "The input dev data pickle file."})
    dev_hist_pkl: Optional[str] = field(
        default=None,
        metadata={"help": "The input dev users' historical data pickle file."})
    test_pkl: Optional[str] = field(
        default=None,
        metadata={"help": "The input test data pickle file."})
    test_hist_pkl: Optional[str] = field(
        default=None,
        metadata={"help": "The input test users' historical data pickle file."})
    def __post_init__(self):
        """Validate the parsed arguments; raise ValueError for unusable combinations."""
        if self.task_type is None or (self.task_type != 'user' and self.task_type != 'document'):
            raise ValueError("Need to define task type as one of 'document' or 'user'")
        if self.num_labels is None:
            raise ValueError('num_labels required to fine-tune downstream tasks!')
        if self.train_table is None and (self.dev_table is None and self.test_table is None):
            raise ValueError("Need a training/validation (dev or test) table.")
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    init_seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of model initialization."})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
            "with private models)."
        },
    )
    use_qh05_wts: bool = field(
        default=False,
        metadata={
            # NOTE: adjacent string literals concatenate; each part keeps a
            # trailing space so the rendered help text stays readable.
            "help": "Whether to use (at 'layer_ins') pretrained query, key, value weights followed by "
            "query weights (for concatenated query and history) initialized with 0.5 mean, instead of, "
            "newly initialized query (for concatenated hidden states and history) and key weights"
        }
    )
    use_hart_no_hist: bool = field(
        default=False,
        metadata={"help": "Whether to use HaRT model with no available historical context."},
    )
    freeze_model: bool = field(
        default=False, metadata={"help": "Freeze the transformer module of the model. Train only classification layer."}
    )
    load_non_PT_hulm_model: bool = field(
        default=False, metadata={"help": "Whether to use a non-pretrained hulm model or not"}
    )
    add_history: bool = field(
        default=False, metadata={"help": "Whether to use history (and history recurrence) or not."}
    )
    initial_history: Optional[str] = field(
        default=None, metadata={"help": "A .pt file containing a reasonable initial history embedding as a pytorch tensor."}
    )
    #TODO: following args should ideally be a part of training_args
    metric_for_early_stopping: Optional[str] = field(
        default=None, metadata={"help": "The metric to use to compare two different models."}
    )
    early_stopping_patience: int = field(
        default=3,
        metadata={
            "help": "To be used with `metric_for_early_stopping`. "
            "To stop training when the specified `metric_for_early_stopping` worsens for "
            "`early_stopping_patience` evaluation calls."
        }
    )
    early_stopping_threshold: Optional[float] = field(
        default=0.0,
        metadata={
            "help": "Use with `metric_for_early_stopping` and `early_stopping_patience` to denote how "
            "much the specified metric must improve to satisfy early stopping conditions."
        }
    )
    search_params: bool = field(
        default=False, metadata={"help": "To enable Hyperparameters search using ``optuna`` or ``Ray Tune``"}
    )
    use_ray: bool = field(
        default=False, metadata={"help": "To enable Hyperparameters search using ``Ray Tune``"}
    )
    use_optuna: bool = field(
        default=False, metadata={"help": "To enable Hyperparameters search using ``optuna``"}
    )
    num_trials: Optional[int] = field(
        default=10,
        metadata={
            "help": "Number of trials to run when 'search_params' is true."
        },
    )
| true | true |
f7222707f0f7481e358da6ecdef7b27d9ff7da98 | 12,969 | py | Python | office365/sharepoint/folders/folder.py | rikeshtailor/Office365-REST-Python-Client | ca7bfa1b22212137bb4e984c0457632163e89a43 | [
"MIT"
] | null | null | null | office365/sharepoint/folders/folder.py | rikeshtailor/Office365-REST-Python-Client | ca7bfa1b22212137bb4e984c0457632163e89a43 | [
"MIT"
] | null | null | null | office365/sharepoint/folders/folder.py | rikeshtailor/Office365-REST-Python-Client | ca7bfa1b22212137bb4e984c0457632163e89a43 | [
"MIT"
] | null | null | null | from office365.runtime.client_result import ClientResult
from office365.runtime.queries.create_entity_query import CreateEntityQuery
from office365.runtime.queries.service_operation_query import ServiceOperationQuery
from office365.runtime.queries.update_entity_query import UpdateEntityQuery
from office365.runtime.resource_path import ResourcePath
from office365.runtime.resource_path_service_operation import ResourcePathServiceOperation
from office365.sharepoint.actions.create_file import create_file_query
from office365.sharepoint.base_entity import BaseEntity
from office365.sharepoint.changes.change_collection import ChangeCollection
from office365.sharepoint.changes.change_query import ChangeQuery
from office365.sharepoint.contenttypes.content_type_id import ContentTypeId
from office365.sharepoint.files.file_creation_information import FileCreationInformation
from office365.sharepoint.listitems.listitem import ListItem
from office365.sharepoint.storagemetrics.storage_metrics import StorageMetrics
from office365.sharepoint.utilities.move_copy_options import MoveCopyOptions
from office365.sharepoint.utilities.move_copy_util import MoveCopyUtil
from office365.sharepoint.types.resource_path import ResourcePath as SPResPath
from office365.runtime.compat import urlparse
class Folder(BaseEntity):
    """Represents a folder in a SharePoint Web site."""
    @staticmethod
    def from_url(abs_url):
        """
        Addresses a Folder by absolute url
        :type abs_url: str
        :return: a Folder client object bound to a new ClientContext for the url's site
        """
        from office365.sharepoint.client_context import ClientContext
        ctx = ClientContext.from_url(abs_url)
        # Strip the site prefix to obtain the server-relative url the API expects.
        relative_url = abs_url.replace(ctx.base_url, "")
        return ctx.web.get_folder_by_server_relative_url(relative_url)
    def recycle(self):
        """Moves the folder to the Recycle Bin and returns the identifier of the new Recycle Bin item."""
        result = ClientResult(self.context)
        qry = ServiceOperationQuery(self, "Recycle", None, None, None, result)
        self.context.add_query(qry)
        return result
    def recycle_with_parameters(self, parameters):
        """
        Moves the folder to the Recycle Bin using the provided delete parameters.
        :type parameters: office365.sharepoint.folders.folder_delete_parameters.FolderDeleteParameters
        :return: ClientResult holding the new Recycle Bin item identifier once executed
        """
        result = ClientResult(self.context)
        qry = ServiceOperationQuery(self, "RecycleWithParameters", None, parameters, "parameters", result)
        self.context.add_query(qry)
        return result
    def get_changes(self, query=None):
        """Returns the collection of changes from the change log that have occurred within the folder,
        based on the specified query.
        :param office365.sharepoint.changeQuery.ChangeQuery query: Specifies which changes to return
        """
        if query is None:
            # default: only folder-scoped changes
            query = ChangeQuery(folder=True)
        changes = ChangeCollection(self.context)
        qry = ServiceOperationQuery(self, "getChanges", None, query, "query", changes)
        self.context.add_query(qry)
        return changes
    def get_list_item_changes(self, query):
        """
        Returns the collection of list item changes within the folder for the specified query.
        :param office365.sharepoint.changeQuery.ChangeQuery query: Specifies which changes to return
        """
        changes = ChangeCollection(self.context)
        qry = ServiceOperationQuery(self, "getListItemChanges", None, query, "query", changes)
        self.context.add_query(qry)
        return changes
    def add(self, name):
        """Adds the folder that is located under a current folder
        :type name: str
        :return: the new (not yet executed) Folder client object
        """
        new_folder = Folder(self.context)
        def _add_sub_folder():
            new_folder_url = "/".join([self.serverRelativeUrl, name])
            new_folder.set_property("ServerRelativeUrl", new_folder_url)
            qry = CreateEntityQuery(self.folders, new_folder, new_folder)
            self.context.add_query(qry)
        # Creation is deferred until this folder's ServerRelativeUrl is loaded.
        self.ensure_property("ServerRelativeUrl", _add_sub_folder)
        return new_folder
    def rename(self, name):
        """Rename a Folder resource
        :type name: str
        """
        item = self.list_item_all_fields
        # Both Title and FileLeafRef are updated so the display name and the
        # url leaf stay consistent.
        item.set_property('Title', name)
        item.set_property('FileLeafRef', name)
        qry = UpdateEntityQuery(item)
        self.context.add_query(qry)
        return self
    def upload_file(self, file_name, content):
        """Uploads a file into folder
        :type file_name: str
        :type content: str
        :rtype: office365.sharepoint.files.file.File
        """
        info = FileCreationInformation(url=file_name, overwrite=True, content=content)
        qry = create_file_query(self.files, info)
        self.context.add_query(qry)
        return qry.return_type
    def copy_to(self, new_relative_url, keep_both=False, reset_author_and_created=False):
        """Copies the folder with files to the destination URL.
        :type new_relative_url: str
        :type keep_both: bool
        :type reset_author_and_created: bool
        """
        target_folder = Folder(self.context)
        target_folder.set_property("ServerRelativeUrl", new_relative_url)
        def _copy_folder():
            opts = MoveCopyOptions(keep_both=keep_both, reset_author_and_created_on_copy=reset_author_and_created)
            MoveCopyUtil.copy_folder(self.context, self._build_full_url(self.serverRelativeUrl),
                                     self._build_full_url(new_relative_url), opts)
        self.ensure_property("ServerRelativeUrl", _copy_folder)
        return target_folder
    def copy_to_by_path(self, new_relative_path, keep_both=False, reset_author_and_created=False):
        """Copies the folder with files to the destination Path.
        :type new_relative_path: str
        :type keep_both: bool
        :type reset_author_and_created: bool
        """
        target_folder = Folder(self.context)
        target_folder.set_property("ServerRelativePath", SPResPath(new_relative_path))
        def _copy_folder():
            opts = MoveCopyOptions(keep_both=keep_both, reset_author_and_created_on_copy=reset_author_and_created)
            MoveCopyUtil.copy_folder_by_path(self.context, self._build_full_url(self.server_relative_path.DecodedUrl),
                                             self._build_full_url(new_relative_path), opts)
        self.ensure_property("ServerRelativePath", _copy_folder)
        return target_folder
    def move_to(self, new_relative_url, retain_editor_and_modified=False):
        """Moves the folder with files to the destination URL.
        :type new_relative_url: str
        :type retain_editor_and_modified: bool
        """
        target_folder = Folder(self.context)
        target_folder.set_property("ServerRelativeUrl", new_relative_url)
        def _move_folder():
            MoveCopyUtil.move_folder(self.context, self._build_full_url(self.serverRelativeUrl),
                                     self._build_full_url(new_relative_url),
                                     MoveCopyOptions(retain_editor_and_modified_on_move=retain_editor_and_modified))
        self.ensure_property("ServerRelativeUrl", _move_folder)
        return target_folder
    def move_to_by_path(self, new_relative_path, retain_editor_and_modified=False):
        """Moves the folder with files to the destination Path.
        :type new_relative_path: str
        :type retain_editor_and_modified: bool
        """
        target_folder = Folder(self.context)
        target_folder.set_property("ServerRelativePath", SPResPath(new_relative_path))
        def _move_folder():
            MoveCopyUtil.move_folder_by_path(self.context, self._build_full_url(self.server_relative_path.DecodedUrl),
                                             self._build_full_url(new_relative_path),
                                             MoveCopyOptions(
                                                 retain_editor_and_modified_on_move=retain_editor_and_modified))
        self.ensure_property("ServerRelativePath", _move_folder)
        return target_folder
    @property
    def storage_metrics(self):
        """Gets the StorageMetrics entity associated with the folder."""
        return self.properties.get("StorageMetrics",
                                   StorageMetrics(self.context, ResourcePath("StorageMetrics", self.resource_path)))
    @property
    def list_item_all_fields(self):
        """Specifies the list item fields (2) values for the list item corresponding to the folder."""
        return self.properties.get("ListItemAllFields",
                                   ListItem(self.context, ResourcePath("ListItemAllFields", self.resource_path)))
    @property
    def files(self):
        """Get a file collection"""
        from office365.sharepoint.files.file_collection import FileCollection
        return self.properties.get("Files",
                                   FileCollection(self.context, ResourcePath("Files", self.resource_path)))
    @property
    def folders(self):
        """Specifies the collection of list folders contained within the list folder.
        """
        from office365.sharepoint.folders.folder_collection import FolderCollection
        return self.properties.get("Folders",
                                   FolderCollection(self.context, ResourcePath("Folders", self.resource_path)))
    @property
    def parent_folder(self):
        """Specifies the list folder.
        """
        return self.properties.get("ParentFolder",
                                   Folder(self.context, ResourcePath("ParentFolder", self.resource_path)))
    @property
    def name(self):
        """Specifies the list folder name.
        :rtype: str or None
        """
        return self.properties.get("Name", None)
    @property
    def unique_id(self):
        """Gets the unique ID of the folder.
        :rtype: str or None
        """
        return self.properties.get("UniqueId", None)
    @property
    def exists(self):
        """Gets a Boolean value that indicates whether the folder exists.
        :rtype: bool or None
        """
        return self.properties.get("Exists", None)
    @property
    def welcome_page(self):
        """Specifies the server-relative URL for the list folder Welcome page.
        :rtype: str or None
        """
        return self.properties.get("WelcomePage", None)
    @property
    def unique_content_type_order(self):
        """Specifies the content type order for the list folder.
        :rtype: office365.sharepoint.contenttypes.content_type_id.ContentTypeId or None
        """
        return self.properties.get("UniqueContentTypeOrder", ContentTypeId())
    @property
    def content_type_order(self):
        """Specifies the content type order for the list folder.
        :rtype: office365.sharepoint.contenttypes.content_type_id.ContentTypeId or None
        """
        return self.properties.get("ContentTypeOrder", ContentTypeId())
    @property
    def time_last_modified(self):
        """Gets the last time this folder or a direct child was modified in UTC.
        :rtype: str or None
        """
        return self.properties.get("TimeLastModified", None)
    @property
    def serverRelativeUrl(self):
        """Gets the server-relative URL of the list folder.
        :rtype: str or None
        """
        return self.properties.get("ServerRelativeUrl", None)
    @property
    def server_relative_path(self):
        """Gets the server-relative Path of the list folder.
        :rtype: SPResPath or None
        """
        return self.properties.get("ServerRelativePath", SPResPath(None))
    def get_property(self, name, default_value=None):
        """Gets a property value, substituting typed defaults for known navigation properties."""
        property_mapping = {
            "ListItemAllFields": self.list_item_all_fields,
            "ParentFolder": self.parent_folder,
            "ServerRelativePath": self.server_relative_path
        }
        if name in property_mapping:
            default_value = property_mapping[name]
        return super(Folder, self).get_property(name, default_value)
    def set_property(self, name, value, persist_changes=True):
        """Sets a property value and keeps the entity's resource path in sync
        when an addressing property (url/path/id) changes."""
        super(Folder, self).set_property(name, value, persist_changes)
        # fallback: create a new resource path
        if name == "ServerRelativeUrl":
            self._resource_path = ResourcePathServiceOperation("getFolderByServerRelativeUrl", [value],
                                                               ResourcePath("Web"))
        elif name == "ServerRelativePath":
            self._resource_path = ResourcePathServiceOperation("getFolderByServerRelativePath", [value],
                                                               ResourcePath("Web"))
        elif name == "UniqueId":
            self._resource_path = ResourcePathServiceOperation("getFolderById", [value], ResourcePath("Web"))
        return self
    def _build_full_url(self, rel_url):
        """
        Prepends the site authority (scheme + host) to a server-relative url.
        :type rel_url: str
        """
        site_path = urlparse(self.context.base_url).path
        return self.context.base_url.replace(site_path, "") + rel_url
| 40.783019 | 118 | 0.671524 | from office365.runtime.client_result import ClientResult
from office365.runtime.queries.create_entity_query import CreateEntityQuery
from office365.runtime.queries.service_operation_query import ServiceOperationQuery
from office365.runtime.queries.update_entity_query import UpdateEntityQuery
from office365.runtime.resource_path import ResourcePath
from office365.runtime.resource_path_service_operation import ResourcePathServiceOperation
from office365.sharepoint.actions.create_file import create_file_query
from office365.sharepoint.base_entity import BaseEntity
from office365.sharepoint.changes.change_collection import ChangeCollection
from office365.sharepoint.changes.change_query import ChangeQuery
from office365.sharepoint.contenttypes.content_type_id import ContentTypeId
from office365.sharepoint.files.file_creation_information import FileCreationInformation
from office365.sharepoint.listitems.listitem import ListItem
from office365.sharepoint.storagemetrics.storage_metrics import StorageMetrics
from office365.sharepoint.utilities.move_copy_options import MoveCopyOptions
from office365.sharepoint.utilities.move_copy_util import MoveCopyUtil
from office365.sharepoint.types.resource_path import ResourcePath as SPResPath
from office365.runtime.compat import urlparse
class Folder(BaseEntity):
    """Represents a folder in a SharePoint Web site."""
    @staticmethod
    def from_url(abs_url):
        """
        Addresses a Folder by absolute url
        :type abs_url: str
        """
        from office365.sharepoint.client_context import ClientContext
        ctx = ClientContext.from_url(abs_url)
        # Strip the site prefix to obtain the server-relative url the API expects.
        relative_url = abs_url.replace(ctx.base_url, "")
        return ctx.web.get_folder_by_server_relative_url(relative_url)
    def recycle(self):
        """Moves the folder to the Recycle Bin and returns the identifier of the new Recycle Bin item."""
        result = ClientResult(self.context)
        qry = ServiceOperationQuery(self, "Recycle", None, None, None, result)
        self.context.add_query(qry)
        return result
    def recycle_with_parameters(self, parameters):
        """
        Moves the folder to the Recycle Bin using the provided delete parameters.
        :type parameters: office365.sharepoint.folders.folder_delete_parameters.FolderDeleteParameters
        """
        result = ClientResult(self.context)
        qry = ServiceOperationQuery(self, "RecycleWithParameters", None, parameters, "parameters", result)
        self.context.add_query(qry)
        return result
    def get_changes(self, query=None):
        """Returns the collection of changes from the change log that have occurred within the folder,
        based on the specified query.
        :param office365.sharepoint.changeQuery.ChangeQuery query: Specifies which changes to return
        """
        if query is None:
            # default: only folder-scoped changes
            query = ChangeQuery(folder=True)
        changes = ChangeCollection(self.context)
        qry = ServiceOperationQuery(self, "getChanges", None, query, "query", changes)
        self.context.add_query(qry)
        return changes
    def get_list_item_changes(self, query):
        """
        Returns the collection of list item changes within the folder for the specified query.
        :param office365.sharepoint.changeQuery.ChangeQuery query: Specifies which changes to return
        """
        changes = ChangeCollection(self.context)
        qry = ServiceOperationQuery(self, "getListItemChanges", None, query, "query", changes)
        self.context.add_query(qry)
        return changes
    def add(self, name):
        """Adds the folder that is located under a current folder
        :type name: str
        """
        new_folder = Folder(self.context)
        def _add_sub_folder():
            new_folder_url = "/".join([self.serverRelativeUrl, name])
            new_folder.set_property("ServerRelativeUrl", new_folder_url)
            qry = CreateEntityQuery(self.folders, new_folder, new_folder)
            self.context.add_query(qry)
        # Creation is deferred until this folder's ServerRelativeUrl is loaded.
        self.ensure_property("ServerRelativeUrl", _add_sub_folder)
        return new_folder
    def rename(self, name):
        """Rename a Folder resource
        :type name: str
        """
        item = self.list_item_all_fields
        item.set_property('Title', name)
        item.set_property('FileLeafRef', name)
        qry = UpdateEntityQuery(item)
        self.context.add_query(qry)
        return self
    def upload_file(self, file_name, content):
        """Uploads a file into folder
        :type file_name: str
        :type content: str
        :rtype: office365.sharepoint.files.file.File
        """
        info = FileCreationInformation(url=file_name, overwrite=True, content=content)
        qry = create_file_query(self.files, info)
        self.context.add_query(qry)
        return qry.return_type
    def copy_to(self, new_relative_url, keep_both=False, reset_author_and_created=False):
        """Copies the folder with files to the destination URL.
        :type new_relative_url: str
        :type keep_both: bool
        :type reset_author_and_created: bool
        """
        target_folder = Folder(self.context)
        target_folder.set_property("ServerRelativeUrl", new_relative_url)
        def _copy_folder():
            opts = MoveCopyOptions(keep_both=keep_both, reset_author_and_created_on_copy=reset_author_and_created)
            MoveCopyUtil.copy_folder(self.context, self._build_full_url(self.serverRelativeUrl),
                                     self._build_full_url(new_relative_url), opts)
        self.ensure_property("ServerRelativeUrl", _copy_folder)
        return target_folder
    def copy_to_by_path(self, new_relative_path, keep_both=False, reset_author_and_created=False):
        """Copies the folder with files to the destination Path.
        :type new_relative_path: str
        :type keep_both: bool
        :type reset_author_and_created: bool
        """
        target_folder = Folder(self.context)
        target_folder.set_property("ServerRelativePath", SPResPath(new_relative_path))
        def _copy_folder():
            opts = MoveCopyOptions(keep_both=keep_both, reset_author_and_created_on_copy=reset_author_and_created)
            MoveCopyUtil.copy_folder_by_path(self.context, self._build_full_url(self.server_relative_path.DecodedUrl),
                                             self._build_full_url(new_relative_path), opts)
        self.ensure_property("ServerRelativePath", _copy_folder)
        return target_folder
    def move_to(self, new_relative_url, retain_editor_and_modified=False):
        """Moves the folder with files to the destination URL.
        :type new_relative_url: str
        :type retain_editor_and_modified: bool
        """
        target_folder = Folder(self.context)
        target_folder.set_property("ServerRelativeUrl", new_relative_url)
        def _move_folder():
            MoveCopyUtil.move_folder(self.context, self._build_full_url(self.serverRelativeUrl),
                                     self._build_full_url(new_relative_url),
                                     MoveCopyOptions(retain_editor_and_modified_on_move=retain_editor_and_modified))
        self.ensure_property("ServerRelativeUrl", _move_folder)
        return target_folder
    def move_to_by_path(self, new_relative_path, retain_editor_and_modified=False):
        """Moves the folder with files to the destination Path.
        :type new_relative_path: str
        :type retain_editor_and_modified: bool
        """
        target_folder = Folder(self.context)
        target_folder.set_property("ServerRelativePath", SPResPath(new_relative_path))
        def _move_folder():
            MoveCopyUtil.move_folder_by_path(self.context, self._build_full_url(self.server_relative_path.DecodedUrl),
                                             self._build_full_url(new_relative_path),
                                             MoveCopyOptions(
                                                 retain_editor_and_modified_on_move=retain_editor_and_modified))
        self.ensure_property("ServerRelativePath", _move_folder)
        return target_folder
    @property
    def storage_metrics(self):
        """Gets the StorageMetrics entity associated with the folder."""
        return self.properties.get("StorageMetrics",
                                   StorageMetrics(self.context, ResourcePath("StorageMetrics", self.resource_path)))
    @property
    def list_item_all_fields(self):
        """Specifies the list item fields values for the list item corresponding to the folder."""
        return self.properties.get("ListItemAllFields",
                                   ListItem(self.context, ResourcePath("ListItemAllFields", self.resource_path)))
    @property
    def files(self):
        """Get a file collection"""
        from office365.sharepoint.files.file_collection import FileCollection
        return self.properties.get("Files",
                                   FileCollection(self.context, ResourcePath("Files", self.resource_path)))
    @property
    def folders(self):
        """Specifies the collection of list folders contained within the list folder."""
        from office365.sharepoint.folders.folder_collection import FolderCollection
        return self.properties.get("Folders",
                                   FolderCollection(self.context, ResourcePath("Folders", self.resource_path)))
    @property
    def parent_folder(self):
        """Specifies the parent list folder."""
        return self.properties.get("ParentFolder",
                                   Folder(self.context, ResourcePath("ParentFolder", self.resource_path)))
    @property
    def name(self):
        """Specifies the list folder name.
        :rtype: str or None
        """
        return self.properties.get("Name", None)
    @property
    def unique_id(self):
        """Gets the unique ID of the folder.
        :rtype: str or None
        """
        return self.properties.get("UniqueId", None)
    @property
    def exists(self):
        """Gets a Boolean value that indicates whether the folder exists.
        :rtype: bool or None
        """
        return self.properties.get("Exists", None)
    @property
    def welcome_page(self):
        """Specifies the server-relative URL for the list folder Welcome page.
        :rtype: str or None
        """
        return self.properties.get("WelcomePage", None)
    @property
    def unique_content_type_order(self):
        """Specifies the unique content type order for the list folder.
        :rtype: office365.sharepoint.contenttypes.content_type_id.ContentTypeId or None
        """
        return self.properties.get("UniqueContentTypeOrder", ContentTypeId())
    @property
    def content_type_order(self):
        """Specifies the content type order for the list folder.
        :rtype: office365.sharepoint.contenttypes.content_type_id.ContentTypeId or None
        """
        return self.properties.get("ContentTypeOrder", ContentTypeId())
    @property
    def time_last_modified(self):
        """Gets the last time this folder or a direct child was modified in UTC.
        :rtype: str or None
        """
        return self.properties.get("TimeLastModified", None)
    @property
    def serverRelativeUrl(self):
        """Gets the server-relative URL of the list folder.
        :rtype: str or None
        """
        return self.properties.get("ServerRelativeUrl", None)
    @property
    def server_relative_path(self):
        """Gets the server-relative Path of the list folder.
        :rtype: SPResPath or None
        """
        return self.properties.get("ServerRelativePath", SPResPath(None))
    def get_property(self, name, default_value=None):
        """Gets a property value, substituting typed defaults for known navigation properties."""
        property_mapping = {
            "ListItemAllFields": self.list_item_all_fields,
            "ParentFolder": self.parent_folder,
            "ServerRelativePath": self.server_relative_path
        }
        if name in property_mapping:
            default_value = property_mapping[name]
        return super(Folder, self).get_property(name, default_value)
    def set_property(self, name, value, persist_changes=True):
        """Sets a property value and keeps the entity's resource path in sync
        when an addressing property (url/path/id) changes."""
        super(Folder, self).set_property(name, value, persist_changes)
        # fallback: create a new resource path
        if name == "ServerRelativeUrl":
            self._resource_path = ResourcePathServiceOperation("getFolderByServerRelativeUrl", [value],
                                                               ResourcePath("Web"))
        elif name == "ServerRelativePath":
            self._resource_path = ResourcePathServiceOperation("getFolderByServerRelativePath", [value],
                                                               ResourcePath("Web"))
        elif name == "UniqueId":
            self._resource_path = ResourcePathServiceOperation("getFolderById", [value], ResourcePath("Web"))
        return self
    def _build_full_url(self, rel_url):
        """
        Prepends the site authority (scheme + host) to a server-relative url.
        :type rel_url: str
        """
        site_path = urlparse(self.context.base_url).path
        return self.context.base_url.replace(site_path, "") + rel_url
| true | true |
f722280ec9e8ec0e2c0c2b5813f5b46b814656b3 | 5,621 | py | Python | navigation/arena_local_planner/model_based/crowdnav_ros/scripts/crowd_nav/utils/explorer.py | kilinmao/sarl_star | dde9bb2b690c705a615195f4b570af3ea9dfe05e | [
"MIT"
] | 7 | 2021-11-11T13:25:25.000Z | 2021-12-25T21:34:41.000Z | navigation/arena_local_planner/model_based/crowdnav_ros/scripts/crowd_nav/utils/explorer.py | kilinmao/sarl_star | dde9bb2b690c705a615195f4b570af3ea9dfe05e | [
"MIT"
] | 1 | 2021-11-20T20:34:14.000Z | 2021-11-20T20:34:14.000Z | arena_navigation/arena_local_planner/model_based/crowdnav_ros/scripts/crowd_nav/utils/explorer.py | ignc-research/arena-marl | 3b9b2521436ef7f364a250da71a01e915d840296 | [
"MIT"
] | null | null | null | import logging
import copy
import torch
from crowd_sim.envs.utils.info import *
class Explorer(object):
def __init__(self, env, robot, device, memory=None, gamma=None, target_policy=None):
self.env = env
self.robot = robot
self.device = device
self.memory = memory
self.gamma = gamma
self.target_policy = target_policy
self.target_model = None
def update_target_model(self, target_model):
self.target_model = copy.deepcopy(target_model)
# @profile
def run_k_episodes(self, k, phase, update_memory=False, imitation_learning=False, episode=None,
print_failure=False):
self.robot.policy.set_phase(phase)
success_times = []
collision_times = []
timeout_times = []
success = 0
collision = 0
timeout = 0
too_close = 0
min_dist = []
cumulative_rewards = []
collision_cases = []
timeout_cases = []
for i in range(k):
ob = self.env.reset(phase)
done = False
states = []
actions = []
rewards = []
while not done:
action = self.robot.act(ob)
ob, reward, done, info = self.env.step(action)
states.append(self.robot.policy.last_state)
actions.append(action)
rewards.append(reward)
if isinstance(info, Danger):
too_close += 1
min_dist.append(info.min_dist)
if isinstance(info, ReachGoal):
success += 1
success_times.append(self.env.global_time)
elif isinstance(info, Collision):
collision += 1
collision_cases.append(i)
collision_times.append(self.env.global_time)
elif isinstance(info, Timeout):
timeout += 1
timeout_cases.append(i)
timeout_times.append(self.env.time_limit)
else:
raise ValueError('Invalid end signal from environment')
if update_memory:
if isinstance(info, ReachGoal) or isinstance(info, Collision):
# only add positive(success) or negative(collision) experience in experience set
self.update_memory(states, actions, rewards, imitation_learning)
cumulative_rewards.append(sum([pow(self.gamma, t * self.robot.time_step * self.robot.v_pref)
* reward for t, reward in enumerate(rewards)]))
success_rate = success / k
collision_rate = collision / k
assert success + collision + timeout == k
avg_nav_time = sum(success_times) / len(success_times) if success_times else self.env.time_limit
extra_info = '' if episode is None else 'in episode {} '.format(episode)
logging.info('{:<5} {}has success rate: {:.2f}, collision rate: {:.2f}, nav time: {:.2f}, total reward: {:.4f}'.
format(phase.upper(), extra_info, success_rate, collision_rate, avg_nav_time,
average(cumulative_rewards)))
if phase in ['val', 'test']:
total_time = sum(success_times + collision_times + timeout_times) * self.robot.time_step
logging.info('Frequency of being in danger: %.2f and average min separate distance in danger: %.2f',
too_close / total_time, average(min_dist))
if print_failure:
logging.info('Collision cases: ' + ' '.join([str(x) for x in collision_cases]))
logging.info('Timeout cases: ' + ' '.join([str(x) for x in timeout_cases]))
def update_memory(self, states, actions, rewards, imitation_learning=False):
if self.memory is None or self.gamma is None:
raise ValueError('Memory or gamma value is not set!')
for i, state in enumerate(states):
reward = rewards[i]
# VALUE UPDATE
if imitation_learning:
# define the value of states in IL as cumulative discounted rewards, which is the same in RL
state = self.target_policy.transform(state)
# value = pow(self.gamma, (len(states) - 1 - i) * self.robot.time_step * self.robot.v_pref)
value = sum([pow(self.gamma, max(t - i, 0) * self.robot.time_step * self.robot.v_pref) * reward
* (1 if t >= i else 0) for t, reward in enumerate(rewards)])
else:
if i == len(states) - 1:
# terminal state
value = reward
else:
next_state = states[i + 1]
gamma_bar = pow(self.gamma, self.robot.time_step * self.robot.v_pref)
value = reward + gamma_bar * self.target_model(next_state.unsqueeze(0)).data.item()
value = torch.Tensor([value]).to(self.device)
# # transform state of different human_num into fixed-size tensor
# if len(state.size()) == 1:
# human_num = 1
# feature_size = state.size()[0]
# else:
# human_num, feature_size = state.size()
# if human_num != 5:
# padding = torch.zeros((5 - human_num, feature_size))
# state = torch.cat([state, padding])
self.memory.push((state, value))
def average(input_list):
if input_list:
return sum(input_list) / len(input_list)
else:
return 0
| 42.263158 | 120 | 0.560754 | import logging
import copy
import torch
from crowd_sim.envs.utils.info import *
class Explorer(object):
def __init__(self, env, robot, device, memory=None, gamma=None, target_policy=None):
self.env = env
self.robot = robot
self.device = device
self.memory = memory
self.gamma = gamma
self.target_policy = target_policy
self.target_model = None
def update_target_model(self, target_model):
self.target_model = copy.deepcopy(target_model)
def run_k_episodes(self, k, phase, update_memory=False, imitation_learning=False, episode=None,
print_failure=False):
self.robot.policy.set_phase(phase)
success_times = []
collision_times = []
timeout_times = []
success = 0
collision = 0
timeout = 0
too_close = 0
min_dist = []
cumulative_rewards = []
collision_cases = []
timeout_cases = []
for i in range(k):
ob = self.env.reset(phase)
done = False
states = []
actions = []
rewards = []
while not done:
action = self.robot.act(ob)
ob, reward, done, info = self.env.step(action)
states.append(self.robot.policy.last_state)
actions.append(action)
rewards.append(reward)
if isinstance(info, Danger):
too_close += 1
min_dist.append(info.min_dist)
if isinstance(info, ReachGoal):
success += 1
success_times.append(self.env.global_time)
elif isinstance(info, Collision):
collision += 1
collision_cases.append(i)
collision_times.append(self.env.global_time)
elif isinstance(info, Timeout):
timeout += 1
timeout_cases.append(i)
timeout_times.append(self.env.time_limit)
else:
raise ValueError('Invalid end signal from environment')
if update_memory:
if isinstance(info, ReachGoal) or isinstance(info, Collision):
self.update_memory(states, actions, rewards, imitation_learning)
cumulative_rewards.append(sum([pow(self.gamma, t * self.robot.time_step * self.robot.v_pref)
* reward for t, reward in enumerate(rewards)]))
success_rate = success / k
collision_rate = collision / k
assert success + collision + timeout == k
avg_nav_time = sum(success_times) / len(success_times) if success_times else self.env.time_limit
extra_info = '' if episode is None else 'in episode {} '.format(episode)
logging.info('{:<5} {}has success rate: {:.2f}, collision rate: {:.2f}, nav time: {:.2f}, total reward: {:.4f}'.
format(phase.upper(), extra_info, success_rate, collision_rate, avg_nav_time,
average(cumulative_rewards)))
if phase in ['val', 'test']:
total_time = sum(success_times + collision_times + timeout_times) * self.robot.time_step
logging.info('Frequency of being in danger: %.2f and average min separate distance in danger: %.2f',
too_close / total_time, average(min_dist))
if print_failure:
logging.info('Collision cases: ' + ' '.join([str(x) for x in collision_cases]))
logging.info('Timeout cases: ' + ' '.join([str(x) for x in timeout_cases]))
def update_memory(self, states, actions, rewards, imitation_learning=False):
if self.memory is None or self.gamma is None:
raise ValueError('Memory or gamma value is not set!')
for i, state in enumerate(states):
reward = rewards[i]
if imitation_learning:
state = self.target_policy.transform(state)
value = sum([pow(self.gamma, max(t - i, 0) * self.robot.time_step * self.robot.v_pref) * reward
* (1 if t >= i else 0) for t, reward in enumerate(rewards)])
else:
if i == len(states) - 1:
value = reward
else:
next_state = states[i + 1]
gamma_bar = pow(self.gamma, self.robot.time_step * self.robot.v_pref)
value = reward + gamma_bar * self.target_model(next_state.unsqueeze(0)).data.item()
value = torch.Tensor([value]).to(self.device)
self.memory.push((state, value))
def average(input_list):
if input_list:
return sum(input_list) / len(input_list)
else:
return 0
| true | true |
f722281aed4b588b7fe6bc868576317fb976354c | 548 | py | Python | books/migrations/0002_auto_20180205_0033.py | RandyRomero/library | 2a150d76c1f656a885e56dbffd46dd1a33f562e9 | [
"MIT"
] | null | null | null | books/migrations/0002_auto_20180205_0033.py | RandyRomero/library | 2a150d76c1f656a885e56dbffd46dd1a33f562e9 | [
"MIT"
] | null | null | null | books/migrations/0002_auto_20180205_0033.py | RandyRomero/library | 2a150d76c1f656a885e56dbffd46dd1a33f562e9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-02-04 21:33
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('books', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='book',
old_name='pud_date',
new_name='pub_date',
),
migrations.RenameField(
model_name='book',
old_name='name',
new_name='title',
),
]
| 21.076923 | 46 | 0.560219 |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('books', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='book',
old_name='pud_date',
new_name='pub_date',
),
migrations.RenameField(
model_name='book',
old_name='name',
new_name='title',
),
]
| true | true |
f7222871cfafe696b6c9c3fcdcc8754d53c48f2c | 5,922 | py | Python | kubernetes_asyncio/client/models/v1_component_condition.py | olitheolix/kubernetes_asyncio | 344426793e4e4b653bcd8e4a29c6fa4766e1fff7 | [
"Apache-2.0"
] | 1 | 2020-03-25T01:24:27.000Z | 2020-03-25T01:24:27.000Z | kubernetes_asyncio/client/models/v1_component_condition.py | olitheolix/kubernetes_asyncio | 344426793e4e4b653bcd8e4a29c6fa4766e1fff7 | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/client/models/v1_component_condition.py | olitheolix/kubernetes_asyncio | 344426793e4e4b653bcd8e4a29c6fa4766e1fff7 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.10.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class V1ComponentCondition(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'error': 'str',
'message': 'str',
'status': 'str',
'type': 'str'
}
attribute_map = {
'error': 'error',
'message': 'message',
'status': 'status',
'type': 'type'
}
def __init__(self, error=None, message=None, status=None, type=None): # noqa: E501
"""V1ComponentCondition - a model defined in Swagger""" # noqa: E501
self._error = None
self._message = None
self._status = None
self._type = None
self.discriminator = None
if error is not None:
self.error = error
if message is not None:
self.message = message
self.status = status
self.type = type
@property
def error(self):
"""Gets the error of this V1ComponentCondition. # noqa: E501
Condition error code for a component. For example, a health check error code. # noqa: E501
:return: The error of this V1ComponentCondition. # noqa: E501
:rtype: str
"""
return self._error
@error.setter
def error(self, error):
"""Sets the error of this V1ComponentCondition.
Condition error code for a component. For example, a health check error code. # noqa: E501
:param error: The error of this V1ComponentCondition. # noqa: E501
:type: str
"""
self._error = error
@property
def message(self):
"""Gets the message of this V1ComponentCondition. # noqa: E501
Message about the condition for a component. For example, information about a health check. # noqa: E501
:return: The message of this V1ComponentCondition. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this V1ComponentCondition.
Message about the condition for a component. For example, information about a health check. # noqa: E501
:param message: The message of this V1ComponentCondition. # noqa: E501
:type: str
"""
self._message = message
@property
def status(self):
"""Gets the status of this V1ComponentCondition. # noqa: E501
Status of the condition for a component. Valid values for \"Healthy\": \"True\", \"False\", or \"Unknown\". # noqa: E501
:return: The status of this V1ComponentCondition. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1ComponentCondition.
Status of the condition for a component. Valid values for \"Healthy\": \"True\", \"False\", or \"Unknown\". # noqa: E501
:param status: The status of this V1ComponentCondition. # noqa: E501
:type: str
"""
if status is None:
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
@property
def type(self):
"""Gets the type of this V1ComponentCondition. # noqa: E501
Type of condition for a component. Valid value: \"Healthy\" # noqa: E501
:return: The type of this V1ComponentCondition. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1ComponentCondition.
Type of condition for a component. Valid value: \"Healthy\" # noqa: E501
:param type: The type of this V1ComponentCondition. # noqa: E501
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ComponentCondition):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 29.462687 | 129 | 0.578352 |
import pprint
import re
import six
class V1ComponentCondition(object):
swagger_types = {
'error': 'str',
'message': 'str',
'status': 'str',
'type': 'str'
}
attribute_map = {
'error': 'error',
'message': 'message',
'status': 'status',
'type': 'type'
}
def __init__(self, error=None, message=None, status=None, type=None):
self._error = None
self._message = None
self._status = None
self._type = None
self.discriminator = None
if error is not None:
self.error = error
if message is not None:
self.message = message
self.status = status
self.type = type
@property
def error(self):
return self._error
@error.setter
def error(self, error):
self._error = error
@property
def message(self):
return self._message
@message.setter
def message(self, message):
self._message = message
@property
def status(self):
return self._status
@status.setter
def status(self, status):
if status is None:
raise ValueError("Invalid value for `status`, must not be `None`")
self._status = status
@property
def type(self):
return self._type
@type.setter
def type(self, type):
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`")
self._type = type
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, V1ComponentCondition):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f722292fc89496c0e00f66e2a1ae564ccf20b115 | 452 | py | Python | tests/components/switch_as_x/conftest.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | tests/components/switch_as_x/conftest.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | tests/components/switch_as_x/conftest.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Fixtures for the Switch as X integration tests."""
from __future__ import annotations
from collections.abc import Generator
from unittest.mock import AsyncMock, patch
import pytest
@pytest.fixture
def mock_setup_entry() -> Generator[AsyncMock, None, None]:
"""Mock setting up a config entry."""
with patch(
"homeassistant.components.switch_as_x.async_setup_entry", return_value=True
) as mock_setup:
yield mock_setup
| 26.588235 | 83 | 0.745575 | from __future__ import annotations
from collections.abc import Generator
from unittest.mock import AsyncMock, patch
import pytest
@pytest.fixture
def mock_setup_entry() -> Generator[AsyncMock, None, None]:
with patch(
"homeassistant.components.switch_as_x.async_setup_entry", return_value=True
) as mock_setup:
yield mock_setup
| true | true |
f7222a9c5d62333400242377e8d452dae0e37caa | 22,040 | py | Python | rampage/daq/gpib.py | shreyaspotnis/rampage | e2565aef7ee16ee06523de975e8aa41aca14e3b2 | [
"MIT"
] | null | null | null | rampage/daq/gpib.py | shreyaspotnis/rampage | e2565aef7ee16ee06523de975e8aa41aca14e3b2 | [
"MIT"
] | null | null | null | rampage/daq/gpib.py | shreyaspotnis/rampage | e2565aef7ee16ee06523de975e8aa41aca14e3b2 | [
"MIT"
] | null | null | null | import visa
import numpy as np
import logging
from datetime import datetime
resource_manager = visa.ResourceManager()
class Aglient33250A(object):
def __init__(self):
self.instr = self.open_instrument()
def open_instrument(self):
resource_list = resource_manager.list_resources()
gpib_address_list = filter(lambda x: x[:4] == 'GPIB', resource_list)
for addr in gpib_address_list:
instr = resource_manager.open_resource(addr)
idn = instr.query('*IDN?')
if 'Agilent Technologies,33250A' in idn:
return instr
else:
raise GPIBError('Aglient33250A function generator not in GPIB device list')
# device not round raise exception
def set_output(self, state):
"""Sets whether the function generator is outputting a voltage."""
if state:
self.instr.write('OUTP ON')
else:
self.instr.write('OUTP OFF')
def set_fm_ext(self, freq, amplitude, peak_freq_dev=None,
output_state=True):
"""Sets the func generator to frequency modulation with external modulation.
freq is the carrier frequency in Hz."""
if peak_freq_dev is None:
peak_freq_dev = freq
commands = ['FUNC SIN', # set to output sine functions
'FM:STAT ON',
'FREQ {0}'.format(freq),
'FM:SOUR EXT',
# 'FM:FREQ {0}'.format(freq),
'FM:DEV {0}'.format(peak_freq_dev),
'VOLT {0}'.format(amplitude),
'VOLT:OFFS 0'] # set to frequency modulation
if output_state is True:
commands.append('OUTP ON')
else:
commands.append('OUTP OFF')
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
self.instr.write(command_string)
# self.read_all_errors()
def set_burst(self, freq, amplitude, period, output_state=True):
"""Sets the func generator to burst mode with external trigerring."""
ncyc = int(period*freq)
commands = ['FUNC SIN',
'BURS:STAT ON',
'BURS:MODE TRIG', # external trigger
'TRIG:SOUR EXT',
'TRIG:SLOP POS',
'FREQ {0}'.format(freq),
'VOLT {0}'.format(amplitude),
'VOLT:OFFS 0',
'BURS:NCYC {0}'.format(ncyc)]
if output_state is True:
commands.append('OUTP ON')
else:
commands.append('OUTP OFF')
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
self.instr.write(command_string)
# self.read_all_errors()
def set_continuous(self, freq, amplitude, offset, output_state=True):
"""Programs the function generator to output a continuous sine wave."""
commands = ['FUNC SIN',
'BURS:STAT OFF',
'SWE:STAT OFF',
'FM:STAT OFF',
'FREQ {0}'.format(freq),
'VOLT {0}'.format(amplitude),
'VOLT:OFFS {0}'.format(offset),
]
if output_state is True:
commands.append('OUTP ON')
else:
commands.append('OUTP OFF')
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
self.instr.write(command_string)
# self.read_all_errors()
def set_freq_sweep(self, start_freq, stop_freq, sweep_time, amplitude,
output_state=True):
commands = ['FUNC SIN',
'TRIG:SOUR EXT',
'TRIG:SLOP POS',
'SWE:STAT ON',
'FREQ:STAR {0}'.format(start_freq),
'FREQ:STOP {0}'.format(stop_freq),
'SWE:TIME {0}'.format(sweep_time),
'VOLT {0}'.format(amplitude),
'VOLT:OFFS 0',
'SWE:STAT ON']
if output_state is True:
commands.append('OUTP ON')
else:
commands.append('OUTP OFF')
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
self.instr.write(command_string)
def set_arbitrary(self, freq, low_volt, high_volt, output_state=True):
"""Programs the function generator to output the arbitrary waveform."""
commands = ['FUNC USER',
'BURS:STAT OFF',
'SWE:STAT OFF',
'FM:STAT OFF',
'FREQ {0}'.format(freq),
'VOLT:HIGH {0}'.format(high_volt),
'VOLT:LOW {0}'.format(low_volt),
]
if output_state is True:
commands.append('OUTP ON')
else:
commands.append('OUTP OFF')
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
self.instr.write(command_string)
def read_all_errors(self):
done = False
while not done:
err = self.instr.query('SYST:ERR?')
print(err)
if err[:2] == '+0':
done = True
class TektronixTDS1002(object):
def __init__(self):
self.instr = self.open_instrument()
def open_instrument(self):
resource_list = resource_manager.list_resources()
gpib_address_list = filter(lambda x: x[:4] == 'GPIB', resource_list)
for addr in gpib_address_list:
instr = resource_manager.open_resource(addr)
idn = instr.query('*IDN?')
if 'TEKTRONIX,TDS 1002' in idn:
return instr
else:
raise GPIBError('TektronicsTDS1002 oscilloscope not in GPIB device list')
# device not round raise exception
def get_data(self, channel=1):
hor_pos = float(self.instr.query('HOR:MAI:POS?'))
hor_scale = float(self.instr.query('HOR:MAI:SCA?'))
ch1_pos = float(self.instr.query('CH{0}:POS?'.format(channel)))
ch1_sca = float(self.instr.query('CH{0}:SCA?'.format(channel)))
commands = ['DATA:WIDTH 1',
'DATA:STAR 1',
'DATA:STOP 2500',
'DATA:SOU CH{0}'.format(channel),
'CURV?']
command_string = '\r\n'.join(commands)
self.instr.write(command_string)
# the first 6 bytes are #42500 and the last byte is \n
# ignore those
data = self.instr.read_raw()[6:-1]
data = np.fromstring(data, dtype=np.int8)
data_scaled = (np.array(data, dtype='float')*(10.0/2**8) - ch1_pos)*ch1_sca
time_array = np.arange(len(data_scaled), dtype='float')*10.0*hor_scale/len(data_scaled)
return time_array, data_scaled
def get_save_data(self, file_path, channel=1):
hor_pos = float(self.instr.query('HOR:MAI:POS?'))
hor_scale = float(self.instr.query('HOR:MAI:SCA?'))
ch1_pos = float(self.instr.query('CH{0}:POS?'.format(channel)))
ch1_sca = float(self.instr.query('CH{0}:SCA?'.format(channel)))
commands = ['DATA:WIDTH 1',
'DATA:STAR 1',
'DATA:STOP 2500',
'DATA:SOU CH{0}'.format(channel),
'CURV?']
command_string = '\r\n'.join(commands)
self.instr.write(command_string)
# the first 6 bytes are #42500 and the last byte is \n
# ignore those
data = self.instr.read_raw()[6:-1]
data = np.fromstring(data, dtype=np.int8)
data_scaled = (np.array(data, dtype='float')*(10.0/2**8) - ch1_pos)*ch1_sca
time_array = np.arange(len(data_scaled), dtype='float')*10.0*hor_scale/len(data_scaled)
np.savetxt(file_path + '\\' + datetime.now().strftime('%Y_%m_%d_%H_%M_%S') + '.txt', (time_array, data_scaled), fmt='%1.4e')
#return time_array, data_scaled
class TektronixTDS2012C(TektronixTDS1002):
    """USB driver for the Tektronix TDS 2012C oscilloscope.

    Inherits all trace-acquisition logic from TektronixTDS1002 and only
    overrides device discovery (USB instead of GPIB).
    """

    def __init__(self):
        # Fix: the base __init__ already calls the overridden open_instrument,
        # so the previous explicit `self.instr = self.open_instrument()` here
        # opened the scope twice.
        super(TektronixTDS2012C, self).__init__()

    def open_instrument(self):
        """Return a VISA handle to the first TDS 2012C found on the USB bus.

        Raises:
            GPIBError: if no matching instrument is present.
        """
        resource_list = resource_manager.list_resources()
        usb_address_list = filter(lambda x: x[:3] == 'USB', resource_list)
        for addr in usb_address_list:
            instr = resource_manager.open_resource(addr)
            idn = instr.query('*IDN?')
            if 'TEKTRONIX,TDS 2012C' in idn:
                return instr
            instr.close()  # fix: sessions to non-matching devices were leaked
        raise GPIBError('TektronixTDS2012C oscilloscope not in USB device list')
class NewportESP300(object):
    """GPIB driver for the Newport ESP300 motion controller (axes numbered from 1)."""

    def __init__(self):
        self.instr = self.open_instrument()

    def open_instrument(self):
        """Return a VISA handle to the first ESP300 found on the GPIB bus.

        Raises:
            GPIBError: if no matching instrument is present.
        """
        resource_list = resource_manager.list_resources()
        gpib_address_list = filter(lambda x: x[:4] == 'GPIB', resource_list)
        for addr in gpib_address_list:
            instr = resource_manager.open_resource(addr)
            idn = instr.query('*IDN?')
            if 'ESP300 Version' in idn:
                return instr
            instr.close()  # fix: sessions to non-matching devices were leaked
        raise GPIBError('ESP300 Motion Controller not in GPIB device list')

    def read_position(self, num_axes=2):
        """Query and print the position ('<axis>TP?') of axes 1..num_axes.

        Fix: the loop previously ran range(num_axes-1) and therefore never
        read the last axis (the default num_axes=2 read only axis 1).
        """
        for axis in range(1, num_axes + 1):
            pos = self.instr.query(str(axis)+'TP?')
            print('Pos' + str(axis) + ' ' + pos[:8])

    def move_absposition(self, abs_pos, axis):
        """Command *axis* to the absolute position abs_pos (rounded to 1 um)."""
        self.instr.write(str(int(axis))+'PA'+str(np.around(abs_pos, decimals=3)))
        print('Set Axis ' + str(axis) + ' to ' + str(np.around(abs_pos, decimals=3)))

    def read_all_errors(self):
        """Drain and print the controller error buffer until it reports no error."""
        done = False
        while not done:
            err = self.instr.query('TB?')
            print(err)
            if 'NO ERROR DETECTED' in err:
                done = True
class AgilentN900A(object):
    """TCP/IP (VISA) driver for an Agilent N900A-series signal analyzer."""

    def __init__(self):
        self.instr = self.open_instrument()

    def open_instrument(self):
        """Open the analyzer at its fixed LAN address and return the VISA handle."""
        IP = '192.168.0.109'
        # open_resource replaces the deprecated pyvisa get_instrument alias.
        instr = resource_manager.open_resource('TCPIP::' + IP + '::INSTR')
        return instr

    def get_n_save_marker_pos(self, file_path, channel=1):
        """Read marker 1 (frequency, amplitude), stop averaging, and append
        the pair as one row to a timestamped text file in *file_path*."""
        self.instr.write(':CALC:MARK1:X?')
        # Fix: np.float was removed in NumPy 1.24; use the builtin float.
        freq = float(self.instr.read())
        self.instr.write(':CALC:MARK1:Y?')
        amp = float(self.instr.read())
        self.instr.write(':AVER:STAT OFF')
        arr_write = np.array([freq, amp])
        fname = file_path + '\\' + datetime.now().strftime('%Y_%m_%d_%H_%M_%S') + '.txt'
        # Context manager guarantees the file is closed even if savetxt raises.
        with open(fname, 'ab') as f_handle:
            np.savetxt(f_handle, arr_write.reshape(1, arr_write.shape[0]))

    def trigger_marker_avg(self, num_avg=100, freq=6.83468, span=25, ref_lev=15):
        """Configure center freq (GHz), span (MHz), reference level (dBm),
        an externally-triggered averaged sweep, and a continuous-peak marker."""
        commands = [':FREQ:CENT {0}'.format(freq) + ' GHz',
                    ':FREQ:SPAN {0}'.format(span) + ' MHz',
                    ':DISP:WIND:TRAC:Y:RLEV {0}'.format(ref_lev) + ' dBm',
                    ':CALC:MARK:MODE POS',
                    ':CALC:MARK:CPS ON',
                    ':TRIG:SOUR EXT1',
                    ':TRIG:EXT1:LEV 1.0V',
                    ':AVER:STAT ON',
                    ':AVER:COUNT {0}'.format(num_avg)
                    ]
        command_string = '\n'.join(commands)
        print_string = '\n\t' + command_string.replace('\n', '\n\t')
        logging.info(print_string)
        self.instr.write(command_string)
class SRSSG384(object):
    """GPIB driver for the Stanford Research Systems SG384 signal generator.

    The SG384 has two outputs addressed by different SCPI mnemonics: the
    front BNC ('...L' commands, used here for freq < 62.5 MHz, supports a DC
    offset) and the rear RF-doubler output ('...H' commands, used here for
    freq > 4.05 GHz, AC-coupled only).

    NOTE(review): for 62.5 MHz <= freq <= 4.05 GHz the set_* methods program
    frequency/modulation but silently skip amplitude and output-enable --
    confirm whether that band is intentionally unsupported.
    """

    def __init__(self):
        self.instr = self.open_instrument()

    def open_instrument(self):
        """Return a VISA handle to the first SG384 found on the GPIB bus.

        Raises:
            GPIBError: if no matching instrument is present.
        """
        resource_list = resource_manager.list_resources()
        gpib_address_list = filter(lambda x: x[:4] == 'GPIB', resource_list)
        for addr in gpib_address_list:
            instr = resource_manager.open_resource(addr)
            idn = instr.query('*IDN?')
            if 'Stanford Research Systems,SG384' in idn:
                return instr
            instr.close()  # fix: sessions to non-matching devices were leaked
        raise GPIBError('SRS SG384 function generator not in GPIB device list')

    def read_all_errors(self):
        """Drain and print the instrument error queue until it reports 0 (no error)."""
        done = False
        while not done:
            err = self.instr.query('LERR?')
            print(err)
            if err[:1] == '0':
                done = True

    def _send(self, commands):
        """Join *commands* with newlines, log the program, and write it."""
        command_string = '\n'.join(commands)
        print_string = '\n\t' + command_string.replace('\n', '\n\t')
        logging.info(print_string)
        self.instr.write(command_string)

    def _append_output_commands(self, commands, freq, amplitude, offset,
                                output_state, unit=''):
        """Append the amplitude/offset/enable commands for the output that
        serves *freq*.  *unit* is appended to the amplitude (e.g. ' VPP').

        This branch was previously copy-pasted in four methods.
        """
        if freq > 4.05e9:
            commands.append('AMPH {0}{1}'.format(amplitude, unit))  # rear RF doubler
            if offset > 0.0:
                print('HIGH FREQUENCY OUTPUT IS AC ONLY')
            if output_state is True:
                commands.append('ENBH 1')  # enable output
            else:
                commands.append('ENBH 0')
        elif freq < 62.5e6:
            # front BNC amplitude plus DC offset
            commands.extend(['AMPL {0}{1}'.format(amplitude, unit),
                             'OFSL {0}'.format(offset)])
            if output_state is True:
                commands.append('ENBL 1')  # enable output
            else:
                commands.append('ENBL 0')
        # 62.5 MHz <= freq <= 4.05 GHz: nothing appended (see class docstring).

    def set_continuous(self, freq, amplitude, offset, output_state=True):
        """Programs the Stanford MW function generator to output a continuous sine wave.
        External 'triggering' is accomplished using the MW switch."""
        commands = ['MODL 0',  # disable any modulation
                    'FREQ {0}'.format(freq)]
        self._append_output_commands(commands, freq, amplitude, offset,
                                     output_state)
        self._send(commands)

    def set_continuous_Vpp(self, freq, amplitude, offset, output_state=True):
        """Same as set_continuous but with the amplitude expressed in Vpp."""
        commands = ['MODL 0',  # disable any modulation
                    'FREQ {0}'.format(freq)]
        self._append_output_commands(commands, freq, amplitude, offset,
                                     output_state, unit=' VPP')
        self._send(commands)

    def set_fm_ext(self, freq, amplitude, offset=0.0, peak_fm_deviation=None,
                   output_state=True):
        """Set frequency modulation driven by the external modulation input.

        freq is the carrier frequency in Hz; peak_fm_deviation defaults to
        the carrier frequency itself.
        """
        if peak_fm_deviation is None:
            peak_fm_deviation = freq
        commands = ['TYPE 1',  # set to FM
                    'MFNC 5',  # external modulation
                    'FREQ {0}'.format(freq),
                    'FDEV {0}'.format(peak_fm_deviation),
                    'MODL 1'   # enable modulation
                    ]
        self._append_output_commands(commands, freq, amplitude, offset,
                                     output_state)
        self._send(commands)

    def set_freqsweep_ext(self, amplitude, sweep_low_end, sweep_high_end,
                          offset=0.0, output_state=True):
        """Set an externally-driven frequency sweep between sweep_low_end and
        sweep_high_end (carrier = midpoint, deviation = half the span)."""
        sweep_deviation = round(abs(sweep_low_end - sweep_high_end)/2.0, 6)
        freq = sweep_low_end + sweep_deviation
        commands = ['TYPE 3',  # set to sweep
                    'SFNC 5',  # external modulation
                    'FREQ {0}'.format(freq),
                    'SDEV {0}'.format(sweep_deviation),
                    'MODL 1'   # enable modulation
                    ]
        self._append_output_commands(commands, freq, amplitude, offset,
                                     output_state)
        self._send(commands)

    def set_output(self, state):
        """Enable/disable the output appropriate for the currently programmed
        frequency (mid-band frequencies are left untouched -- see class docstring)."""
        freq = float(self.instr.query('FREQ?'))
        if freq > 4.05e9:
            if state:
                self.instr.write('ENBH 1')  # enable rear output
            else:
                self.instr.write('ENBH 0')
        elif freq < 62.5e6:
            if state:
                self.instr.write('ENBL 1')  # enable front output
            else:
                self.instr.write('ENBL 0')

    def trigger_ListMode(self):
        """Iterates the function generator to the next state in ListMode.
        NOTE: ListMode does not enable outputs, but only writes the function
        generator state. Output must be enabled separately."""
        self.instr.write('*TRG')

    def disable_all(self, disable):
        """Disable both outputs and all modulation when *disable* is truthy.

        The command program is logged regardless; it is only written when
        *disable* is truthy (preserved original behavior).
        """
        commands = ['ENBH 0',  # disable high freq. rear output
                    'ENBL 0',  # disable low freq. front bnc
                    'MODL 0'   # disable modulation
                    ]
        command_string = '\n'.join(commands)
        print_string = '\n\t' + command_string.replace('\n', '\n\t')
        logging.info(print_string)
        if disable:
            self.instr.write(command_string)
class RigolDG1022Z(object):
def __init__(self):
self.instr = self.open_instrument()
def open_instrument(self):
resource_list = resource_manager.list_resources()
gpib_address_list = filter(lambda x: x[:3] == 'USB', resource_list)
for addr in gpib_address_list:
instr = resource_manager.open_resource(addr)
idn = instr.query('*IDN?')
if 'Rigol Technologies,DG1022Z,DG1ZA184750979' in idn:
return instr
else:
raise GPIBError('Rigol DG1022Z function generator not in USB device list')
# device not round raise exception
def set_output(self, state, channel=2):
"""Sets whether the function generator is outputting a voltage."""
if state:
self.instr.write(':OUTP{0} ON'.format(channel))
else:
self.instr.write(':OUTP{0} OFF'.format(channel))
def set_continuous(self, freq, amplitude, offset, phase, channel=2):
"""Programs the function generator to output a continuous sine wave."""
commands = [':SOUR{0}:APPL:SIN '.format(channel),
'{0},'.format(freq),
'{0},'.format(amplitude),
'{0},'.format(offset),
'{0}'.format(phase),
]
command_string = ''.join(commands)
logging.info(command_string)
self.instr.write(command_string)
def read_all_errors(self):
done = False
while not done:
err = self.instr.query('SYST:ERR?')
print(err)
if err[:2] == '+0':
done = True
class GPIBError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
#globals
agilent_33250a = Aglient33250A()
tektronixTDS1002 = TektronixTDS1002()
# agilentN900A = AgilentN900A()
#tektronixTDS2012C = TektronixTDS2012C()
stanfordSG384 = SRSSG384()
# newportesp300 = NewportESP300()
rigolDG1022Z = RigolDG1022Z() | 38.802817 | 132 | 0.562069 | import visa
import numpy as np
import logging
from datetime import datetime
resource_manager = visa.ResourceManager()
class Aglient33250A(object):
def __init__(self):
self.instr = self.open_instrument()
def open_instrument(self):
resource_list = resource_manager.list_resources()
gpib_address_list = filter(lambda x: x[:4] == 'GPIB', resource_list)
for addr in gpib_address_list:
instr = resource_manager.open_resource(addr)
idn = instr.query('*IDN?')
if 'Agilent Technologies,33250A' in idn:
return instr
else:
raise GPIBError('Aglient33250A function generator not in GPIB device list')
def set_output(self, state):
if state:
self.instr.write('OUTP ON')
else:
self.instr.write('OUTP OFF')
def set_fm_ext(self, freq, amplitude, peak_freq_dev=None,
output_state=True):
if peak_freq_dev is None:
peak_freq_dev = freq
commands = ['FUNC SIN',
'FM:STAT ON',
'FREQ {0}'.format(freq),
'FM:SOUR EXT',
'FM:DEV {0}'.format(peak_freq_dev),
'VOLT {0}'.format(amplitude),
'VOLT:OFFS 0']
if output_state is True:
commands.append('OUTP ON')
else:
commands.append('OUTP OFF')
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
self.instr.write(command_string)
def set_burst(self, freq, amplitude, period, output_state=True):
ncyc = int(period*freq)
commands = ['FUNC SIN',
'BURS:STAT ON',
'BURS:MODE TRIG',
'TRIG:SOUR EXT',
'TRIG:SLOP POS',
'FREQ {0}'.format(freq),
'VOLT {0}'.format(amplitude),
'VOLT:OFFS 0',
'BURS:NCYC {0}'.format(ncyc)]
if output_state is True:
commands.append('OUTP ON')
else:
commands.append('OUTP OFF')
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
self.instr.write(command_string)
def set_continuous(self, freq, amplitude, offset, output_state=True):
commands = ['FUNC SIN',
'BURS:STAT OFF',
'SWE:STAT OFF',
'FM:STAT OFF',
'FREQ {0}'.format(freq),
'VOLT {0}'.format(amplitude),
'VOLT:OFFS {0}'.format(offset),
]
if output_state is True:
commands.append('OUTP ON')
else:
commands.append('OUTP OFF')
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
self.instr.write(command_string)
def set_freq_sweep(self, start_freq, stop_freq, sweep_time, amplitude,
output_state=True):
commands = ['FUNC SIN',
'TRIG:SOUR EXT',
'TRIG:SLOP POS',
'SWE:STAT ON',
'FREQ:STAR {0}'.format(start_freq),
'FREQ:STOP {0}'.format(stop_freq),
'SWE:TIME {0}'.format(sweep_time),
'VOLT {0}'.format(amplitude),
'VOLT:OFFS 0',
'SWE:STAT ON']
if output_state is True:
commands.append('OUTP ON')
else:
commands.append('OUTP OFF')
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
self.instr.write(command_string)
def set_arbitrary(self, freq, low_volt, high_volt, output_state=True):
commands = ['FUNC USER',
'BURS:STAT OFF',
'SWE:STAT OFF',
'FM:STAT OFF',
'FREQ {0}'.format(freq),
'VOLT:HIGH {0}'.format(high_volt),
'VOLT:LOW {0}'.format(low_volt),
]
if output_state is True:
commands.append('OUTP ON')
else:
commands.append('OUTP OFF')
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
self.instr.write(command_string)
def read_all_errors(self):
done = False
while not done:
err = self.instr.query('SYST:ERR?')
print(err)
if err[:2] == '+0':
done = True
class TektronixTDS1002(object):
def __init__(self):
self.instr = self.open_instrument()
def open_instrument(self):
resource_list = resource_manager.list_resources()
gpib_address_list = filter(lambda x: x[:4] == 'GPIB', resource_list)
for addr in gpib_address_list:
instr = resource_manager.open_resource(addr)
idn = instr.query('*IDN?')
if 'TEKTRONIX,TDS 1002' in idn:
return instr
else:
raise GPIBError('TektronicsTDS1002 oscilloscope not in GPIB device list')
def get_data(self, channel=1):
hor_pos = float(self.instr.query('HOR:MAI:POS?'))
hor_scale = float(self.instr.query('HOR:MAI:SCA?'))
ch1_pos = float(self.instr.query('CH{0}:POS?'.format(channel)))
ch1_sca = float(self.instr.query('CH{0}:SCA?'.format(channel)))
commands = ['DATA:WIDTH 1',
'DATA:STAR 1',
'DATA:STOP 2500',
'DATA:SOU CH{0}'.format(channel),
'CURV?']
command_string = '\r\n'.join(commands)
self.instr.write(command_string)
instr.read_raw()[6:-1]
data = np.fromstring(data, dtype=np.int8)
data_scaled = (np.array(data, dtype='float')*(10.0/2**8) - ch1_pos)*ch1_sca
time_array = np.arange(len(data_scaled), dtype='float')*10.0*hor_scale/len(data_scaled)
return time_array, data_scaled
def get_save_data(self, file_path, channel=1):
hor_pos = float(self.instr.query('HOR:MAI:POS?'))
hor_scale = float(self.instr.query('HOR:MAI:SCA?'))
ch1_pos = float(self.instr.query('CH{0}:POS?'.format(channel)))
ch1_sca = float(self.instr.query('CH{0}:SCA?'.format(channel)))
commands = ['DATA:WIDTH 1',
'DATA:STAR 1',
'DATA:STOP 2500',
'DATA:SOU CH{0}'.format(channel),
'CURV?']
command_string = '\r\n'.join(commands)
self.instr.write(command_string)
instr.read_raw()[6:-1]
data = np.fromstring(data, dtype=np.int8)
data_scaled = (np.array(data, dtype='float')*(10.0/2**8) - ch1_pos)*ch1_sca
time_array = np.arange(len(data_scaled), dtype='float')*10.0*hor_scale/len(data_scaled)
np.savetxt(file_path + '\\' + datetime.now().strftime('%Y_%m_%d_%H_%M_%S') + '.txt', (time_array, data_scaled), fmt='%1.4e')
class TektronixTDS2012C(TektronixTDS1002):
def __init__(self):
self.instr = self.open_instrument()
super(TektronixTDS2012C, self).__init__()
def open_instrument(self):
resource_list = resource_manager.list_resources()
gpib_address_list = filter(lambda x: x[:3] == 'USB', resource_list)
for addr in gpib_address_list:
instr = resource_manager.open_resource(addr)
idn = instr.query('*IDN?')
if 'TEKTRONIX,TDS 2012C' in idn:
return instr
else:
raise GPIBError('TektronixTDS2012C oscilloscope not in USB device list')
class NewportESP300(object):
def __init__(self):
self.instr = self.open_instrument()
def open_instrument(self):
resource_list = resource_manager.list_resources()
gpib_address_list = filter(lambda x: x[:4] == 'GPIB', resource_list)
for addr in gpib_address_list:
instr = resource_manager.open_resource(addr)
idn = instr.query('*IDN?')
if 'ESP300 Version' in idn:
return instr
else:
raise GPIBError('ESP300 Motion Controller not in GPIB device list')
def read_position(self, num_axes=2):
for i in range(num_axes-1):
pos = self.instr.query(str(i+1)+'TP?')
print('Pos' + str(i+1) + ' ' + pos[:8])
def move_absposition(self, abs_pos, axis):
self.instr.write(str(int(axis))+'PA'+str(np.around(abs_pos, decimals=3)))
print('Set Axis ' + str(axis) + ' to ' + str(np.around(abs_pos, decimals=3)))
def read_all_errors(self):
done = False
while not done:
err = self.instr.query('TB?')
print(err)
if 'NO ERROR DETECTED' in err:
done = True
class AgilentN900A(object):
def __init__(self):
self.instr = self.open_instrument()
def open_instrument(self):
IP = '192.168.0.109'
instr = resource_manager.get_instrument('TCPIP::' + IP + '::INSTR')
return instr
def get_n_save_marker_pos(self, file_path, channel=1):
self.instr.write(':CALC:MARK1:X?')
freq = np.float(self.instr.read())
self.instr.write(':CALC:MARK1:Y?')
amp = np.float(self.instr.read())
self.instr.write(':AVER:STAT OFF')
arr_write = np.array([freq, amp])
f_handle = open(file_path + '\\' + datetime.now().strftime('%Y_%m_%d_%H_%M_%S') + '.txt', 'ab')
np.savetxt(f_handle, arr_write.reshape(1, arr_write.shape[0]))
f_handle.close()
def trigger_marker_avg(self,num_avg=100,freq=6.83468,span=25,ref_lev=15):
commands = [':FREQ:CENT {0}'.format(freq) + ' GHz',
':FREQ:SPAN {0}'.format(span) + ' MHz',
':DISP:WIND:TRAC:Y:RLEV {0}'.format(ref_lev) + ' dBm',
':CALC:MARK:MODE POS',
':CALC:MARK:CPS ON',
':TRIG:SOUR EXT1',
':TRIG:EXT1:LEV 1.0V',
':AVER:STAT ON',
':AVER:COUNT {0}'.format(num_avg)
]
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
self.instr.write(command_string)
class SRSSG384(object):
def __init__(self):
self.instr = self.open_instrument()
def open_instrument(self):
resource_list = resource_manager.list_resources()
gpib_address_list = filter(lambda x: x[:4] == 'GPIB', resource_list)
for addr in gpib_address_list:
instr = resource_manager.open_resource(addr)
idn = instr.query('*IDN?')
if 'Stanford Research Systems,SG384' in idn:
return instr
else:
raise GPIBError('SRS SG384 function generator not in GPIB device list')
def read_all_errors(self):
done = False
while not done:
err = self.instr.query('LERR?')
print(err)
if err[:1] == '0':
done = True
def set_continuous(self, freq, amplitude, offset, output_state=True):
commands = ['MODL 0',
'FREQ {0}'.format(freq)
]
if freq > 4.05e9:
commands.append('AMPH {0}'.format(amplitude))
if offset > 0.0:
print('HIGH FREQUENCY OUTPUT IS AC ONLY')
if output_state is True:
commands.append('ENBH 1')
else:
commands.append('ENBH 0')
elif freq < 62.5e6:
commands.extend(['AMPL {0}'.format(amplitude), 'OFSL {0}'.format(offset)])
if output_state is True:
commands.append('ENBL 1')
else:
commands.append('ENBL 0')
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
self.instr.write(command_string)
def set_continuous_Vpp(self, freq, amplitude, offset, output_state=True):
commands = ['MODL 0',
'FREQ {0}'.format(freq)
]
if freq > 4.05e9:
commands.append('AMPH {0} VPP'.format(amplitude))
if offset > 0.0:
print('HIGH FREQUENCY OUTPUT IS AC ONLY')
if output_state is True:
commands.append('ENBH 1')
else:
commands.append('ENBH 0')
elif freq < 62.5e6:
commands.extend(['AMPL {0} VPP'.format(amplitude), 'OFSL {0}'.format(offset)])
if output_state is True:
commands.append('ENBL 1')
else:
commands.append('ENBL 0')
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
self.instr.write(command_string)
def set_fm_ext(self, freq, amplitude, offset=0.0, peak_fm_deviation=None, output_state=True):
if peak_fm_deviation is None:
peak_fm_deviation = freq
commands = ['TYPE 1',
'MFNC 5',
'FREQ {0}'.format(freq),
'FDEV {0}'.format(peak_fm_deviation),
'MODL 1'
]
if freq > 4.05e9:
commands.append('AMPH {0}'.format(amplitude))
if offset > 0.0:
print('HIGH FREQUENCY OUTPUT IS AC ONLY')
if output_state is True:
commands.append('ENBH 1')
else:
commands.append('ENBH 0')
elif freq < 62.5e6:
commands.extend(['AMPL {0}'.format(amplitude), 'OFSL {0}'.format(offset)])
if output_state is True:
commands.append('ENBL 1')
else:
commands.append('ENBL 0')
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
self.instr.write(command_string)
def set_freqsweep_ext(self, amplitude, sweep_low_end, sweep_high_end, offset=0.0, output_state=True):
sweep_deviation = round(abs(sweep_low_end - sweep_high_end)/2.0,6)
freq = sweep_low_end + sweep_deviation
commands = ['TYPE 3',
'SFNC 5',
'FREQ {0}'.format(freq),
'SDEV {0}'.format(sweep_deviation),
'MODL 1'
]
if freq > 4.05e9:
commands.append('AMPH {0}'.format(amplitude))
if offset > 0.0:
print('HIGH FREQUENCY OUTPUT IS AC ONLY')
if output_state is True:
commands.append('ENBH 1')
else:
commands.append('ENBH 0')
elif freq < 62.5e6:
commands.extend(['AMPL {0}'.format(amplitude), 'OFSL {0}'.format(offset)])
if output_state is True:
commands.append('ENBL 1')
else:
commands.append('ENBL 0')
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
self.instr.write(command_string)
def set_output(self, state):
freq = float(self.instr.query('FREQ?'))
if freq > 4.05e9:
if state:
self.instr.write('ENBH 1')
else:
self.instr.write('ENBH 0')
elif freq < 62.5e6:
if state:
self.instr.write('ENBL 1')
else:
self.instr.write('ENBL 0')
def trigger_ListMode(self):
self.instr.write('*TRG')
def disable_all(self, disable):
commands = ['ENBH 0',
'ENBL 0',
'MODL 0'
]
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
if disable:
self.instr.write(command_string)
def __init__(self):
self.instr = self.open_instrument()
def open_instrument(self):
resource_list = resource_manager.list_resources()
gpib_address_list = filter(lambda x: x[:3] == 'USB', resource_list)
for addr in gpib_address_list:
instr = resource_manager.open_resource(addr)
idn = instr.query('*IDN?')
if 'Rigol Technologies,DG1022Z,DG1ZA184750979' in idn:
return instr
else:
raise GPIBError('Rigol DG1022Z function generator not in USB device list')
def set_output(self, state, channel=2):
if state:
self.instr.write(':OUTP{0} ON'.format(channel))
else:
self.instr.write(':OUTP{0} OFF'.format(channel))
def set_continuous(self, freq, amplitude, offset, phase, channel=2):
commands = [':SOUR{0}:APPL:SIN '.format(channel),
'{0},'.format(freq),
'{0},'.format(amplitude),
'{0},'.format(offset),
'{0}'.format(phase),
]
command_string = ''.join(commands)
logging.info(command_string)
self.instr.write(command_string)
def read_all_errors(self):
done = False
while not done:
err = self.instr.query('SYST:ERR?')
print(err)
if err[:2] == '+0':
done = True
class GPIBError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
agilent_33250a = Aglient33250A()
tektronixTDS1002 = TektronixTDS1002()
stanfordSG384 = SRSSG384()
rigolDG1022Z = RigolDG1022Z() | true | true |
f7222b09075fb515a1d8e1859b51f4740b705561 | 2,726 | py | Python | diverse/cache.py | sakkada/django-diverse | dbd13bb13c3663d6149a28d94daaf06c1e47b0f4 | [
"MIT"
] | null | null | null | diverse/cache.py | sakkada/django-diverse | dbd13bb13c3663d6149a28d94daaf06c1e47b0f4 | [
"MIT"
] | null | null | null | diverse/cache.py | sakkada/django-diverse | dbd13bb13c3663d6149a28d94daaf06c1e47b0f4 | [
"MIT"
] | null | null | null | import json
from django.db.models import FieldDoesNotExist
class BaseCache(object):
def get(self, version):
raise NotImplementedError
def set(self, version, data):
raise NotImplementedError
def delete(self, version):
raise NotImplementedError
class ModelCache(object):
update_value_immediately = True
delete_value_immediately = False
def get_specdata(self, version):
if version.data:
instance = version.data.get('instance', None)
field = version.data.get('field', None)
else:
instance, field = None, None
try:
cache = (instance._meta.get_field('%s_cache' % field.name)
if field else None)
except FieldDoesNotExist:
cache = None
return instance, cache.name if instance and cache else (None,)*2
def update_instance(self, instance, cachefield):
# do nothing if object still not in database
if not instance.pk:
return
# call update of queryset to disable models signals
queryset = instance.__class__.objects.filter(id=instance.pk)
queryset.update(**{cachefield: getattr(instance, cachefield),})
def get(self, version):
instance, cachefield = self.get_specdata(version)
value = {}
if instance and cachefield:
cache = getattr(instance, cachefield, '')
try:
value = json.loads(cache) if cache else {}
except ValueError:
value = {}
value = value.get(version.attrname, {})
return value
def set(self, version, data):
instance, cachefield = self.get_specdata(version)
if instance and cachefield:
cache = getattr(instance, cachefield, '')
try:
value = json.loads(cache) if cache else {}
except ValueError:
value = {}
value[version.attrname] = data
setattr(instance, cachefield, json.dumps(value))
if self.update_value_immediately:
self.update_instance(instance, cachefield)
return True
def delete(self, version):
instance, cachefield = self.get_specdata(version)
if instance and cachefield:
cache = getattr(instance, cachefield, '')
try:
value = json.loads(cache) if cache else {}
except ValueError:
value = {}
value.pop(version.attrname, None)
setattr(instance, cachefield, json.dumps(value) if value else '')
if self.delete_value_immediately:
self.update_instance(instance, cachefield)
| 30.629213 | 77 | 0.592443 | import json
from django.db.models import FieldDoesNotExist
class BaseCache(object):
def get(self, version):
raise NotImplementedError
def set(self, version, data):
raise NotImplementedError
def delete(self, version):
raise NotImplementedError
class ModelCache(object):
update_value_immediately = True
delete_value_immediately = False
def get_specdata(self, version):
if version.data:
instance = version.data.get('instance', None)
field = version.data.get('field', None)
else:
instance, field = None, None
try:
cache = (instance._meta.get_field('%s_cache' % field.name)
if field else None)
except FieldDoesNotExist:
cache = None
return instance, cache.name if instance and cache else (None,)*2
def update_instance(self, instance, cachefield):
if not instance.pk:
return
queryset = instance.__class__.objects.filter(id=instance.pk)
queryset.update(**{cachefield: getattr(instance, cachefield),})
def get(self, version):
instance, cachefield = self.get_specdata(version)
value = {}
if instance and cachefield:
cache = getattr(instance, cachefield, '')
try:
value = json.loads(cache) if cache else {}
except ValueError:
value = {}
value = value.get(version.attrname, {})
return value
def set(self, version, data):
instance, cachefield = self.get_specdata(version)
if instance and cachefield:
cache = getattr(instance, cachefield, '')
try:
value = json.loads(cache) if cache else {}
except ValueError:
value = {}
value[version.attrname] = data
setattr(instance, cachefield, json.dumps(value))
if self.update_value_immediately:
self.update_instance(instance, cachefield)
return True
def delete(self, version):
instance, cachefield = self.get_specdata(version)
if instance and cachefield:
cache = getattr(instance, cachefield, '')
try:
value = json.loads(cache) if cache else {}
except ValueError:
value = {}
value.pop(version.attrname, None)
setattr(instance, cachefield, json.dumps(value) if value else '')
if self.delete_value_immediately:
self.update_instance(instance, cachefield)
| true | true |
f7222b1be63bc03e221db6dbd871a5a3495af7f2 | 2,543 | py | Python | pyshield/io.py | heydude1337/pyshield | 8f103ccc160e6208c8a6754264168416f62373cb | [
"MIT"
] | 1 | 2021-02-08T14:58:31.000Z | 2021-02-08T14:58:31.000Z | pyshield/io.py | heydude1337/pyshield | 8f103ccc160e6208c8a6754264168416f62373cb | [
"MIT"
] | null | null | null | pyshield/io.py | heydude1337/pyshield | 8f103ccc160e6208c8a6754264168416f62373cb | [
"MIT"
] | 1 | 2020-06-25T22:23:19.000Z | 2020-06-25T22:23:19.000Z | """ Functions to read resource files (.yml, .xls/.xlsx and images) """
# -*- coding: utf-8 -*-
from os import path
import pandas as pd
import matplotlib.image as mpimg
import numpy as np
import yaml
import pyshield as ps
def load_item(item):
""" Load yaml, image or excel or return value as is. """
if isinstance(item, str):
if is_yaml(item):
try:
item = read_yaml(item)
except FileNotFoundError:
item = {}
if is_img(item):
item = read_img(item)
if is_excel(item):
item = read_excel(item)
if isinstance(item, dict):
return dict(zip(item.keys(), map(load_item, item.values())))
else:
return item
def _file_ext(file):
return path.splitext(file)[1].lower()
def is_yaml(file):
YAML = ('.yml','.yaml')
return isinstance(file, str) and _file_ext(file) in YAML
def is_img(file):
IMG = ('.png', '.jpeg', '.jpg', '.bmp')
return isinstance(file, str) and _file_ext(file) in IMG
def is_excel(file):
XLS = ('.csv', '.xls', '.xlsx')
return isinstance(file, str) and _file_ext(file) in XLS
def read_excel(file):
return pd.read_excel(file, sheet_name=None)
def read_img(file):
return np.flipud(mpimg.imread(file))
def read_yaml(file):
"""
Read yaml file and include files that are defined with the INCLUDE tag
"""
if not is_yaml(file):
raise IOError('File {0} is not a yaml file'.format(file))
folder = path.dirname(path.abspath(file))
stream = open(file, 'r')
yaml_dict = yaml.load(stream)
if yaml_dict is None: yaml_dict = {}
# append include files to dict
files = yaml_dict.pop('INCLUDE', [])
# read all included files and add items to dict
for file in files:
# take care of relative path
if not(path.isabs(file)):
file = path.join(folder, file)
append_dict = read_yaml(file) # recursive call
# check if key already existed warn if key will be overwritten
for key in append_dict.keys():
if key in yaml_dict.keys():
ps.logger.warning('Duplicate data found in file' + \
'{0} for key {1}'.format(file, key))
# append data (append_dict keys overwrite yaml_dict keys if duplicates)
yaml_dict = {**yaml_dict, **append_dict}
return yaml_dict
def write_yaml(file_name, dict_obj):
stream = open(file_name, 'w')
yaml.dump(dict_obj, stream=stream, default_flow_style=False)
| 25.178218 | 79 | 0.616201 |
from os import path
import pandas as pd
import matplotlib.image as mpimg
import numpy as np
import yaml
import pyshield as ps
def load_item(item):
if isinstance(item, str):
if is_yaml(item):
try:
item = read_yaml(item)
except FileNotFoundError:
item = {}
if is_img(item):
item = read_img(item)
if is_excel(item):
item = read_excel(item)
if isinstance(item, dict):
return dict(zip(item.keys(), map(load_item, item.values())))
else:
return item
def _file_ext(file):
return path.splitext(file)[1].lower()
def is_yaml(file):
YAML = ('.yml','.yaml')
return isinstance(file, str) and _file_ext(file) in YAML
def is_img(file):
IMG = ('.png', '.jpeg', '.jpg', '.bmp')
return isinstance(file, str) and _file_ext(file) in IMG
def is_excel(file):
XLS = ('.csv', '.xls', '.xlsx')
return isinstance(file, str) and _file_ext(file) in XLS
def read_excel(file):
return pd.read_excel(file, sheet_name=None)
def read_img(file):
return np.flipud(mpimg.imread(file))
def read_yaml(file):
if not is_yaml(file):
raise IOError('File {0} is not a yaml file'.format(file))
folder = path.dirname(path.abspath(file))
stream = open(file, 'r')
yaml_dict = yaml.load(stream)
if yaml_dict is None: yaml_dict = {}
files = yaml_dict.pop('INCLUDE', [])
for file in files:
if not(path.isabs(file)):
file = path.join(folder, file)
append_dict = read_yaml(file)
for key in append_dict.keys():
if key in yaml_dict.keys():
ps.logger.warning('Duplicate data found in file' + \
'{0} for key {1}'.format(file, key))
yaml_dict = {**yaml_dict, **append_dict}
return yaml_dict
def write_yaml(file_name, dict_obj):
stream = open(file_name, 'w')
yaml.dump(dict_obj, stream=stream, default_flow_style=False)
| true | true |
f7222b3625e9519e955bd6e39ca0f3136506ae0d | 18,448 | py | Python | plipify/_version.py | volkamerlab/plipify | c7b68bbf325aac904969867c7fbed5179f9670b7 | [
"MIT"
] | 6 | 2022-01-19T13:31:57.000Z | 2022-02-09T05:48:08.000Z | plipify/_version.py | volkamerlab/plipify | c7b68bbf325aac904969867c7fbed5179f9670b7 | [
"MIT"
] | 4 | 2021-10-13T08:45:33.000Z | 2022-02-11T15:56:09.000Z | plipify/_version.py | volkamerlab/plipify | c7b68bbf325aac904969867c7fbed5179f9670b7 | [
"MIT"
] | 2 | 2022-01-25T03:01:56.000Z | 2022-03-21T09:26:31.000Z |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "plipify/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, error, dirty,
    closest-tag, distance, date.  ``run_command`` is injectable (defaults
    to the module-level run_command).
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    # First verify that `root` really is under git control.
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                              % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                              % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a "+" if we don't already have one, else return a ".".

    PEP 440 allows a single "+" to open the local version segment; any
    further additions must be joined with ".".
    """
    # "closest-tag" may be absent *or* explicitly None (no tags found);
    # coerce both to "" so the `in` test cannot raise TypeError.
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]].  Note that if you get a tagged
    build and then dirty it, you'll get TAG+0.gHEX.dirty.

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tags at all, synthesize an "untagged" version
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        # append the local segment: +DISTANCE.gHEX[.dirty]
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    distance = pieces["distance"]
    if not tag:
        # exception #1: no tags at all
        return "0.post.dev%d" % distance
    # A zero distance means we are exactly on the tag.
    return tag + (".post.dev%d" % distance if distance else "")
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX].

    The ".dev0" means dirty.  Note that .dev0 sorts backwards (a dirty tree
    will appear "older" than the corresponding clean one), but you shouldn't
    be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    distance = pieces["distance"]
    dirty = pieces["dirty"]
    if tag:
        version = tag
        if distance or dirty:
            version += ".post%d" % distance
            if dirty:
                version += ".dev0"
            version += plus_or_dot(pieces)
            version += "g%s" % pieces["short"]
        return version
    # exception #1: no tags at all
    version = "0.post%d" % distance
    if dirty:
        version += ".dev0"
    return version + "+g%s" % pieces["short"]
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]].

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    # (Docstring typo fixed: "Eexceptions" -> "Exceptions".)
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
    else:
        # exception #1: no tags at all
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
    return rendered
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"]:
            version += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1: fall back to the bare hex revision
        version = pieces["short"]
    return version + ("-dirty" if pieces["dirty"] else "")
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1: fall back to the bare hex revision
        version = pieces["short"]
    return version + ("-dirty" if pieces["dirty"] else "")
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # No version could be computed; propagate the error verbatim.
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}

    if not style or style == "default":
        style = "pep440"  # the default

    # Dispatch table instead of an if/elif ladder; every renderer maps a
    # pieces dict to a version string.
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)

    return {"version": renderers[style](pieces),
            "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so."""
    cfg = get_config()

    # Strategy 1: keywords expanded by git-archive (works without git and
    # without importing anything else).
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          cfg.verbose)
    except NotThisMethod:
        pass

    # Locate the project root by walking up one directory per path segment
    # of versionfile_source.  __file__ can be missing in py2exe/bbfreeze and
    # other frozen environments, in which case only keywords would work.
    try:
        root = os.path.realpath(__file__)
        for _segment in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}

    # Strategy 2: ask git directly via 'git describe'.
    try:
        return render(git_pieces_from_vcs(cfg.tag_prefix, root, cfg.verbose),
                      cfg.style)
    except NotThisMethod:
        pass

    # Strategy 3: fall back to the parent directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root,
                                           cfg.verbose)
    except NotThisMethod:
        pass

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
| 35.408829 | 79 | 0.584508 |
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # These strings are filled in by `git archive` via export-subst; the
    # variable names themselves are significant (setup-time tooling greps
    # for them), so each must stay on its own line.
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
class VersioneerConfig:
    """Container for Versioneer configuration parameters (see get_config())."""
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values are hard-coded for this project rather than read from a
    # config file, so _version.py stays self-contained.
    settings = {
        "VCS": "git",
        "style": "pep440",
        "tag_prefix": "",
        "parentdir_prefix": "None",
        "versionfile_source": "plipify/_version.py",
        "verbose": False,
    }
    cfg = VersioneerConfig()
    for attr_name, attr_value in settings.items():
        setattr(cfg, attr_name, attr_value)
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""
# NOTE(review): not referenced in the visible code below — presumably kept
# for versioneer's setup-time tooling; confirm before removing.
LONG_VERSION_PY = {}
# Registry of VCS handler callables, populated by @register_vcs_handler:
# HANDLERS[vcs][method] -> handler function.
HANDLERS = {}
def register_vcs_handler(vcs, method):
    """Create a decorator that registers a function in HANDLERS[vcs][method]."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method] and return it unchanged."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the first runnable command in `commands` with `args`.

    Returns (stdout, returncode); (None, None) when no candidate executable
    could be launched, (None, returncode) when the process exited non-zero.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
                                 stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # This candidate is not installed; try the next one.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # for/else: every candidate raised ENOENT.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Walks up to three levels from *root* looking for a directory that
    starts with *parentdir_prefix*; the rest of that name is the version.
    Raises NotThisMethod when no level matches.
    """
    tried = []
    for _level in range(3):
        basename = os.path.basename(root)
        if basename.startswith(parentdir_prefix):
            return {"version": basename[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract the git-archive keyword values from the given _version.py file."""
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we scan for the assignments with a regexp instead.
    keywords = {}
    try:
        f = open(versionfile_abs, "r")
        for line in f.readlines():
            if line.strip().startswith("git_refnames ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["refnames"] = mo.group(1)
            if line.strip().startswith("git_full ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["full"] = mo.group(1)
            if line.strip().startswith("git_date ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["date"] = mo.group(1)
        f.close()
    except EnvironmentError:
        # Missing/unreadable file: return whatever was collected (possibly {}).
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords (expanded by git-archive)."""
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git's %ci output is "ISO-8601-like"; edit it into a compliant
        # form ("YYYY-MM-DD HH:MM:SS +ZZZZ" -> "YYYY-MM-DDTHH:MM:SS+ZZZZ").
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # Substitution markers never expanded: not a git-archive tarball.
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume version tags contain a digit, which filters out
        # refnames like "HEAD", "master" or "release".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags: version is "0+unknown", but the full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    Returns a "pieces" dict (keys: long, short, error, dirty, closest-tag,
    distance, date) describing the current checkout.  ``run_command`` is
    injectable (defaults to the module-level run_command).
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    # First verify that `root` really is under git control.
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # Yields TAG-NUM-gHEX[-dirty] when a matching tag exists, else HEX[-dirty].
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                              % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                              % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a "+" if we don't already have one, else return a ".".

    PEP 440 allows a single "+" to open the local version segment; any
    further additions must be joined with ".".
    """
    # "closest-tag" may be absent *or* explicitly None (no tags found);
    # coerce both to "" so the `in` test cannot raise TypeError.
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build TAG[+DISTANCE.gHEX[.dirty]]; with no tags,
    0+untagged.DISTANCE.gHEX[.dirty] (post-release "local version identifier").
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += plus_or_dot(pieces)
            rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception #1: no tags at all
        rendered = "0+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- no -dirty marker; 0.post.devDISTANCE without tags."""
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += ".post.dev%d" % pieces["distance"]
    else:
        # exception #1: no tags at all
        rendered = "0.post.dev%d" % pieces["distance"]
    return rendered
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX]; ".dev0" marks a dirty tree."""
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
            rendered += plus_or_dot(pieces)
            rendered += "g%s" % pieces["short"]
    else:
        # exception #1: no tags at all
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += "+g%s" % pieces["short"]
    return rendered
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]]; ".dev0" marks a dirty tree."""
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
    else:
        # exception #1: no tags at all
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
    return rendered
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty --always'."""
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1: no tags, bare hex revision (no 'g' prefix)
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty]; distance/hash are unconditional."""
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1: no tags, bare hex revision (no 'g' prefix)
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # No version could be computed; propagate the error verbatim.
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    if style == "pep440":
        rendered = render_pep440(pieces)
    elif style == "pep440-pre":
        rendered = render_pep440_pre(pieces)
    elif style == "pep440-post":
        rendered = render_pep440_post(pieces)
    elif style == "pep440-old":
        rendered = render_pep440_old(pieces)
    elif style == "git-describe":
        rendered = render_git_describe(pieces)
    elif style == "git-describe-long":
        rendered = render_git_describe_long(pieces)
    else:
        raise ValueError("unknown style '%s'" % style)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so."""
    cfg = get_config()
    verbose = cfg.verbose
    # Strategy 1: keywords expanded by git-archive.
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # Walk up one directory per path segment of versionfile_source to
        # find the project root (where .git might live).
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # __file__ may be missing in frozen environments (py2exe etc.).
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    # Strategy 2: ask git directly via 'git describe'.
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    # Strategy 3: fall back to the parent directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
| true | true |
f7222d14897ee68ad7e800ceebf5fae81a30b3c1 | 555 | py | Python | profit/wallet/derivation_record.py | zcomputerwiz/profit-blockchain | d6d4337ea7c418c66f05f22a263e94190452aed6 | [
"Apache-2.0"
] | 7 | 2022-03-15T01:33:35.000Z | 2022-03-26T21:29:45.000Z | profit/wallet/derivation_record.py | zcomputerwiz/profit-blockchain | d6d4337ea7c418c66f05f22a263e94190452aed6 | [
"Apache-2.0"
] | 3 | 2022-03-19T23:02:18.000Z | 2022-03-19T23:02:19.000Z | profit/wallet/derivation_record.py | zcomputerwiz/profit-blockchain | d6d4337ea7c418c66f05f22a263e94190452aed6 | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
from blspy import G1Element
from profit.types.blockchain_format.sized_bytes import bytes32
from profit.util.ints import uint32
from profit.wallet.util.wallet_types import WalletType
@dataclass(frozen=True)
class DerivationRecord:
"""
These are records representing a puzzle hash, which is generated from a
public key, derivation index, and wallet type. Stored in the puzzle_store.
"""
index: uint32
puzzle_hash: bytes32
pubkey: G1Element
wallet_type: WalletType
wallet_id: uint32
| 25.227273 | 78 | 0.769369 | from dataclasses import dataclass
from blspy import G1Element
from profit.types.blockchain_format.sized_bytes import bytes32
from profit.util.ints import uint32
from profit.wallet.util.wallet_types import WalletType
@dataclass(frozen=True)
class DerivationRecord:
    """Immutable record pairing a derivation index with its puzzle hash,
    public key, wallet type and owning wallet id."""
    # Derivation index at which the key/puzzle hash was generated.
    index: uint32
    # The puzzle hash this record represents.
    puzzle_hash: bytes32
    # BLS public key for this derivation index.
    pubkey: G1Element
    # Kind of wallet the record belongs to.
    wallet_type: WalletType
    # ID of the owning wallet.
    wallet_id: uint32
| true | true |
f7222e97526e2e34c659fdecffa4f7af1b6f47c0 | 8,349 | py | Python | parsl/providers/ad_hoc/ad_hoc.py | cylondata/parsl | 00ff9372bd841dafef8a0b3566c79ffe68f0e367 | [
"Apache-2.0"
] | 323 | 2017-07-28T21:31:27.000Z | 2022-03-05T13:06:05.000Z | parsl/providers/ad_hoc/ad_hoc.py | cylondata/parsl | 00ff9372bd841dafef8a0b3566c79ffe68f0e367 | [
"Apache-2.0"
] | 1,286 | 2017-06-01T16:50:00.000Z | 2022-03-31T16:45:14.000Z | parsl/providers/ad_hoc/ad_hoc.py | cylondata/parsl | 00ff9372bd841dafef8a0b3566c79ffe68f0e367 | [
"Apache-2.0"
] | 113 | 2017-06-03T11:38:40.000Z | 2022-03-26T16:43:05.000Z | import logging
import os
import time
from parsl.channels import LocalChannel
from parsl.launchers import SimpleLauncher
from parsl.providers.provider_base import ExecutionProvider, JobStatus, JobState
from parsl.providers.error import ScriptPathError
from parsl.utils import RepresentationMixin
logger = logging.getLogger(__name__)
class AdHocProvider(ExecutionProvider, RepresentationMixin):
    """ Ad-hoc execution provider

    This provider is used to provision execution resources over one or more ad hoc nodes
    that are each accessible over a Channel (say, ssh) but otherwise lack a cluster scheduler.

    Parameters
    ----------
    channels : list of Channel objects
      Each channel represents a connection to a remote node

    worker_init : str
      Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.
      Since this provider calls the same worker_init across all nodes in the ad-hoc cluster, it is
      recommended that a single script is made available across nodes such as ~/setup.sh that can
      be invoked.

    cmd_timeout : int
      Duration for which the provider will wait for a command to be invoked on a remote system.
      Defaults to 30s

    parallelism : float
      Determines the ratio of workers to tasks as managed by the strategy component

    move_files : bool or None
      Whether to push the generated submit script over the channel before
      launching. When None, scripts are pushed for non-local channels.
    """

    def __init__(self,
                 channels=[],
                 worker_init='',
                 cmd_timeout=30,
                 parallelism=1,
                 move_files=None):

        self.channels = channels
        self._label = 'ad-hoc'
        self.worker_init = worker_init
        self.cmd_timeout = cmd_timeout
        # Bug fix: previously a literal 1 was stored here, silently ignoring
        # the caller's ``parallelism`` argument. The default (1) preserves
        # the old behavior for callers that did not pass it.
        self.parallelism = parallelism
        self.move_files = move_files
        self.launcher = SimpleLauncher()
        self.init_blocks = self.min_blocks = self.max_blocks = len(channels)

        # This will be overridden by the DFK to the rundirs.
        self.script_dir = "."

        # In ad-hoc mode, nodes_per_block should be 1
        self.nodes_per_block = 1

        # Dictionary that keeps track of jobs, keyed on job_id
        self.resources = {}

        # Infinite generator yielding the next idle channel (or None).
        self.least_loaded = self._least_loaded()
        logger.debug("AdHoc provider initialized")

    def _write_submit_script(self, script_string, script_filename):
        '''
        Write the generated submit script out to a file.

        Parameters
        ----------
        script_string: (string)
            The template string to be used for the writing submit script

        script_filename: (string)
            Name of the submit script

        Returns
        -------
        None: on success

        Raises
        ------
        ScriptPathError
            Unable to write submit script out
        '''
        try:
            with open(script_filename, 'w') as f:
                f.write(script_string)
        except IOError as e:
            logger.error("Failed writing to submit script: %s", script_filename)
            raise ScriptPathError(script_filename, e)
        return None

    def _least_loaded(self):
        """ Find channels that are not in use

        Yields
        -------
        channel : Channel object
        None : When there are no more available channels
        """
        while True:
            # Count RUNNING jobs per channel; idle channels stay at 0.
            channel_counts = {channel: 0 for channel in self.channels}
            for job_id in self.resources:
                channel = self.resources[job_id]['channel']
                if self.resources[job_id]['status'].state == JobState.RUNNING:
                    channel_counts[channel] = channel_counts.get(channel, 0) + 1
                else:
                    channel_counts[channel] = channel_counts.get(channel, 0)
            logger.debug("Channel_counts : {}".format(channel_counts))
            if 0 not in channel_counts.values():
                # Every channel is busy; tell the caller to back off.
                yield None

            for channel in channel_counts:
                if channel_counts[channel] == 0:
                    yield channel

    def submit(self, command, tasks_per_node, job_name="parsl.adhoc"):
        ''' Submits the command onto a channel from the list of channels

        Submit returns an ID that corresponds to the task that was just submitted.

        Parameters
        ----------
        command: (String)
            Commandline invocation to be made on the remote side.

        tasks_per_node: (int)
            command invocations to be launched per node

        job_name: (String)
            Name of the job. Default : parsl.adhoc

        Returns
        -------
        None
            At capacity, cannot provision more
        job_id: (string)
            Identifier for the job
        '''
        channel = next(self.least_loaded)
        if channel is None:
            logger.warning("All Channels in Ad-Hoc provider are in use")
            return None

        job_name = "{0}.{1}".format(job_name, time.time())

        # Set script path
        script_path = "{0}/{1}.sh".format(self.script_dir, job_name)
        script_path = os.path.abspath(script_path)

        wrap_command = self.worker_init + '\n' + self.launcher(command, tasks_per_node, self.nodes_per_block)

        self._write_submit_script(wrap_command, script_path)

        job_id = None
        remote_pid = None
        final_cmd = None
        if (self.move_files is None and not isinstance(channel, LocalChannel)) or (self.move_files):
            logger.debug("Pushing start script")
            script_path = channel.push_file(script_path, channel.script_dir)

        # Bash would not return until the streams are closed. So we redirect to
        # an outs file and echo the background job's PID for bookkeeping.
        final_cmd = 'bash {0} > {0}.out 2>&1 & \n echo "PID:$!" '.format(script_path)
        retcode, stdout, stderr = channel.execute_wait(final_cmd, self.cmd_timeout)
        for line in stdout.split('\n'):
            if line.startswith("PID:"):
                remote_pid = line.split("PID:")[1].strip()
                job_id = remote_pid
        if job_id is None:
            logger.warning("Channel failed to start remote command/retrieve PID")

        self.resources[job_id] = {'job_id': job_id,
                                  'status': JobStatus(JobState.RUNNING),
                                  'cmd': final_cmd,
                                  'channel': channel,
                                  'remote_pid': remote_pid}

        return job_id

    def status(self, job_ids):
        """ Get status of the list of jobs with job_ids

        Parameters
        ----------
        job_ids : list of strings
          List of job id strings

        Returns
        -------
        list of JobStatus objects
        """
        for job_id in job_ids:
            channel = self.resources[job_id]['channel']
            # `ps --pid` exits non-zero once the remote process is gone.
            status_command = "ps --pid {} | grep {}".format(self.resources[job_id]['job_id'],
                                                            self.resources[job_id]['cmd'].split()[0])
            retcode, stdout, stderr = channel.execute_wait(status_command)
            if retcode != 0 and self.resources[job_id]['status'].state == JobState.RUNNING:
                self.resources[job_id]['status'] = JobStatus(JobState.FAILED)

        return [self.resources[job_id]['status'] for job_id in job_ids]

    def cancel(self, job_ids):
        """ Cancel a list of jobs with job_ids

        Parameters
        ----------
        job_ids : list of strings
          List of job id strings

        Returns
        -------
        list of confirmation bools: [True, False...]
        """
        logger.debug("Cancelling jobs: {}".format(job_ids))
        rets = []
        for job_id in job_ids:
            channel = self.resources[job_id]['channel']
            # Kill the whole remote process group of the tracked PID.
            cmd = "kill -TERM -$(ps -o pgid= {} | grep -o '[0-9]*')".format(self.resources[job_id]['job_id'])
            retcode, stdout, stderr = channel.execute_wait(cmd)
            if retcode == 0:
                rets.append(True)
            else:
                rets.append(False)
            self.resources[job_id]['status'] = JobStatus(JobState.COMPLETED)

        return rets

    @property
    def scaling_enabled(self):
        return True

    @property
    def label(self):
        return self._label

    @property
    def status_polling_interval(self):
        return 10
| 33.130952 | 109 | 0.592526 | import logging
import os
import time
from parsl.channels import LocalChannel
from parsl.launchers import SimpleLauncher
from parsl.providers.provider_base import ExecutionProvider, JobStatus, JobState
from parsl.providers.error import ScriptPathError
from parsl.utils import RepresentationMixin
logger = logging.getLogger(__name__)
class AdHocProvider(ExecutionProvider, RepresentationMixin):
    """Ad-hoc execution provider: provisions workers over a list of Channels
    (e.g. ssh connections to nodes that lack a cluster scheduler)."""
    def __init__(self,
                 channels=[],
                 worker_init='',
                 cmd_timeout=30,
                 parallelism=1,
                 move_files=None):
        self.channels = channels
        self._label = 'ad-hoc'
        self.worker_init = worker_init
        self.cmd_timeout = cmd_timeout
        # NOTE(review): the `parallelism` argument is ignored here and a
        # literal 1 is stored instead — looks like a bug; confirm intent.
        self.parallelism = 1
        self.move_files = move_files
        self.launcher = SimpleLauncher()
        self.init_blocks = self.min_blocks = self.max_blocks = len(channels)
        # Overridden later (externally) to the run directory.
        self.script_dir = "."
        # In ad-hoc mode one node per block.
        self.nodes_per_block = 1
        # Job bookkeeping, keyed on job_id.
        self.resources = {}
        # Infinite generator yielding the next idle channel (or None).
        self.least_loaded = self._least_loaded()
        logger.debug("AdHoc provider initialized")
    def _write_submit_script(self, script_string, script_filename):
        """Write script_string to script_filename; raises ScriptPathError on IO failure."""
        try:
            with open(script_filename, 'w') as f:
                f.write(script_string)
        except IOError as e:
            logger.error("Failed writing to submit script: %s", script_filename)
            raise (ScriptPathError(script_filename, e))
        return None
    def _least_loaded(self):
        """Generator yielding a channel with no RUNNING job, or None when all are busy."""
        while True:
            channel_counts = {channel: 0 for channel in self.channels}
            for job_id in self.resources:
                channel = self.resources[job_id]['channel']
                if self.resources[job_id]['status'].state == JobState.RUNNING:
                    channel_counts[channel] = channel_counts.get(channel, 0) + 1
                else:
                    channel_counts[channel] = channel_counts.get(channel, 0)
            logger.debug("Channel_counts : {}".format(channel_counts))
            if 0 not in channel_counts.values():
                # Every channel is busy; tell the caller to back off.
                yield None
            for channel in channel_counts:
                if channel_counts[channel] == 0:
                    yield channel
    def submit(self, command, tasks_per_node, job_name="parsl.adhoc"):
        """Launch `command` on an idle channel; returns a job id (remote PID)
        or None when every channel is in use."""
        channel = next(self.least_loaded)
        if channel is None:
            logger.warning("All Channels in Ad-Hoc provider are in use")
            return None
        job_name = "{0}.{1}".format(job_name, time.time())
        script_path = "{0}/{1}.sh".format(self.script_dir, job_name)
        script_path = os.path.abspath(script_path)
        wrap_command = self.worker_init + '\n' + self.launcher(command, tasks_per_node, self.nodes_per_block)
        self._write_submit_script(wrap_command, script_path)
        job_id = None
        remote_pid = None
        final_cmd = None
        # Push the script to the remote side unless it is a local channel
        # (or the caller forced the behavior via move_files).
        if (self.move_files is None and not isinstance(channel, LocalChannel)) or (self.move_files):
            logger.debug("Pushing start script")
            script_path = channel.push_file(script_path, channel.script_dir)
        # Run in the background, redirecting streams, and echo the PID.
        final_cmd = 'bash {0} > {0}.out 2>&1 & \n echo "PID:$!" '.format(script_path)
        retcode, stdout, stderr = channel.execute_wait(final_cmd, self.cmd_timeout)
        for line in stdout.split('\n'):
            if line.startswith("PID:"):
                remote_pid = line.split("PID:")[1].strip()
                job_id = remote_pid
        if job_id is None:
            logger.warning("Channel failed to start remote command/retrieve PID")
        self.resources[job_id] = {'job_id': job_id,
                                  'status': JobStatus(JobState.RUNNING),
                                  'cmd': final_cmd,
                                  'channel': channel,
                                  'remote_pid': remote_pid}
        return job_id
    def status(self, job_ids):
        """Return a list of JobStatus objects for the given job ids."""
        for job_id in job_ids:
            channel = self.resources[job_id]['channel']
            # `ps --pid` exits non-zero once the remote process is gone.
            status_command = "ps --pid {} | grep {}".format(self.resources[job_id]['job_id'],
                                                            self.resources[job_id]['cmd'].split()[0])
            retcode, stdout, stderr = channel.execute_wait(status_command)
            if retcode != 0 and self.resources[job_id]['status'].state == JobState.RUNNING:
                self.resources[job_id]['status'] = JobStatus(JobState.FAILED)
        return [self.resources[job_id]['status'] for job_id in job_ids]
    def cancel(self, job_ids):
        """Cancel the given jobs; returns a list of per-job success booleans."""
        logger.debug("Cancelling jobs: {}".format(job_ids))
        rets = []
        for job_id in job_ids:
            channel = self.resources[job_id]['channel']
            # Kill the whole remote process group of the tracked PID.
            cmd = "kill -TERM -$(ps -o pgid= {} | grep -o '[0-9]*')".format(self.resources[job_id]['job_id'])
            retcode, stdout, stderr = channel.execute_wait(cmd)
            if retcode == 0:
                rets.append(True)
            else:
                rets.append(False)
            self.resources[job_id]['status'] = JobStatus(JobState.COMPLETED)
        return rets
    @property
    def scaling_enabled(self):
        return True
    @property
    def label(self):
        return self._label
    @property
    def status_polling_interval(self):
        return 10
| true | true |
f7222f402986219a0e09068b7d7e002f2a798ee8 | 6,931 | py | Python | streamer/node_base.py | shaka-bot/shaka-streamer | 60588ea0be83074d29538fa851338fc0183f1909 | [
"Apache-2.0"
] | 154 | 2019-08-29T16:53:24.000Z | 2022-02-25T00:29:56.000Z | streamer/node_base.py | shaka-bot/shaka-streamer | 60588ea0be83074d29538fa851338fc0183f1909 | [
"Apache-2.0"
] | 101 | 2019-08-30T17:34:51.000Z | 2022-03-02T18:46:22.000Z | streamer/node_base.py | shaka-bot/shaka-streamer | 60588ea0be83074d29538fa851338fc0183f1909 | [
"Apache-2.0"
] | 56 | 2019-09-08T17:47:22.000Z | 2022-02-23T17:35:11.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes for nodes."""
import abc
import enum
import os
import shlex
import subprocess
import sys
import threading
import time
import traceback
from . import node_base
from typing import Any, Dict, IO, List, Optional, Union
class ProcessStatus(enum.Enum):
    """Lifecycle state of a node's underlying subprocess or worker thread."""

    # Use number values so we can sort based on value.
    Finished = 0
    """The node has completed its task and shut down."""

    Running = 1
    """The node is still running."""

    Errored = 2
    """The node has failed."""
class NodeBase(object):
  """A base class for nodes that run a single subprocess."""

  @abc.abstractmethod
  def __init__(self) -> None:
    # The controlled subprocess; None until start() assigns it.
    self._process: Optional[subprocess.Popen] = None

  def __del__(self) -> None:
    # If the process isn't stopped by now, stop it here.  It is preferable to
    # explicitly call stop().
    self.stop(None)

  @abc.abstractmethod
  def start(self):
    """Start the subprocess.

    Should be overridden by the subclass to construct a command line, call
    self._create_process, and assign the result to self._process.
    """
    pass

  def _create_process(self,
                      args: Union[str, List[str]],
                      env: Optional[Dict[str, str]] = None,
                      merge_env: bool = True,
                      stdout: Union[int, IO[Any], None] = None,
                      stderr: Union[int, IO[Any], None] = None,
                      shell: bool = False, **kwargs) -> subprocess.Popen:
    """A central point to create subprocesses, so that we can debug the
    command-line arguments.

    Args:
      args: An array of strings if shell is False, or a single string if shell
        is True; the command line of the subprocess.
      env: A dictionary of environment variables to pass to the subprocess.
        (Default is None rather than a mutable {} literal, which is a classic
        Python pitfall even when the dict is never mutated.)
      merge_env: If true, merge env with the parent process environment.
      shell: If true, args must be a single string, which will be executed as a
        shell command.

    Returns:
      The Popen object of the subprocess.
    """
    env = {} if env is None else env
    if merge_env:
      child_env = os.environ.copy()
      child_env.update(env)
    else:
      child_env = env

    # Print arguments formatted as output from bash -x would be.
    # This makes it easy to see the arguments and easy to copy/paste them for
    # debugging in a shell.
    if shell:
      assert isinstance(args, str)
      print('+ ' + args)
    else:
      assert isinstance(args, list)
      print('+ ' + ' '.join([shlex.quote(arg) for arg in args]))

    return subprocess.Popen(args,
                            env=child_env,
                            stdin=subprocess.DEVNULL,
                            stdout=stdout, stderr=stderr,
                            shell=shell, **kwargs)

  def check_status(self) -> "ProcessStatus":
    """Returns the current ProcessStatus of the node."""
    if not self._process:
      raise ValueError('Must have a process to check')

    # poll() updates returncode without blocking.
    self._process.poll()
    if self._process.returncode is None:
      return ProcessStatus.Running

    if self._process.returncode == 0:
      return ProcessStatus.Finished
    else:
      return ProcessStatus.Errored

  def stop(self, status: "Optional[ProcessStatus]") -> None:
    """Stop the subprocess if it's still running."""
    if self._process:
      # Slightly more polite than kill. Try this first.
      self._process.terminate()

      if self.check_status() == ProcessStatus.Running:
        # If it's not dead yet, wait 1 second.
        time.sleep(1)

      if self.check_status() == ProcessStatus.Running:
        # If it's still not dead, use kill.
        self._process.kill()

      # Wait for the process to die and read its exit code. There is no way
      # to ignore a kill signal, so this will happen quickly. If we don't do
      # this, it can create a zombie process.
      self._process.wait()
class PolitelyWaitOnFinish(node_base.NodeBase):
  """A mixin that makes stop() wait for the subprocess if status is Finished.

  This is as opposed to the base class behavior, in which stop() forces
  the subprocesses of a node to terminate.
  """

  def stop(self, status: Optional[ProcessStatus]) -> None:
    # Only wait politely when the pipeline finished cleanly; otherwise defer
    # to the base class, which terminates the subprocess.
    if self._process and status == ProcessStatus.Finished:
      try:
        print('Waiting for', self.__class__.__name__)
        self._process.wait(timeout=300)  # 5m timeout
      except subprocess.TimeoutExpired:
        traceback.print_exc()  # print the exception
        # Fall through.

    # Base class terminates/kills the process if it is still alive.
    super().stop(status)
class ThreadedNodeBase(NodeBase):
  """A base class for nodes that run a thread.

  The thread repeats some callback in a background thread.
  """

  def __init__(self, thread_name: str, continue_on_exception: bool, sleep_time: float):
    """
    Args:
      thread_name: Name for the background thread (appears in tracebacks).
      continue_on_exception: If True, an exception in one pass is logged and
        the loop keeps running; otherwise the node stops with Errored status.
      sleep_time: Seconds to sleep between passes.
    """
    super().__init__()
    self._status = ProcessStatus.Finished
    self._thread_name = thread_name
    self._continue_on_exception = continue_on_exception
    self._sleep_time = sleep_time
    self._thread = threading.Thread(target=self._thread_main, name=thread_name)

  def _thread_main(self) -> None:
    while self._status == ProcessStatus.Running:
      try:
        self._thread_single_pass()
      except Exception:
        # Fixed from a bare "except:", which would also swallow SystemExit
        # and KeyboardInterrupt; catch only ordinary exceptions here.
        print('Exception in', self._thread_name, '-', sys.exc_info())
        if self._continue_on_exception:
          print(self.__class__.__name__+": 'Continuing.'")
        else:
          print(self.__class__.__name__+": 'Quitting.'")
          self._status = ProcessStatus.Errored
          return

      # Wait a little bit before performing the next pass.
      time.sleep(self._sleep_time)

  @abc.abstractmethod
  def _thread_single_pass(self) -> None:
    """Runs a single step of the thread loop.

    This is implemented by subclasses to do whatever it is they do.  It will be
    called repeatedly by the base class from the node's background thread.  If
    this method raises an exception, the behavior depends on the
    continue_on_exception argument in the constructor.  If
    continue_on_exception is true, the thread will continue.  Otherwise, an
    exception will stop the thread and therefore the node.
    """
    pass

  def start(self) -> None:
    self._status = ProcessStatus.Running
    self._thread.start()

  def stop(self, status: Optional[ProcessStatus]) -> None:
    self._status = ProcessStatus.Finished
    # Guard against stop() before start(): joining a never-started thread
    # raises RuntimeError.  Joining a finished thread is a harmless no-op.
    if self._thread.is_alive():
      self._thread.join()

  def check_status(self) -> ProcessStatus:
    return self._status
| 32.848341 | 87 | 0.664118 |
import abc
import enum
import os
import shlex
import subprocess
import sys
import threading
import time
import traceback
from . import node_base
from typing import Any, Dict, IO, List, Optional, Union
class ProcessStatus(enum.Enum):
Finished = 0
Running = 1
Errored = 2
class NodeBase(object):
@abc.abstractmethod
def __init__(self) -> None:
self._process: Optional[subprocess.Popen] = None
def __del__(self) -> None:
# explicitly call stop().
self.stop(None)
@abc.abstractmethod
def start(self):
pass
def _create_process(self,
args: Union[str, List[str]],
env: Dict[str, str] = {},
merge_env: bool = True,
stdout: Union[int, IO[Any], None] = None,
stderr: Union[int, IO[Any], None] = None,
shell: bool = False, **kwargs) -> subprocess.Popen:
if merge_env:
child_env = os.environ.copy()
child_env.update(env)
else:
child_env = env
# Print arguments formatted as output from bash -x would be.
# This makes it easy to see the arguments and easy to copy/paste them for
# debugging in a shell.
if shell:
assert isinstance(args, str)
print('+ ' + args)
else:
assert type(args) is list
print('+ ' + ' '.join([shlex.quote(arg) for arg in args]))
return subprocess.Popen(args,
env=child_env,
stdin=subprocess.DEVNULL,
stdout=stdout, stderr=stderr,
shell=shell, **kwargs)
def check_status(self) -> ProcessStatus:
if not self._process:
raise ValueError('Must have a process to check')
self._process.poll()
if self._process.returncode is None:
return ProcessStatus.Running
if self._process.returncode == 0:
return ProcessStatus.Finished
else:
return ProcessStatus.Errored
def stop(self, status: Optional[ProcessStatus]) -> None:
if self._process:
# Slightly more polite than kill. Try this first.
self._process.terminate()
if self.check_status() == ProcessStatus.Running:
# If it's not dead yet, wait 1 second.
time.sleep(1)
if self.check_status() == ProcessStatus.Running:
self._process.kill()
# Wait for the process to die and read its exit code. There is no way
# to ignore a kill signal, so this will happen quickly. If we don't do
self._process.wait()
class PolitelyWaitOnFinish(node_base.NodeBase):
def stop(self, status: Optional[ProcessStatus]) -> None:
if self._process and status == ProcessStatus.Finished:
try:
print('Waiting for', self.__class__.__name__)
self._process.wait(timeout=300)
except subprocess.TimeoutExpired:
traceback.print_exc()
super().stop(status)
class ThreadedNodeBase(NodeBase):
def __init__(self, thread_name: str, continue_on_exception: bool, sleep_time: float):
super().__init__()
self._status = ProcessStatus.Finished
self._thread_name = thread_name
self._continue_on_exception = continue_on_exception
self._sleep_time = sleep_time
self._thread = threading.Thread(target=self._thread_main, name=thread_name)
def _thread_main(self) -> None:
while self._status == ProcessStatus.Running:
try:
self._thread_single_pass()
except:
print('Exception in', self._thread_name, '-', sys.exc_info())
if self._continue_on_exception:
print(self.__class__.__name__+": 'Continuing.'")
else:
print(self.__class__.__name__+": 'Quitting.'")
self._status = ProcessStatus.Errored
return
time.sleep(self._sleep_time)
@abc.abstractmethod
def _thread_single_pass(self) -> None:
pass
def start(self) -> None:
self._status = ProcessStatus.Running
self._thread.start()
def stop(self, status: Optional[ProcessStatus]) -> None:
self._status = ProcessStatus.Finished
self._thread.join()
def check_status(self) -> ProcessStatus:
return self._status
| true | true |
f722306e60c10161ddfe9d5743fd65dcb873c5bb | 3,778 | py | Python | scrubber/models.py | adamyi/CrowdAnnotator | 5b5e4703899d2f1f4640b9cf76894ccb3943f07f | [
"Apache-2.0"
] | 3 | 2020-10-04T12:09:46.000Z | 2021-09-20T19:25:11.000Z | scrubber/models.py | adamyi/CrowdAnnotator | 5b5e4703899d2f1f4640b9cf76894ccb3943f07f | [
"Apache-2.0"
] | 2 | 2020-06-05T21:18:09.000Z | 2021-06-10T21:33:18.000Z | scrubber/models.py | adamyi/CrowdAnnotator | 5b5e4703899d2f1f4640b9cf76894ccb3943f07f | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
from django.db import models
import re
import json
# import nlp
try:
import Queue as Q #python version < 3.0
except ImportError:
import queue as Q #python3.*
class wordBlock():
    """A [start, end] span of text tagged with a kind.

    Ordering treats blocks as intervals: one block is "less than" another
    only when it ends strictly before the other begins.
    """

    def __init__(self, start, end, kind):
        self.start = start
        self.end = end
        self.kind = kind

    def __lt__(self, other):  # operator <
        # Strict interval ordering: self lies completely before other.
        return self.end < other.start

    def __cmp__(self, other):
        # Bug fix: the original called the builtin cmp(), which was removed in
        # Python 3 (NameError).  Compute the same -1/0/1 three-way result of
        # cmp(self.start, other.end) directly.
        return (self.start > other.end) - (self.start < other.end)
class sentence(models.Model):
    """A sentence of text to be scrubbed/annotated by crowd workers."""
    originalText = models.TextField(blank=True)
    annotatedText = models.TextField(blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    @classmethod
    def create(cls, originalText):
        """Build an (unsaved) sentence with whitespace-normalized text.

        Bug fix: the @classmethod was missing its ``cls`` parameter, so the
        class object itself was bound to ``originalText`` and the method
        could never be called with a text argument.
        """
        stc = cls()
        # Collapse all runs of whitespace (including newlines) to single spaces.
        stc.originalText = ' '.join(originalText.split()).strip()
        return stc

    def __unicode__(self):
        return self.originalText
class task(models.Model):
#msg_a1 = models.ForeignKey(sentence, related_name="sentence_a1")
#msg_a2 = models.ForeignKey(sentence, related_name="sentence_a2")
#msg_b1 = models.ForeignKey(sentence, related_name="sentence_b1")
#msg_b2 = models.ForeignKey(sentence, related_name="sentence_b2")
#msg_c1 = models.ForeignKey(sentence, related_name="sentence_c1")
sentences = models.TextField(default="[]")
status = models.IntegerField() #0: init 1: opened 2: answered
workers = models.TextField(default="[]")
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __unicode__(self):
return " ".join((self.id, self.status));
def getSentences(self):
ret = []
stc_ids = json.loads(self.sentences)
for stc_id in stc_ids:
ret.append(sentence.objects.get(id=stc_id))
return ret
class hit(models.Model):
    """A Mechanical Turk HIT tracked against its completion code."""
    mTurk_id = models.TextField()
    status = models.IntegerField()
    code = models.TextField(blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __unicode__(self):
        # Bug fix: status is an integer; str.join() raises TypeError on
        # non-string items, so stringify it first.
        return " ".join((self.mTurk_id, self.code, str(self.status)))
| 35.641509 | 143 | 0.622287 | from __future__ import unicode_literals
from django.db import models
import re
import json
try:
import Queue as Q
except ImportError:
import queue as Q
class wordBlock():
def __init__(self, start, end, kind):
self.start = start;
self.end = end;
self.kind = kind;
def __lt__(self,other):
return self.end < other.start
def __cmp__(self,other):
return cmp(self.start,other.end)
class sentence(models.Model):
originalText = models.TextField(blank=True)
annotatedText = models.TextField(blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
@classmethod
def create(originalText):
stc = sentence();
stc.originalText = ' '.join((originalText.split())).strip();
return stc;
originalText;
ult="[]")
status = models.IntegerField()
workers = models.TextField(default="[]")
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __unicode__(self):
return " ".join((self.id, self.status));
def getSentences(self):
ret = []
stc_ids = json.loads(self.sentences)
for stc_id in stc_ids:
ret.append(sentence.objects.get(id=stc_id))
return ret
class hit(models.Model):
mTurk_id = models.TextField()
status = models.IntegerField()
code = models.TextField(blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __unicode__(self):
return " ".join((self.mTurk_id, self.code, self.status));
| true | true |
f722319c05ef4cef2543858c3137359ca5bc94e9 | 8,509 | py | Python | mgtune/optinterface.py | ghbrown/mg_tune | 674957e4eb24e94d9f033bffdf5e42a54b00f155 | [
"MIT"
] | null | null | null | mgtune/optinterface.py | ghbrown/mg_tune | 674957e4eb24e94d9f033bffdf5e42a54b00f155 | [
"MIT"
] | null | null | null | mgtune/optinterface.py | ghbrown/mg_tune | 674957e4eb24e94d9f033bffdf5e42a54b00f155 | [
"MIT"
] | null | null | null |
import pickle
def write_params_file(params_file,type_file,lower_file,upper_file,obj_file,max_f_eval):
    """
    assembles known problem info and settings for NOMAD
    into a plain text file
    ---Inputs---
    params_file : {path or string}
        output path of the NOMAD parameter file to write
    type_file, lower_file, upper_file : {path or string}
        pickle files holding per-variable NOMAD types and bounds (lists of str)
    obj_file : {path or string}
        path of the Python objective executable NOMAD should call
    max_f_eval : {int}
        maximum number of blackbox evaluations
    ---Outputs---
    NONE, writes params_file
    """
    # Load the pickled lists via context managers; the original
    # pickle.load(open(...)) pattern leaked file descriptors.
    with open(type_file, 'rb') as f:
        type_list = pickle.load(f)
    with open(lower_file, 'rb') as f:
        lower_bound_list = pickle.load(f)
    with open(upper_file, 'rb') as f:
        upper_bound_list = pickle.load(f)

    # Convert to single space-separated strings as NOMAD expects.
    type_string = ' '.join(type_list)
    lower_bound_string = ' '.join(lower_bound_list)
    upper_bound_string = ' '.join(upper_bound_list)
    dimension = len(type_string.split())  # number of decision variables

    lines = []
    lines.append('BB_OUTPUT_TYPE OBJ\n')
    lines.append(f'BB_EXE \"$python {obj_file}\"\n\n')
    lines.append(f'DIMENSION {dimension}\n')
    lines.append(f'BB_INPUT_TYPE ( {type_string} )\n')
    lines.append(f'LOWER_BOUND ( {lower_bound_string} )\n')
    lines.append(f'UPPER_BOUND ( {upper_bound_string} )\n\n')
    lines.append('LH_SEARCH 1 0\n\n')  # use latin hypercube search to set X0
    lines.append(f'MAX_BB_EVAL {max_f_eval}\n\n')
    lines.append('DISPLAY_DEGREE 2')
    with open(params_file, 'w') as f:
        f.writelines(lines)
def get_parameter_strings(x,options_list,param_types,obj_dir):
    """
    Map the current NOMAD iterate to source-code snippets.
    ---Inputs---
    x : {list}
        current NOMAD iterate; for integer variables each entry indexes
        into the corresponding option list
    options_list : {list}
        options_list[i] = [ith option name, list of ith argument options]
    param_types : {list}
        NOMAD type codes per variable: 'I' integer, 'R' real, 'B' binary
    obj_dir : {path or string}
        directory housing the NOMAD objective function (unused here)
    ---Outputs---
    parameter_strings : {list}
        one snippet string per entry of x (entries stay 0 for unsupported
        types, matching the historical behavior)
    """
    parameter_strings = [0]*len(x)
    for idx, (value, kind) in enumerate(zip(x, param_types)):
        if kind == 'R':
            print('ERROR: mgtune does not yet support real (non-integer) parameters')
        elif kind == 'B':
            print('ERROR: mgtune does not yet support binary parameters (though it easily could)')
        elif kind == 'I':
            # Integer variables select one entry from the option list.
            parameter_strings[idx] = str(options_list[idx][1][value])
    return parameter_strings
def overwrite_tags_with_parameters(tagged_lines,parameter_strings):
    """
    Substitute every mgtune tag in the tagged source with its parameter.

    Tags are named mgtune_tag_0, mgtune_tag_1, ... and are numbered in
    order of appearance, so we scan lines top to bottom, always looking
    for the next expected tag number.

    ---Inputs---
    tagged_lines : {list}
        lines of the tagged solver source
    parameter_strings : {list}
        source snippet to insert at each tag, indexed by tag number
    ---Outputs---
    running_lines : {list}
        lines of a runnable solver with all tags replaced
    """
    running_lines = []
    next_tag = 0
    for line in tagged_lines:
        done = False
        while not done:
            pieces = line.split('mgtune_tag_{}'.format(next_tag))
            if len(pieces) == 2:
                # Tag found once: splice in the parameter and advance to the
                # next expected tag (a line may contain several tags).
                line = pieces[0] + parameter_strings[next_tag] + pieces[1]
                next_tag += 1
            elif len(pieces) == 1:
                # Current tag absent from this line; move on.
                done = True
        running_lines.append(line)
    return running_lines
def iterate_to_running_solver(x,obj_dir):
    """
    takes the current NOMAD iterate (a vector of numbers largely
    corresponding to option value indices) and writes a runnable solver
    ---Inputs---
    x : {list}
        current NOMAD iterate, made of integers and floats
    obj_dir : {path or string}
        directory which houses the NOMAD objective function
        likely something like .../user_running_dir/mgtune_working/
    ---Outputs---
    NONE, writes running_solver.py in obj_dir
    """
    # Absolute paths of hardcoded files with relevant information.
    # These variables should match those in mgtune.ttune.tune; limitations
    # of NOMAD IO make it difficult to get around such hardcoding.
    options_file = obj_dir + '/options.p'
    types_file = obj_dir + '/param_types.txt'
    tagged_solver_file = obj_dir + '/tagged_solver.py'
    running_solver_file = obj_dir + '/running_solver.py'

    # Load pickles via context managers; the original pickle.load(open(...))
    # pattern leaked file descriptors.
    # options_list is a list of two element lists:
    #   options_list[i][0] is the parameter's name
    #   options_list[i][1] is a list of its possible values (or bounds)
    with open(options_file, 'rb') as f:
        options_list = pickle.load(f)
    # Data type code ('I'/'R'/'B') of each option.
    with open(types_file, 'rb') as f:
        type_list = pickle.load(f)

    # Get lines of tagged version of user's solver.
    with open(tagged_solver_file) as f:
        tagged_lines = f.readlines()

    # Get source code snippets corresponding to each entry of the iterate and
    # splice them into the tagged source.
    parameter_strings = get_parameter_strings(x, options_list, type_list, obj_dir)
    running_lines = overwrite_tags_with_parameters(tagged_lines, parameter_strings)

    # Write the runnable solver.
    with open(running_solver_file, 'w') as f:
        f.writelines(running_lines)
def params_file_to_list(params_file):
    # Read a NOMAD parameter file and return its non-blank lines, stripped
    # of surrounding whitespace, one list element per option setting.
    with open(params_file, 'r') as handle:
        raw_lines = handle.readlines()
    return [entry.strip() for entry in raw_lines if not entry.isspace()]
def get_types_and_bounds(parameter_options_list):
    """
    Derive NOMAD variable types and bounds from the parameter options.
    ---Inputs---
    parameter_options_list : {list}
        parameter_options_list[i] = [ith option name, ith argument options]
    ---Outputs---
    types_list : {list}
        per-variable type codes as strings: 'I' integer, 'R' real, 'B' binary
        (entries stay '0' for unsupported/unrecognized option formats)
    lower_bounds_list : {list}
        per-variable lower bounds, as strings
    upper_bounds_list : {list}
        per-variable upper bounds, as strings
    """
    types_list = []
    lower_bounds_list = []
    upper_bounds_list = []
    for name_and_options in parameter_options_list:
        # name_and_options looks like [option_name, [option_1, option_2, ...]]
        options = name_and_options[1]
        kind, low, high = 0, 0, 0
        if isinstance(options, list):
            if isinstance(options[0], str) and ('unbounded' in options[0]):
                print('ERROR: parameters which belong to set of unbounded size not yet implemented')
            else:
                # A variable drawn from a bounded set is encoded as an integer
                # index into that set.
                kind = 'I'
                low = 0
                high = int(len(options) - 1)
        # numerics -> strings, as NOMAD parameter files expect
        types_list.append(str(kind))
        lower_bounds_list.append(str(low))
        upper_bounds_list.append(str(high))
    return types_list, lower_bounds_list, upper_bounds_list
| 39.393519 | 104 | 0.670702 |
import pickle
def write_params_file(params_file,type_file,lower_file,upper_file,obj_file,max_f_eval):
type_list = pickle.load(open(type_file,'rb'))
lower_bound_list = pickle.load(open(lower_file,'rb'))
upper_bound_list = pickle.load(open(upper_file,'rb'))
type_string = ' '.join(type_list)
lower_bound_string = ' '.join(lower_bound_list)
upper_bound_string = ' '.join(upper_bound_list)
dimension = len(type_string.split())
lines = []
lines.append(f'BB_OUTPUT_TYPE OBJ\n')
lines.append(f'BB_EXE \"$python {obj_file}\"\n\n')
lines.append(f'DIMENSION {dimension}\n')
lines.append(f'BB_INPUT_TYPE ( {type_string} )\n')
lines.append(f'LOWER_BOUND ( {lower_bound_string} )\n')
lines.append(f'UPPER_BOUND ( {upper_bound_string} )\n\n')
lines.append(f'LH_SEARCH 1 0\n\n')
lines.append(f'MAX_BB_EVAL {max_f_eval}\n\n')
lines.append(f'DISPLAY_DEGREE 2')
with open(params_file,'w') as f:
f.writelines(lines)
def get_parameter_strings(x,options_list,param_types,obj_dir):
parameter_strings = [0]*len(x)
for i_p,(x_cur,type_cur) in enumerate(zip(x,param_types)):
if (type_cur == 'R'):
print('ERROR: mgtune does not yet support real (non-integer) parameters')
elif (type_cur == 'B'):
print('ERROR: mgtune does not yet support binary parameters (though it easily could)')
elif (type_cur == 'I'):
parameter_strings[i_p] = str(options_list[i_p][1][x_cur])
return parameter_strings
def overwrite_tags_with_parameters(tagged_lines,parameter_strings):
running_lines = [0]*len(tagged_lines)
num_tags_found = 0
tag_string_cur = f'mgtune_tag_{num_tags_found}'
for i_line,tagged_line in enumerate(tagged_lines):
cur_line = tagged_lines[i_line]
found_all_tags_in_line = False
while (not found_all_tags_in_line):
cur_line_split = cur_line.split(tag_string_cur)
if (len(cur_line_split) == 2):
cur_line = cur_line_split[0] + parameter_strings[num_tags_found] + cur_line_split[1]
num_tags_found += 1
tag_string_cur = f'mgtune_tag_{num_tags_found}'
elif (len(cur_line_split) == 1):
found_all_tags_in_line = True
running_lines[i_line] = cur_line
return running_lines
def iterate_to_running_solver(x,obj_dir):
options_file = obj_dir + '/options.p'
types_file = obj_dir + '/param_types.txt'
tagged_solver_file = obj_dir + '/tagged_solver.py'
running_solver_file = obj_dir + '/running_solver.py'
options_list = pickle.load(open(options_file,'rb'))
#options_list[i][1] is a list of its possible values (or bounds if the
#variable is a float, etc.)
#extract data type of each option from file
type_list = pickle.load(open(types_file,'rb'))
#get lines of tagged version of user's solver
with open(tagged_solver_file) as f:
tagged_lines = f.readlines()
parameter_strings = get_parameter_strings(x,options_list,type_list,obj_dir)
running_lines = overwrite_tags_with_parameters(tagged_lines,parameter_strings)
with open(running_solver_file,'w') as f:
f.writelines(running_lines)
def params_file_to_list(params_file):
with open(params_file,'r') as f:
lines = f.readlines()
params = [line.strip() for line in lines if not line.isspace()]
return params
def get_types_and_bounds(parameter_options_list):
n_params = len(parameter_options_list)
types_list = [0]*n_params
lower_bounds_list = [0]*n_params
upper_bounds_list = [0]*n_params
for i_o, option_cur in enumerate(parameter_options_list):
options = option_cur[1]
if isinstance(options,list):
if ((isinstance(options[0],str)) and ('unbounded' in options[0])):
print('ERROR: parameters which belong to set of unbounded size not yet implemented')
else:
types_list[i_o] = 'I'
lower_bounds_list[i_o] = 0
upper_bounds_list[i_o] = int(len(options) - 1)
types_list = [str(elem) for elem in types_list]
lower_bounds_list = [str(elem) for elem in lower_bounds_list]
upper_bounds_list = [str(elem) for elem in upper_bounds_list]
return types_list, lower_bounds_list, upper_bounds_list
| true | true |
f7223385a23edc82057a8732fb5856bc89217e52 | 1,694 | py | Python | setup.py | homeworkprod/boto3 | 753de6d98d2beec1f60ab304f83e92db82a8686a | [
"Apache-2.0"
] | null | null | null | setup.py | homeworkprod/boto3 | 753de6d98d2beec1f60ab304f83e92db82a8686a | [
"Apache-2.0"
] | null | null | null | setup.py | homeworkprod/boto3 | 753de6d98d2beec1f60ab304f83e92db82a8686a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
distutils/setuptools install script.
"""
import os
import re
from setuptools import find_packages, setup
ROOT = os.path.dirname(__file__)
VERSION_RE = re.compile(r'''__version__ = ['"]([0-9.]+)['"]''')
requires = [
'botocore>=1.24.29,<1.25.0',
'jmespath>=0.7.1,<2.0.0',
's3transfer>=0.5.0,<0.6.0',
]
def get_version():
    """Return the package version string parsed from boto3/__init__.py."""
    # Read via a context manager so the file handle is closed promptly;
    # the original open(...).read() relied on GC to close it.
    with open(os.path.join(ROOT, 'boto3', '__init__.py')) as f:
        init = f.read()
    return VERSION_RE.search(init).group(1)
# Package metadata and distribution configuration for the boto3 SDK.
setup(
    name='boto3',
    version=get_version(),
    description='The AWS SDK for Python',
    # The README doubles as the PyPI long description.
    long_description=open('README.rst').read(),
    author='Amazon Web Services',
    url='https://github.com/boto/boto3',
    scripts=[],
    packages=find_packages(exclude=['tests*']),
    # Ship the bundled resource-model JSON and example docs with the package.
    package_data={'boto3': ['data/aws/resources/*.json', 'examples/*.rst']},
    include_package_data=True,
    install_requires=requires,
    license="Apache License 2.0",
    python_requires=">= 3.6",
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
    ],
    project_urls={
        'Documentation': 'https://boto3.amazonaws.com/v1/documentation/api/latest/index.html',
        'Source': 'https://github.com/boto/boto3',
    },
)
| 28.711864 | 94 | 0.618064 |
import os
import re
from setuptools import find_packages, setup
ROOT = os.path.dirname(__file__)
VERSION_RE = re.compile(r'''__version__ = ['"]([0-9.]+)['"]''')
requires = [
'botocore>=1.24.29,<1.25.0',
'jmespath>=0.7.1,<2.0.0',
's3transfer>=0.5.0,<0.6.0',
]
def get_version():
init = open(os.path.join(ROOT, 'boto3', '__init__.py')).read()
return VERSION_RE.search(init).group(1)
setup(
name='boto3',
version=get_version(),
description='The AWS SDK for Python',
long_description=open('README.rst').read(),
author='Amazon Web Services',
url='https://github.com/boto/boto3',
scripts=[],
packages=find_packages(exclude=['tests*']),
package_data={'boto3': ['data/aws/resources/*.json', 'examples/*.rst']},
include_package_data=True,
install_requires=requires,
license="Apache License 2.0",
python_requires=">= 3.6",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
],
project_urls={
'Documentation': 'https://boto3.amazonaws.com/v1/documentation/api/latest/index.html',
'Source': 'https://github.com/boto/boto3',
},
)
| true | true |
f72234cf15ad00ab2c9069cac16b7589616c73e1 | 2,888 | py | Python | postgresqleu/accounting/backendviews.py | bradfordboyle/pgeu-system | bbe70e7a94092c10f11a0f74fda23079532bb018 | [
"MIT"
] | 11 | 2020-08-20T11:16:02.000Z | 2022-03-12T23:25:04.000Z | postgresqleu/accounting/backendviews.py | bradfordboyle/pgeu-system | bbe70e7a94092c10f11a0f74fda23079532bb018 | [
"MIT"
] | 71 | 2019-11-18T10:11:22.000Z | 2022-03-27T16:12:57.000Z | postgresqleu/accounting/backendviews.py | bradfordboyle/pgeu-system | bbe70e7a94092c10f11a0f74fda23079532bb018 | [
"MIT"
] | 18 | 2019-11-18T09:56:31.000Z | 2022-01-08T03:16:43.000Z | from django.shortcuts import render
from postgresqleu.util.backendviews import backend_list_editor
from postgresqleu.util.auth import authenticate_backend_group
from postgresqleu.util.db import exec_to_dict
from postgresqleu.accounting.backendforms import BackendAccountClassForm
from postgresqleu.accounting.backendforms import BackendAccountGroupForm
from postgresqleu.accounting.backendforms import BackendAccountForm
from postgresqleu.accounting.backendforms import BackendObjectForm
def _edit_accounting_object(request, rest, form_class):
    # Shared implementation for the four accounting editor views below:
    # verify group membership, then hand off to the generic backend list
    # editor with the accounting-specific options.
    authenticate_backend_group(request, 'Accounting managers')
    return backend_list_editor(request,
                               None,
                               form_class,
                               rest,
                               bypass_conference_filter=True,
                               topadmin='Accounting',
                               return_url='/admin/',
                               )


def edit_accountclass(request, rest):
    """Edit accounting account classes."""
    return _edit_accounting_object(request, rest, BackendAccountClassForm)


def edit_accountgroup(request, rest):
    """Edit accounting account groups."""
    return _edit_accounting_object(request, rest, BackendAccountGroupForm)


def edit_account(request, rest):
    """Edit individual accounts."""
    return _edit_accounting_object(request, rest, BackendAccountForm)


def edit_object(request, rest):
    """Edit accounting objects."""
    return _edit_accounting_object(request, rest, BackendObjectForm)
def accountstructure(request):
authenticate_backend_group(request, 'Accounting managers')
accounts = exec_to_dict("""SELECT ac.id AS classid, ac.name AS classname, ac.inbalance,
ag.id AS groupid, ag.name AS groupname,
a.id AS accountid, a.num AS accountnum, a.name AS accountname
FROM accounting_accountclass ac
INNER JOIN accounting_accountgroup ag ON ag.accountclass_id=ac.id
INNER JOIN accounting_account a ON a.group_id=ag.id
ORDER BY a.num""")
return render(request, 'accounting/structure.html', {
'accounts': accounts,
'topadmin': 'Accounting',
'helplink': 'accounting',
})
| 35.654321 | 91 | 0.588296 | from django.shortcuts import render
from postgresqleu.util.backendviews import backend_list_editor
from postgresqleu.util.auth import authenticate_backend_group
from postgresqleu.util.db import exec_to_dict
from postgresqleu.accounting.backendforms import BackendAccountClassForm
from postgresqleu.accounting.backendforms import BackendAccountGroupForm
from postgresqleu.accounting.backendforms import BackendAccountForm
from postgresqleu.accounting.backendforms import BackendObjectForm
def edit_accountclass(request, rest):
authenticate_backend_group(request, 'Accounting managers')
return backend_list_editor(request,
None,
BackendAccountClassForm,
rest,
bypass_conference_filter=True,
topadmin='Accounting',
return_url='/admin/',
)
def edit_accountgroup(request, rest):
authenticate_backend_group(request, 'Accounting managers')
return backend_list_editor(request,
None,
BackendAccountGroupForm,
rest,
bypass_conference_filter=True,
topadmin='Accounting',
return_url='/admin/',
)
def edit_account(request, rest):
authenticate_backend_group(request, 'Accounting managers')
return backend_list_editor(request,
None,
BackendAccountForm,
rest,
bypass_conference_filter=True,
topadmin='Accounting',
return_url='/admin/',
)
def edit_object(request, rest):
authenticate_backend_group(request, 'Accounting managers')
return backend_list_editor(request,
None,
BackendObjectForm,
rest,
bypass_conference_filter=True,
topadmin='Accounting',
return_url='/admin/',
)
def accountstructure(request):
authenticate_backend_group(request, 'Accounting managers')
accounts = exec_to_dict("""SELECT ac.id AS classid, ac.name AS classname, ac.inbalance,
ag.id AS groupid, ag.name AS groupname,
a.id AS accountid, a.num AS accountnum, a.name AS accountname
FROM accounting_accountclass ac
INNER JOIN accounting_accountgroup ag ON ag.accountclass_id=ac.id
INNER JOIN accounting_account a ON a.group_id=ag.id
ORDER BY a.num""")
return render(request, 'accounting/structure.html', {
'accounts': accounts,
'topadmin': 'Accounting',
'helplink': 'accounting',
})
| true | true |
f72235c0ef5c4f321a37023769e91aa94995bf56 | 7,756 | py | Python | stl10/utee/misc.py | EricElmoznino/OrthogonalLowrankEmbedding | cce12ca5cb34f7cb888b04739724bdbbd18b1e2d | [
"MIT"
] | 64 | 2017-12-16T18:37:36.000Z | 2022-01-18T00:24:31.000Z | stl10/utee/misc.py | EricElmoznino/OrthogonalLowrankEmbedding | cce12ca5cb34f7cb888b04739724bdbbd18b1e2d | [
"MIT"
] | 4 | 2018-05-17T12:09:13.000Z | 2022-01-31T01:25:08.000Z | stl10/utee/misc.py | EricElmoznino/OrthogonalLowrankEmbedding | cce12ca5cb34f7cb888b04739724bdbbd18b1e2d | [
"MIT"
] | 14 | 2017-12-06T12:31:54.000Z | 2021-12-08T09:33:22.000Z | from __future__ import print_function
import cv2
import os
import shutil
import pickle as pkl
import time
import numpy as np
import hashlib
from IPython import embed
class Logger(object):
def __init__(self):
self._logger = None
def init(self, logdir, name='log'):
if self._logger is None:
import logging
if not os.path.exists(logdir):
os.makedirs(logdir)
log_file = os.path.join(logdir, name)
if os.path.exists(log_file):
os.remove(log_file)
self._logger = logging.getLogger()
self._logger.setLevel('INFO')
fh = logging.FileHandler(log_file)
ch = logging.StreamHandler()
self._logger.addHandler(fh)
self._logger.addHandler(ch)
def info(self, str_info):
self.init('/tmp', 'tmp.log')
self._logger.info(str_info)
logger = Logger()
print = logger.info
def ensure_dir(path, erase=False):
if os.path.exists(path) and erase:
print("Removing old folder {}".format(path))
shutil.rmtree(path)
if not os.path.exists(path):
print("Creating folder {}".format(path))
os.makedirs(path)
def load_pickle(path):
begin_st = time.time()
with open(path, 'rb') as f:
print("Loading pickle object from {}".format(path))
v = pkl.load(f)
print("=> Done ({:.4f} s)".format(time.time() - begin_st))
return v
def dump_pickle(obj, path):
with open(path, 'wb') as f:
print("Dumping pickle object to {}".format(path))
pkl.dump(obj, f, protocol=pkl.HIGHEST_PROTOCOL)
def auto_select_gpu(mem_bound=500, utility_bound=0, gpus=(0, 1, 2, 3, 4, 5, 6, 7), num_gpu=1, selected_gpus=None):
import sys
import os
import subprocess
import re
import time
import numpy as np
if 'CUDA_VISIBLE_DEVCIES' in os.environ:
sys.exit(0)
if selected_gpus is None:
mem_trace = []
utility_trace = []
for i in range(5): # sample 5 times
info = subprocess.check_output('nvidia-smi', shell=True).decode('utf-8')
mem = [int(s[:-5]) for s in re.compile('\d+MiB\s/').findall(info)]
utility = [int(re.compile('\d+').findall(s)[0]) for s in re.compile('\d+%\s+Default').findall(info)]
mem_trace.append(mem)
utility_trace.append(utility)
time.sleep(0.1)
mem = np.mean(mem_trace, axis=0)
utility = np.mean(utility_trace, axis=0)
assert(len(mem) == len(utility))
nGPU = len(utility)
ideal_gpus = [i for i in range(nGPU) if mem[i] <= mem_bound and utility[i] <= utility_bound and i in gpus]
if len(ideal_gpus) < num_gpu:
print("No sufficient resource, available: {}, require {} gpu".format(ideal_gpus, num_gpu))
sys.exit(0)
else:
selected_gpus = list(map(str, ideal_gpus[:num_gpu]))
else:
selected_gpus = selected_gpus.split(',')
print("Setting GPU: {}".format(selected_gpus))
os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(selected_gpus)
return selected_gpus
def expand_user(path):
return os.path.abspath(os.path.expanduser(path))
def model_snapshot(model, new_file, old_file=None, verbose=False):
from collections import OrderedDict
import torch
if isinstance(model, torch.nn.DataParallel):
model = model.module
if old_file and os.path.exists(expand_user(old_file)):
if verbose:
print("Removing old model {}".format(expand_user(old_file)))
os.remove(expand_user(old_file))
if verbose:
print("Saving model to {}".format(expand_user(new_file)))
state_dict = OrderedDict()
for k, v in model.state_dict().items():
if v.is_cuda:
v = v.cpu()
state_dict[k] = v
torch.save(state_dict, expand_user(new_file))
def load_lmdb(lmdb_file, n_records=None):
import lmdb
import numpy as np
lmdb_file = expand_user(lmdb_file)
if os.path.exists(lmdb_file):
data = []
env = lmdb.open(lmdb_file, readonly=True, max_readers=512)
with env.begin() as txn:
cursor = txn.cursor()
begin_st = time.time()
print("Loading lmdb file {} into memory".format(lmdb_file))
for key, value in cursor:
_, target, _ = key.decode('ascii').split(':')
target = int(target)
img = cv2.imdecode(np.fromstring(value, np.uint8), cv2.IMREAD_COLOR)
data.append((img, target))
if n_records is not None and len(data) >= n_records:
break
env.close()
print("=> Done ({:.4f} s)".format(time.time() - begin_st))
return data
else:
print("Not found lmdb file".format(lmdb_file))
def str2img(str_b):
return cv2.imdecode(np.fromstring(str_b, np.uint8), cv2.IMREAD_COLOR)
def img2str(img):
return cv2.imencode('.jpg', img)[1].tostring()
def md5(s):
m = hashlib.md5()
m.update(s)
return m.hexdigest()
def eval_model(model, ds, n_sample=None, ngpu=1, is_imagenet=False):
import tqdm
import torch
from torch import nn
from torch.autograd import Variable
class ModelWrapper(nn.Module):
def __init__(self, model):
super(ModelWrapper, self).__init__()
self.model = model
self.mean = [0.485, 0.456, 0.406]
self.std = [0.229, 0.224, 0.225]
def forward(self, input):
input.data.div_(255.)
input.data[:, 0, :, :].sub_(self.mean[0]).div_(self.std[0])
input.data[:, 1, :, :].sub_(self.mean[1]).div_(self.std[1])
input.data[:, 2, :, :].sub_(self.mean[2]).div_(self.std[2])
return self.model(input)
correct1, correct5 = 0, 0
n_passed = 0
if is_imagenet:
model = ModelWrapper(model)
model = model.eval()
model = torch.nn.DataParallel(model, device_ids=range(ngpu)).cuda()
n_sample = len(ds) if n_sample is None else n_sample
for idx, (data, target) in enumerate(tqdm.tqdm(ds, total=n_sample)):
n_passed += len(data)
data = Variable(torch.FloatTensor(data)).cuda()
indx_target = torch.LongTensor(target)
output = model(data)
bs = output.size(0)
idx_pred = output.data.sort(1, descending=True)[1]
idx_gt1 = indx_target.expand(1, bs).transpose_(0, 1)
idx_gt5 = idx_gt1.expand(bs, 5)
correct1 += idx_pred[:, :1].cpu().eq(idx_gt1).sum()
correct5 += idx_pred[:, :5].cpu().eq(idx_gt5).sum()
if idx >= n_sample - 1:
break
acc1 = correct1 * 1.0 / n_passed
acc5 = correct5 * 1.0 / n_passed
return acc1, acc5
def load_state_dict(model, model_urls, model_root):
from torch.utils import model_zoo
from torch import nn
import re
from collections import OrderedDict
own_state_old = model.state_dict()
own_state = OrderedDict() # remove all 'group' string
for k, v in own_state_old.items():
k = re.sub('group\d+\.', '', k)
own_state[k] = v
state_dict = model_zoo.load_url(model_urls, model_root)
for name, param in state_dict.items():
if name not in own_state:
print(own_state.keys())
raise KeyError('unexpected key "{}" in state_dict'
.format(name))
if isinstance(param, nn.Parameter):
# backwards compatibility for serialized parameters
param = param.data
own_state[name].copy_(param)
missing = set(own_state.keys()) - set(state_dict.keys())
if len(missing) > 0:
raise KeyError('missing keys in state_dict: "{}"'.format(missing))
| 33.868996 | 114 | 0.603533 | from __future__ import print_function
import cv2
import os
import shutil
import pickle as pkl
import time
import numpy as np
import hashlib
from IPython import embed
class Logger(object):
def __init__(self):
self._logger = None
def init(self, logdir, name='log'):
if self._logger is None:
import logging
if not os.path.exists(logdir):
os.makedirs(logdir)
log_file = os.path.join(logdir, name)
if os.path.exists(log_file):
os.remove(log_file)
self._logger = logging.getLogger()
self._logger.setLevel('INFO')
fh = logging.FileHandler(log_file)
ch = logging.StreamHandler()
self._logger.addHandler(fh)
self._logger.addHandler(ch)
def info(self, str_info):
self.init('/tmp', 'tmp.log')
self._logger.info(str_info)
logger = Logger()
print = logger.info
def ensure_dir(path, erase=False):
if os.path.exists(path) and erase:
print("Removing old folder {}".format(path))
shutil.rmtree(path)
if not os.path.exists(path):
print("Creating folder {}".format(path))
os.makedirs(path)
def load_pickle(path):
begin_st = time.time()
with open(path, 'rb') as f:
print("Loading pickle object from {}".format(path))
v = pkl.load(f)
print("=> Done ({:.4f} s)".format(time.time() - begin_st))
return v
def dump_pickle(obj, path):
with open(path, 'wb') as f:
print("Dumping pickle object to {}".format(path))
pkl.dump(obj, f, protocol=pkl.HIGHEST_PROTOCOL)
def auto_select_gpu(mem_bound=500, utility_bound=0, gpus=(0, 1, 2, 3, 4, 5, 6, 7), num_gpu=1, selected_gpus=None):
import sys
import os
import subprocess
import re
import time
import numpy as np
if 'CUDA_VISIBLE_DEVCIES' in os.environ:
sys.exit(0)
if selected_gpus is None:
mem_trace = []
utility_trace = []
for i in range(5):
info = subprocess.check_output('nvidia-smi', shell=True).decode('utf-8')
mem = [int(s[:-5]) for s in re.compile('\d+MiB\s/').findall(info)]
utility = [int(re.compile('\d+').findall(s)[0]) for s in re.compile('\d+%\s+Default').findall(info)]
mem_trace.append(mem)
utility_trace.append(utility)
time.sleep(0.1)
mem = np.mean(mem_trace, axis=0)
utility = np.mean(utility_trace, axis=0)
assert(len(mem) == len(utility))
nGPU = len(utility)
ideal_gpus = [i for i in range(nGPU) if mem[i] <= mem_bound and utility[i] <= utility_bound and i in gpus]
if len(ideal_gpus) < num_gpu:
print("No sufficient resource, available: {}, require {} gpu".format(ideal_gpus, num_gpu))
sys.exit(0)
else:
selected_gpus = list(map(str, ideal_gpus[:num_gpu]))
else:
selected_gpus = selected_gpus.split(',')
print("Setting GPU: {}".format(selected_gpus))
os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(selected_gpus)
return selected_gpus
def expand_user(path):
return os.path.abspath(os.path.expanduser(path))
def model_snapshot(model, new_file, old_file=None, verbose=False):
from collections import OrderedDict
import torch
if isinstance(model, torch.nn.DataParallel):
model = model.module
if old_file and os.path.exists(expand_user(old_file)):
if verbose:
print("Removing old model {}".format(expand_user(old_file)))
os.remove(expand_user(old_file))
if verbose:
print("Saving model to {}".format(expand_user(new_file)))
state_dict = OrderedDict()
for k, v in model.state_dict().items():
if v.is_cuda:
v = v.cpu()
state_dict[k] = v
torch.save(state_dict, expand_user(new_file))
def load_lmdb(lmdb_file, n_records=None):
import lmdb
import numpy as np
lmdb_file = expand_user(lmdb_file)
if os.path.exists(lmdb_file):
data = []
env = lmdb.open(lmdb_file, readonly=True, max_readers=512)
with env.begin() as txn:
cursor = txn.cursor()
begin_st = time.time()
print("Loading lmdb file {} into memory".format(lmdb_file))
for key, value in cursor:
_, target, _ = key.decode('ascii').split(':')
target = int(target)
img = cv2.imdecode(np.fromstring(value, np.uint8), cv2.IMREAD_COLOR)
data.append((img, target))
if n_records is not None and len(data) >= n_records:
break
env.close()
print("=> Done ({:.4f} s)".format(time.time() - begin_st))
return data
else:
print("Not found lmdb file".format(lmdb_file))
def str2img(str_b):
return cv2.imdecode(np.fromstring(str_b, np.uint8), cv2.IMREAD_COLOR)
def img2str(img):
return cv2.imencode('.jpg', img)[1].tostring()
def md5(s):
m = hashlib.md5()
m.update(s)
return m.hexdigest()
def eval_model(model, ds, n_sample=None, ngpu=1, is_imagenet=False):
import tqdm
import torch
from torch import nn
from torch.autograd import Variable
class ModelWrapper(nn.Module):
def __init__(self, model):
super(ModelWrapper, self).__init__()
self.model = model
self.mean = [0.485, 0.456, 0.406]
self.std = [0.229, 0.224, 0.225]
def forward(self, input):
input.data.div_(255.)
input.data[:, 0, :, :].sub_(self.mean[0]).div_(self.std[0])
input.data[:, 1, :, :].sub_(self.mean[1]).div_(self.std[1])
input.data[:, 2, :, :].sub_(self.mean[2]).div_(self.std[2])
return self.model(input)
correct1, correct5 = 0, 0
n_passed = 0
if is_imagenet:
model = ModelWrapper(model)
model = model.eval()
model = torch.nn.DataParallel(model, device_ids=range(ngpu)).cuda()
n_sample = len(ds) if n_sample is None else n_sample
for idx, (data, target) in enumerate(tqdm.tqdm(ds, total=n_sample)):
n_passed += len(data)
data = Variable(torch.FloatTensor(data)).cuda()
indx_target = torch.LongTensor(target)
output = model(data)
bs = output.size(0)
idx_pred = output.data.sort(1, descending=True)[1]
idx_gt1 = indx_target.expand(1, bs).transpose_(0, 1)
idx_gt5 = idx_gt1.expand(bs, 5)
correct1 += idx_pred[:, :1].cpu().eq(idx_gt1).sum()
correct5 += idx_pred[:, :5].cpu().eq(idx_gt5).sum()
if idx >= n_sample - 1:
break
acc1 = correct1 * 1.0 / n_passed
acc5 = correct5 * 1.0 / n_passed
return acc1, acc5
def load_state_dict(model, model_urls, model_root):
from torch.utils import model_zoo
from torch import nn
import re
from collections import OrderedDict
own_state_old = model.state_dict()
own_state = OrderedDict()
for k, v in own_state_old.items():
k = re.sub('group\d+\.', '', k)
own_state[k] = v
state_dict = model_zoo.load_url(model_urls, model_root)
for name, param in state_dict.items():
if name not in own_state:
print(own_state.keys())
raise KeyError('unexpected key "{}" in state_dict'
.format(name))
if isinstance(param, nn.Parameter):
param = param.data
own_state[name].copy_(param)
missing = set(own_state.keys()) - set(state_dict.keys())
if len(missing) > 0:
raise KeyError('missing keys in state_dict: "{}"'.format(missing))
| true | true |
f722364d6824f270c535f653164145ae812490d3 | 2,405 | py | Python | hardest/binary.py | proggga/hardest | 234cb41115c30a756ee11ed7c5fa41c9979d3303 | [
"MIT"
] | 2 | 2018-02-03T13:43:25.000Z | 2021-12-03T16:13:49.000Z | hardest/binary.py | proggga/hardest | 234cb41115c30a756ee11ed7c5fa41c9979d3303 | [
"MIT"
] | 8 | 2017-08-16T08:34:59.000Z | 2018-02-05T18:30:44.000Z | hardest/binary.py | proggga/hardest | 234cb41115c30a756ee11ed7c5fa41c9979d3303 | [
"MIT"
] | 1 | 2018-02-05T18:26:20.000Z | 2018-02-05T18:26:20.000Z | """Binary class."""
import os
from subprocess import CalledProcessError
from subprocess import check_output
from subprocess import STDOUT
class Binary(object): # pylint: disable=too-few-public-methods
"""Represent Binary structure."""
def __init__(self, path):
# type: (str) -> None
"""Binary constructor."""
self.executable = os.path.basename(path) # type: str
self.path = path # type: str
self._version = '' # type: str
def version(self):
# type: () -> str
"""Return version, by trying to get from binary."""
if not self._version:
return self._get_version()
return self._version
def _get_version(self):
# type: () -> str
raw_result = b'' # type: bytes
try:
raw_result = check_output([self.path, '-V'],
stderr=STDOUT) # type: ignore
except CalledProcessError:
return 'Unknown'
except OSError: # type: ignore
return 'Error'
decoded_result = str(raw_result.decode()) # type: str
if not decoded_result:
return 'Unknown'
stripped_version = decoded_result.strip()
self._version = stripped_version.replace('\n', ' ')
return self._version
def __eq__(self, second_addend):
# type: (object) -> bool
"""Test equality of two binaries."""
if not isinstance(second_addend, Binary):
return False
first_addend = self # type : Binary
equal_path = bool(first_addend.path == second_addend.path)
equal_version = bool(first_addend.version() == second_addend.version())
return equal_path and equal_version
def __ne__(self, second_addend):
# type: (object) -> bool
"""Test not equality of two binaries."""
return not bool(self == second_addend)
def __hash__(self):
# type: () -> int
"""Return hash."""
return hash(self.path) ^ hash(self.version())
def __repr__(self):
# type: () -> str
"""Return object representation."""
return "Binary obj ({}, {})".format(self.path, self.version())
def __str__(self):
# type: () -> str
"""Return string representation."""
return "{} ({})".format(self.path, self.version())
| 32.945205 | 79 | 0.565489 | import os
from subprocess import CalledProcessError
from subprocess import check_output
from subprocess import STDOUT
class Binary(object):
def __init__(self, path):
self.executable = os.path.basename(path)
self.path = path
self._version = ''
def version(self):
if not self._version:
return self._get_version()
return self._version
def _get_version(self):
raw_result = b''
try:
raw_result = check_output([self.path, '-V'],
stderr=STDOUT)
except CalledProcessError:
return 'Unknown'
except OSError:
return 'Error'
decoded_result = str(raw_result.decode())
if not decoded_result:
return 'Unknown'
stripped_version = decoded_result.strip()
self._version = stripped_version.replace('\n', ' ')
return self._version
def __eq__(self, second_addend):
if not isinstance(second_addend, Binary):
return False
first_addend = self
equal_path = bool(first_addend.path == second_addend.path)
equal_version = bool(first_addend.version() == second_addend.version())
return equal_path and equal_version
def __ne__(self, second_addend):
return not bool(self == second_addend)
def __hash__(self):
return hash(self.path) ^ hash(self.version())
def __repr__(self):
return "Binary obj ({}, {})".format(self.path, self.version())
def __str__(self):
return "{} ({})".format(self.path, self.version())
| true | true |
f7223666efc6621192cb49805d6a9c9dc5472b81 | 3,589 | py | Python | open_spiel/python/examples/command_line_risk.py | JohnIsak/open_spiel-master | 0590539991f04b68b4a20a1c3b553d1445e6696f | [
"Apache-2.0"
] | 1 | 2021-09-16T11:19:12.000Z | 2021-09-16T11:19:12.000Z | open_spiel/python/examples/command_line_risk.py | JohnIsak/open_spiel-master | 0590539991f04b68b4a20a1c3b553d1445e6696f | [
"Apache-2.0"
] | null | null | null | open_spiel/python/examples/command_line_risk.py | JohnIsak/open_spiel-master | 0590539991f04b68b4a20a1c3b553d1445e6696f | [
"Apache-2.0"
] | 1 | 2021-03-25T00:41:02.000Z | 2021-03-25T00:41:02.000Z | """NFSP agents trained on simplified risk."""
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
import numpy as np
import igraph as ig
import cairocffi
import random
import pyspiel
from open_spiel.python import policy
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import exploitability
from open_spiel.python.algorithms import nfsp
simple_adj = [ [1, 2],[0, 2, 3],[0, 1, 3],[1, 2, 4],[3, 5, 6],[6, 7, 8, 10, 11],[4, 5, 8, 34],[5, 9, 11, 12, 14],[5, 6, 10, 40],[7, 14],[5, 8, 11, 40],[5, 7, 10, 12, 13],[7, 11, 13, 14],[11, 12, 14],[7, 9, 12, 13, 15],[14, 16, 20],[15, 17, 19, 20],[16, 18, 19, 38],[17, 19, 22],[16, 17, 18, 22, 21, 20],[15, 16, 19, 21],[19, 20, 22, 23],[18, 19, 21, 23],[21, 22, 24],[23, 25, 27],[24, 26, 27],[25, 27],[24, 25, 26, 28],[27, 29, 33, 35, 36],[28, 30, 32],[29, 31, 32],[30, 32],[29, 30, 33, 34],[28, 32, 34, 35],[6, 32, 33, 40],[28, 33, 34, 36, 40, 41],[28, 35, 37, 41],[36, 38, 39, 41],[37, 39],[37, 38, 40, 41],[8, 10, 34, 35, 39, 41],[35, 36, 37, 39, 40] ]
vertex_lis = []
for i in range(len(simple_adj)):
for j in simple_adj[i]:
if (i,j) not in vertex_lis and (j,i) not in vertex_lis:
vertex_lis.append((i,j))
FLAGS = flags.FLAGS
SEED = 12983641
final_policy_type = pyspiel.ISMCTSFinalPolicyType.MAX_VISIT_COUNT
"""final_policy_type = pyspiel.ISMCTSFinalPolicyType.MAX_VALUE"""
evaluator = pyspiel.RandomRolloutEvaluator(1, SEED)
bot = pyspiel.ISMCTSBot(SEED, evaluator, 0.75, 1000, -1, final_policy_type,
False, False)
human_id = 1
bot_id = 0
def visualise(state,player_id):
g = ig.Graph()
g.add_vertices(42)
for i in vertex_lis:
g.add_edges([(i[0],i[1])])
colour_dict = {0:'red',0.5:'black',1:'blue'}
g.vs["terr_no"]=[i for i in range(42)]
troops=[0 for i in range(42)]
ownership=[0.5 for i in range(42)]
info_state =state.information_state_tensor(player_id)
for player in range(2):
for terr in range(42):
if player == 0 and info_state[44+terr]>0:
ownership[terr]=0
troops[terr]=info_state[44+terr]
if player == 1 and info_state[86+terr]>0:
ownership[terr]=1
troops[terr]=info_state[86+terr]
g.vs["player"]=ownership
g.vs["troops"]=troops
g.vs["label"]=["______"+str(g.vs["terr_no"][i])+","+str(g.vs["troops"][i]) for i in range(42)]
layout = g.layout_kamada_kawai()
return(ig.plot(g,layout=layout,vertex_color = [colour_dict[player] for player in g.vs["player"]]))
def main(unused_argv):
game = pyspiel.load_game("risk")
state = game.new_initial_state()
count = 0
while not state.is_terminal():
"""if count <160:
actions = state.legal_actions()
action = random.choice(actions)
state.apply_action(action)
count+=1
continue"""
current_player = state.current_player()
if state.is_chance_node():
state.apply_action(0)
elif current_player ==human_id:
visualise(state,human_id)
info_state = state.information_state_tensor(human_id)
print(info_state[:42])
print(info_state[-4:-2])
legal = state.legal_actions()
print(state.legal_actions())
action = "1000"
while int(action) not in legal:
action = input("Action:")
if action =="":
action = "1000"
state.apply_action(int(action))
elif current_player == bot_id:
action = bot.step(state)
print("Bot action:"+str(action))
state.apply_action(action)
print(state.rewards())
if __name__ == "__main__":
app.run(main) | 37 | 656 | 0.631931 |
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
import numpy as np
import igraph as ig
import cairocffi
import random
import pyspiel
from open_spiel.python import policy
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import exploitability
from open_spiel.python.algorithms import nfsp
simple_adj = [ [1, 2],[0, 2, 3],[0, 1, 3],[1, 2, 4],[3, 5, 6],[6, 7, 8, 10, 11],[4, 5, 8, 34],[5, 9, 11, 12, 14],[5, 6, 10, 40],[7, 14],[5, 8, 11, 40],[5, 7, 10, 12, 13],[7, 11, 13, 14],[11, 12, 14],[7, 9, 12, 13, 15],[14, 16, 20],[15, 17, 19, 20],[16, 18, 19, 38],[17, 19, 22],[16, 17, 18, 22, 21, 20],[15, 16, 19, 21],[19, 20, 22, 23],[18, 19, 21, 23],[21, 22, 24],[23, 25, 27],[24, 26, 27],[25, 27],[24, 25, 26, 28],[27, 29, 33, 35, 36],[28, 30, 32],[29, 31, 32],[30, 32],[29, 30, 33, 34],[28, 32, 34, 35],[6, 32, 33, 40],[28, 33, 34, 36, 40, 41],[28, 35, 37, 41],[36, 38, 39, 41],[37, 39],[37, 38, 40, 41],[8, 10, 34, 35, 39, 41],[35, 36, 37, 39, 40] ]
vertex_lis = []
for i in range(len(simple_adj)):
for j in simple_adj[i]:
if (i,j) not in vertex_lis and (j,i) not in vertex_lis:
vertex_lis.append((i,j))
FLAGS = flags.FLAGS
SEED = 12983641
final_policy_type = pyspiel.ISMCTSFinalPolicyType.MAX_VISIT_COUNT
evaluator = pyspiel.RandomRolloutEvaluator(1, SEED)
bot = pyspiel.ISMCTSBot(SEED, evaluator, 0.75, 1000, -1, final_policy_type,
False, False)
human_id = 1
bot_id = 0
def visualise(state,player_id):
g = ig.Graph()
g.add_vertices(42)
for i in vertex_lis:
g.add_edges([(i[0],i[1])])
colour_dict = {0:'red',0.5:'black',1:'blue'}
g.vs["terr_no"]=[i for i in range(42)]
troops=[0 for i in range(42)]
ownership=[0.5 for i in range(42)]
info_state =state.information_state_tensor(player_id)
for player in range(2):
for terr in range(42):
if player == 0 and info_state[44+terr]>0:
ownership[terr]=0
troops[terr]=info_state[44+terr]
if player == 1 and info_state[86+terr]>0:
ownership[terr]=1
troops[terr]=info_state[86+terr]
g.vs["player"]=ownership
g.vs["troops"]=troops
g.vs["label"]=["______"+str(g.vs["terr_no"][i])+","+str(g.vs["troops"][i]) for i in range(42)]
layout = g.layout_kamada_kawai()
return(ig.plot(g,layout=layout,vertex_color = [colour_dict[player] for player in g.vs["player"]]))
def main(unused_argv):
game = pyspiel.load_game("risk")
state = game.new_initial_state()
count = 0
while not state.is_terminal():
current_player = state.current_player()
if state.is_chance_node():
state.apply_action(0)
elif current_player ==human_id:
visualise(state,human_id)
info_state = state.information_state_tensor(human_id)
print(info_state[:42])
print(info_state[-4:-2])
legal = state.legal_actions()
print(state.legal_actions())
action = "1000"
while int(action) not in legal:
action = input("Action:")
if action =="":
action = "1000"
state.apply_action(int(action))
elif current_player == bot_id:
action = bot.step(state)
print("Bot action:"+str(action))
state.apply_action(action)
print(state.rewards())
if __name__ == "__main__":
app.run(main) | true | true |
f722368f08b8a4e011882311849f6584dcfca88a | 3,800 | py | Python | tests/test_core.py | Borealin/fastclasses-json | 3318f02338624145bd488c1f38fd4d7bd6210410 | [
"MIT"
] | null | null | null | tests/test_core.py | Borealin/fastclasses-json | 3318f02338624145bd488c1f38fd4d7bd6210410 | [
"MIT"
] | null | null | null | tests/test_core.py | Borealin/fastclasses-json | 3318f02338624145bd488c1f38fd4d7bd6210410 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Dict
import textwrap
from fastclasses_json.api import dataclass_json
from fastclasses_json import core
def test_to_dict_source():
@dataclass
class A:
x: int
assert core._to_dict_source(A) == textwrap.dedent(
"""\
def to_dict(self):
result = {}
result['x'] = self.x
return result
"""
)
def test_from_dict_source():
@dataclass
class A:
x: int
assert core._from_dict_source(A) == textwrap.dedent(
"""\
def from_dict(cls, o, *, infer_missing):
args = {}
args['x'] = o.get('x')
return cls(**args)
"""
)
def test_from_dict_source__optional():
@dataclass
class A:
x: Optional[int]
assert core._from_dict_source(A) == textwrap.dedent(
"""\
def from_dict(cls, o, *, infer_missing):
args = {}
args['x'] = o.get('x')
return cls(**args)
"""
)
def test_from_dict_source__default():
@dataclass
class A:
x: int = 1
assert core._from_dict_source(A) == textwrap.dedent(
"""\
def from_dict(cls, o, *, infer_missing):
args = {}
if 'x' in o:
args['x'] = o.get('x')
return cls(**args)
"""
)
def test_from_dict_source__list_nested():
@dataclass_json
@dataclass
class A:
a: str
@dataclass_json
@dataclass
class B:
a: List[A]
assert core._from_dict_source(B) == textwrap.dedent(
"""\
def from_dict(cls, o, *, infer_missing):
args = {}
value = o.get('a')
if value is not None:
value = [A._fastclasses_json_from_dict(__0) for __0 in value]
args['a'] = value
return cls(**args)
"""
)
def test_from_dict_source__enum():
from enum import Enum
class A(Enum):
X = 'ex'
Y = 'why'
@dataclass_json
@dataclass
class B:
a: A
assert core._from_dict_source(B) == textwrap.dedent(
"""\
def from_dict(cls, o, *, infer_missing):
args = {}
value = o.get('a')
if value is not None:
value = A(value)
args['a'] = value
return cls(**args)
"""
)
def test_expr_builder__list_enum():
class A(Enum):
X = 'ex'
Y = 'why'
t = List[A]
builder = core.expr_builder(t)
assert builder('XXX') == '[A(__0) for __0 in XXX]'
def test_expr_builder__list_list_enum():
class A(Enum):
X = 'ex'
Y = 'why'
t = List[List[A]]
builder = core.expr_builder(t)
assert builder('XXX') == '[[A(__1) for __1 in __0] for __0 in XXX]'
def test_expr_builder__list_dataclass():
@dataclass
class A:
X = 'ex'
Y = 'why'
t = List[A]
builder = core.expr_builder(t)
assert builder('XXX') == \
'[A._fastclasses_json_from_dict(__0) for __0 in XXX]'
def test_expr_builder__optional_enum():
class A(Enum):
X = 'ex'
Y = 'why'
t = Optional[A]
builder = core.expr_builder(t)
assert builder('XXX') == 'A(__0) if (__0:=(XXX)) is not None else None'
def test_expr_builder__dict_enum():
class A(Enum):
X = 'ex'
Y = 'why'
t = Dict[str, A]
builder = core.expr_builder(t)
assert builder('XXX') == '{__k0: A(__v0) for __k0,__v0 in (XXX).items()}'
def test_references_types__enum():
class A(Enum):
X = 'ex'
Y = 'why'
@dataclass
class XX:
a: A
assert core.referenced_types(XX) == {'A': A}
| 18.811881 | 77 | 0.525 | from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Dict
import textwrap
from fastclasses_json.api import dataclass_json
from fastclasses_json import core
def test_to_dict_source():
@dataclass
class A:
x: int
assert core._to_dict_source(A) == textwrap.dedent(
"""\
def to_dict(self):
result = {}
result['x'] = self.x
return result
"""
)
def test_from_dict_source():
@dataclass
class A:
x: int
assert core._from_dict_source(A) == textwrap.dedent(
"""\
def from_dict(cls, o, *, infer_missing):
args = {}
args['x'] = o.get('x')
return cls(**args)
"""
)
def test_from_dict_source__optional():
@dataclass
class A:
x: Optional[int]
assert core._from_dict_source(A) == textwrap.dedent(
"""\
def from_dict(cls, o, *, infer_missing):
args = {}
args['x'] = o.get('x')
return cls(**args)
"""
)
def test_from_dict_source__default():
@dataclass
class A:
x: int = 1
assert core._from_dict_source(A) == textwrap.dedent(
"""\
def from_dict(cls, o, *, infer_missing):
args = {}
if 'x' in o:
args['x'] = o.get('x')
return cls(**args)
"""
)
def test_from_dict_source__list_nested():
@dataclass_json
@dataclass
class A:
a: str
@dataclass_json
@dataclass
class B:
a: List[A]
assert core._from_dict_source(B) == textwrap.dedent(
"""\
def from_dict(cls, o, *, infer_missing):
args = {}
value = o.get('a')
if value is not None:
value = [A._fastclasses_json_from_dict(__0) for __0 in value]
args['a'] = value
return cls(**args)
"""
)
def test_from_dict_source__enum():
from enum import Enum
class A(Enum):
X = 'ex'
Y = 'why'
@dataclass_json
@dataclass
class B:
a: A
assert core._from_dict_source(B) == textwrap.dedent(
"""\
def from_dict(cls, o, *, infer_missing):
args = {}
value = o.get('a')
if value is not None:
value = A(value)
args['a'] = value
return cls(**args)
"""
)
def test_expr_builder__list_enum():
class A(Enum):
X = 'ex'
Y = 'why'
t = List[A]
builder = core.expr_builder(t)
assert builder('XXX') == '[A(__0) for __0 in XXX]'
def test_expr_builder__list_list_enum():
class A(Enum):
X = 'ex'
Y = 'why'
t = List[List[A]]
builder = core.expr_builder(t)
assert builder('XXX') == '[[A(__1) for __1 in __0] for __0 in XXX]'
def test_expr_builder__list_dataclass():
@dataclass
class A:
X = 'ex'
Y = 'why'
t = List[A]
builder = core.expr_builder(t)
assert builder('XXX') == \
'[A._fastclasses_json_from_dict(__0) for __0 in XXX]'
def test_expr_builder__optional_enum():
class A(Enum):
X = 'ex'
Y = 'why'
t = Optional[A]
builder = core.expr_builder(t)
assert builder('XXX') == 'A(__0) if (__0:=(XXX)) is not None else None'
def test_expr_builder__dict_enum():
class A(Enum):
X = 'ex'
Y = 'why'
t = Dict[str, A]
builder = core.expr_builder(t)
assert builder('XXX') == '{__k0: A(__v0) for __k0,__v0 in (XXX).items()}'
def test_references_types__enum():
class A(Enum):
X = 'ex'
Y = 'why'
@dataclass
class XX:
a: A
assert core.referenced_types(XX) == {'A': A}
| true | true |
f72237fc84457838936f32dd89967d14328bc26d | 5,984 | py | Python | train-text2mel.py | TraceOnBrainOff/pytorch-dc-tts | 993a0fbace561729b04df2179b41a0a7ea502e93 | [
"MIT"
] | null | null | null | train-text2mel.py | TraceOnBrainOff/pytorch-dc-tts | 993a0fbace561729b04df2179b41a0a7ea502e93 | [
"MIT"
] | null | null | null | train-text2mel.py | TraceOnBrainOff/pytorch-dc-tts | 993a0fbace561729b04df2179b41a0a7ea502e93 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Train the Text2Mel network. See: https://arxiv.org/abs/1710.08969"""
__author__ = 'Erdene-Ochir Tuguldur'
import sys
import time
import argparse
from tqdm import *
import numpy as np
import torch
import torch.nn.functional as F
# project imports
from models import Text2Mel
from hyperparams import HParams as hp
from logger import Logger
from utils import get_last_checkpoint_file_name, load_checkpoint, save_checkpoint, load_checkpoint_test
from datasets.data_loader import Text2MelDataLoader
# CLI: the only required argument selects which dataset backend to use.
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--dataset", required=True, choices=['ljspeech', 'mbspeech','emovdb'], help='dataset name')
args = parser.parse_args()
# Import the dataset class lazily so only the chosen backend is loaded.
if args.dataset == 'ljspeech':
    from datasets.lj_speech import vocab, LJSpeech as SpeechDataset
elif args.dataset == 'emovdb':
    from datasets.emovdb import vocab, Emovdb as SpeechDataset
else:
    from datasets.mb_speech import vocab, MBSpeech as SpeechDataset
use_gpu = torch.cuda.is_available()
print('use_gpu', use_gpu)
if use_gpu:
    # Let cuDNN benchmark and cache the fastest kernels for fixed shapes.
    torch.backends.cudnn.benchmark = True
train_data_loader = Text2MelDataLoader(text2mel_dataset=SpeechDataset(['texts', 'mels', 'mel_gates']), batch_size=64,
                                       mode='train')
valid_data_loader = Text2MelDataLoader(text2mel_dataset=SpeechDataset(['texts', 'mels', 'mel_gates']), batch_size=64,
                                       mode='valid')
# NOTE(review): the model is pinned to CPU here even when CUDA is
# available — confirm whether .cuda() was intended.
text2mel = Text2Mel(vocab).cpu()
start_timestamp = int(time.time() * 1000)
start_epoch = 0
global_step = 0
logger = Logger(args.dataset, 'text2mel')
# Resume training from the most recent checkpoint, if one exists.
last_checkpoint_file_name = get_last_checkpoint_file_name(logger.logdir)
if last_checkpoint_file_name:
    print("loading the last checkpoint: %s" % last_checkpoint_file_name)
    start_epoch, global_step = load_checkpoint(last_checkpoint_file_name, text2mel, None)
optimizer = torch.optim.Adam(text2mel.parameters(), lr=hp.text2mel_lr)
def get_lr():
    """Return the learning rate currently set on the (module-level) optimizer."""
    first_group = optimizer.param_groups[0]
    return first_group['lr']
def lr_decay(step, warmup_steps=4000):
    """Apply a Noam-style warmup/decay schedule to the optimizer's learning rate."""
    step_num = step + 1
    # Linear warmup for the first warmup_steps, then inverse-sqrt decay.
    scale = warmup_steps ** 0.5 * min(step_num * warmup_steps ** -1.5, step_num ** -0.5)
    optimizer.param_groups[0]['lr'] = hp.text2mel_lr * scale
def train(train_epoch, phase='train'):
    """Run one epoch of training or validation for the Text2Mel model.

    Args:
        train_epoch: int epoch number (used for logging/checkpointing).
        phase: 'train' to optimise, 'valid' to only evaluate.
    Returns:
        float average total loss (L1 + guided attention) over the epoch.
    """
    global global_step
    lr_decay(global_step)
    print("epoch %3d with lr=%.02e" % (train_epoch, get_lr()))
    text2mel.train() if phase == 'train' else text2mel.eval()
    torch.set_grad_enabled(True) if phase == 'train' else torch.set_grad_enabled(False)
    data_loader = train_data_loader if phase == 'train' else valid_data_loader
    it = 0
    running_loss = 0.0
    running_l1_loss = 0.0
    running_att_loss = 0.0
    pbar = tqdm(data_loader, unit="audios", unit_scale=data_loader.batch_size, disable=hp.disable_progress_bar)
    for batch in pbar:
        L, S, gates = batch['texts'], batch['mels'], batch['mel_gates']
        S = S.permute(0, 2, 1)  # TODO: because of pre processing
        B, N = L.size()  # batch size and text count
        _, n_mels, T = S.size()  # number of melspectrogram bins and time
        assert gates.size(0) == B  # TODO: later remove
        assert gates.size(1) == T
        # Teacher-forcing target: the mel spectrogram shifted one frame
        # left, zero-padded at the end.
        S_shifted = torch.cat((S[:, :, 1:], torch.zeros(B, n_mels, 1)), 2)
        S.requires_grad = False
        S_shifted.requires_grad = False
        gates.requires_grad = False
        # Guided-attention weight matrix: small near the diagonal
        # (n/N close to t/T), approaching 1 far away, so off-diagonal
        # attention is penalised.
        def W_nt(_, n, t, g=0.2):
            return 1.0 - np.exp(-((n / float(N) - t / float(T)) ** 2) / (2 * g ** 2))
        W = np.fromfunction(W_nt, (B, N, T), dtype=np.float32)
        W = torch.from_numpy(W)
        L = L.cpu()
        S = S.cpu()
        S_shifted = S_shifted.cpu()
        W = W.cpu()
        gates = gates.cpu()
        Y_logit, Y, A = text2mel(L, S, monotonic_attention=True)
        l1_loss = F.l1_loss(Y, S_shifted)
        # Mask the attention loss so padded frames do not contribute.
        masks = gates.reshape(B, 1, T).float()
        att_loss = (A * W * masks).mean()
        loss = l1_loss + att_loss
        if phase == 'train':
            lr_decay(global_step)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            global_step += 1
        it += 1
        loss, l1_loss, att_loss = loss.item(), l1_loss.item(), att_loss.item()
        running_loss += loss
        running_l1_loss += l1_loss
        running_att_loss += att_loss
        if phase == 'train':
            # update the progress bar with running averages
            pbar.set_postfix({
                'l1': "%.05f" % (running_l1_loss / it),
                'att': "%.05f" % (running_att_loss / it)
            })
            logger.log_step(phase, global_step, {'loss_l1': l1_loss, 'loss_att': att_loss},
                            {'mels-true': S[:1, :, :], 'mels-pred': Y[:1, :, :], 'attention': A[:1, :, :]})
            if global_step % 1000 == 0:
                # checkpoint at every 1000th step
                save_checkpoint(logger.logdir, train_epoch, global_step, text2mel, optimizer)
    epoch_loss = running_loss / it
    epoch_l1_loss = running_l1_loss / it
    epoch_att_loss = running_att_loss / it
    logger.log_epoch(phase, global_step, {'loss_l1': epoch_l1_loss, 'loss_att': epoch_att_loss})
    return epoch_loss
# Alternate one training epoch and one validation epoch until the
# configured maximum number of optimisation steps is reached.
since = time.time()
epoch = start_epoch
while True:
    train_epoch_loss = train(epoch, phase='train')
    time_elapsed = time.time() - since
    time_str = 'total time elapsed: {:.0f}h {:.0f}m {:.0f}s '.format(time_elapsed // 3600, time_elapsed % 3600 // 60,
                                                                     time_elapsed % 60)
    print("train epoch loss %f, step=%d, %s" % (train_epoch_loss, global_step, time_str))
    valid_epoch_loss = train(epoch, phase='valid')
    print("valid epoch loss %f" % valid_epoch_loss)
    epoch += 1
    if global_step >= hp.text2mel_max_iteration:
        print("max step %d (current step %d) reached, exiting..." % (hp.text2mel_max_iteration, global_step))
        sys.exit(0)
| 34.589595 | 117 | 0.639873 |
__author__ = 'Erdene-Ochir Tuguldur'
import sys
import time
import argparse
from tqdm import *
import numpy as np
import torch
import torch.nn.functional as F
from models import Text2Mel
from hyperparams import HParams as hp
from logger import Logger
from utils import get_last_checkpoint_file_name, load_checkpoint, save_checkpoint, load_checkpoint_test
from datasets.data_loader import Text2MelDataLoader
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--dataset", required=True, choices=['ljspeech', 'mbspeech','emovdb'], help='dataset name')
args = parser.parse_args()
if args.dataset == 'ljspeech':
from datasets.lj_speech import vocab, LJSpeech as SpeechDataset
elif args.dataset == 'emovdb':
from datasets.emovdb import vocab, Emovdb as SpeechDataset
else:
from datasets.mb_speech import vocab, MBSpeech as SpeechDataset
use_gpu = torch.cuda.is_available()
print('use_gpu', use_gpu)
if use_gpu:
torch.backends.cudnn.benchmark = True
train_data_loader = Text2MelDataLoader(text2mel_dataset=SpeechDataset(['texts', 'mels', 'mel_gates']), batch_size=64,
mode='train')
valid_data_loader = Text2MelDataLoader(text2mel_dataset=SpeechDataset(['texts', 'mels', 'mel_gates']), batch_size=64,
mode='valid')
text2mel = Text2Mel(vocab).cpu()
start_timestamp = int(time.time() * 1000)
start_epoch = 0
global_step = 0
logger = Logger(args.dataset, 'text2mel')
last_checkpoint_file_name = get_last_checkpoint_file_name(logger.logdir)
if last_checkpoint_file_name:
print("loading the last checkpoint: %s" % last_checkpoint_file_name)
start_epoch, global_step = load_checkpoint(last_checkpoint_file_name, text2mel, None)
optimizer = torch.optim.Adam(text2mel.parameters(), lr=hp.text2mel_lr)
def get_lr():
return optimizer.param_groups[0]['lr']
def lr_decay(step, warmup_steps=4000):
new_lr = hp.text2mel_lr * warmup_steps ** 0.5 * min((step + 1) * warmup_steps ** -1.5, (step + 1) ** -0.5)
optimizer.param_groups[0]['lr'] = new_lr
def train(train_epoch, phase='train'):
global global_step
lr_decay(global_step)
print("epoch %3d with lr=%.02e" % (train_epoch, get_lr()))
text2mel.train() if phase == 'train' else text2mel.eval()
torch.set_grad_enabled(True) if phase == 'train' else torch.set_grad_enabled(False)
data_loader = train_data_loader if phase == 'train' else valid_data_loader
it = 0
running_loss = 0.0
running_l1_loss = 0.0
running_att_loss = 0.0
pbar = tqdm(data_loader, unit="audios", unit_scale=data_loader.batch_size, disable=hp.disable_progress_bar)
for batch in pbar:
L, S, gates = batch['texts'], batch['mels'], batch['mel_gates']
S = S.permute(0, 2, 1)
B, N = L.size()
_, n_mels, T = S.size()
assert gates.size(0) == B
assert gates.size(1) == T
S_shifted = torch.cat((S[:, :, 1:], torch.zeros(B, n_mels, 1)), 2)
S.requires_grad = False
S_shifted.requires_grad = False
gates.requires_grad = False
def W_nt(_, n, t, g=0.2):
return 1.0 - np.exp(-((n / float(N) - t / float(T)) ** 2) / (2 * g ** 2))
W = np.fromfunction(W_nt, (B, N, T), dtype=np.float32)
W = torch.from_numpy(W)
L = L.cpu()
S = S.cpu()
S_shifted = S_shifted.cpu()
W = W.cpu()
gates = gates.cpu()
Y_logit, Y, A = text2mel(L, S, monotonic_attention=True)
l1_loss = F.l1_loss(Y, S_shifted)
masks = gates.reshape(B, 1, T).float()
att_loss = (A * W * masks).mean()
loss = l1_loss + att_loss
if phase == 'train':
lr_decay(global_step)
optimizer.zero_grad()
loss.backward()
optimizer.step()
global_step += 1
it += 1
loss, l1_loss, att_loss = loss.item(), l1_loss.item(), att_loss.item()
running_loss += loss
running_l1_loss += l1_loss
running_att_loss += att_loss
if phase == 'train':
pbar.set_postfix({
'l1': "%.05f" % (running_l1_loss / it),
'att': "%.05f" % (running_att_loss / it)
})
logger.log_step(phase, global_step, {'loss_l1': l1_loss, 'loss_att': att_loss},
{'mels-true': S[:1, :, :], 'mels-pred': Y[:1, :, :], 'attention': A[:1, :, :]})
if global_step % 1000 == 0:
save_checkpoint(logger.logdir, train_epoch, global_step, text2mel, optimizer)
epoch_loss = running_loss / it
epoch_l1_loss = running_l1_loss / it
epoch_att_loss = running_att_loss / it
logger.log_epoch(phase, global_step, {'loss_l1': epoch_l1_loss, 'loss_att': epoch_att_loss})
return epoch_loss
since = time.time()
epoch = start_epoch
while True:
train_epoch_loss = train(epoch, phase='train')
time_elapsed = time.time() - since
time_str = 'total time elapsed: {:.0f}h {:.0f}m {:.0f}s '.format(time_elapsed // 3600, time_elapsed % 3600 // 60,
time_elapsed % 60)
print("train epoch loss %f, step=%d, %s" % (train_epoch_loss, global_step, time_str))
valid_epoch_loss = train(epoch, phase='valid')
print("valid epoch loss %f" % valid_epoch_loss)
epoch += 1
if global_step >= hp.text2mel_max_iteration:
print("max step %d (current step %d) reached, exiting..." % (hp.text2mel_max_iteration, global_step))
sys.exit(0)
| true | true |
f722384996b08f28631f548d4a200af6eb57a480 | 1,467 | py | Python | config.py | emphasize/solaredge-pv-monitoring-skill | 88b01e221fee42417b1d88c869b0f3525337812e | [
"Apache-2.0"
] | null | null | null | config.py | emphasize/solaredge-pv-monitoring-skill | 88b01e221fee42417b1d88c869b0f3525337812e | [
"Apache-2.0"
] | null | null | null | config.py | emphasize/solaredge-pv-monitoring-skill | 88b01e221fee42417b1d88c869b0f3525337812e | [
"Apache-2.0"
] | null | null | null | from sqlalchemy import Column, Integer, Float, DateTime
# SolarEdge monitoring API credentials (replace with your own).
SE_CREDENTIALS = {"apiKey": "XXXXXXXXX",
                  "siteID": "12345"}
# SQL database connection settings.
SQL_CREDENTIALS = {"user": "user",
                   "password": "password",
                   "host": "ip:port"
                   }
# TLS client certificate files for the database connection.
SQL_SSL = {"CA": "/path/to/ca.pem",
           "CKEY": "/path/to/client-key.pem",
           "CCERT": "/path/to/client-cert.pem"}
# TODO: make table renaming possible.
# To add your own columns, append them after the last table entry.
SQL_DB_SCHEMAS = {"energy": {'id': Column(Integer, primary_key=True),
                             'Time': Column(DateTime),
                             'Production': Column(Float),
                             'FeedIn': Column(Float),
                             'SelfConsumption': Column(Float),
                             'Purchased': Column(Float),
                             'Consumption': Column(Float)}
                  }
# Maps a SolarEdge API endpoint name to the table it feeds.
SE_API_TABLE = {"energyDetails": "energy"}
SQL_TABLES_PREFIX = {"energy": "solar"}
# Creates new tables in DAY, WEEK or MONTH -ly timespans, e.g.:
# SQL_SPLIT_TABLE_TIME = {"energy": "year"}
# If a table is not mentioned here it's a continuous table append.
SQL_SPLIT_TABLE_TIME = {}
# Derivative tables naming convention:
# a derivative table (which inherits the schema of its basetable)
# has to be named with the basename in front.
SQL_DERIVATIVE_TABLES = {"energy": ["day", "week", "month", "year"]}
| 39.648649 | 69 | 0.576005 | from sqlalchemy import Column, Integer, Float, DateTime
SE_CREDENTIALS = {"apiKey": "XXXXXXXXX",
"siteID": "12345"}
SQL_CREDENTIALS = {"user": "user",
"password": "password",
"host": "ip:port"
}
SQL_SSL = {"CA": "/path/to/ca.pem",
"CKEY": "/path/to/client-key.pem",
"CCERT": "/path/to/client-cert.pem"}
SQL_DB_SCHEMAS = {"energy": {'id': Column(Integer, primary_key=True),
'Time': Column(DateTime),
'Production': Column(Float),
'FeedIn': Column(Float),
'SelfConsumption': Column(Float),
'Purchased': Column(Float),
'Consumption': Column(Float)}
}
SE_API_TABLE = {"energyDetails": "energy"}
SQL_TABLES_PREFIX = {"energy": "solar"}
SQL_SPLIT_TABLE_TIME = {}
# derivative tables naming convention:
# the derivative table (table inherits schema of basetable)
# has to be named with the basename in front
SQL_DERIVATIVE_TABLES = {"energy": ["day", "week", "month", "year"]}
| true | true |
f722385a3832caaf47418725fb9eef7f7347a061 | 5,595 | py | Python | tests/test.py | jonsim/tiny-backup | 743a6dcc80d20046316a7d89da8b8c58691233a5 | [
"MIT"
] | 1 | 2017-06-04T00:23:33.000Z | 2017-06-04T00:23:33.000Z | tests/test.py | jonsim/tiny-backup | 743a6dcc80d20046316a7d89da8b8c58691233a5 | [
"MIT"
] | null | null | null | tests/test.py | jonsim/tiny-backup | 743a6dcc80d20046316a7d89da8b8c58691233a5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# (c) Copyright 2017 Jonathan Simmonds
#
# Licensed under the MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
Main test runner for tiny-backup.
Maintained at https://github.com/jonsim/tiny-backup
"""
import os.path
import subprocess
import sys
import unittest
#
# Shared testing defines.
#
GPG_HOME = os.path.join(sys.path[0], 'gpg-test-homedir')
#
# Shared testing methods.
#
def create_ascii_file(path, kb_size=16):
    """Create an ASCII text file of approximately *kb_size* KB at *path*.

    The file is filled with 80-byte lines of sixteen zero-padded counters;
    writing stops once the accumulated size reaches the requested KB count.

    Args:
        path: string path for the file to be written to; must not exist yet.
        kb_size: int (approximate) size, in KB, of the file to create.
    """
    assert not os.path.exists(path)
    with open(path, 'w') as out_file:
        written = 0
        line_no = 0
        while written < kb_size * 1024:
            start = line_no * 16
            numbers = ' '.join('%04d' % i for i in range(start, start + 16))
            line = numbers + '\n'
            out_file.write(line)
            written += len(line)
            line_no += 1
def create_binary_file(path, kb_size=16):
    """Create a binary file of exactly *kb_size* KB at *path*.

    The payload is the byte sequence 0..255 repeated 4 * kb_size times,
    i.e. 1024 * kb_size bytes in total.

    Args:
        path: string path for the file to be written to; must not exist yet.
        kb_size: int size, in KB, of the file to create.
    """
    assert not os.path.exists(path)
    with open(path, 'wb') as out_file:
        # list(range(...)) keeps this working on both Python 2 and 3; the
        # original `range(256) * 4 * kb_size` relied on Python 2's
        # list-returning range and raises TypeError under Python 3.
        out_file.write(bytearray(list(range(256)) * 4 * kb_size))
def create_test_dir(path):
    """Create a test directory at *path* and populate it with sample files.

    Args:
        path: string path for the directory to be created at; must not exist.
    """
    assert not os.path.exists(path)
    os.makedirs(path)
    # (filename, generator, size-in-KB) triples for the fixture files.
    fixtures = [
        ('file.txt', create_ascii_file, 8),
        ('file.log', create_ascii_file, 24),
        ('file.bin', create_binary_file, 16),
    ]
    for name, make_file, kb in fixtures:
        make_file(os.path.join(path, name), kb)
def create_test_structure(path):
    """Create a nested test directory structure with files at *path*.

    Args:
        path: string path for the directory structure to be created at;
            must not exist.
    """
    assert not os.path.exists(path)
    os.makedirs(path)
    # Two populated directories, one of them with a populated subdirectory.
    dir1 = os.path.join(path, 'test_dir1')
    create_test_dir(dir1)
    create_test_dir(os.path.join(dir1, 'test_subdir'))
    create_test_dir(os.path.join(path, 'test_dir2'))
    # Plus one text and one binary file at the root.
    create_ascii_file(os.path.join(path, 'root_file.txt'))
    create_binary_file(os.path.join(path, 'root_file.bin'))
def get_file_md5(path):
    """Return the hex md5 digest of the contents of the file at *path*.

    Args:
        path: string path of the file to hash.
    Returns:
        string md5sum hash.
    """
    import hashlib
    digest = hashlib.md5()
    with open(path, 'rb') as in_file:
        digest.update(in_file.read())
    return digest.hexdigest()
def get_dir_md5(path):
    """Return the hex md5 digest of a directory tree's layout and contents.

    The digest covers, in sorted order, each directory's relative path, its
    subdirectory names and the raw bytes of every contained file, so two
    trees hash equal iff their structure and file contents match.

    Args:
        path: string path of the directory to hash.
    Returns:
        string md5sum hash.
    """
    import hashlib
    hash_md5 = hashlib.md5()
    for root, dirs, files in os.walk(path):
        # Sort in place so the walk order (and hence the digest) is stable.
        dirs.sort()
        files.sort()
        rel_root = '.' + root[len(path):]
        # Encode names before hashing: hashlib requires bytes on Python 3
        # (the original passed str, which raises TypeError there); on
        # Python 2 encoding ASCII names produces identical digests.
        hash_md5.update(rel_root.encode('utf-8'))
        for directory in dirs:
            hash_md5.update(directory.encode('utf-8'))
        for sub_file in files:
            with open(os.path.join(root, sub_file), 'rb') as in_file:
                hash_md5.update(in_file.read())
    return hash_md5.hexdigest()
def get_file_type(path):
    """Return the file type of *path* as reported by the `file` command.

    Args:
        path: string path of the file whose type will be determined.
    Returns:
        string file type.
    """
    output = subprocess.check_output(['file', '--brief', path])
    return output.strip()
#
# Main test runner.
#
def main():
    """Run all test suites (unit tests first, then system tests).

    Exits the process with status 0 on full success, 1 on the first
    failing suite.
    """
    # NOTE: this file uses Python 2 print statements throughout.
    # Life is too short to try to make Python's uniquely terrible package system
    # accept this as a nice package import.
    from unittests import TestBackupMethods
    from systemtests import TestBackupSystem
    # Load the tests.
    runner = unittest.TextTestRunner(descriptions=False, verbosity=2)
    ut_suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestBackupMethods)
    st_suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestBackupSystem)
    # Run the unit tests; bail out early if any fail.
    print 'Running unit tests...\n'
    ut_res = runner.run(ut_suite)
    if not ut_res.wasSuccessful():
        sys.exit(1)
    print '\n\nRunning system tests...\n'
    st_res = runner.run(st_suite)
    if not st_res.wasSuccessful():
        sys.exit(1)
    print '\n\nAll tests passed.'
    sys.exit(0)
# Entry point.
if __name__ == "__main__":
    main()
| 29.919786 | 91 | 0.664164 |
"""
Main test runner for tiny-backup.
Maintained at https://github.com/jonsim/tiny-backup
"""
import os.path
import subprocess
import sys
import unittest
GPG_HOME = os.path.join(sys.path[0], 'gpg-test-homedir')
def create_ascii_file(path, kb_size=16):
"""
Creates an ASCII file.
Args:
path: string path for the file to be written to.
kb_size: int (approximate) size, in KB, of the file to create.
"""
assert not os.path.exists(path)
with open(path, 'w') as out_file:
num = 0
file_size = 0
while (file_size / 1024) < kb_size:
line = ' '.join(["%04d" % (i) for i in range(num * 16, (num + 1) * 16)]) + '\n'
out_file.write(line)
num += 1
file_size += len(line)
def create_binary_file(path, kb_size=16):
"""
Creates a binary file.
Args:
path: string path for the file to be written to.
kb_size: int (approximate) size, in KB, of the file to create.
"""
assert not os.path.exists(path)
with open(path, 'wb') as out_file:
out_file.write(bytearray(range(256) * 4 * kb_size))
def create_test_dir(path):
"""
Creates a test directory and populates it full of files.
Args:
path: string path for the directory to be created at.
"""
assert not os.path.exists(path)
os.makedirs(path)
create_ascii_file(os.path.join(path, 'file.txt'), 8)
create_ascii_file(os.path.join(path, 'file.log'), 24)
create_binary_file(os.path.join(path, 'file.bin'), 16)
def create_test_structure(path):
"""
Creates a test directory structure with files and directories.
Args:
path: string path for the directory structure to be created at.
"""
assert not os.path.exists(path)
os.makedirs(path)
create_test_dir(os.path.join(path, 'test_dir1'))
create_test_dir(os.path.join(path, 'test_dir1', 'test_subdir'))
create_test_dir(os.path.join(path, 'test_dir2'))
create_ascii_file(os.path.join(path, 'root_file.txt'))
create_binary_file(os.path.join(path, 'root_file.bin'))
def get_file_md5(path):
"""
Retrieves the md5sum of a file's contents.
Args:
path: string path of the file to hash.
Returns:
string md5sum hash.
"""
import hashlib
hash_md5 = hashlib.md5()
with open(path, 'rb') as in_file:
hash_md5.update(in_file.read())
return hash_md5.hexdigest()
def get_dir_md5(path):
"""
Retrieves the md5sum for a directory and all its contents.
Args:
path: string path of the directory to hash.
Returns:
string md5sum hash.
"""
import hashlib
hash_md5 = hashlib.md5()
for root, dirs, files in os.walk(path):
dirs.sort()
files.sort()
rel_root = '.' + root[len(path):]
hash_md5.update(rel_root)
for directory in dirs:
hash_md5.update(directory)
for sub_file in files:
with open(os.path.join(root, sub_file), 'rb') as in_file:
hash_md5.update(in_file.read())
return hash_md5.hexdigest()
def get_file_type(path):
"""
Determines the file type of a path as given by the 'file' command.
Args:
path: string path of the file whose type will be determined.
Returns:
string file type.
"""
return subprocess.check_output(['file', '--brief', path]).strip()
#
# Main test runner.
#
def main():
"""Run all test suites."""
# Life is too short to try to make Python's uniquely terrible package system
from unittests import TestBackupMethods
from systemtests import TestBackupSystem
runner = unittest.TextTestRunner(descriptions=False, verbosity=2)
ut_suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestBackupMethods)
st_suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestBackupSystem)
print 'Running unit tests...\n'
ut_res = runner.run(ut_suite)
if not ut_res.wasSuccessful():
sys.exit(1)
print '\n\nRunning system tests...\n'
st_res = runner.run(st_suite)
if not st_res.wasSuccessful():
sys.exit(1)
print '\n\nAll tests passed.'
sys.exit(0)
if __name__ == "__main__":
main()
| false | true |
f722389d760f4cedf63fda31313b14ec8ee7c7fa | 12,423 | py | Python | main/management/commands/updatecomponents.py | biojs/biojs-backend | 8bbd34b7be455ecd1be99d549bf958e17623c1c5 | [
"MIT"
] | 1 | 2020-02-06T19:10:55.000Z | 2020-02-06T19:10:55.000Z | main/management/commands/updatecomponents.py | biojs/biojs-backend | 8bbd34b7be455ecd1be99d549bf958e17623c1c5 | [
"MIT"
] | 30 | 2018-05-30T22:58:27.000Z | 2022-03-12T00:52:21.000Z | main/management/commands/updatecomponents.py | biojs/biojs-backend | 8bbd34b7be455ecd1be99d549bf958e17623c1c5 | [
"MIT"
] | 2 | 2019-01-29T12:12:07.000Z | 2019-02-20T04:19:31.000Z | from django.core.management import BaseCommand
import urllib2, json, urllib, base64
from main.models import *
# Fall back to empty OAuth credentials when the Django settings module
# doesn't define them (e.g. local dev without config); GitHub requests
# then run unauthenticated with much lower rate limits.
try:
    from biojs.settings import GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET
except:
    print('ERROR: Could not load config!')
    GITHUB_CLIENT_ID = ''
    GITHUB_CLIENT_SECRET = ''
from datetime import datetime
import pytz
import ast
import re
# Get sniper data
'''
https://rawgit.com/cytoscape/cytoscape.js/master/package.json
"sniper" key, has "js" and "css". Search for "first"
'''
def get_npm_data():
    """Fetch all npm packages tagged biojs/bionode from the npm registry.

    Returns:
        dict: parsed JSON response of the registry search endpoint
        (up to 500 results).
    """
    # Spoof a browser user agent; default urllib agents can get rejected.
    headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'}
    search_url = "http://registry.npmjs.com/-/v1/search?text=keywords:biojs,bionode&size=500"
    request = urllib2.Request(search_url, headers=headers)
    return json.load(urllib2.urlopen(request))
def get_npm_downloads(url):
    """Return last week's npm download count for the package named in *url*.

    Args:
        url: npm package page URL; the trailing path segment is taken as
            the package name.
    Returns:
        int total downloads over the 'last-week' range.
    """
    package = url.split('/')[-1]
    date_range = 'last-week'
    stats_url = 'https://api.npmjs.org/downloads/range/' + date_range + '/' + package
    headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'}
    request = urllib2.Request(stats_url, headers=headers)
    data = json.load(urllib2.urlopen(request))
    # Sum the per-day counts into a single weekly total.
    total = 0
    for day in data['downloads']:
        total += day['downloads']
    return total
def send_GET_request(url, user=None, password=None):
    """Open *url* and return the response, optionally with HTTP basic auth.

    Args:
        url: string URL to fetch.
        user, password: if both are given, sent as a Basic Authorization
            header; otherwise the request is unauthenticated.
    """
    request = urllib2.Request(url)
    if user is not None and password is not None:
        credentials = base64.encodestring('%s:%s' % (user, password)).replace('\n', '')
        request.add_header('Authorization', 'Basic %s' % credentials)
    return urllib2.urlopen(request)
def create_jsdelivr_link(owner, repo, file_path, commit=None):
    """Build a jsDelivr CDN URL for a file in a GitHub repository.

    Args:
        owner: GitHub user/organisation name.
        repo: repository name.
        file_path: path of the file within the repo (with leading slash).
        commit: optional commit hash/ref; pins the URL when given.
    Returns:
        string CDN URL.
    """
    ref = '@' + commit if commit else ''
    return str('https://cdn.jsdelivr.net/gh/' + owner + '/' + repo + ref + file_path)
def is_absolute_dependency(dep):
    """Return True if *dep* is an absolute URL (scheme:// or protocol-relative //)."""
    match = re.match(r'^(?:[a-z]+:)?//', dep)
    return match is not None
def get_owner_and_repo_from_github_url(url):
    """Extract (owner, repo) from a GitHub API repos URL.

    Expects URLs shaped like
    'https://api.github.com/repos/<owner>/<repo>[?query]'; any query
    string is stripped before splitting.
    """
    parts = url.split('?')[0].split('/')
    owner, repo = parts[4], parts[5]
    return owner, repo
### store the dependency urls and snippet urls
def update_visualizations(component, commit_hash, test=False):
    """Refresh a component's sniper dependency and snippet records.

    Reads package.json at *commit_hash* via jsDelivr, stores the JS/CSS
    dependency URLs and sniper settings, then registers every .js snippet
    found in the repo's snippets directory.

    Args:
        component: Component model instance with a github_url set.
        commit_hash: git commit hash used to pin the jsDelivr URLs.
        test: if True, suppress the snippets-URL debug print.
    """
    owner, repo_name = get_owner_and_repo_from_github_url(component.github_url)
    try:
        url = create_jsdelivr_link(owner, repo_name, '/package.json', commit_hash)
        response = send_GET_request(url)
        package = json.load(response)
        sniper_data = package["sniper"]
    except KeyError:
        # package.json has no "sniper" section; nothing to visualise.
        print('No sniper info in ', repo_name)
        return
    buildJS = sniper_data.get('buildJS', [])
    js = sniper_data.get('js', [])
    buildCSS = sniper_data.get('buildCSS', [])
    css = sniper_data.get('css', [])
    # Move absolute links from js to buildJS and same for css.
    # NOTE(review): filter() returns a list on Python 2 only; under
    # Python 3 these would become one-shot iterators — confirm runtime.
    buildJS = buildJS + filter(lambda l: is_absolute_dependency(l), js)
    js = filter(lambda l: not is_absolute_dependency(l), js)
    buildCSS = buildCSS + filter(lambda l: is_absolute_dependency(l), css)
    css = filter(lambda l: not is_absolute_dependency(l), css)
    # Persist the dependency URLs to the db.
    for dep in buildJS:
        JSDependency.objects.create(component=component, js_url=dep, sniper_data_value=dep)
    for dep in buildCSS:
        CSSDependency.objects.create(component=component, css_url=dep, sniper_data_value=dep)
    sniperData, created = SniperData.objects.get_or_create(component=component)
    if 'noBrowserify' in sniper_data:
        sniperData.no_browserify = sniper_data['noBrowserify']
    elif len(js) == 0 and len(css) == 0:
        # No bundleable sources at all; treat as not browserify-able.
        sniperData.no_browserify = True
    sniperData.wzrd_url = '#' if sniperData.no_browserify else 'https://wzrd.in/bundle/' + component.name
    if 'snippets' in sniper_data:
        sniperData.snippets_dir_name = sniper_data['snippets'][0]
    sniperData.save()
    ### For Snippets URLs: list the snippets directory via the GitHub API
    ### and keep only the .js files.
    try:
        url = str('https://api.github.com/repos/' + owner + '/' + repo_name + '/contents/' + sniperData.snippets_dir_name + '?ref=master')
        if not test:
            print(url)
        snippets_data = send_GET_request(url, GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET)
        snippets = json.load(snippets_data)
        filtered_snippets = filter(lambda s: s['name'].endswith('.js'), snippets)
    except Exception as e:
        # NOTE(review): on failure filtered_snippets stays unbound and the
        # loop below raises NameError — confirm whether a `return` is wanted.
        print('ERROR: Something went wrong getting snippets data!')
        print(e)
    for snip in filtered_snippets:
        try:
            url = create_jsdelivr_link(owner, repo_name, str('/' + sniperData.snippets_dir_name + '/' + snip['name']), commit_hash)
            name = snip.get('name', '').split('.')[0]
            Snippet.objects.update_or_create(name=name, url=url, sniperData=sniperData)
        except Exception as e:
            print('ERROR: Something went wrong creating a new Snippet')
            print(e)
class Command(BaseCommand):
    """Management command that syncs BioJS component metadata.

    Pulls the package list from the npm registry, then enriches each
    Component record with GitHub repository statistics, contributors and
    sniper visualisation data. NOTE: written for Python 2 (see the bare
    `print GITHUB_CLIENT_ID` statement below).
    """
    # shown during --help
    help = "Command to update the details of all the components from Github"
    def handle(self, *args, **options):
        # One entry per npm package tagged biojs/bionode.
        all_components = get_npm_data()['objects']
        for component in all_components:
            component_data = component['package']
            # Get-or-create the Component row keyed by the npm package name.
            try:
                _component = Component.objects.get(name=component_data['name'])
                print ('exists')
            except:
                _component = Component.objects.create(name=component_data['name'])
            print (_component.name)
            # Each npm field is optional, hence the try/except-pass blocks.
            try:
                _component.version = component_data['version']
            except:
                pass
            try:
                _component.short_description = component_data['description']
            except:
                pass
            try:
                tags = component_data['keywords']
            except:
                tags = []
            for tag in tags:
                try:
                    _tag = Tag.objects.get(name=tag)
                except:
                    _tag = Tag.objects.create(name=tag)
                _component.tags.add(_tag)
                if not _tag in _component.tags.all():
                    _component.tags.add(_tag)
            # npm timestamps are naive; localise to UTC before saving.
            try:
                str_date = component_data['date']
                req_date = datetime.strptime(str_date, "%Y-%m-%dT%H:%M:%S.%fZ") #This object is timezone unaware
                aware_date = pytz.utc.localize(req_date) #This object is now timezone aware
                _component.modified_time = aware_date
            except:
                pass
            try:
                _component.npm_url = component_data['links']['npm']
            except:
                pass
            try:
                _component.homepage_url = component_data['links']['homepage']
            except:
                pass
            # Normalise the repository link into a GitHub API URL.
            try:
                github_url = component_data['links']['repository']
                url_list = github_url.split('/')
                _component.github_url = 'https://api.github.com/repos/' + str(url_list[3]) + '/' + str(url_list[4])
            except:
                pass
            try:
                _component.author = component_data['author']['name']
            except:
                pass
            try:
                _component.author_email = component_data['author']['email']
            except:
                pass
            _component.save()
            # GitHub enrichment only runs when a repository URL was found.
            if _component.github_url:
                print (_component.github_url)
                try:
                    response = send_GET_request(_component.github_url, GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET)
                    github_data = json.load(response)
                except urllib2.HTTPError as e:
                    print('Error getting github data!')
                    print(e)
                    print GITHUB_CLIENT_ID
                    continue
                except Exception as e:
                    print('Unexpected error accessing Github!')
                    print(e)
                    continue
                _component.stars = github_data['stargazers_count']
                _component.forks = github_data['forks']
                # subscriber_count
                _component.watchers = github_data['subscribers_count']
                _component.icon_url = github_data['owner']['avatar_url']
                _component.open_issues = github_data['open_issues']
                try:
                    _component.license = github_data['license']['name']
                except:
                    pass
                try:
                    str_date = github_data['created_at']
                    req_date = datetime.strptime(str_date, "%Y-%m-%dT%H:%M:%SZ") #This object is timezone unaware
                    aware_date = pytz.utc.localize(req_date) #This object is now timezone aware
                    _component.created_time = aware_date
                except:
                    pass
                # try:
                str_date = github_data['updated_at']
                req_date = datetime.strptime(str_date, "%Y-%m-%dT%H:%M:%SZ") #This object is timezone unaware
                aware_date = pytz.utc.localize(req_date) #This object is now timezone aware
                # if _component.github_update_time:
                #     if aware_date > _component.github_update_time:
                #         _component.github_updated_time = aware_date
                #         latest_commit_hash = get_commit_hash(github_data['commits_url'])
                #         _component.latest_commit_hash = latest_commit_hash
                #         update_visualizations(_component, latest_commit_hash)
                # else:
                _component.github_update_time = aware_date
                try:
                    response = send_GET_request(github_data['commits_url'].split('{')[0], GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET)
                    latest_commit = json.load(response)[0]
                    latest_commit_hash = latest_commit['sha']
                    _component.latest_commit_hash = latest_commit_hash
                except:
                    print('Error getting commit hash!')
                    pass
                # NOTE(review): if the commit lookup above failed,
                # latest_commit_hash may be unbound here — confirm intended.
                try:
                    update_visualizations(_component, latest_commit_hash)
                except Exception as e:
                    print('Error updating visualisations!')
                    print(e)
                # except:
                #     pass
                _component.save()
                print (str(github_data['contributors_url']))
                try:
                    response = send_GET_request(str(github_data['contributors_url']), GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET)
                    contributors_data = json.load(response)
                except:
                    continue
                # Tally total commits and contributor count while syncing
                # per-contributor Contribution rows.
                commits = 0
                count = 0
                try:
                    for contributor in contributors_data:
                        try:
                            _contributor = Contributor.objects.get(username=contributor["login"])
                        except:
                            _contributor = Contributor.objects.create(username=contributor["login"], avatar_url=contributor["avatar_url"])
                        try:
                            _contribution = Contribution.objects.get(component=_component, contributor=_contributor)
                            _contribution.contributions = contributor["contributions"]
                            _contribution.save()
                        except:
                            _contribution = Contribution.objects.create(component=_component, contributor=_contributor, contributions=contributor["contributions"])
                        commits += _contribution.contributions
                        count +=1
                except:
                    print ('Error')
                    continue
                # response = send_GET_request(github_data['downloads_url'], GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET)
                # downloads_array = json.load(response)
                _component.downloads = get_npm_downloads(_component.npm_url)
                _component.commits = commits
                _component.no_of_contributors = count
                _component.save()
| 45.174545 | 163 | 0.5811 | from django.core.management import BaseCommand
import urllib2, json, urllib, base64
from main.models import *
try:
from biojs.settings import GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET
except:
print('ERROR: Could not load config!')
GITHUB_CLIENT_ID = ''
GITHUB_CLIENT_SECRET = ''
from datetime import datetime
import pytz
import ast
import re
'''
https://rawgit.com/cytoscape/cytoscape.js/master/package.json
"sniper" key, has "js" and "css". Search for "first"
'''
def get_npm_data():
hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'}
req = urllib2.Request("http://registry.npmjs.com/-/v1/search?text=keywords:biojs,bionode&size=500", headers=hdr)
response = urllib2.urlopen(req)
data = json.load(response)
return data
def get_npm_downloads(url):
package=url.split('/')[-1]
dateRange='last-week'
url='https://api.npmjs.org/downloads/range/'+dateRange+'/'+package
hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'}
req = urllib2.Request(url, headers=hdr)
response = urllib2.urlopen(req)
data = json.load(response)
download_count=0
for i in data['downloads']:
download_count+=i['downloads']
return download_count
def send_GET_request(url, user=None, password=None):
request = urllib2.Request(url)
if (user is not None and password is not None):
base64string = base64.encodestring('%s:%s' % (user, password)).replace('\n', '')
request.add_header('Authorization', 'Basic %s' % base64string)
return urllib2.urlopen(request)
def create_jsdelivr_link(owner, repo, file_path, commit=None):
return str('https://cdn.jsdelivr.net/gh/' + owner + '/' + repo + ('@' + commit if commit else '') + file_path)
def is_absolute_dependency(dep):
return re.match('^(?:[a-z]+:)?//', dep) is not None
def get_owner_and_repo_from_github_url(url):
split_url = url.split('?')[0].split('/')
return split_url[4], split_url[5]
t_owner_and_repo_from_github_url(component.github_url)
try:
url = create_jsdelivr_link(owner, repo_name, '/package.json', commit_hash)
response = send_GET_request(url)
package = json.load(response)
sniper_data = package["sniper"]
except KeyError:
print('No sniper info in ', repo_name)
return
buildJS = sniper_data.get('buildJS', [])
js = sniper_data.get('js', [])
buildCSS = sniper_data.get('buildCSS', [])
css = sniper_data.get('css', [])
buildJS = buildJS + filter(lambda l: is_absolute_dependency(l), js)
js = filter(lambda l: not is_absolute_dependency(l), js)
buildCSS = buildCSS + filter(lambda l: is_absolute_dependency(l), css)
css = filter(lambda l: not is_absolute_dependency(l), css)
for dep in buildJS:
JSDependency.objects.create(component=component, js_url=dep, sniper_data_value=dep)
for dep in buildCSS:
CSSDependency.objects.create(component=component, css_url=dep, sniper_data_value=dep)
sniperData, created = SniperData.objects.get_or_create(component=component)
if 'noBrowserify' in sniper_data:
sniperData.no_browserify = sniper_data['noBrowserify']
elif len(js) == 0 and len(css) == 0:
sniperData.no_browserify = True
sniperData.wzrd_url = '#' if sniperData.no_browserify else 'https://wzrd.in/bundle/' + component.name
if 'snippets' in sniper_data:
sniperData.snippets_dir_name = sniper_data['snippets'][0]
sniperData.save()
i.github.com/repos/' + owner + '/' + repo_name + '/contents/' + sniperData.snippets_dir_name + '?ref=master')
if not test:
print(url)
snippets_data = send_GET_request(url, GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET)
snippets = json.load(snippets_data)
filtered_snippets = filter(lambda s: s['name'].endswith('.js'), snippets)
except Exception as e:
print('ERROR: Something went wrong getting snippets data!')
print(e)
for snip in filtered_snippets:
try:
url = create_jsdelivr_link(owner, repo_name, str('/' + sniperData.snippets_dir_name + '/' + snip['name']), commit_hash)
name = snip.get('name', '').split('.')[0]
Snippet.objects.update_or_create(name=name, url=url, sniperData=sniperData)
except Exception as e:
print('ERROR: Something went wrong creating a new Snippet')
print(e)
class Command(BaseCommand):
help = "Command to update the details of all the components from Github"
def handle(self, *args, **options):
all_components = get_npm_data()['objects']
for component in all_components:
component_data = component['package']
try:
_component = Component.objects.get(name=component_data['name'])
print ('exists')
except:
_component = Component.objects.create(name=component_data['name'])
print (_component.name)
try:
_component.version = component_data['version']
except:
pass
try:
_component.short_description = component_data['description']
except:
pass
try:
tags = component_data['keywords']
except:
tags = []
for tag in tags:
try:
_tag = Tag.objects.get(name=tag)
except:
_tag = Tag.objects.create(name=tag)
_component.tags.add(_tag)
if not _tag in _component.tags.all():
_component.tags.add(_tag)
try:
str_date = component_data['date']
req_date = datetime.strptime(str_date, "%Y-%m-%dT%H:%M:%S.%fZ")
aware_date = pytz.utc.localize(req_date)
_component.modified_time = aware_date
except:
pass
try:
_component.npm_url = component_data['links']['npm']
except:
pass
try:
_component.homepage_url = component_data['links']['homepage']
except:
pass
try:
github_url = component_data['links']['repository']
url_list = github_url.split('/')
_component.github_url = 'https://api.github.com/repos/' + str(url_list[3]) + '/' + str(url_list[4])
except:
pass
try:
_component.author = component_data['author']['name']
except:
pass
try:
_component.author_email = component_data['author']['email']
except:
pass
_component.save()
if _component.github_url:
print (_component.github_url)
try:
response = send_GET_request(_component.github_url, GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET)
github_data = json.load(response)
except urllib2.HTTPError as e:
print('Error getting github data!')
print(e)
print GITHUB_CLIENT_ID
continue
except Exception as e:
print('Unexpected error accessing Github!')
print(e)
continue
_component.stars = github_data['stargazers_count']
_component.forks = github_data['forks']
_component.watchers = github_data['subscribers_count']
_component.icon_url = github_data['owner']['avatar_url']
_component.open_issues = github_data['open_issues']
try:
_component.license = github_data['license']['name']
except:
pass
try:
str_date = github_data['created_at']
req_date = datetime.strptime(str_date, "%Y-%m-%dT%H:%M:%SZ")
aware_date = pytz.utc.localize(req_date)
_component.created_time = aware_date
except:
pass
str_date = github_data['updated_at']
req_date = datetime.strptime(str_date, "%Y-%m-%dT%H:%M:%SZ")
aware_date = pytz.utc.localize(req_date)
_component.github_update_time = aware_date
try:
response = send_GET_request(github_data['commits_url'].split('{')[0], GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET)
latest_commit = json.load(response)[0]
latest_commit_hash = latest_commit['sha']
_component.latest_commit_hash = latest_commit_hash
except:
print('Error getting commit hash!')
pass
try:
update_visualizations(_component, latest_commit_hash)
except Exception as e:
print('Error updating visualisations!')
print(e)
_component.save()
print (str(github_data['contributors_url']))
try:
response = send_GET_request(str(github_data['contributors_url']), GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET)
contributors_data = json.load(response)
except:
continue
commits = 0
count = 0
try:
for contributor in contributors_data:
try:
_contributor = Contributor.objects.get(username=contributor["login"])
except:
_contributor = Contributor.objects.create(username=contributor["login"], avatar_url=contributor["avatar_url"])
try:
_contribution = Contribution.objects.get(component=_component, contributor=_contributor)
_contribution.contributions = contributor["contributions"]
_contribution.save()
except:
_contribution = Contribution.objects.create(component=_component, contributor=_contributor, contributions=contributor["contributions"])
commits += _contribution.contributions
count +=1
except:
print ('Error')
continue
_component.downloads = get_npm_downloads(_component.npm_url)
_component.commits = commits
_component.no_of_contributors = count
_component.save()
| false | true |
f7223a217ef47f10efc81de17fba6dfba178144f | 732 | py | Python | agavepy/response_handlers.py | jchuahtacc/agavepy | 5d4917739bb249051bf746cd21f5dcd363ffb287 | [
"Python-2.0",
"OLDAP-2.3"
] | null | null | null | agavepy/response_handlers.py | jchuahtacc/agavepy | 5d4917739bb249051bf746cd21f5dcd363ffb287 | [
"Python-2.0",
"OLDAP-2.3"
] | null | null | null | agavepy/response_handlers.py | jchuahtacc/agavepy | 5d4917739bb249051bf746cd21f5dcd363ffb287 | [
"Python-2.0",
"OLDAP-2.3"
] | null | null | null | """
response_hanlders.py
"""
from __future__ import print_function
import sys
def handle_bad_response_status_code(r):
""" Handle a response with a bad status code
"""
if not r.ok:
print("Bad {0} request to {1}, status code {2}".format(
r.request.method, r.url, r.status_code),
file=sys.stderr)
print(r.request.body)
print(r.json())
sys.exit(1)
| 38.526316 | 80 | 0.344262 | from __future__ import print_function
import sys
def handle_bad_response_status_code(r):
if not r.ok:
print("Bad {0} request to {1}, status code {2}".format(
r.request.method, r.url, r.status_code),
file=sys.stderr)
print(r.request.body)
print(r.json())
sys.exit(1)
| true | true |
f7223a8bdb7f3006647c4ab186561a8c97ffae82 | 15,374 | py | Python | venv/lib/python3.5/site-packages/yapf/yapflib/subtype_assigner.py | prashant0598/CoffeeApp | 4fa006aebf06e12ed34766450ddcfa548ee63307 | [
"MIT"
] | null | null | null | venv/lib/python3.5/site-packages/yapf/yapflib/subtype_assigner.py | prashant0598/CoffeeApp | 4fa006aebf06e12ed34766450ddcfa548ee63307 | [
"MIT"
] | null | null | null | venv/lib/python3.5/site-packages/yapf/yapflib/subtype_assigner.py | prashant0598/CoffeeApp | 4fa006aebf06e12ed34766450ddcfa548ee63307 | [
"MIT"
] | null | null | null | # Copyright 2015-2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Subtype assigner for lib2to3 trees.
This module assigns extra type information to the lib2to3 trees. This
information is more specific than whether something is an operator or an
identifier. For instance, it can specify if a node in the tree is part of a
subscript.
AssignSubtypes(): the main function exported by this module.
Annotations:
subtype: The subtype of a pytree token. See 'format_token' module for a list
of subtypes.
"""
from lib2to3 import pytree
from lib2to3.pgen2 import token
from lib2to3.pygram import python_symbols as syms
from yapf.yapflib import format_token
from yapf.yapflib import pytree_utils
from yapf.yapflib import pytree_visitor
from yapf.yapflib import style
def AssignSubtypes(tree):
"""Run the subtype assigner visitor over the tree, modifying it in place.
Arguments:
tree: the top-level pytree node to annotate with subtypes.
"""
subtype_assigner = _SubtypeAssigner()
subtype_assigner.Visit(tree)
# Map tokens in argument lists to their respective subtype.
_ARGLIST_TOKEN_TO_SUBTYPE = {
'=': format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN,
':': format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN,
'*': format_token.Subtype.VARARGS_STAR,
'**': format_token.Subtype.KWARGS_STAR_STAR,
}
class _SubtypeAssigner(pytree_visitor.PyTreeVisitor):
"""_SubtypeAssigner - see file-level docstring for detailed description.
The subtype is added as an annotation to the pytree token.
"""
def Visit_dictsetmaker(self, node): # pylint: disable=invalid-name
# dictsetmaker ::= (test ':' test (comp_for |
# (',' test ':' test)* [','])) |
# (test (comp_for | (',' test)* [',']))
for child in node.children:
self.Visit(child)
comp_for = False
dict_maker = False
for child in node.children:
if pytree_utils.NodeName(child) == 'comp_for':
comp_for = True
_AppendFirstLeafTokenSubtype(child,
format_token.Subtype.DICT_SET_GENERATOR)
elif pytree_utils.NodeName(child) == 'COLON':
dict_maker = True
if not comp_for and dict_maker:
last_was_colon = False
for child in node.children:
if dict_maker:
if last_was_colon:
if style.Get('INDENT_DICTIONARY_VALUE'):
_InsertPseudoParentheses(child)
else:
_AppendFirstLeafTokenSubtype(
child, format_token.Subtype.DICTIONARY_VALUE)
elif (child is not None and
(isinstance(child, pytree.Node) or child.value not in '{:,')):
# Mark the first leaf of a key entry as a DICTIONARY_KEY. We
# normally want to split before them if the dictionary cannot exist
# on a single line.
_AppendFirstLeafTokenSubtype(child,
format_token.Subtype.DICTIONARY_KEY)
last_was_colon = pytree_utils.NodeName(child) == 'COLON'
def Visit_expr_stmt(self, node): # pylint: disable=invalid-name
# expr_stmt ::= testlist_star_expr (augassign (yield_expr|testlist)
# | ('=' (yield_expr|testlist_star_expr))*)
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '=':
_AppendTokenSubtype(child, format_token.Subtype.ASSIGN_OPERATOR)
def Visit_or_test(self, node): # pylint: disable=invalid-name
# or_test ::= and_test ('or' and_test)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == 'or':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_and_test(self, node): # pylint: disable=invalid-name
# and_test ::= not_test ('and' not_test)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == 'and':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_not_test(self, node): # pylint: disable=invalid-name
# not_test ::= 'not' not_test | comparison
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == 'not':
_AppendTokenSubtype(child, format_token.Subtype.UNARY_OPERATOR)
def Visit_comparison(self, node): # pylint: disable=invalid-name
# comparison ::= expr (comp_op expr)*
# comp_op ::= '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not in'|'is'|'is not'
for child in node.children:
self.Visit(child)
if (isinstance(child, pytree.Leaf) and
child.value in {'<', '>', '==', '>=', '<=', '<>', '!=', 'in', 'is'}):
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
elif pytree_utils.NodeName(child) == 'comp_op':
for grandchild in child.children:
_AppendTokenSubtype(grandchild, format_token.Subtype.BINARY_OPERATOR)
def Visit_star_expr(self, node): # pylint: disable=invalid-name
# star_expr ::= '*' expr
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '*':
_AppendTokenSubtype(child, format_token.Subtype.UNARY_OPERATOR)
def Visit_expr(self, node): # pylint: disable=invalid-name
# expr ::= xor_expr ('|' xor_expr)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '|':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_xor_expr(self, node): # pylint: disable=invalid-name
# xor_expr ::= and_expr ('^' and_expr)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '^':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_and_expr(self, node): # pylint: disable=invalid-name
# and_expr ::= shift_expr ('&' shift_expr)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '&':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_shift_expr(self, node): # pylint: disable=invalid-name
# shift_expr ::= arith_expr (('<<'|'>>') arith_expr)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value in {'<<', '>>'}:
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_arith_expr(self, node): # pylint: disable=invalid-name
# arith_expr ::= term (('+'|'-') term)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value in '+-':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_term(self, node): # pylint: disable=invalid-name
# term ::= factor (('*'|'/'|'%'|'//') factor)*
for child in node.children:
self.Visit(child)
if (isinstance(child, pytree.Leaf) and
child.value in {'*', '/', '%', '//'}):
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_factor(self, node): # pylint: disable=invalid-name
# factor ::= ('+'|'-'|'~') factor | power
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value in '+-~':
_AppendTokenSubtype(child, format_token.Subtype.UNARY_OPERATOR)
def Visit_power(self, node): # pylint: disable=invalid-name
# power ::= atom trailer* ['**' factor]
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '**':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_trailer(self, node): # pylint: disable=invalid-name
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value in '[]':
_AppendTokenSubtype(child, format_token.Subtype.SUBSCRIPT_BRACKET)
def Visit_subscript(self, node): # pylint: disable=invalid-name
# subscript ::= test | [test] ':' [test] [sliceop]
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == ':':
_AppendTokenSubtype(child, format_token.Subtype.SUBSCRIPT_COLON)
def Visit_sliceop(self, node): # pylint: disable=invalid-name
# sliceop ::= ':' [test]
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == ':':
_AppendTokenSubtype(child, format_token.Subtype.SUBSCRIPT_COLON)
def Visit_argument(self, node): # pylint: disable=invalid-name
# argument ::=
# test [comp_for] | test '=' test
self._ProcessArgLists(node)
def Visit_arglist(self, node): # pylint: disable=invalid-name
# arglist ::=
# (argument ',')* (argument [',']
# | '*' test (',' argument)* [',' '**' test]
# | '**' test)
self._ProcessArgLists(node)
_SetDefaultOrNamedAssignArgListSubtype(node)
def Visit_tname(self, node): # pylint: disable=invalid-name
self._ProcessArgLists(node)
_SetDefaultOrNamedAssignArgListSubtype(node)
def Visit_funcdef(self, node): # pylint: disable=invalid-name
# funcdef ::=
# 'def' NAME parameters ['->' test] ':' suite
for child in node.children:
if pytree_utils.NodeName(child) == 'NAME' and child.value != 'def':
_AppendTokenSubtype(child, format_token.Subtype.FUNC_DEF)
break
for child in node.children:
self.Visit(child)
def Visit_typedargslist(self, node): # pylint: disable=invalid-name
# typedargslist ::=
# ((tfpdef ['=' test] ',')*
# ('*' [tname] (',' tname ['=' test])* [',' '**' tname]
# | '**' tname)
# | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
self._ProcessArgLists(node)
_SetDefaultOrNamedAssignArgListSubtype(node)
def Visit_varargslist(self, node): # pylint: disable=invalid-name
# varargslist ::=
# ((vfpdef ['=' test] ',')*
# ('*' [vname] (',' vname ['=' test])* [',' '**' vname]
# | '**' vname)
# | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
self._ProcessArgLists(node)
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '=':
_AppendTokenSubtype(child, format_token.Subtype.VARARGS_LIST)
def Visit_comp_for(self, node): # pylint: disable=invalid-name
# comp_for ::= 'for' exprlist 'in' testlist_safe [comp_iter]
_AppendSubtypeRec(node, format_token.Subtype.COMP_FOR)
self.DefaultNodeVisit(node)
def Visit_comp_if(self, node): # pylint: disable=invalid-name
# comp_if ::= 'if' old_test [comp_iter]
_AppendSubtypeRec(node, format_token.Subtype.COMP_IF)
self.DefaultNodeVisit(node)
def _ProcessArgLists(self, node):
"""Common method for processing argument lists."""
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf):
_AppendTokenSubtype(
child,
subtype=_ARGLIST_TOKEN_TO_SUBTYPE.get(child.value,
format_token.Subtype.NONE))
def _SetDefaultOrNamedAssignArgListSubtype(node):
def HasDefaultOrNamedAssignSubtype(node):
if isinstance(node, pytree.Leaf):
if (format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN in
pytree_utils.GetNodeAnnotation(node, pytree_utils.Annotation.SUBTYPE,
set())):
return True
return False
has_subtype = False
for child in node.children:
has_subtype |= HasDefaultOrNamedAssignSubtype(child)
return has_subtype
if HasDefaultOrNamedAssignSubtype(node):
for child in node.children:
if pytree_utils.NodeName(child) != 'COMMA':
_AppendFirstLeafTokenSubtype(
child, format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST)
def _AppendTokenSubtype(node, subtype):
"""Append the token's subtype only if it's not already set."""
pytree_utils.AppendNodeAnnotation(node, pytree_utils.Annotation.SUBTYPE,
subtype)
def _AppendFirstLeafTokenSubtype(node, subtype):
"""Append the first leaf token's subtypes."""
if isinstance(node, pytree.Leaf):
_AppendTokenSubtype(node, subtype)
return
_AppendFirstLeafTokenSubtype(node.children[0], subtype)
def _AppendSubtypeRec(node, subtype, force=True):
"""Append the leafs in the node to the given subtype."""
if isinstance(node, pytree.Leaf):
_AppendTokenSubtype(node, subtype)
return
for child in node.children:
_AppendSubtypeRec(child, subtype, force=force)
def _InsertPseudoParentheses(node):
"""Insert pseudo parentheses so that dicts can be formatted correctly."""
comment_node = None
if isinstance(node, pytree.Node):
if node.children[-1].type == token.COMMENT:
comment_node = node.children[-1].clone()
node.children[-1].remove()
first = _GetFirstLeafNode(node)
last = _GetLastLeafNode(node)
if first == last and first.type == token.COMMENT:
# A comment was inserted before the value, which is a pytree.Leaf.
# Encompass the dictionary's value into an ATOM node.
last = first.next_sibling
new_node = pytree.Node(syms.atom, [first.clone(), last.clone()])
node.replace(new_node)
node = new_node
last.remove()
first = _GetFirstLeafNode(node)
last = _GetLastLeafNode(node)
lparen = pytree.Leaf(
token.LPAR, u'(', context=('', (first.get_lineno(), first.column - 1)))
last_lineno = last.get_lineno()
if last.type == token.STRING and '\n' in last.value:
last_lineno += last.value.count('\n')
if last.type == token.STRING and '\n' in last.value:
last_column = len(last.value.split('\n')[-1]) + 1
else:
last_column = last.column + len(last.value) + 1
rparen = pytree.Leaf(
token.RPAR, u')', context=('', (last_lineno, last_column)))
lparen.is_pseudo = True
rparen.is_pseudo = True
if isinstance(node, pytree.Node):
node.insert_child(0, lparen)
node.append_child(rparen)
if comment_node:
node.append_child(comment_node)
_AppendFirstLeafTokenSubtype(node, format_token.Subtype.DICTIONARY_VALUE)
else:
clone = node.clone()
new_node = pytree.Node(syms.atom, [lparen, clone, rparen])
node.replace(new_node)
_AppendFirstLeafTokenSubtype(clone, format_token.Subtype.DICTIONARY_VALUE)
def _GetFirstLeafNode(node):
if isinstance(node, pytree.Leaf):
return node
return _GetFirstLeafNode(node.children[0])
def _GetLastLeafNode(node):
if isinstance(node, pytree.Leaf):
return node
return _GetLastLeafNode(node.children[-1])
| 38.435 | 79 | 0.664368 |
from lib2to3 import pytree
from lib2to3.pgen2 import token
from lib2to3.pygram import python_symbols as syms
from yapf.yapflib import format_token
from yapf.yapflib import pytree_utils
from yapf.yapflib import pytree_visitor
from yapf.yapflib import style
def AssignSubtypes(tree):
subtype_assigner = _SubtypeAssigner()
subtype_assigner.Visit(tree)
_ARGLIST_TOKEN_TO_SUBTYPE = {
'=': format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN,
':': format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN,
'*': format_token.Subtype.VARARGS_STAR,
'**': format_token.Subtype.KWARGS_STAR_STAR,
}
class _SubtypeAssigner(pytree_visitor.PyTreeVisitor):
def Visit_dictsetmaker(self, node):
for child in node.children:
self.Visit(child)
comp_for = False
dict_maker = False
for child in node.children:
if pytree_utils.NodeName(child) == 'comp_for':
comp_for = True
_AppendFirstLeafTokenSubtype(child,
format_token.Subtype.DICT_SET_GENERATOR)
elif pytree_utils.NodeName(child) == 'COLON':
dict_maker = True
if not comp_for and dict_maker:
last_was_colon = False
for child in node.children:
if dict_maker:
if last_was_colon:
if style.Get('INDENT_DICTIONARY_VALUE'):
_InsertPseudoParentheses(child)
else:
_AppendFirstLeafTokenSubtype(
child, format_token.Subtype.DICTIONARY_VALUE)
elif (child is not None and
(isinstance(child, pytree.Node) or child.value not in '{:,')):
_AppendFirstLeafTokenSubtype(child,
format_token.Subtype.DICTIONARY_KEY)
last_was_colon = pytree_utils.NodeName(child) == 'COLON'
def Visit_expr_stmt(self, node):
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '=':
_AppendTokenSubtype(child, format_token.Subtype.ASSIGN_OPERATOR)
def Visit_or_test(self, node):
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == 'or':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_and_test(self, node):
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == 'and':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_not_test(self, node):
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == 'not':
_AppendTokenSubtype(child, format_token.Subtype.UNARY_OPERATOR)
def Visit_comparison(self, node):
for child in node.children:
self.Visit(child)
if (isinstance(child, pytree.Leaf) and
child.value in {'<', '>', '==', '>=', '<=', '<>', '!=', 'in', 'is'}):
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
elif pytree_utils.NodeName(child) == 'comp_op':
for grandchild in child.children:
_AppendTokenSubtype(grandchild, format_token.Subtype.BINARY_OPERATOR)
def Visit_star_expr(self, node):
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '*':
_AppendTokenSubtype(child, format_token.Subtype.UNARY_OPERATOR)
def Visit_expr(self, node):
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '|':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_xor_expr(self, node):
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '^':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_and_expr(self, node):
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '&':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_shift_expr(self, node):
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value in {'<<', '>>'}:
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_arith_expr(self, node):
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value in '+-':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_term(self, node):
for child in node.children:
self.Visit(child)
if (isinstance(child, pytree.Leaf) and
child.value in {'*', '/', '%', '//'}):
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_factor(self, node):
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value in '+-~':
_AppendTokenSubtype(child, format_token.Subtype.UNARY_OPERATOR)
def Visit_power(self, node):
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '**':
_AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_trailer(self, node):
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value in '[]':
_AppendTokenSubtype(child, format_token.Subtype.SUBSCRIPT_BRACKET)
def Visit_subscript(self, node):
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == ':':
_AppendTokenSubtype(child, format_token.Subtype.SUBSCRIPT_COLON)
def Visit_sliceop(self, node):
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == ':':
_AppendTokenSubtype(child, format_token.Subtype.SUBSCRIPT_COLON)
def Visit_argument(self, node):
self._ProcessArgLists(node)
def Visit_arglist(self, node):
self._ProcessArgLists(node)
_SetDefaultOrNamedAssignArgListSubtype(node)
def Visit_tname(self, node):
self._ProcessArgLists(node)
_SetDefaultOrNamedAssignArgListSubtype(node)
def Visit_funcdef(self, node):
for child in node.children:
if pytree_utils.NodeName(child) == 'NAME' and child.value != 'def':
_AppendTokenSubtype(child, format_token.Subtype.FUNC_DEF)
break
for child in node.children:
self.Visit(child)
def Visit_typedargslist(self, node):
self._ProcessArgLists(node)
_SetDefaultOrNamedAssignArgListSubtype(node)
def Visit_varargslist(self, node):
self._ProcessArgLists(node)
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '=':
_AppendTokenSubtype(child, format_token.Subtype.VARARGS_LIST)
def Visit_comp_for(self, node):
_AppendSubtypeRec(node, format_token.Subtype.COMP_FOR)
self.DefaultNodeVisit(node)
def Visit_comp_if(self, node):
_AppendSubtypeRec(node, format_token.Subtype.COMP_IF)
self.DefaultNodeVisit(node)
def _ProcessArgLists(self, node):
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf):
_AppendTokenSubtype(
child,
subtype=_ARGLIST_TOKEN_TO_SUBTYPE.get(child.value,
format_token.Subtype.NONE))
def _SetDefaultOrNamedAssignArgListSubtype(node):
def HasDefaultOrNamedAssignSubtype(node):
if isinstance(node, pytree.Leaf):
if (format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN in
pytree_utils.GetNodeAnnotation(node, pytree_utils.Annotation.SUBTYPE,
set())):
return True
return False
has_subtype = False
for child in node.children:
has_subtype |= HasDefaultOrNamedAssignSubtype(child)
return has_subtype
if HasDefaultOrNamedAssignSubtype(node):
for child in node.children:
if pytree_utils.NodeName(child) != 'COMMA':
_AppendFirstLeafTokenSubtype(
child, format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST)
def _AppendTokenSubtype(node, subtype):
pytree_utils.AppendNodeAnnotation(node, pytree_utils.Annotation.SUBTYPE,
subtype)
def _AppendFirstLeafTokenSubtype(node, subtype):
if isinstance(node, pytree.Leaf):
_AppendTokenSubtype(node, subtype)
return
_AppendFirstLeafTokenSubtype(node.children[0], subtype)
def _AppendSubtypeRec(node, subtype, force=True):
if isinstance(node, pytree.Leaf):
_AppendTokenSubtype(node, subtype)
return
for child in node.children:
_AppendSubtypeRec(child, subtype, force=force)
def _InsertPseudoParentheses(node):
comment_node = None
if isinstance(node, pytree.Node):
if node.children[-1].type == token.COMMENT:
comment_node = node.children[-1].clone()
node.children[-1].remove()
first = _GetFirstLeafNode(node)
last = _GetLastLeafNode(node)
if first == last and first.type == token.COMMENT:
last = first.next_sibling
new_node = pytree.Node(syms.atom, [first.clone(), last.clone()])
node.replace(new_node)
node = new_node
last.remove()
first = _GetFirstLeafNode(node)
last = _GetLastLeafNode(node)
lparen = pytree.Leaf(
token.LPAR, u'(', context=('', (first.get_lineno(), first.column - 1)))
last_lineno = last.get_lineno()
if last.type == token.STRING and '\n' in last.value:
last_lineno += last.value.count('\n')
if last.type == token.STRING and '\n' in last.value:
last_column = len(last.value.split('\n')[-1]) + 1
else:
last_column = last.column + len(last.value) + 1
rparen = pytree.Leaf(
token.RPAR, u')', context=('', (last_lineno, last_column)))
lparen.is_pseudo = True
rparen.is_pseudo = True
if isinstance(node, pytree.Node):
node.insert_child(0, lparen)
node.append_child(rparen)
if comment_node:
node.append_child(comment_node)
_AppendFirstLeafTokenSubtype(node, format_token.Subtype.DICTIONARY_VALUE)
else:
clone = node.clone()
new_node = pytree.Node(syms.atom, [lparen, clone, rparen])
node.replace(new_node)
_AppendFirstLeafTokenSubtype(clone, format_token.Subtype.DICTIONARY_VALUE)
def _GetFirstLeafNode(node):
if isinstance(node, pytree.Leaf):
return node
return _GetFirstLeafNode(node.children[0])
def _GetLastLeafNode(node):
if isinstance(node, pytree.Leaf):
return node
return _GetLastLeafNode(node.children[-1])
| true | true |
f7223aed59c6c723e79013fc4c45714b63baace8 | 362 | py | Python | examples/keyword_search.py | smilelight/lightTEXT | b015d0e3524722fb5a8ee5ea83b7fbbd7408f797 | [
"Apache-2.0"
] | 12 | 2020-01-26T09:16:21.000Z | 2021-12-06T06:44:37.000Z | examples/keyword_search.py | smilelight/lightTEXT | b015d0e3524722fb5a8ee5ea83b7fbbd7408f797 | [
"Apache-2.0"
] | null | null | null | examples/keyword_search.py | smilelight/lightTEXT | b015d0e3524722fb5a8ee5ea83b7fbbd7408f797 | [
"Apache-2.0"
] | 7 | 2020-04-30T00:37:32.000Z | 2021-07-07T06:32:40.000Z | # -*- coding: utf-8 -*-
# @Time : 2020/7/2 15:16
# @Author : lightsmile
# @Software: PyCharm
from lighttext import KeywordProcessor
if __name__ == '__main__':
    # Build a keyword matcher and register the names to search for.
    kp = KeywordProcessor()
    kp.add_keyword("曹操")
    kp.add_keyword("曹丕")
    kp.add_keyword("司马懿")
    kp.add_keyword("司马")
    # Extract every registered keyword that occurs in the sentence.
    stn = "曹操、曹丕和司马懿一起去吃大盘鸡。"
    print(kp.extract_keywords(stn))
| 20.111111 | 38 | 0.638122 |
from lighttext import KeywordProcessor
if __name__ == '__main__':
kp = KeywordProcessor()
kp.add_keyword("曹操")
kp.add_keyword("曹丕")
kp.add_keyword("司马懿")
kp.add_keyword("司马")
stn = "曹操、曹丕和司马懿一起去吃大盘鸡。"
print(kp.extract_keywords(stn))
| true | true |
f7223b5daa0d856c37c6a3df3dcc20d36542e51d | 2,203 | py | Python | feed/migrations/0001_initial.py | pabulumm/neighbors | 59f3f3ae727fe52c7897beaf73d157b02cdcb7a3 | [
"BSD-3-Clause"
] | null | null | null | feed/migrations/0001_initial.py | pabulumm/neighbors | 59f3f3ae727fe52c7897beaf73d157b02cdcb7a3 | [
"BSD-3-Clause"
] | null | null | null | feed/migrations/0001_initial.py | pabulumm/neighbors | 59f3f3ae727fe52c7897beaf73d157b02cdcb7a3 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the feed app: Feed, FeedPost and PostView.

    Appears auto-generated (0001_initial) — edit with care; schema changes
    belong in a new migration.
    """

    dependencies = [
        ('markers', '__first__'),
        ('polls', '__first__'),
        ('neighborhood', '__first__'),
        # Depends on whichever user model the project has configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # A Feed belongs to one Neighborhood.
        migrations.CreateModel(
            name='Feed',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('create_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('neighborhood', models.ForeignKey(to='neighborhood.Neighborhood')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # A post on a feed; optional links to poll/decision/marker/user.
        migrations.CreateModel(
            name='FeedPost',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('type', models.CharField(max_length=50, default='ANNOUNCEMENT')),
                ('text', models.TextField(max_length=1000)),
                ('create_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('decision', models.ForeignKey(null=True, to='polls.Poll')),
                ('feed', models.ForeignKey(null=True, to='feed.Feed')),
                ('marker', models.ForeignKey(null=True, to='markers.Marker')),
                ('poll', models.ForeignKey(null=True, to='polls.Question')),
                ('user', models.ForeignKey(null=True, to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Records when a user viewed a post (date updates on each save).
        migrations.CreateModel(
            name='PostView',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('date', models.DateTimeField(auto_now=True)),
                ('post', models.ForeignKey(to='feed.FeedPost')),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| 36.716667 | 89 | 0.546074 |
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('markers', '__first__'),
('polls', '__first__'),
('neighborhood', '__first__'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Feed',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('create_date', models.DateTimeField(default=django.utils.timezone.now)),
('neighborhood', models.ForeignKey(to='neighborhood.Neighborhood')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='FeedPost',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('type', models.CharField(max_length=50, default='ANNOUNCEMENT')),
('text', models.TextField(max_length=1000)),
('create_date', models.DateTimeField(default=django.utils.timezone.now)),
('decision', models.ForeignKey(null=True, to='polls.Poll')),
('feed', models.ForeignKey(null=True, to='feed.Feed')),
('marker', models.ForeignKey(null=True, to='markers.Marker')),
('poll', models.ForeignKey(null=True, to='polls.Question')),
('user', models.ForeignKey(null=True, to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PostView',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('date', models.DateTimeField(auto_now=True)),
('post', models.ForeignKey(to='feed.FeedPost')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
]
| true | true |
f7223b8b41c3b79f80ac1b63c777994d997a08d9 | 544 | py | Python | __init__.py | edbizarro/airflow_facebook_ads_plugin | 53187111bdd00f2bf756bd21e2c853d58784f0a6 | [
"Apache-2.0"
] | 2 | 2019-06-19T13:46:11.000Z | 2020-10-29T09:51:10.000Z | __init__.py | edbizarro/airflow_facebook_ads_plugin | 53187111bdd00f2bf756bd21e2c853d58784f0a6 | [
"Apache-2.0"
] | null | null | null | __init__.py | edbizarro/airflow_facebook_ads_plugin | 53187111bdd00f2bf756bd21e2c853d58784f0a6 | [
"Apache-2.0"
] | 2 | 2019-07-24T21:12:44.000Z | 2020-04-30T13:17:42.000Z | from airflow.plugins_manager import AirflowPlugin
from facebook_ads_plugin.hooks.facebook_ads_hook import FacebookAdsHook
from facebook_ads_plugin.operators.facebook_ads_to_s3_operator import (
FacebookAdsInsightsToS3Operator,
FacebookAdsToS3Operator,
)
class FacebookAdsPlugin(AirflowPlugin):
    """Airflow plugin registering the Facebook Ads hook and S3 operators."""

    name = "facebook_ads_plugin"
    # Components exposed to Airflow; unused extension points stay empty.
    hooks = [FacebookAdsHook]
    operators = [FacebookAdsInsightsToS3Operator, FacebookAdsToS3Operator]
    executors = []
    macros = []
    admin_views = []
    flask_blueprints = []
    menu_links = []
| 30.222222 | 74 | 0.777574 | from airflow.plugins_manager import AirflowPlugin
from facebook_ads_plugin.hooks.facebook_ads_hook import FacebookAdsHook
from facebook_ads_plugin.operators.facebook_ads_to_s3_operator import (
FacebookAdsInsightsToS3Operator,
FacebookAdsToS3Operator,
)
class FacebookAdsPlugin(AirflowPlugin):
name = "facebook_ads_plugin"
hooks = [FacebookAdsHook]
operators = [FacebookAdsInsightsToS3Operator, FacebookAdsToS3Operator]
executors = []
macros = []
admin_views = []
flask_blueprints = []
menu_links = []
| true | true |
f7223bea8be2abbd0d7ea132889bc8827caeaa19 | 2,539 | py | Python | samples/list_vmwaretools_status.py | jm66/pyvmomi-community-samples | 5ca4a50b767500e07b9bce9fba70240bfa963a4e | [
"Apache-2.0"
] | 6 | 2017-01-25T06:33:47.000Z | 2021-01-28T22:20:24.000Z | samples/list_vmwaretools_status.py | zhangjiahaol/pyvmomi-community-samples | 905ec34edfbd151531832e98b6a0748fa6ff5e0e | [
"Apache-2.0"
] | 12 | 2019-04-17T02:47:25.000Z | 2021-04-02T09:15:37.000Z | samples/list_vmwaretools_status.py | zhangjiahaol/pyvmomi-community-samples | 905ec34edfbd151531832e98b6a0748fa6ff5e0e | [
"Apache-2.0"
] | 15 | 2018-04-26T05:18:12.000Z | 2021-11-06T04:44:58.000Z | #!/usr/bin/env python
#
# Written by JM Lopez
# GitHub: https://github.com/jm66
# Email: jm@jmll.me
# Website: http://jose-manuel.me
#
# Note: Example code For testing purposes only
#
# This code has been released under the terms of the Apache-2.0 license
# http://opensource.org/licenses/Apache-2.0
#
import atexit
import requests
from tools import cli
from pyVmomi import vim
from pyVim.connect import SmartConnect, Disconnect
_columns_four = "{0:<20} {1:<30} {2:<30} {3:<20}"
# disable urllib3 warnings
if hasattr(requests.packages.urllib3, 'disable_warnings'):
requests.packages.urllib3.disable_warnings()
def get_args():
    """Parse the standard pyVmomi CLI args plus an optional VM name.

    Prompts interactively for the password when it was not supplied.
    """
    parser = cli.build_arg_parser()
    parser.add_argument('-n', '--vmname', required=False,
                        help="Name of the VirtualMachine you want to change.")
    my_args = parser.parse_args()
    return cli.prompt_for_password(my_args)
def get_obj(content, vim_type, name):
    """Return the first inventory object of vim_type whose name matches,
    or None when nothing matches."""
    container = content.viewManager.CreateContainerView(
        content.rootFolder, vim_type, True)
    match = None
    for candidate in container.view:
        if candidate.name == name:
            match = candidate
            break
    return match
def get_vms(content):
    """Return a list of every VirtualMachine object in the inventory."""
    obj_view = content.viewManager.CreateContainerView(
        content.rootFolder, [vim.VirtualMachine], True)
    vms_list = obj_view.view
    # Release the server-side view object once the list has been captured.
    obj_view.Destroy()
    return vms_list
def print_vmwareware_tools_status(vm):
    """Print one table row: VM name, tools run state, version, version status.

    Python 2 print statement; column layout comes from _columns_four.
    """
    print _columns_four.format(vm.name,
                               vm.guest.toolsRunningStatus,
                               vm.guest.toolsVersion,
                               vm.guest.toolsVersionStatus2)
def main():
    """Connect to vCenter and print VMware Tools status for one or all VMs."""
    args = get_args()
    # connect to vc
    si = SmartConnect(
        host=args.host,
        user=args.user,
        pwd=args.password,
        port=args.port)
    # disconnect vc (registered so it also runs on error paths)
    atexit.register(Disconnect, si)
    content = si.RetrieveContent()
    if args.vmname:
        # Single-VM mode: look the VM up by name and print one row.
        print 'Searching for VM {}'.format(args.vmname)
        vm_obj = get_obj(content, [vim.VirtualMachine], args.vmname)
        if vm_obj:
            print _columns_four.format('Name', 'Status',
                                       'Version', 'Version Status')
            print_vmwareware_tools_status(vm_obj)
        else:
            print "VM not found"
    else:
        # No name given: print a row for every VM in the inventory.
        print _columns_four.format('Name', 'Status',
                                   'Version', 'Version Status')
        for vm_obj in get_vms(content):
            print_vmwareware_tools_status(vm_obj)
# start
if __name__ == "__main__":
main()
| 27.010638 | 78 | 0.625049 |
import atexit
import requests
from tools import cli
from pyVmomi import vim
from pyVim.connect import SmartConnect, Disconnect
_columns_four = "{0:<20} {1:<30} {2:<30} {3:<20}"
if hasattr(requests.packages.urllib3, 'disable_warnings'):
requests.packages.urllib3.disable_warnings()
def get_args():
parser = cli.build_arg_parser()
parser.add_argument('-n', '--vmname', required=False,
help="Name of the VirtualMachine you want to change.")
my_args = parser.parse_args()
return cli.prompt_for_password(my_args)
def get_obj(content, vim_type, name):
obj = None
container = content.viewManager.CreateContainerView(
content.rootFolder, vim_type, True)
for c in container.view:
if c.name == name:
obj = c
break
return obj
def get_vms(content):
obj_view = content.viewManager.CreateContainerView(
content.rootFolder, [vim.VirtualMachine], True)
vms_list = obj_view.view
obj_view.Destroy()
return vms_list
def print_vmwareware_tools_status(vm):
print _columns_four.format(vm.name,
vm.guest.toolsRunningStatus,
vm.guest.toolsVersion,
vm.guest.toolsVersionStatus2)
def main():
args = get_args()
si = SmartConnect(
host=args.host,
user=args.user,
pwd=args.password,
port=args.port)
atexit.register(Disconnect, si)
content = si.RetrieveContent()
if args.vmname:
print 'Searching for VM {}'.format(args.vmname)
vm_obj = get_obj(content, [vim.VirtualMachine], args.vmname)
if vm_obj:
print _columns_four.format('Name', 'Status',
'Version', 'Version Status')
print_vmwareware_tools_status(vm_obj)
else:
print "VM not found"
else:
print _columns_four.format('Name', 'Status',
'Version', 'Version Status')
for vm_obj in get_vms(content):
print_vmwareware_tools_status(vm_obj)
if __name__ == "__main__":
main()
| false | true |
f7223d3f95314305c7c2ad1f785d545492de0057 | 6,233 | py | Python | nemo_convert_png.py | threadreaper/nemo_convert_png | 2961ad4b0ff7e57b59ba98104100707d54915a3d | [
"Apache-2.0"
] | null | null | null | nemo_convert_png.py | threadreaper/nemo_convert_png | 2961ad4b0ff7e57b59ba98104100707d54915a3d | [
"Apache-2.0"
] | null | null | null | nemo_convert_png.py | threadreaper/nemo_convert_png | 2961ad4b0ff7e57b59ba98104100707d54915a3d | [
"Apache-2.0"
] | null | null | null | '''Extension for Nemo's context menu to easily convert images to PNG and
optimize their filesize with pngcrush.'''
from __future__ import annotations
import os
import subprocess
from urllib.parse import unquote_plus, urlparse
from PIL import Image, UnidentifiedImageError
import PySimpleGUI as sg
import gi
gi.require_version('Nemo', '3.0')
from gi.repository import GObject, Nemo # type: ignore pylint: disable=wrong-import-position
EXTENSIONS = ('jpg', 'jpeg', 'gif', 'tiff', 'bmp', 'png')
uqp= unquote_plus
def get_files(files_in: list[GObject]) -> list[str]|None:
    """
    Retrieve filenames as cross-platform safe strings from file objects.

    Strips the leading ``file://`` scheme from each object's URI, URL-decodes
    it, and keeps only paths that are regular files.

    :param files_in: List of file objects.
    :return: Decoded paths, or None when no regular file was selected.
    """
    paths = []
    for file_obj in files_in:
        path = unquote_plus(file_obj.get_uri()[7:])
        if os.path.isfile(path):
            paths.append(path)
    return paths if paths else None
def convert_one(file: str) -> None:
    '''
    Converts an image to a PNG, replacing the original file.

    The output name is derived with ``os.path.splitext`` so filenames with
    extra dots (e.g. ``photo.v2.jpg``) keep their full stem and extensionless
    paths no longer raise IndexError (the previous ``split(".")[-2]`` did
    both).  The source handle is closed via a context manager before the
    original is deleted, so removal also works on Windows.

    :param file: Filename of the image to convert.
    '''
    filename = f'{os.path.splitext(file)[0]}.png'
    try:
        # Flatten to RGB; files PIL cannot identify are left untouched.
        with Image.open(file) as src:
            img = src.convert('RGB')
    except UnidentifiedImageError:
        return
    os.remove(file)
    img.save(filename, 'PNG')


def convert_images(_, files: list[str]) -> list[str]:
    '''
    Called by the context menu item "Convert selected image(s) to PNG".

    Converts every non-PNG file in place, updating a progress meter, and
    returns the resulting ``.png`` filenames (PNG inputs are kept as-is but
    still included in the returned list).  The meter now counts only the
    files actually converted instead of the overall loop index, which could
    overshoot the total when PNG files preceded other formats.

    :param files: The currently selected files.
    :return: Filenames of the (now) PNG images, in the original order.
    '''
    filenames = [f'{os.path.splitext(file)[0]}.png' for file in files]
    count = sum(not file.endswith('png') for file in files)
    done = 0
    for file in files:
        if not file.endswith('png'):
            done += 1
            sg.OneLineProgressMeter('Please wait...', done, count, 'pb',
                                    'Converting images', orientation='h')
            convert_one(file)
    # Final call closes the meter window.
    sg.OneLineProgressMeter('', count, count, key='pb')
    return filenames
def crush_one(file: str) -> None:
    '''
    Runs pngcrush on a png file, overwriting it in place.

    :param file: The file to execute this action on.
    '''
    command = ['pngcrush', '-rem', 'alla', '-nofilecheck', '-fix', '-ow',
               '-reduce', '-m', '0', file]
    subprocess.run(command, check=False)
def crush_images(_, files: list[str]) -> None:
    '''
    Called by the context menu item "Optimize image(s) with pngcrush.

    Runs pngcrush synchronously on each file while updating a progress meter.

    :param files: The currently selected files.
    '''
    for i, file in enumerate(files):
        sg.OneLineProgressMeter('Please wait...', i+1, len(files), 'pb',
                                'Optimize images with pngcrush', orientation='h')
        crush_one(file)
    # Final call closes the meter window.
    sg.OneLineProgressMeter('', len(files), len(files), key='pb')
def convert_and_crush(_, files: list[str]) -> None:
    '''
    Handler for the "Convert to PNG and optimize" context menu item:
    converts the selection to PNG, then runs pngcrush on the results.

    :param files: The currently selected files.
    '''
    crush_images(None, convert_images(None, files))
class PNGConverter(GObject.GObject, Nemo.MenuProvider):
    '''Nemo menu provider exposing the convert/optimize context-menu items.'''

    def __init__(self):
        '''File manager crashes if init is not called.'''
        ...

    def get_background_items(  # pylint: disable=arguments-differ
            self, _, folder: GObject) -> list[Nemo.MenuItem]|None:
        '''
        Called when context menu is called with no file objects selected.

        Scans the folder for supported image files and offers either a
        pure-optimize item (everything already PNG) or convert/convert+crush
        items (mixed formats).  Returns None implicitly when the folder has
        no supported images.

        :param folder: Nemo's current working directory.
        '''
        folder = urlparse(folder.get_uri()).path
        # Collect supported image files directly inside the folder.
        files = [uqp(os.path.join(folder, f))
                 for f in os.listdir(uqp(folder))
                 if os.path.isfile(uqp(os.path.join(folder, f))) and
                 f.lower().endswith(EXTENSIONS)]
        # NOTE(review): an empty folder also satisfies all(), so the crush
        # item is offered with an empty file list — confirm intended.
        if all(file.endswith('png') for file in files):
            crush = Nemo.MenuItem(
                name='CrushImages',
                label='Optimize image(s) with pngcrush',
                tip='Optimize image filesizes with pngcrush'
            )
            crush.connect('activate', crush_images, files)
            return [crush]
        if any(file.endswith(EXTENSIONS) for file in files):
            # Mixed formats: offer convert, and convert+optimize combo.
            convert = Nemo.MenuItem(
                name="ConvertAllImagestoPNG",
                label="Convert all images to PNG",
                tip="Convert all images to PNG"
            )
            convert.connect('activate', convert_images, files)
            crush = Nemo.MenuItem(
                name='ConvertandCrush',
                label="Convert images to PNG and optimize",
                tip="Convert images to PNG and optimize filesizes with pngcrush"
            )
            crush.connect('activate', convert_and_crush, files)
            return [convert, crush]

    def get_file_items(  # pylint: disable=arguments-differ
            self, _, files: list[GObject]) -> list[Nemo.MenuItem]|None:
        '''
        Called when context menu is called with files selected.

        Offers an optimize item when every selected file is a PNG, or
        convert / convert+optimize items when all are supported image types.
        Returns None implicitly for unsupported selections.

        :param files: The currently selected file objects.
        '''
        files = get_files(files)  # type: ignore
        try:
            is_iter = iter(files)
            check = all(file.lower().endswith('png') for file in files)
        except TypeError:
            # get_files returned None: nothing usable was selected.
            is_iter = False
            check = False
        if check:
            convert = Nemo.MenuItem(
                name="CrushImages",
                label="Optimize image(s) with pngcrush",
                tip="Optimize filesize(s) with pngcrush"
            )
            convert.connect('activate', crush_images, files)
            return [convert]
        if is_iter:
            check = all(file.lower().endswith(EXTENSIONS) for file in files)
        if check:
            convert = Nemo.MenuItem(
                name="ConvertImagetoPNG",
                label="Convert selected image(s) to .png",
                tip="Convert image(s) to .png"
            )
            convert.connect('activate', convert_images, files)
            crush = Nemo.MenuItem(
                name="ConvertandCrush",
                label="Convert to PNG and optimize with pngcrush",
                tip="Convert image(s) to PNG and optimize filesize(s) with\
                pngcrush"
            )
            crush.connect('activate', convert_and_crush, files)
            return [convert, crush]
| 32.633508 | 109 | 0.590887 | from __future__ import annotations
import os
import subprocess
from urllib.parse import unquote_plus, urlparse
from PIL import Image, UnidentifiedImageError
import PySimpleGUI as sg
import gi
gi.require_version('Nemo', '3.0')
from gi.repository import GObject, Nemo
EXTENSIONS = ('jpg', 'jpeg', 'gif', 'tiff', 'bmp', 'png')
uqp= unquote_plus
def get_files(files_in: list[GObject]) -> list[str]|None:
files = []
for file_in in files_in:
file_in = unquote_plus(file_in.get_uri()[7:])
if os.path.isfile(file_in):
files.append(file_in)
if files:
return files
return None
def convert_one(file: str) -> None:
filename = f'{file.split(".")[-2]}.png'
try:
img = Image.open(file).convert('RGB')
except UnidentifiedImageError:
img = False
if img:
os.remove(file)
img.save(filename, 'PNG')
def convert_images(_, files: list[str]) -> list[str]:
filenames = [f'{file.split(".")[-2]}.png' for file in files]
count = sum(not file.endswith('png') for file in files)
for i, file in enumerate(files):
if not file.endswith('png'):
sg.OneLineProgressMeter('Please wait...', i+1, count, 'pb', 'Converting images', orientation='h')
convert_one(file)
sg.OneLineProgressMeter('', count, count, key='pb')
return filenames
def crush_one(file: str) -> None:
subprocess.run(['pngcrush', '-rem', 'alla', '-nofilecheck', '-fix', '-ow',
'-reduce', '-m', '0', file], check=False)
def crush_images(_, files: list[str]) -> None:
for i, file in enumerate(files):
sg.OneLineProgressMeter('Please wait...', i+1, len(files), 'pb',
'Optimize images with pngcrush', orientation='h')
crush_one(file)
sg.OneLineProgressMeter('', len(files), len(files), key='pb')
def convert_and_crush(_, files: list[str]) -> None:
converted = convert_images(None, files)
crush_images(None, converted)
class PNGConverter(GObject.GObject, Nemo.MenuProvider):
def __init__(self):
...
def get_background_items(
self, _, folder: GObject) -> list[Nemo.MenuItem]|None:
folder = urlparse(folder.get_uri()).path
files = [uqp(os.path.join(folder, f))
for f in os.listdir(uqp(folder))
if os.path.isfile(uqp(os.path.join(folder, f))) and
f.lower().endswith(EXTENSIONS)]
if all(file.endswith('png') for file in files):
crush = Nemo.MenuItem(
name='CrushImages',
label='Optimize image(s) with pngcrush',
tip='Optimize image filesizes with pngcrush'
)
crush.connect('activate', crush_images, files)
return [crush]
if any(file.endswith(EXTENSIONS) for file in files):
convert = Nemo.MenuItem(
name="ConvertAllImagestoPNG",
label="Convert all images to PNG",
tip="Convert all images to PNG"
)
convert.connect('activate', convert_images, files)
crush = Nemo.MenuItem(
name='ConvertandCrush',
label="Convert images to PNG and optimize",
tip="Convert images to PNG and optimize filesizes with pngcrush"
)
crush.connect('activate', convert_and_crush, files)
return [convert, crush]
def get_file_items(
self, _, files: list[GObject]) -> list[Nemo.MenuItem]|None:
files = get_files(files)
try:
is_iter = iter(files)
check = all(file.lower().endswith('png') for file in files)
except TypeError:
is_iter = False
check = False
if check:
convert = Nemo.MenuItem(
name="CrushImages",
label="Optimize image(s) with pngcrush",
tip="Optimize filesize(s) with pngcrush"
)
convert.connect('activate', crush_images, files)
return [convert]
if is_iter:
check = all(file.lower().endswith(EXTENSIONS) for file in files)
if check:
convert = Nemo.MenuItem(
name="ConvertImagetoPNG",
label="Convert selected image(s) to .png",
tip="Convert image(s) to .png"
)
convert.connect('activate', convert_images, files)
crush = Nemo.MenuItem(
name="ConvertandCrush",
label="Convert to PNG and optimize with pngcrush",
tip="Convert image(s) to PNG and optimize filesize(s) with\
pngcrush"
)
crush.connect('activate', convert_and_crush, files)
return [convert, crush]
| true | true |
f7223df604d66bf40e373b34878bc06930adef9d | 25,280 | py | Python | tensorflow/python/kernel_tests/signal/fft_ops_test.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/kernel_tests/signal/fft_ops_test.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/kernel_tests/signal/fft_ops_test.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for fft operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_spectral_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import spectral_ops_test_util
from tensorflow.python.ops.signal import fft_ops
from tensorflow.python.platform import test
VALID_FFT_RANKS = (1, 2, 3)
class BaseFFTOpsTest(test.TestCase):
  """Shared helpers comparing TF (I)FFT ops against numpy and checking grads.

  Subclasses supply _npFFT/_npIFFT (numpy reference) and _tfFFT/_tfIFFT
  (op under test) plus the per-rank op selectors.
  """

  def _compare(self, x, rank, fft_length=None, use_placeholder=False,
               rtol=1e-4, atol=1e-4):
    # Check both the forward and the inverse transform.
    self._compareForward(x, rank, fft_length, use_placeholder, rtol, atol)
    self._compareBackward(x, rank, fft_length, use_placeholder, rtol, atol)

  def _compareForward(self, x, rank, fft_length=None, use_placeholder=False,
                      rtol=1e-4, atol=1e-4):
    # Forward transform must match the numpy reference implementation.
    x_np = self._npFFT(x, rank, fft_length)
    if use_placeholder:
      x_ph = array_ops.placeholder(dtype=dtypes.as_dtype(x.dtype))
      x_tf = self._tfFFT(x_ph, rank, fft_length, feed_dict={x_ph: x})
    else:
      x_tf = self._tfFFT(x, rank, fft_length)
    self.assertAllClose(x_np, x_tf, rtol=rtol, atol=atol)

  def _compareBackward(self, x, rank, fft_length=None, use_placeholder=False,
                       rtol=1e-4, atol=1e-4):
    # Inverse transform must match the numpy reference implementation.
    x_np = self._npIFFT(x, rank, fft_length)
    if use_placeholder:
      x_ph = array_ops.placeholder(dtype=dtypes.as_dtype(x.dtype))
      x_tf = self._tfIFFT(x_ph, rank, fft_length, feed_dict={x_ph: x})
    else:
      x_tf = self._tfIFFT(x, rank, fft_length)
    self.assertAllClose(x_np, x_tf, rtol=rtol, atol=atol)

  def _checkMemoryFail(self, x, rank):
    # Restrict the GPU to ~1% of its memory to provoke allocation failure.
    config = config_pb2.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 1e-2
    with self.cached_session(config=config, force_gpu=True):
      self._tfFFT(x, rank, fft_length=None)

  def _checkGradComplex(self, func, x, y, result_is_complex=True,
                        rtol=1e-2, atol=1e-2):
    # Numeric-vs-theoretical gradient check through a complex-input FFT.
    # result_is_complex is currently unused; kept for call-site compatibility.
    with self.cached_session(use_gpu=True):
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      # func is a forward or inverse, real or complex, batched or unbatched FFT
      # function with a complex input.
      z = func(math_ops.complex(inx, iny))
      # loss = sum(|z|^2)
      loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))
      ((x_jacob_t, x_jacob_n),
       (y_jacob_t, y_jacob_n)) = gradient_checker.compute_gradient(
           [inx, iny], [list(x.shape), list(y.shape)],
           loss, [1],
           x_init_value=[x, y],
           delta=1e-2)
    self.assertAllClose(x_jacob_t, x_jacob_n, rtol=rtol, atol=atol)
    self.assertAllClose(y_jacob_t, y_jacob_n, rtol=rtol, atol=atol)

  def _checkGradReal(self, func, x, rtol=1e-2, atol=1e-2):
    # Numeric-vs-theoretical gradient check for a real-input (RFFT) op.
    with self.cached_session(use_gpu=True):
      inx = ops.convert_to_tensor(x)
      # func is a forward RFFT function (batched or unbatched).
      z = func(inx)
      # loss = sum(|z|^2)
      loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))
      x_jacob_t, x_jacob_n = test.compute_gradient(
          inx, list(x.shape), loss, [1], x_init_value=x, delta=1e-2)
    self.assertAllClose(x_jacob_t, x_jacob_n, rtol=rtol, atol=atol)
class FFTOpsTest(BaseFFTOpsTest):
  """Tests for the complex-to-complex fft/ifft ops (ranks 1-3)."""

  def _tfFFT(self, x, rank, fft_length=None, feed_dict=None):
    # fft_length unused for complex FFTs.
    with self.cached_session(use_gpu=True) as sess:
      return sess.run(self._tfFFTForRank(rank)(x), feed_dict=feed_dict)

  def _tfIFFT(self, x, rank, fft_length=None, feed_dict=None):
    # fft_length unused for complex FFTs.
    with self.cached_session(use_gpu=True) as sess:
      return sess.run(self._tfIFFTForRank(rank)(x), feed_dict=feed_dict)

  def _npFFT(self, x, rank, fft_length=None):
    # numpy reference: transform over the trailing `rank` axes.
    if rank == 1:
      return np.fft.fft2(x, s=fft_length, axes=(-1,))
    elif rank == 2:
      return np.fft.fft2(x, s=fft_length, axes=(-2, -1))
    elif rank == 3:
      return np.fft.fft2(x, s=fft_length, axes=(-3, -2, -1))
    else:
      raise ValueError("invalid rank")

  def _npIFFT(self, x, rank, fft_length=None):
    # numpy reference inverse: transform over the trailing `rank` axes.
    if rank == 1:
      return np.fft.ifft2(x, s=fft_length, axes=(-1,))
    elif rank == 2:
      return np.fft.ifft2(x, s=fft_length, axes=(-2, -1))
    elif rank == 3:
      return np.fft.ifft2(x, s=fft_length, axes=(-3, -2, -1))
    else:
      raise ValueError("invalid rank")

  def _tfFFTForRank(self, rank):
    # Map rank -> forward op under test.
    if rank == 1:
      return fft_ops.fft
    elif rank == 2:
      return fft_ops.fft2d
    elif rank == 3:
      return fft_ops.fft3d
    else:
      raise ValueError("invalid rank")

  def _tfIFFTForRank(self, rank):
    # Map rank -> inverse op under test.
    if rank == 1:
      return fft_ops.ifft
    elif rank == 2:
      return fft_ops.ifft2d
    elif rank == 3:
      return fft_ops.ifft3d
    else:
      raise ValueError("invalid rank")

  @test_util.run_deprecated_v1
  def testEmpty(self):
    # Zero-size inputs pass through with their shape preserved.
    with spectral_ops_test_util.fft_kernel_label_map():
      for np_type in (np.complex64, np.complex128):
        for rank in VALID_FFT_RANKS:
          for dims in xrange(rank, rank + 3):
            x = np.zeros((0,) * dims).astype(np_type)
            self.assertEqual(x.shape, self._tfFFT(x, rank).shape)
            self.assertEqual(x.shape, self._tfIFFT(x, rank).shape)

  @test_util.run_deprecated_v1
  def testBasic(self):
    # Small deterministic inputs, with and without batch dimensions.
    with spectral_ops_test_util.fft_kernel_label_map():
      for np_type, tol in ((np.complex64, 1e-4), (np.complex128, 1e-8)):
        for rank in VALID_FFT_RANKS:
          for dims in xrange(rank, rank + 3):
            self._compare(
                np.mod(np.arange(np.power(4, dims)), 10).reshape(
                    (4,) * dims).astype(np_type), rank, rtol=tol, atol=tol)

  def testLargeBatch(self):
    # GPU-only: large batch sizes exercise the batched cuFFT path.
    if test.is_gpu_available(cuda_only=True):
      rank = 1
      for dims in xrange(rank, rank + 3):
        for np_type, tol in ((np.complex64, 1e-4), (np.complex128, 1e-5)):
          self._compare(
              np.mod(np.arange(np.power(128, dims)), 10).reshape(
                  (128,) * dims).astype(np_type), rank, rtol=tol, atol=tol)

  # TODO(yangzihao): Disable before we can figure out a way to
  # properly test memory fail for large batch fft.
  # def testLargeBatchMemoryFail(self):
  #   if test.is_gpu_available(cuda_only=True):
  #     rank = 1
  #     for dims in xrange(rank, rank + 3):
  #       self._checkMemoryFail(
  #           np.mod(np.arange(np.power(128, dims)), 64).reshape(
  #               (128,) * dims).astype(np.complex64), rank)

  @test_util.run_deprecated_v1
  def testBasicPlaceholder(self):
    # Same as testBasic but feeding through a placeholder (unknown shape).
    with spectral_ops_test_util.fft_kernel_label_map():
      for np_type, tol in ((np.complex64, 1e-4), (np.complex128, 1e-8)):
        for rank in VALID_FFT_RANKS:
          for dims in xrange(rank, rank + 3):
            self._compare(
                np.mod(np.arange(np.power(4, dims)), 10).reshape(
                    (4,) * dims).astype(np_type),
                rank, use_placeholder=True, rtol=tol, atol=tol)

  @test_util.run_deprecated_v1
  def testRandom(self):
    # Random complex inputs in [0, 1) + [0, 1)j.
    with spectral_ops_test_util.fft_kernel_label_map():
      for np_type, tol in ((np.complex64, 1e-4), (np.complex128, 5e-6)):
        def gen(shape):
          n = np.prod(shape)
          re = np.random.uniform(size=n)
          im = np.random.uniform(size=n)
          return (re + im * 1j).reshape(shape)

        for rank in VALID_FFT_RANKS:
          for dims in xrange(rank, rank + 3):
            self._compare(gen((4,) * dims).astype(np_type), rank,
                          rtol=tol, atol=tol)

  @test_util.run_deprecated_v1
  def testRandom1D(self):
    # 1-D FFTs over a range of power-of-2 and non-power-of-2 sizes; the
    # CPU (eigen) path gets a looser tolerance than the GPU (cuFFT) path.
    with spectral_ops_test_util.fft_kernel_label_map():
      for np_type in (np.complex64, np.complex128):
        has_gpu = test.is_gpu_available(cuda_only=True)
        tol = {(np.complex64, True): 1e-4,
               (np.complex64, False): 1e-2,
               (np.complex128, True): 1e-4,
               (np.complex128, False): 1e-2}[(np_type, has_gpu)]
        def gen(shape):
          n = np.prod(shape)
          re = np.random.uniform(size=n)
          im = np.random.uniform(size=n)
          return (re + im * 1j).reshape(shape)

        # Check a variety of power-of-2 FFT sizes.
        for dim in (128, 256, 512, 1024):
          self._compare(gen((dim,)).astype(np_type), 1, rtol=tol, atol=tol)

        # Check a variety of non-power-of-2 FFT sizes.
        for dim in (127, 255, 511, 1023):
          self._compare(gen((dim,)).astype(np_type), 1, rtol=tol, atol=tol)

  @test_util.run_deprecated_v1
  def testError(self):
    # Inputs with rank lower than the transform rank must be rejected.
    for rank in VALID_FFT_RANKS:
      for dims in xrange(0, rank):
        x = np.zeros((1,) * dims).astype(np.complex64)
        with self.assertRaisesWithPredicateMatch(
            ValueError, "Shape must be .*rank {}.*".format(rank)):
          self._tfFFT(x, rank)
        with self.assertRaisesWithPredicateMatch(
            ValueError, "Shape must be .*rank {}.*".format(rank)):
          self._tfIFFT(x, rank)

  @test_util.run_deprecated_v1
  def testGrad_Simple(self):
    # Gradient check on a constant input (zero imaginary part).
    with spectral_ops_test_util.fft_kernel_label_map():
      for np_type, tol in ((np.float32, 1e-4), (np.float64, 1e-10)):
        for rank in VALID_FFT_RANKS:
          for dims in xrange(rank, rank + 2):
            re = np.ones(shape=(4,) * dims, dtype=np_type) / 10.0
            im = np.zeros(shape=(4,) * dims, dtype=np_type)
            self._checkGradComplex(self._tfFFTForRank(rank), re, im,
                                   rtol=tol, atol=tol)
            self._checkGradComplex(self._tfIFFTForRank(rank), re, im,
                                   rtol=tol, atol=tol)

  @test_util.run_deprecated_v1
  def testGrad_Random(self):
    # Gradient check on random inputs in [-1, 1) for both components.
    with spectral_ops_test_util.fft_kernel_label_map():
      for np_type, tol in ((np.float32, 1e-2), (np.float64, 1e-10)):
        for rank in VALID_FFT_RANKS:
          for dims in xrange(rank, rank + 2):
            re = np.random.rand(*((3,) * dims)).astype(np_type) * 2 - 1
            im = np.random.rand(*((3,) * dims)).astype(np_type) * 2 - 1
            self._checkGradComplex(self._tfFFTForRank(rank), re, im,
                                   rtol=tol, atol=tol)
            self._checkGradComplex(self._tfIFFTForRank(rank), re, im,
                                   rtol=tol, atol=tol)
class RFFTOpsTest(BaseFFTOpsTest):
  """Tests tf.signal rfft/irfft (and their 2-D/3-D forms) against np.fft."""

  def _compareBackward(self, x, rank, fft_length=None, use_placeholder=False):
    # Forward to the base class, using its default tolerances.
    super(RFFTOpsTest, self)._compareBackward(x, rank, fft_length,
                                              use_placeholder)

  def _tfFFT(self, x, rank, fft_length=None, feed_dict=None):
    """Evaluates the TF forward real FFT of the given rank."""
    with self.cached_session(use_gpu=True) as sess:
      return sess.run(
          self._tfFFTForRank(rank)(x, fft_length), feed_dict=feed_dict)

  def _tfIFFT(self, x, rank, fft_length=None, feed_dict=None):
    """Evaluates the TF inverse real FFT of the given rank."""
    with self.cached_session(use_gpu=True) as sess:
      return sess.run(
          self._tfIFFTForRank(rank)(x, fft_length), feed_dict=feed_dict)

  def _npFFT(self, x, rank, fft_length=None):
    # np.fft.rfft2 is a thin wrapper over rfftn, so passing 1 or 3 axes
    # computes the corresponding 1-D or 3-D real FFT.
    if rank == 1:
      return np.fft.rfft2(x, s=fft_length, axes=(-1,))
    elif rank == 2:
      return np.fft.rfft2(x, s=fft_length, axes=(-2, -1))
    elif rank == 3:
      return np.fft.rfft2(x, s=fft_length, axes=(-3, -2, -1))
    else:
      raise ValueError("invalid rank")

  def _npIFFT(self, x, rank, fft_length=None):
    # Same wrapper trick as _npFFT, using irfft2/irfftn.
    if rank == 1:
      return np.fft.irfft2(x, s=fft_length, axes=(-1,))
    elif rank == 2:
      return np.fft.irfft2(x, s=fft_length, axes=(-2, -1))
    elif rank == 3:
      return np.fft.irfft2(x, s=fft_length, axes=(-3, -2, -1))
    else:
      raise ValueError("invalid rank")

  def _tfFFTForRank(self, rank):
    """Returns the TF forward real-FFT op for the given rank."""
    if rank == 1:
      return fft_ops.rfft
    elif rank == 2:
      return fft_ops.rfft2d
    elif rank == 3:
      return fft_ops.rfft3d
    else:
      raise ValueError("invalid rank")

  def _tfIFFTForRank(self, rank):
    """Returns the TF inverse real-FFT op for the given rank."""
    if rank == 1:
      return fft_ops.irfft
    elif rank == 2:
      return fft_ops.irfft2d
    elif rank == 3:
      return fft_ops.irfft3d
    else:
      raise ValueError("invalid rank")

  @test_util.run_deprecated_v1
  def testEmpty(self):
    """Zero-size inputs should yield zero-size outputs of the same shape."""
    with spectral_ops_test_util.fft_kernel_label_map():
      for rank in VALID_FFT_RANKS:
        for dims in xrange(rank, rank + 3):
          x = np.zeros((0,) * dims).astype(np.float32)
          self.assertEqual(x.shape, self._tfFFT(x, rank).shape)
          x = np.zeros((0,) * dims).astype(np.complex64)
          self.assertEqual(x.shape, self._tfIFFT(x, rank).shape)

  @test_util.run_deprecated_v1
  def testBasic(self):
    """Compares rfft/irfft to NumPy on small deterministic inputs."""
    with spectral_ops_test_util.fft_kernel_label_map():
      for rank in VALID_FFT_RANKS:
        for dims in xrange(rank, rank + 3):
          # Odd and even sizes exercise both halves of the rfft output-length
          # formula (size // 2 + 1).
          for size in (5, 6):
            inner_dim = size // 2 + 1
            r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(
                (size,) * dims)
            self._compareForward(r2c.astype(np.float32), rank, (size,) * rank)
            c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),
                         10).reshape((size,) * (dims - 1) + (inner_dim,))
            self._compareBackward(
                c2r.astype(np.complex64), rank, (size,) * rank)

  def testLargeBatch(self):
    # GPU-only: exercises the batched kernel path with larger sizes.
    if test.is_gpu_available(cuda_only=True):
      rank = 1
      for dims in xrange(rank, rank + 3):
        for size in (64, 128):
          inner_dim = size // 2 + 1
          r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(
              (size,) * dims)
          self._compareForward(r2c.astype(np.float32), rank, (size,) * rank)
          c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),
                       10).reshape((size,) * (dims - 1) + (inner_dim,))
          self._compareBackward(c2r.astype(np.complex64), rank, (size,) * rank)

  @test_util.run_deprecated_v1
  def testBasicPlaceholder(self):
    """Same as testBasic but feeding placeholders with unknown shapes."""
    with spectral_ops_test_util.fft_kernel_label_map():
      for rank in VALID_FFT_RANKS:
        for dims in xrange(rank, rank + 3):
          for size in (5, 6):
            inner_dim = size // 2 + 1
            r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(
                (size,) * dims)
            self._compareForward(
                r2c.astype(np.float32),
                rank, (size,) * rank,
                use_placeholder=True)
            c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),
                         10).reshape((size,) * (dims - 1) + (inner_dim,))
            self._compareBackward(
                c2r.astype(np.complex64),
                rank, (size,) * rank,
                use_placeholder=True)

  @test_util.run_deprecated_v1
  def testFftLength(self):
    """Checks explicit fft_length with both truncation and zero-padding."""
    if test.is_gpu_available(cuda_only=True):
      with spectral_ops_test_util.fft_kernel_label_map():
        for rank in VALID_FFT_RANKS:
          for dims in xrange(rank, rank + 3):
            for size in (5, 6):
              inner_dim = size // 2 + 1
              r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(
                  (size,) * dims)
              c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),
                           10).reshape((size,) * (dims - 1) + (inner_dim,))
              # Test truncation (FFT size < dimensions).
              fft_length = (size - 2,) * rank
              self._compareForward(r2c.astype(np.float32), rank, fft_length)
              self._compareBackward(c2r.astype(np.complex64), rank, fft_length)
              # Confirm it works with unknown shapes as well.
              self._compareForward(
                  r2c.astype(np.float32),
                  rank,
                  fft_length,
                  use_placeholder=True)
              self._compareBackward(
                  c2r.astype(np.complex64),
                  rank,
                  fft_length,
                  use_placeholder=True)
              # Test padding (FFT size > dimensions).
              fft_length = (size + 2,) * rank
              self._compareForward(r2c.astype(np.float32), rank, fft_length)
              self._compareBackward(c2r.astype(np.complex64), rank, fft_length)
              # Confirm it works with unknown shapes as well.
              self._compareForward(
                  r2c.astype(np.float32),
                  rank,
                  fft_length,
                  use_placeholder=True)
              self._compareBackward(
                  c2r.astype(np.complex64),
                  rank,
                  fft_length,
                  use_placeholder=True)

  @test_util.run_deprecated_v1
  def testRandom(self):
    """Compares rfft/irfft to NumPy on uniformly random inputs."""
    with spectral_ops_test_util.fft_kernel_label_map():

      def gen_real(shape):
        # Uniform random real input in [0, 1).
        n = np.prod(shape)
        re = np.random.uniform(size=n)
        ret = re.reshape(shape)
        return ret

      def gen_complex(shape):
        # Uniform random complex input with re/im in [0, 1).
        n = np.prod(shape)
        re = np.random.uniform(size=n)
        im = np.random.uniform(size=n)
        ret = (re + im * 1j).reshape(shape)
        return ret

      for rank in VALID_FFT_RANKS:
        for dims in xrange(rank, rank + 3):
          for size in (5, 6):
            inner_dim = size // 2 + 1
            self._compareForward(gen_real((size,) * dims), rank, (size,) * rank)
            complex_dims = (size,) * (dims - 1) + (inner_dim,)
            self._compareBackward(
                gen_complex(complex_dims), rank, (size,) * rank)

  @test_util.run_deprecated_v1
  def testError(self):
    """Checks the shape and fft_length validation errors."""
    with spectral_ops_test_util.fft_kernel_label_map():
      for rank in VALID_FFT_RANKS:
        # Inputs of lower rank than the transform must be rejected.
        for dims in xrange(0, rank):
          x = np.zeros((1,) * dims).astype(np.complex64)
          with self.assertRaisesWithPredicateMatch(
              ValueError, "Shape .* must have rank at least {}".format(rank)):
            self._tfFFT(x, rank)
          with self.assertRaisesWithPredicateMatch(
              ValueError, "Shape .* must have rank at least {}".format(rank)):
            self._tfIFFT(x, rank)
        for dims in xrange(rank, rank + 2):
          x = np.zeros((1,) * rank)

          # Test non-rank-1 fft_length produces an error.
          fft_length = np.zeros((1, 1)).astype(np.int32)
          with self.assertRaisesWithPredicateMatch(ValueError,
                                                   "Shape .* must have rank 1"):
            self._tfFFT(x, rank, fft_length)
          with self.assertRaisesWithPredicateMatch(ValueError,
                                                   "Shape .* must have rank 1"):
            self._tfIFFT(x, rank, fft_length)

          # Test wrong fft_length length.
          fft_length = np.zeros((rank + 1,)).astype(np.int32)
          with self.assertRaisesWithPredicateMatch(
              ValueError, "Dimension must be .*but is {}.*".format(rank + 1)):
            self._tfFFT(x, rank, fft_length)
          with self.assertRaisesWithPredicateMatch(
              ValueError, "Dimension must be .*but is {}.*".format(rank + 1)):
            self._tfIFFT(x, rank, fft_length)

        # Test that calling the kernel directly without padding to fft_length
        # produces an error.
        rffts_for_rank = {
            1: [gen_spectral_ops.rfft, gen_spectral_ops.irfft],
            2: [gen_spectral_ops.rfft2d, gen_spectral_ops.irfft2d],
            3: [gen_spectral_ops.rfft3d, gen_spectral_ops.irfft3d]
        }
        rfft_fn, irfft_fn = rffts_for_rank[rank]
        with self.assertRaisesWithPredicateMatch(
            errors.InvalidArgumentError,
            "Input dimension .* must have length of at least 6 but got: 5"):
          x = np.zeros((5,) * rank).astype(np.float32)
          fft_length = [6] * rank
          with self.cached_session():
            self.evaluate(rfft_fn(x, fft_length))
        with self.assertRaisesWithPredicateMatch(
            errors.InvalidArgumentError,
            "Input dimension .* must have length of at least .* but got: 3"):
          x = np.zeros((3,) * rank).astype(np.complex64)
          fft_length = [6] * rank
          with self.cached_session():
            self.evaluate(irfft_fn(x, fft_length))

  @test_util.run_deprecated_v1
  def testGrad_Simple(self):
    """Gradient check on constant inputs."""
    with spectral_ops_test_util.fft_kernel_label_map():
      for rank in VALID_FFT_RANKS:
        # rfft3d/irfft3d do not have gradients yet.
        if rank == 3:
          continue
        for dims in xrange(rank, rank + 2):
          for size in (5, 6):
            re = np.ones(shape=(size,) * dims, dtype=np.float32)
            im = -np.ones(shape=(size,) * dims, dtype=np.float32)
            self._checkGradReal(self._tfFFTForRank(rank), re)
            self._checkGradComplex(
                self._tfIFFTForRank(rank), re, im, result_is_complex=False)

  @test_util.run_deprecated_v1
  def testGrad_Random(self):
    """Gradient check on random inputs in [-1, 1)."""
    with spectral_ops_test_util.fft_kernel_label_map():
      for rank in VALID_FFT_RANKS:
        # rfft3d/irfft3d do not have gradients yet.
        if rank == 3:
          continue
        for dims in xrange(rank, rank + 2):
          for size in (5, 6):
            re = np.random.rand(*((size,) * dims)).astype(np.float32) * 2 - 1
            im = np.random.rand(*((size,) * dims)).astype(np.float32) * 2 - 1
            self._checkGradReal(self._tfFFTForRank(rank), re)
            self._checkGradComplex(
                self._tfIFFTForRank(rank), re, im, result_is_complex=False)
class FFTShiftTest(test.TestCase):
  """Tests fft_ops.fftshift/ifftshift against their NumPy counterparts."""

  @test_util.run_deprecated_v1
  def testDefinition(self):
    """Shift moves the zero-frequency term to (from) the spectrum center."""
    with self.session():
      # Odd-length input.
      x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
      y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
      self.assertAllEqual(fft_ops.fftshift(x).eval(), y)
      self.assertAllEqual(fft_ops.ifftshift(y).eval(), x)
      # Even-length input.
      x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
      y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
      self.assertAllEqual(fft_ops.fftshift(x).eval(), y)
      self.assertAllEqual(fft_ops.ifftshift(y).eval(), x)

  @test_util.run_deprecated_v1
  def testAxesKeyword(self):
    """The `axes` argument accepts a tuple, a scalar, or may be omitted."""
    with self.session():
      freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]
      shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]
      self.assertAllEqual(fft_ops.fftshift(freqs, axes=(0, 1)).eval(), shifted)
      # A scalar axis must behave like a one-element tuple.
      self.assertAllEqual(
          fft_ops.fftshift(freqs, axes=0).eval(),
          fft_ops.fftshift(freqs, axes=(0,)).eval())
      self.assertAllEqual(fft_ops.ifftshift(shifted, axes=(0, 1)).eval(), freqs)
      self.assertAllEqual(
          fft_ops.ifftshift(shifted, axes=0).eval(),
          fft_ops.ifftshift(shifted, axes=(0,)).eval())
      # Omitting `axes` shifts every axis.
      self.assertAllEqual(fft_ops.fftshift(freqs).eval(), shifted)
      self.assertAllEqual(fft_ops.ifftshift(shifted).eval(), freqs)

  @test_util.run_deprecated_v1
  def testNumpyCompatibility(self):
    """Results must match np.fft.fftshift/ifftshift exactly."""
    with self.session():
      x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
      y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
      self.assertAllEqual(fft_ops.fftshift(x).eval(), np.fft.fftshift(x))
      self.assertAllEqual(fft_ops.ifftshift(y).eval(), np.fft.ifftshift(y))
      x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
      y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
      self.assertAllEqual(fft_ops.fftshift(x).eval(), np.fft.fftshift(x))
      self.assertAllEqual(fft_ops.ifftshift(y).eval(), np.fft.ifftshift(y))
      freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]
      shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]
      self.assertAllEqual(
          fft_ops.fftshift(freqs, axes=(0, 1)).eval(),
          np.fft.fftshift(freqs, axes=(0, 1)))
      self.assertAllEqual(
          fft_ops.ifftshift(shifted, axes=(0, 1)).eval(),
          np.fft.ifftshift(shifted, axes=(0, 1)))
# Run all test cases in this module when executed as a script.
if __name__ == "__main__":
  test.main()
| 41.374795 | 81 | 0.587935 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_spectral_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import spectral_ops_test_util
from tensorflow.python.ops.signal import fft_ops
from tensorflow.python.platform import test
VALID_FFT_RANKS = (1, 2, 3)
class BaseFFTOpsTest(test.TestCase):
  """Shared comparison and gradient-check helpers for the FFT op tests.

  Subclasses supply _npFFT/_npIFFT (NumPy reference) and _tfFFT/_tfIFFT
  (TensorFlow ops under test) for a given transform rank.
  """

  def _compare(self, x, rank, fft_length=None, use_placeholder=False,
               rtol=1e-4, atol=1e-4):
    # Check both the forward and inverse transforms against NumPy.
    self._compareForward(x, rank, fft_length, use_placeholder, rtol, atol)
    self._compareBackward(x, rank, fft_length, use_placeholder, rtol, atol)

  def _compareForward(self, x, rank, fft_length=None, use_placeholder=False,
                      rtol=1e-4, atol=1e-4):
    # Compare TF's forward transform to NumPy's on the same input.
    x_np = self._npFFT(x, rank, fft_length)
    if use_placeholder:
      # Feed through a placeholder so the op sees unknown static shapes.
      x_ph = array_ops.placeholder(dtype=dtypes.as_dtype(x.dtype))
      x_tf = self._tfFFT(x_ph, rank, fft_length, feed_dict={x_ph: x})
    else:
      x_tf = self._tfFFT(x, rank, fft_length)
    self.assertAllClose(x_np, x_tf, rtol=rtol, atol=atol)

  def _compareBackward(self, x, rank, fft_length=None, use_placeholder=False,
                       rtol=1e-4, atol=1e-4):
    # Compare TF's inverse transform to NumPy's on the same input.
    x_np = self._npIFFT(x, rank, fft_length)
    if use_placeholder:
      x_ph = array_ops.placeholder(dtype=dtypes.as_dtype(x.dtype))
      x_tf = self._tfIFFT(x_ph, rank, fft_length, feed_dict={x_ph: x})
    else:
      x_tf = self._tfIFFT(x, rank, fft_length)
    self.assertAllClose(x_np, x_tf, rtol=rtol, atol=atol)

  def _checkMemoryFail(self, x, rank):
    # Restrict the GPU memory fraction so an over-allocating kernel
    # fails fast instead of succeeding.
    config = config_pb2.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 1e-2
    with self.cached_session(config=config, force_gpu=True):
      self._tfFFT(x, rank, fft_length=None)

  def _checkGradComplex(self, func, x, y, result_is_complex=True,
                        rtol=1e-2, atol=1e-2):
    """Numeric-vs-theoretical gradient check for a complex-input FFT.

    x and y hold the real and imaginary parts of the input.
    """
    with self.cached_session(use_gpu=True):
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      z = func(math_ops.complex(inx, iny))
      # loss = sum(|z|^2), a real-valued scalar.
      loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))
      ((x_jacob_t, x_jacob_n),
       (y_jacob_t, y_jacob_n)) = gradient_checker.compute_gradient(
           [inx, iny], [list(x.shape), list(y.shape)],
           loss, [1],
           x_init_value=[x, y],
           delta=1e-2)
    self.assertAllClose(x_jacob_t, x_jacob_n, rtol=rtol, atol=atol)
    self.assertAllClose(y_jacob_t, y_jacob_n, rtol=rtol, atol=atol)

  def _checkGradReal(self, func, x, rtol=1e-2, atol=1e-2):
    """Numeric-vs-theoretical gradient check for a real-input FFT."""
    with self.cached_session(use_gpu=True):
      inx = ops.convert_to_tensor(x)
      z = func(inx)
      # loss = sum(|z|^2)
      loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))
      x_jacob_t, x_jacob_n = test.compute_gradient(
          inx, list(x.shape), loss, [1], x_init_value=x, delta=1e-2)
    self.assertAllClose(x_jacob_t, x_jacob_n, rtol=rtol, atol=atol)
class FFTOpsTest(BaseFFTOpsTest):
  """Tests the complex-input FFT ops (fft/ifft and 2-D/3-D forms)."""

  def _tfFFT(self, x, rank, fft_length=None, feed_dict=None):
    # fft_length is unused: the complex FFTs transform the full input.
    with self.cached_session(use_gpu=True) as sess:
      return sess.run(self._tfFFTForRank(rank)(x), feed_dict=feed_dict)

  def _tfIFFT(self, x, rank, fft_length=None, feed_dict=None):
    with self.cached_session(use_gpu=True) as sess:
      return sess.run(self._tfIFFTForRank(rank)(x), feed_dict=feed_dict)

  def _npFFT(self, x, rank, fft_length=None):
    # np.fft.fft2 is a thin wrapper over fftn, so passing 1 or 3 axes
    # computes the corresponding 1-D or 3-D transform.
    if rank == 1:
      return np.fft.fft2(x, s=fft_length, axes=(-1,))
    elif rank == 2:
      return np.fft.fft2(x, s=fft_length, axes=(-2, -1))
    elif rank == 3:
      return np.fft.fft2(x, s=fft_length, axes=(-3, -2, -1))
    else:
      raise ValueError("invalid rank")

  def _npIFFT(self, x, rank, fft_length=None):
    # Same wrapper trick as _npFFT, using ifft2/ifftn.
    if rank == 1:
      return np.fft.ifft2(x, s=fft_length, axes=(-1,))
    elif rank == 2:
      return np.fft.ifft2(x, s=fft_length, axes=(-2, -1))
    elif rank == 3:
      return np.fft.ifft2(x, s=fft_length, axes=(-3, -2, -1))
    else:
      raise ValueError("invalid rank")

  def _tfFFTForRank(self, rank):
    """Returns the TF forward complex-FFT op for the given rank."""
    if rank == 1:
      return fft_ops.fft
    elif rank == 2:
      return fft_ops.fft2d
    elif rank == 3:
      return fft_ops.fft3d
    else:
      raise ValueError("invalid rank")

  def _tfIFFTForRank(self, rank):
    """Returns the TF inverse complex-FFT op for the given rank."""
    if rank == 1:
      return fft_ops.ifft
    elif rank == 2:
      return fft_ops.ifft2d
    elif rank == 3:
      return fft_ops.ifft3d
    else:
      raise ValueError("invalid rank")

  @test_util.run_deprecated_v1
  def testEmpty(self):
    """Zero-size inputs should yield zero-size outputs of the same shape."""
    with spectral_ops_test_util.fft_kernel_label_map():
      for np_type in (np.complex64, np.complex128):
        for rank in VALID_FFT_RANKS:
          for dims in xrange(rank, rank + 3):
            x = np.zeros((0,) * dims).astype(np_type)
            self.assertEqual(x.shape, self._tfFFT(x, rank).shape)
            self.assertEqual(x.shape, self._tfIFFT(x, rank).shape)

  @test_util.run_deprecated_v1
  def testBasic(self):
    """Compares fft/ifft to NumPy on small deterministic inputs."""
    with spectral_ops_test_util.fft_kernel_label_map():
      # complex128 supports a much tighter tolerance than complex64.
      for np_type, tol in ((np.complex64, 1e-4), (np.complex128, 1e-8)):
        for rank in VALID_FFT_RANKS:
          for dims in xrange(rank, rank + 3):
            self._compare(
                np.mod(np.arange(np.power(4, dims)), 10).reshape(
                    (4,) * dims).astype(np_type), rank, rtol=tol, atol=tol)

  def testLargeBatch(self):
    # GPU-only: exercises the batched kernel path with a large size.
    if test.is_gpu_available(cuda_only=True):
      rank = 1
      for dims in xrange(rank, rank + 3):
        for np_type, tol in ((np.complex64, 1e-4), (np.complex128, 1e-5)):
          self._compare(
              np.mod(np.arange(np.power(128, dims)), 10).reshape(
                  (128,) * dims).astype(np_type), rank, rtol=tol, atol=tol)

  @test_util.run_deprecated_v1
  def testBasicPlaceholder(self):
    """Same as testBasic but feeding placeholders with unknown shapes."""
    with spectral_ops_test_util.fft_kernel_label_map():
      for np_type, tol in ((np.complex64, 1e-4), (np.complex128, 1e-8)):
        for rank in VALID_FFT_RANKS:
          for dims in xrange(rank, rank + 3):
            self._compare(
                np.mod(np.arange(np.power(4, dims)), 10).reshape(
                    (4,) * dims).astype(np_type),
                rank, use_placeholder=True, rtol=tol, atol=tol)

  @test_util.run_deprecated_v1
  def testRandom(self):
    """Compares fft/ifft to NumPy on uniformly random complex inputs."""
    with spectral_ops_test_util.fft_kernel_label_map():
      for np_type, tol in ((np.complex64, 1e-4), (np.complex128, 5e-6)):

        def gen(shape):
          # Uniform random complex values with re/im in [0, 1).
          n = np.prod(shape)
          re = np.random.uniform(size=n)
          im = np.random.uniform(size=n)
          return (re + im * 1j).reshape(shape)

        for rank in VALID_FFT_RANKS:
          for dims in xrange(rank, rank + 3):
            self._compare(gen((4,) * dims).astype(np_type), rank,
                          rtol=tol, atol=tol)

  @test_util.run_deprecated_v1
  def testRandom1D(self):
    """1-D random tests over a range of power-of-2 and odd sizes."""
    with spectral_ops_test_util.fft_kernel_label_map():
      for np_type in (np.complex64, np.complex128):
        has_gpu = test.is_gpu_available(cuda_only=True)
        # CPU kernels need a looser tolerance than the GPU ones here.
        tol = {(np.complex64, True): 1e-4,
               (np.complex64, False): 1e-2,
               (np.complex128, True): 1e-4,
               (np.complex128, False): 1e-2}[(np_type, has_gpu)]

        def gen(shape):
          n = np.prod(shape)
          re = np.random.uniform(size=n)
          im = np.random.uniform(size=n)
          return (re + im * 1j).reshape(shape)

        # Check a variety of power-of-2 FFT sizes.
        for dim in (128, 256, 512, 1024):
          self._compare(gen((dim,)).astype(np_type), 1, rtol=tol, atol=tol)
        # Check a variety of non-power-of-2 FFT sizes.
        for dim in (127, 255, 511, 1023):
          self._compare(gen((dim,)).astype(np_type), 1, rtol=tol, atol=tol)

  @test_util.run_deprecated_v1
  def testError(self):
    """Inputs of lower rank than the transform must be rejected."""
    for rank in VALID_FFT_RANKS:
      for dims in xrange(0, rank):
        x = np.zeros((1,) * dims).astype(np.complex64)
        with self.assertRaisesWithPredicateMatch(
            ValueError, "Shape must be .*rank {}.*".format(rank)):
          self._tfFFT(x, rank)
        with self.assertRaisesWithPredicateMatch(
            ValueError, "Shape must be .*rank {}.*".format(rank)):
          self._tfIFFT(x, rank)

  @test_util.run_deprecated_v1
  def testGrad_Simple(self):
    """Gradient check on constant inputs."""
    with spectral_ops_test_util.fft_kernel_label_map():
      for np_type, tol in ((np.float32, 1e-4), (np.float64, 1e-10)):
        for rank in VALID_FFT_RANKS:
          for dims in xrange(rank, rank + 2):
            re = np.ones(shape=(4,) * dims, dtype=np_type) / 10.0
            im = np.zeros(shape=(4,) * dims, dtype=np_type)
            self._checkGradComplex(self._tfFFTForRank(rank), re, im,
                                   rtol=tol, atol=tol)
            self._checkGradComplex(self._tfIFFTForRank(rank), re, im,
                                   rtol=tol, atol=tol)

  @test_util.run_deprecated_v1
  def testGrad_Random(self):
    """Gradient check on random inputs in [-1, 1)."""
    with spectral_ops_test_util.fft_kernel_label_map():
      for np_type, tol in ((np.float32, 1e-2), (np.float64, 1e-10)):
        for rank in VALID_FFT_RANKS:
          for dims in xrange(rank, rank + 2):
            re = np.random.rand(*((3,) * dims)).astype(np_type) * 2 - 1
            im = np.random.rand(*((3,) * dims)).astype(np_type) * 2 - 1
            self._checkGradComplex(self._tfFFTForRank(rank), re, im,
                                   rtol=tol, atol=tol)
            self._checkGradComplex(self._tfIFFTForRank(rank), re, im,
                                   rtol=tol, atol=tol)
class RFFTOpsTest(BaseFFTOpsTest):
  """Tests tf.signal rfft/irfft (and their 2-D/3-D forms) against np.fft."""

  def _compareBackward(self, x, rank, fft_length=None, use_placeholder=False):
    # Forward to the base class, using its default tolerances.
    super(RFFTOpsTest, self)._compareBackward(x, rank, fft_length,
                                              use_placeholder)

  def _tfFFT(self, x, rank, fft_length=None, feed_dict=None):
    """Evaluates the TF forward real FFT of the given rank."""
    with self.cached_session(use_gpu=True) as sess:
      return sess.run(
          self._tfFFTForRank(rank)(x, fft_length), feed_dict=feed_dict)

  def _tfIFFT(self, x, rank, fft_length=None, feed_dict=None):
    """Evaluates the TF inverse real FFT of the given rank."""
    with self.cached_session(use_gpu=True) as sess:
      return sess.run(
          self._tfIFFTForRank(rank)(x, fft_length), feed_dict=feed_dict)

  def _npFFT(self, x, rank, fft_length=None):
    # np.fft.rfft2 is a thin wrapper over rfftn, so passing 1 or 3 axes
    # computes the corresponding 1-D or 3-D real FFT.
    if rank == 1:
      return np.fft.rfft2(x, s=fft_length, axes=(-1,))
    elif rank == 2:
      return np.fft.rfft2(x, s=fft_length, axes=(-2, -1))
    elif rank == 3:
      return np.fft.rfft2(x, s=fft_length, axes=(-3, -2, -1))
    else:
      raise ValueError("invalid rank")

  def _npIFFT(self, x, rank, fft_length=None):
    # Same wrapper trick as _npFFT, using irfft2/irfftn.
    if rank == 1:
      return np.fft.irfft2(x, s=fft_length, axes=(-1,))
    elif rank == 2:
      return np.fft.irfft2(x, s=fft_length, axes=(-2, -1))
    elif rank == 3:
      return np.fft.irfft2(x, s=fft_length, axes=(-3, -2, -1))
    else:
      raise ValueError("invalid rank")

  def _tfFFTForRank(self, rank):
    """Returns the TF forward real-FFT op for the given rank."""
    if rank == 1:
      return fft_ops.rfft
    elif rank == 2:
      return fft_ops.rfft2d
    elif rank == 3:
      return fft_ops.rfft3d
    else:
      raise ValueError("invalid rank")

  def _tfIFFTForRank(self, rank):
    """Returns the TF inverse real-FFT op for the given rank."""
    if rank == 1:
      return fft_ops.irfft
    elif rank == 2:
      return fft_ops.irfft2d
    elif rank == 3:
      return fft_ops.irfft3d
    else:
      raise ValueError("invalid rank")

  @test_util.run_deprecated_v1
  def testEmpty(self):
    """Zero-size inputs should yield zero-size outputs of the same shape."""
    with spectral_ops_test_util.fft_kernel_label_map():
      for rank in VALID_FFT_RANKS:
        for dims in xrange(rank, rank + 3):
          x = np.zeros((0,) * dims).astype(np.float32)
          self.assertEqual(x.shape, self._tfFFT(x, rank).shape)
          x = np.zeros((0,) * dims).astype(np.complex64)
          self.assertEqual(x.shape, self._tfIFFT(x, rank).shape)

  @test_util.run_deprecated_v1
  def testBasic(self):
    """Compares rfft/irfft to NumPy on small deterministic inputs."""
    with spectral_ops_test_util.fft_kernel_label_map():
      for rank in VALID_FFT_RANKS:
        for dims in xrange(rank, rank + 3):
          # Odd and even sizes exercise both halves of the rfft output-length
          # formula (size // 2 + 1).
          for size in (5, 6):
            inner_dim = size // 2 + 1
            r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(
                (size,) * dims)
            self._compareForward(r2c.astype(np.float32), rank, (size,) * rank)
            c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),
                         10).reshape((size,) * (dims - 1) + (inner_dim,))
            self._compareBackward(
                c2r.astype(np.complex64), rank, (size,) * rank)

  def testLargeBatch(self):
    # GPU-only: exercises the batched kernel path with larger sizes.
    if test.is_gpu_available(cuda_only=True):
      rank = 1
      for dims in xrange(rank, rank + 3):
        for size in (64, 128):
          inner_dim = size // 2 + 1
          r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(
              (size,) * dims)
          self._compareForward(r2c.astype(np.float32), rank, (size,) * rank)
          c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),
                       10).reshape((size,) * (dims - 1) + (inner_dim,))
          self._compareBackward(c2r.astype(np.complex64), rank, (size,) * rank)

  @test_util.run_deprecated_v1
  def testBasicPlaceholder(self):
    """Same as testBasic but feeding placeholders with unknown shapes."""
    with spectral_ops_test_util.fft_kernel_label_map():
      for rank in VALID_FFT_RANKS:
        for dims in xrange(rank, rank + 3):
          for size in (5, 6):
            inner_dim = size // 2 + 1
            r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(
                (size,) * dims)
            self._compareForward(
                r2c.astype(np.float32),
                rank, (size,) * rank,
                use_placeholder=True)
            c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),
                         10).reshape((size,) * (dims - 1) + (inner_dim,))
            self._compareBackward(
                c2r.astype(np.complex64),
                rank, (size,) * rank,
                use_placeholder=True)

  @test_util.run_deprecated_v1
  def testFftLength(self):
    """Checks explicit fft_length with both truncation and zero-padding."""
    if test.is_gpu_available(cuda_only=True):
      with spectral_ops_test_util.fft_kernel_label_map():
        for rank in VALID_FFT_RANKS:
          for dims in xrange(rank, rank + 3):
            for size in (5, 6):
              inner_dim = size // 2 + 1
              r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(
                  (size,) * dims)
              c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),
                           10).reshape((size,) * (dims - 1) + (inner_dim,))
              # Truncation case: FFT size < input dimensions.
              fft_length = (size - 2,) * rank
              self._compareForward(r2c.astype(np.float32), rank, fft_length)
              self._compareBackward(c2r.astype(np.complex64), rank, fft_length)
              # Confirm truncation also works with unknown shapes.
              self._compareForward(
                  r2c.astype(np.float32),
                  rank,
                  fft_length,
                  use_placeholder=True)
              self._compareBackward(
                  c2r.astype(np.complex64),
                  rank,
                  fft_length,
                  use_placeholder=True)
              # Padding case: FFT size > input dimensions.
              fft_length = (size + 2,) * rank
              self._compareForward(r2c.astype(np.float32), rank, fft_length)
              self._compareBackward(c2r.astype(np.complex64), rank, fft_length)
              # Confirm padding also works with unknown shapes.
              self._compareForward(
                  r2c.astype(np.float32),
                  rank,
                  fft_length,
                  use_placeholder=True)
              self._compareBackward(
                  c2r.astype(np.complex64),
                  rank,
                  fft_length,
                  use_placeholder=True)

  @test_util.run_deprecated_v1
  def testRandom(self):
    """Compares rfft/irfft to NumPy on uniformly random inputs."""
    with spectral_ops_test_util.fft_kernel_label_map():

      def gen_real(shape):
        # Uniform random real input in [0, 1).
        n = np.prod(shape)
        re = np.random.uniform(size=n)
        ret = re.reshape(shape)
        return ret

      def gen_complex(shape):
        # Uniform random complex input with re/im in [0, 1).
        n = np.prod(shape)
        re = np.random.uniform(size=n)
        im = np.random.uniform(size=n)
        ret = (re + im * 1j).reshape(shape)
        return ret

      for rank in VALID_FFT_RANKS:
        for dims in xrange(rank, rank + 3):
          for size in (5, 6):
            inner_dim = size // 2 + 1
            self._compareForward(gen_real((size,) * dims), rank, (size,) * rank)
            complex_dims = (size,) * (dims - 1) + (inner_dim,)
            self._compareBackward(
                gen_complex(complex_dims), rank, (size,) * rank)

  @test_util.run_deprecated_v1
  def testError(self):
    """Checks the shape and fft_length validation errors."""
    with spectral_ops_test_util.fft_kernel_label_map():
      for rank in VALID_FFT_RANKS:
        # Inputs of lower rank than the transform must be rejected.
        for dims in xrange(0, rank):
          x = np.zeros((1,) * dims).astype(np.complex64)
          with self.assertRaisesWithPredicateMatch(
              ValueError, "Shape .* must have rank at least {}".format(rank)):
            self._tfFFT(x, rank)
          with self.assertRaisesWithPredicateMatch(
              ValueError, "Shape .* must have rank at least {}".format(rank)):
            self._tfIFFT(x, rank)
        for dims in xrange(rank, rank + 2):
          x = np.zeros((1,) * rank)

          # Non-rank-1 fft_length must produce an error.
          fft_length = np.zeros((1, 1)).astype(np.int32)
          with self.assertRaisesWithPredicateMatch(ValueError,
                                                   "Shape .* must have rank 1"):
            self._tfFFT(x, rank, fft_length)
          with self.assertRaisesWithPredicateMatch(ValueError,
                                                   "Shape .* must have rank 1"):
            self._tfIFFT(x, rank, fft_length)

          # An fft_length of the wrong length must produce an error.
          fft_length = np.zeros((rank + 1,)).astype(np.int32)
          with self.assertRaisesWithPredicateMatch(
              ValueError, "Dimension must be .*but is {}.*".format(rank + 1)):
            self._tfFFT(x, rank, fft_length)
          with self.assertRaisesWithPredicateMatch(
              ValueError, "Dimension must be .*but is {}.*".format(rank + 1)):
            self._tfIFFT(x, rank, fft_length)

        # Calling the kernel directly without padding to fft_length must
        # produce an error.
        rffts_for_rank = {
            1: [gen_spectral_ops.rfft, gen_spectral_ops.irfft],
            2: [gen_spectral_ops.rfft2d, gen_spectral_ops.irfft2d],
            3: [gen_spectral_ops.rfft3d, gen_spectral_ops.irfft3d]
        }
        rfft_fn, irfft_fn = rffts_for_rank[rank]
        with self.assertRaisesWithPredicateMatch(
            errors.InvalidArgumentError,
            "Input dimension .* must have length of at least 6 but got: 5"):
          x = np.zeros((5,) * rank).astype(np.float32)
          fft_length = [6] * rank
          with self.cached_session():
            self.evaluate(rfft_fn(x, fft_length))
        with self.assertRaisesWithPredicateMatch(
            errors.InvalidArgumentError,
            "Input dimension .* must have length of at least .* but got: 3"):
          x = np.zeros((3,) * rank).astype(np.complex64)
          fft_length = [6] * rank
          with self.cached_session():
            self.evaluate(irfft_fn(x, fft_length))

  @test_util.run_deprecated_v1
  def testGrad_Simple(self):
    """Gradient check on constant inputs."""
    with spectral_ops_test_util.fft_kernel_label_map():
      for rank in VALID_FFT_RANKS:
        # rfft3d/irfft3d do not have gradients yet.
        if rank == 3:
          continue
        for dims in xrange(rank, rank + 2):
          for size in (5, 6):
            re = np.ones(shape=(size,) * dims, dtype=np.float32)
            im = -np.ones(shape=(size,) * dims, dtype=np.float32)
            self._checkGradReal(self._tfFFTForRank(rank), re)
            self._checkGradComplex(
                self._tfIFFTForRank(rank), re, im, result_is_complex=False)

  @test_util.run_deprecated_v1
  def testGrad_Random(self):
    """Gradient check on random inputs in [-1, 1)."""
    with spectral_ops_test_util.fft_kernel_label_map():
      for rank in VALID_FFT_RANKS:
        # rfft3d/irfft3d do not have gradients yet.
        if rank == 3:
          continue
        for dims in xrange(rank, rank + 2):
          for size in (5, 6):
            re = np.random.rand(*((size,) * dims)).astype(np.float32) * 2 - 1
            im = np.random.rand(*((size,) * dims)).astype(np.float32) * 2 - 1
            self._checkGradReal(self._tfFFTForRank(rank), re)
            self._checkGradComplex(
                self._tfIFFTForRank(rank), re, im, result_is_complex=False)
class FFTShiftTest(test.TestCase):
  """Tests fft_ops.fftshift/ifftshift against their NumPy counterparts."""

  @test_util.run_deprecated_v1
  def testDefinition(self):
    """Shift moves the zero-frequency term to (from) the spectrum center."""
    with self.session():
      # Odd-length input.
      x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
      y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
      self.assertAllEqual(fft_ops.fftshift(x).eval(), y)
      self.assertAllEqual(fft_ops.ifftshift(y).eval(), x)
      # Even-length input.
      x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
      y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
      self.assertAllEqual(fft_ops.fftshift(x).eval(), y)
      self.assertAllEqual(fft_ops.ifftshift(y).eval(), x)

  @test_util.run_deprecated_v1
  def testAxesKeyword(self):
    """The `axes` argument accepts a tuple, a scalar, or may be omitted."""
    with self.session():
      freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]
      shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]
      self.assertAllEqual(fft_ops.fftshift(freqs, axes=(0, 1)).eval(), shifted)
      # A scalar axis must behave like a one-element tuple.
      self.assertAllEqual(
          fft_ops.fftshift(freqs, axes=0).eval(),
          fft_ops.fftshift(freqs, axes=(0,)).eval())
      self.assertAllEqual(fft_ops.ifftshift(shifted, axes=(0, 1)).eval(), freqs)
      self.assertAllEqual(
          fft_ops.ifftshift(shifted, axes=0).eval(),
          fft_ops.ifftshift(shifted, axes=(0,)).eval())
      # Omitting `axes` shifts every axis.
      self.assertAllEqual(fft_ops.fftshift(freqs).eval(), shifted)
      self.assertAllEqual(fft_ops.ifftshift(shifted).eval(), freqs)

  @test_util.run_deprecated_v1
  def testNumpyCompatibility(self):
    """Results must match np.fft.fftshift/ifftshift exactly."""
    with self.session():
      x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
      y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
      self.assertAllEqual(fft_ops.fftshift(x).eval(), np.fft.fftshift(x))
      self.assertAllEqual(fft_ops.ifftshift(y).eval(), np.fft.ifftshift(y))
      x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
      y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
      self.assertAllEqual(fft_ops.fftshift(x).eval(), np.fft.fftshift(x))
      self.assertAllEqual(fft_ops.ifftshift(y).eval(), np.fft.ifftshift(y))
      freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]
      shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]
      self.assertAllEqual(
          fft_ops.fftshift(freqs, axes=(0, 1)).eval(),
          np.fft.fftshift(freqs, axes=(0, 1)))
      self.assertAllEqual(
          fft_ops.ifftshift(shifted, axes=(0, 1)).eval(),
          np.fft.ifftshift(shifted, axes=(0, 1)))
# Run all test cases in this module when executed as a script.
if __name__ == "__main__":
  test.main()
| true | true |
f7223e2bd6b862b236c58f7382bcb4dc65b65b5b | 6,781 | py | Python | swagger_client/models/docker_hub_subset.py | swat5421/swagger_portainer | e18b287dc906e171077912677515469ee3f4e5c2 | [
"RSA-MD"
] | null | null | null | swagger_client/models/docker_hub_subset.py | swat5421/swagger_portainer | e18b287dc906e171077912677515469ee3f4e5c2 | [
"RSA-MD"
] | null | null | null | swagger_client/models/docker_hub_subset.py | swat5421/swagger_portainer | e18b287dc906e171077912677515469ee3f4e5c2 | [
"RSA-MD"
] | null | null | null | # coding: utf-8
"""
Portainer API
Portainer API is an HTTP API served by Portainer. It is used by the Portainer UI and everything you can do with the UI can be done using the HTTP API. Examples are available at https://gist.github.com/deviantony/77026d402366b4b43fa5918d41bc42f8 You can find out more about Portainer at [http://portainer.io](http://portainer.io) and get some support on [Slack](http://portainer.io/slack/). # Authentication Most of the API endpoints require to be authenticated as well as some level of authorization to be used. Portainer API uses JSON Web Token to manage authentication and thus requires you to provide a token in the **Authorization** header of each request with the **Bearer** authentication mechanism. Example: ``` Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6MSwidXNlcm5hbWUiOiJhZG1pbiIsInJvbGUiOjEsImV4cCI6MTQ5OTM3NjE1NH0.NJ6vE8FY1WG6jsRQzfMqeatJ4vh2TWAeeYfDhP71YEE ``` # Security Each API endpoint has an associated access policy, it is documented in the description of each endpoint. Different access policies are available: * Public access * Authenticated access * Restricted access * Administrator access ### Public access No authentication is required to access the endpoints with this access policy. ### Authenticated access Authentication is required to access the endpoints with this access policy. ### Restricted access Authentication is required to access the endpoints with this access policy. Extra-checks might be added to ensure access to the resource is granted. Returned data might also be filtered. ### Administrator access Authentication as well as an administrator role are required to access the endpoints with this access policy. # Execute Docker requests Portainer **DO NOT** expose specific endpoints to manage your Docker resources (create a container, remove a volume, etc...). Instead, it acts as a reverse-proxy to the Docker HTTP API. This means that you can execute Docker requests **via** the Portainer HTTP API. 
To do so, you can use the `/endpoints/{id}/docker` Portainer API endpoint (which is not documented below due to Swagger limitations). This endpoint has a restricted access policy so you still need to be authenticated to be able to query this endpoint. Any query on this endpoint will be proxied to the Docker API of the associated endpoint (requests and responses objects are the same as documented in the Docker API). **NOTE**: You can find more information on how to query the Docker API in the [Docker official documentation](https://docs.docker.com/engine/api/v1.30/) as well as in [this Portainer example](https://gist.github.com/deviantony/77026d402366b4b43fa5918d41bc42f8). # noqa: E501
OpenAPI spec version: 1.24.1
Contact: info@portainer.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DockerHubSubset(object):
    """DockerHub registry details exposed by the Portainer API.

    NOTE: originally produced by the swagger code generator; treat the
    public interface as fixed.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    swagger_types = {
        'authentication': 'bool',
        'username': 'str'
    }

    attribute_map = {
        'authentication': 'Authentication',
        'username': 'Username'
    }

    def __init__(self, authentication=None, username=None):  # noqa: E501
        """Builds a DockerHubSubset, optionally setting each field."""
        self._authentication = None
        self._username = None
        self.discriminator = None
        # Route only explicitly supplied values through the property setters.
        if authentication is not None:
            self.authentication = authentication
        if username is not None:
            self.username = username

    @property
    def authentication(self):
        """bool: whether authentication against DockerHub is enabled."""
        return self._authentication

    @authentication.setter
    def authentication(self, authentication):
        """Sets whether authentication against DockerHub is enabled."""
        self._authentication = authentication

    @property
    def username(self):
        """str: the username used to authenticate against the DockerHub."""
        return self._username

    @username.setter
    def username(self, username):
        """Sets the username used to authenticate against the DockerHub."""
        self._username = username

    def to_dict(self):
        """Returns the model properties as a plain dict."""

        def convert(value):
            # Unwrap nested swagger models one container level deep,
            # mirroring the generated serializer's behavior.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: val.to_dict() if hasattr(val, "to_dict") else val
                        for key, val in value.items()}
            return value

        result = {attr: convert(getattr(self, attr))
                  for attr in self.swagger_types}
        # Generated boilerplate: merge dict entries if the model subclasses
        # dict (this class does not, so the branch is inert).
        if issubclass(DockerHubSubset, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal."""
        return (isinstance(other, DockerHubSubset)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
| 46.445205 | 2,674 | 0.668043 |
import pprint
import re
import six
class DockerHubSubset(object):
swagger_types = {
'authentication': 'bool',
'username': 'str'
}
attribute_map = {
'authentication': 'Authentication',
'username': 'Username'
}
def __init__(self, authentication=None, username=None):
self._authentication = None
self._username = None
self.discriminator = None
if authentication is not None:
self.authentication = authentication
if username is not None:
self.username = username
@property
def authentication(self):
return self._authentication
@authentication.setter
def authentication(self, authentication):
self._authentication = authentication
@property
def username(self):
return self._username
@username.setter
def username(self, username):
self._username = username
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DockerHubSubset, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, DockerHubSubset):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f7223e8d03aa3063eacccb5aff0438e30ec28eaf | 813 | py | Python | World 3/Exercise 115/Main.py | NikiReis/Python--Exercises | 2f50a3cd6900cec024edcf1a812d1cd86afcdea1 | [
"MIT"
] | null | null | null | World 3/Exercise 115/Main.py | NikiReis/Python--Exercises | 2f50a3cd6900cec024edcf1a812d1cd86afcdea1 | [
"MIT"
] | null | null | null | World 3/Exercise 115/Main.py | NikiReis/Python--Exercises | 2f50a3cd6900cec024edcf1a812d1cd86afcdea1 | [
"MIT"
] | null | null | null | from Exercise_115.Interface import *
from Exercise_115.Functions import *
data_base = 'registers.txt'
while True:
screen()
options = menu('Type a value: ')
print(line())
if options == 1:
if validation_file(data_base) == True:
name = str(input('Type the name: '))
age = validation_number('Type the age: ')
registering(data_base,name,age)
else:
create(data_base)
mensagem()
name = str(input('Type the name: '))
age = validation_number('Type your age: ')
registering(data_base,name,age)
elif options == 2:
search(data_base)
elif options == 3:
print("Ok... Thanks for using our sistem, come back soon!")
break
else:
print('Invalid Option')
| 26.225806 | 67 | 0.570726 | from Exercise_115.Interface import *
from Exercise_115.Functions import *
data_base = 'registers.txt'
while True:
screen()
options = menu('Type a value: ')
print(line())
if options == 1:
if validation_file(data_base) == True:
name = str(input('Type the name: '))
age = validation_number('Type the age: ')
registering(data_base,name,age)
else:
create(data_base)
mensagem()
name = str(input('Type the name: '))
age = validation_number('Type your age: ')
registering(data_base,name,age)
elif options == 2:
search(data_base)
elif options == 3:
print("Ok... Thanks for using our sistem, come back soon!")
break
else:
print('Invalid Option')
| true | true |
f7223e8d48f1678de1e1a6833f01ec581a312ecd | 927 | py | Python | tempest/util/cli_parser.py | KiranPawar72/tempest | 1fef3dd92b083055793065dd0693454735ec2c01 | [
"Apache-2.0"
] | null | null | null | tempest/util/cli_parser.py | KiranPawar72/tempest | 1fef3dd92b083055793065dd0693454735ec2c01 | [
"Apache-2.0"
] | null | null | null | tempest/util/cli_parser.py | KiranPawar72/tempest | 1fef3dd92b083055793065dd0693454735ec2c01 | [
"Apache-2.0"
] | 1 | 2020-07-21T02:18:23.000Z | 2020-07-21T02:18:23.000Z | import subprocess
def cli_returncode(argument_string):
    """Run *argument_string* through the shell and return its exit code.

    The command's stdout is captured, decoded and echoed to our own stdout
    purely as a debugging aid; only the process return code is returned.
    NOTE: executed with ``shell=True`` -- never pass untrusted input here.
    """
    p = subprocess.Popen(argument_string, shell=True, stdout=subprocess.PIPE)
    # communicate() waits for the process, so no extra wait() is needed.
    out = p.communicate()[0]
    # Fixed from the original Python 2 `print str(...)` statement, which is
    # a SyntaxError on Python 3 (and would print b'...' if str()-ified).
    print(out.decode(errors='replace'))
    return p.returncode
def cli_output(argument_string):
    """Run *argument_string* through the shell and return its stdout as text.

    Decodes the captured bytes: on Python 3 the original ``str(bytes)``
    produced ``"b'...'"`` literals instead of the command output.
    NOTE: executed with ``shell=True`` -- never pass untrusted input here.
    """
    p = subprocess.Popen(argument_string, shell=True, stdout=subprocess.PIPE)
    out = p.communicate()[0].decode(errors='replace')
    return out
def cli_error(argument_string):
    """Run *argument_string* through the shell and return its stderr as text.

    Decodes the captured bytes: on Python 3 the original ``str(bytes)``
    produced ``"b'...'"`` literals instead of the error output.
    NOTE: executed with ``shell=True`` -- never pass untrusted input here.
    """
    p = subprocess.Popen(argument_string, shell=True, stderr=subprocess.PIPE)
    err = p.communicate()[1].decode(errors='replace')
    return err
def cli_response_parser(cli_resp, key_attr):
    """Extract a value from an OpenStack-style ``| key | value |`` table.

    Scans *cli_resp* line by line for a row whose first cell equals
    *key_attr* and returns the whitespace-stripped second cell, or
    ``None`` when the key is not present.
    """
    for row in cli_resp.splitlines():
        # `rstrip().lstrip()` per cell replaced with the idiomatic strip().
        cells = [cell.strip() for cell in row.split("|")]
        if len(cells) > 1 and cells[1] == key_attr:
            return cells[2]
    return None
| 27.264706 | 75 | 0.631068 | import subprocess
def cli_returncode(argument_string):
p = subprocess.Popen(argument_string,shell=True,stdout=subprocess.PIPE)
print str(p.communicate()[0])
p.wait()
rc = p.returncode
return rc
def cli_output(argument_string):
p = subprocess.Popen(argument_string,shell=True,stdout=subprocess.PIPE)
out = str(p.communicate()[0])
p.wait()
return out
def cli_error(argument_string):
p = subprocess.Popen(argument_string,shell=True,stderr=subprocess.PIPE)
err = str(p.communicate()[1])
p.wait()
return err
def cli_response_parser(cli_resp, key_attr):
arrResp = cli_resp.splitlines()
for j in range(0, len(arrResp)):
arrL = arrResp[j].split("|")
for i in range(0, len(arrL)):
arrL[i] = arrL[i].rstrip()
arrL[i] = arrL[i].lstrip()
if(len(arrL) > 1):
if(arrL[1] == key_attr):
return arrL[2]
| false | true |
f7223f884ccd6891aaf8a29566086baf481a946d | 2,219 | py | Python | ospurge/resources/swift.py | puppetlabs-operations/ospurge | 3ea94225fc4ff55b740867c0590f07fde2d56fe5 | [
"Apache-2.0"
] | null | null | null | ospurge/resources/swift.py | puppetlabs-operations/ospurge | 3ea94225fc4ff55b740867c0590f07fde2d56fe5 | [
"Apache-2.0"
] | 5 | 2022-03-22T07:28:35.000Z | 2022-03-22T07:29:16.000Z | ospurge/resources/swift.py | puppetlabs-operations/ospurge | 3ea94225fc4ff55b740867c0590f07fde2d56fe5 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from typing import Any
from typing import Dict
from typing import Iterable
from typing import Iterator
from ospurge.resources import base
from ospurge.resources.base import BaseServiceResource
from ospurge.resources import glance
class ListObjectsMixin(BaseServiceResource):
    """Mixin adding iteration over every Swift object in every container."""
    def list_objects(self) -> Iterator[Dict[str, Any]]:
        """Yield every object of the project, tagged with its container name."""
        for container in self.cloud.list_containers():
            for obj in self.cloud.list_objects(container['name']):
                # Remember where the object lives so delete() can target it.
                obj['container_name'] = container['name']
                yield obj
class Objects(base.ServiceResource, glance.ListImagesMixin, ListObjectsMixin):
    """Purges Swift objects (ORDER 73: after images and volume backups)."""
    ORDER = 73
    def check_prerequisite(self) -> bool:
        # Only safe once the project owns no images and no volume backups,
        # since both may be stored as Swift objects under the hood.
        return (self.list_images_by_owner() == [] and
                self.cloud.list_volume_backups() == [])
    def list(self) -> Iterable:
        """Yield every Swift object of the project."""
        yield from self.list_objects()
    def delete(self, resource: Dict[str, Any]) -> None:
        """Delete one object from its container."""
        self.cloud.delete_object(resource['container_name'], resource['name'])
    @staticmethod
    def to_str(resource: Dict[str, Any]) -> str:
        """Human-readable description used in purge logs."""
        return "Object '{}' from Container '{}'".format(
            resource['name'], resource['container_name'])
class Containers(base.ServiceResource, ListObjectsMixin):
    """Purges Swift containers (ORDER 75: after Objects, 73)."""
    ORDER = 75
    def check_prerequisite(self) -> bool:
        # A container can only be removed once it holds no objects.
        return list(self.list_objects()) == []
    def list(self) -> Iterable:
        """Return every Swift container of the project."""
        return self.cloud.list_containers()
    def delete(self, resource: Dict[str, Any]) -> None:
        """Delete one (empty) container."""
        self.cloud.delete_container(resource['name'])
    @staticmethod
    def to_str(resource: Dict[str, Any]) -> str:
        """Human-readable description used in purge logs."""
        return "Container (name='{}')".format(resource['name'])
| 34.671875 | 78 | 0.688598 |
from typing import Any
from typing import Dict
from typing import Iterable
from typing import Iterator
from ospurge.resources import base
from ospurge.resources.base import BaseServiceResource
from ospurge.resources import glance
class ListObjectsMixin(BaseServiceResource):
def list_objects(self) -> Iterator[Dict[str, Any]]:
for container in self.cloud.list_containers():
for obj in self.cloud.list_objects(container['name']):
obj['container_name'] = container['name']
yield obj
class Objects(base.ServiceResource, glance.ListImagesMixin, ListObjectsMixin):
ORDER = 73
def check_prerequisite(self) -> bool:
return (self.list_images_by_owner() == [] and
self.cloud.list_volume_backups() == [])
def list(self) -> Iterable:
yield from self.list_objects()
def delete(self, resource: Dict[str, Any]) -> None:
self.cloud.delete_object(resource['container_name'], resource['name'])
@staticmethod
def to_str(resource: Dict[str, Any]) -> str:
return "Object '{}' from Container '{}'".format(
resource['name'], resource['container_name'])
class Containers(base.ServiceResource, ListObjectsMixin):
ORDER = 75
def check_prerequisite(self) -> bool:
return list(self.list_objects()) == []
def list(self) -> Iterable:
return self.cloud.list_containers()
def delete(self, resource: Dict[str, Any]) -> None:
self.cloud.delete_container(resource['name'])
@staticmethod
def to_str(resource: Dict[str, Any]) -> str:
return "Container (name='{}')".format(resource['name'])
| true | true |
f7223fe30b9707fcca42785b0ca63b67d1192be3 | 5,830 | py | Python | rl/replay/planner.py | Koen-AI/world-model-as-a-graph | cafde59ef7159d1b62ca508568c85c6498c1342c | [
"MIT"
] | null | null | null | rl/replay/planner.py | Koen-AI/world-model-as-a-graph | cafde59ef7159d1b62ca508568c85c6498c1342c | [
"MIT"
] | null | null | null | rl/replay/planner.py | Koen-AI/world-model-as-a-graph | cafde59ef7159d1b62ca508568c85c6498c1342c | [
"MIT"
] | null | null | null | import threading
import numpy as np
import torch
import os.path as osp
from rl.utils import mpi_utils
# Replay buffer with hindsight (HER) goal-relabelled sampling.
def sample_her_transitions(buffer, reward_func, batch_size, future_step, future_p=1.0):
    """Sample a batch of transitions with hindsight goal relabelling (HER).

    With probability ``future_p`` a transition's goal ``bg`` is replaced by
    an achieved goal from up to ``future_step`` steps later in the same
    episode, and rewards are recomputed against the relabelled goals.
    NOTE: mutates *buffer* by adding the next-step views 'o2' and 'ag2'.
    """
    assert all(k in buffer for k in ['ob', 'ag', 'bg', 'a'])
    # Next-step views: o2/ag2 are the successors of ob/ag.
    buffer['o2'] = buffer['ob'][:, 1:, :]
    buffer['ag2'] = buffer['ag'][:, 1:, :]
    n_trajs, horizon = buffer['a'].shape[:2]
    # Draw a random (episode, timestep) pair for every batch entry.
    ep_idxes = np.random.randint(0, n_trajs, size=batch_size)
    t_samples = np.random.randint(0, horizon, size=batch_size)
    batch = {k: v[ep_idxes, t_samples].copy() for k, v in buffer.items()}
    # Entries whose goal is replaced by a future achieved goal.
    her_indexes = np.where(np.random.uniform(size=batch_size) < future_p)
    max_offset = np.minimum(horizon - t_samples, future_step)
    future_offset = (np.random.uniform(size=batch_size) * max_offset).astype(int)
    future_t = t_samples + 1 + future_offset
    batch['bg'][her_indexes] = buffer['ag'][ep_idxes[her_indexes], future_t[her_indexes]]
    batch['future_ag'] = buffer['ag'][ep_idxes, future_t].copy()
    batch['offset'] = future_offset.copy()
    # Recompute rewards against the (possibly relabelled) goals.
    batch['r'] = reward_func(batch['ag2'], batch['bg'], None)
    assert all(v.shape[0] == batch_size for v in batch.values())
    assert all(k in batch for k in ['ob', 'ag', 'bg', 'a', 'o2', 'ag2', 'r', 'future_ag', 'offset'])
    return batch
def sample_transitions(buffer, batch_size):
    """Uniformly sample ``batch_size`` single-step transitions, no relabelling."""
    n_trajs, horizon = buffer['a'].shape[:2]
    ep_idxes = np.random.randint(0, n_trajs, size=batch_size)
    t_samples = np.random.randint(0, horizon, size=batch_size)
    batch = {k: v[ep_idxes, t_samples].copy() for k, v in buffer.items()}
    assert all(v.shape[0] == batch_size for v in batch.values())
    return batch
class Replay:
    """Episode replay buffer with HER-style sampling.

    Stores whole trajectories in pre-allocated numpy arrays and samples
    relabelled transitions via :func:`sample_her_transitions`.  Buffer
    access is guarded by a lock so rollout and learner threads can share
    one instance; checkpoints are written per MPI rank via torch.save.
    """
    def __init__(self, env_params, args, reward_func, name='replay'):
        # env_params: dict with 'max_timesteps', 'obs', 'goal', 'action' sizes.
        # args: config namespace; uses buffer_size, future_step, future_p.
        # reward_func: callback to recompute rewards for relabelled goals.
        self.env_params = env_params
        self.args = args
        self.reward_func = reward_func
        self.horizon = env_params['max_timesteps']
        # Capacity counted in episodes, not transitions.
        self.size = args.buffer_size // self.horizon
        self.current_size = 0
        self.n_transitions_stored = 0
        # ob/ag carry horizon+1 steps (final state included); bg/a carry horizon.
        self.buffers = dict(ob=np.zeros((self.size, self.horizon + 1, self.env_params['obs'])),
                            ag=np.zeros((self.size, self.horizon + 1, self.env_params['goal'])),
                            bg=np.zeros((self.size, self.horizon, self.env_params['goal'])),
                            a=np.zeros((self.size, self.horizon, self.env_params['action'])))
        self.lock = threading.Lock()
        # One checkpoint file per MPI rank so ranks don't clobber each other.
        self._save_file = str(name) + '_' + str(mpi_utils.get_rank()) + '.pt'
    def store(self, episodes):
        """Append a batch of full episodes, recycling old slots when full."""
        ob_list, ag_list, bg_list, a_list = episodes['ob'], episodes['ag'], episodes['bg'], episodes['a']
        batch_size = ob_list.shape[0]
        with self.lock:
            idxs = self._get_storage_idx(batch_size=batch_size)
            self.buffers['ob'][idxs] = ob_list.copy()  # observations (horizon + 1 steps)
            self.buffers['ag'][idxs] = ag_list.copy()  # achieved goals (horizon + 1 steps)
            self.buffers['bg'][idxs] = bg_list.copy()  # behavioural (desired) goals
            self.buffers['a'][idxs] = a_list.copy()  # actions taken
        self.n_transitions_stored += self.horizon * batch_size
    def sample(self, batch_size):
        """Sample a HER-relabelled batch (see sample_her_transitions)."""
        temp_buffers = {}
        with self.lock:
            # Views (not copies) of the filled portion; sampling copies per-entry.
            for key in self.buffers.keys():
                temp_buffers[key] = self.buffers[key][:self.current_size]
        transitions = sample_her_transitions(temp_buffers, self.reward_func, batch_size,
                                             future_step=self.args.future_step,
                                             future_p=self.args.future_p)
        return transitions
    def _get_storage_idx(self, batch_size):
        """Return destination episode indices, overwriting random old ones when full."""
        if self.current_size + batch_size <= self.size:
            idx = np.arange(self.current_size, self.current_size + batch_size)
        elif self.current_size < self.size:
            # Partially full: fill the tail, then overwrite random old episodes.
            idx_a = np.arange(self.current_size, self.size)
            idx_b = np.random.randint(0, self.current_size, batch_size - len(idx_a))
            idx = np.concatenate([idx_a, idx_b])
        else:
            idx = np.random.randint(0, self.size, batch_size)
        self.current_size = min(self.size, self.current_size + batch_size)
        if batch_size == 1:
            idx = idx[0]
        return idx
    def get_all_data(self):
        """Return views of all stored episodes (NOT copies)."""
        temp_buffers = {}
        with self.lock:
            for key in self.buffers.keys():
                temp_buffers[key] = self.buffers[key][:self.current_size]
        return temp_buffers
    def sample_regular_batch(self, batch_size):
        """Sample a uniform batch without hindsight relabelling."""
        temp_buffers = {}
        with self.lock:
            for key in self.buffers.keys():
                temp_buffers[key] = self.buffers[key][:self.current_size]
        transitions = sample_transitions(temp_buffers, batch_size)
        return transitions
    def state_dict(self):
        """Serialisable snapshot of the buffer for checkpointing."""
        return dict(
            current_size=self.current_size,
            n_transitions_stored=self.n_transitions_stored,
            buffers=self.buffers,
        )
    def load_state_dict(self, state_dict):
        """Restore a snapshot produced by :meth:`state_dict`."""
        self.current_size = state_dict['current_size']
        self.n_transitions_stored = state_dict['n_transitions_stored']
        self.buffers = state_dict['buffers']
    def save(self, path):
        """Checkpoint the buffer under *path* in this rank's file."""
        state_dict = self.state_dict()
        save_path = osp.join(path, self._save_file)
        torch.save(state_dict, save_path)
    def load(self, path):
        """Load this rank's checkpoint, falling back to CPU map_location."""
        load_path = osp.join(path, self._save_file)
        try:
            state_dict = torch.load(load_path)
        except RuntimeError:
            # Checkpoint saved on GPU but none available here: remap to CPU.
            state_dict = torch.load(load_path, map_location=torch.device('cpu'))
        self.load_state_dict(state_dict)
| 41.642857 | 115 | 0.61012 | import threading
import numpy as np
import torch
import os.path as osp
from rl.utils import mpi_utils
def sample_her_transitions(buffer, reward_func, batch_size, future_step, future_p=1.0):
assert all(k in buffer for k in ['ob', 'ag', 'bg', 'a'])
buffer['o2'] = buffer['ob'][:, 1:, :]
buffer['ag2'] = buffer['ag'][:, 1:, :]
n_trajs = buffer['a'].shape[0]
horizon = buffer['a'].shape[1]
ep_idxes = np.random.randint(0, n_trajs, size=batch_size)
t_samples = np.random.randint(0, horizon, size=batch_size)
batch = {key: buffer[key][ep_idxes, t_samples].copy() for key in buffer.keys()}
her_indexes = np.where(np.random.uniform(size=batch_size) < future_p)
future_offset = (np.random.uniform(size=batch_size) * np.minimum(horizon - t_samples, future_step)).astype(int)
future_t = (t_samples + 1 + future_offset)
batch['bg'][her_indexes] = buffer['ag'][ep_idxes[her_indexes], future_t[her_indexes]]
batch['future_ag'] = buffer['ag'][ep_idxes, future_t].copy()
batch['offset'] = future_offset.copy()
batch['r'] = reward_func(batch['ag2'], batch['bg'], None)
assert all(batch[k].shape[0] == batch_size for k in batch.keys())
assert all(k in batch for k in ['ob', 'ag', 'bg', 'a', 'o2', 'ag2', 'r', 'future_ag', 'offset'])
return batch
def sample_transitions(buffer, batch_size):
n_trajs = buffer['a'].shape[0]
horizon = buffer['a'].shape[1]
ep_idxes = np.random.randint(0, n_trajs, size=batch_size)
t_samples = np.random.randint(0, horizon, size=batch_size)
batch = {key: buffer[key][ep_idxes, t_samples].copy() for key in buffer.keys()}
assert all(batch[k].shape[0] == batch_size for k in batch.keys())
return batch
class Replay:
def __init__(self, env_params, args, reward_func, name='replay'):
self.env_params = env_params
self.args = args
self.reward_func = reward_func
self.horizon = env_params['max_timesteps']
self.size = args.buffer_size // self.horizon
self.current_size = 0
self.n_transitions_stored = 0
self.buffers = dict(ob=np.zeros((self.size, self.horizon + 1, self.env_params['obs'])),
ag=np.zeros((self.size, self.horizon + 1, self.env_params['goal'])),
bg=np.zeros((self.size, self.horizon, self.env_params['goal'])),
a=np.zeros((self.size, self.horizon, self.env_params['action'])))
self.lock = threading.Lock()
self._save_file = str(name) + '_' + str(mpi_utils.get_rank()) + '.pt'
def store(self, episodes):
ob_list, ag_list, bg_list, a_list = episodes['ob'], episodes['ag'], episodes['bg'], episodes['a']
batch_size = ob_list.shape[0]
with self.lock:
idxs = self._get_storage_idx(batch_size=batch_size)
self.buffers['ob'][idxs] = ob_list.copy()
self.buffers['ag'][idxs] = ag_list.copy()
self.buffers['bg'][idxs] = bg_list.copy()
self.buffers['a'][idxs] = a_list.copy()
self.n_transitions_stored += self.horizon * batch_size
def sample(self, batch_size):
temp_buffers = {}
with self.lock:
for key in self.buffers.keys():
temp_buffers[key] = self.buffers[key][:self.current_size]
transitions = sample_her_transitions(temp_buffers, self.reward_func, batch_size,
future_step=self.args.future_step,
future_p=self.args.future_p)
return transitions
def _get_storage_idx(self, batch_size):
if self.current_size + batch_size <= self.size:
idx = np.arange(self.current_size, self.current_size + batch_size)
elif self.current_size < self.size:
idx_a = np.arange(self.current_size, self.size)
idx_b = np.random.randint(0, self.current_size, batch_size - len(idx_a))
idx = np.concatenate([idx_a, idx_b])
else:
idx = np.random.randint(0, self.size, batch_size)
self.current_size = min(self.size, self.current_size + batch_size)
if batch_size == 1:
idx = idx[0]
return idx
def get_all_data(self):
temp_buffers = {}
with self.lock:
for key in self.buffers.keys():
temp_buffers[key] = self.buffers[key][:self.current_size]
return temp_buffers
def sample_regular_batch(self, batch_size):
temp_buffers = {}
with self.lock:
for key in self.buffers.keys():
temp_buffers[key] = self.buffers[key][:self.current_size]
transitions = sample_transitions(temp_buffers, batch_size)
return transitions
def state_dict(self):
return dict(
current_size=self.current_size,
n_transitions_stored=self.n_transitions_stored,
buffers=self.buffers,
)
def load_state_dict(self, state_dict):
self.current_size = state_dict['current_size']
self.n_transitions_stored = state_dict['n_transitions_stored']
self.buffers = state_dict['buffers']
def save(self, path):
state_dict = self.state_dict()
save_path = osp.join(path, self._save_file)
torch.save(state_dict, save_path)
def load(self, path):
load_path = osp.join(path, self._save_file)
try:
state_dict = torch.load(load_path)
except RuntimeError:
state_dict = torch.load(load_path, map_location=torch.device('cpu'))
self.load_state_dict(state_dict)
| true | true |
f72240325176353661f8ec018555cfa01ce1bd7a | 5,501 | py | Python | derbot/names/admin.py | bdunnette/derbot-docker | b28fd0bf7f078dac1f72024dbd13c233d657c043 | [
"MIT"
] | null | null | null | derbot/names/admin.py | bdunnette/derbot-docker | b28fd0bf7f078dac1f72024dbd13c233d657c043 | [
"MIT"
] | 11 | 2022-02-21T05:29:51.000Z | 2022-03-31T05:33:10.000Z | derbot/names/admin.py | bdunnette/derbot-docker | b28fd0bf7f078dac1f72024dbd13c233d657c043 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.contrib import admin
from django.db import IntegrityError
from django.db.models import Exists, OuterRef, Q
from django.utils.translation import gettext as _
from import_export import resources
from import_export.admin import ImportExportMixin, ImportExportModelAdmin
from derbot.names.models import Color, DerbyName, DerbyNumber, Jersey, Toot
from derbot.names.tasks import generate_tank, pick_number, toot_name
logger = settings.LOGGER
class TootedFilter(admin.SimpleListFilter):
    """Admin list filter: has this name already been tooted?"""
    title = _("Tooted")
    parameter_name = "tooted"
    def lookups(self, request, model_admin):
        """Offer a simple yes/no choice."""
        return (
            ("yes", _("Yes")),
            ("no", _("No")),
        )
    def queryset(self, request, queryset):
        # EXISTS subquery against Toot rows referencing this name.
        # Returns None (all rows) when no choice is selected, per Django.
        if self.value() == "yes":
            return queryset.filter(Q(Exists(Toot.objects.filter(name=OuterRef("pk")))))
        if self.value() == "no":
            return queryset.filter(~Q(Exists(Toot.objects.filter(name=OuterRef("pk")))))
class JerseyFilter(admin.SimpleListFilter):
    """Admin list filter: does this name have a generated jersey?"""
    title = _("has jersey")
    parameter_name = "jersey"
    def lookups(self, request, model_admin):
        """Offer a simple yes/no choice."""
        return (
            ("yes", _("Yes")),
            ("no", _("No")),
        )
    def queryset(self, request, queryset):
        # Returns None (all rows) when no choice is selected, per Django.
        if self.value() == "yes":
            return queryset.filter(~Q(jersey=None))
        if self.value() == "no":
            return queryset.filter(Q(jersey=None))
class NameResource(resources.ModelResource):
    """django-import-export resource for DerbyName rows."""
    class Meta:
        model = DerbyName
        skip_unchanged = True
        report_skipped = True
        # use_bulk = True
        # batch_size = 100
    def save_instance(self, instance, using_transactions=True, dry_run=False):
        """Save one imported row, silently skipping duplicates.

        IntegrityError (e.g. a unique-name collision) is deliberately
        swallowed so re-importing an overlapping dataset still proceeds.
        """
        try:
            super(NameResource, self).save_instance(
                instance, using_transactions, dry_run
            )
        except IntegrityError:
            pass
class NumberResource(resources.ModelResource):
    """django-import-export resource for DerbyNumber rows."""
    class Meta:
        model = DerbyNumber
        skip_unchanged = True
        report_skipped = True
        # use_bulk = True
        # batch_size = 100
    def save_instance(self, instance, using_transactions=True, dry_run=False):
        """Save one imported row, silently skipping duplicates.

        IntegrityError (e.g. a unique-number collision) is deliberately
        swallowed so re-importing an overlapping dataset still proceeds.
        """
        try:
            super(NumberResource, self).save_instance(
                instance, using_transactions, dry_run
            )
        except IntegrityError:
            pass
@admin.register(DerbyName)
class NameAdmin(ImportExportModelAdmin):
    """Admin for derby names with bulk actions driving the toot pipeline.

    The heavy actions (number picking, tank generation, tooting) are
    dispatched as Celery tasks via ``.delay()`` rather than run inline.
    """
    list_display = (
        "id",
        "name",
        "number",
        "cleared",
        "registered",
        "archived",
        "created",
        "updated",
        "jersey",
    )
    list_filter = ["registered", "cleared", "archived", JerseyFilter, TootedFilter]
    actions = [
        "clear",
        "unclear",
        "archive",
        "unarchive",
        "new_numbers",
        "make_tanks",
        "toot",
    ]
    resource_class = NameResource

    @admin.action(description="Mark selected names as cleared for tooting")
    def clear(self, request, queryset):
        queryset.update(cleared=True)
        self.message_user(request, f"Cleared {queryset.count()} names")

    @admin.action(description="Mark selected names as NOT cleared for tooting")
    def unclear(self, request, queryset):
        queryset.update(cleared=False)
        self.message_user(request, f"Uncleared {queryset.count()} names")

    @admin.action(description="Archive selected names")
    def archive(self, request, queryset):
        queryset.update(archived=True)
        self.message_user(request, f"Archived {queryset.count()} names")

    @admin.action(description="Unrchive selected names")
    def unarchive(self, request, queryset):
        queryset.update(archived=False)
        self.message_user(request, f"Unarchived {queryset.count()} names")

    @admin.action(description="Choose (new) numbers for selected names")
    def new_numbers(self, request, queryset):
        for name in queryset:
            # Leftover debug print(name) removed; the log line suffices.
            logger.info(f"Picking new number for {name}")
            pick_number.delay(name.pk)
        self.message_user(request, f"New numbers chosen for {queryset.count()} names")

    @admin.action(description="Generate tanks for selected names")
    def make_tanks(self, request, queryset):
        for name in queryset:
            # Leftover debug print(name) removed; the log line suffices.
            logger.info(f"Generating tank for {name}")
            generate_tank.delay(name.pk, overwrite=True)
        self.message_user(request, f"Tanks generated for {queryset.count()} names")

    @admin.action(description="Toot selected names")
    def toot(self, request, queryset):
        logger.info(f"Tooting {queryset.count()} names")
        for name in queryset:
            logger.info(f"Tooting {name}")
            toot_name.delay(name.pk, max_wait=0)
        self.message_user(request, "Tooted selected names")
@admin.register(DerbyNumber)
class NumberAdmin(ImportExportModelAdmin):
    """Admin for derby numbers, with import/export support."""
    list_display = ("id", "number", "created", "updated")
    list_filter = ["created", "updated"]
    resource_class = NumberResource
@admin.register(Jersey)
class JerseyAdmin(ImportExportMixin, admin.ModelAdmin):
    """Admin for generated jersey images."""
    list_display = ("id", "derbyname", "fg_color", "bg_color", "image")
@admin.register(Toot)
class TootAdmin(ImportExportMixin, admin.ModelAdmin):
    """Admin for toot records linking names to their Mastodon posts."""
    list_display = ("id", "name", "toot_id", "date")
@admin.register(Color)
class ColorAdmin(ImportExportMixin, admin.ModelAdmin):
    """Admin for jersey color definitions."""
    list_display = ("id", "name", "hex", "pair_with")
| 31.797688 | 88 | 0.642065 | from django.conf import settings
from django.contrib import admin
from django.db import IntegrityError
from django.db.models import Exists, OuterRef, Q
from django.utils.translation import gettext as _
from import_export import resources
from import_export.admin import ImportExportMixin, ImportExportModelAdmin
from derbot.names.models import Color, DerbyName, DerbyNumber, Jersey, Toot
from derbot.names.tasks import generate_tank, pick_number, toot_name
logger = settings.LOGGER
class TootedFilter(admin.SimpleListFilter):
title = _("Tooted")
parameter_name = "tooted"
def lookups(self, request, model_admin):
return (
("yes", _("Yes")),
("no", _("No")),
)
def queryset(self, request, queryset):
if self.value() == "yes":
return queryset.filter(Q(Exists(Toot.objects.filter(name=OuterRef("pk")))))
if self.value() == "no":
return queryset.filter(~Q(Exists(Toot.objects.filter(name=OuterRef("pk")))))
class JerseyFilter(admin.SimpleListFilter):
title = _("has jersey")
parameter_name = "jersey"
def lookups(self, request, model_admin):
return (
("yes", _("Yes")),
("no", _("No")),
)
def queryset(self, request, queryset):
if self.value() == "yes":
return queryset.filter(~Q(jersey=None))
if self.value() == "no":
return queryset.filter(Q(jersey=None))
class NameResource(resources.ModelResource):
class Meta:
model = DerbyName
skip_unchanged = True
report_skipped = True
def save_instance(self, instance, using_transactions=True, dry_run=False):
try:
super(NameResource, self).save_instance(
instance, using_transactions, dry_run
)
except IntegrityError:
pass
class NumberResource(resources.ModelResource):
class Meta:
model = DerbyNumber
skip_unchanged = True
report_skipped = True
def save_instance(self, instance, using_transactions=True, dry_run=False):
try:
super(NumberResource, self).save_instance(
instance, using_transactions, dry_run
)
except IntegrityError:
pass
@admin.register(DerbyName)
class NameAdmin(ImportExportModelAdmin):
list_display = (
"id",
"name",
"number",
"cleared",
"registered",
"archived",
"created",
"updated",
"jersey",
)
list_filter = ["registered", "cleared", "archived", JerseyFilter, TootedFilter]
actions = [
"clear",
"unclear",
"archive",
"unarchive",
"new_numbers",
"make_tanks",
"toot",
]
resource_class = NameResource
@admin.action(description="Mark selected names as cleared for tooting")
def clear(self, request, queryset):
queryset.update(cleared=True)
self.message_user(request, f"Cleared {queryset.count()} names")
@admin.action(description="Mark selected names as NOT cleared for tooting")
def unclear(self, request, queryset):
queryset.update(cleared=False)
self.message_user(request, f"Uncleared {queryset.count()} names")
@admin.action(description="Archive selected names")
def archive(self, request, queryset):
queryset.update(archived=True)
self.message_user(request, f"Archived {queryset.count()} names")
@admin.action(description="Unrchive selected names")
def unarchive(self, request, queryset):
queryset.update(archived=False)
self.message_user(request, f"Unarchived {queryset.count()} names")
@admin.action(description="Choose (new) numbers for selected names")
def new_numbers(self, request, queryset):
for name in queryset:
print(name)
logger.info(f"Picking new number for {name}")
pick_number.delay(name.pk)
self.message_user(request, f"New numbers chosen for {queryset.count()} names")
@admin.action(description="Generate tanks for selected names")
def make_tanks(self, request, queryset):
for name in queryset:
print(name)
logger.info(f"Generating tank for {name}")
generate_tank.delay(name.pk, overwrite=True)
self.message_user(request, f"Tanks generated for {queryset.count()} names")
@admin.action(description="Toot selected names")
def toot(self, request, queryset):
logger.info(f"Tooting {queryset.count()} names")
for name in queryset:
logger.info(f"Tooting {name}")
toot_name.delay(name.pk, max_wait=0)
self.message_user(request, "Tooted selected names")
@admin.register(DerbyNumber)
class NumberAdmin(ImportExportModelAdmin):
list_display = ("id", "number", "created", "updated")
list_filter = ["created", "updated"]
resource_class = NumberResource
@admin.register(Jersey)
class JerseyAdmin(ImportExportMixin, admin.ModelAdmin):
list_display = ("id", "derbyname", "fg_color", "bg_color", "image")
@admin.register(Toot)
class TootAdmin(ImportExportMixin, admin.ModelAdmin):
list_display = ("id", "name", "toot_id", "date")
@admin.register(Color)
class ColorAdmin(ImportExportMixin, admin.ModelAdmin):
list_display = ("id", "name", "hex", "pair_with")
| true | true |
f722404009a7e9ec7dea247f4a90acdaa2b64d1d | 4,678 | py | Python | accelbyte_py_sdk/api/basic/models/country_group_object.py | encyphered/accelbyte-python-sdk | 09c1e989d7251de308150fdcd3119d662ca2d205 | [
"MIT"
] | null | null | null | accelbyte_py_sdk/api/basic/models/country_group_object.py | encyphered/accelbyte-python-sdk | 09c1e989d7251de308150fdcd3119d662ca2d205 | [
"MIT"
] | null | null | null | accelbyte_py_sdk/api/basic/models/country_group_object.py | encyphered/accelbyte-python-sdk | 09c1e989d7251de308150fdcd3119d662ca2d205 | [
"MIT"
] | null | null | null | # Auto-generated at 2021-09-27T17:12:38.701190+08:00
# from: Justice Basic Service (1.17.0)
# Copyright (c) 2018 - 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
from ..models.country_object import CountryObject
class CountryGroupObject(Model):
    """Country group object

    Properties:
        country_group_code: (countryGroupCode) OPTIONAL str
        country_group_name: (countryGroupName) OPTIONAL str
        countries: (countries) OPTIONAL List[CountryObject]
    """

    # NOTE(review): auto-generated file (see header) -- prefer regenerating
    # over hand-editing; comments below document the generated conventions.

    # region fields

    country_group_code: str # OPTIONAL
    country_group_name: str # OPTIONAL
    countries: List[CountryObject] # OPTIONAL

    # endregion fields

    # region with_x methods

    def with_country_group_code(self, value: str) -> CountryGroupObject:
        # Fluent setter; returns self for chaining.
        self.country_group_code = value
        return self

    def with_country_group_name(self, value: str) -> CountryGroupObject:
        # Fluent setter; returns self for chaining.
        self.country_group_name = value
        return self

    def with_countries(self, value: List[CountryObject]) -> CountryGroupObject:
        # Fluent setter; returns self for chaining.
        self.countries = value
        return self

    # endregion with_x methods

    # region to methods

    def to_dict(self, include_empty: bool = False) -> dict:
        """Serialize to the wire (camelCase) dict shape.

        NOTE(review): the truthiness checks mean falsy-but-set values
        (e.g. "" or []) are emitted only when include_empty is True --
        this is the generated convention, not a bug.
        """
        result = {}
        if hasattr(self, "country_group_code") and self.country_group_code:
            result["countryGroupCode"] = str(self.country_group_code)
        elif include_empty:
            result["countryGroupCode"] = str()
        if hasattr(self, "country_group_name") and self.country_group_name:
            result["countryGroupName"] = str(self.country_group_name)
        elif include_empty:
            result["countryGroupName"] = str()
        if hasattr(self, "countries") and self.countries:
            result["countries"] = [i0.to_dict(include_empty=include_empty) for i0 in self.countries]
        elif include_empty:
            result["countries"] = []
        return result

    # endregion to methods

    # region static methods

    @classmethod
    def create(
        cls,
        country_group_code: Optional[str] = None,
        country_group_name: Optional[str] = None,
        countries: Optional[List[CountryObject]] = None,
    ) -> CountryGroupObject:
        # Build an instance, setting only the fields that were provided;
        # unset fields stay absent (see the hasattr checks in to_dict).
        instance = cls()
        if country_group_code is not None:
            instance.country_group_code = country_group_code
        if country_group_name is not None:
            instance.country_group_name = country_group_name
        if countries is not None:
            instance.countries = countries
        return instance

    @classmethod
    def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> CountryGroupObject:
        # Deserialize from a wire (camelCase) dict; missing/None keys are
        # skipped unless include_empty requests empty placeholders.
        instance = cls()
        if not dict_:
            return instance
        if "countryGroupCode" in dict_ and dict_["countryGroupCode"] is not None:
            instance.country_group_code = str(dict_["countryGroupCode"])
        elif include_empty:
            instance.country_group_code = str()
        if "countryGroupName" in dict_ and dict_["countryGroupName"] is not None:
            instance.country_group_name = str(dict_["countryGroupName"])
        elif include_empty:
            instance.country_group_name = str()
        if "countries" in dict_ and dict_["countries"] is not None:
            instance.countries = [CountryObject.create_from_dict(i0, include_empty=include_empty) for i0 in dict_["countries"]]
        elif include_empty:
            instance.countries = []
        return instance

    @staticmethod
    def get_field_info() -> Dict[str, str]:
        # Map wire (camelCase) names to attribute (snake_case) names.
        return {
            "countryGroupCode": "country_group_code",
            "countryGroupName": "country_group_name",
            "countries": "countries",
        }

    # endregion static methods
| 35.709924 | 127 | 0.648782 |
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
from ..models.country_object import CountryObject
class CountryGroupObject(Model):
    """Country group model: an optional code, name, and list of countries.

    All three fields are optional class-level annotations; an attribute
    exists on an instance only after it has been assigned.
    """

    country_group_code: str
    country_group_name: str
    countries: List[CountryObject]

    def with_country_group_code(self, value: str) -> CountryGroupObject:
        # Fluent setter; returns self for chaining.
        self.country_group_code = value
        return self

    def with_country_group_name(self, value: str) -> CountryGroupObject:
        # Fluent setter; returns self for chaining.
        self.country_group_name = value
        return self

    def with_countries(self, value: List[CountryObject]) -> CountryGroupObject:
        # Fluent setter; returns self for chaining.
        self.countries = value
        return self

    def to_dict(self, include_empty: bool = False) -> dict:
        """Serialize to the wire (camelCase) dict shape.

        NOTE(review): truthiness checks mean falsy-but-set values (e.g. "")
        are emitted only when include_empty is True.
        """
        result = {}
        if hasattr(self, "country_group_code") and self.country_group_code:
            result["countryGroupCode"] = str(self.country_group_code)
        elif include_empty:
            result["countryGroupCode"] = str()
        if hasattr(self, "country_group_name") and self.country_group_name:
            result["countryGroupName"] = str(self.country_group_name)
        elif include_empty:
            result["countryGroupName"] = str()
        if hasattr(self, "countries") and self.countries:
            result["countries"] = [i0.to_dict(include_empty=include_empty) for i0 in self.countries]
        elif include_empty:
            result["countries"] = []
        return result

    @classmethod
    def create(
        cls,
        country_group_code: Optional[str] = None,
        country_group_name: Optional[str] = None,
        countries: Optional[List[CountryObject]] = None,
    ) -> CountryGroupObject:
        # Build an instance, setting only the fields that were provided.
        instance = cls()
        if country_group_code is not None:
            instance.country_group_code = country_group_code
        if country_group_name is not None:
            instance.country_group_name = country_group_name
        if countries is not None:
            instance.countries = countries
        return instance

    @classmethod
    def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> CountryGroupObject:
        # Deserialize from a wire (camelCase) dict; tolerates missing keys.
        instance = cls()
        if not dict_:
            return instance
        if "countryGroupCode" in dict_ and dict_["countryGroupCode"] is not None:
            instance.country_group_code = str(dict_["countryGroupCode"])
        elif include_empty:
            instance.country_group_code = str()
        if "countryGroupName" in dict_ and dict_["countryGroupName"] is not None:
            instance.country_group_name = str(dict_["countryGroupName"])
        elif include_empty:
            instance.country_group_name = str()
        if "countries" in dict_ and dict_["countries"] is not None:
            instance.countries = [CountryObject.create_from_dict(i0, include_empty=include_empty) for i0 in dict_["countries"]]
        elif include_empty:
            instance.countries = []
        return instance

    @staticmethod
    def get_field_info() -> Dict[str, str]:
        # Map wire (camelCase) names to attribute (snake_case) names.
        return {
            "countryGroupCode": "country_group_code",
            "countryGroupName": "country_group_name",
            "countries": "countries",
        }
| true | true |
f722419c27153ef2e7230dd43ebd82a7ac439f73 | 3,127 | py | Python | vertx/response.py | hugollm/vertx | d67dbfc08e275e2d0bca5e572fcda1fe58db8e03 | [
"MIT"
] | null | null | null | vertx/response.py | hugollm/vertx | d67dbfc08e275e2d0bca5e572fcda1fe58db8e03 | [
"MIT"
] | null | null | null | vertx/response.py | hugollm/vertx | d67dbfc08e275e2d0bca5e572fcda1fe58db8e03 | [
"MIT"
] | null | null | null | from datetime import datetime
from http.client import responses as STATUS_MESSAGES
from http.cookies import SimpleCookie
import mimetypes
import os
from .case_insensitive_dict import CaseInsensitiveDict
class Response(BaseException):
    """An HTTP response: status, headers, cookies, and a body or file.

    NOTE(review): inherits BaseException, presumably so a handler can
    abort by raising the response object itself -- confirm against the
    framework's dispatcher before changing the base class.
    """

    def __init__(self):
        self.status = 404  # default until a handler sets something else
        self.headers = CaseInsensitiveDict()
        self.cookies = []  # raw Set-Cookie header values, in append order
        self._body = b''
        self._file = None  # path of a file to stream instead of _body

    def __str__(self):
        return self.to_str()

    def __repr__(self):
        return self.to_str()

    def to_str(self):
        """Short debug form: <ClassName:<headers>h:<body bytes>b>."""
        return '<{}:{}h:{}b>'.format(
            type(self).__name__, len(self.headers), len(self._body))

    @property
    def body(self):
        return self._body

    @body.setter
    def body(self, value):
        # Anything that is not already bytes is stringified, then UTF-8
        # encoded, so handlers can assign str (or any object) directly.
        self._body = value if type(value) is bytes else str(value).encode('utf-8')

    def file(self, path, type=None, download=False, name=None):
        """Configure the response to stream the file at `path`.

        When `type` is omitted it is guessed from the filename; `download`
        selects attachment vs inline disposition; `name` overrides the
        filename advertised to the client.
        """
        self._file = path
        if type is None:
            type, _ = mimetypes.guess_type(path)
        disposition = 'attachment' if download else 'inline'
        disposition += '; filename="{}"'.format(name or os.path.basename(path))
        self.headers['Content-Type'] = type or 'application/octet-stream'
        self.headers['Content-Disposition'] = disposition
        self.headers['Content-Length'] = str(os.stat(path).st_size)

    def set_cookie(self, key, value, expires=None, domain=None, path=None, secure=False, http_only=True, same_site=True):
        """Append a Set-Cookie header value built from the given attributes."""
        parts = [SimpleCookie({key: value}).get(key).OutputString()]
        if expires:
            parts.append('Expires=' + expires.strftime('%a, %d %b %Y %T') + ' GMT')
        if domain:
            parts.append('Domain=' + domain)
        if path:
            parts.append('Path=' + path)
        if secure:
            parts.append('Secure')
        if http_only:
            parts.append('HttpOnly')
        if same_site:
            parts.append('SameSite=Strict')
        self.cookies.append('; '.join(parts))

    def unset_cookie(self, key, domain=None, path=None):
        """Append a Set-Cookie value that expires the cookie immediately."""
        epoch = datetime(1970, 1, 1).strftime('%a, %d %b %Y %T')
        parts = [key + '=; Expires=' + epoch + ' GMT']
        if domain:
            parts.append('Domain=' + domain)
        if path:
            parts.append('Path=' + path)
        self.cookies.append('; '.join(parts))

    def wsgi(self, start_respose):  # (sic) parameter name kept for compatibility
        """WSGI exit point: emit status/headers, return the body iterable."""
        start_respose(self._wsgi_status(), self._wsgi_headers())
        return self._wsgi_file() if self._file else self._wsgi_body()

    def _wsgi_status(self):
        # Unknown status codes render with an empty reason phrase.
        return '{} {}'.format(self.status, STATUS_MESSAGES.get(self.status, ''))

    def _wsgi_headers(self):
        headers = list(self.headers.items())
        headers.extend(('Set-Cookie', cookie) for cookie in self.cookies)
        return headers

    def _wsgi_body(self):
        return (self.body,)

    def _wsgi_file(self):
        # Generator that streams the file in 1 MiB chunks.
        with open(self._file, 'rb') as stream:
            chunk_size = 1024 ** 2
            chunk = stream.read(chunk_size)
            while chunk:
                yield chunk
                chunk = stream.read(chunk_size)
| 31.27 | 121 | 0.574672 | from datetime import datetime
from http.client import responses as STATUS_MESSAGES
from http.cookies import SimpleCookie
import mimetypes
import os
from .case_insensitive_dict import CaseInsensitiveDict
class Response(BaseException):
    """An HTTP response: status, headers, cookies, and a body or file.

    NOTE(review): inherits BaseException, presumably so a handler can
    abort by raising the response object itself -- confirm against the
    framework's dispatcher.
    """

    def __init__(self):
        self.status = 404  # default until a handler sets something else
        self.headers = CaseInsensitiveDict()
        self.cookies = []  # raw Set-Cookie header values, in append order
        self._body = b''
        self._file = None  # path of a file to stream instead of _body

    def __str__(self):
        return self.to_str()

    def __repr__(self):
        return self.to_str()

    def to_str(self):
        """Short debug form: <ClassName:<headers>h:<body bytes>b>."""
        cls = type(self).__name__
        headers = len(self.headers)
        bytes = len(self._body)  # NOTE(review): local shadows builtin `bytes`
        return '<{}:{}h:{}b>'.format(cls, headers, bytes)

    @property
    def body(self):
        return self._body

    @body.setter
    def body(self, value):
        # Non-bytes values are stringified and UTF-8 encoded.
        if type(value) is not bytes:
            value = str(value).encode('utf-8')
        self._body = value

    def file(self, path, type=None, download=False, name=None):
        """Configure the response to stream the file at `path`.

        When `type` is omitted it is guessed from the filename; `download`
        selects attachment vs inline disposition; `name` overrides the
        filename advertised to the client.
        """
        self._file = path
        if type is None:
            type, _ = mimetypes.guess_type(path)
        self.headers['Content-Type'] = type or 'application/octet-stream'
        self.headers['Content-Disposition'] = 'attachment' if download else 'inline'
        self.headers['Content-Disposition'] += '; filename="{}"'.format(name or os.path.basename(path))
        self.headers['Content-Length'] = str(os.stat(path).st_size)

    def set_cookie(self, key, value, expires=None, domain=None, path=None, secure=False, http_only=True, same_site=True):
        """Append a Set-Cookie header value built from the given attributes."""
        cookie = SimpleCookie({key: value}).get(key).OutputString()
        if expires:
            cookie += '; Expires=' + expires.strftime('%a, %d %b %Y %T') + ' GMT'
        if domain:
            cookie += '; Domain=' + domain
        if path:
            cookie += '; Path=' + path
        if secure:
            cookie += '; Secure'
        if http_only:
            cookie += '; HttpOnly'
        if same_site:
            cookie += '; SameSite=Strict'
        self.cookies.append(cookie)

    def unset_cookie(self, key, domain=None, path=None):
        """Append a Set-Cookie value that expires the cookie immediately."""
        cookie = key + '=; Expires=' + datetime(1970, 1, 1).strftime('%a, %d %b %Y %T') + ' GMT'
        if domain:
            cookie += '; Domain=' + domain
        if path:
            cookie += '; Path=' + path
        self.cookies.append(cookie)

    def wsgi(self, start_respose):
        """WSGI exit point: emit status/headers, return the body iterable."""
        start_respose(self._wsgi_status(), self._wsgi_headers())
        if self._file:
            return self._wsgi_file()
        return self._wsgi_body()

    def _wsgi_status(self):
        # Unknown status codes render with an empty reason phrase.
        return str(self.status) + ' ' + STATUS_MESSAGES.get(self.status, '')

    def _wsgi_headers(self):
        headers = list(self.headers.items())
        for cookie in self.cookies:
            headers.append(('Set-Cookie', cookie))
        return headers

    def _wsgi_body(self):
        return (self.body,)

    def _wsgi_file(self):
        # Generator that streams the file in 1 MiB chunks.
        with open(self._file, 'rb') as f:
            mbyte = 1024 ** 2
            while True:
                chunk = f.read(mbyte)
                if not chunk:
                    break
                yield chunk
| true | true |
f72241b49458dd8302528f576568424dc16360f7 | 17,204 | py | Python | Python/OneLang/Utils/TSOverviewGenerator.py | onelang/OneLang-CrossCompiled | ed67f2b57e7181712c7d8c2ba85f23b3812d8e3a | [
"MIT"
] | 2 | 2020-11-15T23:38:23.000Z | 2020-11-16T00:54:25.000Z | Python/OneLang/Utils/TSOverviewGenerator.py | onelang/OneLang-CrossCompiled | ed67f2b57e7181712c7d8c2ba85f23b3812d8e3a | [
"MIT"
] | null | null | null | Python/OneLang/Utils/TSOverviewGenerator.py | onelang/OneLang-CrossCompiled | ed67f2b57e7181712c7d8c2ba85f23b3812d8e3a | [
"MIT"
] | null | null | null | from onelang_core import *
import OneLang.One.Ast.Expressions as exprs
import OneLang.One.Ast.Statements as stats
import OneLang.One.Ast.Types as types
import OneLang.One.Ast.AstTypes as astTypes
import OneLang.One.Ast.References as refs
import OneLang.One.Ast.Interfaces as ints
import onelang_core as one
import json
import re
@one.static_init
class TSOverviewGenerator:
    @classmethod
    def static_init(cls):
        # Shared preview-mode instance (preview_only=True); invoked by the
        # @one.static_init class decorator at class-creation time.
        cls.preview = cls(True)
    def __init__(self, preview_only = False, show_types = False):
        # preview_only: collapse bodies/argument lists to "..." placeholders.
        self.preview_only = preview_only
        # show_types: wrap every rendered expression with its inferred type.
        self.show_types = show_types
def leading(self, item):
result = ""
if item.leading_trivia != None and len(item.leading_trivia) > 0:
result += item.leading_trivia
if item.attributes != None:
result += "".join(list(map(lambda x: f'''/// {{ATTR}} name="{x}", value={json.dumps(item.attributes.get(x), separators=(',', ':'))}\n''', item.attributes.keys())))
return result
def pre_arr(self, prefix, value):
return f'''{prefix}{", ".join(value)}''' if len(value) > 0 else ""
def pre_if(self, prefix, condition):
return prefix if condition else ""
def pre(self, prefix, value):
return f'''{prefix}{value}''' if value != None else ""
def type_args(self, args):
return f'''<{", ".join(args)}>''' if args != None and len(args) > 0 else ""
def type(self, t, raw = False):
repr = "???" if t == None else t.repr()
if repr == "U:UNKNOWN":
pass
return ("" if raw else "{T}") + repr
    def var(self, v):
        """Render a variable-like declaration (field, property, parameter,
        local): modifiers, analysis markers, name, type, and -- where the
        node kind carries one -- its initializer."""
        result = ""
        is_prop = isinstance(v, types.Property)
        if isinstance(v, types.Field) or isinstance(v, types.Property):
            m = v
            # NOTE(review): pre_if("", ...) is a no-op regardless of
            # is_static -- was the prefix meant to be "static "? confirm.
            result += self.pre_if("", m.is_static)
            result += "private " if m.visibility == types.VISIBILITY.PRIVATE else "protected " if m.visibility == types.VISIBILITY.PROTECTED else "public " if m.visibility == types.VISIBILITY.PUBLIC else "VISIBILITY-NOT-SET"
        result += f'''{("@prop " if is_prop else "")}'''
        if v.mutability != None:
            # Mutability-analysis markers.
            result += f'''{("@unused " if v.mutability.unused else "")}'''
            result += f'''{("@mutated " if v.mutability.mutated else "")}'''
            result += f'''{("@reass " if v.mutability.reassigned else "")}'''
        # Properties render with "()" after the name.
        result += f'''{v.name}{("()" if is_prop else "")}: {self.type(v.type)}'''
        if isinstance(v, stats.VariableDeclaration) or isinstance(v, stats.ForVariable) or isinstance(v, types.Field) or isinstance(v, types.MethodParameter):
            init = (v).initializer
            if init != None:
                result += self.pre(" = ", self.expr(init))
        return result
    def expr(self, expr):
        """Render any Expression node to the annotated overview syntax.

        Tag markers such as {ID}, {M}, {R:...}, {T} identify the resolved
        node kind in the output; in preview mode argument lists collapse
        to "...". The isinstance dispatch order matters: more specific
        node types must be tested before their bases.
        """
        res = "UNKNOWN-EXPR"
        if isinstance(expr, exprs.NewExpression):
            res = f'''new {self.type(expr.cls_)}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
        elif isinstance(expr, exprs.UnresolvedNewExpression):
            res = f'''new {self.type(expr.cls_)}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
        elif isinstance(expr, exprs.Identifier):
            res = f'''{{ID}}{expr.text}'''
        elif isinstance(expr, exprs.PropertyAccessExpression):
            res = f'''{self.expr(expr.object)}.{{PA}}{expr.property_name}'''
        elif isinstance(expr, exprs.UnresolvedCallExpression):
            type_args = f'''<{", ".join(list(map(lambda x: self.type(x), expr.type_args)))}>''' if len(expr.type_args) > 0 else ""
            res = f'''{self.expr(expr.func)}{type_args}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
        elif isinstance(expr, exprs.UnresolvedMethodCallExpression):
            type_args = f'''<{", ".join(list(map(lambda x: self.type(x), expr.type_args)))}>''' if len(expr.type_args) > 0 else ""
            res = f'''{self.expr(expr.object)}.{{UM}}{expr.method_name}{type_args}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
        elif isinstance(expr, exprs.InstanceMethodCallExpression):
            type_args = f'''<{", ".join(list(map(lambda x: self.type(x), expr.type_args)))}>''' if len(expr.type_args) > 0 else ""
            res = f'''{self.expr(expr.object)}.{{M}}{expr.method.name}{type_args}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
        elif isinstance(expr, exprs.StaticMethodCallExpression):
            type_args = f'''<{", ".join(list(map(lambda x: self.type(x), expr.type_args)))}>''' if len(expr.type_args) > 0 else ""
            res = f'''{expr.method.parent_interface.name}.{{M}}{expr.method.name}{type_args}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
        elif isinstance(expr, exprs.GlobalFunctionCallExpression):
            res = f'''{expr.func.name}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
        elif isinstance(expr, exprs.LambdaCallExpression):
            res = f'''{self.expr(expr.method)}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
        elif isinstance(expr, exprs.BooleanLiteral):
            res = f'''{("true" if expr.bool_value else "false")}'''
        elif isinstance(expr, exprs.StringLiteral):
            res = f'''{json.dumps(expr.string_value, separators=(',', ':'))}'''
        elif isinstance(expr, exprs.NumericLiteral):
            res = f'''{expr.value_as_text}'''
        elif isinstance(expr, exprs.CharacterLiteral):
            res = f'''\'{expr.char_value}\''''
        elif isinstance(expr, exprs.ElementAccessExpression):
            res = f'''({self.expr(expr.object)})[{self.expr(expr.element_expr)}]'''
        elif isinstance(expr, exprs.TemplateString):
            res = "`" + "".join(list(map(lambda x: x.literal_text if x.is_literal else "${" + self.expr(x.expression) + "}", expr.parts))) + "`"
        elif isinstance(expr, exprs.BinaryExpression):
            res = f'''{self.expr(expr.left)} {expr.operator} {self.expr(expr.right)}'''
        elif isinstance(expr, exprs.ArrayLiteral):
            res = f'''[{", ".join(list(map(lambda x: self.expr(x), expr.items)))}]'''
        elif isinstance(expr, exprs.CastExpression):
            res = f'''<{self.type(expr.new_type)}>({self.expr(expr.expression)})'''
        elif isinstance(expr, exprs.ConditionalExpression):
            res = f'''{self.expr(expr.condition)} ? {self.expr(expr.when_true)} : {self.expr(expr.when_false)}'''
        elif isinstance(expr, exprs.InstanceOfExpression):
            res = f'''{self.expr(expr.expr)} instanceof {self.type(expr.check_type)}'''
        elif isinstance(expr, exprs.ParenthesizedExpression):
            res = f'''({self.expr(expr.expression)})'''
        elif isinstance(expr, exprs.RegexLiteral):
            res = f'''/{expr.pattern}/{("g" if expr.global_ else "")}{("g" if expr.case_insensitive else "")}'''
        elif isinstance(expr, types.Lambda):
            res = f'''({", ".join(list(map(lambda x: x.name + (": " + self.type(x.type) if x.type != None else ""), expr.parameters)))})''' + (f''' @captures({", ".join(list(map(lambda x: x.name, expr.captures)))})''' if expr.captures != None and len(expr.captures) > 0 else "") + f''' => {{ {self.raw_block(expr.body)} }}'''
        elif isinstance(expr, exprs.UnaryExpression) and expr.unary_type == exprs.UNARY_TYPE.PREFIX:
            res = f'''{expr.operator}{self.expr(expr.operand)}'''
        elif isinstance(expr, exprs.UnaryExpression) and expr.unary_type == exprs.UNARY_TYPE.POSTFIX:
            res = f'''{self.expr(expr.operand)}{expr.operator}'''
        elif isinstance(expr, exprs.MapLiteral):
            # Multi-line entries are wrapped in braces and indented.
            repr = ",\n".join(list(map(lambda item: f'''{item.key}: {self.expr(item.value)}''', expr.items)))
            res = "{L:M}" + ("{}" if repr == "" else f'''{{\n{self.pad(repr)}\n}}''' if "\n" in repr else f'''{{ {repr} }}''')
        elif isinstance(expr, exprs.NullLiteral):
            res = f'''null'''
        elif isinstance(expr, exprs.AwaitExpression):
            res = f'''await {self.expr(expr.expr)}'''
        elif isinstance(expr, refs.ThisReference):
            res = f'''{{R}}this'''
        elif isinstance(expr, refs.StaticThisReference):
            res = f'''{{R:Static}}this'''
        elif isinstance(expr, refs.EnumReference):
            res = f'''{{R:Enum}}{expr.decl.name}'''
        elif isinstance(expr, refs.ClassReference):
            res = f'''{{R:Cls}}{expr.decl.name}'''
        elif isinstance(expr, refs.MethodParameterReference):
            res = f'''{{R:MetP}}{expr.decl.name}'''
        elif isinstance(expr, refs.VariableDeclarationReference):
            res = f'''{{V}}{expr.decl.name}'''
        elif isinstance(expr, refs.ForVariableReference):
            res = f'''{{R:ForV}}{expr.decl.name}'''
        elif isinstance(expr, refs.ForeachVariableReference):
            res = f'''{{R:ForEV}}{expr.decl.name}'''
        elif isinstance(expr, refs.CatchVariableReference):
            res = f'''{{R:CatchV}}{expr.decl.name}'''
        elif isinstance(expr, refs.GlobalFunctionReference):
            res = f'''{{R:GFunc}}{expr.decl.name}'''
        elif isinstance(expr, refs.SuperReference):
            res = f'''{{R}}super'''
        elif isinstance(expr, refs.StaticFieldReference):
            res = f'''{{R:StFi}}{expr.decl.parent_interface.name}::{expr.decl.name}'''
        elif isinstance(expr, refs.StaticPropertyReference):
            res = f'''{{R:StPr}}{expr.decl.parent_class.name}::{expr.decl.name}'''
        elif isinstance(expr, refs.InstanceFieldReference):
            res = f'''{self.expr(expr.object)}.{{F}}{expr.field.name}'''
        elif isinstance(expr, refs.InstancePropertyReference):
            res = f'''{self.expr(expr.object)}.{{P}}{expr.property.name}'''
        elif isinstance(expr, refs.EnumMemberReference):
            res = f'''{{E}}{expr.decl.parent_enum.name}::{expr.decl.name}'''
        elif isinstance(expr, exprs.NullCoalesceExpression):
            res = f'''{self.expr(expr.default_expr)} ?? {self.expr(expr.expr_if_null)}'''
        else:
            # Unhandled node kinds fall through and keep "UNKNOWN-EXPR".
            pass
        if self.show_types:
            res = f'''<{self.type(expr.get_type(), True)}>({res})'''
        return res
def block(self, block, allow_one_liner = True):
if self.preview_only:
return " { ... }"
stmt_len = len(block.statements)
return " { }" if stmt_len == 0 else f'''\n{self.pad(self.raw_block(block))}''' if allow_one_liner and stmt_len == 1 else f''' {{\n{self.pad(self.raw_block(block))}\n}}'''
    def stmt(self, stmt):
        """Render a single Statement; prefixes leading trivia/attributes
        unless in preview mode."""
        res = "UNKNOWN-STATEMENT"
        if isinstance(stmt, stats.BreakStatement):
            res = "break;"
        elif isinstance(stmt, stats.ReturnStatement):
            res = "return;" if stmt.expression == None else f'''return {self.expr(stmt.expression)};'''
        elif isinstance(stmt, stats.UnsetStatement):
            res = f'''unset {self.expr(stmt.expression)};'''
        elif isinstance(stmt, stats.ThrowStatement):
            res = f'''throw {self.expr(stmt.expression)};'''
        elif isinstance(stmt, stats.ExpressionStatement):
            res = f'''{self.expr(stmt.expression)};'''
        elif isinstance(stmt, stats.VariableDeclaration):
            res = f'''var {self.var(stmt)};'''
        elif isinstance(stmt, stats.ForeachStatement):
            res = f'''for (const {stmt.item_var.name} of {self.expr(stmt.items)})''' + self.block(stmt.body)
        elif isinstance(stmt, stats.IfStatement):
            # "else if" is modeled as an else-block holding one IfStatement.
            else_if = stmt.else_ != None and len(stmt.else_.statements) == 1 and isinstance(stmt.else_.statements[0], stats.IfStatement)
            res = f'''if ({self.expr(stmt.condition)}){self.block(stmt.then)}'''
            if not self.preview_only:
                res += (f'''\nelse {self.stmt(stmt.else_.statements[0])}''' if else_if else "") + (f'''\nelse''' + self.block(stmt.else_) if not else_if and stmt.else_ != None else "")
        elif isinstance(stmt, stats.WhileStatement):
            res = f'''while ({self.expr(stmt.condition)})''' + self.block(stmt.body)
        elif isinstance(stmt, stats.ForStatement):
            res = f'''for ({(self.var(stmt.item_var) if stmt.item_var != None else "")}; {self.expr(stmt.condition)}; {self.expr(stmt.incrementor)})''' + self.block(stmt.body)
        elif isinstance(stmt, stats.DoStatement):
            res = f'''do{self.block(stmt.body)} while ({self.expr(stmt.condition)})'''
        elif isinstance(stmt, stats.TryStatement):
            # try body never renders as a brace-less one-liner (False).
            res = "try" + self.block(stmt.try_body, False) + (f''' catch ({stmt.catch_var.name}){self.block(stmt.catch_body)}''' if stmt.catch_body != None else "") + ("finally" + self.block(stmt.finally_body) if stmt.finally_body != None else "")
        elif isinstance(stmt, stats.ContinueStatement):
            res = f'''continue;'''
        else:
            # Unhandled node kinds keep "UNKNOWN-STATEMENT".
            pass
        return res if self.preview_only else self.leading(stmt) + res
def raw_block(self, block):
return "\n".join(list(map(lambda stmt: self.stmt(stmt), block.statements)))
    def method_base(self, method, returns):
        """Render the shared signature/body part of methods, constructors
        and global functions; `returns` supplies the return type to print.

        Void return types render without a ": <type>" suffix; a method
        without a body (abstract/interface) ends with ";".
        """
        if method == None:
            return ""
        name = method.name if isinstance(method, types.Method) else "constructor" if isinstance(method, types.Constructor) else method.name if isinstance(method, types.GlobalFunction) else "???"
        # Only Method nodes carry type arguments here.
        type_args = method.type_arguments if isinstance(method, types.Method) else None
        return self.pre_if("/* throws */ ", method.throws) + f'''{name}{self.type_args(type_args)}({", ".join(list(map(lambda p: self.leading(p) + self.var(p), method.parameters)))})''' + ("" if isinstance(returns, astTypes.VoidType) else f''': {self.type(returns)}''') + (f''' {{\n{self.pad(self.raw_block(method.body))}\n}}''' if method.body != None else ";")
def method(self, method):
return "" if method == None else ("static " if method.is_static else "") + ("@mutates " if method.attributes != None and "mutates" in method.attributes else "") + self.method_base(method, method.returns)
    def class_like(self, cls_):
        """Render the indented member list (fields, properties, constructor,
        methods) of a class or interface; empty sections are filtered out."""
        res_list = []
        res_list.append("\n".join(list(map(lambda field: self.var(field) + ";", cls_.fields))))
        if isinstance(cls_, types.Class):
            # Properties and constructor exist only on classes.
            res_list.append("\n".join(list(map(lambda prop: self.var(prop) + ";", cls_.properties))))
            res_list.append(self.method_base(cls_.constructor_, astTypes.VoidType.instance))
        res_list.append("\n\n".join(list(map(lambda method: self.method(method), cls_.methods))))
        return self.pad("\n\n".join(list(filter(lambda x: x != "", res_list))))
def pad(self, str):
return "\n".join(list(map(lambda x: f''' {x}''', re.split("\\n", str))))
def imp(self, imp):
return "" + ("X" if isinstance(imp, types.UnresolvedImport) else "C" if isinstance(imp, types.Class) else "I" if isinstance(imp, types.Interface) else "E" if isinstance(imp, types.Enum) else "???") + f''':{imp.name}'''
    def node_repr(self, node):
        """Render any AST node: dispatch to stmt() or expr(), with a TODO
        placeholder for unsupported node kinds."""
        if isinstance(node, stats.Statement):
            return self.stmt(node)
        elif isinstance(node, exprs.Expression):
            return self.expr(node)
        else:
            return "/* TODO: missing */"
def generate(self, source_file):
imps = list(map(lambda imp: (f'''import * as {imp.import_as}''' if imp.import_all else f'''import {{ {", ".join(list(map(lambda x: self.imp(x), imp.imports)))} }}''') + f''' from "{imp.export_scope.package_name}{self.pre("/", imp.export_scope.scope_name)}";''', source_file.imports))
enums = list(map(lambda enum_: f'''{self.leading(enum_)}enum {enum_.name} {{ {", ".join(list(map(lambda x: x.name, enum_.values)))} }}''', source_file.enums))
intfs = list(map(lambda intf: f'''{self.leading(intf)}interface {intf.name}{self.type_args(intf.type_arguments)}''' + f'''{self.pre_arr(" extends ", list(map(lambda x: self.type(x), intf.base_interfaces)))} {{\n{self.class_like(intf)}\n}}''', source_file.interfaces))
classes = list(map(lambda cls_: f'''{self.leading(cls_)}class {cls_.name}{self.type_args(cls_.type_arguments)}''' + self.pre(" extends ", self.type(cls_.base_class) if cls_.base_class != None else None) + self.pre_arr(" implements ", list(map(lambda x: self.type(x), cls_.base_interfaces))) + f''' {{\n{self.class_like(cls_)}\n}}''', source_file.classes))
funcs = list(map(lambda func: f'''{self.leading(func)}function {func.name}{self.method_base(func, func.returns)}''', source_file.funcs))
main = self.raw_block(source_file.main_block)
result = f'''// export scope: {source_file.export_scope.package_name}/{source_file.export_scope.scope_name}\n''' + "\n\n".join(list(filter(lambda x: x != "", ["\n".join(imps), "\n".join(enums), "\n\n".join(intfs), "\n\n".join(classes), "\n\n".join(funcs), main])))
return result | 66.42471 | 363 | 0.602592 | from onelang_core import *
import OneLang.One.Ast.Expressions as exprs
import OneLang.One.Ast.Statements as stats
import OneLang.One.Ast.Types as types
import OneLang.One.Ast.AstTypes as astTypes
import OneLang.One.Ast.References as refs
import OneLang.One.Ast.Interfaces as ints
import onelang_core as one
import json
import re
@one.static_init
class TSOverviewGenerator:
    @classmethod
    def static_init(cls):
        # Shared preview-mode instance (preview_only=True); invoked by the
        # @one.static_init class decorator at class-creation time.
        cls.preview = cls(True)
    def __init__(self, preview_only = False, show_types = False):
        # preview_only: collapse bodies/argument lists to "..." placeholders.
        self.preview_only = preview_only
        # show_types: wrap every rendered expression with its inferred type.
        self.show_types = show_types
    def leading(self, item):
        """Render an item's leading trivia plus one `/// {ATTR}` line per
        attribute (values JSON-encoded)."""
        result = ""
        if item.leading_trivia != None and len(item.leading_trivia) > 0:
            result += item.leading_trivia
        if item.attributes != None:
            result += "".join(list(map(lambda x: f'''/// {{ATTR}} name="{x}", value={json.dumps(item.attributes.get(x), separators=(',', ':'))}\n''', item.attributes.keys())))
        return result
    def pre_arr(self, prefix, value):
        # Prefix + ", "-joined values; "" for an empty list.
        return f'''{prefix}{", ".join(value)}''' if len(value) > 0 else ""
    def pre_if(self, prefix, condition):
        # Emit the prefix only when the condition holds.
        return prefix if condition else ""
    def pre(self, prefix, value):
        # Prefix + value when value is present; "" for None.
        return f'''{prefix}{value}''' if value != None else ""
    def type_args(self, args):
        # "<a, b>" for a non-empty list; "" for None/empty.
        return f'''<{", ".join(args)}>''' if args != None and len(args) > 0 else ""
    def type(self, t, raw = False):
        """Render a type: its repr() ("???" for None), prefixed with the
        "{T}" tag unless `raw` is True."""
        repr = "???" if t == None else t.repr()
        if repr == "U:UNKNOWN":
            # NOTE(review): dead branch; likely a leftover debugger hook.
            pass
        return ("" if raw else "{T}") + repr
    def var(self, v):
        """Render a variable-like declaration (field, property, parameter,
        local): modifiers, analysis markers, name, type, and -- where the
        node kind carries one -- its initializer."""
        result = ""
        is_prop = isinstance(v, types.Property)
        if isinstance(v, types.Field) or isinstance(v, types.Property):
            m = v
            # NOTE(review): pre_if("", ...) is a no-op regardless of
            # is_static -- was the prefix meant to be "static "? confirm.
            result += self.pre_if("", m.is_static)
            result += "private " if m.visibility == types.VISIBILITY.PRIVATE else "protected " if m.visibility == types.VISIBILITY.PROTECTED else "public " if m.visibility == types.VISIBILITY.PUBLIC else "VISIBILITY-NOT-SET"
        result += f'''{("@prop " if is_prop else "")}'''
        if v.mutability != None:
            # Mutability-analysis markers.
            result += f'''{("@unused " if v.mutability.unused else "")}'''
            result += f'''{("@mutated " if v.mutability.mutated else "")}'''
            result += f'''{("@reass " if v.mutability.reassigned else "")}'''
        # Properties render with "()" after the name.
        result += f'''{v.name}{("()" if is_prop else "")}: {self.type(v.type)}'''
        if isinstance(v, stats.VariableDeclaration) or isinstance(v, stats.ForVariable) or isinstance(v, types.Field) or isinstance(v, types.MethodParameter):
            init = (v).initializer
            if init != None:
                result += self.pre(" = ", self.expr(init))
        return result
def expr(self, expr):
    """
    Render a single expression AST node as compact overview text.

    Each node kind gets a tagged textual form (e.g. "{ID}name" for an
    identifier, "{M}method(...)" for a resolved method call).  When
    self.preview_only is set, call argument lists are elided as "...".
    When self.show_types is set, the rendered expression is wrapped with
    its computed type: "<Type>(expr)".

    @param expr: an expression node from the exprs/refs/types AST modules
    @return: rendered expression string
    """
    res = "UNKNOWN-EXPR"
    if isinstance(expr, exprs.NewExpression):
        res = f'''new {self.type(expr.cls_)}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
    elif isinstance(expr, exprs.UnresolvedNewExpression):
        res = f'''new {self.type(expr.cls_)}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
    elif isinstance(expr, exprs.Identifier):
        res = f'''{{ID}}{expr.text}'''
    elif isinstance(expr, exprs.PropertyAccessExpression):
        res = f'''{self.expr(expr.object)}.{{PA}}{expr.property_name}'''
    elif isinstance(expr, exprs.UnresolvedCallExpression):
        type_args = f'''<{", ".join(list(map(lambda x: self.type(x), expr.type_args)))}>''' if len(expr.type_args) > 0 else ""
        res = f'''{self.expr(expr.func)}{type_args}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
    elif isinstance(expr, exprs.UnresolvedMethodCallExpression):
        type_args = f'''<{", ".join(list(map(lambda x: self.type(x), expr.type_args)))}>''' if len(expr.type_args) > 0 else ""
        res = f'''{self.expr(expr.object)}.{{UM}}{expr.method_name}{type_args}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
    elif isinstance(expr, exprs.InstanceMethodCallExpression):
        type_args = f'''<{", ".join(list(map(lambda x: self.type(x), expr.type_args)))}>''' if len(expr.type_args) > 0 else ""
        res = f'''{self.expr(expr.object)}.{{M}}{expr.method.name}{type_args}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
    elif isinstance(expr, exprs.StaticMethodCallExpression):
        type_args = f'''<{", ".join(list(map(lambda x: self.type(x), expr.type_args)))}>''' if len(expr.type_args) > 0 else ""
        res = f'''{expr.method.parent_interface.name}.{{M}}{expr.method.name}{type_args}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
    elif isinstance(expr, exprs.GlobalFunctionCallExpression):
        res = f'''{expr.func.name}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
    elif isinstance(expr, exprs.LambdaCallExpression):
        res = f'''{self.expr(expr.method)}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
    elif isinstance(expr, exprs.BooleanLiteral):
        res = f'''{("true" if expr.bool_value else "false")}'''
    elif isinstance(expr, exprs.StringLiteral):
        res = f'''{json.dumps(expr.string_value, separators=(',', ':'))}'''
    elif isinstance(expr, exprs.NumericLiteral):
        res = f'''{expr.value_as_text}'''
    elif isinstance(expr, exprs.CharacterLiteral):
        res = f'''\'{expr.char_value}\''''
    elif isinstance(expr, exprs.ElementAccessExpression):
        res = f'''({self.expr(expr.object)})[{self.expr(expr.element_expr)}]'''
    elif isinstance(expr, exprs.TemplateString):
        res = "`" + "".join(list(map(lambda x: x.literal_text if x.is_literal else "${" + self.expr(x.expression) + "}", expr.parts))) + "`"
    elif isinstance(expr, exprs.BinaryExpression):
        res = f'''{self.expr(expr.left)} {expr.operator} {self.expr(expr.right)}'''
    elif isinstance(expr, exprs.ArrayLiteral):
        res = f'''[{", ".join(list(map(lambda x: self.expr(x), expr.items)))}]'''
    elif isinstance(expr, exprs.CastExpression):
        res = f'''<{self.type(expr.new_type)}>({self.expr(expr.expression)})'''
    elif isinstance(expr, exprs.ConditionalExpression):
        res = f'''{self.expr(expr.condition)} ? {self.expr(expr.when_true)} : {self.expr(expr.when_false)}'''
    elif isinstance(expr, exprs.InstanceOfExpression):
        res = f'''{self.expr(expr.expr)} instanceof {self.type(expr.check_type)}'''
    elif isinstance(expr, exprs.ParenthesizedExpression):
        res = f'''({self.expr(expr.expression)})'''
    elif isinstance(expr, exprs.RegexLiteral):
        # BUGFIX: the case-insensitive flag must render as "i"; it previously
        # emitted a second "g", making /p/gi indistinguishable from /p/gg.
        res = f'''/{expr.pattern}/{("g" if expr.global_ else "")}{("i" if expr.case_insensitive else "")}'''
    elif isinstance(expr, types.Lambda):
        res = f'''({", ".join(list(map(lambda x: x.name + (": " + self.type(x.type) if x.type != None else ""), expr.parameters)))})''' + (f''' @captures({", ".join(list(map(lambda x: x.name, expr.captures)))})''' if expr.captures != None and len(expr.captures) > 0 else "") + f''' => {{ {self.raw_block(expr.body)} }}'''
    elif isinstance(expr, exprs.UnaryExpression) and expr.unary_type == exprs.UNARY_TYPE.PREFIX:
        res = f'''{expr.operator}{self.expr(expr.operand)}'''
    elif isinstance(expr, exprs.UnaryExpression) and expr.unary_type == exprs.UNARY_TYPE.POSTFIX:
        res = f'''{self.expr(expr.operand)}{expr.operator}'''
    elif isinstance(expr, exprs.MapLiteral):
        # renamed from `repr` (shadowed the builtin)
        entries_repr = ",\n".join(list(map(lambda item: f'''{item.key}: {self.expr(item.value)}''', expr.items)))
        res = "{L:M}" + ("{}" if entries_repr == "" else f'''{{\n{self.pad(entries_repr)}\n}}''' if "\n" in entries_repr else f'''{{ {entries_repr} }}''')
    elif isinstance(expr, exprs.NullLiteral):
        res = f'''null'''
    elif isinstance(expr, exprs.AwaitExpression):
        res = f'''await {self.expr(expr.expr)}'''
    elif isinstance(expr, refs.ThisReference):
        res = f'''{{R}}this'''
    elif isinstance(expr, refs.StaticThisReference):
        res = f'''{{R:Static}}this'''
    elif isinstance(expr, refs.EnumReference):
        res = f'''{{R:Enum}}{expr.decl.name}'''
    elif isinstance(expr, refs.ClassReference):
        res = f'''{{R:Cls}}{expr.decl.name}'''
    elif isinstance(expr, refs.MethodParameterReference):
        res = f'''{{R:MetP}}{expr.decl.name}'''
    elif isinstance(expr, refs.VariableDeclarationReference):
        res = f'''{{V}}{expr.decl.name}'''
    elif isinstance(expr, refs.ForVariableReference):
        res = f'''{{R:ForV}}{expr.decl.name}'''
    elif isinstance(expr, refs.ForeachVariableReference):
        res = f'''{{R:ForEV}}{expr.decl.name}'''
    elif isinstance(expr, refs.CatchVariableReference):
        res = f'''{{R:CatchV}}{expr.decl.name}'''
    elif isinstance(expr, refs.GlobalFunctionReference):
        res = f'''{{R:GFunc}}{expr.decl.name}'''
    elif isinstance(expr, refs.SuperReference):
        res = f'''{{R}}super'''
    elif isinstance(expr, refs.StaticFieldReference):
        res = f'''{{R:StFi}}{expr.decl.parent_interface.name}::{expr.decl.name}'''
    elif isinstance(expr, refs.StaticPropertyReference):
        res = f'''{{R:StPr}}{expr.decl.parent_class.name}::{expr.decl.name}'''
    elif isinstance(expr, refs.InstanceFieldReference):
        res = f'''{self.expr(expr.object)}.{{F}}{expr.field.name}'''
    elif isinstance(expr, refs.InstancePropertyReference):
        res = f'''{self.expr(expr.object)}.{{P}}{expr.property.name}'''
    elif isinstance(expr, refs.EnumMemberReference):
        res = f'''{{E}}{expr.decl.parent_enum.name}::{expr.decl.name}'''
    elif isinstance(expr, exprs.NullCoalesceExpression):
        res = f'''{self.expr(expr.default_expr)} ?? {self.expr(expr.expr_if_null)}'''
    else:
        # unrecognized node kinds fall through to "UNKNOWN-EXPR"
        pass
    if self.show_types:
        res = f'''<{self.type(expr.get_type(), True)}>({res})'''
    return res
def block(self, block, allow_one_liner = True):
    """Render a statement block: preview stub, one-liner, empty, or braced form."""
    if self.preview_only:
        return " { ... }"
    count = len(block.statements)
    if count == 0:
        return " { }"
    if allow_one_liner and count == 1:
        return f'''\n{self.pad(self.raw_block(block))}'''
    return f''' {{\n{self.pad(self.raw_block(block))}\n}}'''
def stmt(self, stmt):
    """
    Render a single statement AST node as overview text.

    In preview mode the node's leading trivia is omitted; otherwise it is
    prepended to the rendered statement.
    """
    res = "UNKNOWN-STATEMENT"
    if isinstance(stmt, stats.BreakStatement):
        res = "break;"
    elif isinstance(stmt, stats.ReturnStatement):
        res = "return;" if stmt.expression == None else f'''return {self.expr(stmt.expression)};'''
    elif isinstance(stmt, stats.UnsetStatement):
        res = f'''unset {self.expr(stmt.expression)};'''
    elif isinstance(stmt, stats.ThrowStatement):
        res = f'''throw {self.expr(stmt.expression)};'''
    elif isinstance(stmt, stats.ExpressionStatement):
        res = f'''{self.expr(stmt.expression)};'''
    elif isinstance(stmt, stats.VariableDeclaration):
        res = f'''var {self.var(stmt)};'''
    elif isinstance(stmt, stats.ForeachStatement):
        res = f'''for (const {stmt.item_var.name} of {self.expr(stmt.items)})''' + self.block(stmt.body)
    elif isinstance(stmt, stats.IfStatement):
        # an else-branch holding exactly one nested IfStatement renders as "else if"
        else_if = stmt.else_ != None and len(stmt.else_.statements) == 1 and isinstance(stmt.else_.statements[0], stats.IfStatement)
        res = f'''if ({self.expr(stmt.condition)}){self.block(stmt.then)}'''
        if not self.preview_only:
            res += (f'''\nelse {self.stmt(stmt.else_.statements[0])}''' if else_if else "") + (f'''\nelse''' + self.block(stmt.else_) if not else_if and stmt.else_ != None else "")
    elif isinstance(stmt, stats.WhileStatement):
        res = f'''while ({self.expr(stmt.condition)})''' + self.block(stmt.body)
    elif isinstance(stmt, stats.ForStatement):
        res = f'''for ({(self.var(stmt.item_var) if stmt.item_var != None else "")}; {self.expr(stmt.condition)}; {self.expr(stmt.incrementor)})''' + self.block(stmt.body)
    elif isinstance(stmt, stats.DoStatement):
        res = f'''do{self.block(stmt.body)} while ({self.expr(stmt.condition)})'''
    elif isinstance(stmt, stats.TryStatement):
        # try body never renders as a one-liner (allow_one_liner=False)
        res = "try" + self.block(stmt.try_body, False) + (f''' catch ({stmt.catch_var.name}){self.block(stmt.catch_body)}''' if stmt.catch_body != None else "") + ("finally" + self.block(stmt.finally_body) if stmt.finally_body != None else "")
    elif isinstance(stmt, stats.ContinueStatement):
        res = f'''continue;'''
    else:
        # unrecognized node kinds fall through to "UNKNOWN-STATEMENT"
        pass
    return res if self.preview_only else self.leading(stmt) + res
def raw_block(self, block):
    """Render every statement of the block, newline-separated, without braces."""
    rendered = [self.stmt(one_stmt) for one_stmt in block.statements]
    return "\n".join(rendered)
def method_base(self, method, returns):
    """
    Render the shared part of a callable: name, type arguments, parameter
    list, return type (omitted for void) and body (or ";" when absent).

    @param method: a Method, Constructor or GlobalFunction node (or None)
    @param returns: the return type node; VoidType suppresses the annotation
    """
    if method == None:
        return ""
    # constructors have no name of their own; unknown kinds render as "???"
    name = method.name if isinstance(method, types.Method) else "constructor" if isinstance(method, types.Constructor) else method.name if isinstance(method, types.GlobalFunction) else "???"
    # only Method nodes can carry generic type arguments
    type_args = method.type_arguments if isinstance(method, types.Method) else None
    return self.pre_if("/* throws */ ", method.throws) + f'''{name}{self.type_args(type_args)}({", ".join(list(map(lambda p: self.leading(p) + self.var(p), method.parameters)))})''' + ("" if isinstance(returns, astTypes.VoidType) else f''': {self.type(returns)}''') + (f''' {{\n{self.pad(self.raw_block(method.body))}\n}}''' if method.body != None else ";")
def method(self, method):
    """Render a method: optional static/@mutates markers plus the shared form."""
    if method == None:
        return ""
    prefix = "static " if method.is_static else ""
    if method.attributes != None and "mutates" in method.attributes:
        prefix += "@mutates "
    return prefix + self.method_base(method, method.returns)
def class_like(self, cls_):
    """
    Render the indented body of a class or interface: fields, then (for
    classes only) properties and the constructor, then methods.  Empty
    sections are dropped before joining with blank lines.
    """
    res_list = []
    res_list.append("\n".join(list(map(lambda field: self.var(field) + ";", cls_.fields))))
    if isinstance(cls_, types.Class):
        # interfaces have no properties or constructor
        res_list.append("\n".join(list(map(lambda prop: self.var(prop) + ";", cls_.properties))))
        res_list.append(self.method_base(cls_.constructor_, astTypes.VoidType.instance))
    res_list.append("\n\n".join(list(map(lambda method: self.method(method), cls_.methods))))
    return self.pad("\n\n".join(list(filter(lambda x: x != "", res_list))))
def pad(self, str):
    """Indent every line of the given text by four spaces."""
    # NOTE: the parameter name `str` shadows the builtin but is kept for
    # interface compatibility with existing callers.
    indented = ["    " + line for line in str.split("\n")]
    return "\n".join(indented)
def imp(self, imp):
    """
    Render one imported symbol as a kind marker plus its name:
    "X" unresolved, "C" class, "I" interface, "E" enum, "???" otherwise.
    """
    return "" + ("X" if isinstance(imp, types.UnresolvedImport) else "C" if isinstance(imp, types.Class) else "I" if isinstance(imp, types.Interface) else "E" if isinstance(imp, types.Enum) else "???") + f''':{imp.name}'''
def node_repr(self, node):
    """
    Render an arbitrary AST node by dispatching to stmt() or expr();
    unknown node kinds produce a placeholder comment.
    """
    if isinstance(node, stats.Statement):
        return self.stmt(node)
    elif isinstance(node, exprs.Expression):
        return self.expr(node)
    else:
        return "/* TODO: missing */"
def generate(self, source_file):
    """
    Render a whole source file as overview text: a "// export scope" header
    followed by imports, enums, interfaces, classes, global functions and
    the main block, with empty sections dropped.

    @param source_file: a SourceFile AST node
    @return: the rendered overview string

    BUGFIX: the return line was corrupted by appended table residue
    ("| true | true |"), which made the function a syntax error; it now
    returns the assembled result.
    """
    imps = list(map(lambda imp: (f'''import * as {imp.import_as}''' if imp.import_all else f'''import {{ {", ".join(list(map(lambda x: self.imp(x), imp.imports)))} }}''') + f''' from "{imp.export_scope.package_name}{self.pre("/", imp.export_scope.scope_name)}";''', source_file.imports))
    enums = list(map(lambda enum_: f'''{self.leading(enum_)}enum {enum_.name} {{ {", ".join(list(map(lambda x: x.name, enum_.values)))} }}''', source_file.enums))
    intfs = list(map(lambda intf: f'''{self.leading(intf)}interface {intf.name}{self.type_args(intf.type_arguments)}''' + f'''{self.pre_arr(" extends ", list(map(lambda x: self.type(x), intf.base_interfaces)))} {{\n{self.class_like(intf)}\n}}''', source_file.interfaces))
    classes = list(map(lambda cls_: f'''{self.leading(cls_)}class {cls_.name}{self.type_args(cls_.type_arguments)}''' + self.pre(" extends ", self.type(cls_.base_class) if cls_.base_class != None else None) + self.pre_arr(" implements ", list(map(lambda x: self.type(x), cls_.base_interfaces))) + f''' {{\n{self.class_like(cls_)}\n}}''', source_file.classes))
    funcs = list(map(lambda func: f'''{self.leading(func)}function {func.name}{self.method_base(func, func.returns)}''', source_file.funcs))
    main = self.raw_block(source_file.main_block)
    result = f'''// export scope: {source_file.export_scope.package_name}/{source_file.export_scope.scope_name}\n''' + "\n\n".join(list(filter(lambda x: x != "", ["\n".join(imps), "\n".join(enums), "\n\n".join(intfs), "\n\n".join(classes), "\n\n".join(funcs), main])))
    return result
f72241d12600a83665a811559204ad355ea924e5 | 56,226 | py | Python | pyx12/map_if.py | azoner/pyx12 | f60ad528d54c0b0ae25044d2629a29faeefa2f5f | [
"BSD-3-Clause"
] | 120 | 2015-01-30T07:17:26.000Z | 2022-03-25T16:42:15.000Z | pyx12/map_if.py | azoner/pyx12 | f60ad528d54c0b0ae25044d2629a29faeefa2f5f | [
"BSD-3-Clause"
] | 43 | 2015-02-12T18:42:26.000Z | 2021-12-12T22:22:20.000Z | pyx12/map_if.py | azoner/pyx12 | f60ad528d54c0b0ae25044d2629a29faeefa2f5f | [
"BSD-3-Clause"
] | 85 | 2015-02-12T16:44:28.000Z | 2022-03-24T20:20:46.000Z | ######################################################################
# Copyright (c)
# All rights reserved.
#
# This software is licensed as described in the file LICENSE.txt, which
# you should have received as part of this distribution.
#
######################################################################
"""
Interface to a X12N IG Map
"""
import logging
import os.path
import sys
import re
import xml.etree.cElementTree as et
from pkg_resources import resource_stream
# Intrapackage imports
from .errors import EngineError
from . import codes
from . import dataele
from . import path
from . import validation
from .syntax import is_syntax_valid
MAXINT = 2147483647
class x12_node(object):
    """
    X12 Node Superclass.

    Base class for nodes of an X12N map tree (map root, loops, segments,
    elements and composites).  Provides identity, child navigation and
    path-formatting helpers shared by all concrete node types.
    """
    def __init__(self):
        self.id = None          # node identifier (xid) from the map
        self.name = None        # human-readable name
        self.parent = None      # parent x12_node; None for the root
        self.children = []      # ordered child nodes
        self.path = ''          # this node's own path component
        self._x12path = None    # cached X12Path instance
        self._fullpath = None   # cached full XPath-style path string

    def __eq__(self, other):
        # Two nodes are equal when both their own id and their parent's id match.
        if isinstance(other, x12_node):
            return self.id == other.id and self.parent.id == other.parent.id
        return NotImplemented

    def __ne__(self, other):
        res = type(self).__eq__(self, other)
        if res is NotImplemented:
            return res
        return not res

    def __lt__(self, other):
        return NotImplemented

    # Ordering is undefined for map nodes; all ordering comparisons return
    # NotImplemented.  (A duplicated "__le__ = __lt__" line was removed.)
    __le__ = __lt__
    __gt__ = __lt__
    __ge__ = __lt__

    def __hash__(self):
        # Must be consistent with __eq__: hash on own id + parent id.
        return (self.id + self.parent.id).__hash__()

    def __len__(self):
        return len(self.children)

    def __repr__(self):
        """
        @rtype: string
        """
        return self.name

    def getnodebypath(self, path):
        """
        Walk the children following a '/'-separated path; only loop children
        are descended into.

        @param path: relative path such as "2000A/NM1"
        @raise EngineError: when no child matches the path
        """
        pathl = path.split('/')
        if len(pathl) == 0:
            return None
        for child in self.children:
            if child.id.lower() == pathl[0].lower():
                if len(pathl) == 1:
                    return child
                else:
                    if child.is_loop():
                        return child.getnodebypath('/'.join(pathl[1:]))
                    else:
                        # matched a non-loop mid-path: cannot descend further
                        break
        raise EngineError('getnodebypath failed. Path "%s" not found' % path)

    def get_child_count(self):
        return len(self.children)

    def get_child_node_by_idx(self, idx):
        """
        @param idx: zero based
        @return: the child at idx, or None when out of range
        """
        if idx >= len(self.children):
            return None
        else:
            return self.children[idx]

    def get_child_node_by_ordinal(self, ordinal):
        """
        Get a child element or composite by the X12 ordinal.

        @param ordinal: one based element/composite index; corresponds to the
            map <seq> element
        @type ordinal: int
        """
        return self.get_child_node_by_idx(ordinal - 1)

    def get_path(self):
        """
        @return: path - XPath style
        @rtype: string
        """
        if self._fullpath:
            return self._fullpath
        parent_path = self.parent.get_path()
        # avoid a double slash when the parent is the root ('/')
        if parent_path == '/':
            self._fullpath = '/' + self.path
            return self._fullpath
        else:
            self._fullpath = parent_path + '/' + self.path
            return self._fullpath

    def _get_x12_path(self):
        """
        @return: X12 node path
        @rtype: L{path<path.X12Path>}
        """
        if self._x12path:
            return self._x12path
        p = path.X12Path(self.get_path())
        self._x12path = p
        return p

    x12path = property(_get_x12_path, None, None)

    def is_first_seg_in_loop(self):
        """
        @rtype: boolean
        """
        return False

    def is_map_root(self):
        """
        @rtype: boolean
        """
        return False

    def is_loop(self):
        """
        @rtype: boolean
        """
        return False

    def is_segment(self):
        """
        @rtype: boolean
        """
        return False

    def is_element(self):
        """
        @rtype: boolean
        """
        return False

    def is_composite(self):
        """
        @rtype: boolean
        """
        return False
############################################################
# Map file interface
############################################################
class map_if(x12_node):
    """
    Map file interface: the root node of a parsed X12N map document.
    """
    def __init__(self, eroot, param, base_path=None):
        """
        @param eroot: ElementTree root of the map XML
        @param param: map of parameters
        @param base_path: optional base directory for code/data-element tables
        """
        x12_node.__init__(self)
        self.children = None
        # pos_map maps an ordinal position to the list of child nodes at it
        self.pos_map = {}
        self.cur_path = '/transaction'
        self.path = '/'
        self.param = param
        self.ext_codes = codes.ExternalCodes(base_path,
                                             param.get('exclude_external_codes'))
        self.data_elements = dataele.DataElements(base_path)
        self.id = eroot.get('xid')
        # prefer the attribute form; fall back to the child element's text
        self.name = eroot.get('name') if eroot.get('name') else eroot.findtext('name')
        self.base_name = 'transaction'
        for e in eroot.findall('loop'):
            loop_node = loop_if(self, self, e)
            # group children sharing the same ordinal position
            try:
                self.pos_map[loop_node.pos].append(loop_node)
            except KeyError:
                self.pos_map[loop_node.pos] = [loop_node]
        for e in eroot.findall('segment'):
            seg_node = segment_if(self, self, e)
            try:
                self.pos_map[seg_node.pos].append(seg_node)
            except KeyError:
                self.pos_map[seg_node.pos] = [seg_node]
        self.icvn = self._get_icvn()

    def _get_icvn(self):
        """
        Get the Interchange version of this map.

        The map must have a first ISA segment; the version is the first
        valid code of ISA12 (the twelfth child element).
        """
        ipath = '/ISA_LOOP/ISA'
        try:
            node = self.getnodebypath(ipath).children[11]
            icvn = node.valid_codes[0]
            return icvn
        except Exception:
            # maps without an ISA loop (or without codes) have no version
            return None

    def debug_print(self):
        # dump this node and its whole subtree to stdout, in ordinal order
        sys.stdout.write(self.__repr__())
        for ord1 in sorted(self.pos_map):
            for node in self.pos_map[ord1]:
                node.debug_print()

    def __eq__(self, other):
        # map roots are compared by id only
        return self.id == other.id

    def __hash__(self):
        return (self.id).__hash__()

    def __len__(self):
        # number of direct children across all ordinal positions
        i = 0
        for ord1 in sorted(self.pos_map):
            i += len(self.pos_map[ord1])
        return i

    def get_child_count(self):
        return self.__len__()

    def get_first_node(self):
        # the first child at the lowest ordinal position, or None when empty
        pos_keys = sorted(self.pos_map)
        if len(pos_keys) > 0:
            return self.pos_map[pos_keys[0]][0]
        else:
            return None

    def get_first_seg(self):
        # first child only if it is a segment; otherwise None
        first = self.get_first_node()
        if first.is_segment():
            return first
        else:
            return None

    def __repr__(self):
        """
        @rtype: string
        """
        return '%s\n' % (self.id)

    def _path_parent(self):
        """
        @rtype: string
        """
        return os.path.basename(os.path.dirname(self.cur_path))

    def get_path(self):
        """
        @rtype: string
        """
        return self.path

    def get_child_node_by_idx(self, idx):
        """
        @param idx: zero based
        @raise EngineError: always; index access is not valid on the map root
        """
        raise EngineError('map_if.get_child_node_by_idx is not a valid call')

    def getnodebypath(self, spath):
        """
        Find a descendant node by an absolute slash path.

        @param spath: Path string; /1000/2000/2000A/NM102-3
        @type spath: string
        @raise EngineError: when the path does not match any node
        """
        # drop the leading empty component from the absolute path
        pathl = spath.split('/')[1:]
        if len(pathl) == 0:
            return None
        for ord1 in sorted(self.pos_map):
            for child in self.pos_map[ord1]:
                if child.id.lower() == pathl[0].lower():
                    if len(pathl) == 1:
                        return child
                    else:
                        return child.getnodebypath('/'.join(pathl[1:]))
        raise EngineError('getnodebypath failed. Path "%s" not found' % spath)

    def getnodebypath2(self, path_str):
        """
        Find a descendant node using X12Path semantics.

        @param path_str: Path string; /1000/2000/2000A/NM102-3
        @type path_str: string
        @raise EngineError: when the path does not match any node
        """
        x12path = path.X12Path(path_str)
        if x12path.empty():
            return None
        for ord1 in sorted(self.pos_map):
            for child in self.pos_map[ord1]:
                if child.id.upper() == x12path.loop_list[0]:
                    if len(x12path.loop_list) == 1:
                        return child
                    else:
                        # consume the matched loop and recurse on the remainder
                        del x12path.loop_list[0]
                        return child.getnodebypath2(x12path.format())
        raise EngineError(
            'getnodebypath2 failed. Path "%s" not found' % path_str)

    def is_map_root(self):
        """
        @rtype: boolean
        """
        return True

    def reset_child_count(self):
        """
        Set cur_count of child nodes to zero
        """
        raise DeprecationWarning('Moved to nodeCounter')
        # NOTE(review): unreachable — retained from the pre-nodeCounter code
        for ord1 in sorted(self.pos_map):
            for child in self.pos_map[ord1]:
                child.reset_cur_count()

    def reset_cur_count(self):
        """
        Set cur_count of child nodes to zero
        """
        raise DeprecationWarning('Moved to nodeCounter')
        # NOTE(review): unreachable — retained from the pre-nodeCounter code
        self.reset_child_count()

    def __iter__(self):
        return self

    def loop_segment_iterator(self):
        # depth-first traversal: yield this node, then every loop/segment below
        yield self
        for ord1 in sorted(self.pos_map):
            for child in self.pos_map[ord1]:
                if child.is_loop() or child.is_segment():
                    for c in child.loop_segment_iterator():
                        yield c
############################################################
# Loop Interface
############################################################
class loop_if(x12_node):
    """
    Loop Interface: a loop node in the X12N map tree, holding child loops
    and segments grouped by ordinal position.
    """
    def __init__(self, root, parent, elem):
        """
        @param root: the map_if root node
        @param parent: the parent loop (or the map root)
        @param elem: the <loop> ElementTree element to parse
        """
        x12_node.__init__(self)
        self.root = root
        self.parent = parent
        # pos_map maps an ordinal position to the list of child nodes at it
        self.pos_map = {}
        self.base_name = 'loop'
        self._cur_count = 0
        self.id = elem.get('xid')
        self.path = self.id
        self.type = elem.get('type')
        # prefer the attribute form; fall back to the child element's text
        self.name = elem.get('name') if elem.get('name') else elem.findtext('name')
        self.usage = elem.get('usage') if elem.get('usage') else elem.findtext('usage')
        self.pos = int(elem.get('pos')) if elem.get('pos') else int(elem.findtext('pos'))
        self.repeat = elem.get('repeat') if elem.get('repeat') else elem.findtext('repeat')
        for e in elem.findall('loop'):
            loop_node = loop_if(self.root, self, e)
            try:
                self.pos_map[loop_node.pos].append(loop_node)
            except KeyError:
                self.pos_map[loop_node.pos] = [loop_node]
        for e in elem.findall('segment'):
            seg_node = segment_if(self.root, self, e)
            try:
                self.pos_map[seg_node.pos].append(seg_node)
            except KeyError:
                self.pos_map[seg_node.pos] = [seg_node]
        # For segments sharing an ordinal, make each path unique by appending
        # the first valid code of its identifying element, e.g. "NM1[IL]".
        for ord1 in sorted(self.pos_map):
            if len(self.pos_map[ord1]) > 1:
                for seg_node in [n for n in self.pos_map[ord1] if n.is_segment()]:
                    id_elem = seg_node.guess_unique_key_id_element()
                    if id_elem is not None:
                        seg_node.path = seg_node.path + '[' + id_elem.valid_codes[0] + ']'

    def debug_print(self):
        # dump this loop and its whole subtree to stdout, in ordinal order
        sys.stdout.write(self.__repr__())
        for ord1 in sorted(self.pos_map):
            for node in self.pos_map[ord1]:
                node.debug_print()

    def __len__(self):
        i = 0
        for ord1 in sorted(self.pos_map):
            i += len(self.pos_map[ord1])
        return i

    def __repr__(self):
        """
        @rtype: string
        """
        out = ''
        if self.id:
            out += 'LOOP %s' % (self.id)
        if self.name:
            out += ' "%s"' % (self.name)
        if self.usage:
            out += ' usage: %s' % (self.usage)
        if self.pos:
            out += ' pos: %s' % (self.pos)
        if self.repeat:
            out += ' repeat: %s' % (self.repeat)
        out += '\n'
        return out

    def get_max_repeat(self):
        """
        @return: maximum allowed repeats of this loop; MAXINT when unbounded
        @rtype: int
        """
        if self.repeat is None:
            return MAXINT
        # '>1' marks an unbounded repeat count.
        # BUGFIX: the original condition compared against '>1' twice.
        if self.repeat == '>1':
            return MAXINT
        return int(self.repeat)

    def get_parent(self):
        return self.parent

    def get_first_node(self):
        # the first child at the lowest ordinal position, or None when empty
        pos_keys = sorted(self.pos_map)
        if len(pos_keys) > 0:
            return self.pos_map[pos_keys[0]][0]
        else:
            return None

    def get_first_seg(self):
        # first child only if it is a segment; otherwise None
        first = self.get_first_node()
        if first.is_segment():
            return first
        else:
            return None

    def childIterator(self):
        # yield every direct child in ordinal order
        for ord1 in sorted(self.pos_map):
            for child in self.pos_map[ord1]:
                yield child

    def getnodebypath(self, spath):
        """
        Find a descendant node by a relative slash path.  A trailing
        component may carry a key qualifier, e.g. "NM1[IL]".

        @param spath: remaining path to match
        @type spath: string
        @return: matching node
        @raise EngineError: when the path does not match any node
        """
        pathl = spath.split('/')
        if len(pathl) == 0:
            return None
        for ord1 in sorted(self.pos_map):
            for child in self.pos_map[ord1]:
                if child.is_loop():
                    if child.id.upper() == pathl[0].upper():
                        if len(pathl) == 1:
                            return child
                        else:
                            return child.getnodebypath('/'.join(pathl[1:]))
                elif child.is_segment() and len(pathl) == 1:
                    if pathl[0].find('[') == -1:  # No id qualifier to match
                        if pathl[0] == child.id:
                            return child
                    else:
                        # split "SEG[id]" into the segment id and key value
                        seg_id = pathl[0][0:pathl[0].find('[')]
                        id_val = pathl[0][pathl[0].find('[')
                                          + 1:pathl[0].find(']')]
                        if seg_id == child.id:
                            possible = child.get_unique_key_id_element(id_val)
                            if possible is not None:
                                return child
        raise EngineError('getnodebypath failed. Path "%s" not found' % spath)

    def getnodebypath2(self, path_str):
        """
        Find a descendant node using X12Path semantics.

        @param path_str: remaining path to match
        @type path_str: string
        @return: matching node
        @raise EngineError: when the path does not match any node
        """
        x12path = path.X12Path(path_str)
        if x12path.empty():
            return None
        for ord1 in sorted(self.pos_map):
            for child in self.pos_map[ord1]:
                if child.is_loop() and len(x12path.loop_list) > 0:
                    if child.id.upper() == x12path.loop_list[0].upper():
                        if len(x12path.loop_list) == 1 and x12path.seg_id is None:
                            return child
                        else:
                            # consume the matched loop and recurse
                            del x12path.loop_list[0]
                            return child.getnodebypath2(x12path.format())
                elif child.is_segment() and len(x12path.loop_list) == 0 and x12path.seg_id is not None:
                    if x12path.id_val is None:
                        if x12path.seg_id == child.id:
                            return child.getnodebypath2(x12path.format())
                    else:
                        # qualified segment: also match the key id value
                        seg_id = x12path.seg_id
                        id_val = x12path.id_val
                        if seg_id == child.id:
                            possible = child.get_unique_key_id_element(id_val)
                            if possible is not None:
                                return child.getnodebypath2(x12path.format())
        raise EngineError(
            'getnodebypath2 failed. Path "%s" not found' % path_str)

    def get_child_count(self):
        return self.__len__()

    def get_child_node_by_idx(self, idx):
        """
        @param idx: zero based
        @raise EngineError: always; index access is not valid on a loop
        """
        raise EngineError('loop_if.get_child_node_by_idx is not a valid call for a loop_if')

    def get_seg_count(self):
        """
        @return: Number of child segments
        @rtype: integer
        """
        i = 0
        for ord1 in sorted(self.pos_map):
            for child in self.pos_map[ord1]:
                if child.is_segment():
                    i += 1
        return i

    def is_loop(self):
        """
        @rtype: boolean
        """
        return True

    def is_match(self, seg_data):
        """
        A segment matches this loop when it matches the loop's first child
        (recursing into a leading child loop).

        @type seg_data: L{segment<segment.Segment>}
        @return: Is the segment a match to this loop?
        @rtype: boolean
        """
        pos_keys = sorted(self.pos_map)
        child = self.pos_map[pos_keys[0]][0]
        if child.is_loop():
            return child.is_match(seg_data)
        elif child.is_segment():
            # seg must match the first segment in the loop to start it
            return child.is_match(seg_data)
        else:
            return False

    def get_child_seg_node(self, seg_data):
        """
        Return the child segment node matching the segment data, or None.
        """
        for child in self.childIterator():
            if child.is_segment() and child.is_match(seg_data):
                return child
        return None

    def get_child_loop_node(self, seg_data):
        """
        Return the child loop node matching the segment data, or None.
        """
        for child in self.childIterator():
            if child.is_loop() and child.is_match(seg_data):
                return child
        return None

    # The repetition counters below were moved to nodeCounter; the dead code
    # that followed each raise was removed.
    def get_cur_count(self):
        """Deprecated: counts now live in nodeCounter."""
        raise DeprecationWarning('Moved to nodeCounter')

    def incr_cur_count(self):
        """Deprecated: counts now live in nodeCounter."""
        raise DeprecationWarning('Moved to nodeCounter')

    def reset_child_count(self):
        """Deprecated: counts now live in nodeCounter."""
        raise DeprecationWarning('Moved to nodeCounter')

    def reset_cur_count(self):
        """Deprecated: counts now live in nodeCounter."""
        raise DeprecationWarning('Moved to nodeCounter')

    def set_cur_count(self, ct):
        """Deprecated: counts now live in nodeCounter."""
        raise DeprecationWarning('Moved to nodeCounter')

    def get_counts_list(self, ct_list):
        """Deprecated: counts now live in nodeCounter."""
        raise DeprecationWarning('Moved to nodeCounter')

    def loop_segment_iterator(self):
        # depth-first traversal: yield this loop, then every loop/segment below
        yield self
        for ord1 in sorted(self.pos_map):
            for child in self.pos_map[ord1]:
                if child.is_loop() or child.is_segment():
                    for c in child.loop_segment_iterator():
                        yield c
class segment_if(x12_node):
"""
Segment Interface
"""
def __init__(self, root, parent, elem):
"""
@param parent: parent node
"""
x12_node.__init__(self)
self.root = root
self.parent = parent
self.children = []
#self.path = ''
self.base_name = 'segment'
self._cur_count = 0
self.syntax = []
self.id = elem.get('xid')
self.path = self.id
self.type = elem.get('type')
self.name = elem.get(
'name') if elem.get('name') else elem.findtext('name')
self.usage = elem.get(
'usage') if elem.get('usage') else elem.findtext('usage')
self.pos = int(elem.get(
'pos')) if elem.get('pos') else int(elem.findtext('pos'))
self.max_use = elem.get('max_use') if elem.get(
'max_use') else elem.findtext('max_use')
self.repeat = elem.get('repeat') if elem.get(
'repeat') else elem.findtext('repeat')
self.end_tag = elem.get('end_tag') if elem.get(
'end_tag') else elem.findtext('end_tag')
for s in elem.findall('syntax'):
syn_list = self._split_syntax(s.text)
if syn_list is not None:
self.syntax.append(syn_list)
children_map = {}
for e in elem.findall('element'):
seq = int(e.get('seq')) if e.get('seq') else int(e.findtext('seq'))
children_map[seq] = e
#self.children.append(element_if(self.root, self, e))
for e in elem.findall('composite'):
seq = int(e.get('seq')) if e.get('seq') else int(e.findtext('seq'))
children_map[seq] = e
#self.children.append(composite_if(self.root, self, e))
for seq in sorted(children_map.keys()):
if children_map[seq].tag == 'element':
self.children.append(element_if(
self.root, self, children_map[seq]))
elif children_map[seq].tag == 'composite':
self.children.append(composite_if(
self.root, self, children_map[seq]))
def debug_print(self):
sys.stdout.write(self.__repr__())
for node in self.children:
node.debug_print()
def __repr__(self):
"""
@rtype: string
"""
out = '%s "%s"' % (self.id, self.name)
if self.usage:
out += ' usage: %s' % (self.usage)
if self.pos:
out += ' pos: %i' % (self.pos)
if self.max_use:
out += ' max_use: %s' % (self.max_use)
out += '\n'
return out
def get_child_node_by_idx(self, idx):
"""
@param idx: zero based
"""
if idx >= len(self.children):
return None
else:
m = [c for c in self.children if c.seq == idx + 1]
if len(m) == 1:
return m[0]
else:
raise EngineError('idx %i not found in %s' % (idx, self.id))
def get_child_node_by_ordinal(self, ord):
"""
Get a child element or composite by the X12 ordinal
@param ord: one based element/composite index. Corresponds to the map <seq> element
@type ord: int
"""
return self.get_child_node_by_idx(ord - 1)
def getnodebypath2(self, path_str):
"""
Try x12 path
@param path_str: remaining path to match
@type path_str: string
@return: matching node, or None is no match
"""
x12path = path.X12Path(path_str)
if x12path.empty():
return None
if x12path.ele_idx is None:
return self # matched segment only
ele = self.get_child_node_by_ordinal(x12path.ele_idx)
if x12path.subele_idx is None:
return ele
return ele.get_child_node_by_ordinal(x12path.subele_idx)
raise EngineError('getnodebypath2 failed. Path "%s" not found' % path_str)
def get_max_repeat(self):
if self.max_use is None or self.max_use == '>1':
return MAXINT
return int(self.max_use)
    def get_parent(self):
        """
        Return this segment node's parent in the map tree.

        @return: ref to parent class instance
        @rtype: pyx12.x12_node
        """
        return self.parent
def is_first_seg_in_loop(self):
"""
@rtype: boolean
"""
if self is self.get_parent().get_first_seg():
return True
else:
return False
    def is_match(self, seg):
        """
        Is data segment given a match to this segment node?

        The segment ID must match.  For segment IDs that can occur more
        than once in a loop, a qualifying ID element (or sub-element)
        narrows the match against its valid-code list.

        @param seg: data segment instance
        @return: boolean
        @rtype: boolean
        """
        if seg.get_seg_id() == self.id:
            # Usual case: qualified by the first element when it is a
            # required ID element with an enumerated code list
            if self.children[0].is_element() \
                    and self.children[0].get_data_type() == 'ID' \
                    and self.children[0].usage == 'R' \
                    and len(self.children[0].valid_codes) > 0 \
                    and seg.get_value('01') not in self.children[0].valid_codes:
                return False
            # Special Case for 820: ENT is qualified by its second element
            elif seg.get_seg_id() == 'ENT' \
                    and self.children[1].is_element() \
                    and self.children[1].get_data_type() == 'ID' \
                    and len(self.children[1].valid_codes) > 0 \
                    and seg.get_value('02') not in self.children[1].valid_codes:
                return False
            # Special Case for 999 CTX
            # IG defines the dataelement 2100/CTX01-1 as an AN, but it acts like an ID
            elif seg.get_seg_id() == 'CTX' \
                    and self.children[0].is_composite() \
                    and self.children[0].children[0].get_data_type() == 'AN' \
                    and len(self.children[0].children[0].valid_codes) > 0 \
                    and seg.get_value('01-1') not in self.children[0].children[0].valid_codes:
                return False
            # Qualified by the first sub-element of a leading composite
            elif self.children[0].is_composite() \
                    and self.children[0].children[0].get_data_type() == 'ID' \
                    and len(self.children[0].children[0].valid_codes) > 0 \
                    and seg.get_value('01-1') not in self.children[0].children[0].valid_codes:
                return False
            # HL segments are qualified by HL03 (hierarchical level code)
            elif seg.get_seg_id() == 'HL' and self.children[2].is_element() \
                    and len(self.children[2].valid_codes) > 0 \
                    and seg.get_value('03') not in self.children[2].valid_codes:
                return False
            else:
                return True
        else:
            return False
    def is_match_qual(self, seg_data, seg_id, qual_code):
        """
        Is segment id and qualifier a match to this segment node and to this particular segment data?

        Mirrors the qualifier logic of is_match, but checks an explicit
        qualifier code and reports where the qualifier lives (element and
        optional sub-element ordinals).

        @param seg_data: data segment instance
        @type seg_data: L{segment<segment.Segment>}
        @param seg_id: data segment ID
        @param qual_code: an ID qualifier code
        @return: (True if a match, qual_code, element_index, subelement_index)
        @rtype: tuple(boolean, string, int, int)
        """
        if seg_id == self.id:
            if qual_code is None:
                # No qualifier requested; the segment ID alone matches
                return (True, None, None, None)
            elif self.children[0].is_element() \
                    and self.children[0].get_data_type() == 'ID' \
                    and self.children[0].usage == 'R' \
                    and len(self.children[0].valid_codes) > 0:
                if qual_code in self.children[0].valid_codes and seg_data.get_value('01') == qual_code:
                    return (True, qual_code, 1, None)
                else:
                    return (False, None, None, None)
            # Special Case for 820: ENT is qualified by its second element
            elif seg_id == 'ENT' \
                    and self.children[1].is_element() \
                    and self.children[1].get_data_type() == 'ID' \
                    and len(self.children[1].valid_codes) > 0:
                if qual_code in self.children[1].valid_codes and seg_data.get_value('02') == qual_code:
                    return (True, qual_code, 2, None)
                else:
                    return (False, None, None, None)
            # Qualified by the first sub-element of a leading composite
            elif self.children[0].is_composite() \
                    and self.children[0].children[0].get_data_type() == 'ID' \
                    and len(self.children[0].children[0].valid_codes) > 0:
                if qual_code in self.children[0].children[0].valid_codes and seg_data.get_value('01-1') == qual_code:
                    return (True, qual_code, 1, 1)
                else:
                    return (False, None, None, None)
            # HL segments are qualified by HL03
            elif seg_id == 'HL' and self.children[2].is_element() \
                    and len(self.children[2].valid_codes) > 0:
                if qual_code in self.children[2].valid_codes and seg_data.get_value('03') == qual_code:
                    return (True, qual_code, 3, None)
                else:
                    return (False, None, None, None)
            else:
                return (True, None, None, None)
        else:
            return (False, None, None, None)
    def guess_unique_key_id_element(self):
        """
        Find the ID element most likely to uniquely qualify this segment.

        Some segments, like REF and DTP, are duplicated within a loop and
        are matched using the value of an ID element; which element plays
        that role varies by segment.  Mirrors the qualifier logic in
        is_match.

        @return: the qualifying element_if child, or None if no obvious
            candidate exists
        """
        if self.children[0].is_element() and self.children[0].get_data_type() == 'ID' and len(self.children[0].valid_codes) > 0:
            return self.children[0]
        # Special Case for 820: ENT is qualified by its second element
        elif self.id == 'ENT' and self.children[1].is_element() and self.children[1].get_data_type() == 'ID' and len(self.children[1].valid_codes) > 0:
            return self.children[1]
        elif self.children[0].is_composite() and self.children[0].children[0].get_data_type() == 'ID' and len(self.children[0].children[0].valid_codes) > 0:
            return self.children[0].children[0]
        elif self.id == 'HL' and self.children[2].is_element() and len(self.children[2].valid_codes) > 0:
            return self.children[2]
        return None
    def get_unique_key_id_element(self, id_val):
        """
        Find the qualifying ID element whose code list contains id_val.

        Like guess_unique_key_id_element, but only returns a candidate
        whose valid-code list actually accepts the given key value.

        @param id_val: qualifier code to look for
        @return: the qualifying element_if child, or None if no candidate
            accepts id_val
        """
        if self.children[0].is_element() and self.children[0].get_data_type() == 'ID' \
                and len(self.children[0].valid_codes) > 0 and id_val in self.children[0].valid_codes:
            return self.children[0]
        # Special Case for 820: ENT is qualified by its second element
        elif self.id == 'ENT' and self.children[1].is_element() and self.children[1].get_data_type() == 'ID' \
                and len(self.children[1].valid_codes) > 0 and id_val in self.children[1].valid_codes:
            return self.children[1]
        elif self.children[0].is_composite() and self.children[0].children[0].get_data_type() == 'ID' \
                and len(self.children[0].children[0].valid_codes) > 0 and id_val in self.children[0].children[0].valid_codes:
            return self.children[0].children[0]
        elif self.id == 'HL' and self.children[2].is_element() and len(self.children[2].valid_codes) > 0 and id_val in self.children[2].valid_codes:
            return self.children[2]
        return None
def is_segment(self):
"""
@rtype: boolean
"""
return True
    def is_valid(self, seg_data, errh):
        """
        Validate a data segment against this map segment node.

        Checks the element count, validates each element/composite in
        order (with special DTP date-format handling), flags missing
        trailing required elements, then applies the segment's relational
        syntax rules.

        @param seg_data: data segment instance
        @type seg_data: L{segment<segment.Segment>}
        @param errh: instance of error_handler
        @rtype: boolean
        """
        valid = True
        child_count = self.get_child_count()
        if len(seg_data) > child_count:
            err_str = 'Too many elements in segment "%s" (%s). Has %i, should have %i' % \
                (self.name, seg_data.get_seg_id(), len(seg_data), child_count)
            ref_des = '%02i' % (child_count + 1)
            err_value = seg_data.get_value(ref_des)
            errh.ele_error('3', err_str, err_value, ref_des)
            valid = False
        dtype = []
        type_list = []
        for i in range(min(len(seg_data), child_count)):
            child_node = self.get_child_node_by_idx(i)
            if child_node.is_composite():
                # Validate composite
                ref_des = '%02i' % (i + 1)
                comp_data = seg_data.get(ref_des)
                subele_count = child_node.get_child_count()
                if seg_data.ele_len(ref_des) > subele_count and child_node.usage != 'N':
                    subele_node = child_node.get_child_node_by_idx(
                        subele_count + 1)
                    err_str = 'Too many sub-elements in composite "%s" (%s)' % \
                        (subele_node.name, subele_node.refdes)
                    err_value = seg_data.get_value(ref_des)
                    errh.ele_error('3', err_str, err_value, ref_des)
                valid &= child_node.is_valid(comp_data, errh)
            elif child_node.is_element():
                # Validate Element
                # DTP02 declares the date/time format; remember it so DTP03
                # can be validated against that format.
                if i == 1 and seg_data.get_seg_id() == 'DTP' \
                        and seg_data.get_value('02') in ('RD8', 'D8', 'D6', 'DT', 'TM'):
                    dtype = [seg_data.get_value('02')]
                # Data element 1250 enumerates date/time formats for a
                # following 1251 element.
                if child_node.data_ele == '1250':
                    type_list.extend(child_node.valid_codes)
                ele_data = seg_data.get('%02i' % (i + 1))
                if i == 2 and seg_data.get_seg_id() == 'DTP':
                    valid &= child_node.is_valid(ele_data, errh, dtype)
                elif child_node.data_ele == '1251' and len(type_list) > 0:
                    valid &= child_node.is_valid(ele_data, errh, type_list)
                else:
                    valid &= child_node.is_valid(ele_data, errh)
        for i in range(min(len(seg_data), child_count), child_count):
            # Elements absent from the data: still flag missing required ones
            child_node = self.get_child_node_by_idx(i)
            valid &= child_node.is_valid(None, errh)
        for syn in self.syntax:
            (bResult, err_str) = is_syntax_valid(seg_data, syn)
            if not bResult:
                syn_type = syn[0]
                # 'E' (exclusion) rules get error code 10; all others code 2
                if syn_type == 'E':
                    errh.ele_error('10', err_str, None, syn[1])
                else:
                    errh.ele_error('2', err_str, None, syn[1])
                valid &= False
        return valid
def _split_syntax(self, syntax):
"""
Split a Syntax string into a list
"""
if syntax[0] not in ['P', 'R', 'C', 'L', 'E']:
#self.logger.error('Syntax %s is not valid' % (syntax))
return None
syn = [syntax[0]]
for i in range(len(syntax[1:]) // 2):
syn.append(int(syntax[i * 2 + 1:i * 2 + 3]))
return syn
    def get_cur_count(self):
        """
        Deprecated: per-node counting moved to nodeCounter.

        @return: current count
        @rtype: int
        """
        raise DeprecationWarning('Moved to nodeCounter')
        return self._cur_count  # unreachable; kept from pre-nodeCounter code
    def incr_cur_count(self):
        """Deprecated: per-node counting moved to nodeCounter."""
        raise DeprecationWarning('Moved to nodeCounter')
        self._cur_count += 1  # unreachable; kept from pre-nodeCounter code
    def reset_cur_count(self):
        """
        Set cur_count of node to zero.

        Deprecated: per-node counting moved to nodeCounter.
        """
        raise DeprecationWarning('Moved to nodeCounter')
        self._cur_count = 0  # unreachable; kept from pre-nodeCounter code
    def set_cur_count(self, ct):
        """Deprecated: per-node counting moved to nodeCounter."""
        raise DeprecationWarning('Moved to nodeCounter')
        self._cur_count = ct  # unreachable; kept from pre-nodeCounter code
    def get_counts_list(self, ct_list):
        """
        Build a list of (path, ct) of the current node and parents.
        Gets the node counts to apply to another map.

        Deprecated: per-node counting moved to nodeCounter.

        @param ct_list: List to append to
        @type ct_list: list[(string, int)]
        """
        raise DeprecationWarning('Moved to nodeCounter')
        # Everything below is unreachable; kept from pre-nodeCounter code
        my_ct = (self.get_path(), self._cur_count)
        ct_list.append(my_ct)
        if not self.parent.is_map_root():
            self.parent.get_counts_list(ct_list)
        return True
    def loop_segment_iterator(self):
        """Yield this node only; leaf case of the loop/segment tree walk."""
        yield self
############################################################
# Element Interface
############################################################
class element_if(x12_node):
    """
    Element Interface

    Map interface for a single X12 element definition, wrapping one
    <element> node from the map XML.  Generic facts about the data
    element (type, min/max length, name) are looked up through
    self.root.data_elements using the data element number self.data_ele.
    """
    def __init__(self, root, parent, elem):
        """
        @param root: root map node; supplies the data-element and
            external-code tables
        @param parent: parent node (a segment_if or composite_if)
        @param elem: <element> node from the map XML
        """
        x12_node.__init__(self)
        self.children = []
        self.root = root
        self.parent = parent
        self.base_name = 'element'
        self.valid_codes = []  # enumerated codes from <valid_codes>
        self.external_codes = None  # name of an external code table, if any
        self.rec = None  # compiled regex from <regex>, if any
        self.id = elem.get('xid')
        self.refdes = self.id
        # Each map value may appear as an XML attribute or as a child
        # element; the attribute form wins when both are present.
        self.data_ele = elem.get('data_ele') if elem.get(
            'data_ele') else elem.findtext('data_ele')
        self.usage = elem.get(
            'usage') if elem.get('usage') else elem.findtext('usage')
        self.name = elem.get(
            'name') if elem.get('name') else elem.findtext('name')
        self.seq = int(elem.get(
            'seq')) if elem.get('seq') else int(elem.findtext('seq'))
        self.path = elem.get(
            'seq') if elem.get('seq') else elem.findtext('seq')
        self.max_use = elem.get('max_use') if elem.get(
            'max_use') else elem.findtext('max_use')
        self.res = elem.findtext('regex')
        try:
            if self.res is not None and self.res != '':
                self.rec = re.compile(self.res, re.S)
        except Exception:
            raise EngineError('Element regex "%s" failed to compile' % (self.res))
        v = elem.find('valid_codes')
        if v is not None:
            self.external_codes = v.get('external')
            for c in v.findall('code'):
                self.valid_codes.append(c.text)
    def debug_print(self):
        """Recursively write this node and its children to stdout."""
        sys.stdout.write(self.__repr__())
        for node in self.children:
            node.debug_print()
    def __repr__(self):
        """
        One-line description: refdes, name, data element number, usage,
        sequence, type(min_len, max_len) and external code table.

        @rtype: string
        """
        data_ele = self.root.data_elements.get_by_elem_num(self.data_ele)
        out = '%s "%s"' % (self.refdes, self.name)
        if self.data_ele:
            out += ' data_ele: %s' % (self.data_ele)
        if self.usage:
            out += ' usage: %s' % (self.usage)
        if self.seq:
            out += ' seq: %i' % (self.seq)
        out += ' %s(%i, %i)' % (data_ele['data_type'], data_ele[
            'min_len'], data_ele['max_len'])
        if self.external_codes:
            out += ' external codes: %s' % (self.external_codes)
        out += '\n'
        return out
    def _error(self, errh, err_str, err_cde, elem_val):
        """
        Forward the error to an error_handler.
        """
        errh.ele_error(err_cde, err_str, elem_val, self.refdes)
    def _valid_code(self, code):
        """
        Verify the x12 element value is in the given list of valid codes.

        @return: True if found, else False
        @rtype: boolean
        """
        if code in self.valid_codes:
            return True
        return False
    def get_parent(self):
        """
        @return: ref to parent class instance
        """
        return self.parent
    def is_match(self):
        """
        Not meaningful for a bare element; subclasses must override.

        @rtype: boolean
        """
        raise NotImplementedError('Override in sub-class')
    def is_valid(self, elem, errh, type_list=[]):
        """
        Is this a valid element?

        Checks usage, length against the data element definition, control
        characters, trailing spaces, valid codes, data type and the
        optional map regex.

        NOTE(review): type_list has a mutable default; it is only read
        here so the shared-state pitfall does not bite, but a tuple
        default would be safer.

        @param elem: element instance
        @type elem: L{element<segment.Element>}
        @param errh: instance of error_handler
        @param type_list: Optional date/time type list (e.g. from DTP02)
        @type type_list: list[string]
        @return: True if valid
        @rtype: boolean
        """
        errh.add_ele(self)
        if elem and elem.is_composite():
            err_str = 'Data element "%s" (%s) is an invalid composite' % \
                (self.name, self.refdes)
            self._error(errh, err_str, '6', elem.__repr__())
            return False
        if elem is None or elem.get_value() == '':
            if self.usage in ('N', 'S'):
                return True
            elif self.usage == 'R':
                # A missing first sub-element of a non-required composite
                # is tolerated; any other missing required element is an error.
                if self.seq != 1 or not self.parent.is_composite() or self.parent.usage == 'R':
                    err_str = 'Mandatory data element "%s" (%s) is missing' % (
                        self.name, self.refdes)
                    self._error(errh, err_str, '1', None)
                    return False
                else:
                    return True
        if self.usage == 'N' and elem.get_value() != '':
            err_str = 'Data element "%s" (%s) is marked as Not Used' % (
                self.name, self.refdes)
            self._error(errh, err_str, '10', None)
            return False
        elem_val = elem.get_value()
        data_ele = self.root.data_elements.get_by_elem_num(self.data_ele)
        data_type = data_ele['data_type']
        min_len = data_ele['min_len']
        max_len = data_ele['max_len']
        valid = True
        # Length checks: numeric types (R, Nx) are measured without the
        # sign and decimal point characters.
        if (not data_type is None) and (data_type == 'R' or data_type[0] == 'N'):
            elem_strip = elem_val.replace('-', '').replace('.', '')
            elem_len = len(elem_strip)
            if len(elem_strip) < min_len:
                err_str = 'Data element "%s" (%s) is too short: len("%s") = %i < %i (min_len)' % \
                    (self.name, self.refdes, elem_val, elem_len, min_len)
                self._error(errh, err_str, '4', elem_val)
                valid = False
            if len(elem_strip) > max_len:
                err_str = 'Data element "%s" (%s) is too long: len("%s") = %i > %i (max_len)' % \
                    (self.name, self.refdes, elem_val, elem_len, max_len)
                self._error(errh, err_str, '5', elem_val)
                valid = False
        else:
            elem_len = len(elem_val)
            if len(elem_val) < min_len:
                err_str = 'Data element "%s" (%s) is too short: len("%s") = %i < %i (min_len)' % \
                    (self.name, self.refdes, elem_val, elem_len, min_len)
                self._error(errh, err_str, '4', elem_val)
                valid = False
            if len(elem_val) > max_len:
                err_str = 'Data element "%s" (%s) is too long: len("%s") = %i > %i (max_len)' % \
                    (self.name, self.refdes, elem_val, elem_len, max_len)
                self._error(errh, err_str, '5', elem_val)
                valid = False
        (res, bad_string) = validation.contains_control_character(elem_val)
        if res:
            err_str = 'Data element "%s" (%s), contains an invalid control character(%s)' % \
                (self.name, self.refdes, bad_string)
            self._error(errh, err_str, '6', bad_string)
            return False  # skip following checks, control character errors trump all
        if data_type in ['AN', 'ID'] and elem_val[-1] == ' ':
            # A trailing space is only flagged when the value would still
            # meet the minimum length without it.
            if len(elem_val.rstrip()) >= min_len:
                err_str = 'Data element "%s" (%s) has unnecessary trailing spaces. (%s)' % \
                    (self.name, self.refdes, elem_val)
                self._error(errh, err_str, '6', elem_val)
                valid = False
        if not self._is_valid_code(elem_val, errh):
            valid = False
        if not validation.IsValidDataType(elem_val, data_type, self.root.param.get('charset'), self.root.icvn):
            if data_type in ('RD8', 'DT', 'D8', 'D6'):
                err_str = 'Data element "%s" (%s) contains an invalid date (%s)' % \
                    (self.name, self.refdes, elem_val)
                self._error(errh, err_str, '8', elem_val)
                valid = False
            elif data_type == 'TM':
                err_str = 'Data element "%s" (%s) contains an invalid time (%s)' % \
                    (self.name, self.refdes, elem_val)
                self._error(errh, err_str, '9', elem_val)
                valid = False
            else:
                err_str = 'Data element "%s" (%s) is type %s, contains an invalid character(%s)' % \
                    (self.name, self.refdes, data_type, elem_val)
                self._error(errh, err_str, '6', elem_val)
                valid = False
        if len(type_list) > 0:
            # The value must match at least one of the supplied date/time
            # formats (e.g. DTP03 validated against DTP02's format code).
            valid_type = False
            for dtype in type_list:
                valid_type |= validation.IsValidDataType(elem_val, dtype, self.root.param.get('charset'))
            if not valid_type:
                if 'TM' in type_list:
                    err_str = 'Data element "%s" (%s) contains an invalid time (%s)' % \
                        (self.name, self.refdes, elem_val)
                    self._error(errh, err_str, '9', elem_val)
                elif 'RD8' in type_list or 'DT' in type_list or 'D8' in type_list or 'D6' in type_list:
                    err_str = 'Data element "%s" (%s) contains an invalid date (%s)' % \
                        (self.name, self.refdes, elem_val)
                    self._error(errh, err_str, '8', elem_val)
                valid = False
        if self.rec:
            m = self.rec.search(elem_val)
            if not m:
                err_str = 'Data element "%s" with a value of (%s)' % \
                    (self.name, elem_val)
                err_str += ' failed to match the regular expression "%s"' % (
                    self.res)
                self._error(errh, err_str, '7', elem_val)
                valid = False
        return valid
    def _is_valid_code(self, elem_val, errh):
        """
        Check elem_val against the enumerated and external code lists.

        A value passes when no code list applies at all, when it is in
        valid_codes, or when the external code table accepts it.

        @rtype: boolean
        """
        bValidCode = False
        if len(self.valid_codes) == 0 and self.external_codes is None:
            bValidCode = True
        if elem_val in self.valid_codes:
            bValidCode = True
        if self.external_codes is not None and \
                self.root.ext_codes.isValid(self.external_codes, elem_val):
            bValidCode = True
        if not bValidCode:
            err_str = '(%s) is not a valid code for %s (%s)' % (
                elem_val, self.name, self.refdes)
            self._error(errh, err_str, '7', elem_val)
            return False
        return True
    def get_data_type(self):
        """
        Return the X12 data type code (e.g. 'AN', 'ID', 'DT') from the
        data element table.
        """
        data_ele = self.root.data_elements.get_by_elem_num(self.data_ele)
        return data_ele['data_type']
    @property
    def data_type(self):
        # X12 data type code, looked up from the data element table
        data_ele = self.root.data_elements.get_by_elem_num(self.data_ele)
        return data_ele['data_type']
    @property
    def min_len(self):
        # Minimum length, looked up from the data element table
        data_ele = self.root.data_elements.get_by_elem_num(self.data_ele)
        return data_ele['min_len']
    @property
    def max_len(self):
        # Maximum length, looked up from the data element table
        data_ele = self.root.data_elements.get_by_elem_num(self.data_ele)
        return data_ele['max_len']
    @property
    def data_element_name(self):
        # Generic data element name from the data element table
        data_ele = self.root.data_elements.get_by_elem_num(self.data_ele)
        return data_ele['name']
    def get_seg_count(self):
        """
        No-op for elements; segment counting does not apply at this level.
        """
        pass
    def is_element(self):
        """
        @rtype: boolean
        """
        return True
    def get_path(self):
        """
        Build (and cache) the XPath-style path of this element.

        @return: path - XPath style
        @rtype: string
        """
        if self._fullpath:
            return self._fullpath
        # the enclosing loop's path plus this element's id
        parent_path = self.get_parent_segment().parent.get_path()
        self._fullpath = parent_path + '/' + self.id
        return self._fullpath
    def get_parent_segment(self):
        """Walk up the parent chain to the enclosing segment node."""
        p = self.parent
        while not p.is_segment():
            p = p.parent
        return p
############################################################
# Composite Interface
############################################################
class composite_if(x12_node):
    """
    Composite Node Interface

    Map interface for a composite definition: a named group of
    sub-element (element_if) definitions within a segment.
    """
    def __init__(self, root, parent, elem):
        """
        Get the values for this composite from the map XML.

        @param root: root map node
        @param parent: parent node (normally a segment_if)
        @param elem: <composite> node from the map XML
        """
        x12_node.__init__(self)
        self.children = []
        self.root = root
        self.parent = parent
        self.path = ''
        self.base_name = 'composite'
        self.id = elem.get('xid')
        # Values may be XML attributes or child elements; note refdes
        # prefers the child element form, unlike the other fields.
        self.refdes = elem.findtext(
            'refdes') if elem.findtext('refdes') else self.id
        self.data_ele = elem.get('data_ele') if elem.get(
            'data_ele') else elem.findtext('data_ele')
        self.usage = elem.get(
            'usage') if elem.get('usage') else elem.findtext('usage')
        self.seq = int(elem.get(
            'seq')) if elem.get('seq') else int(elem.findtext('seq'))
        self.repeat = int(elem.get('repeat')) if elem.get('repeat') else int(
            elem.findtext('repeat')) if elem.findtext('repeat') else 1
        self.name = elem.get(
            'name') if elem.get('name') else elem.findtext('name')
        for e in elem.findall('element'):
            self.children.append(element_if(self.root, self, e))
    def _error(self, errh, err_str, err_cde, elem_val):
        """
        Forward the error to an error_handler, with newlines stripped.

        NOTE(review): elem_val.replace would raise if elem_val were None;
        callers appear to always pass a string -- confirm.
        """
        err_str2 = err_str.replace('\n', '').replace('\r', '')
        elem_val2 = elem_val.replace('\n', '').replace('\r', '')
        errh.ele_error(err_cde, err_str2, elem_val2, self.refdes)
    def debug_print(self):
        """Recursively write this node and its children to stdout."""
        sys.stdout.write(self.__repr__())
        for node in self.children:
            node.debug_print()
    def __repr__(self):
        """
        One-line description: id, name and optional usage, seq, refdes.

        @rtype: string
        """
        out = '%s "%s"' % (self.id, self.name)
        if self.usage:
            out += ' usage %s' % (self.usage)
        if self.seq:
            out += ' seq %i' % (self.seq)
        if self.refdes:
            out += ' refdes %s' % (self.refdes)
        out += '\n'
        return out
    def xml(self):
        """
        Sends an xml representation of the composite to stdout.

        NOTE(review): this calls sub_elem.xml() on element_if children,
        which define no xml() method in this file -- presumably provided
        by the x12_node base class; confirm.
        """
        sys.stdout.write('<composite>\n')
        for sub_elem in self.children:
            sub_elem.xml()
        sys.stdout.write('</composite>\n')
    def is_valid(self, comp_data, errh):
        """
        Validates the composite.

        Checks requirement/not-used usage, the sub-element count, each
        present sub-element, and missing trailing required sub-elements.

        @param comp_data: data composite instance, has multiple values
        @param errh: instance of error_handler
        @rtype: boolean
        """
        valid = True
        if (comp_data is None or comp_data.is_empty()) and self.usage in ('N', 'S'):
            return True
        if self.usage == 'R':
            # At least one sub-element must carry a value
            good_flag = False
            for sub_ele in comp_data:
                if sub_ele is not None and len(sub_ele.get_value()) > 0:
                    good_flag = True
                    break
            if not good_flag:
                err_str = 'At least one component of composite "%s" (%s) is required' % \
                    (self.name, self.refdes)
                errh.ele_error('2', err_str, None, self.refdes)
                return False
        if self.usage == 'N' and not comp_data.is_empty():
            err_str = 'Composite "%s" (%s) is marked as Not Used' % (
                self.name, self.refdes)
            errh.ele_error('5', err_str, None, self.refdes)
            return False
        if len(comp_data) > self.get_child_count():
            err_str = 'Too many sub-elements in composite "%s" (%s)' % (
                self.name, self.refdes)
            errh.ele_error('3', err_str, None, self.refdes)
            valid = False
        for i in range(min(len(comp_data), self.get_child_count())):
            valid &= self.get_child_node_by_idx(i).is_valid(comp_data[i], errh)
        for i in range(min(len(comp_data), self.get_child_count()), self.get_child_count()):
            if i < self.get_child_count():
                # Check missing required sub-elements
                valid &= self.get_child_node_by_idx(i).is_valid(None, errh)
        return valid
    def is_composite(self):
        """
        @rtype: boolean
        """
        return True
def load_map_file(map_file, param, map_path=None):
    """
    Create the map object from a file.

    @param map_file: map XML file name (absolute path, or a name resolved
        against map_path / the package resources)
    @param param: runtime parameters passed through to map_if
    @param map_path: Override directory containing map xml files. If None,
        uses package resource folder
    @type map_path: string
    @rtype: pyx12.map_if
    @raise OSError: if map_path does not exist or the map file is missing
    """
    logger = logging.getLogger('pyx12')
    if map_path is not None:
        logger.debug("Looking for map file '{}' in map_path '{}'".format(map_file, map_path))
        if not os.path.isdir(map_path):
            raise OSError(2, "Map path does not exist", map_path)
        # Bug fix: this check previously re-tested os.path.isdir(map_path)
        # a second time; it must test the map file itself.
        if not os.path.isfile(os.path.join(map_path, map_file)):
            raise OSError(2, "Pyx12 map file '{}' does not exist in map path".format(map_file), map_path)
        map_fd = open(os.path.join(map_path, map_file))
    else:
        logger.debug("Looking for map file '{}' in pkg_resources".format(map_file))
        map_fd = resource_stream(__name__, os.path.join('map', map_file))
    try:
        logger.debug('Create map from %s' % (map_file))
        parser = et.XMLParser(encoding="utf-8")
        etree = et.parse(map_fd, parser=parser)
        imap = map_if(etree.getroot(), param, map_path)
    except AssertionError:
        logger.error('Load of map file failed: %s' % (map_file))
        raise
    finally:
        # Bug fix: close the file even when parsing raises (the original
        # leaked map_fd on any exception).
        map_fd.close()
    return imap
| 36.204765 | 156 | 0.533543 | possible = child.get_unique_key_id_element(id_val)
if possible is not None:
return child
raise EngineError('getnodebypath failed. Path "%s" not found' % spath)
def getnodebypath2(self, path_str):
x12path = path.X12Path(path_str)
if x12path.empty():
return None
for ord1 in sorted(self.pos_map):
for child in self.pos_map[ord1]:
if child.is_loop() and len(x12path.loop_list) > 0:
if child.id.upper() == x12path.loop_list[0].upper():
if len(x12path.loop_list) == 1 and x12path.seg_id is None:
return child
else:
del x12path.loop_list[0]
return child.getnodebypath2(x12path.format())
elif child.is_segment() and len(x12path.loop_list) == 0 and x12path.seg_id is not None:
if x12path.id_val is None:
if x12path.seg_id == child.id:
return child.getnodebypath2(x12path.format())
else:
seg_id = x12path.seg_id
id_val = x12path.id_val
if seg_id == child.id:
possible = child.get_unique_key_id_element(id_val)
if possible is not None:
return child.getnodebypath2(x12path.format())
raise EngineError(
'getnodebypath2 failed. Path "%s" not found' % path_str)
def get_child_count(self):
return self.__len__()
def get_child_node_by_idx(self, idx):
raise EngineError('loop_if.get_child_node_by_idx is not a valid call for a loop_if')
def get_seg_count(self):
i = 0
for ord1 in sorted(self.pos_map):
for child in self.pos_map[ord1]:
if child.is_segment():
i += 1
return i
def is_loop(self):
return True
def is_match(self, seg_data):
pos_keys = sorted(self.pos_map)
child = self.pos_map[pos_keys[0]][0]
if child.is_loop():
return child.is_match(seg_data)
elif child.is_segment():
if child.is_match(seg_data):
return True
else:
return False
else:
return False
def get_child_seg_node(self, seg_data):
for child in self.childIterator():
if child.is_segment() and child.is_match(seg_data):
return child
return None
def get_child_loop_node(self, seg_data):
for child in self.childIterator():
if child.is_loop() and child.is_match(seg_data):
return child
return None
def get_cur_count(self):
raise DeprecationWarning('Moved to nodeCounter')
return self._cur_count
def incr_cur_count(self):
raise DeprecationWarning('Moved to nodeCounter')
self._cur_count += 1
def reset_child_count(self):
raise DeprecationWarning('Moved to nodeCounter')
for ord1 in sorted(self.pos_map):
for child in self.pos_map[ord1]:
child.reset_cur_count()
def reset_cur_count(self):
raise DeprecationWarning('Moved to nodeCounter')
self._cur_count = 0
self.reset_child_count()
def set_cur_count(self, ct):
raise DeprecationWarning('Moved to nodeCounter')
self._cur_count = ct
def get_counts_list(self, ct_list):
raise DeprecationWarning('Moved to nodeCounter')
my_ct = (self.get_path(), self._cur_count)
ct_list.append(my_ct)
if not self.parent.is_map_root():
self.parent.get_counts_list(ct_list)
return True
def loop_segment_iterator(self):
yield self
for ord1 in sorted(self.pos_map):
for child in self.pos_map[ord1]:
if child.is_loop() or child.is_segment():
for c in child.loop_segment_iterator():
yield c
class segment_if(x12_node):
def __init__(self, root, parent, elem):
x12_node.__init__(self)
self.root = root
self.parent = parent
self.children = []
self.base_name = 'segment'
self._cur_count = 0
self.syntax = []
self.id = elem.get('xid')
self.path = self.id
self.type = elem.get('type')
self.name = elem.get(
'name') if elem.get('name') else elem.findtext('name')
self.usage = elem.get(
'usage') if elem.get('usage') else elem.findtext('usage')
self.pos = int(elem.get(
'pos')) if elem.get('pos') else int(elem.findtext('pos'))
self.max_use = elem.get('max_use') if elem.get(
'max_use') else elem.findtext('max_use')
self.repeat = elem.get('repeat') if elem.get(
'repeat') else elem.findtext('repeat')
self.end_tag = elem.get('end_tag') if elem.get(
'end_tag') else elem.findtext('end_tag')
for s in elem.findall('syntax'):
syn_list = self._split_syntax(s.text)
if syn_list is not None:
self.syntax.append(syn_list)
children_map = {}
for e in elem.findall('element'):
seq = int(e.get('seq')) if e.get('seq') else int(e.findtext('seq'))
children_map[seq] = e
for e in elem.findall('composite'):
seq = int(e.get('seq')) if e.get('seq') else int(e.findtext('seq'))
children_map[seq] = e
for seq in sorted(children_map.keys()):
if children_map[seq].tag == 'element':
self.children.append(element_if(
self.root, self, children_map[seq]))
elif children_map[seq].tag == 'composite':
self.children.append(composite_if(
self.root, self, children_map[seq]))
def debug_print(self):
sys.stdout.write(self.__repr__())
for node in self.children:
node.debug_print()
def __repr__(self):
out = '%s "%s"' % (self.id, self.name)
if self.usage:
out += ' usage: %s' % (self.usage)
if self.pos:
out += ' pos: %i' % (self.pos)
if self.max_use:
out += ' max_use: %s' % (self.max_use)
out += '\n'
return out
def get_child_node_by_idx(self, idx):
if idx >= len(self.children):
return None
else:
m = [c for c in self.children if c.seq == idx + 1]
if len(m) == 1:
return m[0]
else:
raise EngineError('idx %i not found in %s' % (idx, self.id))
def get_child_node_by_ordinal(self, ord):
return self.get_child_node_by_idx(ord - 1)
def getnodebypath2(self, path_str):
x12path = path.X12Path(path_str)
if x12path.empty():
return None
if x12path.ele_idx is None:
return self
ele = self.get_child_node_by_ordinal(x12path.ele_idx)
if x12path.subele_idx is None:
return ele
return ele.get_child_node_by_ordinal(x12path.subele_idx)
raise EngineError('getnodebypath2 failed. Path "%s" not found' % path_str)
def get_max_repeat(self):
if self.max_use is None or self.max_use == '>1':
return MAXINT
return int(self.max_use)
def get_parent(self):
return self.parent
def is_first_seg_in_loop(self):
if self is self.get_parent().get_first_seg():
return True
else:
return False
def is_match(self, seg):
if seg.get_seg_id() == self.id:
if self.children[0].is_element() \
and self.children[0].get_data_type() == 'ID' \
and self.children[0].usage == 'R' \
and len(self.children[0].valid_codes) > 0 \
and seg.get_value('01') not in self.children[0].valid_codes:
return False
elif seg.get_seg_id() == 'ENT' \
and self.children[1].is_element() \
and self.children[1].get_data_type() == 'ID' \
and len(self.children[1].valid_codes) > 0 \
and seg.get_value('02') not in self.children[1].valid_codes:
return False
elif seg.get_seg_id() == 'CTX' \
and self.children[0].is_composite() \
and self.children[0].children[0].get_data_type() == 'AN' \
and len(self.children[0].children[0].valid_codes) > 0 \
and seg.get_value('01-1') not in self.children[0].children[0].valid_codes:
return False
elif self.children[0].is_composite() \
and self.children[0].children[0].get_data_type() == 'ID' \
and len(self.children[0].children[0].valid_codes) > 0 \
and seg.get_value('01-1') not in self.children[0].children[0].valid_codes:
return False
elif seg.get_seg_id() == 'HL' and self.children[2].is_element() \
and len(self.children[2].valid_codes) > 0 \
and seg.get_value('03') not in self.children[2].valid_codes:
return False
else:
return True
else:
return False
def is_match_qual(self, seg_data, seg_id, qual_code):
if seg_id == self.id:
if qual_code is None:
return (True, None, None, None)
elif self.children[0].is_element() \
and self.children[0].get_data_type() == 'ID' \
and self.children[0].usage == 'R' \
and len(self.children[0].valid_codes) > 0:
if qual_code in self.children[0].valid_codes and seg_data.get_value('01') == qual_code:
return (True, qual_code, 1, None)
else:
return (False, None, None, None)
elif seg_id == 'ENT' \
and self.children[1].is_element() \
and self.children[1].get_data_type() == 'ID' \
and len(self.children[1].valid_codes) > 0:
if qual_code in self.children[1].valid_codes and seg_data.get_value('02') == qual_code:
return (True, qual_code, 2, None)
else:
return (False, None, None, None)
elif self.children[0].is_composite() \
and self.children[0].children[0].get_data_type() == 'ID' \
and len(self.children[0].children[0].valid_codes) > 0:
if qual_code in self.children[0].children[0].valid_codes and seg_data.get_value('01-1') == qual_code:
return (True, qual_code, 1, 1)
else:
return (False, None, None, None)
elif seg_id == 'HL' and self.children[2].is_element() \
and len(self.children[2].valid_codes) > 0:
if qual_code in self.children[2].valid_codes and seg_data.get_value('03') == qual_code:
return (True, qual_code, 3, None)
else:
return (False, None, None, None)
else:
return (True, None, None, None)
else:
return (False, None, None, None)
def guess_unique_key_id_element(self):
if self.children[0].is_element() and self.children[0].get_data_type() == 'ID' and len(self.children[0].valid_codes) > 0:
return self.children[0]
elif self.id == 'ENT' and self.children[1].is_element() and self.children[1].get_data_type() == 'ID' and len(self.children[1].valid_codes) > 0:
return self.children[1]
elif self.children[0].is_composite() and self.children[0].children[0].get_data_type() == 'ID' and len(self.children[0].children[0].valid_codes) > 0:
return self.children[0].children[0]
elif self.id == 'HL' and self.children[2].is_element() and len(self.children[2].valid_codes) > 0:
return self.children[2]
return None
    def get_unique_key_id_element(self, id_val):
        """Like guess_unique_key_id_element, but additionally require that
        ``id_val`` is one of the candidate element's valid codes.

        Same priority order as the guess method (first element, ENT second
        element, composite first sub-element, HL third element).  Returns
        the matching element node, or None.
        """
        if self.children[0].is_element() and self.children[0].get_data_type() == 'ID' \
                and len(self.children[0].valid_codes) > 0 and id_val in self.children[0].valid_codes:
            return self.children[0]
        elif self.id == 'ENT' and self.children[1].is_element() and self.children[1].get_data_type() == 'ID' \
                and len(self.children[1].valid_codes) > 0 and id_val in self.children[1].valid_codes:
            return self.children[1]
        elif self.children[0].is_composite() and self.children[0].children[0].get_data_type() == 'ID' \
                and len(self.children[0].children[0].valid_codes) > 0 and id_val in self.children[0].children[0].valid_codes:
            return self.children[0].children[0]
        elif self.id == 'HL' and self.children[2].is_element() and len(self.children[2].valid_codes) > 0 and id_val in self.children[2].valid_codes:
            return self.children[2]
        return None
    def is_segment(self):
        """This node type is a segment (overrides the base node's answer)."""
        return True
    def is_valid(self, seg_data, errh):
        """Validate one segment's data against this segment definition.

        Checks, in order: element-count overflow, each present child
        (composite or element) via its own is_valid, missing trailing
        children, and finally the segment's syntax rules.  Errors are
        reported through ``errh``; returns True only if every check passed.
        """
        valid = True
        child_count = self.get_child_count()
        # Too many elements supplied for this segment definition.
        if len(seg_data) > child_count:
            err_str = 'Too many elements in segment "%s" (%s). Has %i, should have %i' % \
                (self.name, seg_data.get_seg_id(), len(seg_data), child_count)
            ref_des = '%02i' % (child_count + 1)
            err_value = seg_data.get_value(ref_des)
            errh.ele_error('3', err_str, err_value, ref_des)
            valid = False
        # dtype / type_list accumulate date/time qualifiers for DTP-style
        # segments so later elements can be validated against them.
        dtype = []
        type_list = []
        for i in range(min(len(seg_data), child_count)):
            child_node = self.get_child_node_by_idx(i)
            if child_node.is_composite():
                ref_des = '%02i' % (i + 1)
                comp_data = seg_data.get(ref_des)
                subele_count = child_node.get_child_count()
                # Flag composites with more sub-elements than defined
                # (unless the composite is marked Not Used).
                if seg_data.ele_len(ref_des) > subele_count and child_node.usage != 'N':
                    subele_node = child_node.get_child_node_by_idx(
                        subele_count + 1)
                    err_str = 'Too many sub-elements in composite "%s" (%s)' % \
                        (subele_node.name, subele_node.refdes)
                    err_value = seg_data.get_value(ref_des)
                    errh.ele_error('3', err_str, err_value, ref_des)
                valid &= child_node.is_valid(comp_data, errh)
            elif child_node.is_element():
                # DTP02 carries the date/time format qualifier used to
                # validate DTP03.
                if i == 1 and seg_data.get_seg_id() == 'DTP' \
                        and seg_data.get_value('02') in ('RD8', 'D8', 'D6', 'DT', 'TM'):
                    dtype = [seg_data.get_value('02')]
                # Data element 1250 lists the allowed formats for a
                # following 1251 date/time value.
                if child_node.data_ele == '1250':
                    type_list.extend(child_node.valid_codes)
                ele_data = seg_data.get('%02i' % (i + 1))
                if i == 2 and seg_data.get_seg_id() == 'DTP':
                    valid &= child_node.is_valid(ele_data, errh, dtype)
                elif child_node.data_ele == '1251' and len(type_list) > 0:
                    valid &= child_node.is_valid(ele_data, errh, type_list)
                else:
                    valid &= child_node.is_valid(ele_data, errh)
        # Children beyond the supplied data: validate as absent (catches
        # missing required elements).
        for i in range(min(len(seg_data), child_count), child_count):
            child_node = self.get_child_node_by_idx(i)
            valid &= child_node.is_valid(None, errh)
        # Apply the segment-level syntax rules (P/R/C/L/E relations).
        for syn in self.syntax:
            (bResult, err_str) = is_syntax_valid(seg_data, syn)
            if not bResult:
                syn_type = syn[0]
                if syn_type == 'E':
                    errh.ele_error('10', err_str, None, syn[1])
                else:
                    errh.ele_error('2', err_str, None, syn[1])
                valid &= False
        return valid
def _split_syntax(self, syntax):
if syntax[0] not in ['P', 'R', 'C', 'L', 'E']:
return None
syn = [syntax[0]]
for i in range(len(syntax[1:]) // 2):
syn.append(int(syntax[i * 2 + 1:i * 2 + 3]))
return syn
    def get_cur_count(self):
        """Deprecated: repetition counting moved to nodeCounter."""
        raise DeprecationWarning('Moved to nodeCounter')
        # Unreachable legacy body retained for reference.
        return self._cur_count
    def incr_cur_count(self):
        """Deprecated: repetition counting moved to nodeCounter."""
        raise DeprecationWarning('Moved to nodeCounter')
        # Unreachable legacy body retained for reference.
        self._cur_count += 1
    def reset_cur_count(self):
        """Deprecated: repetition counting moved to nodeCounter."""
        raise DeprecationWarning('Moved to nodeCounter')
        # Unreachable legacy body retained for reference.
        self._cur_count = 0
    def set_cur_count(self, ct):
        """Deprecated: repetition counting moved to nodeCounter."""
        raise DeprecationWarning('Moved to nodeCounter')
        # Unreachable legacy body retained for reference.
        self._cur_count = ct
    def get_counts_list(self, ct_list):
        """Deprecated: repetition counting moved to nodeCounter."""
        raise DeprecationWarning('Moved to nodeCounter')
        # Unreachable legacy body retained for reference.
        my_ct = (self.get_path(), self._cur_count)
        ct_list.append(my_ct)
        if not self.parent.is_map_root():
            self.parent.get_counts_list(ct_list)
        return True
    def loop_segment_iterator(self):
        """Yield this segment node (leaf case of the loop/segment walk)."""
        yield self
value() != '':
err_str = 'Data element "%s" (%s) is marked as Not Used' % (
self.name, self.refdes)
self._error(errh, err_str, '10', None)
return False
elem_val = elem.get_value()
data_ele = self.root.data_elements.get_by_elem_num(self.data_ele)
data_type = data_ele['data_type']
min_len = data_ele['min_len']
max_len = data_ele['max_len']
valid = True
if (not data_type is None) and (data_type == 'R' or data_type[0] == 'N'):
elem_strip = elem_val.replace('-', '').replace('.', '')
elem_len = len(elem_strip)
if len(elem_strip) < min_len:
err_str = 'Data element "%s" (%s) is too short: len("%s") = %i < %i (min_len)' % \
(self.name, self.refdes, elem_val, elem_len, min_len)
self._error(errh, err_str, '4', elem_val)
valid = False
if len(elem_strip) > max_len:
err_str = 'Data element "%s" (%s) is too long: len("%s") = %i > %i (max_len)' % \
(self.name, self.refdes, elem_val, elem_len, max_len)
self._error(errh, err_str, '5', elem_val)
valid = False
else:
elem_len = len(elem_val)
if len(elem_val) < min_len:
err_str = 'Data element "%s" (%s) is too short: len("%s") = %i < %i (min_len)' % \
(self.name, self.refdes, elem_val, elem_len, min_len)
self._error(errh, err_str, '4', elem_val)
valid = False
if len(elem_val) > max_len:
err_str = 'Data element "%s" (%s) is too long: len("%s") = %i > %i (max_len)' % \
(self.name, self.refdes, elem_val, elem_len, max_len)
self._error(errh, err_str, '5', elem_val)
valid = False
(res, bad_string) = validation.contains_control_character(elem_val)
if res:
err_str = 'Data element "%s" (%s), contains an invalid control character(%s)' % \
(self.name, self.refdes, bad_string)
self._error(errh, err_str, '6', bad_string)
return False
if data_type in ['AN', 'ID'] and elem_val[-1] == ' ':
if len(elem_val.rstrip()) >= min_len:
err_str = 'Data element "%s" (%s) has unnecessary trailing spaces. (%s)' % \
(self.name, self.refdes, elem_val)
self._error(errh, err_str, '6', elem_val)
valid = False
if not self._is_valid_code(elem_val, errh):
valid = False
if not validation.IsValidDataType(elem_val, data_type, self.root.param.get('charset'), self.root.icvn):
if data_type in ('RD8', 'DT', 'D8', 'D6'):
err_str = 'Data element "%s" (%s) contains an invalid date (%s)' % \
(self.name, self.refdes, elem_val)
self._error(errh, err_str, '8', elem_val)
valid = False
elif data_type == 'TM':
err_str = 'Data element "%s" (%s) contains an invalid time (%s)' % \
(self.name, self.refdes, elem_val)
self._error(errh, err_str, '9', elem_val)
valid = False
else:
err_str = 'Data element "%s" (%s) is type %s, contains an invalid character(%s)' % \
(self.name, self.refdes, data_type, elem_val)
self._error(errh, err_str, '6', elem_val)
valid = False
if len(type_list) > 0:
valid_type = False
for dtype in type_list:
valid_type |= validation.IsValidDataType(elem_val, dtype, self.root.param.get('charset'))
if not valid_type:
if 'TM' in type_list:
err_str = 'Data element "%s" (%s) contains an invalid time (%s)' % \
(self.name, self.refdes, elem_val)
self._error(errh, err_str, '9', elem_val)
elif 'RD8' in type_list or 'DT' in type_list or 'D8' in type_list or 'D6' in type_list:
err_str = 'Data element "%s" (%s) contains an invalid date (%s)' % \
(self.name, self.refdes, elem_val)
self._error(errh, err_str, '8', elem_val)
valid = False
if self.rec:
m = self.rec.search(elem_val)
if not m:
err_str = 'Data element "%s" with a value of (%s)' % \
(self.name, elem_val)
err_str += ' failed to match the regular expression "%s"' % (
self.res)
self._error(errh, err_str, '7', elem_val)
valid = False
return valid
def _is_valid_code(self, elem_val, errh):
bValidCode = False
if len(self.valid_codes) == 0 and self.external_codes is None:
bValidCode = True
if elem_val in self.valid_codes:
bValidCode = True
if self.external_codes is not None and \
self.root.ext_codes.isValid(self.external_codes, elem_val):
bValidCode = True
if not bValidCode:
err_str = '(%s) is not a valid code for %s (%s)' % (
elem_val, self.name, self.refdes)
self._error(errh, err_str, '7', elem_val)
return False
return True
    def get_data_type(self):
        """Data type code (e.g. 'ID', 'AN', 'DT') from the data-element dictionary."""
        data_ele = self.root.data_elements.get_by_elem_num(self.data_ele)
        return data_ele['data_type']
    @property
    def data_type(self):
        """Property form of get_data_type()."""
        data_ele = self.root.data_elements.get_by_elem_num(self.data_ele)
        return data_ele['data_type']
    @property
    def min_len(self):
        """Minimum length for this element, per the data-element dictionary."""
        data_ele = self.root.data_elements.get_by_elem_num(self.data_ele)
        return data_ele['min_len']
    @property
    def max_len(self):
        """Maximum length for this element, per the data-element dictionary."""
        data_ele = self.root.data_elements.get_by_elem_num(self.data_ele)
        return data_ele['max_len']
    @property
    def data_element_name(self):
        """Dictionary name of the underlying data element."""
        data_ele = self.root.data_elements.get_by_elem_num(self.data_ele)
        return data_ele['name']
    def get_seg_count(self):
        """Not meaningful for element nodes; intentionally a no-op."""
        pass
    def is_element(self):
        """This node type is an element."""
        return True
    def get_path(self):
        """Slash-delimited path of this element, cached in ``_fullpath``."""
        if self._fullpath:
            return self._fullpath
        parent_path = self.get_parent_segment().parent.get_path()
        self._fullpath = parent_path + '/' + self.id
        return self._fullpath
    def get_parent_segment(self):
        """Walk up the parent chain to the enclosing segment node."""
        p = self.parent
        while not p.is_segment():
            p = p.parent
        return p
lf):
return True
def load_map_file(map_file, param, map_path=None):
    """Load an X12 map definition from XML and build a ``map_if`` tree.

    Parameters:
        map_file -- map file name (e.g. '837.4010.X098.A1.xml')
        param    -- pyx12 params object passed through to map_if
        map_path -- optional directory to load from; when None the map is
                    read from the package's bundled ``map`` resources

    Returns the constructed map_if instance.
    Raises OSError when map_path or the map file does not exist, and
    re-raises AssertionError (after logging) if the map fails to load.
    """
    logger = logging.getLogger('pyx12')
    if map_path is not None:
        logger.debug("Looking for map file '{}' in map_path '{}'".format(map_file, map_path))
        if not os.path.isdir(map_path):
            raise OSError(2, "Map path does not exist", map_path)
        # Bug fix: this check previously re-tested the directory
        # (os.path.isdir(map_path)) instead of the map file itself,
        # so a missing file was only caught by open() with a less
        # helpful error.
        if not os.path.isfile(os.path.join(map_path, map_file)):
            raise OSError(2, "Pyx12 map file '{}' does not exist in map path".format(map_file), map_path)
        map_fd = open(os.path.join(map_path, map_file))
    else:
        logger.debug("Looking for map file '{}' in pkg_resources".format(map_file))
        map_fd = resource_stream(__name__, os.path.join('map', map_file))
    imap = None
    try:
        logger.debug('Create map from %s' % (map_file))
        parser = et.XMLParser(encoding="utf-8")
        etree = et.parse(map_fd, parser=parser)
        imap = map_if(etree.getroot(), param, map_path)
    except AssertionError:
        logger.error('Load of map file failed: %s' % (map_file))
        raise
    finally:
        # Close the stream even when parsing raises; the original only
        # closed it on the success path and leaked it on error.
        map_fd.close()
    return imap
| true | true |
f7224303011a387772607dd8dc079207898f3c44 | 3,544 | py | Python | guillotina/fields/dynamic.py | vinissimus/guillotina | 4240adfa5607c022ff6dc5f7335e2c59c1f2217d | [
"BSD-2-Clause"
] | null | null | null | guillotina/fields/dynamic.py | vinissimus/guillotina | 4240adfa5607c022ff6dc5f7335e2c59c1f2217d | [
"BSD-2-Clause"
] | null | null | null | guillotina/fields/dynamic.py | vinissimus/guillotina | 4240adfa5607c022ff6dc5f7335e2c59c1f2217d | [
"BSD-2-Clause"
] | null | null | null | from collections import namedtuple
from guillotina import configure
from guillotina import schema
from guillotina.component import get_adapter
from guillotina.exceptions import ComponentLookupError
from guillotina.exceptions import ValueDeserializationError
from guillotina.fields.interfaces import IDynamicField
from guillotina.fields.interfaces import IDynamicFieldOperation
from guillotina.fields.patch import field_converter
from guillotina.fields.patch import PatchDictDel
from guillotina.fields.patch import PatchDictSet
from guillotina.fields.patch import PatchDictUpdate
from guillotina.fields.patch import PatchField
from guillotina.interfaces import IJSONToValue
from guillotina.schema.interfaces import IDict
from zope.interface import implementer
from zope.interface import Interface
@implementer(IDynamicField)
class DynamicField(PatchField):
    """Patch-style field whose operations are resolved through
    IDynamicFieldOperation adapters (assign/update/del)."""
    operation_type = IDynamicFieldOperation
@configure.value_deserializer(IDynamicField)
def dynamic_field_converter(field, value, context):
    """Deserializer entry point for IDynamicField values.

    Only patch-style payloads (a dict carrying an "op" key) are accepted;
    valid payloads are handed to the generic patch ``field_converter``.
    """
    looks_like_patch = isinstance(value, dict) and "op" in value
    if not looks_like_patch:
        raise ValueDeserializationError(field, value, "Not valid payload")
    return field_converter(field, value, context)
class IDynamicType(Interface):
    """
    Schema of supported dynamic-field value types.

    ``_validate_field`` binds one of these fields by name (the dynamic
    field definition's "type") to validate and convert incoming values.
    """
    date = schema.Datetime(required=False)
    text = schema.Text(required=False)
    integer = schema.Int(required=False)
    float = schema.Float(required=False)
    boolean = schema.Bool(required=False)
    # "keyword" accepts either a single text value or a list of them.
    keyword = schema.UnionField(
        schema.List(required=False, value_type=schema.Text(), max_length=1000),
        schema.Text(required=False),
        required=False,
    )
def _validate_field(field, context, value):
    """Validate and coerce one ``{"key": ..., "value": ...}`` entry for a
    dynamic field.

    Looks up the dynamic field definition named by ``value["key"]`` on the
    context, binds the matching IDynamicType field, validates the payload
    value against it and replaces ``value["value"]`` with the converted
    result in place.

    Raises ValueDeserializationError when the payload shape is wrong, the
    dynamic field is unknown, or its declared type is unsupported.
    """
    if "key" not in value or "value" not in value:
        raise ValueDeserializationError(field, value, f"Invalid data")
    from guillotina.behaviors.dynamic import find_field
    # Bug fix: do not shadow the ``field`` parameter with the lookup
    # result -- the original assigned to ``field`` and then raised
    # ValueDeserializationError with field=None when the lookup failed.
    field_def = find_field(context, value["key"])
    # now, verify value...
    if not field_def:
        raise ValueDeserializationError(field, value, f"Dynamic field not found")
    field_type = field_def.get("type", "unknown")
    try:
        valid_type = namedtuple("temp_assign_type", [field_type])
        ob = valid_type({field_type: None})
        bound_field = IDynamicType[field_type].bind(ob)
        # validate and convert
        real_value = get_adapter(bound_field, IJSONToValue, args=[value["value"], ob])
        bound_field.validate(real_value)
        value["value"] = real_value
    except (KeyError, ComponentLookupError):
        raise ValueDeserializationError(field, value, f"Invalid type {field_type}")
@configure.adapter(for_=IDict, provides=IDynamicFieldOperation, name="assign")
class DynamicDictSet(PatchDictSet):
    """'assign' patch operation for dynamic dict fields."""
    def __call__(self, context, value):
        # Validate/coerce the entry first, then fall through to the
        # generic dict-set behavior.
        if "key" in value and "value" in value:
            _validate_field(self.field, context, value)
        return super().__call__(context, value)
@configure.adapter(for_=IDict, provides=IDynamicFieldOperation, name="update")
class DynamicDictUpdate(PatchDictUpdate):
    """'update' patch operation for dynamic dict fields.

    Requires a list payload; every entry is validated before delegating
    to the generic dict-update behavior.
    """
    def __call__(self, context, value):
        if not isinstance(value, list):
            raise ValueDeserializationError(
                self.field, value, f"Invalid type patch data, must be list of updates"
            )
        for item in value:
            _validate_field(self.field, context, item)
        return super().__call__(context, value)
@configure.adapter(for_=IDict, provides=IDynamicFieldOperation, name="del")
class DynamicDictDel(PatchDictDel):
    """'del' patch operation for dynamic dict fields; inherits the
    generic PatchDictDel behavior unchanged."""
| 36.916667 | 86 | 0.738431 | from collections import namedtuple
from guillotina import configure
from guillotina import schema
from guillotina.component import get_adapter
from guillotina.exceptions import ComponentLookupError
from guillotina.exceptions import ValueDeserializationError
from guillotina.fields.interfaces import IDynamicField
from guillotina.fields.interfaces import IDynamicFieldOperation
from guillotina.fields.patch import field_converter
from guillotina.fields.patch import PatchDictDel
from guillotina.fields.patch import PatchDictSet
from guillotina.fields.patch import PatchDictUpdate
from guillotina.fields.patch import PatchField
from guillotina.interfaces import IJSONToValue
from guillotina.schema.interfaces import IDict
from zope.interface import implementer
from zope.interface import Interface
@implementer(IDynamicField)
class DynamicField(PatchField):
operation_type = IDynamicFieldOperation
@configure.value_deserializer(IDynamicField)
def dynamic_field_converter(field, value, context):
if not isinstance(value, dict) or "op" not in value:
raise ValueDeserializationError(field, value, "Not valid payload")
return field_converter(field, value, context)
class IDynamicType(Interface):
date = schema.Datetime(required=False)
text = schema.Text(required=False)
integer = schema.Int(required=False)
float = schema.Float(required=False)
boolean = schema.Bool(required=False)
keyword = schema.UnionField(
schema.List(required=False, value_type=schema.Text(), max_length=1000),
schema.Text(required=False),
required=False,
)
def _validate_field(field, context, value):
if "key" not in value or "value" not in value:
raise ValueDeserializationError(field, value, f"Invalid data")
from guillotina.behaviors.dynamic import find_field
field = find_field(context, value["key"])
if not field:
raise ValueDeserializationError(field, value, f"Dynamic field not found")
field_type = field.get("type", "unknown")
try:
valid_type = namedtuple("temp_assign_type", [field_type])
ob = valid_type({field_type: None})
bound_field = IDynamicType[field_type].bind(ob)
real_value = get_adapter(bound_field, IJSONToValue, args=[value["value"], ob])
bound_field.validate(real_value)
value["value"] = real_value
except (KeyError, ComponentLookupError):
raise ValueDeserializationError(field, value, f"Invalid type {field_type}")
@configure.adapter(for_=IDict, provides=IDynamicFieldOperation, name="assign")
class DynamicDictSet(PatchDictSet):
def __call__(self, context, value):
if "key" in value and "value" in value:
_validate_field(self.field, context, value)
return super().__call__(context, value)
@configure.adapter(for_=IDict, provides=IDynamicFieldOperation, name="update")
class DynamicDictUpdate(PatchDictUpdate):
def __call__(self, context, value):
if not isinstance(value, list):
raise ValueDeserializationError(
self.field, value, f"Invalid type patch data, must be list of updates"
)
for item in value:
_validate_field(self.field, context, item)
return super().__call__(context, value)
@configure.adapter(for_=IDict, provides=IDynamicFieldOperation, name="del")
class DynamicDictDel(PatchDictDel):
| true | true |
f722454298c1db6252d7edf5af2ce44da826cc10 | 86 | py | Python | AtCoder/ABC114/A.py | takaaki82/Java-Lessons | c4f11462bf84c091527dde5f25068498bfb2cc49 | [
"MIT"
] | 1 | 2018-11-25T04:15:45.000Z | 2018-11-25T04:15:45.000Z | AtCoder/ABC114/A.py | takaaki82/Java-Lessons | c4f11462bf84c091527dde5f25068498bfb2cc49 | [
"MIT"
] | null | null | null | AtCoder/ABC114/A.py | takaaki82/Java-Lessons | c4f11462bf84c091527dde5f25068498bfb2cc49 | [
"MIT"
] | 2 | 2018-08-08T13:01:14.000Z | 2018-11-25T12:38:36.000Z | n = int(input())
if n == 7 or n == 5 or n== 3:
print("YES")
else:
print("NO") | 14.333333 | 29 | 0.465116 | n = int(input())
if n == 7 or n == 5 or n== 3:
print("YES")
else:
print("NO") | true | true |
f72245eef78fd33aa667b1d0ea796a7bf15d8024 | 8,219 | py | Python | easyimap/easyimap.py | kujiy/easyimap | 4da69d7b7fc051a3384b6e5208930c20dc2aad21 | [
"BSD-3-Clause"
] | null | null | null | easyimap/easyimap.py | kujiy/easyimap | 4da69d7b7fc051a3384b6e5208930c20dc2aad21 | [
"BSD-3-Clause"
] | null | null | null | easyimap/easyimap.py | kujiy/easyimap | 4da69d7b7fc051a3384b6e5208930c20dc2aad21 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import imaplib
import email
from email.header import decode_header
import time
import re
import mimetypes
import chardet
try:
unicode('')
except NameError:
# for python3 compatibility.
unicode = str
class MailObj(object):
    """Read-only wrapper around an ``email.message.Message``.

    Header accessors decode RFC 2047 encoded words through the module's
    ``_decode_header``; ``body`` and ``attachments`` walk the MIME tree.
    """
    def __init__(self, message, uid=-1, raw=''):
        """message: parsed email message; uid: IMAP UID (negative means
        unknown); raw: original fetched bytes/str ('' means unknown)."""
        self._message = message
        self._uid = uid if uid > -1 else None
        self._raw = raw if raw else None
    @property
    def uid(self):
        """IMAP UID of the message, or None when not supplied."""
        return self._uid
    @property
    def title(self):
        """Decoded Subject header."""
        return _decode_header(self._message.get('Subject'))
    @property
    def to(self):
        """Decoded To header."""
        return _decode_header(self._message.get('To'))
    @property
    def from_addr(self):
        """Decoded From header.  Method name includes _addr so it does not
        conflict with the `from` keyword in Python."""
        return _decode_header(self._message.get('From'))
    @property
    def sender(self):
        """Decoded Sender header."""
        return _decode_header(self._message.get('Sender'))
    @property
    def cc(self):
        """Decoded CC header."""
        return _decode_header(self._message.get('CC'))
    @property
    def delivered_to(self):
        """Decoded Delivered-To header."""
        return _decode_header(self._message.get('Delivered-To'))
    @property
    def content_type(self):
        """Decoded Content-Type header."""
        return _decode_header(self._message.get('Content-Type'))
    @property
    def content_transfer_encoding(self):
        """Decoded Content-Transfer-Encoding header."""
        return _decode_header(self._message.get('Content-Transfer-Encoding'))
    @property
    def references(self):
        """Decoded References header."""
        return _decode_header(self._message.get('References'))
    @property
    def in_reply_to(self):
        """Decoded In-Reply-To header."""
        return _decode_header(self._message.get('In-Reply-To'))
    @property
    def reply_to(self):
        """Decoded Reply-To header."""
        return _decode_header(self._message.get('Reply-To'))
    @property
    def return_path(self):
        """Decoded Return-Path header."""
        return _decode_header(self._message.get('Return-Path'))
    @property
    def mime_version(self):
        """Decoded MIME-Version header."""
        return _decode_header(self._message.get('MIME-Version'))
    @property
    def message_id(self):
        """Decoded Message-ID header."""
        return _decode_header(self._message.get('Message-ID'))
    @property
    def date(self):
        """Decoded Date header."""
        return _decode_header(self._message.get('Date'))
    @property
    def raw(self):
        """Raw fetched message data as supplied to the constructor."""
        return self._raw
    @property
    def body(self):
        """Decoded text body: the first non-multipart part without a
        filename, or the first text/* sub-part of a multipart container."""
        for part in self._message.walk():
            maintype = part.get_content_maintype()
            if maintype != 'multipart' and not part.get_filename():
                return _decode_body(part)
            if maintype == 'multipart':
                for p in part.get_payload():
                    if p.get_content_maintype() == 'text':
                        return _decode_body(p)
        raise Exception("orz... something... something happened.")
    @property
    def attachments(self):
        """List of (filename, decoded_payload, content_type) tuples for
        every non-multipart part that carries a filename and a payload."""
        attachments = []
        for part in self._message.walk():
            if part.get_content_maintype() == 'multipart':
                continue
            filename = part.get_filename()
            if not filename:
                # Parts without a filename (e.g. the message body) are not
                # attachments.  The original's mimetypes-based
                # 'part-NNN.ext' fallback was nested inside an
                # ``if part.get_filename():`` guard and therefore
                # unreachable; it has been removed as dead code.
                continue
            data = part.get_payload(decode=True)
            if not data:
                continue
            attachments.append((filename, data, part.get_content_type()))
        return attachments
    def __str__(self):
        # Bug fix: the original interpolated self.title.encode("utf8"),
        # which under Python 3 renders a bytes repr (b'...') into the
        # result.  The decoded title is already text.
        template = "{date}", "{sender}", "{title}"
        represent = " || ".join(template).format(
            date=self.date,
            sender=self.sender,
            title=self.title
        )
        return represent
class Imapper(object):
    """Convenience wrapper around ``imaplib`` for fetching mail.

    kwargs:
        fetch_message_parts -- FETCH spec; default '(UID RFC822)' pulls
                               the UID plus the full message text
        read_only           -- open the mailbox read-only
    """
    def __init__(self, host, user, password, mailbox, timeout, ssl, port, **kwargs):
        self._fetch_message_parts = kwargs.get("fetch_message_parts", "(UID RFC822)")
        self._read_only = kwargs.get("read_only", False)
        self._mailer = self._get_mailer(host, user, password, mailbox, timeout, ssl, port)
    def _get_mailer(self, host, user, password, mailbox, timeout, ssl, port):
        """Connect (IMAP4 or IMAP4_SSL), log in, and select the mailbox,
        retrying the select until it returns OK or ``timeout`` seconds
        elapse."""
        timeout = time.time() + timeout
        if ssl:
            mailer = imaplib.IMAP4_SSL(host=host, port=port)
        else:
            mailer = imaplib.IMAP4(host=host, port=port)
        mailer.login(user, password)
        while True:
            status, msgs = mailer.select(mailbox, self._read_only)
            if status == 'OK':
                break
            if time.time() > timeout:
                raise Exception("Timeout.")
        return mailer
    def quit(self):
        """close and logout"""
        self._mailer.close()
        self._mailer.logout()
    def change_mailbox(self, mailbox):
        """Select a different mailbox; True on success, False otherwise."""
        status, msgs = self._mailer.select(mailbox, self._read_only)
        if status == 'OK':
            return True
        return False
    def unseen(self, limit=10):
        """Fetch up to ``limit`` unseen messages."""
        return self.listup(limit, 'UNSEEN')
    def listids(self, limit=10, criterion=None):
        """UIDs matching ``criterion`` (default 'ALL'), newest first,
        at most ``limit`` of them."""
        criterion = criterion or 'ALL'
        status, msgs = self._mailer.uid('search', None, criterion)
        if status == 'OK':
            email_ids = msgs[0].split()
            start = min(len(email_ids), limit)
            # Take the last ``start`` ids in reverse (newest first).
            return email_ids[-1:-(start + 1):-1]
        else:
            raise Exception("Could not get ALL")
    def listup(self, limit=10, criterion=None, include_raw=False):
        """Fetch and parse up to ``limit`` messages matching ``criterion``;
        returns a list of MailObj."""
        email_ids = self.listids(limit, criterion)
        result = []
        for i in email_ids:
            typ, content = self._mailer.uid('fetch', i, self._fetch_message_parts)
            if typ == 'OK':
                mail = _parse_email(content, include_raw=include_raw)
                result.append(mail)
        return result
    def mail(self, uid, include_raw=False):
        """returns MailObj by specified id"""
        typ, content = self._mailer.uid('fetch', uid, self._fetch_message_parts)
        if typ == 'OK':
            mail = _parse_email(content, include_raw=include_raw)
            return mail
        else:
            raise Exception("Could not get email.")
def connect(host, user, password, mailbox='INBOX', timeout=15, ssl=True, port=993, **kwargs):
    """Create an Imapper logged in to ``host`` with ``mailbox`` selected.

    Extra kwargs (fetch_message_parts, read_only) are passed through to
    Imapper.
    """
    return Imapper(host, user, password, mailbox, timeout, ssl, port, **kwargs)
def _decode_header(data):
    """Decode an RFC 2047 MIME header value into one unicode string.

    Returns None for a missing header.  Fragments already decoded to text
    are used as-is; byte fragments are decoded with their declared
    charset, falling back to chardet detection, and finally kept raw.
    """
    if data is None:
        return
    pieces = []
    for fragment, charset in decode_header(data):
        if isinstance(fragment, unicode):
            pieces.append(fragment)
            continue
        if charset:
            pieces.append(unicode(fragment, charset))
            continue
        guessed = chardet.detect(fragment)
        if guessed.get('encoding'):
            pieces.append(unicode(fragment, guessed['encoding']))
        else:
            pieces.append(fragment)
    return "".join(pieces)
def _decode_body(part):
    """Decode a message part's payload to text.

    Tries the part's declared charset first; when the charset is missing
    or decoding fails, falls back to chardet detection, and finally
    returns the raw payload unchanged.
    """
    # Bug fix: the original wrapped get_content_charset() in str(), which
    # turned a missing charset (None) into the truthy string 'None'; the
    # decode then always raised and was silenced by a bare ``except``,
    # so the chardet path was reached only by accident.
    charset = part.get_content_charset()
    payload = part.get_payload(decode=True)
    if charset:
        try:
            return unicode(payload, charset)
        except (LookupError, UnicodeDecodeError, TypeError):
            pass
    encoding = chardet.detect(payload)
    if encoding.get('encoding'):
        return unicode(payload, encoding['encoding'])
    return payload
def _parse_email(data, include_raw=False):
    """Build a MailObj from one IMAP FETCH response item.

    ``data[0][0]`` is the envelope line (e.g. ``1 (UID 42 RFC822 {...}``)
    from which the UID is extracted; ``data[0][1]`` is the message text.
    Byte payloads are decoded via chardet before parsing.
    """
    string_or_bytes_message = data[0][1]
    string_or_bytes_uid = data[0][0]
    if not isinstance(string_or_bytes_message, str):
        encoding = chardet.detect(string_or_bytes_message)
        string_or_bytes_message = string_or_bytes_message.decode(encoding.get('encoding'))
    if not isinstance(string_or_bytes_uid, str):
        encoding = chardet.detect(string_or_bytes_uid)
        string_or_bytes_uid = string_or_bytes_uid.decode(encoding.get('encoding'))
    message = email.message_from_string(string_or_bytes_message)
    # Bug fix: the previous pattern '[UID ](\\d+)' used a character class,
    # matching digits after any single 'U', 'I', 'D' or space rather than
    # the literal 'UID ' token; also use a raw string so \d is a proper
    # regex escape.
    uid = re.findall(r'UID (\d+)', string_or_bytes_uid)
    args = {}
    if uid:
        args['uid'] = int(uid[0])
    if include_raw:
        args['raw'] = data[0][1]
    return MailObj(message, **args)
| 31.490421 | 93 | 0.602993 |
import imaplib
import email
from email.header import decode_header
import time
import re
import mimetypes
import chardet
try:
unicode('')
except NameError:
unicode = str
class MailObj(object):
def __init__(self, message, uid=-1, raw=''):
self._message = message
self._uid = uid if uid > -1 else None
self._raw = raw if raw else None
@property
def uid(self):
return self._uid
@property
def title(self):
return _decode_header(self._message.get('Subject'))
@property
def to(self):
return _decode_header(self._message.get('To'))
@property
def from_addr(self):
return _decode_header(self._message.get('From'))
@property
def sender(self):
return _decode_header(self._message.get('Sender'))
@property
def cc(self):
return _decode_header(self._message.get('CC'))
@property
def delivered_to(self):
return _decode_header(self._message.get('Delivered-To'))
@property
def content_type(self):
return _decode_header(self._message.get('Content-Type'))
@property
def content_transfer_encoding(self):
return _decode_header(self._message.get('Content-Transfer-Encoding'))
@property
def references(self):
return _decode_header(self._message.get('References'))
@property
def in_reply_to(self):
return _decode_header(self._message.get('In-Reply-To'))
@property
def reply_to(self):
return _decode_header(self._message.get('Reply-To'))
@property
def return_path(self):
return _decode_header(self._message.get('Return-Path'))
@property
def mime_version(self):
return _decode_header(self._message.get('MIME-Version'))
@property
def message_id(self):
return _decode_header(self._message.get('Message-ID'))
@property
def date(self):
return _decode_header(self._message.get('Date'))
@property
def raw(self):
return self._raw
@property
def body(self):
for part in self._message.walk():
maintype = part.get_content_maintype()
if maintype != 'multipart' and not part.get_filename():
return _decode_body(part)
if maintype == 'multipart':
for p in part.get_payload():
if p.get_content_maintype() == 'text':
return _decode_body(p)
raise Exception("orz... something... something happened.")
@property
def attachments(self):
counter = 1
attachments = []
for part in self._message.walk():
if part.get_content_maintype() == 'multipart':
continue
if part.get_filename():
filename = part.get_filename()
if not filename:
ext = mimetypes.guess_extension(part.get_content_type())
if not ext:
ext = '.bin'
filename = 'part-%03d%s' % (counter, ext)
counter += 1
data = part.get_payload(decode=True)
content_type = part.get_content_type()
if not data:
continue
attachments.append((filename, data, content_type))
return attachments
def __str__(self):
template = "{date}", "{sender}", "{title}"
represent = " || ".join(template).format(
date=self.date,
sender=self.sender,
title=self.title.encode("utf8")
)
return represent
class Imapper(object):
def __init__(self, host, user, password, mailbox, timeout, ssl, port, **kwargs):
self._fetch_message_parts = kwargs.get("fetch_message_parts", "(UID RFC822)")
self._read_only = kwargs.get("read_only", False)
self._mailer = self._get_mailer(host, user, password, mailbox, timeout, ssl, port)
def _get_mailer(self, host, user, password, mailbox, timeout, ssl, port):
timeout = time.time() + timeout
if ssl:
mailer = imaplib.IMAP4_SSL(host=host, port=port)
else:
mailer = imaplib.IMAP4(host=host, port=port)
mailer.login(user, password)
while True:
status, msgs = mailer.select(mailbox, self._read_only)
if status == 'OK':
break
if time.time() > timeout:
raise Exception("Timeout.")
return mailer
def quit(self):
self._mailer.close()
self._mailer.logout()
def change_mailbox(self, mailbox):
status, msgs = self._mailer.select(mailbox, self._read_only)
if status == 'OK':
return True
return False
def unseen(self, limit=10):
return self.listup(limit, 'UNSEEN')
def listids(self, limit=10, criterion=None):
criterion = criterion or 'ALL'
status, msgs = self._mailer.uid('search', None, criterion)
if status == 'OK':
email_ids = msgs[0].split()
start = min(len(email_ids), limit)
return email_ids[-1:-(start + 1):-1]
else:
raise Exception("Could not get ALL")
def listup(self, limit=10, criterion=None, include_raw=False):
email_ids = self.listids(limit, criterion)
result = []
for i in email_ids:
typ, content = self._mailer.uid('fetch', i, self._fetch_message_parts)
if typ == 'OK':
mail = _parse_email(content, include_raw=include_raw)
result.append(mail)
return result
def mail(self, uid, include_raw=False):
typ, content = self._mailer.uid('fetch', uid, self._fetch_message_parts)
if typ == 'OK':
mail = _parse_email(content, include_raw=include_raw)
return mail
else:
raise Exception("Could not get email.")
def connect(host, user, password, mailbox='INBOX', timeout=15, ssl=True, port=993, **kwargs):
return Imapper(host, user, password, mailbox, timeout, ssl, port, **kwargs)
def _decode_header(data):
if data is None:
return
decoded_headers = decode_header(data)
headers = []
for decoded_str, charset in decoded_headers:
if isinstance(decoded_str, unicode):
headers.append(decoded_str)
elif charset:
headers.append(unicode(decoded_str, charset))
else:
encoding = chardet.detect(decoded_str)
if encoding.get('encoding'):
headers.append(unicode(decoded_str, encoding['encoding']))
else:
headers.append(decoded_str)
return "".join(headers)
def _decode_body(part):
charset = str(part.get_content_charset())
payload = part.get_payload(decode=True)
try:
body = unicode(payload, charset) if charset else part.get_payload()
except:
encoding = chardet.detect(payload)
if encoding.get('encoding'):
body = unicode(payload, encoding['encoding'])
else:
body = payload
return body
def _parse_email(data, include_raw=False):
string_or_bytes_message = data[0][1]
string_or_bytes_uid = data[0][0]
if not isinstance(string_or_bytes_message, str):
encoding = chardet.detect(string_or_bytes_message)
string_or_bytes_message = string_or_bytes_message.decode(encoding.get('encoding'))
if not isinstance(string_or_bytes_uid, str):
encoding = chardet.detect(string_or_bytes_uid)
string_or_bytes_uid = string_or_bytes_uid.decode(encoding.get('encoding'))
message = email.message_from_string(string_or_bytes_message)
uid = re.findall('[UID ](\d+)', string_or_bytes_uid)
args = {}
if uid:
args['uid'] = int(uid[0])
if include_raw:
args['raw'] = data[0][1]
return MailObj(message, **args)
| true | true |
f722463f411f8a127a6d80dda1849d6cfca25b76 | 981 | py | Python | estoque/migrations/0002_auto_20191003_1822.py | lucasgsouza03/NGKS_Shop | 002efbb141a74c1df97479e6bd6f112e4617ff3d | [
"Unlicense"
] | 1 | 2018-03-23T23:08:21.000Z | 2018-03-23T23:08:21.000Z | estoque/migrations/0002_auto_20191003_1822.py | lucasgsouza03/NGKS_Shop | 002efbb141a74c1df97479e6bd6f112e4617ff3d | [
"Unlicense"
] | 7 | 2018-09-25T02:59:08.000Z | 2022-03-11T23:35:05.000Z | estoque/migrations/0002_auto_20191003_1822.py | lucasgsouza03/NGKS_Shop | 002efbb141a74c1df97479e6bd6f112e4617ff3d | [
"Unlicense"
] | 2 | 2018-07-10T19:00:34.000Z | 2018-08-14T21:05:47.000Z | # Generated by Django 2.0.6 on 2019-10-03 21:22
from django.db import migrations, models
import django.db.models.expressions
class Migration(migrations.Migration):
    """Auto-generated (2019-10-03): adds the Telefone_Fornecedor model,
    Fornecedor.endereco, and the Telefone_Fornecedor -> Fornecedor FK."""
    dependencies = [
        ('estoque', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Telefone_Fornecedor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('numero', models.IntegerField(null=True)),
            ],
        ),
        migrations.AddField(
            model_name='fornecedor',
            name='endereco',
            field=models.CharField(max_length=150, null=True),
        ),
        # NOTE(review): on_delete=django.db.models.expressions.Case looks
        # like an autocomplete slip -- Case is a query expression, not a
        # deletion handler.  Presumably django.db.models.CASCADE was
        # intended; confirm before building on this migration.
        migrations.AddField(
            model_name='telefone_fornecedor',
            name='fornecedor',
            field=models.ForeignKey(on_delete=django.db.models.expressions.Case, to='estoque.fornecedor', verbose_name='Telefone Fornecedor'),
        ),
    ]
| 30.65625 | 142 | 0.602446 |
from django.db import migrations, models
import django.db.models.expressions
class Migration(migrations.Migration):
    """Auto-generated migration: adds Telefone_Fornecedor and Fornecedor.endereco."""

    dependencies = [
        ('estoque', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Telefone_Fornecedor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('numero', models.IntegerField(null=True)),
            ],
        ),
        migrations.AddField(
            model_name='fornecedor',
            name='endereco',
            field=models.CharField(max_length=150, null=True),
        ),
        migrations.AddField(
            model_name='telefone_fornecedor',
            name='fornecedor',
            # NOTE(review): on_delete is set to the Case *expression* class;
            # generated foreign keys normally use django.db.models.deletion.CASCADE.
            # Confirm this is intentional before shipping.
            field=models.ForeignKey(on_delete=django.db.models.expressions.Case, to='estoque.fornecedor', verbose_name='Telefone Fornecedor'),
        ),
    ]
| true | true |
f72246c907433f3935cdad81e72759bec38c89c6 | 414 | py | Python | server/db/models/book.py | thinkjones/AFSP-Seed | 5a7766960c01394902c4898b764580b0320b6941 | [
"MIT",
"Unlicense"
] | 1 | 2020-02-23T07:48:46.000Z | 2020-02-23T07:48:46.000Z | server/db/models/book.py | thinkjones/AFSP-Seed | 5a7766960c01394902c4898b764580b0320b6941 | [
"MIT",
"Unlicense"
] | null | null | null | server/db/models/book.py | thinkjones/AFSP-Seed | 5a7766960c01394902c4898b764580b0320b6941 | [
"MIT",
"Unlicense"
] | null | null | null | from sqlalchemy import Column, Integer, String
from server.db.database import Base
class Book(Base):
    """A book row in the library catalog (table ``books``)."""
    __tablename__ = 'books'
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Book title; unique across the table.
    name = Column(String(50), unique=True)

    def __init__(self, name=None, email=None):
        """
        :param name: Book title.
        :param email: Kept for backward compatibility; no ``email`` column is
            mapped on this model, so the value is never persisted.
        """
        self.name = name
        self.email = email

    def __repr__(self):
        # Fix: this model is Book, not User (copy-paste slip in the original).
        return '<Book %r>' % (self.name)
| 24.352941 | 46 | 0.640097 | from sqlalchemy import Column, Integer, String
from server.db.database import Base
class Book(Base):
__tablename__ = 'books'
id = Column(Integer, primary_key=True)
name = Column(String(50), unique=True)
def __init__(self, name=None, email=None):
self.name = name
self.email = email
def __repr__(self):
return '<User %r>' % (self.name)
| true | true |
f72246ca5c7d73abd9b2276530d5bd66524ba63b | 75,573 | py | Python | sockeye/arguments.py | hanayashiki/sockeye | 391996eec407d5c1c928e0b9cadd8881b846ac61 | [
"Apache-2.0"
] | null | null | null | sockeye/arguments.py | hanayashiki/sockeye | 391996eec407d5c1c928e0b9cadd8881b846ac61 | [
"Apache-2.0"
] | null | null | null | sockeye/arguments.py | hanayashiki/sockeye | 391996eec407d5c1c928e0b9cadd8881b846ac61 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017, 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Defines commandline arguments for the main CLIs with reasonable defaults.
"""
import argparse
import os
import sys
import types
import yaml
from typing import Any, Callable, Dict, List, Tuple, Optional
from . import constants as C
from . import data_io
from .lr_scheduler import LearningRateSchedulerFixedStep
class ConfigArgumentParser(argparse.ArgumentParser):
    """
    Extension of argparse.ArgumentParser supporting config files.

    The option --config is added automatically and expects a YAML serialized
    dictionary, similar to the return value of parse_args(). Command line
    parameters have precedence over config file values. Usage should be
    transparent, just substitute argparse.ArgumentParser with this class.

    Extended from
    https://stackoverflow.com/questions/28579661/getting-required-option-from-namespace-in-python
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.argument_definitions = {}  # type: Dict[Tuple, Dict]
        self.argument_actions = []  # type: List[Any]
        self._overwrite_add_argument(self)
        self.add_argument("--config", help="Config file in YAML format.", type=str)
        # Note: not FileType so that we can get the path here

    def _register_argument(self, _action, *args, **kwargs):
        # Remember the definition and Action so parse_args can later clear the
        # 'required' flag for arguments already supplied by the config file.
        self.argument_definitions[args] = kwargs
        self.argument_actions.append(_action)

    def _overwrite_add_argument(self, original_object):
        # Wraps original_object.add_argument so every registered argument is
        # also recorded on this parser (the config container).
        def _new_add_argument(this_self, *args, **kwargs):
            action = this_self.original_add_argument(*args, **kwargs)
            this_self.config_container._register_argument(action, *args, **kwargs)
            # Fix: preserve argparse's contract of returning the created Action;
            # the previous wrapper silently returned None.
            return action

        original_object.original_add_argument = original_object.add_argument
        original_object.config_container = self
        original_object.add_argument = types.MethodType(_new_add_argument, original_object)
        return original_object

    def add_argument_group(self, *args, **kwargs):
        group = super().add_argument_group(*args, **kwargs)
        return self._overwrite_add_argument(group)

    def parse_args(self, args=None, namespace=None) -> argparse.Namespace:
        # Mini argument parser to find the config file
        config_parser = argparse.ArgumentParser(add_help=False)
        config_parser.add_argument("--config", type=regular_file())
        config_args, _ = config_parser.parse_known_args(args=args)
        initial_args = argparse.Namespace()
        if config_args.config:
            initial_args = load_args(config_args.config)
            # Remove the 'required' flag from options loaded from config file
            for action in self.argument_actions:
                if action.dest in initial_args:
                    action.required = False
        return super().parse_args(args=args, namespace=initial_args)
def save_args(args: argparse.Namespace, fname: str):
    """Serialize the given argument namespace to a YAML file at ``fname``."""
    with open(fname, 'w') as fout:
        yaml.safe_dump(vars(args), fout, default_flow_style=False)
def load_args(fname: str) -> argparse.Namespace:
    """Load an argument namespace previously written by :func:`save_args`."""
    with open(fname) as fin:
        loaded = yaml.safe_load(fin)
    return argparse.Namespace(**loaded)
def regular_file() -> Callable:
    """
    Returns a method that can be used in argument parsing to check the argument is a regular file or a symbolic link,
    but not, e.g., a process substitution.

    :return: A method that can be used as a type in argparse.
    """

    def validate(raw_path):
        path_str = str(raw_path)
        if os.path.isfile(path_str):
            return path_str
        raise argparse.ArgumentTypeError("must exist and be a regular file.")

    return validate
def regular_folder() -> Callable:
    """
    Returns a method that can be used in argument parsing to check the argument is a directory.

    :return: A method that can be used as a type in argparse.
    """

    def validate(raw_path):
        path_str = str(raw_path)
        if os.path.isdir(path_str):
            return path_str
        raise argparse.ArgumentTypeError("must be a directory.")

    return validate
def int_greater_or_equal(threshold: int) -> Callable:
    """
    Returns a method that can be used in argument parsing to check that the argument is greater or equal to `threshold`.

    :param threshold: The threshold that we assume the cli argument value is greater or equal to.
    :return: A method that can be used as a type in argparse.
    """

    def validate(raw_value):
        parsed = int(raw_value)
        if parsed >= threshold:
            return parsed
        raise argparse.ArgumentTypeError("must be greater or equal to %d." % threshold)

    return validate
def learning_schedule() -> Callable:
    """
    Returns a method that can be used in argument parsing to check that the argument is a valid learning rate schedule
    string.

    :return: A method that can be used as a type in argparse.
    """

    def _to_schedule(schedule_str):
        try:
            return LearningRateSchedulerFixedStep.parse_schedule_str(schedule_str)
        except ValueError:
            raise argparse.ArgumentTypeError(
                "Learning rate schedule string should have form rate1:num_updates1[,rate2:num_updates2,...]")

    return _to_schedule
def simple_dict() -> Callable:
    """
    A simple dictionary format that does not require spaces or quoting.

    Supported types: bool, int, float

    :return: A method that can be used as a type in argparse.
    """

    def parse(dict_str: str):
        # Convert a raw value string into bool, float, or int.
        def _convert(raw: str):
            if raw == "True":
                return True
            if raw == "False":
                return False
            return float(raw) if "." in raw else int(raw)

        parsed = dict()
        try:
            for pair in dict_str.split(","):
                key, raw_value = pair.split(":")
                parsed[key] = _convert(raw_value)
        except ValueError:
            raise argparse.ArgumentTypeError("Specify argument dictionary as key1:value1,key2:value2,..."
                                             " Supported types: bool, int, float.")
        return parsed

    return parse
def multiple_values(num_values: int = 0,
                    greater_or_equal: Optional[float] = None,
                    data_type: Callable = int) -> Callable:
    """
    Returns a method to be used in argument parsing to parse a string of the form "<val>:<val>[:<val>...]" into
    a tuple of values of type data_type.

    :param num_values: Optional number of ints required.
    :param greater_or_equal: Optional constraint that all values should be greater or equal to this value.
    :param data_type: Type of values. Default: int.
    :return: Method for parsing.
    """

    def parse(raw):
        if ':' in raw:
            # When a fixed count is requested, the separator count must match.
            sep_count_expected = num_values - 1 if num_values else 0
            if sep_count_expected > 0 and (raw.count(':') != sep_count_expected):
                raise argparse.ArgumentTypeError("Expected either a single value or %d values separated by %s" %
                                                 (num_values, C.ARG_SEPARATOR))
            parsed = tuple(data_type(part) for part in raw.split(C.ARG_SEPARATOR, num_values - 1))
        else:
            # A single value is broadcast to all requested positions.
            parsed = (data_type(raw),) * num_values
        if greater_or_equal is not None:
            if any(value < greater_or_equal for value in parsed):
                raise argparse.ArgumentTypeError("Must provide value greater or equal to %d" % greater_or_equal)
        return parsed

    return parse
def file_or_stdin() -> Callable:
    """
    Returns a file descriptor from stdin or opening a file from a given path.
    """

    def _open(path):
        use_stdin = path is None or path == "-"
        return sys.stdin if use_stdin else data_io.smart_open(path)

    return _open
def add_average_args(params):
    """Add CLI arguments for checkpoint averaging to the given parser.

    :param params: ``argparse.ArgumentParser`` (or group) to extend.
    """
    average_params = params.add_argument_group("Averaging")
    average_params.add_argument(
        "inputs",
        metavar="INPUT",
        type=str,
        nargs="+",
        help="either a single model directory (automatic checkpoint selection) "
             "or multiple .params files (manual checkpoint selection)")
    average_params.add_argument(
        "--metric",
        help="Name of the metric to choose n-best checkpoints from. Default: %(default)s.",
        default=C.PERPLEXITY,
        choices=C.METRICS)
    average_params.add_argument(
        "-n",
        type=int,
        default=4,
        help="number of checkpoints to find. Default: %(default)s.")
    average_params.add_argument(
        "--output", "-o", required=True, type=str, help="File to write averaged parameters to.")
    average_params.add_argument(
        "--strategy",
        choices=["best", "last", "lifespan"],
        default="best",
        help="selection method. Default: %(default)s.")
def add_extract_args(params):
    """Add CLI arguments for extracting model parameters to .npz format.

    :param params: ``argparse.ArgumentParser`` (or group) to extend.
    """
    extract_params = params.add_argument_group("Extracting")
    extract_params.add_argument("input",
                                metavar="INPUT",
                                type=str,
                                help="Either a model directory (using params.best) or a specific params.x file.")
    extract_params.add_argument('--names', '-n',
                                nargs='*',
                                default=[],
                                help='Names of parameters to be extracted.')
    extract_params.add_argument('--list-all', '-l',
                                action='store_true',
                                help='List names of all available parameters.')
    extract_params.add_argument('--output', '-o',
                                type=str,
                                help="File to write extracted parameters to (in .npz format).")
def add_rerank_args(params):
    """Add CLI arguments for reranking nbest translation lists.

    :param params: ``argparse.ArgumentParser`` (or group) to extend.
    """
    rerank_params = params.add_argument_group("Reranking")
    rerank_params.add_argument("--reference", "-r",
                               type=str,
                               required=True,
                               help="File where target reference translations are stored.")
    rerank_params.add_argument("--hypotheses", "-hy",
                               type=str,
                               required=True,
                               help="File with nbest translations, one nbest list per line,"
                                    "in JSON format as returned by sockeye.translate with --nbest-size x.")
    rerank_params.add_argument("--metric", "-m",
                               type=str,
                               required=False,
                               default=C.RERANK_BLEU,
                               choices=C.RERANK_METRICS,
                               help="Sentence-level metric used to compare each nbest translation to the reference."
                                    "Default: %(default)s.")
    rerank_params.add_argument("--output-best",
                               action="store_true",
                               help="Output only the best hypothesis from each nbest list.")
    rerank_params.add_argument("--return-score",
                               action="store_true",
                               help="Returns the reranking scores as scores in output JSON objects.")
def add_lexicon_args(params):
    """Add model-directory and top-k arguments shared by the lexicon CLIs.

    :param params: ``argparse.ArgumentParser`` (or group) to extend.
    """
    lexicon_params = params.add_argument_group("Model & Top-k")
    lexicon_params.add_argument("--model", "-m", required=True,
                                help="Model directory containing source and target vocabularies.")
    lexicon_params.add_argument("-k", type=int, default=200,
                                help="Number of target translations to keep per source. Default: %(default)s.")
def add_lexicon_create_args(params):
    """Add input/output arguments for building a top-k lexicon.

    :param params: ``argparse.ArgumentParser`` (or group) to extend.
    """
    lexicon_params = params.add_argument_group("I/O")
    lexicon_params.add_argument("--input", "-i", required=True,
                                help="Probabilistic lexicon (fast_align format) to build top-k lexicon from.")
    lexicon_params.add_argument("--output", "-o", required=True, help="File name to write top-k lexicon to.")
def add_lexicon_inspect_args(params):
    """Add the argument selecting which top-k lexicon file to inspect.

    :param params: ``argparse.ArgumentParser`` (or group) to extend.
    """
    lexicon_params = params.add_argument_group("Lexicon to inspect")
    lexicon_params.add_argument("--lexicon", "-l", required=True, help="File name of top-k lexicon to inspect.")
def add_logging_args(params):
    """Add logging-related CLI arguments (currently only --quiet).

    :param params: ``argparse.ArgumentParser`` (or group) to extend.
    """
    logging_params = params.add_argument_group("Logging")
    logging_params.add_argument('--quiet', '-q',
                                default=False,
                                action="store_true",
                                help='Suppress console logging.')
def add_training_data_args(params, required=False):
    """Add source/target parallel training data arguments (plus source factors).

    :param params: ``argparse.ArgumentParser`` (or group) to extend.
    :param required: Whether the source/target arguments are mandatory.
    """
    params.add_argument(C.TRAINING_ARG_SOURCE, '-s',
                        required=required,
                        type=regular_file(),
                        help='Source side of parallel training data.')
    params.add_argument('--source-factors', '-sf',
                        required=False,
                        nargs='+',
                        type=regular_file(),
                        default=[],
                        help='File(s) containing additional token-parallel source side factors. Default: %(default)s.')
    params.add_argument(C.TRAINING_ARG_TARGET, '-t',
                        required=required,
                        type=regular_file(),
                        help='Target side of parallel training data.')
def add_validation_data_params(params):
    """Add validation source/target data arguments (plus source factors).

    :param params: ``argparse.ArgumentParser`` (or group) to extend.
    """
    params.add_argument('--validation-source', '-vs',
                        required=True,
                        type=regular_file(),
                        help='Source side of validation data.')
    params.add_argument('--validation-source-factors', '-vsf',
                        required=False,
                        nargs='+',
                        type=regular_file(),
                        default=[],
                        help='File(s) containing additional token-parallel validation source side factors. '
                             'Default: %(default)s.')
    params.add_argument('--validation-target', '-vt',
                        required=True,
                        type=regular_file(),
                        help='Target side of validation data.')
def add_prepared_data_args(params):
    """Add the argument pointing at a directory created by sockeye.prepare_data.

    :param params: ``argparse.ArgumentParser`` (or group) to extend.
    """
    params.add_argument(C.TRAINING_ARG_PREPARED_DATA, '-d',
                        type=regular_folder(),
                        help='Prepared training data directory created through python -m sockeye.prepare_data.')
def add_monitoring_args(params):
    """Add arguments controlling monitoring of outputs/weights/gradients.

    :param params: ``argparse.ArgumentParser`` (or group) to extend.
    """
    params.add_argument('--monitor-pattern',
                        default=None,
                        type=str,
                        help="Pattern to match outputs/weights/gradients to monitor. '.*' monitors everything. "
                             "Default: %(default)s.")

    params.add_argument('--monitor-stat-func',
                        default=C.STAT_FUNC_DEFAULT,
                        choices=list(C.MONITOR_STAT_FUNCS.keys()),
                        help="Statistics function to run on monitored outputs/weights/gradients. "
                             "Default: %(default)s.")
def add_training_output_args(params):
    """Add output-folder arguments for training runs.

    :param params: ``argparse.ArgumentParser`` (or group) to extend.
    """
    params.add_argument('--output', '-o',
                        required=True,
                        help='Folder where model & training results are written to.')
    params.add_argument('--overwrite-output',
                        action='store_true',
                        help='Delete all contents of the model directory if it already exists.')
def add_training_io_args(params):
    """Group all data & I/O related CLI arguments for training into one section.

    :param params: ``argparse.ArgumentParser`` to extend.
    """
    params = params.add_argument_group("Data & I/O")

    # Unfortunately we must set --source/--target to not required as we either accept these parameters
    # or --prepared-data which can not easily be encoded in argparse.
    add_training_data_args(params, required=False)
    add_prepared_data_args(params)
    add_validation_data_params(params)
    add_bucketing_args(params)
    add_vocab_args(params)
    add_training_output_args(params)
    add_monitoring_args(params)
def add_bucketing_args(params):
    """Add bucketing and maximum-sequence-length arguments.

    :param params: ``argparse.ArgumentParser`` (or group) to extend.
    """
    params.add_argument('--no-bucketing',
                        action='store_true',
                        help='Disable bucketing: always unroll the graph to --max-seq-len. Default: %(default)s.')

    params.add_argument('--bucket-width',
                        type=int_greater_or_equal(1),
                        default=10,
                        help='Width of buckets in tokens. Default: %(default)s.')

    params.add_argument(C.TRAINING_ARG_MAX_SEQ_LEN,
                        type=multiple_values(num_values=2, greater_or_equal=1),
                        default=(99, 99),
                        help='Maximum sequence length in tokens.'
                             'Use "x:x" to specify separate values for src&tgt. Default: %(default)s.')
def add_prepare_data_cli_args(params):
    """Add all CLI arguments for the data preparation tool.

    :param params: ``argparse.ArgumentParser`` to extend.
    """
    params = params.add_argument_group("Data preparation.")
    add_training_data_args(params, required=True)
    add_vocab_args(params)
    add_bucketing_args(params)

    params.add_argument('--num-samples-per-shard',
                        type=int_greater_or_equal(1),
                        default=1000000,
                        help='The approximate number of samples per shard. Default: %(default)s.')

    params.add_argument('--min-num-shards',
                        default=1,
                        type=int_greater_or_equal(1),
                        help='The minimum number of shards to use, even if they would not '
                             'reach the desired number of samples per shard. Default: %(default)s.')

    params.add_argument('--seed',
                        type=int,
                        default=13,
                        help='Random seed used that makes shard assignments deterministic. Default: %(default)s.')

    params.add_argument('--output', '-o',
                        required=True,
                        help='Folder where the prepared and possibly sharded data is written to.')
def add_device_args(params):
    """Add device (CPU/GPU) selection and GPU-locking arguments.

    :param params: ``argparse.ArgumentParser`` (or group) to extend.
    """
    device_params = params.add_argument_group("Device parameters")

    device_params.add_argument('--device-ids', default=[-1],
                               help='List or number of GPUs ids to use. Default: %(default)s. '
                                    'Use negative numbers to automatically acquire a certain number of GPUs, e.g. -5 '
                                    'will find 5 free GPUs. '
                                    'Use positive numbers to acquire a specific GPU id on this host. '
                                    '(Note that automatic acquisition of GPUs assumes that all GPU processes on '
                                    'this host are using automatic sockeye GPU acquisition).',
                               nargs='+', type=int)
    device_params.add_argument('--use-cpu',
                               action='store_true',
                               help='Use CPU device instead of GPU.')
    device_params.add_argument('--disable-device-locking',
                               action='store_true',
                               help='Just use the specified device ids without locking.')
    device_params.add_argument('--lock-dir',
                               default="/tmp",
                               help='When acquiring a GPU we do file based locking so that only one Sockeye process '
                                    'can run on the a GPU. This is the folder in which we store the file '
                                    'locks. For locking to work correctly it is assumed all processes use the same '
                                    'lock directory. The only requirement for the directory are file '
                                    'write permissions.')
def add_vocab_args(params):
    """Add vocabulary-related arguments (paths, sharing, size limits).

    :param params: ``argparse.ArgumentParser`` (or group) to extend.
    """
    params.add_argument('--source-vocab',
                        required=False,
                        default=None,
                        help='Existing source vocabulary (JSON).')
    params.add_argument('--target-vocab',
                        required=False,
                        default=None,
                        help='Existing target vocabulary (JSON).')
    params.add_argument('--source-factor-vocabs',
                        required=False,
                        nargs='+',
                        type=regular_file(),
                        default=[],
                        help='Existing source factor vocabulary (-ies) (JSON).')
    params.add_argument(C.VOCAB_ARG_SHARED_VOCAB,
                        action='store_true',
                        default=False,
                        help='Share source and target vocabulary. '
                             'Will be automatically turned on when using weight tying. Default: %(default)s.')
    params.add_argument('--num-words',
                        type=multiple_values(num_values=2, greater_or_equal=0),
                        default=(0, 0),
                        help='Maximum vocabulary size. Use "x:x" to specify separate values for src&tgt. '
                             'A value of 0 indicates that the vocabulary unrestricted and determined from the data by '
                             'creating an entry for all words that occur at least --word-min-count times.'
                             'Default: %(default)s.')
    params.add_argument('--word-min-count',
                        type=multiple_values(num_values=2, greater_or_equal=1),
                        default=(1, 1),
                        help='Minimum frequency of words to be included in vocabularies. Default: %(default)s.')
    params.add_argument('--pad-vocab-to-multiple-of',
                        type=int,
                        default=None,
                        help='Pad vocabulary to a multiple of this integer. Default: %(default)s.')
def add_model_parameters(params):
    """Add all model architecture arguments: encoder/decoder choice, CNN/RNN/
    transformer hyper-parameters, LHUC, embeddings, attention, weight tying and
    normalization options.

    :param params: ``argparse.ArgumentParser`` (or group) to extend.
    """
    model_params = params.add_argument_group("ModelConfig")

    model_params.add_argument('--params', '-p',
                              type=str,
                              default=None,
                              help='Initialize model parameters from file. Overrides random initializations.')
    model_params.add_argument('--allow-missing-params',
                              action="store_true",
                              default=False,
                              help="Allow missing parameters when initializing model parameters from file. "
                                   "Default: %(default)s.")

    model_params.add_argument('--encoder',
                              choices=C.ENCODERS,
                              default=C.TRANSFORMER_TYPE,
                              help="Type of encoder. Default: %(default)s.")
    model_params.add_argument('--decoder',
                              choices=C.DECODERS,
                              default=C.TRANSFORMER_TYPE,
                              help="Type of encoder. Default: %(default)s.")

    model_params.add_argument('--num-layers',
                              type=multiple_values(num_values=2, greater_or_equal=1),
                              default=(6, 6),
                              help='Number of layers for encoder & decoder. '
                                   'Use "x:x" to specify separate values for encoder & decoder. Default: %(default)s.')

    model_params.add_argument('--conv-embed-output-dim',
                              type=int_greater_or_equal(1),
                              default=None,
                              help="Project segment embeddings to this size for ConvolutionalEmbeddingEncoder. Omit to"
                                   " avoid projection, leaving segment embeddings total size of all filters. Default:"
                                   " %(default)s.")
    model_params.add_argument('--conv-embed-max-filter-width',
                              type=int_greater_or_equal(1),
                              default=8,
                              help="Maximum filter width for ConvolutionalEmbeddingEncoder. Default: %(default)s.")
    model_params.add_argument('--conv-embed-num-filters',
                              type=multiple_values(greater_or_equal=1),
                              default=(200, 200, 250, 250, 300, 300, 300, 300),
                              help="List of number of filters of each width 1..max for ConvolutionalEmbeddingEncoder. "
                                   "Default: %(default)s.")
    model_params.add_argument('--conv-embed-pool-stride',
                              type=int_greater_or_equal(1),
                              default=5,
                              help="Pooling stride for ConvolutionalEmbeddingEncoder. Default: %(default)s.")
    model_params.add_argument('--conv-embed-num-highway-layers',
                              type=int_greater_or_equal(0),
                              default=4,
                              help="Number of highway layers for ConvolutionalEmbeddingEncoder. Default: %(default)s.")
    model_params.add_argument('--conv-embed-add-positional-encodings',
                              action='store_true',
                              default=False,
                              help="Add positional encodings to final segment embeddings for"
                                   " ConvolutionalEmbeddingEncoder. Default: %(default)s.")

    # convolutional encoder/decoder arguments
    model_params.add_argument('--cnn-kernel-width',
                              type=multiple_values(num_values=2, greater_or_equal=1, data_type=int),
                              default=(3, 3),
                              help='Kernel width of the convolutional encoder and decoder. Default: %(default)s.')
    model_params.add_argument('--cnn-num-hidden',
                              type=int_greater_or_equal(1),
                              default=512,
                              help='Number of hidden units for the convolutional encoder and decoder. '
                                   'Default: %(default)s.')
    model_params.add_argument('--cnn-activation-type',
                              choices=C.CNN_ACTIVATION_TYPES,
                              default=C.GLU,
                              help="Type activation to use for each convolutional layer. Default: %(default)s.")
    model_params.add_argument('--cnn-positional-embedding-type',
                              choices=C.POSITIONAL_EMBEDDING_TYPES,
                              default=C.LEARNED_POSITIONAL_EMBEDDING,
                              help='The type of positional embedding. Default: %(default)s.')
    model_params.add_argument('--cnn-project-qkv',
                              action='store_true',
                              default=False,
                              help="Optionally apply query, key and value projections to the source and target hidden "
                                   "vectors before applying the attention mechanism.")

    # rnn arguments
    model_params.add_argument('--rnn-cell-type',
                              choices=C.CELL_TYPES,
                              default=C.LSTM_TYPE,
                              help='RNN cell type for encoder and decoder. Default: %(default)s.')
    model_params.add_argument('--rnn-num-hidden',
                              type=int_greater_or_equal(1),
                              default=1024,
                              help='Number of RNN hidden units for encoder and decoder. Default: %(default)s.')
    model_params.add_argument('--rnn-encoder-reverse-input',
                              action='store_true',
                              help='Reverse input sequence for RNN encoder. Default: %(default)s.')
    model_params.add_argument('--rnn-decoder-state-init',
                              default=C.RNN_DEC_INIT_LAST,
                              choices=C.RNN_DEC_INIT_CHOICES,
                              help='How to initialize RNN decoder states. Default: %(default)s.')
    model_params.add_argument('--rnn-residual-connections',
                              action="store_true",
                              default=False,
                              help="Add residual connections to stacked RNNs. (see Wu ETAL'16). Default: %(default)s.")
    model_params.add_argument('--rnn-first-residual-layer',
                              type=int_greater_or_equal(2),
                              default=2,
                              help='First RNN layer to have a residual connection. Default: %(default)s.')
    model_params.add_argument('--rnn-context-gating', action="store_true",
                              help="Enables a context gate which adaptively weighs the RNN decoder input against the "
                                   "source context vector before each update of the decoder hidden state.")

    # transformer arguments
    model_params.add_argument('--transformer-model-size',
                              type=multiple_values(num_values=2, greater_or_equal=1),
                              default=(512, 512),
                              help='Number of hidden units in transformer layers. '
                                   'Use "x:x" to specify separate values for encoder & decoder. Default: %(default)s.')
    model_params.add_argument('--transformer-attention-heads',
                              type=multiple_values(num_values=2, greater_or_equal=1),
                              default=(8, 8),
                              help='Number of heads for all self-attention when using transformer layers. '
                                   'Use "x:x" to specify separate values for encoder & decoder. Default: %(default)s.')
    model_params.add_argument('--transformer-feed-forward-num-hidden',
                              type=multiple_values(num_values=2, greater_or_equal=1),
                              default=(2048, 2048),
                              help='Number of hidden units in transformers feed forward layers. '
                                   'Use "x:x" to specify separate values for encoder & decoder. Default: %(default)s.')
    model_params.add_argument('--transformer-activation-type',
                              choices=C.TRANSFORMER_ACTIVATION_TYPES,
                              default=C.RELU,
                              help="Type activation to use for each feed forward layer. Default: %(default)s.")
    model_params.add_argument('--transformer-positional-embedding-type',
                              choices=C.POSITIONAL_EMBEDDING_TYPES,
                              default=C.FIXED_POSITIONAL_EMBEDDING,
                              help='The type of positional embedding. Default: %(default)s.')

    model_params.add_argument('--transformer-preprocess',
                              type=multiple_values(num_values=2, greater_or_equal=None, data_type=str),
                              default=('n', 'n'),
                              help='Transformer preprocess sequence for encoder and decoder. Supports three types of '
                                   'operations: d=dropout, r=residual connection, n=layer normalization. You can '
                                   'combine in any order, for example: "ndr". '
                                   'Leave empty to not use any of these operations. '
                                   'You can specify separate sequences for encoder and decoder by separating with ":" '
                                   'For example: n:drn '
                                   'Default: %(default)s.')
    model_params.add_argument('--transformer-postprocess',
                              type=multiple_values(num_values=2, greater_or_equal=None, data_type=str),
                              default=('dr', 'dr'),
                              help='Transformer postprocess sequence for encoder and decoder. Supports three types of '
                                   'operations: d=dropout, r=residual connection, n=layer normalization. You can '
                                   'combine in any order, for example: "ndr". '
                                   'Leave empty to not use any of these operations. '
                                   'You can specify separate sequences for encoder and decoder by separating with ":" '
                                   'For example: n:drn '
                                   'Default: %(default)s.')

    # LHUC
    # TODO: The convolutional model does not support lhuc yet
    model_params.add_argument('--lhuc',
                              nargs="+",
                              default=None,
                              choices=C.LHUC_CHOICES,
                              metavar="COMPONENT",
                              help="Use LHUC (Vilar 2018). Include an amplitude parameter to hidden units for"
                                   " domain adaptation. Needs a pre-trained model. Valid values: {values}. Currently not"
                                   " supported for convolutional models. Default: %(default)s.".format(
                                  values=", ".join(C.LHUC_CHOICES)))

    # embedding arguments
    model_params.add_argument('--num-embed',
                              type=multiple_values(num_values=2, greater_or_equal=1),
                              default=(512, 512),
                              help='Embedding size for source and target tokens. '
                                   'Use "x:x" to specify separate values for src&tgt. Default: %(default)s.')
    model_params.add_argument('--source-factors-num-embed',
                              type=int,
                              nargs='+',
                              default=[],
                              help='Embedding size for additional source factors. '
                                   'You must provide as many dimensions as '
                                   '(validation) source factor files. Default: %(default)s.')

    # attention arguments
    model_params.add_argument('--rnn-attention-type',
                              choices=C.ATT_TYPES,
                              default=C.ATT_MLP,
                              help='Attention model for RNN decoders. Choices: {%(choices)s}. '
                                   'Default: %(default)s.')
    model_params.add_argument('--rnn-attention-num-hidden',
                              default=None,
                              type=int,
                              help='Number of hidden units for attention layers. Default: equal to --rnn-num-hidden.')
    model_params.add_argument('--rnn-attention-use-prev-word', action="store_true",
                              help="Feed the previous target embedding into the attention mechanism.")
    model_params.add_argument('--rnn-scale-dot-attention',
                              action='store_true',
                              help='Optional scale before dot product. Only applicable to \'dot\' attention type. '
                                   '[Vaswani et al, 2017]')
    model_params.add_argument('--rnn-attention-coverage-type',
                              choices=C.COVERAGE_TYPES,
                              default=C.COVERAGE_COUNT,
                              help="Type of model for updating coverage vectors. 'count' refers to an update method "
                                   "that accumulates attention scores. 'fertility' accumulates attention scores as well "
                                   "but also computes a fertility value for every source word. "
                                   "'tanh', 'sigmoid', 'relu', 'softrelu' "
                                   "use non-linear layers with the respective activation type, and 'gru' uses a "
                                   "GRU to update the coverage vectors. Default: %(default)s.")
    model_params.add_argument('--rnn-attention-coverage-max-fertility',
                              type=int,
                              default=2,
                              help="Maximum fertility for individual source words. Default: %(default)s.")
    model_params.add_argument('--rnn-attention-coverage-num-hidden',
                              type=int,
                              default=1,
                              help="Number of hidden units for coverage vectors. Default: %(default)s.")
    model_params.add_argument('--rnn-attention-in-upper-layers',
                              action="store_true",
                              help="Pass the attention to the upper layers of the RNN decoder, similar "
                                   "to GNMT paper. Only applicable if more than one layer is used.")
    model_params.add_argument('--rnn-attention-mhdot-heads',
                              type=int, default=None,
                              help='Number of heads for Multi-head dot attention. Default: %(default)s.')

    model_params.add_argument('--weight-tying',
                              action='store_true',
                              help='Turn on weight tying (see arxiv.org/abs/1608.05859). '
                                   'The type of weight sharing is determined through '
                                   '--weight-tying-type. Default: %(default)s.')
    model_params.add_argument('--weight-tying-type',
                              default=C.WEIGHT_TYING_TRG_SOFTMAX,
                              choices=[C.WEIGHT_TYING_SRC_TRG_SOFTMAX,
                                       C.WEIGHT_TYING_SRC_TRG,
                                       C.WEIGHT_TYING_TRG_SOFTMAX],
                              help='The type of weight tying. source embeddings=src, target embeddings=trg, '
                                   'target softmax weight matrix=softmax. Default: %(default)s.')

    model_params.add_argument('--layer-normalization', action="store_true",
                              help="Adds layer normalization before non-linear activations. "
                                   "This includes MLP attention, RNN decoder state initialization, "
                                   "RNN decoder hidden state, and cnn layers."
                                   "It does not normalize RNN cell activations "
                                   "(this can be done using the '%s' or '%s' rnn-cell-type." % (C.LNLSTM_TYPE,
                                                                                                C.LNGLSTM_TYPE))

    model_params.add_argument('--weight-normalization', action="store_true",
                              help="Adds weight normalization to decoder output layers "
                                   "(and all convolutional weight matrices for CNN decoders). Default: %(default)s.")
def add_batch_args(params, default_batch_size=4096):
    """Add batch size and batch type arguments.

    :param params: ``argparse.ArgumentParser`` (or group) to extend.
    :param default_batch_size: Default value for --batch-size.
    """
    params.add_argument('--batch-size', '-b',
                        type=int_greater_or_equal(1),
                        default=default_batch_size,
                        help='Mini-batch size. Note that depending on the batch-type this either refers to '
                             'words or sentences.'
                             'Sentence: each batch contains X sentences, number of words varies. '
                             'Word: each batch contains (approximately) X words, number of sentences varies. '
                             'Default: %(default)s.')
    params.add_argument("--batch-type",
                        type=str,
                        default=C.BATCH_TYPE_WORD,
                        choices=[C.BATCH_TYPE_SENTENCE, C.BATCH_TYPE_WORD],
                        help="Sentence: each batch contains X sentences, number of words varies."
                             "Word: each batch contains (approximately) X target words, "
                             "number of sentences varies. Default: %(default)s.")
def add_training_args(params):
    """Add training CLI arguments (loss, stopping criteria, dropout, optimizer,
    weight initialization, learning-rate schedule, checkpointing and misc run
    settings) to ``params`` as a "Training parameters" group."""
    train_params = params.add_argument_group("Training parameters")
    add_batch_args(train_params)
    train_params.add_argument('--decoder-only',
                              action='store_true',
                              help='Pre-train a decoder. This is currently for RNN decoders only. '
                                   'Default: %(default)s.')
    # Legacy/internal option; hidden from --help output via argparse.SUPPRESS.
    train_params.add_argument('--fill-up',
                              type=str,
                              default=C.FILL_UP_DEFAULT,
                              choices=C.FILL_UP_CHOICES,
                              help=argparse.SUPPRESS)
    # Loss and tracked metrics.
    train_params.add_argument('--loss',
                              default=C.CROSS_ENTROPY,
                              choices=[C.CROSS_ENTROPY],
                              help='Loss to optimize. Default: %(default)s.')
    train_params.add_argument('--label-smoothing',
                              default=0.1,
                              type=float,
                              help='Smoothing constant for label smoothing. Default: %(default)s.')
    train_params.add_argument('--loss-normalization-type',
                              default=C.LOSS_NORM_VALID,
                              choices=[C.LOSS_NORM_VALID, C.LOSS_NORM_BATCH],
                              help='How to normalize the loss. By default loss is normalized by the number '
                                   'of valid (non-PAD) tokens (%s).' % C.LOSS_NORM_VALID)
    train_params.add_argument('--metrics',
                              nargs='+',
                              default=[C.PERPLEXITY],
                              choices=[C.PERPLEXITY, C.ACCURACY],
                              help='Names of metrics to track on training and validation data. Default: %(default)s.')
    train_params.add_argument('--optimized-metric',
                              default=C.PERPLEXITY,
                              choices=C.METRICS,
                              help='Metric to optimize with early stopping {%(choices)s}. Default: %(default)s.')
    # Stopping criteria: bounds on updates/samples/epochs plus early stopping.
    train_params.add_argument('--min-updates',
                              type=int,
                              default=None,
                              help='Minimum number of updates before training can stop. Default: %(default)s.')
    train_params.add_argument('--max-updates',
                              type=int,
                              default=None,
                              help='Maximum number of updates. Default: %(default)s.')
    train_params.add_argument('--min-samples',
                              type=int,
                              default=None,
                              help='Minimum number of samples before training can stop. Default: %(default)s.')
    train_params.add_argument('--max-samples',
                              type=int,
                              default=None,
                              help='Maximum number of samples. Default: %(default)s.')
    train_params.add_argument(C.TRAIN_ARGS_CHECKPOINT_FREQUENCY,
                              type=int_greater_or_equal(1),
                              default=4000,
                              help='Checkpoint and evaluate every x updates/batches. Default: %(default)s.')
    train_params.add_argument('--max-num-checkpoint-not-improved',
                              type=int,
                              default=32,
                              help='Maximum number of checkpoints the model is allowed to not improve in '
                                   '<optimized-metric> on validation data before training is stopped. '
                                   'Default: %(default)s.')
    train_params.add_argument('--min-num-epochs',
                              type=int,
                              default=None,
                              help='Minimum number of epochs (passes through the training data) '
                                   'before training can stop. Default: %(default)s.')
    train_params.add_argument('--max-num-epochs',
                              type=int,
                              default=None,
                              help='Maximum number of epochs (passes through the training data) Default: %(default)s.')
    # Dropout settings for the different model families (embeddings/RNN/Transformer/CNN).
    train_params.add_argument('--embed-dropout',
                              type=multiple_values(2, data_type=float),
                              default=(.0, .0),
                              help='Dropout probability for source & target embeddings. Use "x:x" to specify '
                                   'separate values. Default: %(default)s.')
    train_params.add_argument('--rnn-dropout-inputs',
                              type=multiple_values(2, data_type=float),
                              default=(.0, .0),
                              help='RNN variational dropout probability for encoder & decoder RNN inputs. (Gal, 2015)'
                                   'Use "x:x" to specify separate values. Default: %(default)s.')
    train_params.add_argument('--rnn-dropout-states',
                              type=multiple_values(2, data_type=float),
                              default=(.0, .0),
                              help='RNN variational dropout probability for encoder & decoder RNN states. (Gal, 2015)'
                                   'Use "x:x" to specify separate values. Default: %(default)s.')
    train_params.add_argument('--rnn-dropout-recurrent',
                              type=multiple_values(2, data_type=float),
                              default=(.0, .0),
                              help='Recurrent dropout without memory loss (Semeniuta, 2016) for encoder & decoder '
                                   'LSTMs. Use "x:x" to specify separate values. Default: %(default)s.')
    train_params.add_argument('--rnn-no-attention',
                              action="store_true",
                              help='Disable attention mechanism,')
    train_params.add_argument('--rnn-enc-last-hidden-concat-to-embedding',
                              action="store_true",
                              help='Concatenate the last hidden layer of the encoder to the input of the decoder, '
                                   'instead of the previous state of the decoder. Default: %(default)s.')
    train_params.add_argument('--rnn-decoder-hidden-dropout',
                              type=float,
                              default=.2,
                              help='Dropout probability for hidden state that combines the context with the '
                                   'RNN hidden state in the decoder. Default: %(default)s.')
    train_params.add_argument('--transformer-dropout-attention',
                              type=float,
                              default=0.1,
                              help='Dropout probability for multi-head attention. Default: %(default)s.')
    train_params.add_argument('--transformer-dropout-act',
                              type=float,
                              default=0.1,
                              help='Dropout probability before activation in feed-forward block. Default: %(default)s.')
    train_params.add_argument('--transformer-dropout-prepost',
                              type=float,
                              default=0.1,
                              help='Dropout probability for pre/postprocessing blocks. Default: %(default)s.')
    train_params.add_argument('--conv-embed-dropout',
                              type=float,
                              default=.0,
                              help="Dropout probability for ConvolutionalEmbeddingEncoder. Default: %(default)s.")
    train_params.add_argument('--cnn-hidden-dropout',
                              type=float,
                              default=.2,
                              help="Dropout probability for dropout between convolutional layers. Default: %(default)s.")
    # Optimizer, kvstore and gradient compression.
    train_params.add_argument('--optimizer',
                              default=C.OPTIMIZER_ADAM,
                              choices=C.OPTIMIZERS,
                              help='SGD update rule. Default: %(default)s.')
    train_params.add_argument('--optimizer-params',
                              type=simple_dict(),
                              default=None,
                              help='Additional optimizer params as dictionary. Format: key1:value1,key2:value2,...')
    train_params.add_argument("--kvstore",
                              type=str,
                              default=C.KVSTORE_DEVICE,
                              choices=C.KVSTORE_TYPES,
                              help="The MXNet kvstore to use. 'device' is recommended for single process training. "
                                   "Use any of 'dist_sync', 'dist_device_sync' and 'dist_async' for distributed "
                                   "training. Default: %(default)s.")
    train_params.add_argument("--gradient-compression-type",
                              type=str,
                              default=C.GRADIENT_COMPRESSION_NONE,
                              choices=C.GRADIENT_COMPRESSION_TYPES,
                              help='Type of gradient compression to use. Default: %(default)s.')
    train_params.add_argument("--gradient-compression-threshold",
                              type=float,
                              default=0.5,
                              help="Threshold for gradient compression if --gctype is '2bit'. Default: %(default)s.")
    # Weight initialization.
    train_params.add_argument('--weight-init',
                              type=str,
                              default=C.INIT_XAVIER,
                              choices=C.INIT_TYPES,
                              help='Type of base weight initialization. Default: %(default)s.')
    train_params.add_argument('--weight-init-scale',
                              type=float,
                              default=3.0,
                              help='Weight initialization scale. Applies to uniform (scale) and xavier (magnitude). '
                                   'Default: %(default)s.')
    train_params.add_argument('--weight-init-xavier-factor-type',
                              type=str,
                              default=C.INIT_XAVIER_FACTOR_TYPE_AVG,
                              choices=C.INIT_XAVIER_FACTOR_TYPES,
                              help='Xavier factor type. Default: %(default)s.')
    train_params.add_argument('--weight-init-xavier-rand-type',
                              type=str,
                              default=C.RAND_TYPE_UNIFORM,
                              choices=[C.RAND_TYPE_UNIFORM, C.RAND_TYPE_GAUSSIAN],
                              help='Xavier random number generator type. Default: %(default)s.')
    train_params.add_argument('--embed-weight-init',
                              type=str,
                              default=C.EMBED_INIT_DEFAULT,
                              choices=C.EMBED_INIT_TYPES,
                              help='Type of embedding matrix weight initialization. If normal, initializes embedding '
                                   'weights using a normal distribution with std=1/srqt(vocab_size). '
                                   'Default: %(default)s.')
    # Learning rate, gradient clipping and LR scheduling.
    train_params.add_argument('--initial-learning-rate',
                              type=float,
                              default=0.0002,
                              help='Initial learning rate. Default: %(default)s.')
    train_params.add_argument('--weight-decay',
                              type=float,
                              default=0.0,
                              help='Weight decay constant. Default: %(default)s.')
    train_params.add_argument('--momentum',
                              type=float,
                              default=None,
                              help='Momentum constant. Default: %(default)s.')
    train_params.add_argument('--gradient-clipping-threshold',
                              type=float,
                              default=1.0,
                              help='Clip absolute gradients values greater than this value. '
                                   'Set to negative to disable. Default: %(default)s.')
    train_params.add_argument('--gradient-clipping-type',
                              choices=C.GRADIENT_CLIPPING_TYPES,
                              default=C.GRADIENT_CLIPPING_TYPE_NONE,
                              help='The type of gradient clipping. Default: %(default)s.')
    train_params.add_argument('--learning-rate-scheduler-type',
                              default=C.LR_SCHEDULER_PLATEAU_REDUCE,
                              choices=C.LR_SCHEDULERS,
                              help='Learning rate scheduler type. Default: %(default)s.')
    train_params.add_argument('--learning-rate-reduce-factor',
                              type=float,
                              default=0.7,
                              help="Factor to multiply learning rate with "
                                   "(for 'plateau-reduce' learning rate scheduler). Default: %(default)s.")
    train_params.add_argument('--learning-rate-reduce-num-not-improved',
                              type=int,
                              default=8,
                              help="For 'plateau-reduce' learning rate scheduler. Adjust learning rate "
                                   "if <optimized-metric> did not improve for x checkpoints. Default: %(default)s.")
    train_params.add_argument('--learning-rate-schedule',
                              type=learning_schedule(),
                              default=None,
                              help="For 'fixed-step' scheduler. Fully specified learning schedule in the form"
                                   " \"rate1:num_updates1[,rate2:num_updates2,...]\". Overrides all other args related"
                                   " to learning rate and stopping conditions. Default: %(default)s.")
    train_params.add_argument('--learning-rate-half-life',
                              type=float,
                              default=10,
                              help="Half-life of learning rate in checkpoints. For 'fixed-rate-*' "
                                   "learning rate schedulers. Default: %(default)s.")
    train_params.add_argument('--learning-rate-warmup',
                              type=int,
                              default=0,
                              help="Number of warmup steps. If set to x, linearly increases learning rate from 10%% "
                                   "to 100%% of the initial learning rate. Default: %(default)s.")
    train_params.add_argument('--learning-rate-decay-param-reset',
                              action='store_true',
                              help='Resets model parameters to current best when learning rate is reduced due to the '
                                   'value of --learning-rate-reduce-num-not-improved. Default: %(default)s.')
    train_params.add_argument('--learning-rate-decay-optimizer-states-reset',
                              choices=C.LR_DECAY_OPT_STATES_RESET_CHOICES,
                              default=C.LR_DECAY_OPT_STATES_RESET_OFF,
                              help="Action to take on optimizer states (e.g. Adam states) when learning rate is "
                                   "reduced due to the value of --learning-rate-reduce-num-not-improved. "
                                   "Default: %(default)s.")
    train_params.add_argument('--rnn-forget-bias',
                              default=0.0,
                              type=float,
                              help='Initial value of RNN forget biases.')
    train_params.add_argument('--rnn-h2h-init', type=str, default=C.RNN_INIT_ORTHOGONAL,
                              choices=[C.RNN_INIT_ORTHOGONAL, C.RNN_INIT_ORTHOGONAL_STACKED, C.RNN_INIT_DEFAULT],
                              help="Initialization method for RNN parameters. Default: %(default)s.")
    train_params.add_argument('--fixed-param-names',
                              default=[],
                              nargs='*',
                              help="Names of parameters to fix at training time. Default: %(default)s.")
    # Validation-time decoding and miscellaneous run settings.
    train_params.add_argument(C.TRAIN_ARGS_MONITOR_BLEU,
                              default=500,
                              type=int,
                              help='x>0: decode x sampled sentences from validation data and '
                                   'compute evaluation metrics. x==-1: use full validation data. Default: %(default)s.')
    train_params.add_argument('--decode-and-evaluate-use-cpu',
                              action='store_true',
                              help='Use CPU for decoding validation data. Overrides --decode-and-evaluate-device-id. '
                                   'Default: %(default)s.')
    train_params.add_argument('--decode-and-evaluate-device-id',
                              default=None,
                              type=int,
                              help='Separate device for decoding validation data. '
                                   'Use a negative number to automatically acquire a GPU. '
                                   'Use a positive number to acquire a specific GPU. Default: %(default)s.')
    train_params.add_argument('--seed',
                              type=int,
                              default=13,
                              help='Random seed. Default: %(default)s.')
    train_params.add_argument('--keep-last-params',
                              type=int,
                              default=-1,
                              help='Keep only the last n params files, use -1 to keep all files. Default: %(default)s')
    train_params.add_argument('--keep-initializations',
                              action="store_true",
                              help='In addition to keeping the last n params files, also keep params from checkpoint 0.')
    train_params.add_argument('--dry-run',
                              action='store_true',
                              help="Do not perform any actual training, but print statistics about the model"
                                   " and mode of operation.")
def add_train_cli_args(params):
    """Register the complete set of sockeye training CLI arguments on ``params``."""
    # Registration order determines the order of argument groups in --help.
    for register in (add_training_io_args,
                     add_model_parameters,
                     add_training_args,
                     add_device_args,
                     add_logging_args):
        register(params)
def add_translate_cli_args(params):
    """Register the complete set of sockeye translation CLI arguments on ``params``."""
    # Registration order determines the order of argument groups in --help.
    for register in (add_inference_args, add_device_args, add_logging_args):
        register(params)
def add_score_cli_args(params):
    """Add all CLI arguments for sockeye scoring: shared data/vocab/device/logging
    options plus a dedicated "Scoring parameters" group."""
    add_training_data_args(params, required=False)
    add_vocab_args(params)
    add_device_args(params)
    add_logging_args(params)
    add_batch_args(params, default_batch_size=500)
    # NOTE: from here on, ``params`` is rebound to the scoring argument group.
    params = params.add_argument_group("Scoring parameters")
    params.add_argument("--model", "-m", required=True,
                        help="Model directory containing trained model.")
    params.add_argument('--max-seq-len',
                        type=multiple_values(num_values=2, greater_or_equal=1),
                        default=None,
                        help='Maximum sequence length in tokens.'
                             'Use "x:x" to specify separate values for src&tgt. Default: Read from model.')
    params.add_argument('--length-penalty-alpha',
                        default=1.0,
                        type=float,
                        help='Alpha factor for the length penalty used in scoring: '
                             '(beta + len(Y))**alpha/(beta + 1)**alpha. A value of 0.0 will therefore turn off '
                             'length normalization. Default: %(default)s')
    params.add_argument('--length-penalty-beta',
                        default=0.0,
                        type=float,
                        help='Beta factor for the length penalty used in scoring: '
                             '(beta + len(Y))**alpha/(beta + 1)**alpha. Default: %(default)s')
    params.add_argument('--softmax-temperature',
                        type=float,
                        default=None,
                        help='Controls peakiness of model predictions. Values < 1.0 produce '
                             'peaked predictions, values > 1.0 produce smoothed distributions.')
    params.add_argument("--output", "-o", default=None,
                        help="File to write output to. Default: STDOUT.")
    params.add_argument('--output-type',
                        default=C.OUTPUT_HANDLER_SCORE,
                        choices=C.OUTPUT_HANDLERS_SCORING,
                        help='Output type. Default: %(default)s.')
    params.add_argument('--score-type',
                        choices=C.SCORING_TYPE_CHOICES,
                        default=C.SCORING_TYPE_DEFAULT,
                        help='Score type to output. Default: %(default)s')
def add_max_output_cli_args(params):
    """Add the --max-output-length option to ``params``."""
    help_text = ('Maximum number of words to generate during translation. '
                 'If None, it will be computed automatically. Default: %(default)s.')
    params.add_argument('--max-output-length',
                        default=None,
                        type=int,
                        help=help_text)
def add_inference_args(params):
    """Add all CLI arguments controlling sockeye inference (I/O channels, model
    selection, beam search, sampling, lexicon restriction and output shaping)
    to ``params`` as an "Inference parameters" group."""
    decode_params = params.add_argument_group("Inference parameters")
    # Input/output channels (stdin/stdout are used when files are not given).
    decode_params.add_argument(C.INFERENCE_ARG_INPUT_LONG, C.INFERENCE_ARG_INPUT_SHORT,
                               default=None,
                               help='Input file to translate. One sentence per line. '
                                    'If not given, will read from stdin.')
    decode_params.add_argument(C.INFERENCE_ARG_INPUT_FACTORS_LONG, C.INFERENCE_ARG_INPUT_FACTORS_SHORT,
                               required=False,
                               nargs='+',
                               type=regular_file(),
                               default=None,
                               help='List of input files containing additional source factors,'
                                    'each token-parallel to the source. Default: %(default)s.')
    decode_params.add_argument('--json-input',
                               action='store_true',
                               default=False,
                               help="If given, the CLI expects string-serialized json objects as input."
                                    "Requires at least the input text field, for example: "
                                    "{'text': 'some input string'} "
                                    "Optionally, a list of factors can be provided: "
                                    "{'text': 'some input string', 'factors': ['C C C', 'X X X']}.")
    decode_params.add_argument(C.INFERENCE_ARG_OUTPUT_LONG, C.INFERENCE_ARG_OUTPUT_SHORT,
                               default=None,
                               help='Output file to write translations to. '
                                    'If not given, will write to stdout.')
    # Model selection (multiple models = ensemble decoding).
    decode_params.add_argument('--models', '-m',
                               required=True,
                               nargs='+',
                               help='Model folder(s). Use multiple for ensemble decoding. '
                                    'Model determines config, best parameters and vocab files.')
    decode_params.add_argument('--checkpoints', '-c',
                               default=None,
                               type=int,
                               nargs='+',
                               help='If not given, chooses best checkpoints for model(s). '
                                    'If specified, must have the same length as --models and be integer')
    # Beam search / decoding configuration.
    decode_params.add_argument('--nbest-size',
                               type=int_greater_or_equal(1),
                               default=1,
                               help='Size of the nbest list of translations. Default: %(default)s.')
    decode_params.add_argument('--beam-size', '-b',
                               type=int_greater_or_equal(1),
                               default=5,
                               help='Size of the beam. Default: %(default)s.')
    decode_params.add_argument('--beam-prune', '-p',
                               type=float,
                               default=0,
                               help='Pruning threshold for beam search. All hypotheses with scores not within '
                                    'this amount of the best finished hypothesis are discarded (0 = off). '
                                    'Default: %(default)s.')
    decode_params.add_argument('--beam-search-stop',
                               choices=[C.BEAM_SEARCH_STOP_ALL, C.BEAM_SEARCH_STOP_FIRST],
                               default=C.BEAM_SEARCH_STOP_ALL,
                               help='Stopping criteria. Quit when (all) hypotheses are finished '
                                    'or when a finished hypothesis is in (first) position. Default: %(default)s.')
    decode_params.add_argument('--batch-size',
                               type=int_greater_or_equal(1),
                               default=1,
                               help='Batch size during decoding. Determines how many sentences are translated '
                                    'simultaneously. Default: %(default)s.')
    decode_params.add_argument('--chunk-size',
                               type=int_greater_or_equal(1),
                               default=None,
                               help='Size of the chunks to be read from input at once. The chunks are sorted and then '
                                    'split into batches. Therefore the larger the chunk size the better the grouping '
                                    'of segments of similar length and therefore the higher the increase in throughput.'
                                    ' Default: %d without batching '
                                    'and %d * batch_size with batching.' % (C.CHUNK_SIZE_NO_BATCHING,
                                                                           C.CHUNK_SIZE_PER_BATCH_SEGMENT))
    decode_params.add_argument('--skip-topk',
                               default=False,
                               action='store_true',
                               help='Use argmax instead of topk for greedy decoding (when --beam-size 1).'
                                    'Default: %(default)s.')
    # --sample with no value uses const=0 (sample from the full vocabulary).
    decode_params.add_argument('--sample',
                               type=int_greater_or_equal(0),
                               default=None,
                               nargs='?',
                               const=0,
                               help='Sample from softmax instead of taking best. Optional argument will restrict '
                                    'sampling to top N vocabulary items at each step. Default: %(default)s.')
    decode_params.add_argument('--seed',
                               type=int,
                               default=None,
                               help='Random seed used if sampling. Default: %(default)s.')
    decode_params.add_argument('--ensemble-mode',
                               type=str,
                               default='linear',
                               choices=['linear', 'log_linear'],
                               help='Ensemble mode. Default: %(default)s.')
    decode_params.add_argument('--bucket-width',
                               type=int_greater_or_equal(0),
                               default=10,
                               help='Bucket width for encoder steps. 0 means no bucketing. Default: %(default)s.')
    decode_params.add_argument('--max-input-len', '-n',
                               type=int,
                               default=None,
                               help='Maximum input sequence length. Default: value from model(s).')
    decode_params.add_argument('--softmax-temperature',
                               type=float,
                               default=None,
                               help='Controls peakiness of model predictions. Values < 1.0 produce '
                                    'peaked predictions, values > 1.0 produce smoothed distributions.')
    decode_params.add_argument('--max-output-length-num-stds',
                               type=int,
                               default=C.DEFAULT_NUM_STD_MAX_OUTPUT_LENGTH,
                               help='Number of target-to-source length ratio standard deviations from training to add '
                                    'to calculate maximum output length for beam search for each sentence. '
                                    'Default: %(default)s.')
    # Output vocabulary restriction and output shaping.
    decode_params.add_argument('--restrict-lexicon',
                               type=str,
                               default=None,
                               help="Specify top-k lexicon to restrict output vocabulary based on source. See lexicon "
                                    "module. Default: %(default)s.")
    decode_params.add_argument('--restrict-lexicon-topk',
                               type=int,
                               default=None,
                               help="Specify the number of translations to load for each source word from the lexicon "
                                    "given with --restrict-lexicon. Default: Load all entries from the lexicon.")
    decode_params.add_argument('--avoid-list',
                               type=str,
                               default=None,
                               help="Specify a file containing phrases (pre-processed, one per line) to block "
                                    "from the output. Default: %(default)s.")
    decode_params.add_argument('--strip-unknown-words',
                               action='store_true',
                               default=False,
                               help='Remove any <unk> symbols from outputs. Default: %(default)s.')
    decode_params.add_argument('--output-type',
                               default='translation',
                               choices=C.OUTPUT_HANDLERS,
                               help='Output type. Default: %(default)s.')
    decode_params.add_argument('--sure-align-threshold',
                               default=0.9,
                               type=float,
                               help='Threshold to consider a soft alignment a sure alignment. Default: %(default)s')
    decode_params.add_argument('--length-penalty-alpha',
                               default=1.0,
                               type=float,
                               help='Alpha factor for the length penalty used in beam search: '
                                    '(beta + len(Y))**alpha/(beta + 1)**alpha. A value of 0.0 will therefore turn off '
                                    'length normalization. Default: %(default)s')
    decode_params.add_argument('--length-penalty-beta',
                               default=0.0,
                               type=float,
                               help='Beta factor for the length penalty used in beam search: '
                                    '(beta + len(Y))**alpha/(beta + 1)**alpha. Default: %(default)s')
    decode_params.add_argument('--override-dtype',
                               default=None,
                               type=str,
                               help='EXPERIMENTAL: may be changed or removed in future. Overrides training dtype of '
                                    'encoders and decoders during inference. Default: %(default)s')
def add_evaluate_args(params):
    """Add CLI arguments for sockeye evaluate (references, hypotheses, metrics)."""
    eval_params = params.add_argument_group("Evaluate parameters")
    eval_params.add_argument('--references', '-r',
                             required=True,
                             type=str,
                             help="File with references.")
    # Hypotheses default to stdin; file_or_stdin() also maps "-" to stdin.
    eval_params.add_argument('--hypotheses', '-i',
                             type=file_or_stdin(),
                             default=[sys.stdin],
                             nargs='+',
                             help="File(s) with hypotheses. If none will read from stdin. Default: stdin.")
    eval_params.add_argument('--metrics',
                             nargs='+',
                             choices=C.EVALUATE_METRICS,
                             default=[C.BLEU, C.CHRF],
                             help='List of metrics to compute. Default: %(default)s.')
    eval_params.add_argument('--sentence', '-s',
                             action="store_true",
                             help="Show sentence-level metrics. Default: %(default)s.")
    eval_params.add_argument('--offset',
                             type=float,
                             default=0.01,
                             help="Numerical value of the offset of zero n-gram counts for BLEU. Default: %(default)s.")
    eval_params.add_argument('--not-strict', '-n',
                             action="store_true",
                             help="Do not fail if number of hypotheses does not match number of references. "
                                  "Default: %(default)s.")
def add_build_vocab_args(params):
    """Add I/O options for the vocabulary-building CLI plus the shared vocab options."""
    params.add_argument('-i', '--inputs',
                        required=True,
                        nargs='+',
                        help='List of text files to build vocabulary from.')
    params.add_argument('-o', '--output',
                        required=True,
                        type=str,
                        help="Output filename to write vocabulary to.")
    add_vocab_args(params)
def add_init_embedding_args(params):
    """Add CLI options for initializing (embedding) weights from pre-trained vectors."""
    # Default parameter names: source and target embedding weight matrices.
    default_names = [n + "weight" for n in [C.SOURCE_EMBEDDING_PREFIX, C.TARGET_EMBEDDING_PREFIX]]
    params.add_argument('--weight-files', '-w', required=True, nargs='+',
                        help='List of input weight files in .npy, .npz or Sockeye parameter format.')
    params.add_argument('--vocabularies-in', '-i', required=True, nargs='+',
                        help='List of input vocabularies as token-index dictionaries in .json format.')
    params.add_argument('--vocabularies-out', '-o', required=True, nargs='+',
                        help='List of output vocabularies as token-index dictionaries in .json format.')
    params.add_argument('--names', '-n', nargs='+',
                        default=default_names,
                        help='List of Sockeye parameter names for (embedding) weights. Default: %(default)s.')
    params.add_argument('--file', '-f', required=True,
                        help='File to write initialized parameters to.')
    params.add_argument('--encoding', '-c', type=str, default=C.VOCAB_ENCODING,
                        help='Open input vocabularies with specified encoding. Default: %(default)s.')
| 55.243421 | 121 | 0.526193 |
import argparse
import os
import sys
import types
import yaml
from typing import Any, Callable, Dict, List, Tuple, Optional
from . import constants as C
from . import data_io
from .lr_scheduler import LearningRateSchedulerFixedStep
class ConfigArgumentParser(argparse.ArgumentParser):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.argument_definitions = {}
self.argument_actions = []
self._overwrite_add_argument(self)
self.add_argument("--config", help="Config file in YAML format.", type=str)
def _register_argument(self, _action, *args, **kwargs):
self.argument_definitions[args] = kwargs
self.argument_actions.append(_action)
def _overwrite_add_argument(self, original_object):
def _new_add_argument(this_self, *args, **kwargs):
action = this_self.original_add_argument(*args, **kwargs)
this_self.config_container._register_argument(action, *args, **kwargs)
original_object.original_add_argument = original_object.add_argument
original_object.config_container = self
original_object.add_argument = types.MethodType(_new_add_argument, original_object)
return original_object
def add_argument_group(self, *args, **kwargs):
group = super().add_argument_group(*args, **kwargs)
return self._overwrite_add_argument(group)
def parse_args(self, args=None, namespace=None) -> argparse.Namespace:
config_parser = argparse.ArgumentParser(add_help=False)
config_parser.add_argument("--config", type=regular_file())
config_args, _ = config_parser.parse_known_args(args=args)
initial_args = argparse.Namespace()
if config_args.config:
initial_args = load_args(config_args.config)
for action in self.argument_actions:
if action.dest in initial_args:
action.required = False
return super().parse_args(args=args, namespace=initial_args)
def save_args(args: argparse.Namespace, fname: str):
    """Serialize a parsed argument namespace to ``fname`` as YAML."""
    with open(fname, 'w') as out:
        yaml.safe_dump(vars(args), out, default_flow_style=False)
def load_args(fname: str) -> argparse.Namespace:
    """Load an argument namespace previously written by :func:`save_args`."""
    with open(fname, 'r') as stream:
        arg_dict = yaml.safe_load(stream)
    return argparse.Namespace(**arg_dict)
def regular_file() -> Callable:
    """Return an argparse type checker that accepts only an existing regular file."""
    def validate(path):
        path = str(path)
        if os.path.isfile(path):
            return path
        raise argparse.ArgumentTypeError("must exist and be a regular file.")

    return validate
def regular_folder() -> Callable:
    """Return an argparse type checker that accepts only an existing directory."""
    def validate(path):
        path = str(path)
        if os.path.isdir(path):
            return path
        raise argparse.ArgumentTypeError("must be a directory.")

    return validate
def int_greater_or_equal(threshold: int) -> Callable:
    """Return an argparse type converter: parses an int and requires it >= ``threshold``."""
    def validate(raw_value):
        parsed = int(raw_value)
        if parsed < threshold:
            raise argparse.ArgumentTypeError("must be greater or equal to %d." % threshold)
        return parsed

    return validate
def learning_schedule() -> Callable:
    """Return a parser for 'rate1:num_updates1[,rate2:num_updates2,...]' schedules."""
    def parse(schedule_str):
        try:
            return LearningRateSchedulerFixedStep.parse_schedule_str(schedule_str)
        except ValueError:
            raise argparse.ArgumentTypeError(
                "Learning rate schedule string should have form rate1:num_updates1[,rate2:num_updates2,...]")

    return parse
def simple_dict() -> Callable:
    """
    Return a parser for dictionary strings of the form "key1:value1,key2:value2,...".
    Values are converted to bool ("True"/"False"), int or float.

    Improvement over the previous '"." in value' heuristic: any valid float
    literal is accepted, including scientific notation (e.g. "eps:1e-8"),
    while plain integers still parse as int. Backward compatible.

    :raises: argparse.ArgumentTypeError via the returned parser on malformed input.
    """
    def parse(dict_str: str):

        def _parse(value: str):
            if value == "True":
                return True
            if value == "False":
                return False
            # Most restrictive first: int, then float (raises ValueError if neither).
            try:
                return int(value)
            except ValueError:
                return float(value)

        _dict = dict()
        try:
            for entry in dict_str.split(","):
                key, value = entry.split(":")
                _dict[key] = _parse(value)
        except ValueError:
            raise argparse.ArgumentTypeError("Specify argument dictionary as key1:value1,key2:value2,..."
                                             " Supported types: bool, int, float.")
        return _dict

    return parse
def multiple_values(num_values: int = 0,
                    greater_or_equal: Optional[float] = None,
                    data_type: Callable = int) -> Callable:
    """
    Return a parser that turns "v" or "v<SEP>v[...]" (SEP = C.ARG_SEPARATOR)
    into a tuple of ``num_values`` values of ``data_type``. A single value
    (no separator) is broadcast to all positions.

    Fixes: the separator is now consistently C.ARG_SEPARATOR (previously ':'
    was hard-coded in two places), and the threshold error message uses %s so
    non-integer thresholds are reported accurately.

    :param num_values: Optional number of values required.
    :param greater_or_equal: Optional lower bound all values must satisfy.
    :param data_type: Type each value is converted to. Default: int.
    """
    def parse(value_to_check):
        if C.ARG_SEPARATOR in value_to_check:
            expected_num_separators = num_values - 1 if num_values else 0
            if expected_num_separators > 0 and (value_to_check.count(C.ARG_SEPARATOR) != expected_num_separators):
                raise argparse.ArgumentTypeError("Expected either a single value or %d values separated by %s" %
                                                 (num_values, C.ARG_SEPARATOR))
            values = tuple(map(data_type, value_to_check.split(C.ARG_SEPARATOR, num_values - 1)))
        else:
            values = tuple([data_type(value_to_check)] * num_values)
        if greater_or_equal is not None:
            if any((value < greater_or_equal for value in values)):
                raise argparse.ArgumentTypeError("Must provide value greater or equal to %s" % greater_or_equal)
        return values

    return parse
def file_or_stdin() -> Callable:
    """Return a parser that yields stdin for None or '-' and otherwise opens the path."""
    def parse(path):
        if path is None or path == "-":
            return sys.stdin
        return data_io.smart_open(path)

    return parse
def add_average_args(params):
    """Add CLI arguments for checkpoint averaging (inputs, selection metric,
    number of checkpoints, output file, selection strategy)."""
    average_params = params.add_argument_group("Averaging")
    average_params.add_argument(
        "inputs",
        metavar="INPUT",
        type=str,
        nargs="+",
        help="either a single model directory (automatic checkpoint selection) "
             "or multiple .params files (manual checkpoint selection)")
    average_params.add_argument(
        "--metric",
        help="Name of the metric to choose n-best checkpoints from. Default: %(default)s.",
        default=C.PERPLEXITY,
        choices=C.METRICS)
    average_params.add_argument(
        "-n",
        type=int,
        default=4,
        help="number of checkpoints to find. Default: %(default)s.")
    average_params.add_argument(
        "--output", "-o", required=True, type=str, help="File to write averaged parameters to.")
    average_params.add_argument(
        "--strategy",
        choices=["best", "last", "lifespan"],
        default="best",
        help="selection method. Default: %(default)s.")
def add_extract_args(params):
    """Add CLI arguments for extracting parameters from a trained model."""
    extract_params = params.add_argument_group("Extracting")
    extract_params.add_argument("input",
                                type=str,
                                metavar="INPUT",
                                help="Either a model directory (using params.best) or a specific params.x file.")
    extract_params.add_argument('--names', '-n',
                                default=[],
                                nargs='*',
                                help='Names of parameters to be extracted.')
    extract_params.add_argument('--list-all', '-l',
                                action='store_true',
                                help='List names of all available parameters.')
    extract_params.add_argument('--output', '-o',
                                type=str,
                                help="File to write extracted parameters to (in .npz format).")
def add_rerank_args(params):
    """Add CLI arguments for reranking nbest translation lists against references."""
    rerank_params = params.add_argument_group("Reranking")
    rerank_params.add_argument("--reference", "-r",
                               type=str,
                               required=True,
                               help="File where target reference translations are stored.")
    rerank_params.add_argument("--hypotheses", "-hy",
                               type=str,
                               required=True,
                               help="File with nbest translations, one nbest list per line,"
                                    "in JSON format as returned by sockeye.translate with --nbest-size x.")
    rerank_params.add_argument("--metric", "-m",
                               type=str,
                               required=False,
                               default=C.RERANK_BLEU,
                               choices=C.RERANK_METRICS,
                               help="Sentence-level metric used to compare each nbest translation to the reference."
                                    "Default: %(default)s.")
    rerank_params.add_argument("--output-best",
                               action="store_true",
                               help="Output only the best hypothesis from each nbest list.")
    rerank_params.add_argument("--return-score",
                               action="store_true",
                               help="Returns the reranking scores as scores in output JSON objects.")
def add_lexicon_args(params):
    """Add model-directory and top-k options shared by the lexicon CLIs."""
    lexicon_params = params.add_argument_group("Model & Top-k")
    lexicon_params.add_argument("--model", "-m", required=True,
                                help="Model directory containing source and target vocabularies.")
    lexicon_params.add_argument("-k",
                                default=200,
                                type=int,
                                help="Number of target translations to keep per source. Default: %(default)s.")
def add_lexicon_create_args(params):
    """Add I/O options for creating a top-k lexicon from a probabilistic lexicon."""
    lexicon_params = params.add_argument_group("I/O")
    lexicon_params.add_argument("--input", "-i", required=True,
                                help="Probabilistic lexicon (fast_align format) to build top-k lexicon from.")
    lexicon_params.add_argument("--output", "-o", required=True,
                                help="File name to write top-k lexicon to.")
def add_lexicon_inspect_args(params):
    """Add the option naming the top-k lexicon to inspect."""
    lexicon_params = params.add_argument_group("Lexicon to inspect")
    lexicon_params.add_argument("--lexicon", "-l",
                                required=True,
                                help="File name of top-k lexicon to inspect.")
def add_logging_args(params):
    """Add console-logging options to ``params``."""
    logging_params = params.add_argument_group("Logging")
    logging_params.add_argument('--quiet', '-q',
                                action="store_true",
                                default=False,
                                help='Suppress console logging.')
def add_training_data_args(params, required=False):
    """Add source/target/source-factor training-data file options to ``params``.

    :param params: Argument parser or group to extend.
    :param required: Whether source and target files must be given on the command line
                     (False e.g. when prepared data may be used instead).
    """
    params.add_argument(C.TRAINING_ARG_SOURCE, '-s',
                        required=required,
                        type=regular_file(),
                        help='Source side of parallel training data.')
    params.add_argument('--source-factors', '-sf',
                        required=False,
                        nargs='+',
                        type=regular_file(),
                        default=[],
                        help='File(s) containing additional token-parallel source side factors. Default: %(default)s.')
    params.add_argument(C.TRAINING_ARG_TARGET, '-t',
                        required=required,
                        type=regular_file(),
                        help='Target side of parallel training data.')
def add_validation_data_params(params):
    """Add required validation source/target (and optional factor) file options."""
    params.add_argument('--validation-source', '-vs',
                        required=True,
                        type=regular_file(),
                        help='Source side of validation data.')
    params.add_argument('--validation-source-factors', '-vsf',
                        required=False,
                        nargs='+',
                        type=regular_file(),
                        default=[],
                        help='File(s) containing additional token-parallel validation source side factors. '
                             'Default: %(default)s.')
    params.add_argument('--validation-target', '-vt',
                        required=True,
                        type=regular_file(),
                        help='Target side of validation data.')
def add_prepared_data_args(params):
    """Add the option pointing at a prepared-training-data directory."""
    params.add_argument(C.TRAINING_ARG_PREPARED_DATA, '-d',
                        help='Prepared training data directory created through python -m sockeye.prepare_data.',
                        type=regular_folder())
def add_monitoring_args(params):
    """Add options for monitoring outputs/weights/gradients during training."""
    params.add_argument('--monitor-pattern',
                        type=str,
                        default=None,
                        help="Pattern to match outputs/weights/gradients to monitor. '.*' monitors everything. "
                             "Default: %(default)s.")
    params.add_argument('--monitor-stat-func',
                        choices=list(C.MONITOR_STAT_FUNCS.keys()),
                        default=C.STAT_FUNC_DEFAULT,
                        help="Statistics function to run on monitored outputs/weights/gradients. "
                             "Default: %(default)s.")
def add_training_output_args(params):
    """Add options controlling where training results are written."""
    params.add_argument(
        '--output', '-o',
        required=True,
        help='Folder where model & training results are written to.')
    params.add_argument(
        '--overwrite-output',
        action='store_true',
        help='Delete all contents of the model directory if it already exists.')
def add_training_io_args(params):
    """Add the full set of data & I/O arguments for training by delegating to
    the individual ``add_*`` helpers: raw/prepared training data, validation
    data, bucketing, vocabularies, output folder, and monitoring options.
    All of them are registered on one "Data & I/O" argument group.
    """
    params = params.add_argument_group("Data & I/O")
    add_training_data_args(params, required=False)
    add_prepared_data_args(params)
    add_validation_data_params(params)
    add_bucketing_args(params)
    add_vocab_args(params)
    add_training_output_args(params)
    add_monitoring_args(params)
def add_bucketing_args(params):
    """Add bucketing-related data-pipeline options: disabling bucketing,
    bucket width, and maximum source/target sequence length."""
    params.add_argument('--no-bucketing',
                        action='store_true',
                        help='Disable bucketing: always unroll the graph to --max-seq-len. Default: %(default)s.')
    params.add_argument('--bucket-width',
                        type=int_greater_or_equal(1),
                        default=10,
                        help='Width of buckets in tokens. Default: %(default)s.')
    params.add_argument(C.TRAINING_ARG_MAX_SEQ_LEN,
                        type=multiple_values(num_values=2, greater_or_equal=1),
                        default=(99, 99),
                        # Fixed: the two help fragments previously concatenated to "tokens.Use"
                        # (missing space between sentences).
                        help='Maximum sequence length in tokens. '
                             'Use "x:x" to specify separate values for src&tgt. Default: %(default)s.')
def add_prepare_data_cli_args(params):
    """Add all CLI arguments for the data-preparation tool."""
    group = params.add_argument_group("Data preparation.")
    add_training_data_args(group, required=True)
    add_vocab_args(group)
    add_bucketing_args(group)
    group.add_argument(
        '--num-samples-per-shard',
        default=1000000,
        type=int_greater_or_equal(1),
        help='The approximate number of samples per shard. Default: %(default)s.')
    group.add_argument(
        '--min-num-shards',
        type=int_greater_or_equal(1),
        default=1,
        help='The minimum number of shards to use, even if they would not '
             'reach the desired number of samples per shard. Default: %(default)s.')
    group.add_argument(
        '--seed',
        default=13,
        type=int,
        help='Random seed used that makes shard assignments deterministic. Default: %(default)s.')
    group.add_argument(
        '--output', '-o',
        required=True,
        help='Folder where the prepared and possibly sharded data is written to.')
def add_device_args(params):
    """Add CPU/GPU device selection and GPU file-locking options."""
    group = params.add_argument_group("Device parameters")
    group.add_argument(
        '--device-ids',
        nargs='+',
        type=int,
        default=[-1],
        help='List or number of GPUs ids to use. Default: %(default)s. '
             'Use negative numbers to automatically acquire a certain number of GPUs, e.g. -5 '
             'will find 5 free GPUs. '
             'Use positive numbers to acquire a specific GPU id on this host. '
             '(Note that automatic acquisition of GPUs assumes that all GPU processes on '
             'this host are using automatic sockeye GPU acquisition).')
    group.add_argument(
        '--use-cpu',
        action='store_true',
        help='Use CPU device instead of GPU.')
    group.add_argument(
        '--disable-device-locking',
        action='store_true',
        help='Just use the specified device ids without locking.')
    group.add_argument(
        '--lock-dir',
        default="/tmp",
        help='When acquiring a GPU we do file based locking so that only one Sockeye process '
             'can run on the a GPU. This is the folder in which we store the file '
             'locks. For locking to work correctly it is assumed all processes use the same '
             'lock directory. The only requirement for the directory are file '
             'write permissions.')
def add_vocab_args(params):
    """Add vocabulary-related options: existing vocabulary files, vocabulary
    sharing, size limits, minimum word frequency, and padding."""
    params.add_argument('--source-vocab',
                        required=False,
                        default=None,
                        help='Existing source vocabulary (JSON).')
    params.add_argument('--target-vocab',
                        required=False,
                        default=None,
                        help='Existing target vocabulary (JSON).')
    params.add_argument('--source-factor-vocabs',
                        required=False,
                        nargs='+',
                        type=regular_file(),
                        default=[],
                        help='Existing source factor vocabulary (-ies) (JSON).')
    params.add_argument(C.VOCAB_ARG_SHARED_VOCAB,
                        action='store_true',
                        default=False,
                        help='Share source and target vocabulary. '
                             'Will be automatically turned on when using weight tying. Default: %(default)s.')
    params.add_argument('--num-words',
                        type=multiple_values(num_values=2, greater_or_equal=0),
                        default=(0, 0),
                        # Fixed help text: added the missing "is" and the missing space before
                        # "Default" (previously rendered as "times.Default").
                        help='Maximum vocabulary size. Use "x:x" to specify separate values for src&tgt. '
                             'A value of 0 indicates that the vocabulary is unrestricted and determined from the data '
                             'by creating an entry for all words that occur at least --word-min-count times. '
                             'Default: %(default)s.')
    params.add_argument('--word-min-count',
                        type=multiple_values(num_values=2, greater_or_equal=1),
                        default=(1, 1),
                        help='Minimum frequency of words to be included in vocabularies. Default: %(default)s.')
    params.add_argument('--pad-vocab-to-multiple-of',
                        type=int,
                        default=None,
                        help='Pad vocabulary to a multiple of this integer. Default: %(default)s.')
def add_model_parameters(params):
    """Add all model-architecture options: encoder/decoder type and depth,
    convolutional-embedding, CNN, RNN and transformer hyper-parameters, LHUC,
    embeddings, attention, and weight tying/normalization."""
    model_params = params.add_argument_group("ModelConfig")
    model_params.add_argument('--params', '-p',
                              type=str,
                              default=None,
                              help='Initialize model parameters from file. Overrides random initializations.')
    model_params.add_argument('--allow-missing-params',
                              action="store_true",
                              default=False,
                              help="Allow missing parameters when initializing model parameters from file. "
                                   "Default: %(default)s.")
    model_params.add_argument('--encoder',
                              choices=C.ENCODERS,
                              default=C.TRANSFORMER_TYPE,
                              help="Type of encoder. Default: %(default)s.")
    model_params.add_argument('--decoder',
                              choices=C.DECODERS,
                              default=C.TRANSFORMER_TYPE,
                              # Fixed copy-paste error: this help previously read "Type of encoder."
                              help="Type of decoder. Default: %(default)s.")
    model_params.add_argument('--num-layers',
                              type=multiple_values(num_values=2, greater_or_equal=1),
                              default=(6, 6),
                              help='Number of layers for encoder & decoder. '
                                   'Use "x:x" to specify separate values for encoder & decoder. Default: %(default)s.')
    # convolutional embedding encoder arguments
    model_params.add_argument('--conv-embed-output-dim',
                              type=int_greater_or_equal(1),
                              default=None,
                              help="Project segment embeddings to this size for ConvolutionalEmbeddingEncoder. Omit to"
                                   " avoid projection, leaving segment embeddings total size of all filters. Default:"
                                   " %(default)s.")
    model_params.add_argument('--conv-embed-max-filter-width',
                              type=int_greater_or_equal(1),
                              default=8,
                              help="Maximum filter width for ConvolutionalEmbeddingEncoder. Default: %(default)s.")
    model_params.add_argument('--conv-embed-num-filters',
                              type=multiple_values(greater_or_equal=1),
                              default=(200, 200, 250, 250, 300, 300, 300, 300),
                              help="List of number of filters of each width 1..max for ConvolutionalEmbeddingEncoder. "
                                   "Default: %(default)s.")
    model_params.add_argument('--conv-embed-pool-stride',
                              type=int_greater_or_equal(1),
                              default=5,
                              help="Pooling stride for ConvolutionalEmbeddingEncoder. Default: %(default)s.")
    model_params.add_argument('--conv-embed-num-highway-layers',
                              type=int_greater_or_equal(0),
                              default=4,
                              help="Number of highway layers for ConvolutionalEmbeddingEncoder. Default: %(default)s.")
    model_params.add_argument('--conv-embed-add-positional-encodings',
                              action='store_true',
                              default=False,
                              help="Add positional encodings to final segment embeddings for"
                                   " ConvolutionalEmbeddingEncoder. Default: %(default)s.")
    # cnn encoder/decoder arguments
    model_params.add_argument('--cnn-kernel-width',
                              type=multiple_values(num_values=2, greater_or_equal=1, data_type=int),
                              default=(3, 3),
                              help='Kernel width of the convolutional encoder and decoder. Default: %(default)s.')
    model_params.add_argument('--cnn-num-hidden',
                              type=int_greater_or_equal(1),
                              default=512,
                              help='Number of hidden units for the convolutional encoder and decoder. '
                                   'Default: %(default)s.')
    model_params.add_argument('--cnn-activation-type',
                              choices=C.CNN_ACTIVATION_TYPES,
                              default=C.GLU,
                              help="Type activation to use for each convolutional layer. Default: %(default)s.")
    model_params.add_argument('--cnn-positional-embedding-type',
                              choices=C.POSITIONAL_EMBEDDING_TYPES,
                              default=C.LEARNED_POSITIONAL_EMBEDDING,
                              help='The type of positional embedding. Default: %(default)s.')
    model_params.add_argument('--cnn-project-qkv',
                              action='store_true',
                              default=False,
                              help="Optionally apply query, key and value projections to the source and target hidden "
                                   "vectors before applying the attention mechanism.")
    # rnn arguments
    model_params.add_argument('--rnn-cell-type',
                              choices=C.CELL_TYPES,
                              default=C.LSTM_TYPE,
                              help='RNN cell type for encoder and decoder. Default: %(default)s.')
    model_params.add_argument('--rnn-num-hidden',
                              type=int_greater_or_equal(1),
                              default=1024,
                              help='Number of RNN hidden units for encoder and decoder. Default: %(default)s.')
    model_params.add_argument('--rnn-encoder-reverse-input',
                              action='store_true',
                              help='Reverse input sequence for RNN encoder. Default: %(default)s.')
    model_params.add_argument('--rnn-decoder-state-init',
                              default=C.RNN_DEC_INIT_LAST,
                              choices=C.RNN_DEC_INIT_CHOICES,
                              help='How to initialize RNN decoder states. Default: %(default)s.')
    model_params.add_argument('--rnn-residual-connections',
                              action="store_true",
                              default=False,
                              help="Add residual connections to stacked RNNs. (see Wu ETAL'16). Default: %(default)s.")
    model_params.add_argument('--rnn-first-residual-layer',
                              type=int_greater_or_equal(2),
                              default=2,
                              help='First RNN layer to have a residual connection. Default: %(default)s.')
    model_params.add_argument('--rnn-context-gating', action="store_true",
                              help="Enables a context gate which adaptively weighs the RNN decoder input against the "
                                   "source context vector before each update of the decoder hidden state.")
    # transformer arguments
    model_params.add_argument('--transformer-model-size',
                              type=multiple_values(num_values=2, greater_or_equal=1),
                              default=(512, 512),
                              help='Number of hidden units in transformer layers. '
                                   'Use "x:x" to specify separate values for encoder & decoder. Default: %(default)s.')
    model_params.add_argument('--transformer-attention-heads',
                              type=multiple_values(num_values=2, greater_or_equal=1),
                              default=(8, 8),
                              help='Number of heads for all self-attention when using transformer layers. '
                                   'Use "x:x" to specify separate values for encoder & decoder. Default: %(default)s.')
    model_params.add_argument('--transformer-feed-forward-num-hidden',
                              type=multiple_values(num_values=2, greater_or_equal=1),
                              default=(2048, 2048),
                              help='Number of hidden units in transformers feed forward layers. '
                                   'Use "x:x" to specify separate values for encoder & decoder. Default: %(default)s.')
    model_params.add_argument('--transformer-activation-type',
                              choices=C.TRANSFORMER_ACTIVATION_TYPES,
                              default=C.RELU,
                              help="Type activation to use for each feed forward layer. Default: %(default)s.")
    model_params.add_argument('--transformer-positional-embedding-type',
                              choices=C.POSITIONAL_EMBEDDING_TYPES,
                              default=C.FIXED_POSITIONAL_EMBEDDING,
                              help='The type of positional embedding. Default: %(default)s.')
    model_params.add_argument('--transformer-preprocess',
                              type=multiple_values(num_values=2, greater_or_equal=None, data_type=str),
                              default=('n', 'n'),
                              help='Transformer preprocess sequence for encoder and decoder. Supports three types of '
                                   'operations: d=dropout, r=residual connection, n=layer normalization. You can '
                                   'combine in any order, for example: "ndr". '
                                   'Leave empty to not use any of these operations. '
                                   'You can specify separate sequences for encoder and decoder by separating with ":" '
                                   'For example: n:drn '
                                   'Default: %(default)s.')
    model_params.add_argument('--transformer-postprocess',
                              type=multiple_values(num_values=2, greater_or_equal=None, data_type=str),
                              default=('dr', 'dr'),
                              help='Transformer postprocess sequence for encoder and decoder. Supports three types of '
                                   'operations: d=dropout, r=residual connection, n=layer normalization. You can '
                                   'combine in any order, for example: "ndr". '
                                   'Leave empty to not use any of these operations. '
                                   'You can specify separate sequences for encoder and decoder by separating with ":" '
                                   'For example: n:drn '
                                   'Default: %(default)s.')
    # LHUC
    # TODO: The convolutional model does not support lhuc yet
    model_params.add_argument('--lhuc',
                              nargs="+",
                              default=None,
                              choices=C.LHUC_CHOICES,
                              metavar="COMPONENT",
                              help="Use LHUC (Vilar 2018). Include an amplitude parameter to hidden units for"
                                   " domain adaptation. Needs a pre-trained model. Valid values: {values}. Currently not"
                                   " supported for convolutional models. Default: %(default)s.".format(
                                  values=", ".join(C.LHUC_CHOICES)))
    # embedding arguments
    model_params.add_argument('--num-embed',
                              type=multiple_values(num_values=2, greater_or_equal=1),
                              default=(512, 512),
                              help='Embedding size for source and target tokens. '
                                   'Use "x:x" to specify separate values for src&tgt. Default: %(default)s.')
    model_params.add_argument('--source-factors-num-embed',
                              type=int,
                              nargs='+',
                              default=[],
                              help='Embedding size for additional source factors. '
                                   'You must provide as many dimensions as '
                                   '(validation) source factor files. Default: %(default)s.')
    # attention arguments
    model_params.add_argument('--rnn-attention-type',
                              choices=C.ATT_TYPES,
                              default=C.ATT_MLP,
                              help='Attention model for RNN decoders. Choices: {%(choices)s}. '
                                   'Default: %(default)s.')
    model_params.add_argument('--rnn-attention-num-hidden',
                              default=None,
                              type=int,
                              help='Number of hidden units for attention layers. Default: equal to --rnn-num-hidden.')
    model_params.add_argument('--rnn-attention-use-prev-word', action="store_true",
                              help="Feed the previous target embedding into the attention mechanism.")
    model_params.add_argument('--rnn-scale-dot-attention',
                              action='store_true',
                              help='Optional scale before dot product. Only applicable to \'dot\' attention type. '
                                   '[Vaswani et al, 2017]')
    model_params.add_argument('--rnn-attention-coverage-type',
                              choices=C.COVERAGE_TYPES,
                              default=C.COVERAGE_COUNT,
                              help="Type of model for updating coverage vectors. 'count' refers to an update method "
                                   "that accumulates attention scores. 'fertility' accumulates attention scores as well "
                                   "but also computes a fertility value for every source word. "
                                   "'tanh', 'sigmoid', 'relu', 'softrelu' "
                                   "use non-linear layers with the respective activation type, and 'gru' uses a "
                                   "GRU to update the coverage vectors. Default: %(default)s.")
    model_params.add_argument('--rnn-attention-coverage-max-fertility',
                              type=int,
                              default=2,
                              help="Maximum fertility for individual source words. Default: %(default)s.")
    model_params.add_argument('--rnn-attention-coverage-num-hidden',
                              type=int,
                              default=1,
                              help="Number of hidden units for coverage vectors. Default: %(default)s.")
    model_params.add_argument('--rnn-attention-in-upper-layers',
                              action="store_true",
                              help="Pass the attention to the upper layers of the RNN decoder, similar "
                                   "to GNMT paper. Only applicable if more than one layer is used.")
    model_params.add_argument('--rnn-attention-mhdot-heads',
                              type=int, default=None,
                              help='Number of heads for Multi-head dot attention. Default: %(default)s.')
    # weight tying / normalization
    model_params.add_argument('--weight-tying',
                              action='store_true',
                              help='Turn on weight tying (see arxiv.org/abs/1608.05859). '
                                   'The type of weight sharing is determined through '
                                   '--weight-tying-type. Default: %(default)s.')
    model_params.add_argument('--weight-tying-type',
                              default=C.WEIGHT_TYING_TRG_SOFTMAX,
                              choices=[C.WEIGHT_TYING_SRC_TRG_SOFTMAX,
                                       C.WEIGHT_TYING_SRC_TRG,
                                       C.WEIGHT_TYING_TRG_SOFTMAX],
                              help='The type of weight tying. source embeddings=src, target embeddings=trg, '
                                   'target softmax weight matrix=softmax. Default: %(default)s.')
    model_params.add_argument('--layer-normalization', action="store_true",
                              help="Adds layer normalization before non-linear activations. "
                                   "This includes MLP attention, RNN decoder state initialization, "
                                   "RNN decoder hidden state, and cnn layers."
                                   "It does not normalize RNN cell activations "
                                   "(this can be done using the '%s' or '%s' rnn-cell-type." % (C.LNLSTM_TYPE,
                                                                                                C.LNGLSTM_TYPE))
    model_params.add_argument('--weight-normalization', action="store_true",
                              help="Adds weight normalization to decoder output layers "
                                   "(and all convolutional weight matrices for CNN decoders). Default: %(default)s.")
def add_batch_args(params, default_batch_size=4096):
    """Add mini-batch size/type options; ``default_batch_size`` seeds --batch-size."""
    params.add_argument(
        '--batch-size', '-b',
        default=default_batch_size,
        type=int_greater_or_equal(1),
        help='Mini-batch size. Note that depending on the batch-type this either refers to '
             'words or sentences.'
             'Sentence: each batch contains X sentences, number of words varies. '
             'Word: each batch contains (approximately) X words, number of sentences varies. '
             'Default: %(default)s.')
    params.add_argument(
        "--batch-type",
        default=C.BATCH_TYPE_WORD,
        choices=[C.BATCH_TYPE_SENTENCE, C.BATCH_TYPE_WORD],
        type=str,
        help="Sentence: each batch contains X sentences, number of words varies."
             "Word: each batch contains (approximately) X target words, "
             "number of sentences varies. Default: %(default)s.")
def add_training_args(params):
    """Add all training hyper-parameter options: loss/metrics, stopping
    criteria, dropout, optimizer and distributed-training settings, weight
    initialization, learning-rate scheduling, checkpoint decoding/evaluation,
    and miscellaneous run controls. All options are registered on one
    "Training parameters" argument group.
    """
    train_params = params.add_argument_group("Training parameters")
    add_batch_args(train_params)
    train_params.add_argument('--decoder-only',
                              action='store_true',
                              help='Pre-train a decoder. This is currently for RNN decoders only. '
                                   'Default: %(default)s.')
    # Hidden option (help=argparse.SUPPRESS): not shown in --help output.
    train_params.add_argument('--fill-up',
                              type=str,
                              default=C.FILL_UP_DEFAULT,
                              choices=C.FILL_UP_CHOICES,
                              help=argparse.SUPPRESS)
    # Loss and evaluation metrics.
    train_params.add_argument('--loss',
                              default=C.CROSS_ENTROPY,
                              choices=[C.CROSS_ENTROPY],
                              help='Loss to optimize. Default: %(default)s.')
    train_params.add_argument('--label-smoothing',
                              default=0.1,
                              type=float,
                              help='Smoothing constant for label smoothing. Default: %(default)s.')
    train_params.add_argument('--loss-normalization-type',
                              default=C.LOSS_NORM_VALID,
                              choices=[C.LOSS_NORM_VALID, C.LOSS_NORM_BATCH],
                              help='How to normalize the loss. By default loss is normalized by the number '
                                   'of valid (non-PAD) tokens (%s).' % C.LOSS_NORM_VALID)
    train_params.add_argument('--metrics',
                              nargs='+',
                              default=[C.PERPLEXITY],
                              choices=[C.PERPLEXITY, C.ACCURACY],
                              help='Names of metrics to track on training and validation data. Default: %(default)s.')
    train_params.add_argument('--optimized-metric',
                              default=C.PERPLEXITY,
                              choices=C.METRICS,
                              help='Metric to optimize with early stopping {%(choices)s}. Default: %(default)s.')
    # Stopping criteria and checkpointing.
    train_params.add_argument('--min-updates',
                              type=int,
                              default=None,
                              help='Minimum number of updates before training can stop. Default: %(default)s.')
    train_params.add_argument('--max-updates',
                              type=int,
                              default=None,
                              help='Maximum number of updates. Default: %(default)s.')
    train_params.add_argument('--min-samples',
                              type=int,
                              default=None,
                              help='Minimum number of samples before training can stop. Default: %(default)s.')
    train_params.add_argument('--max-samples',
                              type=int,
                              default=None,
                              help='Maximum number of samples. Default: %(default)s.')
    train_params.add_argument(C.TRAIN_ARGS_CHECKPOINT_FREQUENCY,
                              type=int_greater_or_equal(1),
                              default=4000,
                              help='Checkpoint and evaluate every x updates/batches. Default: %(default)s.')
    train_params.add_argument('--max-num-checkpoint-not-improved',
                              type=int,
                              default=32,
                              help='Maximum number of checkpoints the model is allowed to not improve in '
                                   '<optimized-metric> on validation data before training is stopped. '
                                   'Default: %(default)s.')
    train_params.add_argument('--min-num-epochs',
                              type=int,
                              default=None,
                              help='Minimum number of epochs (passes through the training data) '
                                   'before training can stop. Default: %(default)s.')
    train_params.add_argument('--max-num-epochs',
                              type=int,
                              default=None,
                              help='Maximum number of epochs (passes through the training data) Default: %(default)s.')
    # Dropout and architecture-specific training options.
    train_params.add_argument('--embed-dropout',
                              type=multiple_values(2, data_type=float),
                              default=(.0, .0),
                              help='Dropout probability for source & target embeddings. Use "x:x" to specify '
                                   'separate values. Default: %(default)s.')
    train_params.add_argument('--rnn-dropout-inputs',
                              type=multiple_values(2, data_type=float),
                              default=(.0, .0),
                              help='RNN variational dropout probability for encoder & decoder RNN inputs. (Gal, 2015)'
                                   'Use "x:x" to specify separate values. Default: %(default)s.')
    train_params.add_argument('--rnn-dropout-states',
                              type=multiple_values(2, data_type=float),
                              default=(.0, .0),
                              help='RNN variational dropout probability for encoder & decoder RNN states. (Gal, 2015)'
                                   'Use "x:x" to specify separate values. Default: %(default)s.')
    train_params.add_argument('--rnn-dropout-recurrent',
                              type=multiple_values(2, data_type=float),
                              default=(.0, .0),
                              help='Recurrent dropout without memory loss (Semeniuta, 2016) for encoder & decoder '
                                   'LSTMs. Use "x:x" to specify separate values. Default: %(default)s.')
    train_params.add_argument('--rnn-no-attention',
                              action="store_true",
                              help='Disable attention mechanism,')
    train_params.add_argument('--rnn-enc-last-hidden-concat-to-embedding',
                              action="store_true",
                              help='Concatenate the last hidden layer of the encoder to the input of the decoder, '
                                   'instead of the previous state of the decoder. Default: %(default)s.')
    train_params.add_argument('--rnn-decoder-hidden-dropout',
                              type=float,
                              default=.2,
                              help='Dropout probability for hidden state that combines the context with the '
                                   'RNN hidden state in the decoder. Default: %(default)s.')
    train_params.add_argument('--transformer-dropout-attention',
                              type=float,
                              default=0.1,
                              help='Dropout probability for multi-head attention. Default: %(default)s.')
    train_params.add_argument('--transformer-dropout-act',
                              type=float,
                              default=0.1,
                              help='Dropout probability before activation in feed-forward block. Default: %(default)s.')
    train_params.add_argument('--transformer-dropout-prepost',
                              type=float,
                              default=0.1,
                              help='Dropout probability for pre/postprocessing blocks. Default: %(default)s.')
    train_params.add_argument('--conv-embed-dropout',
                              type=float,
                              default=.0,
                              help="Dropout probability for ConvolutionalEmbeddingEncoder. Default: %(default)s.")
    train_params.add_argument('--cnn-hidden-dropout',
                              type=float,
                              default=.2,
                              help="Dropout probability for dropout between convolutional layers. Default: %(default)s.")
    # Optimizer and distributed-training (kvstore / gradient compression) settings.
    train_params.add_argument('--optimizer',
                              default=C.OPTIMIZER_ADAM,
                              choices=C.OPTIMIZERS,
                              help='SGD update rule. Default: %(default)s.')
    train_params.add_argument('--optimizer-params',
                              type=simple_dict(),
                              default=None,
                              help='Additional optimizer params as dictionary. Format: key1:value1,key2:value2,...')
    train_params.add_argument("--kvstore",
                              type=str,
                              default=C.KVSTORE_DEVICE,
                              choices=C.KVSTORE_TYPES,
                              help="The MXNet kvstore to use. 'device' is recommended for single process training. "
                                   "Use any of 'dist_sync', 'dist_device_sync' and 'dist_async' for distributed "
                                   "training. Default: %(default)s.")
    train_params.add_argument("--gradient-compression-type",
                              type=str,
                              default=C.GRADIENT_COMPRESSION_NONE,
                              choices=C.GRADIENT_COMPRESSION_TYPES,
                              help='Type of gradient compression to use. Default: %(default)s.')
    train_params.add_argument("--gradient-compression-threshold",
                              type=float,
                              default=0.5,
                              help="Threshold for gradient compression if --gctype is '2bit'. Default: %(default)s.")
    # Weight initialization.
    train_params.add_argument('--weight-init',
                              type=str,
                              default=C.INIT_XAVIER,
                              choices=C.INIT_TYPES,
                              help='Type of base weight initialization. Default: %(default)s.')
    train_params.add_argument('--weight-init-scale',
                              type=float,
                              default=3.0,
                              help='Weight initialization scale. Applies to uniform (scale) and xavier (magnitude). '
                                   'Default: %(default)s.')
    train_params.add_argument('--weight-init-xavier-factor-type',
                              type=str,
                              default=C.INIT_XAVIER_FACTOR_TYPE_AVG,
                              choices=C.INIT_XAVIER_FACTOR_TYPES,
                              help='Xavier factor type. Default: %(default)s.')
    train_params.add_argument('--weight-init-xavier-rand-type',
                              type=str,
                              default=C.RAND_TYPE_UNIFORM,
                              choices=[C.RAND_TYPE_UNIFORM, C.RAND_TYPE_GAUSSIAN],
                              help='Xavier random number generator type. Default: %(default)s.')
    train_params.add_argument('--embed-weight-init',
                              type=str,
                              default=C.EMBED_INIT_DEFAULT,
                              choices=C.EMBED_INIT_TYPES,
                              help='Type of embedding matrix weight initialization. If normal, initializes embedding '
                                   'weights using a normal distribution with std=1/srqt(vocab_size). '
                                   'Default: %(default)s.')
    # Learning rate, regularization and gradient clipping.
    train_params.add_argument('--initial-learning-rate',
                              type=float,
                              default=0.0002,
                              help='Initial learning rate. Default: %(default)s.')
    train_params.add_argument('--weight-decay',
                              type=float,
                              default=0.0,
                              help='Weight decay constant. Default: %(default)s.')
    train_params.add_argument('--momentum',
                              type=float,
                              default=None,
                              help='Momentum constant. Default: %(default)s.')
    train_params.add_argument('--gradient-clipping-threshold',
                              type=float,
                              default=1.0,
                              help='Clip absolute gradients values greater than this value. '
                                   'Set to negative to disable. Default: %(default)s.')
    train_params.add_argument('--gradient-clipping-type',
                              choices=C.GRADIENT_CLIPPING_TYPES,
                              default=C.GRADIENT_CLIPPING_TYPE_NONE,
                              help='The type of gradient clipping. Default: %(default)s.')
    # Learning rate scheduling.
    train_params.add_argument('--learning-rate-scheduler-type',
                              default=C.LR_SCHEDULER_PLATEAU_REDUCE,
                              choices=C.LR_SCHEDULERS,
                              help='Learning rate scheduler type. Default: %(default)s.')
    train_params.add_argument('--learning-rate-reduce-factor',
                              type=float,
                              default=0.7,
                              help="Factor to multiply learning rate with "
                                   "(for 'plateau-reduce' learning rate scheduler). Default: %(default)s.")
    train_params.add_argument('--learning-rate-reduce-num-not-improved',
                              type=int,
                              default=8,
                              help="For 'plateau-reduce' learning rate scheduler. Adjust learning rate "
                                   "if <optimized-metric> did not improve for x checkpoints. Default: %(default)s.")
    train_params.add_argument('--learning-rate-schedule',
                              type=learning_schedule(),
                              default=None,
                              help="For 'fixed-step' scheduler. Fully specified learning schedule in the form"
                                   " \"rate1:num_updates1[,rate2:num_updates2,...]\". Overrides all other args related"
                                   " to learning rate and stopping conditions. Default: %(default)s.")
    train_params.add_argument('--learning-rate-half-life',
                              type=float,
                              default=10,
                              help="Half-life of learning rate in checkpoints. For 'fixed-rate-*' "
                                   "learning rate schedulers. Default: %(default)s.")
    train_params.add_argument('--learning-rate-warmup',
                              type=int,
                              default=0,
                              help="Number of warmup steps. If set to x, linearly increases learning rate from 10%% "
                                   "to 100%% of the initial learning rate. Default: %(default)s.")
    train_params.add_argument('--learning-rate-decay-param-reset',
                              action='store_true',
                              help='Resets model parameters to current best when learning rate is reduced due to the '
                                   'value of --learning-rate-reduce-num-not-improved. Default: %(default)s.')
    train_params.add_argument('--learning-rate-decay-optimizer-states-reset',
                              choices=C.LR_DECAY_OPT_STATES_RESET_CHOICES,
                              default=C.LR_DECAY_OPT_STATES_RESET_OFF,
                              help="Action to take on optimizer states (e.g. Adam states) when learning rate is "
                                   "reduced due to the value of --learning-rate-reduce-num-not-improved. "
                                   "Default: %(default)s.")
    # RNN-specific parameter initialization.
    train_params.add_argument('--rnn-forget-bias',
                              default=0.0,
                              type=float,
                              help='Initial value of RNN forget biases.')
    train_params.add_argument('--rnn-h2h-init', type=str, default=C.RNN_INIT_ORTHOGONAL,
                              choices=[C.RNN_INIT_ORTHOGONAL, C.RNN_INIT_ORTHOGONAL_STACKED, C.RNN_INIT_DEFAULT],
                              help="Initialization method for RNN parameters. Default: %(default)s.")
    train_params.add_argument('--fixed-param-names',
                              default=[],
                              nargs='*',
                              help="Names of parameters to fix at training time. Default: %(default)s.")
    # Checkpoint decoding / evaluation on validation data.
    train_params.add_argument(C.TRAIN_ARGS_MONITOR_BLEU,
                              default=500,
                              type=int,
                              help='x>0: decode x sampled sentences from validation data and '
                                   'compute evaluation metrics. x==-1: use full validation data. Default: %(default)s.')
    train_params.add_argument('--decode-and-evaluate-use-cpu',
                              action='store_true',
                              help='Use CPU for decoding validation data. Overrides --decode-and-evaluate-device-id. '
                                   'Default: %(default)s.')
    train_params.add_argument('--decode-and-evaluate-device-id',
                              default=None,
                              type=int,
                              help='Separate device for decoding validation data. '
                                   'Use a negative number to automatically acquire a GPU. '
                                   'Use a positive number to acquire a specific GPU. Default: %(default)s.')
    # Miscellaneous run controls.
    train_params.add_argument('--seed',
                              type=int,
                              default=13,
                              help='Random seed. Default: %(default)s.')
    train_params.add_argument('--keep-last-params',
                              type=int,
                              default=-1,
                              help='Keep only the last n params files, use -1 to keep all files. Default: %(default)s')
    train_params.add_argument('--keep-initializations',
                              action="store_true",
                              help='In addition to keeping the last n params files, also keep params from checkpoint 0.')
    train_params.add_argument('--dry-run',
                              action='store_true',
                              help="Do not perform any actual training, but print statistics about the model"
                                   " and mode of operation.")
def add_train_cli_args(params):
    """Add the complete argument set for the training CLI
    (data & I/O, model architecture, training, device, and logging options)."""
    add_training_io_args(params)
    add_model_parameters(params)
    add_training_args(params)
    add_device_args(params)
    add_logging_args(params)
def add_translate_cli_args(params):
    """Add the complete argument set for the translation CLI
    (inference, device, and logging options)."""
    add_inference_args(params)
    add_device_args(params)
    add_logging_args(params)
def add_score_cli_args(params):
    """Add the complete argument set for the scoring CLI: shared data/vocab/
    device/logging/batch options plus the "Scoring parameters" group."""
    add_training_data_args(params, required=False)
    add_vocab_args(params)
    add_device_args(params)
    add_logging_args(params)
    add_batch_args(params, default_batch_size=500)

    params = params.add_argument_group("Scoring parameters")

    params.add_argument("--model", "-m", required=True,
                        help="Model directory containing trained model.")

    params.add_argument('--max-seq-len',
                        type=multiple_values(num_values=2, greater_or_equal=1),
                        default=None,
                        # Fixed: the two help fragments previously concatenated to "tokens.Use"
                        # (missing space between sentences).
                        help='Maximum sequence length in tokens. '
                             'Use "x:x" to specify separate values for src&tgt. Default: Read from model.')

    params.add_argument('--length-penalty-alpha',
                        default=1.0,
                        type=float,
                        help='Alpha factor for the length penalty used in scoring: '
                             '(beta + len(Y))**alpha/(beta + 1)**alpha. A value of 0.0 will therefore turn off '
                             'length normalization. Default: %(default)s')

    params.add_argument('--length-penalty-beta',
                        default=0.0,
                        type=float,
                        help='Beta factor for the length penalty used in scoring: '
                             '(beta + len(Y))**alpha/(beta + 1)**alpha. Default: %(default)s')

    params.add_argument('--softmax-temperature',
                        type=float,
                        default=None,
                        help='Controls peakiness of model predictions. Values < 1.0 produce '
                             'peaked predictions, values > 1.0 produce smoothed distributions.')

    params.add_argument("--output", "-o", default=None,
                        help="File to write output to. Default: STDOUT.")

    params.add_argument('--output-type',
                        default=C.OUTPUT_HANDLER_SCORE,
                        choices=C.OUTPUT_HANDLERS_SCORING,
                        help='Output type. Default: %(default)s.')

    params.add_argument('--score-type',
                        choices=C.SCORING_TYPE_CHOICES,
                        default=C.SCORING_TYPE_DEFAULT,
                        help='Score type to output. Default: %(default)s')
def add_max_output_cli_args(params):
    """Add the option limiting the length of generated translations."""
    params.add_argument(
        '--max-output-length',
        default=None,
        type=int,
        help='Maximum number of words to generate during translation. '
             'If None, it will be computed automatically. Default: %(default)s.')
def add_inference_args(params):
decode_params = params.add_argument_group("Inference parameters")
decode_params.add_argument(C.INFERENCE_ARG_INPUT_LONG, C.INFERENCE_ARG_INPUT_SHORT,
default=None,
help='Input file to translate. One sentence per line. '
'If not given, will read from stdin.')
decode_params.add_argument(C.INFERENCE_ARG_INPUT_FACTORS_LONG, C.INFERENCE_ARG_INPUT_FACTORS_SHORT,
required=False,
nargs='+',
type=regular_file(),
default=None,
help='List of input files containing additional source factors,'
'each token-parallel to the source. Default: %(default)s.')
decode_params.add_argument('--json-input',
action='store_true',
default=False,
help="If given, the CLI expects string-serialized json objects as input."
"Requires at least the input text field, for example: "
"{'text': 'some input string'} "
"Optionally, a list of factors can be provided: "
"{'text': 'some input string', 'factors': ['C C C', 'X X X']}.")
decode_params.add_argument(C.INFERENCE_ARG_OUTPUT_LONG, C.INFERENCE_ARG_OUTPUT_SHORT,
default=None,
help='Output file to write translations to. '
'If not given, will write to stdout.')
decode_params.add_argument('--models', '-m',
required=True,
nargs='+',
help='Model folder(s). Use multiple for ensemble decoding. '
'Model determines config, best parameters and vocab files.')
decode_params.add_argument('--checkpoints', '-c',
default=None,
type=int,
nargs='+',
help='If not given, chooses best checkpoints for model(s). '
'If specified, must have the same length as --models and be integer')
decode_params.add_argument('--nbest-size',
type=int_greater_or_equal(1),
default=1,
help='Size of the nbest list of translations. Default: %(default)s.')
decode_params.add_argument('--beam-size', '-b',
type=int_greater_or_equal(1),
default=5,
help='Size of the beam. Default: %(default)s.')
decode_params.add_argument('--beam-prune', '-p',
type=float,
default=0,
help='Pruning threshold for beam search. All hypotheses with scores not within '
'this amount of the best finished hypothesis are discarded (0 = off). '
'Default: %(default)s.')
decode_params.add_argument('--beam-search-stop',
choices=[C.BEAM_SEARCH_STOP_ALL, C.BEAM_SEARCH_STOP_FIRST],
default=C.BEAM_SEARCH_STOP_ALL,
help='Stopping criteria. Quit when (all) hypotheses are finished '
'or when a finished hypothesis is in (first) position. Default: %(default)s.')
decode_params.add_argument('--batch-size',
type=int_greater_or_equal(1),
default=1,
help='Batch size during decoding. Determines how many sentences are translated '
'simultaneously. Default: %(default)s.')
decode_params.add_argument('--chunk-size',
type=int_greater_or_equal(1),
default=None,
help='Size of the chunks to be read from input at once. The chunks are sorted and then '
'split into batches. Therefore the larger the chunk size the better the grouping '
'of segments of similar length and therefore the higher the increase in throughput.'
' Default: %d without batching '
'and %d * batch_size with batching.' % (C.CHUNK_SIZE_NO_BATCHING,
C.CHUNK_SIZE_PER_BATCH_SEGMENT))
decode_params.add_argument('--skip-topk',
default=False,
action='store_true',
help='Use argmax instead of topk for greedy decoding (when --beam-size 1).'
'Default: %(default)s.')
decode_params.add_argument('--sample',
type=int_greater_or_equal(0),
default=None,
nargs='?',
const=0,
help='Sample from softmax instead of taking best. Optional argument will restrict '
'sampling to top N vocabulary items at each step. Default: %(default)s.')
decode_params.add_argument('--seed',
type=int,
default=None,
help='Random seed used if sampling. Default: %(default)s.')
decode_params.add_argument('--ensemble-mode',
type=str,
default='linear',
choices=['linear', 'log_linear'],
help='Ensemble mode. Default: %(default)s.')
decode_params.add_argument('--bucket-width',
type=int_greater_or_equal(0),
default=10,
help='Bucket width for encoder steps. 0 means no bucketing. Default: %(default)s.')
decode_params.add_argument('--max-input-len', '-n',
type=int,
default=None,
help='Maximum input sequence length. Default: value from model(s).')
decode_params.add_argument('--softmax-temperature',
type=float,
default=None,
help='Controls peakiness of model predictions. Values < 1.0 produce '
'peaked predictions, values > 1.0 produce smoothed distributions.')
decode_params.add_argument('--max-output-length-num-stds',
type=int,
default=C.DEFAULT_NUM_STD_MAX_OUTPUT_LENGTH,
help='Number of target-to-source length ratio standard deviations from training to add '
'to calculate maximum output length for beam search for each sentence. '
'Default: %(default)s.')
decode_params.add_argument('--restrict-lexicon',
type=str,
default=None,
help="Specify top-k lexicon to restrict output vocabulary based on source. See lexicon "
"module. Default: %(default)s.")
decode_params.add_argument('--restrict-lexicon-topk',
type=int,
default=None,
help="Specify the number of translations to load for each source word from the lexicon "
"given with --restrict-lexicon. Default: Load all entries from the lexicon.")
decode_params.add_argument('--avoid-list',
type=str,
default=None,
help="Specify a file containing phrases (pre-processed, one per line) to block "
"from the output. Default: %(default)s.")
decode_params.add_argument('--strip-unknown-words',
action='store_true',
default=False,
help='Remove any <unk> symbols from outputs. Default: %(default)s.')
decode_params.add_argument('--output-type',
default='translation',
choices=C.OUTPUT_HANDLERS,
help='Output type. Default: %(default)s.')
decode_params.add_argument('--sure-align-threshold',
default=0.9,
type=float,
help='Threshold to consider a soft alignment a sure alignment. Default: %(default)s')
decode_params.add_argument('--length-penalty-alpha',
default=1.0,
type=float,
help='Alpha factor for the length penalty used in beam search: '
'(beta + len(Y))**alpha/(beta + 1)**alpha. A value of 0.0 will therefore turn off '
'length normalization. Default: %(default)s')
decode_params.add_argument('--length-penalty-beta',
default=0.0,
type=float,
help='Beta factor for the length penalty used in beam search: '
'(beta + len(Y))**alpha/(beta + 1)**alpha. Default: %(default)s')
decode_params.add_argument('--override-dtype',
default=None,
type=str,
help='EXPERIMENTAL: may be changed or removed in future. Overrides training dtype of '
'encoders and decoders during inference. Default: %(default)s')
def add_evaluate_args(params):
eval_params = params.add_argument_group("Evaluate parameters")
eval_params.add_argument('--references', '-r',
required=True,
type=str,
help="File with references.")
eval_params.add_argument('--hypotheses', '-i',
type=file_or_stdin(),
default=[sys.stdin],
nargs='+',
help="File(s) with hypotheses. If none will read from stdin. Default: stdin.")
eval_params.add_argument('--metrics',
nargs='+',
choices=C.EVALUATE_METRICS,
default=[C.BLEU, C.CHRF],
help='List of metrics to compute. Default: %(default)s.')
eval_params.add_argument('--sentence', '-s',
action="store_true",
help="Show sentence-level metrics. Default: %(default)s.")
eval_params.add_argument('--offset',
type=float,
default=0.01,
help="Numerical value of the offset of zero n-gram counts for BLEU. Default: %(default)s.")
eval_params.add_argument('--not-strict', '-n',
action="store_true",
help="Do not fail if number of hypotheses does not match number of references. "
"Default: %(default)s.")
def add_build_vocab_args(params):
params.add_argument('-i', '--inputs', required=True, nargs='+', help='List of text files to build vocabulary from.')
params.add_argument('-o', '--output', required=True, type=str, help="Output filename to write vocabulary to.")
add_vocab_args(params)
def add_init_embedding_args(params):
params.add_argument('--weight-files', '-w', required=True, nargs='+',
help='List of input weight files in .npy, .npz or Sockeye parameter format.')
params.add_argument('--vocabularies-in', '-i', required=True, nargs='+',
help='List of input vocabularies as token-index dictionaries in .json format.')
params.add_argument('--vocabularies-out', '-o', required=True, nargs='+',
help='List of output vocabularies as token-index dictionaries in .json format.')
params.add_argument('--names', '-n', nargs='+',
help='List of Sockeye parameter names for (embedding) weights. Default: %(default)s.',
default=[n + "weight" for n in [C.SOURCE_EMBEDDING_PREFIX, C.TARGET_EMBEDDING_PREFIX]])
params.add_argument('--file', '-f', required=True,
help='File to write initialized parameters to.')
params.add_argument('--encoding', '-c', type=str, default=C.VOCAB_ENCODING,
help='Open input vocabularies with specified encoding. Default: %(default)s.')
| true | true |
f7224822f3ec084a854bf90a661c91aafcf101c5 | 12,571 | py | Python | recipes/VoxCeleb/SpeakerRec/speaker_verification_plda.py | pnsafari/speechbrain | 3a6956a838f3796ff6d041ee6a20bcdea55794cb | [
"Apache-2.0"
] | null | null | null | recipes/VoxCeleb/SpeakerRec/speaker_verification_plda.py | pnsafari/speechbrain | 3a6956a838f3796ff6d041ee6a20bcdea55794cb | [
"Apache-2.0"
] | null | null | null | recipes/VoxCeleb/SpeakerRec/speaker_verification_plda.py | pnsafari/speechbrain | 3a6956a838f3796ff6d041ee6a20bcdea55794cb | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
"""Recipe for training a speaker verification system based on PLDA using the voxceleb dataset.
The system employs a pre-trained model followed by a PLDA transformation.
The pre-trained model is automatically downloaded from the web if not specified.
To run this recipe, run the following command:
> python speaker_verification_plda.py hyperparams/verification_plda_xvector.yaml
Authors
* Nauman Dawalatabad 2020
* Mirco Ravanelli 2020
"""
import os
import sys
import torch
import torchaudio
import logging
import speechbrain as sb
import numpy
import pickle
from tqdm.contrib import tqdm
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.metric_stats import EER, minDCF
from speechbrain.processing.PLDA_LDA import StatObject_SB
from speechbrain.processing.PLDA_LDA import Ndx
from speechbrain.processing.PLDA_LDA import fast_PLDA_scoring
from speechbrain.utils.data_utils import download_file
from speechbrain.utils.distributed import run_on_main
# Compute embeddings from the waveforms
def compute_embeddings(wavs, wav_lens):
"""Compute speaker embeddings.
Arguments
---------
wavs : Torch.Tensor
Tensor containing the speech waveform (batch, time).
Make sure the sample rate is fs=16000 Hz.
wav_lens: Torch.Tensor
Tensor containing the relative length for each sentence
in the length (e.g., [0.8 0.6 1.0])
"""
wavs = wavs.to(params["device"])
wav_lens = wav_lens.to(params["device"])
with torch.no_grad():
feats = params["compute_features"](wavs)
feats = params["mean_var_norm"](feats, wav_lens)
embeddings = params["embedding_model"](feats, wav_lens)
embeddings = params["mean_var_norm_emb"](
embeddings, torch.ones(embeddings.shape[0]).to(embeddings.device)
)
return embeddings.squeeze(1)
def emb_computation_loop(split, set_loader, stat_file):
"""Computes the embeddings and saves the in a stat file"""
# Extract embeddings (skip if already done)
if not os.path.isfile(stat_file):
embeddings = numpy.empty(
shape=[0, params["emb_dim"]], dtype=numpy.float64
)
modelset = []
segset = []
with tqdm(set_loader, dynamic_ncols=True) as t:
for batch in t:
ids = batch.id
wavs, lens = batch.sig
mod = [x for x in ids]
seg = [x for x in ids]
modelset = modelset + mod
segset = segset + seg
# Enrollment and test embeddings
embs = compute_embeddings(wavs, lens)
xv = embs.squeeze().cpu().numpy()
embeddings = numpy.concatenate((embeddings, xv), axis=0)
modelset = numpy.array(modelset, dtype="|O")
segset = numpy.array(segset, dtype="|O")
# Intialize variables for start, stop and stat0
s = numpy.array([None] * embeddings.shape[0])
b = numpy.array([[1.0]] * embeddings.shape[0])
# Stat object (used to collect embeddings)
stat_obj = StatObject_SB(
modelset=modelset,
segset=segset,
start=s,
stop=s,
stat0=b,
stat1=embeddings,
)
logger.info(f"Saving stat obj for {split}")
stat_obj.save_stat_object(stat_file)
else:
logger.info(f"Skipping embedding Extraction for {split}")
logger.info(f"Loading previously saved stat_object for {split}")
with open(stat_file, "rb") as input:
stat_obj = pickle.load(input)
return stat_obj
def verification_performance(scores_plda):
"""Computes the Equal Error Rate give the PLDA scores"""
# Create ids, labels, and scoring list for EER evaluation
ids = []
labels = []
positive_scores = []
negative_scores = []
for line in open(veri_file_path):
lab = int(line.split(" ")[0].rstrip().split(".")[0].strip())
enrol_id = line.split(" ")[1].rstrip().split(".")[0].strip()
test_id = line.split(" ")[2].rstrip().split(".")[0].strip()
# Assuming enrol_id and test_id are unique
i = int(numpy.where(scores_plda.modelset == enrol_id)[0][0])
j = int(numpy.where(scores_plda.segset == test_id)[0][0])
s = float(scores_plda.scoremat[i, j])
labels.append(lab)
ids.append(enrol_id + "<>" + test_id)
if lab == 1:
positive_scores.append(s)
else:
negative_scores.append(s)
# Clean variable
del scores_plda
# Final EER computation
eer, th = EER(torch.tensor(positive_scores), torch.tensor(negative_scores))
min_dcf, th = minDCF(
torch.tensor(positive_scores), torch.tensor(negative_scores)
)
return eer, min_dcf
# Function to get mod and seg
def get_utt_ids_for_test(ids, data_dict):
mod = [data_dict[x]["wav1"]["data"] for x in ids]
seg = [data_dict[x]["wav2"]["data"] for x in ids]
return mod, seg
def dataio_prep(params):
"Creates the dataloaders and their data processing pipelines."
data_folder = params["data_folder"]
# 1. Declarations:
# Train data (used for normalization)
train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=params["train_data"], replacements={"data_root": data_folder},
)
train_data = train_data.filtered_sorted(
sort_key="duration", select_n=params["n_train_snts"]
)
# Enrol data
enrol_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=params["enrol_data"], replacements={"data_root": data_folder},
)
enrol_data = enrol_data.filtered_sorted(sort_key="duration")
# Test data
test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=params["test_data"], replacements={"data_root": data_folder},
)
test_data = test_data.filtered_sorted(sort_key="duration")
datasets = [train_data, enrol_data, test_data]
# 2. Define audio pipeline:
@sb.utils.data_pipeline.takes("wav", "start", "stop")
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav, start, stop):
start = int(start)
stop = int(stop)
num_frames = stop - start
sig, fs = torchaudio.load(
wav, num_frames=num_frames, frame_offset=start
)
sig = sig.transpose(0, 1).squeeze(1)
return sig
sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
# 3. Set output:
sb.dataio.dataset.set_output_keys(datasets, ["id", "sig", "spk_id"])
# 4 Create dataloaders
train_dataloader = sb.dataio.dataloader.make_dataloader(
train_data, **params["train_dataloader_opts"]
)
enrol_dataloader = sb.dataio.dataloader.make_dataloader(
enrol_data, **params["enrol_dataloader_opts"]
)
test_dataloader = sb.dataio.dataloader.make_dataloader(
test_data, **params["test_dataloader_opts"]
)
return train_dataloader, enrol_dataloader, test_dataloader
if __name__ == "__main__":
# Logger setup
logger = logging.getLogger(__name__)
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(current_dir))
# Load hyperparameters file with command-line overrides
params_file, run_opts, overrides = sb.core.parse_arguments(sys.argv[1:])
with open(params_file) as fin:
params = load_hyperpyyaml(fin, overrides)
# Download verification list (to exlude verification sentences from train)
veri_file_path = os.path.join(
params["save_folder"], os.path.basename(params["verification_file"])
)
download_file(params["verification_file"], veri_file_path)
from voxceleb_prepare import prepare_voxceleb # noqa E402
# Create experiment directory
sb.core.create_experiment_directory(
experiment_directory=params["output_folder"],
hyperparams_to_save=params_file,
overrides=overrides,
)
# Prepare data from dev of Voxceleb1
logger.info("Data preparation")
prepare_voxceleb(
data_folder=params["data_folder"],
save_folder=params["save_folder"],
verification_pairs_file=veri_file_path,
splits=["train", "test"],
split_ratio=[90, 10],
seg_dur=3,
)
# here we create the datasets objects as well as tokenization and encoding
train_dataloader, enrol_dataloader, test_dataloader = dataio_prep(params)
# Initialize PLDA vars
modelset, segset = [], []
embeddings = numpy.empty(shape=[0, params["emb_dim"]], dtype=numpy.float64)
# Embedding file for train data
xv_file = os.path.join(
params["save_folder"], "VoxCeleb1_train_embeddings_stat_obj.pkl"
)
# We download the pretrained LM from HuggingFace (or elsewhere depending on
# the path given in the YAML file). The tokenizer is loaded at the same time.
run_on_main(params["pretrainer"].collect_files)
params["pretrainer"].load_collected()
params["embedding_model"].eval()
params["embedding_model"].to(params["device"])
# Computing training embeddings (skip it of if already extracted)
if not os.path.exists(xv_file):
logger.info("Extracting embeddings from Training set..")
with tqdm(train_dataloader, dynamic_ncols=True) as t:
for batch in t:
snt_id = batch.id
wav, lens = batch.sig
spk_ids = batch.spk_id
# Flattening speaker ids
modelset = modelset + spk_ids
# For segset
segset = segset + snt_id
# Compute embeddings
emb = compute_embeddings(wav, lens)
xv = emb.squeeze(1).cpu().numpy()
embeddings = numpy.concatenate((embeddings, xv), axis=0)
# Speaker IDs and utterance IDs
modelset = numpy.array(modelset, dtype="|O")
segset = numpy.array(segset, dtype="|O")
# Intialize variables for start, stop and stat0
s = numpy.array([None] * embeddings.shape[0])
b = numpy.array([[1.0]] * embeddings.shape[0])
embeddings_stat = StatObject_SB(
modelset=modelset,
segset=segset,
start=s,
stop=s,
stat0=b,
stat1=embeddings,
)
del embeddings
# Save TRAINING embeddings in StatObject_SB object
embeddings_stat.save_stat_object(xv_file)
else:
# Load the saved stat object for train embedding
logger.info("Skipping embedding Extraction for training set")
logger.info(
"Loading previously saved stat_object for train embeddings.."
)
with open(xv_file, "rb") as input:
embeddings_stat = pickle.load(input)
# Training Gaussian PLDA model
logger.info("Training PLDA model")
params["compute_plda"].plda(embeddings_stat)
logger.info("PLDA training completed")
# Set paths for enrol/test embeddings
enrol_stat_file = os.path.join(params["save_folder"], "stat_enrol.pkl")
test_stat_file = os.path.join(params["save_folder"], "stat_test.pkl")
ndx_file = os.path.join(params["save_folder"], "ndx.pkl")
# Compute enrol and Test embeddings
enrol_obj = emb_computation_loop("enrol", enrol_dataloader, enrol_stat_file)
test_obj = emb_computation_loop("test", test_dataloader, test_stat_file)
# Prepare Ndx Object
if not os.path.isfile(ndx_file):
models = enrol_obj.modelset
testsegs = test_obj.modelset
logger.info("Preparing Ndx")
ndx_obj = Ndx(models=models, testsegs=testsegs)
logger.info("Saving ndx obj...")
ndx_obj.save_ndx_object(ndx_file)
else:
logger.info("Skipping Ndx preparation")
logger.info("Loading Ndx from disk")
with open(ndx_file, "rb") as input:
ndx_obj = pickle.load(input)
# PLDA scoring
logger.info("PLDA scoring...")
scores_plda = fast_PLDA_scoring(
enrol_obj,
test_obj,
ndx_obj,
params["compute_plda"].mean,
params["compute_plda"].F,
params["compute_plda"].Sigma,
)
logger.info("Computing EER... ")
# Cleaning variable
del enrol_dataloader
del test_dataloader
del enrol_obj
del test_obj
del embeddings_stat
# Final EER computation
eer, min_dcf = verification_performance(scores_plda)
logger.info("EER(%%)=%f", eer * 100)
logger.info("min_dcf=%f", min_dcf * 100)
| 33.522667 | 94 | 0.649988 |
import os
import sys
import torch
import torchaudio
import logging
import speechbrain as sb
import numpy
import pickle
from tqdm.contrib import tqdm
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.metric_stats import EER, minDCF
from speechbrain.processing.PLDA_LDA import StatObject_SB
from speechbrain.processing.PLDA_LDA import Ndx
from speechbrain.processing.PLDA_LDA import fast_PLDA_scoring
from speechbrain.utils.data_utils import download_file
from speechbrain.utils.distributed import run_on_main
def compute_embeddings(wavs, wav_lens):
wavs = wavs.to(params["device"])
wav_lens = wav_lens.to(params["device"])
with torch.no_grad():
feats = params["compute_features"](wavs)
feats = params["mean_var_norm"](feats, wav_lens)
embeddings = params["embedding_model"](feats, wav_lens)
embeddings = params["mean_var_norm_emb"](
embeddings, torch.ones(embeddings.shape[0]).to(embeddings.device)
)
return embeddings.squeeze(1)
def emb_computation_loop(split, set_loader, stat_file):
if not os.path.isfile(stat_file):
embeddings = numpy.empty(
shape=[0, params["emb_dim"]], dtype=numpy.float64
)
modelset = []
segset = []
with tqdm(set_loader, dynamic_ncols=True) as t:
for batch in t:
ids = batch.id
wavs, lens = batch.sig
mod = [x for x in ids]
seg = [x for x in ids]
modelset = modelset + mod
segset = segset + seg
embs = compute_embeddings(wavs, lens)
xv = embs.squeeze().cpu().numpy()
embeddings = numpy.concatenate((embeddings, xv), axis=0)
modelset = numpy.array(modelset, dtype="|O")
segset = numpy.array(segset, dtype="|O")
s = numpy.array([None] * embeddings.shape[0])
b = numpy.array([[1.0]] * embeddings.shape[0])
stat_obj = StatObject_SB(
modelset=modelset,
segset=segset,
start=s,
stop=s,
stat0=b,
stat1=embeddings,
)
logger.info(f"Saving stat obj for {split}")
stat_obj.save_stat_object(stat_file)
else:
logger.info(f"Skipping embedding Extraction for {split}")
logger.info(f"Loading previously saved stat_object for {split}")
with open(stat_file, "rb") as input:
stat_obj = pickle.load(input)
return stat_obj
def verification_performance(scores_plda):
ids = []
labels = []
positive_scores = []
negative_scores = []
for line in open(veri_file_path):
lab = int(line.split(" ")[0].rstrip().split(".")[0].strip())
enrol_id = line.split(" ")[1].rstrip().split(".")[0].strip()
test_id = line.split(" ")[2].rstrip().split(".")[0].strip()
i = int(numpy.where(scores_plda.modelset == enrol_id)[0][0])
j = int(numpy.where(scores_plda.segset == test_id)[0][0])
s = float(scores_plda.scoremat[i, j])
labels.append(lab)
ids.append(enrol_id + "<>" + test_id)
if lab == 1:
positive_scores.append(s)
else:
negative_scores.append(s)
del scores_plda
eer, th = EER(torch.tensor(positive_scores), torch.tensor(negative_scores))
min_dcf, th = minDCF(
torch.tensor(positive_scores), torch.tensor(negative_scores)
)
return eer, min_dcf
def get_utt_ids_for_test(ids, data_dict):
mod = [data_dict[x]["wav1"]["data"] for x in ids]
seg = [data_dict[x]["wav2"]["data"] for x in ids]
return mod, seg
def dataio_prep(params):
data_folder = params["data_folder"]
train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=params["train_data"], replacements={"data_root": data_folder},
)
train_data = train_data.filtered_sorted(
sort_key="duration", select_n=params["n_train_snts"]
)
enrol_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=params["enrol_data"], replacements={"data_root": data_folder},
)
enrol_data = enrol_data.filtered_sorted(sort_key="duration")
test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=params["test_data"], replacements={"data_root": data_folder},
)
test_data = test_data.filtered_sorted(sort_key="duration")
datasets = [train_data, enrol_data, test_data]
@sb.utils.data_pipeline.takes("wav", "start", "stop")
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav, start, stop):
start = int(start)
stop = int(stop)
num_frames = stop - start
sig, fs = torchaudio.load(
wav, num_frames=num_frames, frame_offset=start
)
sig = sig.transpose(0, 1).squeeze(1)
return sig
sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
sb.dataio.dataset.set_output_keys(datasets, ["id", "sig", "spk_id"])
train_dataloader = sb.dataio.dataloader.make_dataloader(
train_data, **params["train_dataloader_opts"]
)
enrol_dataloader = sb.dataio.dataloader.make_dataloader(
enrol_data, **params["enrol_dataloader_opts"]
)
test_dataloader = sb.dataio.dataloader.make_dataloader(
test_data, **params["test_dataloader_opts"]
)
return train_dataloader, enrol_dataloader, test_dataloader
if __name__ == "__main__":
logger = logging.getLogger(__name__)
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(current_dir))
params_file, run_opts, overrides = sb.core.parse_arguments(sys.argv[1:])
with open(params_file) as fin:
params = load_hyperpyyaml(fin, overrides)
veri_file_path = os.path.join(
params["save_folder"], os.path.basename(params["verification_file"])
)
download_file(params["verification_file"], veri_file_path)
from voxceleb_prepare import prepare_voxceleb
sb.core.create_experiment_directory(
experiment_directory=params["output_folder"],
hyperparams_to_save=params_file,
overrides=overrides,
)
logger.info("Data preparation")
prepare_voxceleb(
data_folder=params["data_folder"],
save_folder=params["save_folder"],
verification_pairs_file=veri_file_path,
splits=["train", "test"],
split_ratio=[90, 10],
seg_dur=3,
)
train_dataloader, enrol_dataloader, test_dataloader = dataio_prep(params)
modelset, segset = [], []
embeddings = numpy.empty(shape=[0, params["emb_dim"]], dtype=numpy.float64)
xv_file = os.path.join(
params["save_folder"], "VoxCeleb1_train_embeddings_stat_obj.pkl"
)
run_on_main(params["pretrainer"].collect_files)
params["pretrainer"].load_collected()
params["embedding_model"].eval()
params["embedding_model"].to(params["device"])
if not os.path.exists(xv_file):
logger.info("Extracting embeddings from Training set..")
with tqdm(train_dataloader, dynamic_ncols=True) as t:
for batch in t:
snt_id = batch.id
wav, lens = batch.sig
spk_ids = batch.spk_id
modelset = modelset + spk_ids
segset = segset + snt_id
emb = compute_embeddings(wav, lens)
xv = emb.squeeze(1).cpu().numpy()
embeddings = numpy.concatenate((embeddings, xv), axis=0)
modelset = numpy.array(modelset, dtype="|O")
segset = numpy.array(segset, dtype="|O")
s = numpy.array([None] * embeddings.shape[0])
b = numpy.array([[1.0]] * embeddings.shape[0])
embeddings_stat = StatObject_SB(
modelset=modelset,
segset=segset,
start=s,
stop=s,
stat0=b,
stat1=embeddings,
)
del embeddings
embeddings_stat.save_stat_object(xv_file)
else:
logger.info("Skipping embedding Extraction for training set")
logger.info(
"Loading previously saved stat_object for train embeddings.."
)
with open(xv_file, "rb") as input:
embeddings_stat = pickle.load(input)
logger.info("Training PLDA model")
params["compute_plda"].plda(embeddings_stat)
logger.info("PLDA training completed")
enrol_stat_file = os.path.join(params["save_folder"], "stat_enrol.pkl")
test_stat_file = os.path.join(params["save_folder"], "stat_test.pkl")
ndx_file = os.path.join(params["save_folder"], "ndx.pkl")
enrol_obj = emb_computation_loop("enrol", enrol_dataloader, enrol_stat_file)
test_obj = emb_computation_loop("test", test_dataloader, test_stat_file)
if not os.path.isfile(ndx_file):
models = enrol_obj.modelset
testsegs = test_obj.modelset
logger.info("Preparing Ndx")
ndx_obj = Ndx(models=models, testsegs=testsegs)
logger.info("Saving ndx obj...")
ndx_obj.save_ndx_object(ndx_file)
else:
logger.info("Skipping Ndx preparation")
logger.info("Loading Ndx from disk")
with open(ndx_file, "rb") as input:
ndx_obj = pickle.load(input)
logger.info("PLDA scoring...")
scores_plda = fast_PLDA_scoring(
enrol_obj,
test_obj,
ndx_obj,
params["compute_plda"].mean,
params["compute_plda"].F,
params["compute_plda"].Sigma,
)
logger.info("Computing EER... ")
del enrol_dataloader
del test_dataloader
del enrol_obj
del test_obj
del embeddings_stat
eer, min_dcf = verification_performance(scores_plda)
logger.info("EER(%%)=%f", eer * 100)
logger.info("min_dcf=%f", min_dcf * 100)
| true | true |
f722484da1558b8fe0419c64530f9b4438f8aa08 | 1,757 | py | Python | ratcave/utils/vertices.py | aforren1/ratcave | e3862cdaba100ac2c6c78c08c4b09638e0c88fd4 | [
"MIT"
] | null | null | null | ratcave/utils/vertices.py | aforren1/ratcave | e3862cdaba100ac2c6c78c08c4b09638e0c88fd4 | [
"MIT"
] | 1 | 2018-06-09T14:53:19.000Z | 2018-06-09T14:53:19.000Z | ratcave/utils/vertices.py | aforren1/ratcave | e3862cdaba100ac2c6c78c08c4b09638e0c88fd4 | [
"MIT"
] | null | null | null | import itertools
import numpy as np
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = itertools.tee(iterable)
next(b, None)
return zip(a, b)
def struct_to_ndarray(array):
"""Turns returns a view of a structured array as a regular ndarray."""
return array.view(array.dtype[0]).reshape((array.shape[0], -1))
def reindex_vertices(arrays=None):
all_arrays = np.hstack(arrays)
array_ncols = tuple(array.shape[1] for array in arrays)
# Build a new array list, composed of only the unique combinations (no redundant data)
row_searchable_array = all_arrays.view(all_arrays.dtype.descr * all_arrays.shape[1])
unique_combs = np.sort(np.unique(row_searchable_array))
new_indices = np.array([np.searchsorted(unique_combs, vert) for vert in row_searchable_array]).flatten().astype(np.uint32)
ucombs = struct_to_ndarray(unique_combs)
new_arrays = tuple(ucombs[:, start:end] for start, end in pairwise(np.append(0, np.cumsum(array_ncols))))
new_arrays = tuple(np.array(array, dtype=np.float32) for array in new_arrays)
return new_arrays, new_indices
def calculate_normals(vertices):
"""Return Nx3 normal array from Nx3 vertex array."""
verts = np.array(vertices, dtype=float)
normals = np.zeros_like(verts)
for start, end in pairwise(np.arange(0, verts.shape[0] + 1, 3)):
vecs = np.vstack((verts[start + 1] - verts[start], verts[start + 2] - verts[start])) # Get triangle of vertices and calculate 2-1 and 3-1
vecs /= np.linalg.norm(vecs, axis=1, keepdims=True) # normalize vectors
normal = np.cross(*vecs) # normal is the cross products of vectors.
normals[start:end, :] = normal / np.linalg.norm(normal)
return normals
| 40.860465 | 146 | 0.692658 | import itertools
import numpy as np
def pairwise(iterable):
a, b = itertools.tee(iterable)
next(b, None)
return zip(a, b)
def struct_to_ndarray(array):
return array.view(array.dtype[0]).reshape((array.shape[0], -1))
def reindex_vertices(arrays=None):
all_arrays = np.hstack(arrays)
array_ncols = tuple(array.shape[1] for array in arrays)
row_searchable_array = all_arrays.view(all_arrays.dtype.descr * all_arrays.shape[1])
unique_combs = np.sort(np.unique(row_searchable_array))
new_indices = np.array([np.searchsorted(unique_combs, vert) for vert in row_searchable_array]).flatten().astype(np.uint32)
ucombs = struct_to_ndarray(unique_combs)
new_arrays = tuple(ucombs[:, start:end] for start, end in pairwise(np.append(0, np.cumsum(array_ncols))))
new_arrays = tuple(np.array(array, dtype=np.float32) for array in new_arrays)
return new_arrays, new_indices
def calculate_normals(vertices):
verts = np.array(vertices, dtype=float)
normals = np.zeros_like(verts)
for start, end in pairwise(np.arange(0, verts.shape[0] + 1, 3)):
vecs = np.vstack((verts[start + 1] - verts[start], verts[start + 2] - verts[start]))
vecs /= np.linalg.norm(vecs, axis=1, keepdims=True)
normal = np.cross(*vecs)
normals[start:end, :] = normal / np.linalg.norm(normal)
return normals
| true | true |
f722489cba16865871736137308362868fb163d9 | 2,886 | py | Python | crawler/utils/urltool.py | DallasLJ/spider | 53807439d4e8c3b63b8ff7f4b7d99109daf19a02 | [
"Apache-2.0"
] | null | null | null | crawler/utils/urltool.py | DallasLJ/spider | 53807439d4e8c3b63b8ff7f4b7d99109daf19a02 | [
"Apache-2.0"
] | null | null | null | crawler/utils/urltool.py | DallasLJ/spider | 53807439d4e8c3b63b8ff7f4b7d99109daf19a02 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
from urllib.parse import urlparse
from crawler.error.urlerror import DomainUrlError
from crawler.utils.strtool import find_last_index
def start_with(str, prefix):
if str[0: len(prefix)] == prefix:
return True
else:
return False
def get_domain_url(url):
    """Return the ``scheme://netloc`` part of *url*.

    Raises DomainUrlError when *url* has no scheme or no host.
    """
    parsed = urlparse(url)
    if parsed.scheme == '' or parsed.netloc == '':
        raise DomainUrlError("Base url can't parse domain url")
    return parsed.scheme + '://' + parsed.netloc
def url_patch(url, base_url):
    """Make *url* absolute using the scheme/host of *base_url*.

    Rules, in order:
      * ``www...`` addresses get a plain ``http://`` prefix
      * names without a leading '/', 'http' or 'ftp' are rooted at '/'
      * URLs that already carry a host are returned unchanged
      * otherwise base_url's domain is prepended

    Best effort: when *base_url* has no usable domain the error is
    printed and ``None`` is returned implicitly (original behaviour).
    """
    if url.startswith('www'):
        # bare www address: assume plain http
        return 'http://' + url
    if not url.startswith(('/', 'http', 'ftp')):
        # relative name: root it so it can be appended to the domain
        url = '/' + url
    if urlparse(url).netloc != '':
        # already absolute
        return url
    try:
        return get_domain_url(base_url) + url
    except DomainUrlError as e:
        # NOTE(review): ``.message`` is not a standard Exception
        # attribute in Python 3 — confirm DomainUrlError defines it
        print(e.message)
def dytt_patch(url, base_url):
    """dytt-site specific resolver.

    ``list...`` links are relative to the *directory* of *base_url*
    (everything up to and including the last '/'); any other link goes
    through the generic url_patch.
    """
    if url.startswith('list'):
        # str.rfind replaces the original reverse/find/reverse trick
        return base_url[:base_url.rfind('/') + 1] + url
    return url_patch(url, base_url)
def encode_change(response, encode_dict):
    """Force ``response.encoding`` for sites with a known URL prefix.

    *encode_dict* maps a URL prefix to the encoding to apply; the first
    matching prefix (dict insertion order) wins.  No-op when nothing
    matches.
    """
    url = response.url
    for prefix, encoding in encode_dict.items():
        if url.startswith(prefix):
            response.encoding = encoding
            return
def black_list_check(url, black_list):
    """Return True when the domain of *url* is on *black_list*.

    Propagates DomainUrlError when *url* has no parsable domain.
    """
    # return the membership test directly instead of if/return True/False
    return get_domain_url(url) in black_list
def patch(url, base_url):
    """Resolve *url* against *base_url* (a tiny urljoin variant).

    Cases handled, in order:
      * invalid base (no scheme or host)   -> raise DomainUrlError
      * ``www...``                         -> prefix with ``http://``
      * absolute path ``/x``               -> base domain + path
      * full URL (scheme and host present) -> unchanged
      * relative name, base has a path     -> base directory + name
      * relative name, base is the root    -> base domain + '/' + name
    """
    base_parts = urlparse(base_url)
    if base_parts.scheme == '' or base_parts.netloc == '':
        # the base must itself be an absolute URL
        raise DomainUrlError("Base url can't parse domain url")
    # hoisted: get_domain_url(base_url) would re-parse the same string
    domain = base_parts.scheme + '://' + base_parts.netloc

    if url.startswith('www'):
        # no scheme given: default to http
        return 'http://' + url
    if url.startswith('/'):
        return domain + url

    url_parts = urlparse(url)
    if url_parts.scheme != '' and url_parts.netloc != '':
        # already a complete URL
        return url

    path = base_parts.path
    if path == '':
        # base is the site root
        return domain + '/' + url
    # keep the base path up to (and including) its last '/';
    # str.rfind replaces the project-local find_last_index helper
    return domain + path[:path.rfind('/') + 1] + url
if __name__ == '__main__':
    # ad-hoc manual check of the resolver; earlier experiments kept below
    url = 'index.html'
    base_url = 'http://www.dytt8.net'
    #print(url_patch(url, base_url))
    #base_url = 'http://www.dytt8.net/html/gndy/dyzz/index.html'
    #url = 'list_23_2.html'
    print(patch(url, base_url))
| 32.426966 | 95 | 0.617117 |
from urllib.parse import urlparse
from crawler.error.urlerror import DomainUrlError
from crawler.utils.strtool import find_last_index
def start_with(str, prefix):
if str[0: len(prefix)] == prefix:
return True
else:
return False
def get_domain_url(url):
url_parse = urlparse(url)
if not url_parse.scheme == '' and not url_parse.netloc == '':
return url_parse.scheme + '://' + url_parse.netloc
else:
raise DomainUrlError("Base url can't parse domain url")
def url_patch(url, base_url):
if start_with(url, 'www'):
return 'http://' + url
if not start_with(url, '/') and not start_with(url, 'http') and not start_with(url, 'ftp'):
url = '/' + url
if urlparse(url).netloc == '':
try:
return get_domain_url(base_url) + url
except DomainUrlError as e:
print(e.message)
else:
return url
def dytt_patch(url, base_url):
if start_with(url, 'list'):
base_url = base_url[::-1]
base_url = base_url[base_url.find('/'):]
base_url = base_url[::-1]
return base_url + url
else:
return url_patch(url, base_url)
def encode_change(response, encode_dict):
url = response.url
for key in encode_dict.keys():
if start_with(url, key):
response.encoding = encode_dict[key]
return
def black_list_check(url, black_list):
domain_url = get_domain_url(url)
if domain_url in black_list:
return True
return False
def patch(url, base_url):
base_url_parse = urlparse(base_url)
url_parse = urlparse(url)
if base_url_parse.scheme == '' or base_url_parse.netloc == '':
#case of baseUrl with error, raise it
raise DomainUrlError("Base url can't parse domain url")
if start_with(url, 'www'):
return 'http://' + url
elif start_with(url, '/'):
return get_domain_url(base_url) + url
elif not url_parse.scheme == '' and not url_parse.netloc == '':
return url
else:
path = base_url_parse.path
if not path == '':
path = path[0: find_last_index(path, '/')+1]
return get_domain_url(base_url) + path + url
else:
return get_domain_url(base_url) + '/' + url
if __name__ == '__main__':
url = 'index.html'
base_url = 'http://www.dytt8.net'
print(patch(url, base_url))
| true | true |
f72248a7e8ac3765348e1d07ab9f7ec7004a9c8f | 4,283 | py | Python | tests/test_config_file.py | jberends/duplicity_backup | 99e27060b20dabc0ff8ed416b6a4aeac98143cae | [
"Apache-2.0"
] | 2 | 2019-12-16T10:42:16.000Z | 2021-02-24T11:43:17.000Z | tests/test_config_file.py | jberends/duplicity_backup | 99e27060b20dabc0ff8ed416b6a4aeac98143cae | [
"Apache-2.0"
] | null | null | null | tests/test_config_file.py | jberends/duplicity_backup | 99e27060b20dabc0ff8ed416b6a4aeac98143cae | [
"Apache-2.0"
] | 1 | 2020-06-30T22:57:06.000Z | 2020-06-30T22:57:06.000Z | from pathlib import Path
from tempfile import NamedTemporaryFile, SpooledTemporaryFile
from unittest import TestCase
from click.testing import CliRunner
from duplicity_backup_s3.config import check_config_file
class TestConfig(TestCase):
    """Schema-validation tests for ``check_config_file``.

    Every test writes a YAML document to a temporary file and validates
    it in testing mode: a valid document yields the config file's path,
    an invalid one yields a dict of schema errors.
    """

    def _check(self, config_yaml):
        """Write *config_yaml* to a temp file and validate it.

        Returns a ``(result, path)`` tuple; validation happens while the
        temp file still exists.
        """
        with NamedTemporaryFile(mode="w") as t:
            t.write(config_yaml)
            t.flush()
            path = Path(t.name)
            return check_config_file(config_file=path, testing=True), path

    def test_default_config_provided_by_package(self):
        """The config template shipped with the package passes validation."""
        from duplicity_backup_s3.defaults import CONFIG_TEMPLATE_PATH
        from duplicity_backup_s3.defaults import CONFIG_SCHEMA_PATH

        config_tempate_path = CONFIG_TEMPLATE_PATH
        check_config_file(config_file=config_tempate_path, testing=True)

    def test_vanilla_config(self):
        """A complete, well-formed config returns the config path."""
        config_yaml = """
        aws:
          AWS_ACCESS_KEY_ID: foobar_aws_key_id
          AWS_SECRET_ACCESS_KEY: foobar_aws_access_key
        backuproot: /home
        excludes:
          - _TESTFILE_TO_EXCLUDE
        includes:
          - Pictures
        remote:
          bucket: ''
          path: '__test'
        full_if_older_than: 7D
        """
        result, path = self._check(config_yaml)
        self.assertEqual(result, path)

    def test_extra_key_fails(self):
        """An unknown top-level key is reported as a schema error."""
        config_yaml = """
        aws:
          AWS_ACCESS_KEY_ID: foobar_aws_key_id
          AWS_SECRET_ACCESS_KEY: foobar_aws_access_key
        backuproot: /home
        excludes:
          - _TESTFILE_TO_EXCLUDE
        includes:
          - Pictures
        remote:
          bucket: ''
          path: '__test'
        full_if_older_than: 7D
        One_more_key: fail
        """
        result, _ = self._check(config_yaml)
        self.assertDictEqual(result, {"One_more_key": ["unknown field"]})

    def test_optional_missing_key_succeed(self):
        """Omitting optional keys (includes/excludes) still validates."""
        config_yaml = """
        aws:
          AWS_ACCESS_KEY_ID: foobar_aws_key_id
          AWS_SECRET_ACCESS_KEY: foobar_aws_access_key
        backuproot: /home
        remote:
          bucket: ''
          path: '__test'
        full_if_older_than: 7D
        """
        result, path = self._check(config_yaml)
        self.assertEqual(result, path)

    def test_required_missing_key_fails(self):
        """A missing required key (remote.bucket) is reported."""
        config_yaml = """
        aws:
          AWS_ACCESS_KEY_ID: foobar_aws_key_id
          AWS_SECRET_ACCESS_KEY: foobar_aws_access_key
        backuproot: /home
        remote:
          path: '__test'
        full_if_older_than: 7D
        """
        result, _ = self._check(config_yaml)
        self.assertDictEqual(result, {"remote": [{"bucket": ["required field"]}]})

    def test_incorrect_value_type_fails(self):
        """A value of the wrong type (int backuproot) is reported."""
        config_yaml = """
        aws:
          AWS_ACCESS_KEY_ID: foobar_aws_key_id
          AWS_SECRET_ACCESS_KEY: foobar_aws_access_key
        backuproot: 1
        remote:
          bucket: ''
          path: '__test'
        full_if_older_than: 7D
        """
        result, _ = self._check(config_yaml)
        self.assertDictEqual(result, {"backuproot": ["must be of string type"]})

    def test_config_from_production_success(self):
        """A realistic production config validates cleanly."""
        config_yaml = """
        aws:
          AWS_ACCESS_KEY_ID: fakekey
          AWS_SECRET_ACCESS_KEY: fakesecret
        backuproot: /opt/dir/
        includes:
          - /opt/dir/*-media
          - /opt/dir/var/archives
        excludes:
          - "**"
        remote:
          bucket: somebucket
          path: __testpath
        """
        result, path = self._check(config_yaml)
        self.assertEqual(result, path)
| 30.375887 | 88 | 0.578333 | from pathlib import Path
from tempfile import NamedTemporaryFile, SpooledTemporaryFile
from unittest import TestCase
from click.testing import CliRunner
from duplicity_backup_s3.config import check_config_file
class TestConfig(TestCase):
def test_default_config_provided_by_package(self):
from duplicity_backup_s3.defaults import CONFIG_TEMPLATE_PATH
from duplicity_backup_s3.defaults import CONFIG_SCHEMA_PATH
config_tempate_path = CONFIG_TEMPLATE_PATH
check_config_file(config_file=config_tempate_path, testing=True)
def test_vanilla_config(self):
config_yaml = """
aws:
AWS_ACCESS_KEY_ID: foobar_aws_key_id
AWS_SECRET_ACCESS_KEY: foobar_aws_access_key
backuproot: /home
excludes:
- _TESTFILE_TO_EXCLUDE
includes:
- Pictures
remote:
bucket: ''
path: '__test'
full_if_older_than: 7D
"""
with NamedTemporaryFile(mode="w") as t:
t.write(config_yaml)
t.flush()
self.assertEqual(
check_config_file(config_file=Path(t.name), testing=True), Path(t.name)
)
def test_extra_key_fails(self):
config_yaml = """
aws:
AWS_ACCESS_KEY_ID: foobar_aws_key_id
AWS_SECRET_ACCESS_KEY: foobar_aws_access_key
backuproot: /home
excludes:
- _TESTFILE_TO_EXCLUDE
includes:
- Pictures
remote:
bucket: ''
path: '__test'
full_if_older_than: 7D
One_more_key: fail
"""
with NamedTemporaryFile(mode="w") as t:
t.write(config_yaml)
t.flush()
self.assertDictEqual(
check_config_file(config_file=Path(t.name), testing=True),
{"One_more_key": ["unknown field"]},
)
def test_optional_missing_key_succeed(self):
config_yaml = """
aws:
AWS_ACCESS_KEY_ID: foobar_aws_key_id
AWS_SECRET_ACCESS_KEY: foobar_aws_access_key
backuproot: /home
remote:
bucket: ''
path: '__test'
full_if_older_than: 7D
"""
with NamedTemporaryFile(mode="w") as t:
t.write(config_yaml)
t.flush()
self.assertEqual(
check_config_file(config_file=Path(t.name), testing=True), Path(t.name)
)
def test_required_missing_key_fails(self):
config_yaml = """
aws:
AWS_ACCESS_KEY_ID: foobar_aws_key_id
AWS_SECRET_ACCESS_KEY: foobar_aws_access_key
backuproot: /home
remote:
path: '__test'
full_if_older_than: 7D
"""
with NamedTemporaryFile(mode="w") as t:
t.write(config_yaml)
t.flush()
self.assertDictEqual(
check_config_file(config_file=Path(t.name), testing=True),
{"remote": [{"bucket": ["required field"]}]},
)
def test_incorrect_value_type_fails(self):
config_yaml = """
aws:
AWS_ACCESS_KEY_ID: foobar_aws_key_id
AWS_SECRET_ACCESS_KEY: foobar_aws_access_key
backuproot: 1
remote:
bucket: ''
path: '__test'
full_if_older_than: 7D
"""
with NamedTemporaryFile(mode="w") as t:
t.write(config_yaml)
t.flush()
self.assertDictEqual(
check_config_file(config_file=Path(t.name), testing=True),
{"backuproot": ["must be of string type"]},
)
def test_config_from_production_success(self):
config_yaml = """
aws:
AWS_ACCESS_KEY_ID: fakekey
AWS_SECRET_ACCESS_KEY: fakesecret
backuproot: /opt/dir/
includes:
- /opt/dir/*-media
- /opt/dir/var/archives
excludes:
- "**"
remote:
bucket: somebucket
path: __testpath
"""
with NamedTemporaryFile(mode="w") as t:
t.write(config_yaml)
t.flush()
self.assertEqual(
check_config_file(config_file=Path(t.name), testing=True), Path(t.name),
)
| true | true |
f722493a284aff97c889421d4a9f473fb5eff05a | 39 | py | Python | ind2/flights/__init__.py | LokiTheGodOfBitchez/Lab_2.13 | 57dce79bac021983e7ce7bf5ffeb3d6fb5df0c44 | [
"MIT"
] | null | null | null | ind2/flights/__init__.py | LokiTheGodOfBitchez/Lab_2.13 | 57dce79bac021983e7ce7bf5ffeb3d6fb5df0c44 | [
"MIT"
] | null | null | null | ind2/flights/__init__.py | LokiTheGodOfBitchez/Lab_2.13 | 57dce79bac021983e7ce7bf5ffeb3d6fb5df0c44 | [
"MIT"
] | null | null | null | __all__ = ["get_fl", "disp", "select"]
# Names re-exported on a star-import of this package.
__all__ = ["get_fl", "disp", "select"]
| true | true |
f722496af521cef904cb46d4978debcf2cdb7ef7 | 967 | py | Python | projects/PaLM/configs/palm_pretrain.py | Oneflow-Inc/libai | e473bd3962f07b1e37232d2be39c8257df0ec0f3 | [
"Apache-2.0"
] | 55 | 2021-12-10T08:47:06.000Z | 2022-03-28T09:02:15.000Z | projects/PaLM/configs/palm_pretrain.py | Oneflow-Inc/libai | e473bd3962f07b1e37232d2be39c8257df0ec0f3 | [
"Apache-2.0"
] | 106 | 2021-11-03T05:16:45.000Z | 2022-03-31T06:16:23.000Z | projects/PaLM/configs/palm_pretrain.py | Oneflow-Inc/libai | e473bd3962f07b1e37232d2be39c8257df0ec0f3 | [
"Apache-2.0"
] | 13 | 2021-12-29T08:12:08.000Z | 2022-03-28T06:59:45.000Z | from libai.config import LazyCall, get_config
from .models.palm_small import model
from libai.evaluation import PPLEvaluator
graph = get_config("common/models/graph.py").graph
train = get_config("common/train.py").train
optim = get_config("common/optim.py").optim
data = get_config("common/data/gpt_dataset.py")
dataloader = data.dataloader
tokenization = data.tokenization
vocab_file = "./projects/PaLM/gpt_dataset/gpt2-vocab.json"
merge_files = "./projects/PaLM/gpt_dataset/gpt2-merges.txt"
data_prefix = "./projects/PaLM/gpt_dataset/loss_compara_content_sentence"
tokenization.tokenizer.vocab_file = vocab_file
tokenization.tokenizer.merges_file = merge_files
dataloader.train.dataset[0].data_prefix = data_prefix
dataloader.train.dataset[0].indexed_dataset.data_prefix = data_prefix
train.train_micro_batch_size = 4
train.activation_checkpoint.enabled = True
train.evaluation.evaluator = LazyCall(PPLEvaluator)()
train.output_dir = "./output/palm_output"
| 33.344828 | 73 | 0.813857 | from libai.config import LazyCall, get_config
from .models.palm_small import model
from libai.evaluation import PPLEvaluator
graph = get_config("common/models/graph.py").graph
train = get_config("common/train.py").train
optim = get_config("common/optim.py").optim
data = get_config("common/data/gpt_dataset.py")
dataloader = data.dataloader
tokenization = data.tokenization
vocab_file = "./projects/PaLM/gpt_dataset/gpt2-vocab.json"
merge_files = "./projects/PaLM/gpt_dataset/gpt2-merges.txt"
data_prefix = "./projects/PaLM/gpt_dataset/loss_compara_content_sentence"
tokenization.tokenizer.vocab_file = vocab_file
tokenization.tokenizer.merges_file = merge_files
dataloader.train.dataset[0].data_prefix = data_prefix
dataloader.train.dataset[0].indexed_dataset.data_prefix = data_prefix
train.train_micro_batch_size = 4
train.activation_checkpoint.enabled = True
train.evaluation.evaluator = LazyCall(PPLEvaluator)()
train.output_dir = "./output/palm_output"
| true | true |
f7224b5ea5613d417453b2c977cbbf01d3da0e91 | 14,542 | py | Python | py_kbible/kbible.py | sungcheolkim78/py_kbible | 3a576c20e5e49f5e85be6ddede20accb6df14663 | [
"Apache-2.0"
] | null | null | null | py_kbible/kbible.py | sungcheolkim78/py_kbible | 3a576c20e5e49f5e85be6ddede20accb6df14663 | [
"Apache-2.0"
] | null | null | null | py_kbible/kbible.py | sungcheolkim78/py_kbible | 3a576c20e5e49f5e85be6ddede20accb6df14663 | [
"Apache-2.0"
] | 1 | 2021-12-27T00:32:48.000Z | 2021-12-27T00:32:48.000Z | """
kbible.py - base bible object and commands
"""
import pandas as pd
import yaml
import os
import subprocess
__author__ = "Sungcheol Kim <sungcheol.kim78@gmail.com>"
__docformat__ = "restructuredtext en"
class KBible(object):
    """Container for one or more bible-text versions.

    Each version is a pandas DataFrame produced by ``read_full_bible``;
    ``_versionlist`` maps a version name to its index in ``_biblelist``.
    """

    def __init__(self, version="개역한글판성경", debug=False, **kwargs):
        """Load the book-name table and an initial bible *version*."""
        self._biblelist = []
        self._versionlist = {}

        this_dir, this_filename = os.path.split(__file__)
        listname = os.path.join(this_dir, "data", u"book_names.csv")
        self._table = pd.read_csv(listname, index_col=0)

        self.add(version, **kwargs)

    def add(self, version, **kwargs):
        """Load and register an additional bible *version*."""
        b = read_full_bible(version_name=version, **kwargs)
        self._biblelist.append(b)
        self._versionlist[version] = len(self._biblelist) - 1

    def delete(self, version):
        """Remove a loaded *version* (at least one version is kept)."""
        if (version in self._versionlist) and (len(self._versionlist) > 1):
            i = self._versionlist.pop(version)
            del self._biblelist[i]
            # BUGFIX: every index after the removed slot shifts down by
            # one; the original left the remaining indices stale, so a
            # later get() returned the wrong version or raised.
            for name, idx in self._versionlist.items():
                if idx > i:
                    self._versionlist[name] = idx - 1
        else:
            print('... not found or only have one bible version: {}'.format(version))

    def save(self, version="개역한글판성경"):
        """Save *version* as a gzip-compressed csv in the package data dir."""
        if version in self._versionlist:
            this_dir, this_filename = os.path.split(__file__)
            filename = os.path.join(this_dir, "data", version + ".csv.gz")
            b = self._biblelist[self._versionlist[version]]
            b.to_csv(filename, compression='gzip')
            print('... save file: {}'.format(filename))

    def get(self, version=""):
        """Return the DataFrame for *version* (default: first loaded).

        Returns ``[]`` when the version was never loaded.
        """
        if version == "":
            return self._biblelist[0]
        try:
            return self._biblelist[self._versionlist[version]]
        except KeyError:
            # narrowed from a bare ``except:`` — only an unknown
            # version name is expected here
            print('... no bible version: {}'.format(version))
            return []

    def bystr(self, sstr, form="md"):
        """Extract verses matching short reference *sstr* from all versions."""
        if form == "pd":
            res = pd.DataFrame()
            for b in self._biblelist:
                res = pd.concat([res, extract_bystr(b, sstr, form="pd")], axis=0)
            return res
        else:
            msg = ""
            for b in self._biblelist:
                msg = msg + extract_bystr(b, sstr, form=form) + '\n'
            return msg

    def search(self, sstr, form="md", regex=False):
        """Full-text search over all loaded versions."""
        res = pd.DataFrame()
        for b in self._biblelist:
            b_res_idx = b.text.str.contains(sstr, regex=regex)
            if sum(b_res_idx) > 0:
                res = pd.concat([res, b[b_res_idx]], axis=0)
        if len(res) > 0:
            return get_texts(res, form=form)
        else:
            print('... no matched results')
            return []

    def read(self, sstr, form='mdlines'):
        """Render *sstr* into a temporary markdown file and open it.

        NOTE(review): relies on the macOS ``open`` command — not portable.
        """
        msg = self.bystr(sstr, form=form)
        with open('.temp.md', 'w') as f:
            f.writelines(msg)
        cmd = ['open', '.temp.md']
        subprocess.call(cmd)
def bible_parser(version_name="개역한글판성경"):
    """Parse a raw bible text file into a pandas DataFrame.

    inputs:
        version_name : available versions 개역한글판성경, 개역개정판성경
            (resolved to ``data/<version_name>.txt`` inside the package)
    output:
        DataFrame with columns book / chapter / verse / text, one row
        per verse ("short" form — no merged book-name table)
    """

    # read bible txt file shipped with the package
    this_dir, this_filename = os.path.split(__file__)
    filename = os.path.join(this_dir, "data", version_name + ".txt")
    with open(filename, "r") as f:
        lines = f.readlines()

    # prepare data container
    books = []
    chapters = []
    verses = []
    texts = []

    for i, line in enumerate(lines):
        line = line.strip('\n\r')

        # skip blank lines, '#' comments and lines without a ':' header
        if len(line) == 0:
            continue
        if line[0] == "#":
            continue
        if line.find(':') == -1:
            continue

        # header is "<book><chapter>:<verse>" before the first space
        hp = line.find(' ')
        if hp > 1 and hp < 25:
            header = line[:hp]
            text = line[hp+1:]

            # find book, chapter, verse, text
            try:
                tmp = header.split(':')[0]
                if tmp.find('.') > 0:      # english bible short name, e.g. "Gen.1"
                    book = tmp.split('.')[0]
                    chapter = tmp.split('.')[1]
                elif tmp[:2] in ["1_", "2_", "3_"]:      # english bible long name, e.g. "1_John1"
                    book = tmp[:2] + ''.join(filter(str.isalpha, tmp[2:]))
                    chapter = ''.join(filter(str.isdigit, tmp[2:]))
                else:      # korean bible, e.g. "창1"
                    book = ''.join(filter(str.isalpha, tmp))
                    chapter = ''.join(filter(str.isdigit, tmp))
                verse = header.split(':')[1]
            except:
                print('... header error: ({}) {}'.format(i, header))
                continue

            # convert chapter/verse to int, skipping malformed headers
            try:
                verse = int(verse)
                chapter = int(chapter)
            except:
                print("... conversion error: ({}) {} {}".format(i, verse, chapter))
                continue

            # collect
            books.append(book)
            chapters.append(chapter)
            verses.append(verse)
            texts.append(text)
        else:
            print("... unrecognized line: ({}) {}".format(i, line))

    df_bible = {'book':books, 'chapter':chapters, 'verse':verses, 'text':texts}
    idx = range(len(books))
    bible = pd.DataFrame(data=df_bible, index=idx)

    return bible
def read_full_bible(version_name="개역한글판성경", save=False):
    """ read bible version and combine book data

    inputs:
        version_name: bible version (cached as ``data/<name>.csv.gz``)
        save: when True, write the freshly built table to the gzip cache
    output:
        bible panda dataframe (verse rows merged with the book-name table)
    """

    try:
        # fast path: load the pre-built compressed cache
        this_dir, this_filename = os.path.split(__file__)
        filename = os.path.join(this_dir, "data", version_name + ".csv.gz")
        full_bible = pd.read_csv(filename, index_col=0, compression = "gzip")
        return full_bible
    except FileNotFoundError:
        print('... generate bible database: {}'.format(filename))

        # slow path: parse the raw text and join with the book-name table
        bible = bible_parser(version_name=version_name)
        listname = os.path.join(this_dir, "data", u"book_names.csv")
        table = pd.read_csv(listname, index_col=0)

        # pick the naming scheme that matches the parsed book ids
        if bible['book'][0] == 'Gen':
            table['book'] = table['eng_short']
        elif bible['book'][0] == 'Genesis':
            table['book'] = table['eng']
        else:
            table['book'] = table['kor_short']

        full_bible = pd.merge(bible, table, on='book', how='left')
        if save:
            full_bible.to_csv(filename, compression='gzip')

        return full_bible
def find_id(bible, book=None, chapter=None, verse=None, verb=False):
    """Filter a bible DataFrame by book / chapter / verse.

    inputs:
        bible: bible DataFrame (short or full form; the full form carries
            the extra columns kor/kor_short/eng/eng_short and "code")
        book: book name or list of names (any naming scheme on full form)
        chapter: chapter number or list of numbers
        verse: verse number or list of numbers
        verb: print search progress
    output:
        DataFrame with rows matching every given criterion, or [] when
        verses were requested but nothing matched
    """
    # mutable-default fix: fresh lists instead of shared [] defaults
    if book is None:
        book = []
    if chapter is None:
        chapter = []
    if verse is None:
        verse = []

    isfullbible = "code" in bible.columns

    # default to an arbitrary book when none was given
    # (BUGFIX: the original indexed a set — ``books[0]`` — which raises
    # TypeError; sets are unordered, so "first" was never well defined)
    if len(book) == 0:
        book = next(iter(set(bible['book'])))
    if isinstance(book, str):
        book = [book]
    if verb: print('... search book:{}'.format(book))

    if isfullbible:
        # full bible: accept any of the four naming schemes
        result = bible.loc[bible.kor.isin(book) | bible.kor_short.isin(book) | bible.eng.isin(book) | bible.eng_short.isin(book) | bible.book.isin(book)]
    else:
        result = bible.loc[bible.book.isin(book)]

    # check chapter (filter with result's own mask instead of a
    # full-length bible mask relying on .loc index alignment)
    if isinstance(chapter, int):
        chapter = [chapter]
    if len(chapter) == 0:
        return result
    if verb: print('... search chapter: {}'.format(chapter))
    result = result.loc[result.chapter.isin(chapter)]

    # check verse
    if isinstance(verse, int):
        verse = [verse]
    if len(verse) == 0:
        return result
    if verb: print('... search verse: {}'.format(verse))
    result = result.loc[result.verse.isin(verse)]

    if len(result) > 0:
        return result
    print("... not found: {}, {}, {}".format(book, chapter, verse))
    return []
def extract_bystr(bible, sstr, form="pd"):
    """ extract verse by short search string

    inputs:
        bible: panda database of bible version
        sstr: search pattern
            - example "창3:16", "고후5:3", '요일1:1', "창세기1:1"
            - no spaces
            - one separator :
            - ',' and '-' is possible (창3:16,17) (창1:1-5)
        form: output format
            - "md" one line sentence with markdowm
            - "string" text string
            - "pd" panda dataframe
    output:
        object determined by form variable; [] when nothing matches,
        0 on a malformed verse range
    """

    # remove all spaces
    sstr = sstr.replace(" ", "")

    # split "<book><chapter>:<verses>" at the colon (verses optional)
    if sstr.find(":") > 0:
        head = sstr.split(':')[0]
        verses = sstr.split(':')[1]
    else:
        head = sstr
        verses = []

    # book = the letters of the head, chapter = its digits
    book = ''.join(filter(str.isalpha, head))
    chapter = ''.join(filter(str.isdigit, head))

    # if there is no chapter
    if len(chapter) == 0:
        chapter = []
    else:
        chapter = int(chapter)

    # verse spec: "1,3" enumerates, "1-5" expands the inclusive range
    if len(verses) > 0:
        if verses.find(',') > 0:
            verses = verses.split(',')
        # check - in verse
        elif verses.find('-') > 0:
            start = verses.split('-')[0]
            end = verses.split('-')[1]
            try:
                verses = list(range(int(start), int(end)+1))
            except:
                print('... wrong format: {}'.format(sstr))
                return 0
        else:
            verses = [int(verses)]

        verses = [int(v) for v in verses]

    #print(book, chapter, verses)
    # look up matching rows and render them in the requested format
    res = find_id(bible, book=book, chapter=chapter, verse=verses)
    if len(res) == 0:
        return []

    return get_texts(res, form=form, sstr=sstr)
def get_texts(bible_pd, form="md", sstr="", sep="", text_width=0):
    """Render filtered verse rows in the requested output format.

    inputs:
        bible_pd: DataFrame of verses (full form — needs "kor_short")
        form: "pd" | "string" | "md" | "mdlines"
        sstr: original search string, used as a one-line prefix label
        sep: separator between book name and chapter in the verse id
        text_width: unused  # NOTE(review): dead parameter — confirm before removing
    output:
        the DataFrame ("pd") or formatted text; [] for an unknown *form*
    """
    if form == "pd":
        return bible_pd
    if len(bible_pd["book"]) == 0:
        return ""

    # show id as kor_short, e.g. "창1:1"
    bible_pd.loc[:, "id"] = bible_pd.loc[:, "kor_short"] + sep + bible_pd["chapter"].astype(str) + ":" + bible_pd["verse"].astype(str)
    bible_pd = tidy_footnote(bible_pd)

    # a single-book search without ":" gets an explicit first-last label
    if (len(set(bible_pd["book"])) == 1) and (sstr.find(":") == -1):
        min_v = bible_pd["verse"].min()
        max_v = bible_pd["verse"].max()
        sstr = "{}:{}-{}".format(sstr, min_v, max_v)

    if form == "string":
        if sstr == "":
            bible_pd[form] = bible_pd["id"] + " - " + bible_pd[form].astype(str)
            msg = '\n'.join(bible_pd[form].values)
        else:
            msg = sstr + ' ' + ' '.join(bible_pd[form].values)
        return msg

    if form == "md":
        if sstr == "":
            bible_pd[form] = "`" + bible_pd["id"] + "` " + bible_pd[form].astype(str)
            msg = '\n'.join(bible_pd[form].values)
        else:
            # one paragraph: verse numbers become superscripts
            verse_string_list = [ '<sup>{}</sup> {}'.format(v, l) for v,l in zip(bible_pd['verse'], bible_pd[form]) ]
            msg = '`{}` '.format(sstr) + ' '.join(verse_string_list)
        # append collected markdown footnote definitions, if any
        if sum(bible_pd["footnote"] != "") > 0:
            return msg + '\n' + ''.join(bible_pd["footnote"].values)
        else:
            return msg

    if form == "mdlines":
        # one markdown line per verse
        bible_pd["md"] = "`" + bible_pd["id"] + "` " + bible_pd["md"].astype(str)
        msg = '\n'.join(bible_pd["md"].values)
        if sum(bible_pd["footnote"] != "") > 0:
            return msg + '\n' + ''.join(bible_pd["footnote"].values)
        else:
            return msg

    print('... {} format is not implemented: ["pd", "md", "string"]'.format(form))
    return []
def tidy_footnote(bible_pd, keyword="FOOTNOTE"):
    """Split inline footnote markers out of the verse text.

    Adds three columns to *bible_pd* (modified in place and returned):
      - "string": plain text with footnote bodies removed
      - "md": markdown text with footnote reference labels ([^id a], ...)
      - "footnote": markdown footnote definitions, one per line
    Footnotes are delimited by __a__KEYWORD__a__ ... __b__KEYWORD__b__.
    """
    bible_pd["md"] = bible_pd["text"]
    bible_pd["string"] = bible_pd["text"]
    bible_pd["footnote"] = ""
    marker = "__a__{}__a__".format(keyword)
    end_marker = "__b__{}__b__".format(keyword)
    labels = ["a", "b", "c", "d", "e", "f"]

    # only rows that actually contain a footnote marker
    has_note = bible_pd.text.str.contains(marker)
    for row in bible_pd.index[has_note]:
        raw = bible_pd.at[row, "text"]
        # normalise end markers to the start marker, then split: even
        # chunks are verse text, odd chunks are footnote bodies
        chunks = raw.replace("_b_", "_a_").split(marker)
        bible_pd.at[row, "string"] = chunks[0] + ''.join(chunks[2::2])
        verse_id = bible_pd.at[row, "id"]
        md_parts = [chunks[0]]
        fn_parts = []
        for k in range(len(chunks) // 2):
            label = "[^{}{}]".format(verse_id, labels[k])
            md_parts.append(label + chunks[2 * k + 2])
            fn_parts.append(label + ":" + chunks[2 * k + 1].replace("TR", "") + '\n')
        bible_pd.at[row, "md"] = ''.join(md_parts)
        bible_pd.at[row, "footnote"] = ''.join(fn_parts)
    return bible_pd
def make_mdpage(bible, day_info, save_dir=None):
    """ print all verses in list using markdown format

    inputs:
        bible: name of version or panda dataframe
        day_info: name of a day-information yaml file, or the parsed
            yaml data (keys: day, title, song, prayer, summary, verses)
        save_dir: directory to write "day<N>-<title>[-version].md" into,
            or None to skip writing
    output:
        text strings of markdown page (0 on read/parse errors)
    """
    # check day_info.yml file: load it when a filename was given
    if isinstance(day_info, str):
        try:
            with open(day_info, "r") as f:
                day_info = yaml.load(f, yaml.BaseLoader)
        except:
            print("... file: {} parser error!".format(day_info))
            return 0

    bible_version = ""
    # check bible version: resolve a version name into its DataFrame
    if isinstance(bible, str):
        try:
            bible_version = "-" + bible
            bible = read_full_bible(bible)
        except:
            print("... read bible error: {}".format(bible_version[1:]))
            return 0

    # page header built from the day information
    msg = "# {}일차 - {}\n\n".format(day_info["day"],day_info["title"])
    msg = msg + "찬양 : {}\n\n".format(day_info["song"])
    msg = msg + "기도 : {}\n\n".format(day_info["prayer"])
    msg = msg + "요약 : {}\n\n".format(day_info["summary"])
    msg = msg + "성경 버전 : {}\n\n".format(bible_version[1:])
    # one markdown bullet per verse reference
    for v in day_info["verses"]:
        msg = msg + '- {}\n\n'.format(extract_bystr(bible, v, form="md"))
    msg = msg + "### info\n\n"
    msg = msg + "- 성경 구절 갯수 : {}".format(len(day_info["verses"]))

    if save_dir is not None:
        filename = '{}/day{}-{}{}.md'.format(save_dir, day_info["day"], day_info["title"].replace(" ", ""), bible_version)
        with open(filename, "w") as f:
            f.write(msg)
        print('... save to {}'.format(filename))

    return msg
| 30.744186 | 153 | 0.536652 |
import pandas as pd
import yaml
import os
import subprocess
__author__ = "Sungcheol Kim <sungcheol.kim78@gmail.com>"
__docformat__ = "restructuredtext en"
class KBible(object):
def __init__(self, version="개역한글판성경", debug=False, **kwargs):
self._biblelist = []
self._versionlist = {}
this_dir, this_filename = os.path.split(__file__)
listname = os.path.join(this_dir, "data", u"book_names.csv")
self._table = pd.read_csv(listname, index_col=0)
self.add(version, **kwargs)
def add(self, version, **kwargs):
b = read_full_bible(version_name=version, **kwargs)
self._biblelist.append(b)
self._versionlist[version] = len(self._biblelist) - 1
def delete(self, version):
if (version in self._versionlist) and (len(self._versionlist) > 1):
i = self._versionlist[version]
del self._versionlist[version]
del self._biblelist[i]
else:
print('... not found or only have one bible version: {}'.format(version))
def save(self, version="개역한글판성경"):
if version in self._versionlist:
this_dir, this_filename = os.path.split(__file__)
filename = os.path.join(this_dir, "data", version + ".csv.gz")
b = self._biblelist[self._versionlist[version]]
b.to_csv(filename, compression='gzip')
print('... save file: {}'.format(filename))
def get(self, version=""):
if version == "":
return self._biblelist[0]
try:
return self._biblelist[self._versionlist[version]]
except:
print('... no bible version: {}'.format(version))
return []
def bystr(self, sstr, form="md"):
if form == "pd":
res = pd.DataFrame()
for b in self._biblelist:
res = pd.concat([res, extract_bystr(b, sstr, form="pd")], axis=0)
return res
else:
msg = ""
for b in self._biblelist:
msg = msg + extract_bystr(b, sstr, form=form) + '\n'
return msg
def search(self, sstr, form="md", regex=False):
res = pd.DataFrame()
for b in self._biblelist:
b_res_idx = b.text.str.contains(sstr, regex=regex)
if sum(b_res_idx) > 0:
res = pd.concat([res, b[b_res_idx]], axis=0)
if len(res) > 0:
return get_texts(res, form=form)
else:
print('... no matched results')
return []
def read(self, sstr, form='mdlines'):
msg = self.bystr(sstr, form=form)
with open('.temp.md', 'w') as f:
f.writelines(msg)
cmd = ['open', '.temp.md']
subprocess.call(cmd)
def bible_parser(version_name="개역한글판성경"):
this_dir, this_filename = os.path.split(__file__)
filename = os.path.join(this_dir, "data", version_name + ".txt")
with open(filename, "r") as f:
lines = f.readlines()
books = []
chapters = []
verses = []
texts = []
for i, line in enumerate(lines):
line = line.strip('\n\r')
if len(line) == 0:
continue
if line[0] == "#":
continue
if line.find(':') == -1:
continue
hp = line.find(' ')
if hp > 1 and hp < 25:
header = line[:hp]
text = line[hp+1:]
try:
tmp = header.split(':')[0]
if tmp.find('.') > 0:
book = tmp.split('.')[0]
chapter = tmp.split('.')[1]
elif tmp[:2] in ["1_", "2_", "3_"]:
book = tmp[:2] + ''.join(filter(str.isalpha, tmp[2:]))
chapter = ''.join(filter(str.isdigit, tmp[2:]))
else:
book = ''.join(filter(str.isalpha, tmp))
chapter = ''.join(filter(str.isdigit, tmp))
verse = header.split(':')[1]
except:
print('... header error: ({}) {}'.format(i, header))
continue
try:
verse = int(verse)
chapter = int(chapter)
except:
print("... conversion error: ({}) {} {}".format(i, verse, chapter))
continue
books.append(book)
chapters.append(chapter)
verses.append(verse)
texts.append(text)
else:
print("... unrecognized line: ({}) {}".format(i, line))
df_bible = {'book':books, 'chapter':chapters, 'verse':verses, 'text':texts}
idx = range(len(books))
bible = pd.DataFrame(data=df_bible, index=idx)
return bible
def read_full_bible(version_name="개역한글판성경", save=False):
try:
this_dir, this_filename = os.path.split(__file__)
filename = os.path.join(this_dir, "data", version_name + ".csv.gz")
full_bible = pd.read_csv(filename, index_col=0, compression = "gzip")
return full_bible
except FileNotFoundError:
print('... generate bible database: {}'.format(filename))
bible = bible_parser(version_name=version_name)
listname = os.path.join(this_dir, "data", u"book_names.csv")
table = pd.read_csv(listname, index_col=0)
if bible['book'][0] == 'Gen':
table['book'] = table['eng_short']
elif bible['book'][0] == 'Genesis':
table['book'] = table['eng']
else:
table['book'] = table['kor_short']
full_bible = pd.merge(bible, table, on='book', how='left')
if save:
full_bible.to_csv(filename, compression='gzip')
return full_bible
def find_id(bible, book=[], chapter=[], verse=[], verb=False):
isfullbible = False
books = set(bible['book'])
if "code" in bible.columns:
isfullbible = True
if len(book) == 0:
book = books[0]
if isinstance(book, str):
book = [book]
if verb: print('... search book:{}'.format(book))
if isfullbible:
result = bible.loc[bible.kor.isin(book) | bible.kor_short.isin(book) | bible.eng.isin(book) | bible.eng_short.isin(book) | bible.book.isin(book)]
else:
result = bible.loc[bible.book.isin(book)]
if isinstance(chapter, int):
chapter = [chapter]
if len(chapter) == 0:
return result
if verb: print('... search chapter: {}'.format(chapter))
result = result.loc[bible.chapter.isin(chapter)]
if isinstance(verse, int):
verse = [verse]
if len(verse) == 0:
return result
if verb: print('... search verse: {}'.format(verse))
result = result.loc[bible.verse.isin(verse)]
if len(result) > 0:
return result
else:
print("... not found: {}, {}, {}".format(book, chapter, verse))
return []
def extract_bystr(bible, sstr, form="pd"):
sstr = sstr.replace(" ", "")
if sstr.find(":") > 0:
head = sstr.split(':')[0]
verses = sstr.split(':')[1]
else:
head = sstr
verses = []
book = ''.join(filter(str.isalpha, head))
chapter = ''.join(filter(str.isdigit, head))
if len(chapter) == 0:
chapter = []
else:
chapter = int(chapter)
if len(verses) > 0:
if verses.find(',') > 0:
verses = verses.split(',')
elif verses.find('-') > 0:
start = verses.split('-')[0]
end = verses.split('-')[1]
try:
verses = list(range(int(start), int(end)+1))
except:
print('... wrong format: {}'.format(sstr))
return 0
else:
verses = [int(verses)]
verses = [int(v) for v in verses]
res = find_id(bible, book=book, chapter=chapter, verse=verses)
if len(res) == 0:
return []
return get_texts(res, form=form, sstr=sstr)
def get_texts(bible_pd, form="md", sstr="", sep="", text_width=0):
    """Render the selected verses in *bible_pd* in the requested format.

    form: "pd" returns the DataFrame unchanged, "string" a plain-text
    rendering, "md" a single markdown line, "mdlines" one markdown line
    per verse.  *sstr* is the original search string used as a label and
    *sep* separates book name and chapter in the generated verse id.
    text_width is currently unused.  Returns a DataFrame, a string, or
    ``[]`` for an unknown *form*.

    NOTE(review): mutates *bible_pd* in place (adds id/md/string/footnote
    columns) before formatting.
    """
    if form == "pd":
        return bible_pd
    if len(bible_pd["book"]) == 0:
        return ""
    # Verse id = short book name + "<chapter>:<verse>", e.g. "Gen1:1".
    bible_pd.loc[:, "id"] = bible_pd.loc[:, "kor_short"] + sep + bible_pd["chapter"].astype(str) + ":" + bible_pd["verse"].astype(str)
    bible_pd = tidy_footnote(bible_pd)
    # Whole-chapter search on a single book: extend the label with the
    # actual verse range, e.g. "Gen1" -> "Gen1:1-31".
    if (len(set(bible_pd["book"])) == 1) and (sstr.find(":") == -1):
        min_v = bible_pd["verse"].min()
        max_v = bible_pd["verse"].max()
        sstr = "{}:{}-{}".format(sstr, min_v, max_v)
    if form == "string":
        if sstr == "":
            # No label: prefix every verse with its own id, one per line.
            bible_pd[form] = bible_pd["id"] + " - " + bible_pd[form].astype(str)
            msg = '\n'.join(bible_pd[form].values)
        else:
            # Label given: single line with all verses joined.
            msg = sstr + ' ' + ' '.join(bible_pd[form].values)
        return msg
    if form == "md":
        if sstr == "":
            bible_pd[form] = "`" + bible_pd["id"] + "` " + bible_pd[form].astype(str)
            msg = '\n'.join(bible_pd[form].values)
        else:
            # Verse numbers rendered as superscripts, joined on one line.
            verse_string_list = [ '<sup>{}</sup> {}'.format(v, l) for v,l in zip(bible_pd['verse'], bible_pd[form]) ]
            msg = '`{}` '.format(sstr) + ' '.join(verse_string_list)
        # Append collected markdown footnote definitions, if any.
        if sum(bible_pd["footnote"] != "") > 0:
            return msg + '\n' + ''.join(bible_pd["footnote"].values)
        else:
            return msg
    if form == "mdlines":
        # Like "md" without a label: one markdown line per verse.
        bible_pd["md"] = "`" + bible_pd["id"] + "` " + bible_pd["md"].astype(str)
        msg = '\n'.join(bible_pd["md"].values)
        if sum(bible_pd["footnote"] != "") > 0:
            return msg + '\n' + ''.join(bible_pd["footnote"].values)
        else:
            return msg
    print('... {} format is not implemented: ["pd", "md", "string"]'.format(form))
    return []
def tidy_footnote(bible_pd, keyword="FOOTNOTE"):
    """Split inline footnote markers out of the verse text.

    Footnotes are embedded in the ``text`` column as
    ``__a__FOOTNOTE__a__ <note> __b__FOOTNOTE__b__`` spans.  Adds three
    columns: ``string`` (text with footnotes removed), ``md`` (text with
    markdown footnote references like ``[^<id>a]``) and ``footnote`` (the
    markdown footnote definitions for that verse).
    Assumes the ``id`` column is already present (set by get_texts) and at
    most 6 footnotes per verse -- TODO confirm.
    """
    bible_pd["md"] = bible_pd["text"]
    bible_pd["string"] = bible_pd["text"]
    bible_pd["footnote"] = ""
    start_word = "__a__{}__a__".format(keyword)
    end_word = "__b__{}__b__".format(keyword)
    # NOTE(review): end_word is unused; end markers are unified with the
    # start marker via the replace() below before splitting.
    fn_idx = ["a", "b", "c", "d", "e", "f"]
    for i in bible_pd.index[bible_pd.text.str.contains(start_word)]:
        text = bible_pd.at[i, "text"]
        # After replace+split, tmp alternates [text, note, text, note, ...].
        tmp = text.replace("_b_", "_a_").split(start_word)
        # Plain string keeps only the even (non-footnote) parts.
        bible_pd.at[i, "string"] = tmp[0] + ''.join(tmp[2::2])
        md = tmp[0]
        fn = ""
        for j in range(int(len(tmp)/2)):
            # Insert a [^<id><letter>] reference where each note appeared,
            # and accumulate the matching definition line ("TR" stripped).
            md = md + "[^{}{}]".format(bible_pd.at[i, "id"], fn_idx[j]) + tmp[j*2 + 2]
            fn = fn + "[^{}{}]:".format(bible_pd.at[i, "id"], fn_idx[j]) + tmp[j*2 + 1].replace("TR","") + '\n'
        bible_pd.at[i, "md"] = md
        bible_pd.at[i, "footnote"] = fn
    return bible_pd
def make_mdpage(bible, day_info, save_dir=None):
    """Render one day of a bible-reading plan as a markdown page.

    Parameters
    ----------
    bible : pandas.DataFrame or str
        Full bible table, or a version name to load via ``read_full_bible``.
    day_info : dict or str
        Day description (keys: day, title, song, prayer, summary, verses),
        or the path of a YAML file containing it.
    save_dir : str, optional
        When given, the page is also written to
        ``<save_dir>/day<N>-<title><version>.md``.

    Returns
    -------
    str markdown page, or ``0`` when loading *day_info* or *bible* fails
    (kept for backward compatibility).
    """
    if isinstance(day_info, str):
        try:
            with open(day_info, "r") as f:
                day_info = yaml.load(f, yaml.BaseLoader)
        except Exception:
            # Bug fix: was a bare `except:` which would also swallow
            # SystemExit/KeyboardInterrupt; keep the best-effort behaviour.
            print("... file: {} parser error!".format(day_info))
            return 0
    bible_version = ""
    if isinstance(bible, str):
        try:
            bible_version = "-" + bible
            bible = read_full_bible(bible)
        except Exception:
            # Bug fix: narrowed from a bare `except:` as above.
            print("... read bible error: {}".format(bible_version[1:]))
            return 0
    # Page header (Korean labels: day, praise, prayer, summary, version).
    msg = "# {}일차 - {}\n\n".format(day_info["day"], day_info["title"])
    msg = msg + "찬양 : {}\n\n".format(day_info["song"])
    msg = msg + "기도 : {}\n\n".format(day_info["prayer"])
    msg = msg + "요약 : {}\n\n".format(day_info["summary"])
    msg = msg + "성경 버전 : {}\n\n".format(bible_version[1:])
    # One markdown bullet per verse reference.
    for v in day_info["verses"]:
        msg = msg + '- {}\n\n'.format(extract_bystr(bible, v, form="md"))
    msg = msg + "### info\n\n"
    msg = msg + "- 성경 구절 갯수 : {}".format(len(day_info["verses"]))
    if save_dir is not None:
        filename = '{}/day{}-{}{}.md'.format(save_dir, day_info["day"], day_info["title"].replace(" ", ""), bible_version)
        with open(filename, "w") as f:
            f.write(msg)
        print('... save to {}'.format(filename))
    return msg
| true | true |
f7224ba8e912b27faf46182144d00f98d96be6c9 | 3,203 | py | Python | test/packaging/place-files-in-subdirectory.py | moroten/scons | 20927b42ed4f0cb87f51287fa3b4b6cf915afcf8 | [
"MIT"
] | 1 | 2015-11-04T22:22:10.000Z | 2015-11-04T22:22:10.000Z | test/packaging/place-files-in-subdirectory.py | moroten/scons | 20927b42ed4f0cb87f51287fa3b4b6cf915afcf8 | [
"MIT"
] | 1 | 2020-05-19T02:59:19.000Z | 2020-05-21T09:05:19.000Z | test/packaging/place-files-in-subdirectory.py | moroten/scons | 20927b42ed4f0cb87f51287fa3b4b6cf915afcf8 | [
"MIT"
] | 2 | 2018-01-16T11:29:16.000Z | 2020-05-13T16:48:26.000Z | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test the requirement to place files in a given subdirectory before archiving.
"""
import os
import TestSCons
python = TestSCons.python
test = TestSCons.TestSCons()
# The src_targz test below shells out to tar; skip everything if absent.
tar = test.detect('TAR', 'tar')
if not tar:
    test.skip_test('tar not found, skipping test\n')
#
# TEST: subdir creation and file copying
#
test.subdir('src')
test.write('src/main.c', '')
test.write('SConstruct', """
env = Environment(tools=['default', 'packaging'])
env.Package( NAME        = 'libfoo',
             PACKAGEROOT = 'libfoo',
             PACKAGETYPE = 'src_zip',
             VERSION     = '1.2.3',
             source      = [ 'src/main.c', 'SConstruct' ] )
""")
# Explicit PACKAGEROOT: sources must be staged under ./libfoo before zipping.
test.run(arguments='libfoo-1.2.3.zip', stderr = None)
test.must_exist( 'libfoo' )
test.must_exist( 'libfoo/SConstruct' )
test.must_exist( 'libfoo/src/main.c' )
#
# TEST: subdir guessing and file copying.
#
test.subdir('src')
test.write('src/main.c', '')
test.write('SConstruct', """
env = Environment(tools=['default', 'packaging'])
env.Package( NAME        = 'libfoo',
             VERSION     = '1.2.3',
             PACKAGETYPE = 'src_zip',
             TARGET      = 'src.zip',
             source      = [ 'src/main.c', 'SConstruct' ] )
""")
# No PACKAGEROOT given: it should default to "<NAME>-<VERSION>".
test.run(stderr = None)
test.must_exist( 'libfoo-1.2.3' )
test.must_exist( 'libfoo-1.2.3/SConstruct' )
test.must_exist( 'libfoo-1.2.3/src/main.c' )
#
# TEST: unpacking without the buildir.
#
test.subdir('src')
test.subdir('temp')
test.write('src/main.c', '')
test.write('SConstruct', """
env = Environment(tools=['default', 'packaging'])
env.Package( NAME        = 'libfoo',
             VERSION     = '1.2.3',
             PACKAGETYPE = 'src_targz',
             source      = [ 'src/main.c', 'SConstruct' ] )
""")
test.run(stderr = None)
# Archive contents must be rooted at the packageroot directory.
# NOTE(review): local name "str" shadows the builtin.
str = os.popen( 'tar -tzf %s'%test.workpath('libfoo-1.2.3.tar.gz') ).read()
test.fail_test( str != "libfoo-1.2.3/src/main.c\nlibfoo-1.2.3/SConstruct\n" )
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 27.852174 | 77 | 0.667187 |
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import TestSCons
python = TestSCons.python
test = TestSCons.TestSCons()
tar = test.detect('TAR', 'tar')
if not tar:
test.skip_test('tar not found, skipping test\n')
test.subdir('src')
test.write('src/main.c', '')
test.write('SConstruct', """
env = Environment(tools=['default', 'packaging'])
env.Package( NAME = 'libfoo',
PACKAGEROOT = 'libfoo',
PACKAGETYPE = 'src_zip',
VERSION = '1.2.3',
source = [ 'src/main.c', 'SConstruct' ] )
""")
test.run(arguments='libfoo-1.2.3.zip', stderr = None)
test.must_exist( 'libfoo' )
test.must_exist( 'libfoo/SConstruct' )
test.must_exist( 'libfoo/src/main.c' )
test.subdir('src')
test.write('src/main.c', '')
test.write('SConstruct', """
env = Environment(tools=['default', 'packaging'])
env.Package( NAME = 'libfoo',
VERSION = '1.2.3',
PACKAGETYPE = 'src_zip',
TARGET = 'src.zip',
source = [ 'src/main.c', 'SConstruct' ] )
""")
test.run(stderr = None)
test.must_exist( 'libfoo-1.2.3' )
test.must_exist( 'libfoo-1.2.3/SConstruct' )
test.must_exist( 'libfoo-1.2.3/src/main.c' )
test.subdir('src')
test.subdir('temp')
test.write('src/main.c', '')
test.write('SConstruct', """
env = Environment(tools=['default', 'packaging'])
env.Package( NAME = 'libfoo',
VERSION = '1.2.3',
PACKAGETYPE = 'src_targz',
source = [ 'src/main.c', 'SConstruct' ] )
""")
test.run(stderr = None)
str = os.popen( 'tar -tzf %s'%test.workpath('libfoo-1.2.3.tar.gz') ).read()
test.fail_test( str != "libfoo-1.2.3/src/main.c\nlibfoo-1.2.3/SConstruct\n" )
test.pass_test()
| true | true |
f7224c2e58ee05e710df8793a015821ab6d25cf2 | 1,128 | py | Python | imporTant_ModulE/complicated_code.py | bradchesney79/illacceptanything | 4594ae4634fdb5e39263a6423dc255ed46c25208 | [
"MIT"
] | 1 | 2017-08-21T19:15:43.000Z | 2017-08-21T19:15:43.000Z | imporTant_ModulE/complicated_code.py | bradchesney79/illacceptanything | 4594ae4634fdb5e39263a6423dc255ed46c25208 | [
"MIT"
] | null | null | null | imporTant_ModulE/complicated_code.py | bradchesney79/illacceptanything | 4594ae4634fdb5e39263a6423dc255ed46c25208 | [
"MIT"
] | 1 | 2018-10-26T23:46:06.000Z | 2018-10-26T23:46:06.000Z | def main(important_parameter, ignored_parameter):
"""
:return: The answer to everything
"""
important_field = important_parameter # this way the parameter was actually used, hence making it important.
def realmain():
def actualrealrealmain():
def nownotevenkiddingtherealfunction():
print "The answer to everything"
# a
# comment
return nownotevenkiddingtherealfunction
#every
#now
return actualrealrealmain
#and
#then
return realmain
if __name__ == "__main__":
#need like
#300+ lines of code
#to give you the Answer to everything
#i think that's fair
f = main(1, 2)
while hasattr(f, '__call__'):
f = f()
| 2.96063 | 113 | 0.414007 | def main(important_parameter, ignored_parameter):
"""
:return: The answer to everything
"""
important_field = important_parameter
def realmain():
def actualrealrealmain():
def nownotevenkiddingtherealfunction():
print "The answer to everything"
return nownotevenkiddingtherealfunction
return actualrealrealmain
return realmain
if __name__ == "__main__":
f = main(1, 2)
while hasattr(f, '__call__'):
f = f()
| false | true |
f7224cdc48875b4ef34c0499563e922d8a4ccc4b | 509 | py | Python | snake/food_obj.py | sradley/snake-py | 90531c6a867e2eaaffab48218aeab5e2df6f5101 | [
"MIT"
] | null | null | null | snake/food_obj.py | sradley/snake-py | 90531c6a867e2eaaffab48218aeab5e2df6f5101 | [
"MIT"
] | 1 | 2018-07-18T16:18:14.000Z | 2018-07-18T16:18:14.000Z | snake/food_obj.py | sradley/snake-py | 90531c6a867e2eaaffab48218aeab5e2df6f5101 | [
"MIT"
] | 1 | 2019-12-11T16:48:25.000Z | 2019-12-11T16:48:25.000Z | """
snake/food_obj.py
author: Stephen Radley
date: 2018/07/05
package: snake
version: 0.0.1
"""
from random import randint
from snake.location_obj import Location
from snake.functions import find_valid_locs
"""
Food ...
"""
class Food:
    """A food item for the snake game.

    On construction, a random cell is chosen from the locations that are
    currently valid (inside the dim_x-by-dim_y board and not occupied by
    the snake) and stored as ``self.loc``.
    """

    def __init__(self, dim_x, dim_y, snake):
        open_cells = find_valid_locs(dim_x, dim_y, snake)
        picked = open_cells[randint(0, len(open_cells) - 1)]
        self.loc = Location(picked[0], picked[1])
from random import randint
from snake.location_obj import Location
from snake.functions import find_valid_locs
class Food:
def __init__(self, dim_x, dim_y, snake):
valid_locs = find_valid_locs(dim_x, dim_y, snake)
loc = valid_locs[randint(0, len(valid_locs)-1)]
self.loc = Location(loc[0], loc[1]) | true | true |
f7224cf70c4a227f3f15c140799654f94546b6af | 2,709 | py | Python | src/sot_talos_balance/test/test_zmpEstimator.py | imaroger/sot-talos-balance | 5e56700b4e105273ecf6feb3474789beac469a77 | [
"BSD-2-Clause"
] | null | null | null | src/sot_talos_balance/test/test_zmpEstimator.py | imaroger/sot-talos-balance | 5e56700b4e105273ecf6feb3474789beac469a77 | [
"BSD-2-Clause"
] | null | null | null | src/sot_talos_balance/test/test_zmpEstimator.py | imaroger/sot-talos-balance | 5e56700b4e105273ecf6feb3474789beac469a77 | [
"BSD-2-Clause"
] | null | null | null | from time import sleep
import matplotlib.pyplot as plt
import numpy as np
from sot_talos_balance.utils.run_test_utils import evalCommandClient, run_ft_calibration, run_test, runCommandClient
try:
# Python 2
input = raw_input # noqa
except NameError:
pass
run_test('appli_zmpEstimator.py')
run_ft_calibration('robot.ftc')
input("Wait before running the test")
# plug ZMP emergency signal
runCommandClient('plug(robot.zmp_estimator.emergencyStop,robot.cm.emergencyStop_zmp)')
sleep(2.0)
runCommandClient('robot.comTrajGen.move(1,-0.025,1.0)')
sleep(20.0)
runCommandClient('robot.comTrajGen.startSinusoid(1,0.05,2.0)')
sleep(20.0)
runCommandClient('dump_tracer(robot.tracer)')
# --- DISPLAY
zmpEst_data = np.loadtxt('/tmp/dg_' + evalCommandClient('robot.zmp_estimator.name') + '-zmp.dat')
zmpDyn_data = np.loadtxt('/tmp/dg_' + evalCommandClient('robot.dynamic.name') + '-zmp.dat')
com_data = np.loadtxt('/tmp/dg_' + evalCommandClient('robot.dynamic.name') + '-com.dat')
forceRLEG_data = np.loadtxt('/tmp/dg_' + evalCommandClient('robot.device.name') + '-forceRLEG.dat')
forceLLEG_data = np.loadtxt('/tmp/dg_' + evalCommandClient('robot.device.name') + '-forceLLEG.dat')
plt.ion()
plt.figure()
plt.plot(zmpEst_data[:, 1], 'b-')
plt.plot(zmpDyn_data[:, 1], 'b--')
plt.plot(com_data[:, 1], 'b:')
plt.plot(zmpEst_data[:, 2], 'r-')
plt.plot(zmpDyn_data[:, 2], 'r--')
plt.plot(com_data[:, 2], 'r:')
plt.title('ZMP estimate vs dynamic vs CoM (planar)')
plt.legend(['x estimate', 'x dynamic', 'x CoM', 'y estimate', 'y dynamic', 'y CoM'])
plt.figure()
plt.plot(com_data[:, 1], 'b-')
plt.plot(com_data[:, 2], 'r-')
plt.plot(com_data[:, 3], 'g-')
plt.title('COM')
plt.legend(['x', 'y', 'z'])
plt.figure()
plt.plot(zmpDyn_data[:, 1], 'b-')
plt.plot(zmpDyn_data[:, 2], 'r-')
plt.plot(zmpDyn_data[:, 3], 'g-')
plt.title('ZMP dynamic')
plt.legend(['x', 'y', 'z'])
plt.figure()
plt.plot(zmpEst_data[:, 1], 'b-')
plt.plot(zmpEst_data[:, 2], 'r-')
plt.plot(zmpEst_data[:, 3], 'g-')
plt.title('ZMP estimate')
plt.legend(['x', 'y', 'z'])
plt.figure()
plt.plot(forceLLEG_data[:, 1], 'b-')
plt.plot(forceLLEG_data[:, 2], 'r-')
plt.plot(forceLLEG_data[:, 3], 'g-')
plt.plot(forceLLEG_data[:, 4], 'b--')
plt.plot(forceLLEG_data[:, 5], 'r--')
plt.plot(forceLLEG_data[:, 6], 'g--')
plt.title('forceLLEG')
plt.legend(['fx', 'fy', 'fz', 'tx', 'ty', 'tz'])
plt.figure()
plt.plot(forceRLEG_data[:, 1], 'b-')
plt.plot(forceRLEG_data[:, 2], 'r-')
plt.plot(forceRLEG_data[:, 3], 'g-')
plt.plot(forceRLEG_data[:, 4], 'b--')
plt.plot(forceRLEG_data[:, 5], 'r--')
plt.plot(forceRLEG_data[:, 6], 'g--')
plt.title('forceRLEG')
plt.legend(['fx', 'fy', 'fz', 'tx', 'ty', 'tz'])
input("Wait before leaving the simulation")
| 30.1 | 116 | 0.665559 | from time import sleep
import matplotlib.pyplot as plt
import numpy as np
from sot_talos_balance.utils.run_test_utils import evalCommandClient, run_ft_calibration, run_test, runCommandClient
try:
input = raw_input
except NameError:
pass
run_test('appli_zmpEstimator.py')
run_ft_calibration('robot.ftc')
input("Wait before running the test")
runCommandClient('plug(robot.zmp_estimator.emergencyStop,robot.cm.emergencyStop_zmp)')
sleep(2.0)
runCommandClient('robot.comTrajGen.move(1,-0.025,1.0)')
sleep(20.0)
runCommandClient('robot.comTrajGen.startSinusoid(1,0.05,2.0)')
sleep(20.0)
runCommandClient('dump_tracer(robot.tracer)')
zmpEst_data = np.loadtxt('/tmp/dg_' + evalCommandClient('robot.zmp_estimator.name') + '-zmp.dat')
zmpDyn_data = np.loadtxt('/tmp/dg_' + evalCommandClient('robot.dynamic.name') + '-zmp.dat')
com_data = np.loadtxt('/tmp/dg_' + evalCommandClient('robot.dynamic.name') + '-com.dat')
forceRLEG_data = np.loadtxt('/tmp/dg_' + evalCommandClient('robot.device.name') + '-forceRLEG.dat')
forceLLEG_data = np.loadtxt('/tmp/dg_' + evalCommandClient('robot.device.name') + '-forceLLEG.dat')
plt.ion()
plt.figure()
plt.plot(zmpEst_data[:, 1], 'b-')
plt.plot(zmpDyn_data[:, 1], 'b--')
plt.plot(com_data[:, 1], 'b:')
plt.plot(zmpEst_data[:, 2], 'r-')
plt.plot(zmpDyn_data[:, 2], 'r--')
plt.plot(com_data[:, 2], 'r:')
plt.title('ZMP estimate vs dynamic vs CoM (planar)')
plt.legend(['x estimate', 'x dynamic', 'x CoM', 'y estimate', 'y dynamic', 'y CoM'])
plt.figure()
plt.plot(com_data[:, 1], 'b-')
plt.plot(com_data[:, 2], 'r-')
plt.plot(com_data[:, 3], 'g-')
plt.title('COM')
plt.legend(['x', 'y', 'z'])
plt.figure()
plt.plot(zmpDyn_data[:, 1], 'b-')
plt.plot(zmpDyn_data[:, 2], 'r-')
plt.plot(zmpDyn_data[:, 3], 'g-')
plt.title('ZMP dynamic')
plt.legend(['x', 'y', 'z'])
plt.figure()
plt.plot(zmpEst_data[:, 1], 'b-')
plt.plot(zmpEst_data[:, 2], 'r-')
plt.plot(zmpEst_data[:, 3], 'g-')
plt.title('ZMP estimate')
plt.legend(['x', 'y', 'z'])
plt.figure()
plt.plot(forceLLEG_data[:, 1], 'b-')
plt.plot(forceLLEG_data[:, 2], 'r-')
plt.plot(forceLLEG_data[:, 3], 'g-')
plt.plot(forceLLEG_data[:, 4], 'b--')
plt.plot(forceLLEG_data[:, 5], 'r--')
plt.plot(forceLLEG_data[:, 6], 'g--')
plt.title('forceLLEG')
plt.legend(['fx', 'fy', 'fz', 'tx', 'ty', 'tz'])
plt.figure()
plt.plot(forceRLEG_data[:, 1], 'b-')
plt.plot(forceRLEG_data[:, 2], 'r-')
plt.plot(forceRLEG_data[:, 3], 'g-')
plt.plot(forceRLEG_data[:, 4], 'b--')
plt.plot(forceRLEG_data[:, 5], 'r--')
plt.plot(forceRLEG_data[:, 6], 'g--')
plt.title('forceRLEG')
plt.legend(['fx', 'fy', 'fz', 'tx', 'ty', 'tz'])
input("Wait before leaving the simulation")
| true | true |
f7224e7cd4f3a5fd5563d15fb1ff78a94c670d63 | 98,578 | py | Python | hexapodengine2.py | jonathan-sung/Hexapod-GA-Gait | 5e82c2f141f6bd88d8b6c0a7b658c8ce0c5be8f4 | [
"MIT"
] | null | null | null | hexapodengine2.py | jonathan-sung/Hexapod-GA-Gait | 5e82c2f141f6bd88d8b6c0a7b658c8ce0c5be8f4 | [
"MIT"
] | null | null | null | hexapodengine2.py | jonathan-sung/Hexapod-GA-Gait | 5e82c2f141f6bd88d8b6c0a7b658c8ce0c5be8f4 | [
"MIT"
] | 1 | 2022-03-10T12:57:56.000Z | 2022-03-10T12:57:56.000Z | import pybullet as p
import math
import pybullet_data
import time
import random
import numpy as np
import serial
def radToPwm(angle):
    """Convert a joint angle in radians to an SSC-32 PWM pulse width (µs).

    0 rad maps to the 1500 µs centre position; the scale is 2000/pi µs
    per radian, so ±pi/2 spans roughly 500-2500 µs.
    """
    scale = 2000 / math.pi
    return 1500 + angle * scale
# t in ms; the closer t is to 0, more accuracy but less smooth motion
def updateRealServos(ser, t):
    """Mirror the simulated joint angles onto the physical hexapod.

    Writes SSC-32 position commands to serial port *ser*, one command line
    per leg, with move time *t* in milliseconds.  Angles are negated where
    the physical servo direction is mirrored, and a few channels carry a
    fixed PWM offset (±50/±100) -- presumably servo-horn alignment trim on
    the real robot; confirm against the hardware.
    """
    # right legs (channels 0-2 <- joints 8-10, 4-6 <- joints 4-6, 8-10 <- joints 0-2)
    ser.write(
        f'#0P{radToPwm(-p.getJointState(hexapod_ID, 8)[0])}T{t}#1P{radToPwm(p.getJointState(hexapod_ID, 9)[0])}T{t}#2P{radToPwm(-p.getJointState(hexapod_ID, 10)[0])-100}T{t}\r'.encode(
            'utf-8'))
    ser.write(
        f'#4P{radToPwm(-p.getJointState(hexapod_ID, 4)[0])}T{t}#5P{radToPwm(p.getJointState(hexapod_ID, 5)[0])}T{t}#6P{radToPwm(-p.getJointState(hexapod_ID, 6)[0])+100}T{t}\r'.encode(
            'utf-8'))
    ser.write(
        f'#8P{radToPwm(-p.getJointState(hexapod_ID, 0)[0])}T{t}#9P{radToPwm(p.getJointState(hexapod_ID, 1)[0])}T{t}#10P{radToPwm(-p.getJointState(hexapod_ID, 2)[0])}T{t}\r'.encode(
            'utf-8'))
    # left legs (channels 24-26, 20-22, 16-18 <- joints 12-14, 16-18, 20-22)
    ser.write(
        f'#24P{radToPwm(-p.getJointState(hexapod_ID, 12)[0])}T{t}#25P{radToPwm(p.getJointState(hexapod_ID, 13)[0])}T{t}#26P{radToPwm(-p.getJointState(hexapod_ID, 14)[0])+100}T{t}\r'.encode(
            'utf-8'))
    ser.write(
        f'#20P{radToPwm(-p.getJointState(hexapod_ID, 16)[0])}T{t}#21P{radToPwm(p.getJointState(hexapod_ID, 17)[0])}T{t}#22P{radToPwm(-p.getJointState(hexapod_ID, 18)[0])}T{t}\r'.encode(
            'utf-8'))
    ser.write(
        f'#16P{radToPwm(-p.getJointState(hexapod_ID, 20)[0])}T{t}#17P{radToPwm(p.getJointState(hexapod_ID, 21)[0])}T{t}#18P{radToPwm(-p.getJointState(hexapod_ID, 22)[0])-50}T{t}\r'.encode(
            'utf-8'))
def init_debug_parameters():
    """Create one GUI debug slider per joint (pelvis/hip/knee for each of
    the 6 legs) and append the slider ids to the global control_IDs list."""
    for leg in range(6):
        for label in (f"Pelvis {leg}", f"Hip {leg}", f"Knee {leg}"):
            slider_id = p.addUserDebugParameter(label, -servoRangeOfMotion, servoRangeOfMotion, 0)
            control_IDs.append(slider_id)
def read_debug_parameters():
    """Return the current value of every GUI debug slider, in the order
    the sliders were created."""
    return [p.readUserDebugParameter(slider_id) for slider_id in control_IDs]
def chromosomeCreator():
    """Build a simple hand-made chromosome for debugging.

    Layout: [init_duration, force] followed by LENGTH_OF_SEQUENCE - 1 gait
    states of the form [duration, servo0..servo17], where state j raises
    only servo j to the range-of-motion limit.  Prints the payload length.
    """
    duration = 1
    force = 200
    states = []
    for servo_index in range(LENGTH_OF_SEQUENCE - 1):
        gait_state = [0] * NUM_OF_SERVOS
        gait_state[servo_index] = servoRangeOfMotion
        states.append(duration)
        states.extend(gait_state)
    print(len(states))
    return [duration, force] + states
def readGait(progress, chromosome):
    """Return the 18 interpolated joint targets for time *progress*.

    *chromosome* is a flat list of gait states, each LENGTH_OF_GAIT_STATE
    long: [duration, servo0..servo17].  The first LENGTH_OF_START_SEQUENCE
    states are a one-off lead-in; once it has played through (tracked by
    the global firstCycleComplete), only the LENGTH_OF_CYCLE remaining
    states loop forever.  The returned pose linearly interpolates between
    the active state and the next one.
    """
    global firstCycleComplete
    end_index = LENGTH_OF_SEQUENCE
    # The start sequence is done once progress exceeds the summed durations
    # of the full sequence; from then on only the cycle portion is used.
    if not firstCycleComplete and progress >= sum([chromosome[x] for x in range(0, len(chromosome), LENGTH_OF_GAIT_STATE)]):
        firstCycleComplete = True
    if firstCycleComplete:
        # Drop the start sequence: shift time back by its total duration
        # and slice it off the front of the chromosome.
        progress = progress - sum([chromosome[x] for x in range(0, ((LENGTH_OF_START_SEQUENCE - 1) * LENGTH_OF_GAIT_STATE) + 1, LENGTH_OF_GAIT_STATE)])
        chromosome = chromosome[LENGTH_OF_START_SEQUENCE * LENGTH_OF_GAIT_STATE:]
        end_index = LENGTH_OF_CYCLE
    start_index = 0
    total_duration = sum([chromosome[x] for x in range(0, len(chromosome), LENGTH_OF_GAIT_STATE)])
    # Wrap progress into the current sequence's duration.
    progress = progress % total_duration
    current_duration_index = 0
    next_duration_index = 0
    sum_of_durations = 0
    # Locate the state whose cumulative-duration window contains progress.
    for j in range(start_index, end_index):
        current_position_index = j * LENGTH_OF_GAIT_STATE
        sum_of_durations = sum([chromosome[x] for x in range(start_index, current_position_index + 1, LENGTH_OF_GAIT_STATE)])
        if progress < sum_of_durations:
            current_duration_index = current_position_index
            next_duration_index = (j + 1) * LENGTH_OF_GAIT_STATE
            if (j + 1) >= end_index:
                # Last state interpolates back to the first one.
                next_duration_index = start_index * LENGTH_OF_GAIT_STATE
            break
    current_gait_state = chromosome[current_duration_index + 1: current_duration_index + LENGTH_OF_GAIT_STATE]
    next_gait_state = chromosome[next_duration_index + 1: next_duration_index + LENGTH_OF_GAIT_STATE]
    # While still playing the start sequence, its final state blends into
    # the first state of the cycle instead of wrapping to the beginning.
    if not firstCycleComplete and current_duration_index == (LENGTH_OF_SEQUENCE - 1) * LENGTH_OF_GAIT_STATE:
        next_gait_state = chromosome[(LENGTH_OF_START_SEQUENCE * LENGTH_OF_GAIT_STATE) + 1: (LENGTH_OF_START_SEQUENCE * LENGTH_OF_GAIT_STATE) + LENGTH_OF_GAIT_STATE]
    # Fraction of the current state's duration already elapsed (0..1).
    alpha = (progress - (sum_of_durations - chromosome[current_duration_index])) / chromosome[current_duration_index]
    interpolated_gait_state = [interpolate(a, b, alpha) for a, b in zip(current_gait_state, next_gait_state)]
    return interpolated_gait_state
def interpolate(a, b, alpha):
    """Linear interpolation: alpha=0 yields a, alpha=1 yields b."""
    return b * alpha + a * (1 - alpha)
def resetLegJoints():
    """Zero every leg joint: clear joint positions/velocities directly,
    then command the position motors back to the neutral pose."""
    zero_states = [[0]] * 18
    p.resetJointStatesMultiDof(hexapod_ID, JOINT_INDEXES, zero_states, targetVelocities=zero_states)
    neutral_pose = [0] * 18
    p.setJointMotorControlArray(hexapod_ID, JOINT_INDEXES, p.POSITION_CONTROL, targetPositions=neutral_pose, forces=[150] * 18)
def resetEnvironment():
    """Reset the hexapod to its start pose (with a tiny random height
    jitter) and advance the simulation by one step."""
    resetLegJoints()
    spawn_height = STARTING_HEIGHT + random.uniform(0, 0.002)
    p.resetBasePositionAndOrientation(hexapod_ID, [0, STARTING_Y, spawn_height], [0, 0, 0, 1])
    p.stepSimulation()
def resetPyBulletSimulation():
    """Rebuild the physics world from scratch: gravity, ground plane and
    the hexapod URDF (stored in the plane_ID/hexapod_ID globals)."""
    global plane_ID
    global hexapod_ID
    p.resetSimulation()
    p.setGravity(0, 0, -9.8)
    plane_ID = p.loadURDF("plane.urdf", globalScaling=4)
    spawn_height = STARTING_HEIGHT + random.uniform(0, 0.002)
    hexapod_ID = p.loadURDF("robot3.urdf", [0, STARTING_Y, spawn_height], [0, 0, 0, 1])
    print(p.getEulerFromQuaternion(p.getBasePositionAndOrientation(hexapod_ID)[1]))
def gaitScore(bodyID):
    """Return (forward distance, |heading error|) for *bodyID*, where the
    heading error is the planar angle away from the +y axis."""
    base_position = p.getBasePositionAndOrientation(bodyID)[0]
    travelled = distanceFromOrigin(bodyID)
    heading_error = angleBetweenVectors(np.array([0, 1]), np.array([base_position[0], base_position[1]]))
    return travelled, abs(heading_error)
def distanceFromOrigin(bodyID):
    """Return the body's y-coordinate, i.e. its forward displacement."""
    position, _orientation = p.getBasePositionAndOrientation(bodyID)
    return position[1]
def angleBetweenVectors(a, b):
    """Return the angle in radians between vectors *a* and *b* (in [0, pi]).

    The dot product of the normalised vectors is clipped to [-1, 1] before
    arccos: floating-point rounding on (anti)parallel vectors can push it
    slightly outside that range, which would make np.arccos return NaN.
    """
    unit_a = a / np.linalg.norm(a)
    unit_b = b / np.linalg.norm(b)
    dot_product = np.clip(np.dot(unit_a, unit_b), -1.0, 1.0)
    return np.arccos(dot_product)
def inverseCurve(x, a):
    """Falloff curve a / (a + x^2): equals 1 at x = 0 and decays toward 0
    as |x| grows; *a* controls how quickly the value drops."""
    return a / (a + x * x)
def collidingLegs():
    """Return True when any hexapod link overlaps something it should not.

    For every link, the AABB-overlapping objects are filtered: links in
    the same 4-link group (same leg) and index -1 are always ignored.
    Non-foot links count any other overlap; foot links only count overlaps
    with other parts of the hexapod itself, so feet may touch the ground.
    Note this is an AABB test, so it over-approximates true contact.
    """
    for j in range(24):
        aabb = (p.getAABB(hexapod_ID, j))
        # Links on the same leg as j (integer division by 4) plus -1.
        familyOfLinks = [x for x in range(24) if math.floor(j / 4) == math.floor(x / 4)] + [-1]
        # x is (bodyUniqueId, linkIndex) for each overlapping object.
        collisionObjects = [x[1] for x in p.getOverlappingObjects(aabb[0], aabb[1]) if (j not in FEET_INDEXES and (x[1] not in familyOfLinks or x[0] != hexapod_ID)) or (j in FEET_INDEXES and x[1] not in familyOfLinks and x[0] == hexapod_ID)]
        if len(collisionObjects) > 0:
            return True
    return False
def runGait(individual):
    """Run *individual*'s gait in real time (optionally on real hardware).

    individual = [init_duration, force, flattened gait states...].  A
    neutral lead-in state is prepended, then the gait plays for 12.5 s of
    wall-clock time while stability and height are sampled each loop pass.
    Afterwards a 5-component fitness (distance, straightness, stability,
    height, collisions) is averaged, printed, appended to a results file
    and returned.
    """
    global REAL_HEXAPOD_CONNECTED
    lastTime = time.time()
    global firstCycleComplete
    dt = 0
    firstCycleComplete = False
    initDuration = individual[0]
    force = individual[1]
    gaitChromosome = individual[2:]
    # Prepend a neutral (all-zero) gait state as the first lead-in state.
    gaitChromosome = ([initDuration] + [0] * NUM_OF_SERVOS) + gaitChromosome
    resetEnvironment()
    stabilityScore = 0
    heightScore = 0
    collisionScore = 0
    sampleCounter = 0
    p.setRealTimeSimulation(1)
    while True:
        # CONFIG_MODE drives the joints from GUI sliders instead of the gait.
        if CONFIG_MODE:
            p.setJointMotorControlArray(hexapod_ID, JOINT_INDEXES, p.POSITION_CONTROL, targetPositions=read_debug_parameters(), forces=([force] * 18))
        else:
            p.setJointMotorControlArray(hexapod_ID, JOINT_INDEXES, p.POSITION_CONTROL, targetPositions=readGait(dt, gaitChromosome), forces=([force] * 18))
        if REAL_HEXAPOD_CONNECTED:
            updateRealServos(ssc32, 100)
        # Evaluation Metrics
        hexapodBasePosAndOrn = p.getBasePositionAndOrientation(hexapod_ID)
        # Stability = sum of |roll| + |pitch| + |yaw| of the body.
        currentStability = sum([abs(angle) for angle in list(p.getEulerFromQuaternion(hexapodBasePosAndOrn[1]))])
        currentHeight = abs(1.375 - hexapodBasePosAndOrn[0][2])
        stabilityScore += currentStability
        heightScore += currentHeight
        # Collision sampling disabled here (kept in evaluateGait).
        #collisionScore += collidingLegs()
        sampleCounter += 1
        # timing variables
        now = time.time()
        dt += now - lastTime
        lastTime = now
        # Finish evaluation after 12.5 seconds
        if dt >= 12.5:
            break
    hexapodBasePosAndOrn = p.getBasePositionAndOrientation(hexapod_ID)
    currentPosition = hexapodBasePosAndOrn[0]
    distance = hexapodBasePosAndOrn[0][1]
    # Heading error relative to the +y axis in the ground plane.
    straightness = abs(angleBetweenVectors(np.array([0, 1]), np.array([currentPosition[0], currentPosition[1]])))
    avgHeight = abs(heightScore / sampleCounter)
    avgStability = stabilityScore / sampleCounter
    avgNumOfCollisions = collisionScore / sampleCounter
    # Each fitness component is scaled into [0, 1] before averaging.
    fitness_distance = distance / 100.0
    fitness_straight = 1.0 - (straightness / math.pi)
    fitness_stability = inverseCurve(avgStability, 1)
    fitness_height = inverseCurve(avgHeight, 1)
    fitness_collisions = round(1 - avgNumOfCollisions, 2)
    fitness_total = (fitness_distance + fitness_straight + fitness_stability + fitness_height + fitness_collisions) / 5.0
    line = f'ID: {UNIQUE_THREAD_ID} | Time Elapsed: {dt} | Evaluation: {fitness_distance, fitness_straight, fitness_stability, fitness_height, fitness_collisions, fitness_total} | Chromosome: {individual}'
    print(line)
    # NOTE(review): hard-coded absolute results path.
    with open('C:/Users/Jonathan/Desktop/results_normal_cyclic.txt', 'a') as f:
        f.write(line)
        f.write('\n')
    return fitness_total
def sinusoidalTestGait(t):
    """Hand-tuned sinusoidal test gait at time *t*.

    Returns 18 joint angles: nine values for one side (two alternating
    coxa/femur phases, fixed tibia) followed by the same nine negated for
    the mirrored side.
    """
    phase = 2 * t
    coxa_a = (math.pi / 4) * math.sin(phase + math.pi)
    femur_a = 0.2 * math.sin(phase + ((5 * math.pi) / 2))
    tibia_a = 1.3 * math.sin((0 * t) + ((3 * math.pi) / 2))
    coxa_b = (math.pi / 4) * math.sin(phase + 0)
    femur_b = 0.2 * math.sin(phase + ((3 * math.pi) / 2))
    tibia_b = 1.3 * math.sin((0 * t) + ((3 * math.pi) / 2))
    one_side = [coxa_a, femur_a, tibia_a, coxa_b, femur_b, tibia_b, coxa_a, femur_a, tibia_a]
    mirrored = [-angle for angle in one_side]
    return one_side + mirrored
def evaluateGait(individual):
    """Headless GA fitness evaluation of *individual*.

    Steps the simulation for 3000 physics steps (12.5 s at 240 Hz),
    sampling stability/height/collision metrics on 100 of those steps,
    then combines distance, straightness, stability, height and collision
    terms into one averaged fitness.  If PyBullet returns NaN positions,
    the whole world is rebuilt and the evaluation retried.  Returns a
    1-tuple (presumably for the GA library's fitness convention).
    """
    lastTime = time.time()
    numOfPhysicsSteps = 3000
    samplesPerEval = 100
    stabilityUpdateRate = int(numOfPhysicsSteps / samplesPerEval)
    stabilityScore = 0
    heightScore = 0
    collisionScore = 0
    global firstCycleComplete
    while True:
        dt = 0
        firstCycleComplete = False
        initDuration = individual[0]
        force = individual[1]
        gaitChromosome = individual[2:]
        # Prepend a neutral (all-zero) gait state as the first lead-in state.
        gaitChromosome = ([initDuration] + [0] * NUM_OF_SERVOS) + gaitChromosome
        resetEnvironment()
        for ii in range(numOfPhysicsSteps):
            # Sample the evaluation metrics every stabilityUpdateRate steps.
            if ii % stabilityUpdateRate == 0:
                hexapodBasePosAndOrn = p.getBasePositionAndOrientation(hexapod_ID)
                # Stability = sum of |roll| + |pitch| + |yaw| of the body.
                currentStability = sum([abs(angle) for angle in list(p.getEulerFromQuaternion(hexapodBasePosAndOrn[1]))])
                currentHeight = abs(TARGET_HEIGHT - hexapodBasePosAndOrn[0][2])
                stabilityScore += currentStability
                heightScore += currentHeight
                collisionScore += collidingLegs()
            p.setJointMotorControlArray(hexapod_ID, JOINT_INDEXES, p.POSITION_CONTROL, targetPositions=readGait(dt, gaitChromosome), forces=([force] * 18))
            p.stepSimulation()
            # Fixed physics timestep: 1/240 s per step.
            dt += 1. / 240.
        hexapodBasePosAndOrn = p.getBasePositionAndOrientation(hexapod_ID)
        currentPosition = hexapodBasePosAndOrn[0]
        distance = hexapodBasePosAndOrn[0][1]
        # Heading error relative to the +y axis in the ground plane.
        straightness = abs(angleBetweenVectors(np.array([0, 1]), np.array([currentPosition[0], currentPosition[1]])))
        avgHeight = abs(heightScore / samplesPerEval)
        avgStability = stabilityScore / samplesPerEval
        avgNumOfCollisions = collisionScore / samplesPerEval
        # Each fitness component is scaled into [0, 1] before averaging.
        fitness_distance = distance / 100.0
        fitness_straight = 1.0 - (straightness / math.pi)
        fitness_stability = inverseCurve(avgStability, 1)
        fitness_height = inverseCurve(avgHeight, 1)
        fitness_collisions = round(1 - avgNumOfCollisions, 2)
        fitness_total = (fitness_distance + fitness_straight + fitness_stability + fitness_height + fitness_collisions) / 5.0
        print(f'ID: {UNIQUE_THREAD_ID} | Time Elapsed: {time.time() - lastTime} | Evaluation: {fitness_distance, fitness_straight, fitness_stability, fitness_height, fitness_collisions, fitness_total} | Chromosome: {individual}')
        # NaN positions indicate a broken simulation state: rebuild & retry.
        if not math.isnan(distance):
            break
        else:
            print("PyBullet Glitch")
            resetPyBulletSimulation()
    return fitness_total,
# start of main program
# --- Gait-chromosome layout constants ---
MAX_MOTIONS_IN_SEQUENCE = 4
NUM_OF_LEGS = 6
NUM_OF_JOINTS_PER_LEG = 3
NUM_OF_SERVOS = NUM_OF_LEGS * NUM_OF_JOINTS_PER_LEG  # 18 joints total
UNIQUE_THREAD_ID = random.randint(1, 10000)  # tag for log lines from this process
LENGTH_OF_CYCLE = 12  # repeating gait states per chromosome
LENGTH_OF_START_SEQUENCE = 2 + 1  # one-off lead-in states (incl. prepended neutral state)
LENGTH_OF_SEQUENCE = LENGTH_OF_START_SEQUENCE + LENGTH_OF_CYCLE
LENGTH_OF_GAIT_STATE = NUM_OF_SERVOS + 1  # duration + 18 servo angles
# Spawn pose and target body height used by the fitness evaluation.
STARTING_HEIGHT = 1.375
STARTING_Y = 0.01
TARGET_HEIGHT = STARTING_HEIGHT
firstCycleComplete = False  # mutated by readGait()
REAL_HEXAPOD_CONNECTED = False  # True -> mirror motion to hardware over serial
CONFIG_MODE = False  # True -> drive joints from GUI sliders instead of a gait
ssc32 = None
if REAL_HEXAPOD_CONNECTED:
    ssc32 = serial.Serial('COM3', 115200, timeout=2)  # open serial port
control_IDs = []  # GUI slider ids, filled by init_debug_parameters()
# PyBullet Init
physicsClient = None
if __name__ == "__main__":
    physicsClient = p.connect(p.GUI)  # interactive run: show the GUI
else:
    physicsClient = p.connect(p.DIRECT)  # imported as a worker: headless
p.setAdditionalSearchPath(pybullet_data.getDataPath())
plane_ID = None
hexapod_ID = None
resetPyBulletSimulation()
programStartTime = time.time()
servoRangeOfMotion = (math.pi * 3 / 8)  # +/- 67.5 degrees per joint
# URDF links come in groups of 4 per leg; every 4th index is a foot link.
JOINT_INDEXES = [x for x in range(0, 24) if (x + 1) % 4 != 0]
FEET_INDEXES = [x for x in range(0, 24) if (x + 1) % 4 == 0]
p.setRealTimeSimulation(0)
print(f'PyBullet Instance ID: {UNIQUE_THREAD_ID}')
def main():
init_debug_parameters()
print("START")
# Back-flipper 400 Gen (use robot2.URDF)
# runGait([0.18403936561894646, 300, 0.1549125676230881, -1.0737228551789968, 1.1074034854568302, -0.5075648311566827, 0.9410632472797863, -0.034075539924461766, 0.04087326062084187, 0.016970270367710892, 1.0693994730213368, 1.155687813177694, 0.8737522824643122, -0.8647752630675463, 0.34299370658943856, -1.0211653604035968, 0.9327693440640767, -1.1011770525732225, 0.9557766462321559, 0.12072049734526494, 0.6379668000414453, 0.38230918565376665, 0.38766472211677316, -0.23345728123331336, 0.42693206671590045, 0.7481283759819202, -1.0035764425332458, -0.37743247317618395, 0.09703593626443223, 0.4096524242490998, 0.5659367640154525, -0.48634980239282366, 0.9997166523571077, 0.9918650566996552, 0.3533658070404414, 1.0175827219942823, 0.18743930901921652, -0.4458899131062403, 0.4201947524885313, 0.9657458794711107, 0.44323639067960297, 0.8503041963843176, -1.1368206866652761, -0.5949936064673491, 0.8431072036023203, -0.8259446127674113, -1.0362372103175401, 0.15060262481341474, -0.9859007911949546, -0.21962601316827257, 0.09533520637849181, -0.640348357362408, -0.8041160416557014, -0.7812025840957206, 0.11151372791644286, 0.8274774721561737, -0.587046978534175, -0.32808303668861744, 0.2379704057869646, 0.18775280488712448, -0.4377264809328173, 0.5907623805366229, 0.05202338940205825, -0.2451747086887523, 0.8470984599764407, -0.7682503206243262, 0.3612923441372827, 0.6886023359929412, -0.9693524777736018, 0.304702690167437, -1.1660144413974514, -0.8249616411604217, 0.8945052164694315, 0.17544939809574825, -0.8272460898178078, 1.1407934664917578, -0.2000536217205298, 0.18640987180335764, 0.2679299333926803, 1.145665972340061, 0.9764033023740143, -1.0945467100291453, 0.521372365408202, -0.08805314721161141, -0.38934451999230035, 1.1416905493697052, 0.9003889322068577, -0.8129184705562436, 0.3440473274506305, -0.72472648064133, 0.05399471861877388, 0.5194134990647563, 0.6874785346278812, -0.8122596509146587, -0.650730661490093, -0.7272921738378133, 0.4576376839803699, 
0.3904171653145497, -0.9810005319300584, -0.00285443936171912, -0.36535172159131735, 0.6144096327192144, -0.15837248303352225, -0.8994195620240998, -0.550954584675316, 0.6429230964102726, 0.8005796647490672, 0.06531222692534534, 0.34974094724817234, 1.176172791792436, -0.5252710369532949, 0.2649227929594751, 1.175606830532634, 0.9455873746485913, 1.0932290476666615, -0.5309007757921452, 0.2682629673178282, -1.019068207596018, 0.7873164405123094, 0.47109953022389206, 0.9043790189976995, -0.4554792669173991, -0.7129058230136001, 0.06376465587541758, 0.8334084349886761, 0.4891839072422823, -0.8195171322070331, 0.355909241507946, -0.45439362049140547, 0.7612376551762988, -0.05436372347589597, -0.8253395241105608, 0.5353956017148908, -1.1278223769955171, -0.2672704939182695, 0.08236845953569603, -0.3944515835531134, 1.0532839324087664, 0.2152014313163344, -0.32115401363252016, -0.4401831731301486, -0.8316311944948868, -0.8204912647642763, -0.20684288623086328, 0.656586992390969, 1.040632220799805, -0.5681960529259297, -0.8935973924071543, -0.5523442578187371, 1.019030749112957, -0.9889777010534911, -0.7813891126298407, -0.31025456229208886, 0.21737299067653276, 0.11975606760923985, 1.1489133897234511, -0.732747722798833, 0.46248225304712504, 0.8367764473513415, -1.1345064996816838, 0.8487271456519844, 0.47079084546084493, -0.3880313224602788, 0.5726640163660738, 0.3169122624606709, 0.3125375497368141, 1.0725665493127594, 0.04471119068333991, 1.1194888957575166, -0.694350406505744, -0.3409595779421505, 0.6171251620061712, 0.22135934758297232, 0.21247623650675107, 0.8669374876359844, -0.31535073366399385, 0.5538889770906046, 0.3757263311055013, -0.597927592864876, -0.6925085946018497, 0.34871269014582607, -0.7181727784647577, -0.9959175484190584, -0.3941241000777373, 0.5843467229636823, -0.1298465717484976, 1.1265371185410973, 0.10481709067861797, -0.6208804733701672, -0.23209634549660108, 0.09164363735575871, -0.5218291197106613, 0.13407195016157047, -1.0811880597442607, 
0.6537001639909427, -0.9112100951312594, 1.1331704450421252, 0.09526547721035253, 1.0691734422557067, 0.28286325422558284, 0.2155840511857598, 0.34219576355889947, -0.06858382641835256, 0.6352320740995255, -0.8534759393001677, -0.30215311605080114, 0.4825461632485498, -0.7878025062942408, 1.1767649332440155, -0.36658719987216254, 0.614083134565041, 0.017395772324190255, -0.8141910253664506, -0.9789440851674488, -0.760947748240976, 0.10514245629454896, -0.012445121344935495, 0.8955364949519948, -0.9407396155918096, -0.5059512257000718, 1.0169038534816517, -0.5661662987702231, 1.0472948411801215, 0.9527469422382762, -1.0438693320196037, -1.0197514641091732, 0.5923565043667608, -1.1513518068819006, 1.1426265213356581, 0.7314115693481198, 0.035878365067006056, 1.1268253186061643, 1.1381898892090025, 0.050668047659784055, -0.3374091279069584, 0.7307081467526516, 0.026235117587053144, 0.5330659472771377, -0.03700545752962979, -0.23289865423067055, -0.03238468060810259, -0.8203158342693152, 0.5683798195792417, -1.1246799094062077, -0.758692396093738, 0.21847165129039725, 0.5696240201910205, -0.3529263485054064, 1.0260220368312303, 0.01224911889098179, -0.1809953840590205, 0.16439486454032642, -1.0079506314517355, -0.7339119466544526, -0.796638067199314, -1.0901440827406046, 0.6979503548178353, 0.7015791939518996, -0.07233693177751095, 0.5560875159234098, 1.1110141779625964, -0.15396573755282433, -1.1582494805739991, -0.12961450456604595, -0.651868837749322, -0.31774126871869124, -0.26337831137926265, 1.1722112677888488])
# Experimenting with stability
# runGait([0.29107088383373003, 156.78061666398597, 0.5353900646350714, -1.1333425119926464, 1.0829260730302332, -1.1640892198484636, -0.38152815636010307, -0.7197641554363123, -0.5865711046576539, -0.7927045883259933, 0.00438855244070635, -0.6432876760380152, 0.08594928155643794, 1.0817177784301633, -0.5378762158033574, 0.6930352009582383, 0.3152893767243987, -0.8955744085304733, -0.19691910222378564, -0.6131664945408353, 0.5512774089140886, 0.6670518195451283, -0.19309128337814246, -0.39607435850701944, 0.47480066743434857, -0.37028244976875146, -1.0989195356925148, 0.30442539638418725, -0.9337728215237907, 0.30406338012158807, 1.0455043322618425, -0.06789772225569224, 0.5806250136267157, -1.0375724064199314, -0.3543513598059251, 0.1989952263373842, 0.23561168218655393, 1.0508947475484427, -0.5301435173978837, 0.954111551885351, 0.17938815590296145, 0.12832022231695767, -0.1490689385957007, 1.0330605104523767, -1.0623171252199062, 0.8445117720781832, -0.36027189597546316, 0.5443268480270156, -0.1000204487317421, 0.8404845535139747, 0.7974970727477276, 0.7264552844874479, -0.7000730851293409, -0.759341129282885, 0.7648050158369193, -0.7522289855617343, -0.9984554385243157, 0.08999817877211987, 0.9363902863024169, 0.7202283975791759, -0.3961247258926616, -0.36531006526282306, 0.6003213302935796, 0.38400973936199784, 0.07770508777397611, -0.4360267539477962, -0.5751568134137885, 0.43883791069800354, -0.18052901071260805, -0.20490900220420233, 0.18186013658998493, 0.692889951182456, 1.1009750256518542, -0.5280637382212376, 0.9864059841841444, 0.735701606227071, 0.8506684523432434, 0.19014925787028816, 0.02734747647350761, -0.14246780172817314, -0.16363487369520202, -0.9807750892297045, -0.5974559893332009, 0.7052210645249009, -0.5722152424030353, 0.4150366036529538, 0.8451537180291235, 0.4434838540550997, -0.20824516651145206, 0.6091705295348645, -0.9590250992142602, 0.9808724317974076, 0.4183489440801338, 0.3607884449764098, 0.06383687946190619, 0.5937967791598316, 
0.24274029229005406, 0.29029702682807396, -0.603878266910079, -0.08806950361411958, 0.7212683704190836, 0.6724813333613975, -0.18666038908670848, -1.103082059804687, 0.8768587952536211, 0.1404761787154466, -1.0478894200143816, -1.1437694960941056, -1.03754562219342, 0.9085399008442834, 1.1227743423279155, 0.498537990420221, -0.8371826748208432, -0.09808959558757369, -1.0626830441409378, 0.5380159945100353, 0.06381426021825665, -0.5683621599007505, -0.8656452557602182, -0.3593917993503882, -0.33715498495122065, -0.7801978053428386, 0.9153213044740255, -0.7555054418695308, 0.26788356436185146, -0.06902203453873508, 0.25274353461079657, -0.6944626972070734, -1.0430307172162676, 0.01812578724476625, 0.02313415896879832, -0.6806192043426953, 1.100081799618252, 0.827785090927609, 0.3269284020247252, 0.03206468028287163, 0.3034997439357618, -0.2990653227916562, 0.7397771966931967, -0.8780480762102226, -0.08487561447549834, -0.577616393319167, 1.0833921351724336, -0.45990422848121937, -0.6346960024499542, 0.7918294091829395, 0.027155163465394427, 0.19054579609590222, 0.21557173632598228, 0.2980525771157228, -0.7559737274895846, -0.97517502765191, 0.06865090400328322, -0.5031462207447848, 0.10497243089263641, -0.7965555365903747, -1.0373626266104656, -1.0533096615397561, 0.10728070263954059, 0.796368793127805, 0.0718344507091692, -0.9989396260631868, 0.6356575989158342, -0.6255437988888762, 0.9131334879875902, 0.45646847811471414, -1.0169463676260804, -0.3933819058755607, -0.5997997874897212, 0.17878990565350586, -1.1569178781134557, 0.10159645335836587, 0.7154971633906789, 0.17032819164387833, -0.01835367286864094, -0.9505402084183495, 0.9691636502442869, -1.0866819107494512, 0.19673356296061473, -1.0813009593788294, 0.6727946528411259, 1.177458334902635, 1.1463233157921833, 0.145360816245766, -0.7110332186361785, 1.1672615674161702, -0.3210995288856508, -0.3412206078486237, -1.1150104372864078, 0.41469339605306227, -0.2438596429359537, 0.539355647844741, 
0.12085515871084321, -0.9647547341312186, 1.0521097335095957, 0.38872376706386774, 0.12699195498661892, -0.1666314031269644, -0.1452609089409052, -0.9161542968661167, -0.0576685820645067, -1.0362064288895902, -0.5438335521979928, 0.6421961281435908, -0.8782675763606693, -0.32039420495397747, 0.6517605169997935, -0.34461234725989986, 1.0265223840919862, -0.9642919006925839, 0.6343523074380544, 0.01045648767965579, 0.3839206068543592, 0.3625094480567086, 0.5988029218201046, -0.8066055092585431, 0.8291837194941895, 0.9966471145724585, -0.5512131701360924, 0.558229602974704, 0.6430208438139704, -0.36772966559972137, 0.9071931330847132, -0.30657207454292457, 0.18015360737564146, -1.1574946716164571, 0.6901959363107916, 0.786073839325254, 1.0524799852271292, -0.48261528673935933, 0.3021126071842598, 0.6780681739932523, 0.2650223276064161, -1.006069056570006, -0.37549673659808014, -0.8831740449899401, 0.6404770888288831, -0.29655423133316006, -0.30248718319006307, 0.2914366205771275, -0.26389183625692514, -1.0895101207281785, 1.0340066693226782, 0.9883010962564867, 0.13283052185185668, -1.0053930692545063, 0.9173399063162657, -1.1359574713434795, -0.9135848528331729, 0.05275828150306455, -0.8544265299335544, -0.6004625904664892, 0.7568333265679985, -0.11361613904339729, -1.0251203832530935, -1.1051123528377, 1.104096469074662, -0.4090842273664589, 0.23362094428508276, 1.122749227526524, 0.2089305257744809, -0.07565225626772398, -0.19006931939016714, 0.9450976678385695, -0.25602949043929973, 0.8979865614568747, -0.7508855781432413, -0.468020831328862])
# NSGA
# runGait([0.18229659766382353, 155.6458409097608, 0.9083504748793311, 0.14110476724188775, 1.093443926606583, 0.8999005754220708, 0.8149729234408216, -0.6359985441288066, 0.5861105866627622, 0.9646599108012346, 1.132867106787237, 0.7918013480335797, 1.146016728309355, 1.166593885399247, -1.0830432373628303, -0.9263478251771459, 1.0107748178924647, 0.4646106439794383, -1.169978052196896, 0.0749939582085001, 0.4339923743202392, 0.4579402086767843, 0.4206199667029586, 0.1397555030784438, 1.1401145649029063, 0.09302868565904275, 0.4914790880995965, -0.4651971857366567, 1.1659566230018072, 0.3706345064730484, 0.07552351079101301, -1.1232067067347673, 0.2222935344062407, 0.7748993910829708, 0.13034880610013827, -0.4806035746604289, 0.3172527997840071, 0.5068224937104546, -0.46511584843166, -0.3467931766616326, 0.3427084205149607, -0.32737952333572434, -0.90724945399064, -0.5250338620208708, 0.6880580209305295, -0.5448070713760786, -0.18858065144794406, 0.014731130802321182, 0.05906164696150665, 0.5284225601490862, 0.2234470472115875, -0.6232032858763568, -0.90751250478335, -0.9199446731694133, 0.24647114606718526, 1.1071349261385088, -0.22236693394071033, 0.49967256016722467, 0.8997056992139608, 0.2047903228421882, 0.9318238130993908, -1.1170109958568422, 0.3134441187993395, -0.7308917666312805, -1.069328123854823, -0.8079594196034741, -0.8413209463842631, 1.1062445646940164, -0.03586696528282618, 0.9654148851126274, -0.9432680014273601, 0.5234582594311347, -0.7471694311620266, -1.022219081571329, -1.0800214764917782, 0.7474112702428726, 0.20819166616338916, 0.7215699077419591, 0.38031567283758844, -0.5238628816347054, 1.0770153104321716, -0.6397760818081154, -0.22435045232641265, 1.0706893179893495, -0.5518655141151997, -0.19087636009819303, -0.3512479711738529, -0.6606861068197772, -0.8685585946822181, 0.3604633384909186, 1.0275382741711763, 0.06655444813978417, 0.9935834582186229, 0.7279386983043616, -1.0070347596788973, -0.2442584535361799, -0.3012388201961493, 
0.7247939705316814, 0.6269234457029824, -0.009873626827067952, 0.482981540629763, 0.8598378851596727, 0.4741322053329257, 0.675550009524477, 0.346302537230719, -1.1243070756199751, 0.22681429162070263, -0.3097657746518692, 0.6778382742087453, 0.814184670743237, -1.010608111150304, -0.27376846268023297, -0.9948460491716468, 0.5069784751074181, -1.119671608976601, -0.9303075410380663, 0.7246078940736616, -0.6722076482955358, 0.37757103600219066, -0.313874069830721, -0.43472066633036216, -1.1278978108552458, -0.9970308936270744, -0.5565800651858721, -0.9069457748848149, 0.8230975287693061, -0.41294547922588815, 1.1712915826924468, 0.5098575610704372, -0.9343470208547747, -0.7502818925909562, 0.8572882565769667, 1.1527965107091545, -0.5985576253107086, 0.9812633011454751, 0.43198678500041227, 0.5217073857233142, 0.9761183062018322, 0.14128704712955387, -0.2776161554656262, 0.7504777433267875, 1.0294036660645036, 0.09622107476155035, 1.084571969315032, 0.5909472984707462, 0.21678827645458928, -0.20873040261437428, -0.8425470304835826, -0.5794336238166817, -0.7224193962610191, 0.7320581158158446, -0.3615401506592452, 1.1405747073218429, -0.3039589800092729, 0.2894225332294616, 0.26050649010553945, 0.681767611672782, -0.5129831573243372, -0.19268708535592294, 0.2842271081958875, -0.4316514377659478, 0.1747797126503924, 0.16177042672268072, -0.9595387421298439, -0.8913796116466794, -0.2835623569393363, -0.9021243233987757, 0.04675926339236765, 0.5877718252549794, -0.020728046886497195, 0.1960504582672131, 0.828348254755653, -0.3256674408706686, 0.6430416383221862, -0.20525504926868066, -0.8518231015695202, -1.0599288641751397, 0.6287112429011469, 0.12367108041799399, -1.0720406710260566, -0.22472210546705562, 0.8706060321838783, 0.5291724611088444, 0.10250068539672591, 0.7278411385365671, -1.1566550009574674, -0.48415340513814353, -0.14201813891013926, -0.42454015353898894, -0.5588938960807662, -0.3294006824868001, -0.804967243168935, 0.8162080012175026, 
0.18496746845666612, 0.3891323361310516, -0.7744570170530798, 0.11870436656346904, 0.9500286012565656, 1.091687566807378, 0.3307255383169255, 0.7118468053052446, -0.9013453096963969, 0.1945196784104959, -0.7862302171325798, 1.0371600096585611, 0.8279744386055418, 0.5349665267082687, -1.0492245155619515, 0.24063714267361025, -0.5253103025206994, -0.6958371376482045, 0.5127663834795291, -0.5668633184192481, 0.028756495211944066, -0.8180067305130339, -0.7325151144334637, 1.0874568313948747, -0.6204051504490435, 0.16734526054734847, 0.9079945409597623, -0.1605782825625574, -0.9493435726574494, 0.9729771352267385, -1.041240909006733, 0.8207784518169133, 0.2019719768666147, -1.1386639991254925, -0.6372470497638072, -0.5284328510489867, -1.1424013728720803, 0.31553420746729177, 1.0380382961752586, -1.1411444021812454, 0.09246165325365872, 0.1706564355825929, -0.6346966931891327, -1.0909483051470628, -0.06566436851792082, -0.2776209741481568, 0.41899201957450416, -0.18759177838803043, 1.1555756485784177, -0.5822077172996798, -0.9193289683677482, -1.0371716158033841, 1.1423430653663564, 0.8779681991740422, 0.7805125432142439, -0.7721653288915576, 0.5155584453512811, 0.14486889941392897, 0.8446819611612648, 0.05327295198343703, -0.33865091333049985, -0.14899995870829524, 0.1953458127677415, 0.8939023739574229, 0.07623855879466708, 0.6130493982347537, 1.0918462763933745, -0.8759140979104185, -0.9919552795899489, 0.531399920610189, 1.035948455011811, -0.6346835942687693, -0.9522149883706655, 0.12740143026457326, 0.9609495300188751])
# 3000 Pop
# runGait([0.904983046733017, 159.59983729578843, 0.060696523522925525, -0.30971496430036916, 0.7353270535594881, -0.6505978383780697, 1.1023304598221446, -0.30768612827725195, -1.0234200031222016, 0.0034031598955847732, 0.21330511114420406, 0.9254559265553964, -0.6604354617020531, 0.15158813875789579, -0.7854467191411058, -0.9925929220629403, -0.2080119772713885, 0.9748408255452494, 0.32631902985278716, -1.1409334395171054, 1.0693652898105703, 0.12206649342856929, -0.2847542804409562, 0.4801514292150744, 1.0332765953216592, -0.7938197444031939, 0.2563131442452105, -0.14869709222993316, -0.9347675501625538, -0.524006106702254, -0.11909684998489307, 0.8400278760972555, -0.23718061034206372, -0.337660742691555, -1.1686283476437243, -1.1031105878405127, -0.7646199378803564, 0.6354804367029614, 0.8837680367321205, 0.15899423130435295, 0.6712575159053917, 0.4215958247199955, -0.3373087437071557, -0.9305356508955385, 0.9082056114784287, -0.9284716662471378, -0.04940610858174345, -0.06555931514262112, -0.09399768472542123, 1.1247909715839755, -0.48379285974626773, 0.570525480919135, -0.02569623444311407, 0.10714561439371209, -0.10783544772282928, 0.7051584770708194, -1.0406922832593428, 0.06343296206551907, -0.8505979742277806, 0.10694125521184705, 0.16935548980784373, 1.1420837333010894, 0.24204597235287523, 0.6475736003934104, 0.17055212844135192, 0.49474061804168407, -1.1398076486306385, -0.03973245191392927, -0.939386769890378, -0.402476064224687, 0.025119711897026987, -1.05440268787099, -0.2752953135618634, 1.1685995968528273, 1.1257083961904089, 0.29732029639677987, -0.17567621997968294, 0.9564892264662193, 0.751911791222228, 0.18761642526516176, -0.3232472318368491, 0.7932493092806446, -1.1052582188288498, 0.5487840041424124, -0.70316267873878, 0.5857454028700838, -0.30104018247011155, 0.3990877589811526, -0.27622278756255064, 0.5007368227279884, -0.34255158345432923, 0.7860783873697565, 0.3299290546401101, 0.4212830410670498, -0.5000823971778967, 
0.20211647698962795, -0.5954542100162727, 0.7891262765370252, 0.8398523515973949, 0.04208448038937486, -0.24762466260929505, -0.18298569176248317, -0.36744847594043806, 0.11711890578959555, 0.6784909064918407, 0.043547048965568554, 0.915347385672616, 0.1943537417670627, 0.1961032921533715, 0.27380381570388196, -0.6925505753816085, 0.13409295730737364, 0.9238606281112889, 0.7232290263266692, -0.08475645900857555, 1.0918689969424247, 0.11494102395735217, 0.13564497247063464, 0.07791390649505298, 0.09101847685593711, 1.0802565634925867, 0.008017176336832414, -0.40863001168996127, -0.4349722126523693, 0.34453158076792034, -0.6910315034834699, -0.8948888951214328, 0.36598368092354766, -0.43159001817612835, -0.09136672190834433, -0.8265804981583658, -0.41569623861994176, 0.7235977966485335, -0.2699267245166663, 1.0160916486038205, 0.05956580763846824, -0.553446262414352, -0.9234155272540824, -0.5105546990572772, -0.634441156000243, -0.40216456144001933, -0.659878523232472, 0.016898776661833703, 0.04370343683036865, -1.0525099132085054, -0.1360717628812732, 0.5358560458811824, 0.052683861773873776, -0.953171635298017, -0.15148360309076792, 0.8695124128462544, -0.33150174271113014, 1.1018672681161934, 0.8122315339180584, 0.05632606046036302, 0.4062446410358813, 0.9032403720408388, -0.4898652953167396, 0.5547542361296853, 0.34407051580411596, -0.9688368827468914, 0.9051281478921898, -0.382672404368127, -1.0351659289591528, -0.28339507478975706, 0.2604244554458595, 1.141276061674245, 0.9511590958810777, 0.030865821870599747, 0.25589416378172836, -0.899230741333316, -0.6890771547052199, -0.36098686027424987, 0.2046761115043388, -0.003370011028005335, -1.0326969178585779, -0.6399760361033779, 0.2945889455809976, -0.5693772211328435, 0.5190623470719792, 0.9594447543339326, 0.6647471001461713, 0.1765304159915148, 0.43139180526620574, -0.7827997940081548, -0.3212428257056203, 1.08691662804371, 0.5724684958868459, -0.8190711895017851, -0.21893910988788873, 0.43754121343708036, 
0.7761978329434286, 0.023307117981586058, -0.8196812812240051, 0.18349841454177002, -0.03275888140948699, -0.9354688626764325, -0.29752922729690606, 0.1038144707355869, 1.157887360987824, -0.012635618639992992, 1.1772296247879417, 1.0476346313277654, 0.10969465515895042, 0.9315375765707544, 0.4127446193866228, 1.027376379551766, -0.21108693714197815, -0.6492523615384952, -0.1992276005394296, 0.9471808298697213, 0.003879051139011558, -1.007300896674733, -0.8988174362275243, 0.4571936981359096, -0.3114735176334528, -1.0288979365415865, 0.21589235355521563, 1.1668173760981395, -1.0513185987779647, 0.12533634979511898, 1.0255130494459233, 0.748872451843244, 0.09667778038896534, -0.984941038411984, -0.07093582524591927, 0.5164103929817825, -0.27201410547664984, -0.3729048874293173, -1.1149579500690774, 0.8813975424355013, 0.621449531061543, -0.6997908270429463, 0.5020729261400154, 1.0064048881599486, -0.7789049856237727, 0.1128585572848923, -0.19354726814305145, -0.464830471290481, 1.093822978982704, -0.7444302566691616, 0.5228503281130006, 1.0127816296546253, -1.1495705447930669, 1.1634921469729014, 1.017059004992762, 0.6538475673570939, 0.2985847382952567, 0.44100054350476847, 0.20413700406627103, -0.7151778221967301, 0.40107695707208585, -0.7845232708359956, 1.126462565607725, -0.7541211894437213, 0.28493713718804164, -0.5111505775098296, 0.053487615522491705, -0.5972630106930484, 0.867696699744679, -0.591697544975821, 0.393417762872977, -0.36433296593800835, -0.8484271043364143, -0.7889442880625308, 0.5526505308931524, 0.36476343329950656, -0.010331075612058583])
# 407 Gen
# runGait([0.8781196143633982, 157.10313398974353, 0.33682254295973746, 0.28781781595826333, 0.1860859338114289, -0.20420881621380305, -0.12123927275926893, 0.4063033014361015, -0.20359193737052045, -0.6840192443790235, -0.28254702343692906, 0.5940254747152814, -0.29777220678601746, -0.07426962639294979, 0.9652159747014115, 0.2473752294851017, -0.6107793773528992, -0.13244976914336656, 0.5280864171637462, -0.059421386701237325, -0.07844527791237588, 0.6322545653056038, 0.4724601535718872, -0.3287282121853098, 0.5434097111921933, -0.12151448936067652, -0.06979580112172787, -0.2961931277705389, -0.4805882127012387, 0.3364130911622725, 1.0005701725869078, -0.21847954935067443, 0.1323700155317861, -0.32215689673367087, 0.14430331665049628, 0.09917150470049849, -0.5170401624287033, 0.5409236317536736, -1.02363228425136, -0.5320260643642395, 0.9218582075166826, 0.2807391864517396, -0.5941315102406781, -0.7080454310389085, -0.31585622472883196, -0.009146624918177084, -0.2012219697627285, -0.4409244609725404, 0.8768753147403492, 0.9343776617888977, -0.02141775939762952, -0.26104580922522647, -0.24705414040844845, 0.34300112955081297, -0.3356925840764099, -0.6775278951424454, 0.632154513923834, -0.5828623947214877, -0.5182796152392433, 0.606350185900368, 0.3246392910354486, 0.8302757207728939, -0.0023731755697271803, -0.16107561586683852, -0.5441686853739696, -0.8288171478401807, -0.397679027844255, 1.0145087733378917, 0.5771456856002078, 0.1928252929176832, 0.2801312178087099, -0.424024932102493, 0.028490342932658078, 0.5663828653801755, -0.2578226202044432, 0.4758103361385356, 0.11262059630304765, 0.17852984636877395, 0.7819886760768826, 0.4777904992793935, 1.1215684250587168, 0.7012165047082592, 0.08680724775780826, -0.12794232943092734, -0.02749916734935132, -0.45447494760953283, 0.39467736190532837, 0.556999354112899, -0.47790256532634695, 0.15041111386454983, 1.0555134056088191, 0.1949848324658606, -0.1334691560253397, 0.8282086290450636, 0.41689095269036547, 
0.3213829957559171, -0.45920978149223457, 0.6474648195495811, 0.6759097856561475, -1.0097973227736488, 0.5730950969137286, -0.4092749722577607, -0.039674737074665734, 0.18021687378702161, -0.6615793121935327, 0.03142374866540614, -0.7336966171880697, -0.3283666283009425, -0.7807776498788782, 0.04535750599480415, 0.1704720716786568, 0.07139750224822779, -0.39506187566783524, 0.5372040246741976, -1.1473534956177283, -0.4013147582043887, 0.38703856371800294, 0.3971836501834768, 1.1715304273819858, 0.5442662829228595, -0.0980978149061639, -0.4344320237816741, 0.7384324675711982, -0.6187275059274405, -0.3575034072195102, -1.1288861687148302, -0.3261414464285166, 0.7523726891212104, -0.15988727327072425, 0.1131072052563967, 0.34829181401975273, -0.6281159632389499, 0.7689620896680501, -0.12006262327557764, 0.9842080944836419, 0.26578776890675115, 0.5127275308397158, 0.6177831879058816, 0.6136850718675044, -0.7101342612681036, 0.22667486103048795, -0.6637649111191182, -0.49116115181432607, 1.1778110294059096, 0.6536538976088225, 0.337894833822659, -0.8418763216442848, -0.11767229020786187, 0.3713509801518212, -1.062628223739996, -0.6364809135462092, 0.25314885600822956, -0.4532780204714063, -0.9156058815025188, 0.38398735762219094, 0.6504528410510488, -0.8483565675469873, -0.19248354081194868, -0.10272402672891327, -0.21538792572640295, -0.19546972821258218, -0.9483484869960997, 0.022956480787546467, 0.10722697400615173, -0.5339228101100632, -0.35402855681089174, -0.4909015301740968, -0.18175978846992896, 0.6859271886866835, -0.5159021239613567, -0.4536971124614899, 0.33596686235547757, -0.8380097153759376, 0.2564768448158085, -0.17784663515089585, 0.8848479741072743, 0.45734330263531453, -0.09873367668089586, -0.7500713258217162, -0.25406396619910043, -0.9054849581453508, -0.41576987655001807, 0.39503507766193025, -0.5857624014296016, 0.4560242643397691, 0.23900721343893294, 0.4454950080559121, -0.008251391139937114, -0.5778461849029692, 0.49439432908178516, 
-0.08522125551940313, 0.9544048902657116, 0.26600604145504003, 0.3367464085720411, 1.0096321755173974, 0.5254892106771129, -0.09967034763491985, -0.21229560617239768, 0.5697413449479685, -0.33752083603028504, 0.1099730086514387, 0.6135896282792587, -0.2316015516647347, 0.27698810246217326, -0.41050437696734554, 0.27114015297957794, 0.5172821702323088, -0.8557836333808082, 0.1431613671576169, 0.26614523302465865, -0.9784779658422884, 0.22069893362397217, 0.4698093606975688, -0.3444787910995501, 0.3215750648265838, -0.1275396420123813, -0.16744367567541496, -0.2865130786194499, -0.203914306582315, 0.09531066543160083, -1.0473530567418816, -0.43792746630963003, -0.8246099035388195, -0.1995013012675149, -0.05971337685132197, 0.07864737594828375, -0.7640329802075775, 0.7133374177394672, 0.3450741117987123, -0.6613122911561858, 0.2175616273524131, 0.39585440284780726, -0.6045817099745175, -1.1406619528893633, 0.06642553919379446, 0.17478825655989105, -0.48174098942017146, -0.5192782669857927, -1.1714093392113059, 0.5938325737824399, -0.482930381615228, 1.078819781022143, -0.8684695968207783, -0.15775647868438034, -0.19836857010162148, 0.12721713278404426, 0.20363848723966207, 0.11179394420943511, 0.49603448462048155, 0.3600952668625213, 0.3128222724984558, 0.404650588027724, 0.0840823881356966, 0.19944902798724976, 0.12426183303095407, -0.6113147369273974, -0.40558552009503623, 0.4393747149838387, 1.0064976254947864, 0.02535220524117529, 0.6867814865985039, -0.4423754966018487, 0.20201446246914118, -0.697738158333364, -0.9883884469952561, 0.4564284783687155, -0.9310257794603739, -0.7670742706760328])
# 6000 Pop
# runGait([0.5833710183294218, 159.94477382941363, 0.274991231117698, 0.238089263008252, -0.1417409643619611, -0.12761521449834923, -0.19289461538039873, -0.10856112933014755, 0.15968577636162862, -0.17368631551323016, 0.07203736702130702, 0.881941236232128, -0.49131125475461257, 0.41963359660824545, 1.0286266905353563, 0.08461166310190373, -0.0930092118460774, 0.6688730971003121, -0.5272671341802382, 0.3759165556754689, 1.0284225286200426, 0.281022494747565, 0.0009195019049819536, -0.0879260351696233, 0.36182544936252586, 0.1544240116221797, -0.3379165966729512, -0.07107860607401342, -0.35347372811378175, 0.24474628711192828, -0.9554210776881508, -0.2446425071911576, -0.21834542364787896, 0.02941224160898587, 0.19860309964710376, 0.32015885118565973, -0.38463037767537034, 0.2721652517032083, 0.4498871803091191, 0.2843738257583236, 0.501327523282536, 0.669013035550511, -0.37715689084783993, -0.7193636388118547, -0.2383013342521316, -0.17489396233602697, 0.06083890600102712, -0.4034525364101677, -0.24583511137643727, 0.05016350547975096, -0.5231072760701854, 0.0920601174252217, -0.3798879187489547, -0.06425162833626528, 0.1175629295006112, 0.02682125752619795, 0.5075090858782456, -0.16206766607753353, -0.9027943006438802, 0.5191380547248162, 0.1922772367714138, 0.3722573359482723, 0.27824784135917774, -0.36599087581441164, -0.06620007679763394, -0.37707864764924415, -0.3432745401212583, 0.1890972969239655, 0.9771314779636118, -0.6379190437437263, 0.5327515128308239, 1.1266802573644779, 0.4853618562003829, 0.03715655903182573, 0.07311510274068314, 0.5423300464375319, -0.0658356136420452, 0.6733211262326829, 0.5412515512543659, 0.475841545559648, -0.5369352656406974, -0.026774867624149132, -0.27366768812094183, -0.21535394823513682, 1.1272641607914005, -0.6324828192170618, 0.22992240750612652, -0.8332942275738103, -0.4448812583609043, -0.5639998583724821, -0.28504303170819406, -0.13715306369785674, 0.3349484025718961, -0.3700368781101578, 0.20300787227134326, 
0.22374862961672667, 0.027970795265832554, 0.7014861172229404, -0.04926493320095343, 0.4402290874741377, 0.3860194514832391, 0.11569030596073443, -0.06036997313965854, 0.1497256975919505, -0.377481545800565, 0.08298090520892161, 0.9438244188255105, -0.48021805376469584, 0.4543715274216308, 0.8678356926851799, -0.003915278924756722, 0.10352872557178089, 0.3358865719916397, 0.4211155579389066, -0.030249314775129762, -0.5658285551195321, 0.2548939424634452, 0.5745275199121783, -0.7796534931283465, 0.3451123282022226, -0.5444761756627212, 0.12200790829540269, -0.25898916669720645, -0.6724214809633824, 0.34635133694786935, -1.0685493620290625, -0.166454962800517, -0.8051985252291386, -0.4306033386198576, 0.3621432335285329, 0.014468787338504891, 0.141080510173102, 0.13964744684544284, -0.15421615523049945, -0.4317859615807832, 0.225587551388641, 0.693207792900198, 0.5533948757767216, 0.20097437713556277, 0.23256665133179352, -0.4990635733731684, 0.37724815041759296, -0.8484710927328951, 0.052329062943848995, -0.6454186305205749, 0.01709338435440333, 0.1426615820133712, -0.7496362823830726, -0.024592492969917387, 0.07160640453502068, -0.2474844962946594, 0.5941575845367926, -0.20960304431184107, 0.6424578239764861, 0.2920273219156567, 0.7036560308915455, -0.8121144845665177, -0.2789410770162129, -0.7413476580353197, 0.08188596178827257, 0.07931227840034549, -0.7207975890283618, -0.6065813517836143, 0.3983191376566657, -0.5635381703226274, 0.4088177741187736, 0.8161358908559947, 0.6554301845419963, 0.04547395492205422, 0.08051995385752733, 0.7945827746307063, 0.11087351442670304, -0.590752837198396, 0.2065658076101474, 0.0751712923684167, 0.6709125887262557, 0.1373187383960103, -0.18183312802940133, -0.4350057499267376, -0.3766430661862623, -0.8199596582372628, -0.14153603961297806, 0.590381220135425, -0.16508543450631305, -0.20708569485397604, -0.34591459093209215, -0.16651848898298874, 0.5178287410957361, -0.03657852374819068, 0.7219509009910949, -0.22937310869060928, 
1.1464596068133195, 0.21233031874020497, -0.3609307120798186, -0.41136793770748015, 0.16347336752562386, -0.04569336581669571, -0.12320285070861678, 0.08240315222323638, 0.4579164525630148, 0.10194743670878537, 0.5748048740706077, -0.38484763478500494, 0.8009525610955103, 0.7333605699869847, 0.37124845323434263, -0.03501117975518403, 0.012948485735009754, 0.29139932552493186, 0.34343670572619495, 0.8542624160112243, 0.2047939987143607, 0.3903803959743837, -0.20493239305818473, -1.1287011492813999, -0.32936672671984124, -0.36581898984821176, 0.2451494881151558, -0.5756460181132672, -0.030221322542399086, 0.16449751038968874, -0.3567999251278406, -0.1618212236300447, -0.11207687799559582, 0.05735981109003743, 0.9415542138674963, -0.3554473551841668, 0.5357750527639715, 0.21498207309781378, 0.4532008047694893, 0.21329882952215284, 0.5859846457864504, -0.16362093353740018, 0.1319546289160159, -0.2194715016448026, -0.266878066809855, 0.19007538470038587, -0.6214579041470789, 0.07758190484905561, -0.7515667963793465, 0.24700843522334995, -0.292447662725082, -0.4181253106644778, 0.19564903243421003, 0.19724000917384554, -0.2063833311816462, 0.46455125472211967, -0.0899164519697946, -0.4859666940225116, 0.2204956850628324, 0.5537344147667811, 0.3710020504693896, 0.42084808456566025, 0.22826893049722402, -0.3009973798887208, 0.3133299056345898, -0.5362470634819437, -0.07363025268708201, -0.4903844709824772, -0.4212031706808154, 0.593200663984306, 0.03428638943992187, 0.24491294188014479, 0.46221509482741235, -0.20849095803967968, 0.6337473393725238, -0.05747450930384633, 0.8875435750416844])
# 0
runGait([0.7387319894185713, 147.82355001579506, 0.6305372633499426, -0.6089588304954848, 0.8912756231059142, 0.2812261582743101, -0.5713260153151064, -0.17272091345083518, -0.011128621604706623, -0.802059235021269, -0.07153954960452258, -0.5428904527014263, -0.04381109750503209, 0.09113787494874881, 0.7723570365515549, 0.1241992107232388, 0.8337401506855846, 1.115458030805498, -0.013540807256189336, -0.5839520097163835, -0.7340746975491128, 0.5868023045338302, -0.9298983610216932, 0.5438917235683132, -0.05782837894738324, 0.4198029392031244, -1.0399101725816757, -0.06612708025138092, -0.5082125750188753, 0.9201548240864632, 0.06444257109533891, 0.3314957278066273, 0.43523252410016, 0.0101257284239189, -0.3455692282668785, -0.11991854934521307, 0.8938694635098376, -0.5612059600360004, -1.1311528570896223, -0.5932545380125831, 0.4344991139274895, 0.3428898066225319, -0.2906991777632436, 0.48606253534779564, 0.5357348004370723, 0.08998319318443504, -0.9267085738943713, -0.8937243514792317, 1.0577416788427096, -0.37068573549717954, 0.9165099103165408, -0.8428317356586791, 0.6907079780081538, -0.763945038433602, 1.0409972769402867, -0.7930695812727582, -0.45187653759465174, -0.5161324587418127, -0.7914439627122227, 0.833033591113862, 1.0408039580092332, -0.05381107166535859, 0.8438281153722502, -0.0387362598590002, 0.6164861225837377, -0.6286851880995831, 0.8640915900752995, -0.7744000726632477, -1.1733643185821832, -0.09815300085455836, -0.2477118609768313, 1.024101066375414, -1.147511226358933, 0.35649235792115364, 1.1024258715004915, -1.011618835769622, 0.5915335712528709, -0.030590833481361157, 0.21394935583673327, 0.2677965469007608, 0.5549362872301691, 0.2695802858891776, -0.8655473520001171, -0.13250526441705102, 0.17727687014444649, -1.070467039423309, 0.09651563983885625, -0.9558246154936185, 1.1511036912990131, 0.8111082334542412, -0.3165391333624401, -1.1028022950228613, -0.8702156791239426, -1.1681706777717666, -0.652290858655339, 1.003148181057313, 
-0.10090114268197192, 0.23187888015208769, 0.5941647728264801, -0.43999609085011204, -0.11509272070881571, -1.0798002236171276, 0.018290046530861526, -0.7279320899826196, -0.498825849932375, 0.5922026329566983, 1.1770495895717317, 1.1658461699766112, 0.5387616073370702, 0.6762210875494419, 0.564309749770725, -0.3035549596906124, -0.23885528257994526, 1.1072615720375825, 0.5666318535111361, -0.45569851974439834, 0.8338190610059566, -0.6359449813770147, 0.2596402577409722, -0.7767216770530929, -0.90418267806025, 0.113288160612949, 0.39315211887973467, 0.15879221931780196, 0.758361875600458, 0.8700712002631037, 0.306520197643136, 0.7532325435435356, -1.0353300637178853, -0.4455790005356547, 0.33046558165864237, -0.41986999994668306, 0.773773975624336, -0.5730775662391308, -0.32242207870145256, 0.5695427482221903, 0.06540060708986029, -1.1068765041634638, 0.8444999211248407, 0.04543079459398691, 0.4642442589105744, -0.6039790052127436, -0.892455957263908, 1.1129699696404938, 0.342772182719143, -1.115584864083039, 1.0625540212723195, -0.057194100238716405, -0.5879196602166177, 0.5790752639491412, 0.6440806383356259, -0.7481329504140624, 0.20534187872140564, -1.0990982256561714, 0.2791331755311888, 0.20300461353714883, -0.8197464538582441, -0.7741517445104196, 0.36122048253607164, 0.813782786457911, 0.39582928554897623, -0.02580819987456784, -1.1628778529586477, 1.0745713708553488, -0.5089798478319643, 1.0062303026439694, 0.6478357664888437, -1.1138156319365986, -0.4955658167644643, 0.01673202498902171, 0.9162968925255734, 1.1449260986124963, 0.45197676369281314, 0.4913407885919339, 0.9059066063082057, -0.6513168739283108, 0.08060475225758434, -0.8062943072398908, -0.5854814411007928, 0.8888342908698426, -0.9445568643031321, -0.7753945536759548, -0.3003503278781188, 0.6951193721206237, 1.0356586073990068, 0.8830749880175515, -1.0664223102877843, -0.609899276809712, 0.8167470737757756, 1.038925181199521, -0.5200440777902839, 0.4128415160980885, 0.8988517426568858, 
0.23012308000225246, -0.981407304217973, -0.6000864286294282, -0.8302474366129275, 0.3022460425741058, -0.7232702813935017, 0.3225916050209834, 0.1690591643089261, -0.731263207027456, 1.0793778048303206, 0.6724712011076479, -0.7393802772190122, 0.52180702196962, 0.653704773120031, -0.8435500860065721, -0.503370357216786, 1.0089409411880252, 0.8239113158523748, 0.5789158304017497, 0.8017043064577623, -0.81666613981439, 0.4674783795387365, -0.44533480654686275, -0.4893466194479631, 0.9007928200059672, 0.02483073138245584, -0.5944238649035959, 0.28518215854040774, -0.24733421237552355, -0.8505607276413669, 0.5571358775523416, 0.9045395124615454, -0.6657820640979742, -0.9652597006250097, -0.4591179819423816, 0.05481742210849316, 0.28907992231884405, 0.7124381991670108, -0.6030190226588272, 0.3369808339189879, 0.3038297672106993, -0.995236450812498, 1.0777162746217996, -1.1439340008569936, -0.7047632734591232, -0.532467147310556, -0.7640334064115123, 0.9758524528369246, -0.24433882388927364, -1.019693761734842, -0.11397606855688958, -0.26140300759258983, -0.5755519362573679, 0.15416561480192903, 0.8157008073605059, -0.988078686018823, 0.7532529999268318, 0.31269649944522704, -0.34261705061474756, -0.8905985767822926, 0.6096508828791698, -1.0668100653950248, -0.379130418894862, 0.9096912331462104, -1.001701666565123, 0.6416783457901682, 0.14610426544728483, 0.7031005674547539, 0.4247842879063306, 0.4021107592425244, 0.8928714878416208, -1.089585355806771, 0.5386513324462168, 1.043195877742578, -0.9701398946532979])
# 50
runGait([0.6109376305551025, 147.5781875317787, 0.8620634914832349, -0.5626015076241742, 0.9150007577136691, -0.20417512686545095, 0.6544359094278946, -0.31327506130394855, 0.8452968857390276, 0.13887431059938735, 1.1771529929786608, -0.9178071433237085, 0.21286308614656976, -0.6984312985937364, 0.8250658263071654, 0.38678895878185166, -0.8386364979015601, -0.8324431895189177, -0.31964574670169177, -0.9513705765809792, 0.7833723510749697, 0.9633303649676936, -1.077086285278876, -0.5823511574760045, -0.24329005344133708, 0.36075110180937114, -0.8737875239530779, -0.7120336903431772, 0.9694421297627523, -0.681817972163381, -0.7263666665964092, 0.04202279641735396, 0.5376884588766628, 0.5528900104757648, -0.3762309750477318, -0.5347146669245599, 0.30309856425260817, 0.02701219403931735, -1.1761371301420371, -0.3097959495083542, 0.637250448114133, 0.45383108548435047, -1.0293681131385823, -0.34337946728402396, -0.20240409776563223, -0.30376152527443845, 0.18856656091055635, 0.13958997918335925, 0.14259244107987795, -0.3669671234254508, -0.1371355859726133, -0.3999333309862724, 0.08190816860672162, 0.9531999241577855, 0.4305115008592596, -0.15969404241405846, -0.10706207687230784, -0.5875234717318174, -0.8888652093021814, 0.7018823816096099, 0.0097155460541066, -0.31852823774787353, -0.7161533552571332, -0.6946183251839217, -0.44771872458142625, -0.8020418614747353, 0.08697562850763507, -0.33670122865676794, -0.016255823485752413, -0.44553776220063407, 0.7067040729755709, -0.3305529141109416, -0.12353152419246438, -0.06405724257227025, -0.4289379681288039, 1.0169932699634694, 0.9219679687362414, -0.07926430997569933, 0.8461155106368546, -0.5108459915920485, 0.01721461698106075, 0.25640166227431, -0.0012638280470829346, -0.32081211955480904, 0.965518227098844, 0.07977377006028907, 0.9914084076788008, 0.9368602392194756, 0.79622005627768, -0.12120211619815363, -0.07642712418177038, 0.15250148243132808, 0.8820133072304428, -0.15324005900457768, -0.012947970781577268, 
0.5314107654179234, 0.2657806659207431, 0.21867155408318162, -0.5645131510256867, -0.16370059560939496, 0.2210581088064703, 0.39055811273202895, 0.2826802498295499, -0.4229943862011821, -0.835900738908754, 0.9612898958738532, 0.9962752356339487, -0.053303884261599155, 0.30951330649963815, 0.34386442126447203, 0.3167278260159818, 1.0850909905354877, 1.0088643013546652, 0.6148040192088533, -0.32713499022688375, 0.13265347253599408, 0.729050651796031, 0.4385817170037452, -0.8104814221892234, -0.08204642341024995, -0.429968478624448, -1.1469995240847928, 0.05053455747353239, -0.6868806082011671, -0.7681702363623998, -0.6240472813916106, 0.8999904570363008, -0.5540755976757679, -0.1395815200095187, 0.7216419755694113, 0.00341887019974102, -1.1314219417339941, 0.47079825372218626, 0.38634641962439187, -0.4969326894940178, 0.9700897618329442, 0.31738524085942643, 0.5918654468120178, 0.0649345288163781, 0.9223422749742548, -1.062672657821793, 0.30896020749191433, -0.28885102187939604, 0.5103642056497637, 1.1586385214620625, 0.47011741255653366, 0.7362591411689603, 0.695808261288789, -0.6331424834334121, -0.6156728717869878, -0.6958300056305404, -1.1223768304439534, -1.1079218078030504, 0.4832955305303133, 0.7872713684414188, 0.23742695514821588, -1.0325192716558655, -0.5035525254625557, -0.11125895540865731, -0.06707968995471164, -1.0901891398628312, -0.05185746626132734, 0.17939884745186596, -0.7629342237736646, -0.25568469662030346, -0.3436266275846107, 0.5234963038843524, 0.532265503526556, -0.6045831852953002, 0.9974128911214244, -0.17925607028201557, 0.5791020459001643, 0.6873833790287516, 0.21880371572846155, 0.11009481032702205, 0.12865186069194162, -0.3268975846759916, 0.02259596959226029, 0.5559932137864555, 0.5932843214246097, -0.3710969455539212, 0.14529725265287333, 0.7044006452814845, -0.008852974292849092, -0.6124416737681215, 0.9682131380447476, 1.1375649719691259, 0.37091879445775916, 0.9352732490378375, 0.1539095660283667, 0.777440000897248, 
0.22606717389632985, 0.6069013838761509, 0.30093397706517244, 1.1442039256026204, -0.7161426712243226, 0.588887225888025, 0.6972839960175987, -0.3500160949784321, 0.5128375679350539, 0.7935689766031192, -0.19559794779993978, -0.6253604887410248, 1.0145936629813255, -0.6706586307879839, 0.003436295592896238, -0.417246076528322, 0.8556308147276876, -0.209938526431461, -1.049104873280623, -0.33207489467651996, 0.6814354585137341, 0.3417057443470919, -0.059172559496654564, -0.8715300782572717, -0.2556518530893984, -1.0671471245233821, 0.3614377209651649, -0.15680078741943126, 1.166195067305183, 0.32081449773971193, 0.18756280575004247, -0.19985672490920128, 0.7805915689741869, -1.0536988894142132, 1.0857317947415768, -0.48900363536886143, -0.425774798688516, 0.17741903723193003, -0.1303947078559029, 0.9502549942826287, 0.5361055442035286, 0.7290061426453971, 0.29795698418990374, 0.26959259813541037, -0.8620031075888178, -1.0011321698786917, -0.48116523293039026, -0.3455270947267371, 0.4655510054575944, 0.21073592473488212, -0.7393279519987309, -0.7331986835493073, -0.19722904469418162, -1.0802395643312521, 0.2934313950761999, 1.1402520649753152, -0.9086071535929683, 0.02395654499079075, 0.3684164909317418, 0.5614399969048851, 0.9946642592430968, 1.124476487906898, 0.19575442149823322, -0.24851818064192416, 0.3905054965095121, -1.0073781554239616, 0.44320833092972917, -1.167994336275549, 0.6853930357205337, 0.09224681578137976, -0.9244241472672434, 0.32327571642704045, -0.08511712913634017, -0.1417785078648055, 1.0629206015460837, 0.3359763967683185, -0.30410336954697426])
# 100
runGait([0.6640463205265905, 140.52675516067927, 0.8199810222722016, -0.25481952456604723, -0.3425457104430286, -0.29895559122008386, -0.03691952728541809, -0.2926749796378846, 0.32589540084946145, -0.917897553277117, -0.1788579132316635, -1.1021614115353857, 0.16879565842355682, 0.9654926553955153, -0.8262395642035657, -0.032366000748803886, -1.035706777515601, 0.421355764319076, 0.7572712816111169, 0.20885822322721553, 0.4327161270350611, 0.9459811540427878, 0.8261945712691102, -1.1252075254946101, 0.47151105047328135, -0.20370646005147414, -0.17791531392877594, -0.1951822044066674, -0.3347713397441206, -0.8437477462605121, 0.6043792513174889, 0.8135465977213642, 0.1161484116712968, 0.2520589879602344, -0.013875994605011654, 0.6015180797865725, -0.6176009285822545, 0.12004417622115887, -0.9961965091337416, -0.3125071727309118, 0.8937107026868603, 0.08912944387701997, -0.06790816311341702, 0.22325347059753536, -0.09025762128409298, -0.011920073737759995, 0.35752949513101395, -0.45906903012374745, -0.788562694194514, -1.0050198450114056, -0.0361372172292605, -0.5954885246272503, 0.26895619166201407, 0.27184604863769407, -0.37353950677917513, 0.40112354341843237, 0.6375471269499228, 0.3141153522934928, -0.9509274229231759, 0.8779304554614856, 0.04594048324209277, 0.2495420932435201, 0.08241559351660238, -0.17642988998764342, -0.4447938485357037, -0.29330972785416387, -1.1174363806073722, -0.9616482147806933, -0.11200817025193116, -0.2108973739829038, 0.7633783825024583, -0.3175711563677784, -0.055385576075883194, -0.23831266165748738, 0.08104352235723783, 0.023356401512964642, -0.24320747848452665, 0.5859836965526846, 0.5423632994485224, -0.1667373762787313, 0.28209152742387117, 0.57278826903801, -0.2032795427955932, -0.4025112301636612, -0.5183539956075682, -0.19265157313585188, 0.17583720556492372, -0.5476355988504892, -0.6627234804934719, -0.11118988812304811, -0.7805070621113364, -0.4529826769989489, 0.7097466060065737, -0.620126964770608, 0.17005073215107458, 
-0.310351639008707, 0.7097004232177474, 0.6271790085262221, 0.619307086397858, 0.8131713488548474, 0.1497738579998278, 0.02526583014944417, -0.20005273447658806, -0.5474277783268228, -0.9924782021468856, 0.5028217189827595, -0.19263703499936438, 0.20588243107956172, 1.1385437676064627, 0.23883082558624708, 0.004571684392304376, 0.44876050879385027, 0.12812072388391504, -0.24208052529798593, -0.11013899281343525, -1.1196512365364517, 0.9507652902470094, 0.4545046069675115, 0.011125408478537602, 0.6549444015747156, -0.9034580752892476, -0.816680671954233, 0.42267629830319997, -0.610277202583551, 0.5500134814297155, -1.0434024414406398, -0.1937217218579826, 0.5755930968565269, -0.3839059765475241, 0.5719780788906925, 1.0347321090545003, 0.44370082946255346, 0.876765248918222, 0.2923665942100978, 0.9325649935073932, 0.5916765606714397, -0.8204301829871634, 0.5375111299520478, -0.9513662298918846, -0.48330360663587535, -0.014861243621365067, -0.7494281866976655, -0.08825309305806134, -0.9149440954101552, 1.1656564955027569, -0.319007800832239, 0.24788655005464028, -0.3902252669018448, 0.37813696098203525, 1.0257086358944083, -0.22782064462614982, 1.0142953972331836, -0.09326047201660298, -0.9608786031964357, 0.7922821770960228, -0.5752078753307402, -0.8741277024150452, 0.42074556397962476, -0.17088790293716782, -0.27812595030330334, 0.16024650373276178, -0.9015926997014683, 0.16765286991610046, 1.0745410173910972, -0.6109232086797313, -0.6866105711698087, 0.9586175739077698, 0.36069963154315404, -0.8179673245547332, 0.28221064021521486, 0.07881056395367772, 0.9621841067996804, 0.32611659794735176, 0.44963057094757963, 0.07055641109546117, -1.0165648669301155, -0.03825226786436686, -0.2562167100164272, 0.7081096623524648, 1.0925864874641888, 0.7985534527556107, -0.576117648391484, 1.128905296865967, -0.6836316776207304, -0.2843403105351967, -0.2875350090436179, -0.8884226806370785, 1.0867043497721995, 0.15936102064610103, 0.7213678990932757, 0.20189075328906597, 
0.21909012756117713, 0.35081305063305934, -0.5533491342604271, -0.39139018059751557, -0.6063198386438513, -0.5324644497030327, -0.9211774284351677, 0.8014720982812371, -0.5110042508153627, 0.9729234799472499, 0.6198329213846822, -0.7172777918874114, 0.71393306635313, -0.8845569787211464, 0.8037248436039608, -0.4005762723352054, -0.41068663652781295, -0.12408256332477458, 0.2156827075982222, -0.5842278773022845, 0.5993604325472786, -0.1714832792903668, -1.073883900675711, 0.5300023789111741, -0.393565753380665, -1.1077691155993767, -0.19722587955218868, -0.10417172968555885, 0.6584848022547337, -0.5329152183955796, -0.3950555151941949, -0.10882724704645816, 1.1644009182829218, 0.1641879666963921, -0.6681618516093505, -0.927583868963644, -1.132447578431215, 0.9601897942960477, -0.3719235237813504, 0.568283689266319, 0.30050241478858897, -0.40449584730595933, 0.4561858885619927, 0.2909602044742249, 0.32523163443121184, 0.42755122870221796, -0.43174561514948073, -0.6752553468030823, 0.4204116282761027, -0.33022122289804556, -0.10054716261882779, -0.4499960947229804, 0.5718195749224134, -0.7769045028417172, 1.119286880787453, 1.0339119406352926, 0.05274713166468337, -0.6460257891457617, 0.8332178051984421, 0.03159851737606184, 0.4659457011446515, -1.1466040486387152, 0.40969329358334694, -0.055850231800543215, 1.0507510732129994, -0.05830916713550008, 0.6673647072199382, -0.93841995086702, -0.5252036523060817, -0.4085500615196542, -0.04419643170246719, 0.5892251042672283, -0.07265048037271993, 0.6495073424232363, 0.41894378985827774, -0.12136863342675985, 1.0354604832131513])
# 150
runGait([0.9676294826559563, 150.70882987508452, 0.7774265785769597, 0.30438411789744857, -1.0457358937004009, 0.06569446508772794, -0.15498365774078185, -0.2377622141182121, -0.5523307256530675, -0.08686467446610878, -1.0526089743219282, 0.8693892350692811, -0.17802396330235765, 0.984292423423254, -0.2150566341031429, 0.06941172841331614, -0.3042670031127815, 0.9739683090160918, 0.3387525680108089, 0.16403610960584877, 0.3348632802624376, 0.726765357781341, -0.9126961896061363, -0.7229583630620422, -0.3051251400627612, -0.18571836602256658, -0.31864411222929206, -0.19706640874261128, -0.11173710313723983, 0.3565369869153394, 0.1971428113859381, -0.2763605210478526, -0.10988448914397031, 0.4865331848012372, 0.19579861099733442, 0.0985008925024958, 0.10508611443908723, -0.5180802186096138, 0.602575401604621, -0.8201174052127542, 0.9018628672824313, 0.0498784872337901, -0.521071153653444, 0.4481479231233141, -0.2524266051402479, -0.253402947342113, -0.3651587489535104, -0.04675187762525251, -0.5284469398758529, 0.2654544927821044, 0.46605508251848005, 0.04966663957014836, 0.2745167545365336, 0.4868187826011344, -0.1497691281628623, -0.19339266529087248, 0.24136863629471383, -0.43924814954512725, -0.6263279786320504, 0.8484699492362705, 0.1585129887321573, -0.2588768245023081, 0.4805335508720735, -0.23474437738054899, -0.33209420118866895, -0.8818001009909548, -0.3070185272881074, 0.32004109284523, 0.6027386544692759, -0.6121775505303508, 0.4490581540464468, 0.5642596329992625, -0.04982483368519975, 0.12126979404157245, 1.03751465671718, 0.3745312020424947, -0.047861781623897426, 0.23215652576049822, 0.48636218211863, -0.1235050817634415, -0.43294538682063655, 0.25467725985917183, -0.3065392897369155, -0.1557890602814631, -0.1535446617220671, -0.594059819196811, 0.2979599287648182, -0.47393639849675195, -0.1498269458086204, -0.5013840640153254, -0.4272045457213613, 0.41099576969282453, 0.435974275195693, -0.4947517141811753, 0.5589410579791683, 0.1652474308641266, 
0.40923547743201916, 0.8096533414423498, -0.1614737073645072, 0.1666180159560079, 0.11988387794171795, -0.07807848102820575, -0.7442135781834419, -0.34406828342523743, -0.05221941010307851, 0.3010681255245467, -0.5477604345796725, -0.48790074433118147, 0.5210477108826119, 0.8054612805115671, -0.06963557072433436, -0.15839013687807313, -0.8487419175268571, 0.10322897440577106, 0.28014718041568193, -0.34631485812371793, 0.9430752549792686, 0.47027663953356336, -0.5794821932554436, 0.4692452807674205, -0.11158040045677747, 0.27992277267081245, 0.3776130667252946, -0.6369352430800613, -0.2996620853089707, 0.15891663821065996, -0.2687942812407239, -0.4471464630512332, 0.12797248611817225, 0.03778787082978413, -0.04817441866063496, -0.14364337110596323, 0.2456276684919766, -0.2193460148147342, 0.309322461049816, 0.8838971330552886, 0.9139067446448306, 0.7520794039116794, 0.3506023525822065, -0.25510377807771967, 0.16331308210797202, 0.8879935062411524, -0.5959693041241926, -0.20759495151814522, 0.2108418708456552, -0.7452310360910046, 0.12664881554927482, -0.047080807460529284, 0.4692849434023498, 0.7458543281348985, 0.8107049435101739, 0.7656829635908563, 0.2585428684080011, 0.9841469724781623, 0.8620949405960008, 0.4870555312544564, -1.122938689793093, -0.35529359973828545, -0.327327233145238, -0.3160934335831116, 0.39591363428761267, -1.0579727707301056, -0.31077112675144547, -0.7938933327626483, -0.3590902641595922, 0.19493924076014235, 0.5890886736882655, -0.009036129790468118, 0.596132945850858, -0.03994141013499769, 0.07806084523888342, 0.5692918377135482, 0.4974953937971758, 0.3775889672978954, 0.12873405457093853, 0.3895539355393124, -0.014462890465162359, -0.55425962690001, -0.6132736305766234, -0.41102081373534033, 0.10835255920191245, -0.39706623995337986, -0.05463237156533987, -0.8821361662438925, -0.4293498179030984, -0.44754327412891415, -0.1880089783045697, -0.7609470792799706, -0.3607734401106461, 0.7228114557048017, -0.32350458406045907, 
0.003881601223850173, 0.4645169228853948, 0.30933674367871833, -0.5511008060190785, 0.019215497706641993, -0.1901675903409386, -0.2365943643295504, -0.8261367539601678, -0.5582895426381002, 0.8430889595735926, -0.10476902705139379, -0.7224029312799556, 0.27571772103786507, 0.0885536238095041, 0.015217124065685712, 0.3616330436036972, 0.4495807925732856, -0.25846532959546853, 0.02789168870877641, -0.30766351675851644, 0.4048762627077507, 0.27121677550071394, -0.15171546326311514, -0.7757535339112696, 0.07895603889692605, 0.3394112118661255, 0.2762139076230077, -0.8609718868797998, 0.6072119320035823, 0.5713716255007126, -0.6528469102896424, -0.10679737275320846, 0.05933745485535778, 1.0330880833994396, 0.3798427665421174, -0.05376631490501693, 0.35154721956479057, -0.2278546286708691, 1.1215415803557947, 0.37551720594302973, 0.20578900503472414, -1.101421780287494, -0.16904074302935618, -0.3137973325657055, -0.31107743259952253, -0.5054336950181983, -0.6015459471353947, -0.30583499460877933, -0.5710653400947024, -0.9093940918050203, 0.4258975120803453, -0.21935467585339385, 0.7752057901473814, 0.9048914510700778, -0.7846269882405049, 0.42094152311983585, 0.579829888624975, -0.17584369852195275, 0.3434019156849982, 0.4575688137719436, -0.14604013456644133, -0.3645678519465499, 0.369472539160814, 0.8790080502109889, -0.19650675936686374, -0.18752237139683797, 0.5586386030306622, 0.9508801265095614, 0.9411240544168558, -0.345415755136577, -0.14676816884141242, -0.6025122074152638, 1.0662310264379242, 0.18388246055206153, -0.07562722289757418, -0.6679498245650091, 0.18818487201688333])
# 200
runGait([0.9348218778087588, 146.91267362984482, 0.8744564653177251, -0.2311034676019011, -0.0162125073980263, 0.23441739730394234, -0.04068722081101439, -0.01314909472419457, 0.4223694738439635, -0.25574717281863013, 0.11241756397861355, 0.863412621923713, -0.5039035218187172, 0.8097751196961653, 0.5091863473931626, -0.10148835532593689, -0.28922562386776374, 0.5858340382268268, 0.43524750474759666, 0.23821865739642933, 0.46385381613992593, 0.9239038510623541, -0.4014932024069757, -0.7366198054221755, -0.26224195420212393, -0.26567675307257604, -0.6406999273924185, -0.3947291539512541, -0.13202897393736873, -0.36868075869905625, -0.05946923101403889, -0.27726495980956967, -0.5626455668474689, -0.011352949007235264, 0.229512162107839, 0.39705904834877165, -0.4006127993614362, 0.45741277302226363, -0.04872220530283761, 0.939231875796499, 0.9244874997661925, 0.421354026869032, -0.30719169912092076, -0.28374626104543155, 0.036224616108006924, -0.25044075786835196, 0.45388624765306124, -0.2717804606894174, -0.4275921887976257, -0.6646840055520035, -0.14474013946854764, 0.13888294858271502, -0.6177532030009414, 0.2746798600901093, -0.12203515742935869, -0.2628136886221271, 0.38679528157113163, -0.19822470557065894, -0.8884013180032637, 0.9228748319499014, -0.4928839251159022, 0.27037805010739346, -0.36086268564047347, -0.24969367306344575, -0.3336638120272727, -0.19129594916608794, -0.5412798207097986, -0.16378037787753816, 0.8380380076966655, -0.5602998817983854, 0.18838739261107923, 0.5039137894474521, 0.21024282762391727, 0.08436579427566918, 0.6355601001466065, 0.5267180582633789, 0.15087882211657277, 0.2588579542917172, 0.8159804644068003, -0.38735187036573715, -0.40374868821921456, 0.3176228472810542, -0.29327017193023885, -0.5501866453799408, -0.7389217625605805, -0.5615715453807273, 0.3712336142992907, -0.41428358751812816, -0.10726134390360059, -0.4099102222197733, -0.09159826087452744, 0.19893389971361133, -0.13132545262706974, -0.7570399899916764, 
-0.04170327254544387, 0.6914142826643184, 0.398856999424931, 0.9233641685403612, 0.3554904000885851, 0.07458506209142986, 0.21135000375679375, -0.23744305079600145, -0.013330649851585474, 0.01157025728448935, -0.7142922193662176, -0.057521311631223385, -0.6442594978866643, 0.1356536272774043, 0.9557393672331027, 0.5233808828963422, 0.11374875444427712, 0.09649391854350325, -0.4504172430652743, 0.6539212469538538, 0.1418178498877025, -1.0285250393762446, 0.8140662921364629, 0.22032729153232467, -0.3131857222772149, -0.48190358196718097, -0.6137839859402877, 0.4530943681506018, -0.24490264309157425, -0.7036205243667922, 0.22880630883881176, -0.9353768616975839, -0.22729920059617795, -0.3986687624180637, 0.3771370805444293, 0.2962185862198067, 0.2904151502090839, 0.41187261747985526, 0.5568412571554673, -0.12906786905832562, -0.29816090327711275, 0.6695654120828216, 0.6511164416374783, 0.2354423280825553, -0.2735118074560536, -0.09228336036973724, -0.8324667409784399, -0.5181961380658293, -0.29006499483710846, 0.20790847562845022, -0.5417341319611597, 0.38491216942962997, -0.26276464862820204, -0.4194681593642782, 0.18074940042710627, 0.3028577666580481, 0.7988109336459481, 0.3168960795244507, -0.06708903960865052, 1.065337334032617, 0.5121231247483485, 0.4516447688051512, -1.1319252709654564, 0.36013046523298803, -0.34875120783540486, -0.4784054810104189, -0.6383425055163587, -0.6044113298680207, 0.44898870301162985, -0.8409890804695659, -0.13518983270380686, 0.7206151228912457, 0.6402573302424278, 0.10412067706361405, 0.054761428387725804, 0.7078674741419084, 0.6835347252533357, 0.21226096258043248, -0.3590529214883623, 0.4262896753265209, -0.048765644390856705, 0.38707830574861446, 0.084150534306334, -0.3647673644633279, -0.33591157304158037, 0.543209090584881, -0.9227930221772509, 0.2793779445429621, 0.11437608190319354, -0.7320684310072323, -0.6016577518295898, -0.45131872796737754, 0.05815823303022799, 0.8858937450029887, -0.0019911283316581506, 
0.9972537913440365, -0.03610523718702252, 0.7079473862344268, 0.2573924393198488, -0.04475989790206414, -0.046463632383485554, -0.5773905293012538, -0.21619326250264764, -0.18188721111452147, -0.07658954437605886, -0.7681077839396901, 0.09322698951176928, 0.1445427413363699, -0.26358485945073823, -0.35945630531652395, 0.6607553408730602, 0.5395749627096668, -0.06565247358481555, -0.13117986498165218, 0.6166842950843242, 0.4062530279955314, -0.32413286459352975, 0.3382214897259326, 0.060478191467047124, -0.30111192010527943, -0.6852023659365898, -0.6491951557870825, -0.6082929168718781, 1.0411226013092971, -0.6704184953876647, 0.2345803856056618, 0.4849343305291035, -0.18728401603681968, -0.19357983579761226, 0.6070055818353226, -0.1077936670840348, 0.7443380029642911, -0.17385870614262244, 0.4822669757853692, 0.389397991023952, -0.5111936026821581, 0.20633197759969002, 0.1718434261981503, -0.4672185062811099, -0.17236565565445167, -0.13557244311953168, 0.15571859432119767, -0.7500101603166999, -0.23515066300234422, 0.13714967863221472, -0.5267606306271126, 0.13519136956396957, 0.02524328959004487, 0.5235642763509845, 0.8384859468187118, 0.8448970827991501, 0.8107454533312748, 0.5496009157719298, -0.7861009121069743, -0.6234808705813863, 0.4696984323447554, 0.47429018972121284, 0.623971023164932, 0.19594794245176425, -0.08196145489238126, -0.44583013816552275, 0.331803658493219, -0.8110909742267071, -0.21348457194384118, -0.2793726604175222, 0.14931602985100503, 0.12574273195266228, -0.09011644679991501, 0.42322007305737674, 0.4167395715073748, -0.6212842551478526, 0.7246903575782073, -0.06965771001708732, 0.6997039268852637])
# 300
runGait([0.9580170156701137, 156.55023833080287, 0.8697263186451583, 0.41527680177951753, -0.3091878185605595, -0.6992501726886724, -0.3621632626756743, -0.08866639808357406, 0.04011602722874327, 0.07026709630165444, 0.08310069123845333, 0.9771964256154151, -0.400666267244531, 1.0176212186192009, 0.4686604002686661, -0.05444682325152628, -0.3446318452762033, 1.167921573447584, -0.24084550251954484, 0.11520414858809458, -0.9064091144422616, 0.6287846280182579, 0.4042274363728242, -0.17984894184261202, 1.1430310808904414, 0.12829599754811477, -0.2870163134708252, -0.18392037110494033, -0.1249514559839385, 0.40824155792459593, -1.0366104970327679, -0.21924341218226873, -0.10471741599076292, -0.039709787387529435, 0.24308506785995695, 0.5029106171652411, 0.00735981801132328, 0.46310261520766255, 0.2962573675541653, 0.5716571352437573, 0.9229307062858603, 0.4495458291143761, -0.32374844918493895, -0.5428033664800668, -0.1061657887085444, 0.09327418752618749, 0.1360933416106767, -0.05729803839333989, -0.5721972886843723, -0.7501829092993302, -0.15349800908427572, 0.18791462737535045, -0.5470482537350623, 0.0694805637919472, 0.11109949906739805, -0.05080609217009568, -0.10310782753879975, -0.11306331572492027, -0.7342942829959108, 0.5899346008666371, 0.15418825045439946, -0.02081659679571593, 0.23629696966788344, -0.20051899964600228, -0.1124416281658899, -0.2846997071631564, -0.7192552571252601, 0.37433250100587284, 0.4563424915786525, -0.6333746841090807, 0.48199161211899844, 0.7988546437125186, 0.3226337048037727, 0.08751293246797012, 0.6319836666961702, 0.5183905403464221, -0.03681080646589069, 0.32577945075501186, 0.6121356022112711, 0.3422973617966078, -0.38300865284018476, 0.3869972769037436, -0.3438931711267918, -0.2925667840718638, 0.02073915908080158, -0.6601106786744819, 0.18786226365552441, -0.4674216311409396, -0.20596050805709923, -0.6805376409410507, -0.11035629757866405, 0.128064195205727, 0.25006628117465546, -0.3911802588296395, 0.2206671476325982, 
0.09977946265209521, -0.8307214146667822, 0.9171381291965639, -0.4940453191512136, 0.1984116913539024, 0.16554188896519878, -0.1928373027542588, 0.035841851144624626, 0.19266064738694355, -0.5850119457302836, 0.2215164403315185, -0.5896064710472241, -0.5102020534325775, 0.7417367519970856, 0.6225121973006423, 0.19164293014838338, -0.05737129492026963, 0.6121162351865662, 0.6866768284728066, 0.14965833690587552, -0.4193640570236367, 0.8160069586540502, 0.4684910259589942, -0.3030223187406086, -0.22660524229484055, -0.26703340415352106, 0.2211243087352533, -0.34540536962148954, -0.49431357086808453, 0.4597744966917602, -1.1664564277056044, -0.42563404464543353, -0.2676448037922594, 0.10874078586225963, 0.30510521880764874, 0.0201911678551102, 0.06799655432754058, 0.2696266035980911, -0.022093390708932178, -0.23885757257157703, 0.23240754800646202, 0.3154235780471931, 0.41222261068120575, 0.21621816215661968, -0.49383086489683936, 0.22028919935866093, 0.3085302761259844, -0.7607054242258245, -0.05524612679499659, -0.4005994571597372, -0.0005409548233603068, 0.09356401716698916, -0.28019790711798187, 0.11366960643707247, 0.11455280438074295, 0.23238882534231647, 0.38151138367739235, -0.6805858547100075, 1.0336614238387605, 0.3031048722576475, 0.3326128459698742, -0.4841094623424372, -0.14572762596592148, -0.6989095508218981, 0.08856510384007225, -0.7833815783352288, -0.6051206720201369, 0.05183879858837576, 0.8525690594373536, -0.21238052918801256, 0.3064353096452067, 0.5227279651783818, -0.26970368473406214, 0.30040315174676463, 0.8044560092955977, -0.31738694194412304, -0.03793873042044421, -0.43780967817899613, 0.44695577264660596, 0.07431440259337181, 0.30468167123379775, -0.17018353015014637, -0.3717841160485929, -0.38018867550367835, -0.8319113906639981, -0.3172654371086346, 0.19103501813376841, 0.7907537210342017, -0.9089959391273528, -0.296319135720049, -0.3417512761829329, -0.023084343039717133, 0.4040896098348129, -0.31660229354210917, 0.20977471047292015, 
0.06648686941410495, 0.569876899843004, 0.21573326092963024, 0.12794663668726255, 0.13345639453704178, 0.06166211393616905, -0.03240968584900304, 0.5204613173650747, -0.054310331255427405, -0.606521604737881, 0.1468424637174045, -0.10111301306428899, -0.28436289560722927, 0.8017857089530022, 0.31257612218556174, 0.1945880804760491, -0.6089705211035303, -0.14618931754382708, 0.39343797880425146, -0.5405366749846467, -0.8955466868745385, 0.24857173830306561, -0.18623545173861095, -0.3623886194435283, -0.09699504056721092, -0.3486736104159766, -0.5453068629962625, -0.04010192714300855, -0.5306328827959214, -0.1354335299516863, 0.44670333258196254, -0.29490113171676874, -0.09463147398839705, 0.874510578425735, -0.08994591375239581, 0.9780471833760542, -0.5336783066507795, 0.44298379683008876, -0.44674080020159146, -0.31978046647941477, 0.4387881341782599, 0.5350973549899811, -0.27423388863198106, -0.39599195531628345, -0.17873234672818766, -0.3297290606571569, 0.4175261890217202, -0.41268092911177146, -0.22703685041841498, -0.274965576004083, 0.18191455558091849, -0.4939031672186963, -0.5349104573630611, 0.11079268810336479, 0.29357806459055, -0.7619449431049254, 0.712338129607013, -0.10469146069165344, -0.6452483516150597, 0.27405977939835985, 0.3831614384076198, 0.5368152956549854, 0.1178851135066101, 0.4460847058945193, -0.3883316848615107, 0.29727456716368683, -0.9048653966482885, -0.014346612174598924, -0.30126039008201877, -0.4640418178366639, 0.7986905413889673, -0.07702758460544154, 0.2259118808338296, 0.04026857681135493, -0.26928463274492503, 0.3490075592061821, -0.0973833708982666, 0.6135129639803163])
# Evolved gait chromosome — checkpoint "500" (presumably generation 500 of the evolution run; confirm against the results log)
runGait([0.7788908289099306, 159.1275018887873, 0.3675002143633652, 0.29358082034076927, -0.15802555334813242, -0.10120259171082222, -0.30648918293561567, -0.05565177235370438, 0.3049336061735364, 0.019485785927701088, 0.07832249749716115, 0.7871716752724793, -0.11324944128855793, 0.33911735703936136, 0.5918403744719638, 0.023601544667689423, -0.28854114361833516, 0.3817510406516811, -0.7632588163737368, 0.022344201194842275, -0.9273228127530271, 0.23720909885208666, 0.15504539382606097, -0.06533263447758221, 0.6586022121665115, -0.29344233757111227, -0.6100745589428764, 0.17959646981207406, -0.2567640532742428, 0.3822693241621865, -0.7185816271396638, -0.17076272358383804, -0.19398205231627866, 0.030812971237587354, 0.13867994513962895, 0.3166621409958447, 0.22733110475404655, 0.5283963851565332, 0.3000850798497766, 0.6660952670105973, 0.78894440862353, 0.5157348667100712, -0.38489800997374557, -0.5993480088686595, -0.18953762119493234, -0.142888877844017, -0.2487995360903048, -0.2553184465203176, -0.6835072531097912, -0.08818770700477133, -0.36234611577801024, 0.1882081759188938, -0.5403677941379689, 0.12135448182929069, 0.13736905269298347, -0.19255224947663963, 0.40446549296965023, -0.12911247255617248, -0.8830555265743198, 0.5421547081545663, 0.19311176458866605, 0.32721151153007055, 0.2615659111455483, -0.3394759597454976, -0.06908691052009064, -0.4945679102133535, -0.664437236127388, 0.455900172021705, 0.9711538928263904, -0.6122014695612167, 0.5055842831599157, 1.0056138878174767, -0.12461961762482054, 0.07270199762420153, 0.1945368404538606, 0.6057165296982737, -0.036218178124653, 0.47457880179122397, 0.561082214156418, 0.5518568617850256, -0.5389721874824412, 0.018553983034344634, -0.33762141187925576, -0.2910578505162613, 0.14535357070447755, -0.5450888267216742, 0.3147936923887069, -0.8630308630441204, -0.35532598921412856, -0.6465004633853288, -0.13894664477940558, -0.05983048923837514, 0.365281504661632, -0.5068437133042195, 0.27739826556422653, 
0.19813563846913917, 0.4204274997035509, 0.8205768952006834, 0.20430739373514373, 0.3485830076102606, 0.19097357428020537, 0.11039935013513971, -0.05699918798476541, 0.17873025134016074, -0.5020918329264104, 0.06913567443773855, 0.824576743344076, -0.5181143051990277, 0.5324747932677494, 0.4047169187725307, 0.17512694920182592, 0.09027062872679636, -0.020184312316256805, 0.45759775735791614, 0.3541811364432514, -0.42810856403905084, 0.34330793691017164, 0.37803887536859593, -0.6335568031247926, 0.7451449971471023, -0.422905787574568, 0.1108746870208986, -0.48281598513746043, -0.8845798783329725, 0.2848773437400216, -1.024048537916983, -0.3564009884656577, -0.12474613038765203, -0.20868110635282452, 0.34321844967647513, 0.026989145418742592, 0.0056243347284405665, 0.2157974836543368, -0.13733758298445253, -0.33659375632067434, 0.22898564273416638, 0.5087202771881477, 0.5372813556304213, 0.2763875601801632, -0.0931809119638537, -0.15966469444157633, 0.6929648835534945, -0.6910669985856048, -0.19389406110887974, -0.3902847322396408, 0.14551156290910644, 0.16618646822053293, -0.8039787804703373, 0.15428898913228964, 0.2190997039624487, 0.6297767323586855, 0.679561926817978, -0.25269142672699957, 1.073396465709869, 0.29029445313069896, 0.3205284197358836, -0.8690640106312963, -0.28879170523811604, -0.3779137324309013, 0.11936316568503534, 0.12976155632213082, -0.6265683661237571, -0.4855309257343185, -0.5955282235528421, -0.6845815418683121, 0.4737288107745596, 0.9252056200075451, 0.34567749439018725, 0.0363068008180909, 0.5036072080080863, 0.7640949628326337, 0.23505279305957724, -0.7167717818166955, 0.22850447613489278, -0.030879680605631232, 0.8293535643185203, -0.12780675301767844, -0.4412923860207338, -0.4755533837206397, -0.7447986150788337, -0.351029516667939, 0.14495494618770427, 0.4535847436338252, -0.3531591185986031, -0.3501206760556444, -0.34008350299413603, -0.1719031304459102, 0.5791280201924914, -0.1307266287188878, 0.5722057672119683, 
-0.15710148018631528, 0.8971534994186912, 0.2136354954365922, 0.10736052402292397, -0.09859594062374435, 0.029800704664000633, -0.036340884369813475, 0.13376827373262254, 0.06508877177327688, 0.3209725597538162, 0.25401652110289125, 0.306412096915022, -0.4062443766586342, 0.8116135654848087, 0.5038625296989976, 0.31304069230057774, 0.27907345633577596, -0.0628432232149184, 0.7488103240677573, 0.26866161782653364, -0.3422913295247829, 0.2620956604170435, 0.3869320718994709, -0.31031616120110883, -0.6430657281164895, -0.22156900833723173, -0.38501223572432874, 0.253328681493752, -0.43126896631860384, 0.13122580788583768, 0.38236533839599784, -0.41315518332835866, -0.06881562913283318, -0.06011531751913962, -0.1786891777692833, 0.5379347036881686, -0.40503481008181036, 0.5715406089598921, 0.09206279188231829, -0.06971523980576558, 0.2873942092356773, 0.5853487864435241, -0.2276481518074298, 0.024954174691643023, -0.45483892434063, -0.24065990358463835, 0.009443952578838826, -0.4484870705490043, 0.020876918478014064, -0.3827342793037868, 0.08765966633266016, -0.27582852508027256, -0.31975146101964735, 0.23584006084015846, 0.29568147715946885, -0.8673046059997449, 0.731200312180833, 0.023929423446759857, -0.573758689689352, 0.20993143702530428, 0.47001185232098075, 0.5565009861719601, 0.4404197794305241, 0.185040383009213, -0.4131600591525496, 0.16851972924086844, -0.4306339258923647, -0.08726456746018109, -0.4968797363532158, 0.11631152950365159, 0.12129503091276284, -0.2797069038719245, 0.28964257172500313, 0.30047606726857523, 0.7797290276248695, 0.6214545112158738, -0.05292732565014542, 0.7730879197180376])
# Evolved gait chromosome — checkpoint "750" (presumably generation 750 of the evolution run; confirm against the results log)
runGait([0.5798525952403486, 159.40058262811087, 0.2565373666541486, 0.4318803359566548, -0.12670809771496028, -0.18389697331663157, -0.2107828186822762, -0.08822530542530635, 0.22285787517794267, -0.14626266057699905, -0.010708372490804578, 0.8284588168920229, -0.4722713879921887, 0.19208169499854316, 0.7411503431106623, 0.040109802039862204, -0.09388586098031043, 0.3021068442167352, -0.7632253195827811, 0.7506268013480695, 0.9817795923219715, 0.24702479164747443, 0.18619723055382778, -0.01944816101487032, 0.48047453154015674, 0.044027151050185204, -0.3811611523444315, 0.07043423693100055, -0.3519428921487882, 0.06993952938115677, -0.5223378593335369, -0.22654293849301965, -0.3119867211980983, 0.02203965621516251, 0.13948563872031947, 0.303700575374144, -0.38645242345574016, 0.28023524831943847, 0.48072548736928467, 0.23302786453056437, 0.5986122931605656, 0.6980983847283087, -0.34928346917889913, -0.7471794250508657, -0.17324487759716678, -0.17474742917438432, -0.016090144920211283, -0.3939766926275115, -0.22629036214619017, 0.16700221777337937, -0.37273771647989734, 0.09062261530742664, -0.37891402716818134, -0.06917514694333406, 0.17291180959585456, -0.04844904642867835, 0.4909898309834386, -0.12380149375214647, -0.9111999943893617, 0.5239305389825415, 0.18171587810764148, 0.38432773481912763, 0.2485646260803057, -0.34491165699137327, 0.006209690525156852, -0.37412477201907574, -0.4604269737237233, 0.18972851127462684, 0.9223517018397529, -0.6210098084521306, 0.49640907383479516, 1.0891123792650426, 0.49418162637573465, 0.06420244660188437, -0.12089596773664718, 0.550748464763964, -0.06108552913804381, 0.5552559735763302, 0.5522750091665136, 0.4683531478559523, -0.49173267096532985, -0.03517143099932757, 0.06100743363377523, -0.1902922529235461, 1.1122974946275956, -0.6276776253711288, 0.18178541719252822, -0.8132832667298763, -0.4537338576551622, -0.5808862777121856, -0.23668011060600833, 0.09569218539264038, 0.304579054169755, -0.4494551817405288, 
0.15870763199473473, 0.3069755905115654, 0.3959705547451262, 0.7213258637748241, 0.3763728637422603, 0.40391108748034943, 0.37339498399229337, 0.01831140131395486, -0.07811735353228262, 0.13357830561871784, -0.3758537743243868, 0.05195547431005445, 0.9379615461508354, -0.5620520288627262, 0.4539455849665125, 0.8143570677050026, 0.004283675773725867, 0.11729610271034435, 0.25800504348420195, 0.3861845305371114, 0.041281706706534374, -0.5754739036307105, 0.25496830847083185, 0.5497065484163852, -0.6197402288256176, 0.4497922477657804, -0.4509130489838098, 0.08492188046586338, -0.3745067142483794, -0.6326481434329293, 0.35798335360645445, -1.0756026955774058, -0.1658013536974587, -0.7407148571190698, -0.4210965495924469, 0.31622190189278027, -0.037060520648369094, 0.027025421714000526, 0.12225503371751616, -0.1873821138347538, -0.39270249305607063, 0.20600245925762312, 0.6985349231049772, 0.538141419017502, 0.28367589673434807, 0.2408451341677153, -0.4857764076187511, 0.3515662648369627, -0.5970692688174171, 0.02074320821791737, -0.4780204081199166, 0.1369717613278151, 0.1389681583857107, -0.7410246577802894, -0.026563078155500804, 0.15750473450145544, -0.13448669931945875, 0.6551277580017717, -0.15601141002050287, 0.7448104569077326, 0.2902755482562471, 0.5364234542988243, -0.7792137180797938, -0.2846168758998948, -0.5839320530147617, 0.0628617276240415, 0.04002665260768596, -0.5479117597172838, -0.34196183099322613, 0.7056701204997842, -0.6647297748826204, 0.42806286178547925, 1.036676035059638, 0.642474334419992, -0.007131427190290577, 0.4639036629286082, 0.7267327436184402, 0.10043705596129168, -0.6440769086811172, 0.21343864782202604, -0.10329009445544304, 0.6425250240843889, 0.05230900542901544, -0.11251073882320739, -0.35085266645906615, -0.4199713623669293, -0.3351922780693797, -0.1672466366670506, 0.5379892464340184, -0.16330627356393349, -0.056804450777555304, -0.4017389922325012, -0.1611244393278351, 0.5367215850055974, 0.26665926782883664, 
0.7219467754507272, -0.23576690967651864, 0.9079195952027548, 0.2062387616905374, -0.46534175178439574, -0.10399688011724603, 0.03668728875610021, -0.036926473584827625, -0.10113397006234737, -0.0394881387655676, 0.350606385291519, 0.13403276401659836, 0.5123948398917316, -0.35772789686003825, 0.9668024245923357, 1.027298011515463, 0.48938829842316445, -0.016483670605963077, -0.11757722414303058, 0.2537310530675592, 0.2784246188624572, 0.7695997113771061, 0.20066387160146376, 0.41486141094226053, -0.41256985585307027, -1.0477965177073278, -0.3200262420965195, -0.4739320902039974, 0.1565114856750497, -0.4241862276994479, -0.014128604687211607, 0.16050204319589437, -0.40332695432173227, -0.17516701158934914, -0.11260377701162964, -0.13167892785497093, 0.7970663227025759, -0.5351328770237976, 0.5899558689297649, 0.23041684659804587, -0.15627868369477282, 0.2137694705032495, 0.5696194997458192, -0.17336701928954443, 0.10876003399835937, -0.17072054408485435, -0.329611678752721, -0.14558125057844667, -0.5971107203993231, 0.09088625634430318, -0.9113803135864458, 0.20662562476533045, -0.1850266147294778, -0.3839648611252884, -0.06266604342717771, 0.32620231538556216, -0.4511759755602935, 0.46462930437425676, -0.19098144950012377, -0.45748534958561105, 0.21835806888483988, 0.375990472674876, 0.3152963988042483, 0.4174992880580025, 0.2417839091584328, -0.24397794422280902, 0.35917942428087724, -0.3968249675417042, -0.020614707571517774, -0.5895590906441834, -0.4121546533845332, 0.7169006234784104, 0.00017719522626339322, -0.009118568606292298, 0.43285003350721596, 0.9231244888866221, 0.6350781716028882, 0.025592267149331688, 0.8892290172542283])
# Best-scoring gait chromosome found by the evolution run (highest recorded fitness)
runGait([0.5833710183294218, 159.94477382941363, 0.274991231117698, 0.238089263008252, -0.1417409643619611, -0.12761521449834923, -0.19289461538039873, -0.10856112933014755, 0.15968577636162862, -0.17368631551323016, 0.07203736702130702, 0.881941236232128, -0.49131125475461257, 0.41963359660824545, 1.0286266905353563, 0.08461166310190373, -0.0930092118460774, 0.6688730971003121, -0.5272671341802382, 0.3759165556754689, 1.0284225286200426, 0.281022494747565, 0.0009195019049819536, -0.0879260351696233, 0.36182544936252586, 0.1544240116221797, -0.3379165966729512, -0.07107860607401342, -0.35347372811378175, 0.24474628711192828, -0.9554210776881508, -0.2446425071911576, -0.21834542364787896, 0.02941224160898587, 0.19860309964710376, 0.32015885118565973, -0.38463037767537034, 0.2721652517032083, 0.4498871803091191, 0.2843738257583236, 0.501327523282536, 0.669013035550511, -0.37715689084783993, -0.7193636388118547, -0.2383013342521316, -0.17489396233602697, 0.06083890600102712, -0.4034525364101677, -0.24583511137643727, 0.05016350547975096, -0.5231072760701854, 0.0920601174252217, -0.3798879187489547, -0.06425162833626528, 0.1175629295006112, 0.02682125752619795, 0.5075090858782456, -0.16206766607753353, -0.9027943006438802, 0.5191380547248162, 0.1922772367714138, 0.3722573359482723, 0.27824784135917774, -0.36599087581441164, -0.06620007679763394, -0.37707864764924415, -0.3432745401212583, 0.1890972969239655, 0.9771314779636118, -0.6379190437437263, 0.5327515128308239, 1.1266802573644779, 0.4853618562003829, 0.03715655903182573, 0.07311510274068314, 0.5423300464375319, -0.0658356136420452, 0.6733211262326829, 0.5412515512543659, 0.475841545559648, -0.5369352656406974, -0.026774867624149132, -0.27366768812094183, -0.21535394823513682, 1.1272641607914005, -0.6324828192170618, 0.22992240750612652, -0.8332942275738103, -0.4448812583609043, -0.5639998583724821, -0.28504303170819406, -0.13715306369785674, 0.3349484025718961, -0.3700368781101578, 0.20300787227134326, 
0.22374862961672667, 0.027970795265832554, 0.7014861172229404, -0.04926493320095343, 0.4402290874741377, 0.3860194514832391, 0.11569030596073443, -0.06036997313965854, 0.1497256975919505, -0.377481545800565, 0.08298090520892161, 0.9438244188255105, -0.48021805376469584, 0.4543715274216308, 0.8678356926851799, -0.003915278924756722, 0.10352872557178089, 0.3358865719916397, 0.4211155579389066, -0.030249314775129762, -0.5658285551195321, 0.2548939424634452, 0.5745275199121783, -0.7796534931283465, 0.3451123282022226, -0.5444761756627212, 0.12200790829540269, -0.25898916669720645, -0.6724214809633824, 0.34635133694786935, -1.0685493620290625, -0.166454962800517, -0.8051985252291386, -0.4306033386198576, 0.3621432335285329, 0.014468787338504891, 0.141080510173102, 0.13964744684544284, -0.15421615523049945, -0.4317859615807832, 0.225587551388641, 0.693207792900198, 0.5533948757767216, 0.20097437713556277, 0.23256665133179352, -0.4990635733731684, 0.37724815041759296, -0.8484710927328951, 0.052329062943848995, -0.6454186305205749, 0.01709338435440333, 0.1426615820133712, -0.7496362823830726, -0.024592492969917387, 0.07160640453502068, -0.2474844962946594, 0.5941575845367926, -0.20960304431184107, 0.6424578239764861, 0.2920273219156567, 0.7036560308915455, -0.8121144845665177, -0.2789410770162129, -0.7413476580353197, 0.08188596178827257, 0.07931227840034549, -0.7207975890283618, -0.6065813517836143, 0.3983191376566657, -0.5635381703226274, 0.4088177741187736, 0.8161358908559947, 0.6554301845419963, 0.04547395492205422, 0.08051995385752733, 0.7945827746307063, 0.11087351442670304, -0.590752837198396, 0.2065658076101474, 0.0751712923684167, 0.6709125887262557, 0.1373187383960103, -0.18183312802940133, -0.4350057499267376, -0.3766430661862623, -0.8199596582372628, -0.14153603961297806, 0.590381220135425, -0.16508543450631305, -0.20708569485397604, -0.34591459093209215, -0.16651848898298874, 0.5178287410957361, -0.03657852374819068, 0.7219509009910949, -0.22937310869060928, 
1.1464596068133195, 0.21233031874020497, -0.3609307120798186, -0.41136793770748015, 0.16347336752562386, -0.04569336581669571, -0.12320285070861678, 0.08240315222323638, 0.4579164525630148, 0.10194743670878537, 0.5748048740706077, -0.38484763478500494, 0.8009525610955103, 0.7333605699869847, 0.37124845323434263, -0.03501117975518403, 0.012948485735009754, 0.29139932552493186, 0.34343670572619495, 0.8542624160112243, 0.2047939987143607, 0.3903803959743837, -0.20493239305818473, -1.1287011492813999, -0.32936672671984124, -0.36581898984821176, 0.2451494881151558, -0.5756460181132672, -0.030221322542399086, 0.16449751038968874, -0.3567999251278406, -0.1618212236300447, -0.11207687799559582, 0.05735981109003743, 0.9415542138674963, -0.3554473551841668, 0.5357750527639715, 0.21498207309781378, 0.4532008047694893, 0.21329882952215284, 0.5859846457864504, -0.16362093353740018, 0.1319546289160159, -0.2194715016448026, -0.266878066809855, 0.19007538470038587, -0.6214579041470789, 0.07758190484905561, -0.7515667963793465, 0.24700843522334995, -0.292447662725082, -0.4181253106644778, 0.19564903243421003, 0.19724000917384554, -0.2063833311816462, 0.46455125472211967, -0.0899164519697946, -0.4859666940225116, 0.2204956850628324, 0.5537344147667811, 0.3710020504693896, 0.42084808456566025, 0.22826893049722402, -0.3009973798887208, 0.3133299056345898, -0.5362470634819437, -0.07363025268708201, -0.4903844709824772, -0.4212031706808154, 0.593200663984306, 0.03428638943992187, 0.24491294188014479, 0.46221509482741235, -0.20849095803967968, 0.6337473393725238, -0.05747450930384633, 0.8875435750416844])
p.disconnect(physicsClient)
# Run the gait-replay demo only when executed as a script (not on import),
# so importing this module for its helpers has no simulation side effects.
if __name__ == "__main__":
    main()
| 259.415789 | 5,640 | 0.809653 | import pybullet as p
import math
import pybullet_data
import time
import random
import numpy as np
import serial
def radToPwm(angle):
return ((2000 * angle) / math.pi) + 1500
def updateRealServos(ser, t):
ser.write(
f'#0P{radToPwm(-p.getJointState(hexapod_ID, 8)[0])}T{t}#1P{radToPwm(p.getJointState(hexapod_ID, 9)[0])}T{t}#2P{radToPwm(-p.getJointState(hexapod_ID, 10)[0])-100}T{t}\r'.encode(
'utf-8'))
ser.write(
f'#4P{radToPwm(-p.getJointState(hexapod_ID, 4)[0])}T{t}#5P{radToPwm(p.getJointState(hexapod_ID, 5)[0])}T{t}#6P{radToPwm(-p.getJointState(hexapod_ID, 6)[0])+100}T{t}\r'.encode(
'utf-8'))
ser.write(
f'#8P{radToPwm(-p.getJointState(hexapod_ID, 0)[0])}T{t}#9P{radToPwm(p.getJointState(hexapod_ID, 1)[0])}T{t}#10P{radToPwm(-p.getJointState(hexapod_ID, 2)[0])}T{t}\r'.encode(
'utf-8'))
ser.write(
f'#24P{radToPwm(-p.getJointState(hexapod_ID, 12)[0])}T{t}#25P{radToPwm(p.getJointState(hexapod_ID, 13)[0])}T{t}#26P{radToPwm(-p.getJointState(hexapod_ID, 14)[0])+100}T{t}\r'.encode(
'utf-8'))
ser.write(
f'#20P{radToPwm(-p.getJointState(hexapod_ID, 16)[0])}T{t}#21P{radToPwm(p.getJointState(hexapod_ID, 17)[0])}T{t}#22P{radToPwm(-p.getJointState(hexapod_ID, 18)[0])}T{t}\r'.encode(
'utf-8'))
ser.write(
f'#16P{radToPwm(-p.getJointState(hexapod_ID, 20)[0])}T{t}#17P{radToPwm(p.getJointState(hexapod_ID, 21)[0])}T{t}#18P{radToPwm(-p.getJointState(hexapod_ID, 22)[0])-50}T{t}\r'.encode(
'utf-8'))
def init_debug_parameters():
for j in list(range(0, 6)):
control_IDs.append(p.addUserDebugParameter(f"Pelvis {j}", -servoRangeOfMotion, servoRangeOfMotion, 0))
control_IDs.append(p.addUserDebugParameter(f"Hip {j}", -servoRangeOfMotion, servoRangeOfMotion, 0))
control_IDs.append(p.addUserDebugParameter(f"Knee {j}", -servoRangeOfMotion, servoRangeOfMotion, 0))
def read_debug_parameters():
angles = []
for x in control_IDs:
angles.append(p.readUserDebugParameter(x))
return angles
def chromosomeCreator():
pos = []
duration = 1
force = 200
for j in range(LENGTH_OF_SEQUENCE - 1):
gaitState = [0] * NUM_OF_SERVOS
gaitState[j] = servoRangeOfMotion
pos.extend([duration] + gaitState)
print(len(pos))
return [duration] + [force] + pos
def readGait(progress, chromosome):
global firstCycleComplete
end_index = LENGTH_OF_SEQUENCE
if not firstCycleComplete and progress >= sum([chromosome[x] for x in range(0, len(chromosome), LENGTH_OF_GAIT_STATE)]):
firstCycleComplete = True
if firstCycleComplete:
progress = progress - sum([chromosome[x] for x in range(0, ((LENGTH_OF_START_SEQUENCE - 1) * LENGTH_OF_GAIT_STATE) + 1, LENGTH_OF_GAIT_STATE)])
chromosome = chromosome[LENGTH_OF_START_SEQUENCE * LENGTH_OF_GAIT_STATE:]
end_index = LENGTH_OF_CYCLE
start_index = 0
total_duration = sum([chromosome[x] for x in range(0, len(chromosome), LENGTH_OF_GAIT_STATE)])
progress = progress % total_duration
current_duration_index = 0
next_duration_index = 0
sum_of_durations = 0
for j in range(start_index, end_index):
current_position_index = j * LENGTH_OF_GAIT_STATE
sum_of_durations = sum([chromosome[x] for x in range(start_index, current_position_index + 1, LENGTH_OF_GAIT_STATE)])
if progress < sum_of_durations:
current_duration_index = current_position_index
next_duration_index = (j + 1) * LENGTH_OF_GAIT_STATE
if (j + 1) >= end_index:
next_duration_index = start_index * LENGTH_OF_GAIT_STATE
break
current_gait_state = chromosome[current_duration_index + 1: current_duration_index + LENGTH_OF_GAIT_STATE]
next_gait_state = chromosome[next_duration_index + 1: next_duration_index + LENGTH_OF_GAIT_STATE]
if not firstCycleComplete and current_duration_index == (LENGTH_OF_SEQUENCE - 1) * LENGTH_OF_GAIT_STATE:
next_gait_state = chromosome[(LENGTH_OF_START_SEQUENCE * LENGTH_OF_GAIT_STATE) + 1: (LENGTH_OF_START_SEQUENCE * LENGTH_OF_GAIT_STATE) + LENGTH_OF_GAIT_STATE]
alpha = (progress - (sum_of_durations - chromosome[current_duration_index])) / chromosome[current_duration_index]
interpolated_gait_state = [interpolate(a, b, alpha) for a, b in zip(current_gait_state, next_gait_state)]
return interpolated_gait_state
def interpolate(a, b, alpha):
return a * (1 - alpha) + b * alpha
def resetLegJoints():
p.resetJointStatesMultiDof(hexapod_ID, JOINT_INDEXES, [[0]] * 18, targetVelocities=[[0]] * 18)
p.setJointMotorControlArray(hexapod_ID, JOINT_INDEXES, p.POSITION_CONTROL, targetPositions=([0] * 18), forces=([150] * 18))
def resetEnvironment():
resetLegJoints()
p.resetBasePositionAndOrientation(hexapod_ID, [0, STARTING_Y, STARTING_HEIGHT + random.uniform(0, 0.002)], [0, 0, 0, 1])
p.stepSimulation()
def resetPyBulletSimulation():
global plane_ID
global hexapod_ID
p.resetSimulation()
p.setGravity(0, 0, -9.8)
plane_ID = p.loadURDF("plane.urdf", globalScaling=4)
hexapod_ID = p.loadURDF("robot3.urdf", [0, STARTING_Y, STARTING_HEIGHT + random.uniform(0, 0.002)], [0, 0, 0, 1])
print(p.getEulerFromQuaternion(p.getBasePositionAndOrientation(hexapod_ID)[1]))
def gaitScore(bodyID):
current_position = p.getBasePositionAndOrientation(bodyID)[0]
distance = distanceFromOrigin(bodyID)
angle = angleBetweenVectors(np.array([0, 1]), np.array([current_position[0], current_position[1]]))
return distance, abs(angle)
def distanceFromOrigin(bodyID):
return p.getBasePositionAndOrientation(bodyID)[0][1]
def angleBetweenVectors(a, b):
unit_vector_1 = a / np.linalg.norm(a)
unit_vector_2 = b / np.linalg.norm(b)
dot_product = np.dot(unit_vector_1, unit_vector_2)
angle = np.arccos(dot_product)
return angle
def inverseCurve(x, a):
y = a / (a + (x * x))
return y
def collidingLegs():
for j in range(24):
aabb = (p.getAABB(hexapod_ID, j))
familyOfLinks = [x for x in range(24) if math.floor(j / 4) == math.floor(x / 4)] + [-1]
collisionObjects = [x[1] for x in p.getOverlappingObjects(aabb[0], aabb[1]) if (j not in FEET_INDEXES and (x[1] not in familyOfLinks or x[0] != hexapod_ID)) or (j in FEET_INDEXES and x[1] not in familyOfLinks and x[0] == hexapod_ID)]
if len(collisionObjects) > 0:
return True
return False
def runGait(individual):
global REAL_HEXAPOD_CONNECTED
lastTime = time.time()
global firstCycleComplete
dt = 0
firstCycleComplete = False
initDuration = individual[0]
force = individual[1]
gaitChromosome = individual[2:]
gaitChromosome = ([initDuration] + [0] * NUM_OF_SERVOS) + gaitChromosome
resetEnvironment()
stabilityScore = 0
heightScore = 0
collisionScore = 0
sampleCounter = 0
p.setRealTimeSimulation(1)
while True:
if CONFIG_MODE:
p.setJointMotorControlArray(hexapod_ID, JOINT_INDEXES, p.POSITION_CONTROL, targetPositions=read_debug_parameters(), forces=([force] * 18))
else:
p.setJointMotorControlArray(hexapod_ID, JOINT_INDEXES, p.POSITION_CONTROL, targetPositions=readGait(dt, gaitChromosome), forces=([force] * 18))
if REAL_HEXAPOD_CONNECTED:
updateRealServos(ssc32, 100)
hexapodBasePosAndOrn = p.getBasePositionAndOrientation(hexapod_ID)
currentStability = sum([abs(angle) for angle in list(p.getEulerFromQuaternion(hexapodBasePosAndOrn[1]))])
currentHeight = abs(1.375 - hexapodBasePosAndOrn[0][2])
stabilityScore += currentStability
heightScore += currentHeight
sampleCounter += 1
now = time.time()
dt += now - lastTime
lastTime = now
if dt >= 12.5:
break
hexapodBasePosAndOrn = p.getBasePositionAndOrientation(hexapod_ID)
currentPosition = hexapodBasePosAndOrn[0]
distance = hexapodBasePosAndOrn[0][1]
straightness = abs(angleBetweenVectors(np.array([0, 1]), np.array([currentPosition[0], currentPosition[1]])))
avgHeight = abs(heightScore / sampleCounter)
avgStability = stabilityScore / sampleCounter
avgNumOfCollisions = collisionScore / sampleCounter
fitness_distance = distance / 100.0
fitness_straight = 1.0 - (straightness / math.pi)
fitness_stability = inverseCurve(avgStability, 1)
fitness_height = inverseCurve(avgHeight, 1)
fitness_collisions = round(1 - avgNumOfCollisions, 2)
fitness_total = (fitness_distance + fitness_straight + fitness_stability + fitness_height + fitness_collisions) / 5.0
line = f'ID: {UNIQUE_THREAD_ID} | Time Elapsed: {dt} | Evaluation: {fitness_distance, fitness_straight, fitness_stability, fitness_height, fitness_collisions, fitness_total} | Chromosome: {individual}'
print(line)
with open('C:/Users/Jonathan/Desktop/results_normal_cyclic.txt', 'a') as f:
f.write(line)
f.write('\n')
return fitness_total
def sinusoidalTestGait(t):
coxa0 = (math.pi / 4) * math.sin((2 * t) + math.pi)
femur0 = 0.2 * math.sin((2 * t) + ((5 * math.pi) / 2))
tibia0 = 1.3 * math.sin((0 * t) + ((3 * math.pi) / 2))
coxa1 = (math.pi / 4) * math.sin((2 * t) + 0)
femur1 = 0.2 * math.sin((2 * t) + ((3 * math.pi) / 2))
tibia1 = 1.3 * math.sin((0 * t) + ((3 * math.pi) / 2))
return [coxa0, femur0, tibia0, coxa1, femur1, tibia1, coxa0, femur0, tibia0] + [-coxa0, -femur0, -tibia0, -coxa1, -femur1, -tibia1, -coxa0, -femur0, -tibia0]
def evaluateGait(individual):
lastTime = time.time()
numOfPhysicsSteps = 3000
samplesPerEval = 100
stabilityUpdateRate = int(numOfPhysicsSteps / samplesPerEval)
stabilityScore = 0
heightScore = 0
collisionScore = 0
global firstCycleComplete
while True:
dt = 0
firstCycleComplete = False
initDuration = individual[0]
force = individual[1]
gaitChromosome = individual[2:]
gaitChromosome = ([initDuration] + [0] * NUM_OF_SERVOS) + gaitChromosome
resetEnvironment()
for ii in range(numOfPhysicsSteps):
if ii % stabilityUpdateRate == 0:
hexapodBasePosAndOrn = p.getBasePositionAndOrientation(hexapod_ID)
currentStability = sum([abs(angle) for angle in list(p.getEulerFromQuaternion(hexapodBasePosAndOrn[1]))])
currentHeight = abs(TARGET_HEIGHT - hexapodBasePosAndOrn[0][2])
stabilityScore += currentStability
heightScore += currentHeight
collisionScore += collidingLegs()
p.setJointMotorControlArray(hexapod_ID, JOINT_INDEXES, p.POSITION_CONTROL, targetPositions=readGait(dt, gaitChromosome), forces=([force] * 18))
p.stepSimulation()
dt += 1. / 240.
hexapodBasePosAndOrn = p.getBasePositionAndOrientation(hexapod_ID)
currentPosition = hexapodBasePosAndOrn[0]
distance = hexapodBasePosAndOrn[0][1]
straightness = abs(angleBetweenVectors(np.array([0, 1]), np.array([currentPosition[0], currentPosition[1]])))
avgHeight = abs(heightScore / samplesPerEval)
avgStability = stabilityScore / samplesPerEval
avgNumOfCollisions = collisionScore / samplesPerEval
fitness_distance = distance / 100.0
fitness_straight = 1.0 - (straightness / math.pi)
fitness_stability = inverseCurve(avgStability, 1)
fitness_height = inverseCurve(avgHeight, 1)
fitness_collisions = round(1 - avgNumOfCollisions, 2)
fitness_total = (fitness_distance + fitness_straight + fitness_stability + fitness_height + fitness_collisions) / 5.0
print(f'ID: {UNIQUE_THREAD_ID} | Time Elapsed: {time.time() - lastTime} | Evaluation: {fitness_distance, fitness_straight, fitness_stability, fitness_height, fitness_collisions, fitness_total} | Chromosome: {individual}')
if not math.isnan(distance):
break
else:
print("PyBullet Glitch")
resetPyBulletSimulation()
return fitness_total,
MAX_MOTIONS_IN_SEQUENCE = 4
NUM_OF_LEGS = 6
NUM_OF_JOINTS_PER_LEG = 3
NUM_OF_SERVOS = NUM_OF_LEGS * NUM_OF_JOINTS_PER_LEG
UNIQUE_THREAD_ID = random.randint(1, 10000)
LENGTH_OF_CYCLE = 12
LENGTH_OF_START_SEQUENCE = 2 + 1
LENGTH_OF_SEQUENCE = LENGTH_OF_START_SEQUENCE + LENGTH_OF_CYCLE
LENGTH_OF_GAIT_STATE = NUM_OF_SERVOS + 1
STARTING_HEIGHT = 1.375
STARTING_Y = 0.01
TARGET_HEIGHT = STARTING_HEIGHT
firstCycleComplete = False
REAL_HEXAPOD_CONNECTED = False
CONFIG_MODE = False
ssc32 = None
if REAL_HEXAPOD_CONNECTED:
ssc32 = serial.Serial('COM3', 115200, timeout=2)
control_IDs = []
physicsClient = None
if __name__ == "__main__":
physicsClient = p.connect(p.GUI)
else:
physicsClient = p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
plane_ID = None
hexapod_ID = None
resetPyBulletSimulation()
programStartTime = time.time()
servoRangeOfMotion = (math.pi * 3 / 8)
JOINT_INDEXES = [x for x in range(0, 24) if (x + 1) % 4 != 0]
FEET_INDEXES = [x for x in range(0, 24) if (x + 1) % 4 == 0]
p.setRealTimeSimulation(0)
print(f'PyBullet Instance ID: {UNIQUE_THREAD_ID}')
def main():
init_debug_parameters()
print("START")
runGait([0.7387319894185713, 147.82355001579506, 0.6305372633499426, -0.6089588304954848, 0.8912756231059142, 0.2812261582743101, -0.5713260153151064, -0.17272091345083518, -0.011128621604706623, -0.802059235021269, -0.07153954960452258, -0.5428904527014263, -0.04381109750503209, 0.09113787494874881, 0.7723570365515549, 0.1241992107232388, 0.8337401506855846, 1.115458030805498, -0.013540807256189336, -0.5839520097163835, -0.7340746975491128, 0.5868023045338302, -0.9298983610216932, 0.5438917235683132, -0.05782837894738324, 0.4198029392031244, -1.0399101725816757, -0.06612708025138092, -0.5082125750188753, 0.9201548240864632, 0.06444257109533891, 0.3314957278066273, 0.43523252410016, 0.0101257284239189, -0.3455692282668785, -0.11991854934521307, 0.8938694635098376, -0.5612059600360004, -1.1311528570896223, -0.5932545380125831, 0.4344991139274895, 0.3428898066225319, -0.2906991777632436, 0.48606253534779564, 0.5357348004370723, 0.08998319318443504, -0.9267085738943713, -0.8937243514792317, 1.0577416788427096, -0.37068573549717954, 0.9165099103165408, -0.8428317356586791, 0.6907079780081538, -0.763945038433602, 1.0409972769402867, -0.7930695812727582, -0.45187653759465174, -0.5161324587418127, -0.7914439627122227, 0.833033591113862, 1.0408039580092332, -0.05381107166535859, 0.8438281153722502, -0.0387362598590002, 0.6164861225837377, -0.6286851880995831, 0.8640915900752995, -0.7744000726632477, -1.1733643185821832, -0.09815300085455836, -0.2477118609768313, 1.024101066375414, -1.147511226358933, 0.35649235792115364, 1.1024258715004915, -1.011618835769622, 0.5915335712528709, -0.030590833481361157, 0.21394935583673327, 0.2677965469007608, 0.5549362872301691, 0.2695802858891776, -0.8655473520001171, -0.13250526441705102, 0.17727687014444649, -1.070467039423309, 0.09651563983885625, -0.9558246154936185, 1.1511036912990131, 0.8111082334542412, -0.3165391333624401, -1.1028022950228613, -0.8702156791239426, -1.1681706777717666, -0.652290858655339, 1.003148181057313, 
-0.10090114268197192, 0.23187888015208769, 0.5941647728264801, -0.43999609085011204, -0.11509272070881571, -1.0798002236171276, 0.018290046530861526, -0.7279320899826196, -0.498825849932375, 0.5922026329566983, 1.1770495895717317, 1.1658461699766112, 0.5387616073370702, 0.6762210875494419, 0.564309749770725, -0.3035549596906124, -0.23885528257994526, 1.1072615720375825, 0.5666318535111361, -0.45569851974439834, 0.8338190610059566, -0.6359449813770147, 0.2596402577409722, -0.7767216770530929, -0.90418267806025, 0.113288160612949, 0.39315211887973467, 0.15879221931780196, 0.758361875600458, 0.8700712002631037, 0.306520197643136, 0.7532325435435356, -1.0353300637178853, -0.4455790005356547, 0.33046558165864237, -0.41986999994668306, 0.773773975624336, -0.5730775662391308, -0.32242207870145256, 0.5695427482221903, 0.06540060708986029, -1.1068765041634638, 0.8444999211248407, 0.04543079459398691, 0.4642442589105744, -0.6039790052127436, -0.892455957263908, 1.1129699696404938, 0.342772182719143, -1.115584864083039, 1.0625540212723195, -0.057194100238716405, -0.5879196602166177, 0.5790752639491412, 0.6440806383356259, -0.7481329504140624, 0.20534187872140564, -1.0990982256561714, 0.2791331755311888, 0.20300461353714883, -0.8197464538582441, -0.7741517445104196, 0.36122048253607164, 0.813782786457911, 0.39582928554897623, -0.02580819987456784, -1.1628778529586477, 1.0745713708553488, -0.5089798478319643, 1.0062303026439694, 0.6478357664888437, -1.1138156319365986, -0.4955658167644643, 0.01673202498902171, 0.9162968925255734, 1.1449260986124963, 0.45197676369281314, 0.4913407885919339, 0.9059066063082057, -0.6513168739283108, 0.08060475225758434, -0.8062943072398908, -0.5854814411007928, 0.8888342908698426, -0.9445568643031321, -0.7753945536759548, -0.3003503278781188, 0.6951193721206237, 1.0356586073990068, 0.8830749880175515, -1.0664223102877843, -0.609899276809712, 0.8167470737757756, 1.038925181199521, -0.5200440777902839, 0.4128415160980885, 0.8988517426568858, 
0.23012308000225246, -0.981407304217973, -0.6000864286294282, -0.8302474366129275, 0.3022460425741058, -0.7232702813935017, 0.3225916050209834, 0.1690591643089261, -0.731263207027456, 1.0793778048303206, 0.6724712011076479, -0.7393802772190122, 0.52180702196962, 0.653704773120031, -0.8435500860065721, -0.503370357216786, 1.0089409411880252, 0.8239113158523748, 0.5789158304017497, 0.8017043064577623, -0.81666613981439, 0.4674783795387365, -0.44533480654686275, -0.4893466194479631, 0.9007928200059672, 0.02483073138245584, -0.5944238649035959, 0.28518215854040774, -0.24733421237552355, -0.8505607276413669, 0.5571358775523416, 0.9045395124615454, -0.6657820640979742, -0.9652597006250097, -0.4591179819423816, 0.05481742210849316, 0.28907992231884405, 0.7124381991670108, -0.6030190226588272, 0.3369808339189879, 0.3038297672106993, -0.995236450812498, 1.0777162746217996, -1.1439340008569936, -0.7047632734591232, -0.532467147310556, -0.7640334064115123, 0.9758524528369246, -0.24433882388927364, -1.019693761734842, -0.11397606855688958, -0.26140300759258983, -0.5755519362573679, 0.15416561480192903, 0.8157008073605059, -0.988078686018823, 0.7532529999268318, 0.31269649944522704, -0.34261705061474756, -0.8905985767822926, 0.6096508828791698, -1.0668100653950248, -0.379130418894862, 0.9096912331462104, -1.001701666565123, 0.6416783457901682, 0.14610426544728483, 0.7031005674547539, 0.4247842879063306, 0.4021107592425244, 0.8928714878416208, -1.089585355806771, 0.5386513324462168, 1.043195877742578, -0.9701398946532979])
runGait([0.6109376305551025, 147.5781875317787, 0.8620634914832349, -0.5626015076241742, 0.9150007577136691, -0.20417512686545095, 0.6544359094278946, -0.31327506130394855, 0.8452968857390276, 0.13887431059938735, 1.1771529929786608, -0.9178071433237085, 0.21286308614656976, -0.6984312985937364, 0.8250658263071654, 0.38678895878185166, -0.8386364979015601, -0.8324431895189177, -0.31964574670169177, -0.9513705765809792, 0.7833723510749697, 0.9633303649676936, -1.077086285278876, -0.5823511574760045, -0.24329005344133708, 0.36075110180937114, -0.8737875239530779, -0.7120336903431772, 0.9694421297627523, -0.681817972163381, -0.7263666665964092, 0.04202279641735396, 0.5376884588766628, 0.5528900104757648, -0.3762309750477318, -0.5347146669245599, 0.30309856425260817, 0.02701219403931735, -1.1761371301420371, -0.3097959495083542, 0.637250448114133, 0.45383108548435047, -1.0293681131385823, -0.34337946728402396, -0.20240409776563223, -0.30376152527443845, 0.18856656091055635, 0.13958997918335925, 0.14259244107987795, -0.3669671234254508, -0.1371355859726133, -0.3999333309862724, 0.08190816860672162, 0.9531999241577855, 0.4305115008592596, -0.15969404241405846, -0.10706207687230784, -0.5875234717318174, -0.8888652093021814, 0.7018823816096099, 0.0097155460541066, -0.31852823774787353, -0.7161533552571332, -0.6946183251839217, -0.44771872458142625, -0.8020418614747353, 0.08697562850763507, -0.33670122865676794, -0.016255823485752413, -0.44553776220063407, 0.7067040729755709, -0.3305529141109416, -0.12353152419246438, -0.06405724257227025, -0.4289379681288039, 1.0169932699634694, 0.9219679687362414, -0.07926430997569933, 0.8461155106368546, -0.5108459915920485, 0.01721461698106075, 0.25640166227431, -0.0012638280470829346, -0.32081211955480904, 0.965518227098844, 0.07977377006028907, 0.9914084076788008, 0.9368602392194756, 0.79622005627768, -0.12120211619815363, -0.07642712418177038, 0.15250148243132808, 0.8820133072304428, -0.15324005900457768, -0.012947970781577268, 
0.5314107654179234, 0.2657806659207431, 0.21867155408318162, -0.5645131510256867, -0.16370059560939496, 0.2210581088064703, 0.39055811273202895, 0.2826802498295499, -0.4229943862011821, -0.835900738908754, 0.9612898958738532, 0.9962752356339487, -0.053303884261599155, 0.30951330649963815, 0.34386442126447203, 0.3167278260159818, 1.0850909905354877, 1.0088643013546652, 0.6148040192088533, -0.32713499022688375, 0.13265347253599408, 0.729050651796031, 0.4385817170037452, -0.8104814221892234, -0.08204642341024995, -0.429968478624448, -1.1469995240847928, 0.05053455747353239, -0.6868806082011671, -0.7681702363623998, -0.6240472813916106, 0.8999904570363008, -0.5540755976757679, -0.1395815200095187, 0.7216419755694113, 0.00341887019974102, -1.1314219417339941, 0.47079825372218626, 0.38634641962439187, -0.4969326894940178, 0.9700897618329442, 0.31738524085942643, 0.5918654468120178, 0.0649345288163781, 0.9223422749742548, -1.062672657821793, 0.30896020749191433, -0.28885102187939604, 0.5103642056497637, 1.1586385214620625, 0.47011741255653366, 0.7362591411689603, 0.695808261288789, -0.6331424834334121, -0.6156728717869878, -0.6958300056305404, -1.1223768304439534, -1.1079218078030504, 0.4832955305303133, 0.7872713684414188, 0.23742695514821588, -1.0325192716558655, -0.5035525254625557, -0.11125895540865731, -0.06707968995471164, -1.0901891398628312, -0.05185746626132734, 0.17939884745186596, -0.7629342237736646, -0.25568469662030346, -0.3436266275846107, 0.5234963038843524, 0.532265503526556, -0.6045831852953002, 0.9974128911214244, -0.17925607028201557, 0.5791020459001643, 0.6873833790287516, 0.21880371572846155, 0.11009481032702205, 0.12865186069194162, -0.3268975846759916, 0.02259596959226029, 0.5559932137864555, 0.5932843214246097, -0.3710969455539212, 0.14529725265287333, 0.7044006452814845, -0.008852974292849092, -0.6124416737681215, 0.9682131380447476, 1.1375649719691259, 0.37091879445775916, 0.9352732490378375, 0.1539095660283667, 0.777440000897248, 
0.22606717389632985, 0.6069013838761509, 0.30093397706517244, 1.1442039256026204, -0.7161426712243226, 0.588887225888025, 0.6972839960175987, -0.3500160949784321, 0.5128375679350539, 0.7935689766031192, -0.19559794779993978, -0.6253604887410248, 1.0145936629813255, -0.6706586307879839, 0.003436295592896238, -0.417246076528322, 0.8556308147276876, -0.209938526431461, -1.049104873280623, -0.33207489467651996, 0.6814354585137341, 0.3417057443470919, -0.059172559496654564, -0.8715300782572717, -0.2556518530893984, -1.0671471245233821, 0.3614377209651649, -0.15680078741943126, 1.166195067305183, 0.32081449773971193, 0.18756280575004247, -0.19985672490920128, 0.7805915689741869, -1.0536988894142132, 1.0857317947415768, -0.48900363536886143, -0.425774798688516, 0.17741903723193003, -0.1303947078559029, 0.9502549942826287, 0.5361055442035286, 0.7290061426453971, 0.29795698418990374, 0.26959259813541037, -0.8620031075888178, -1.0011321698786917, -0.48116523293039026, -0.3455270947267371, 0.4655510054575944, 0.21073592473488212, -0.7393279519987309, -0.7331986835493073, -0.19722904469418162, -1.0802395643312521, 0.2934313950761999, 1.1402520649753152, -0.9086071535929683, 0.02395654499079075, 0.3684164909317418, 0.5614399969048851, 0.9946642592430968, 1.124476487906898, 0.19575442149823322, -0.24851818064192416, 0.3905054965095121, -1.0073781554239616, 0.44320833092972917, -1.167994336275549, 0.6853930357205337, 0.09224681578137976, -0.9244241472672434, 0.32327571642704045, -0.08511712913634017, -0.1417785078648055, 1.0629206015460837, 0.3359763967683185, -0.30410336954697426])
runGait([0.6640463205265905, 140.52675516067927, 0.8199810222722016, -0.25481952456604723, -0.3425457104430286, -0.29895559122008386, -0.03691952728541809, -0.2926749796378846, 0.32589540084946145, -0.917897553277117, -0.1788579132316635, -1.1021614115353857, 0.16879565842355682, 0.9654926553955153, -0.8262395642035657, -0.032366000748803886, -1.035706777515601, 0.421355764319076, 0.7572712816111169, 0.20885822322721553, 0.4327161270350611, 0.9459811540427878, 0.8261945712691102, -1.1252075254946101, 0.47151105047328135, -0.20370646005147414, -0.17791531392877594, -0.1951822044066674, -0.3347713397441206, -0.8437477462605121, 0.6043792513174889, 0.8135465977213642, 0.1161484116712968, 0.2520589879602344, -0.013875994605011654, 0.6015180797865725, -0.6176009285822545, 0.12004417622115887, -0.9961965091337416, -0.3125071727309118, 0.8937107026868603, 0.08912944387701997, -0.06790816311341702, 0.22325347059753536, -0.09025762128409298, -0.011920073737759995, 0.35752949513101395, -0.45906903012374745, -0.788562694194514, -1.0050198450114056, -0.0361372172292605, -0.5954885246272503, 0.26895619166201407, 0.27184604863769407, -0.37353950677917513, 0.40112354341843237, 0.6375471269499228, 0.3141153522934928, -0.9509274229231759, 0.8779304554614856, 0.04594048324209277, 0.2495420932435201, 0.08241559351660238, -0.17642988998764342, -0.4447938485357037, -0.29330972785416387, -1.1174363806073722, -0.9616482147806933, -0.11200817025193116, -0.2108973739829038, 0.7633783825024583, -0.3175711563677784, -0.055385576075883194, -0.23831266165748738, 0.08104352235723783, 0.023356401512964642, -0.24320747848452665, 0.5859836965526846, 0.5423632994485224, -0.1667373762787313, 0.28209152742387117, 0.57278826903801, -0.2032795427955932, -0.4025112301636612, -0.5183539956075682, -0.19265157313585188, 0.17583720556492372, -0.5476355988504892, -0.6627234804934719, -0.11118988812304811, -0.7805070621113364, -0.4529826769989489, 0.7097466060065737, -0.620126964770608, 0.17005073215107458, 
-0.310351639008707, 0.7097004232177474, 0.6271790085262221, 0.619307086397858, 0.8131713488548474, 0.1497738579998278, 0.02526583014944417, -0.20005273447658806, -0.5474277783268228, -0.9924782021468856, 0.5028217189827595, -0.19263703499936438, 0.20588243107956172, 1.1385437676064627, 0.23883082558624708, 0.004571684392304376, 0.44876050879385027, 0.12812072388391504, -0.24208052529798593, -0.11013899281343525, -1.1196512365364517, 0.9507652902470094, 0.4545046069675115, 0.011125408478537602, 0.6549444015747156, -0.9034580752892476, -0.816680671954233, 0.42267629830319997, -0.610277202583551, 0.5500134814297155, -1.0434024414406398, -0.1937217218579826, 0.5755930968565269, -0.3839059765475241, 0.5719780788906925, 1.0347321090545003, 0.44370082946255346, 0.876765248918222, 0.2923665942100978, 0.9325649935073932, 0.5916765606714397, -0.8204301829871634, 0.5375111299520478, -0.9513662298918846, -0.48330360663587535, -0.014861243621365067, -0.7494281866976655, -0.08825309305806134, -0.9149440954101552, 1.1656564955027569, -0.319007800832239, 0.24788655005464028, -0.3902252669018448, 0.37813696098203525, 1.0257086358944083, -0.22782064462614982, 1.0142953972331836, -0.09326047201660298, -0.9608786031964357, 0.7922821770960228, -0.5752078753307402, -0.8741277024150452, 0.42074556397962476, -0.17088790293716782, -0.27812595030330334, 0.16024650373276178, -0.9015926997014683, 0.16765286991610046, 1.0745410173910972, -0.6109232086797313, -0.6866105711698087, 0.9586175739077698, 0.36069963154315404, -0.8179673245547332, 0.28221064021521486, 0.07881056395367772, 0.9621841067996804, 0.32611659794735176, 0.44963057094757963, 0.07055641109546117, -1.0165648669301155, -0.03825226786436686, -0.2562167100164272, 0.7081096623524648, 1.0925864874641888, 0.7985534527556107, -0.576117648391484, 1.128905296865967, -0.6836316776207304, -0.2843403105351967, -0.2875350090436179, -0.8884226806370785, 1.0867043497721995, 0.15936102064610103, 0.7213678990932757, 0.20189075328906597, 
0.21909012756117713, 0.35081305063305934, -0.5533491342604271, -0.39139018059751557, -0.6063198386438513, -0.5324644497030327, -0.9211774284351677, 0.8014720982812371, -0.5110042508153627, 0.9729234799472499, 0.6198329213846822, -0.7172777918874114, 0.71393306635313, -0.8845569787211464, 0.8037248436039608, -0.4005762723352054, -0.41068663652781295, -0.12408256332477458, 0.2156827075982222, -0.5842278773022845, 0.5993604325472786, -0.1714832792903668, -1.073883900675711, 0.5300023789111741, -0.393565753380665, -1.1077691155993767, -0.19722587955218868, -0.10417172968555885, 0.6584848022547337, -0.5329152183955796, -0.3950555151941949, -0.10882724704645816, 1.1644009182829218, 0.1641879666963921, -0.6681618516093505, -0.927583868963644, -1.132447578431215, 0.9601897942960477, -0.3719235237813504, 0.568283689266319, 0.30050241478858897, -0.40449584730595933, 0.4561858885619927, 0.2909602044742249, 0.32523163443121184, 0.42755122870221796, -0.43174561514948073, -0.6752553468030823, 0.4204116282761027, -0.33022122289804556, -0.10054716261882779, -0.4499960947229804, 0.5718195749224134, -0.7769045028417172, 1.119286880787453, 1.0339119406352926, 0.05274713166468337, -0.6460257891457617, 0.8332178051984421, 0.03159851737606184, 0.4659457011446515, -1.1466040486387152, 0.40969329358334694, -0.055850231800543215, 1.0507510732129994, -0.05830916713550008, 0.6673647072199382, -0.93841995086702, -0.5252036523060817, -0.4085500615196542, -0.04419643170246719, 0.5892251042672283, -0.07265048037271993, 0.6495073424232363, 0.41894378985827774, -0.12136863342675985, 1.0354604832131513])
runGait([0.9676294826559563, 150.70882987508452, 0.7774265785769597, 0.30438411789744857, -1.0457358937004009, 0.06569446508772794, -0.15498365774078185, -0.2377622141182121, -0.5523307256530675, -0.08686467446610878, -1.0526089743219282, 0.8693892350692811, -0.17802396330235765, 0.984292423423254, -0.2150566341031429, 0.06941172841331614, -0.3042670031127815, 0.9739683090160918, 0.3387525680108089, 0.16403610960584877, 0.3348632802624376, 0.726765357781341, -0.9126961896061363, -0.7229583630620422, -0.3051251400627612, -0.18571836602256658, -0.31864411222929206, -0.19706640874261128, -0.11173710313723983, 0.3565369869153394, 0.1971428113859381, -0.2763605210478526, -0.10988448914397031, 0.4865331848012372, 0.19579861099733442, 0.0985008925024958, 0.10508611443908723, -0.5180802186096138, 0.602575401604621, -0.8201174052127542, 0.9018628672824313, 0.0498784872337901, -0.521071153653444, 0.4481479231233141, -0.2524266051402479, -0.253402947342113, -0.3651587489535104, -0.04675187762525251, -0.5284469398758529, 0.2654544927821044, 0.46605508251848005, 0.04966663957014836, 0.2745167545365336, 0.4868187826011344, -0.1497691281628623, -0.19339266529087248, 0.24136863629471383, -0.43924814954512725, -0.6263279786320504, 0.8484699492362705, 0.1585129887321573, -0.2588768245023081, 0.4805335508720735, -0.23474437738054899, -0.33209420118866895, -0.8818001009909548, -0.3070185272881074, 0.32004109284523, 0.6027386544692759, -0.6121775505303508, 0.4490581540464468, 0.5642596329992625, -0.04982483368519975, 0.12126979404157245, 1.03751465671718, 0.3745312020424947, -0.047861781623897426, 0.23215652576049822, 0.48636218211863, -0.1235050817634415, -0.43294538682063655, 0.25467725985917183, -0.3065392897369155, -0.1557890602814631, -0.1535446617220671, -0.594059819196811, 0.2979599287648182, -0.47393639849675195, -0.1498269458086204, -0.5013840640153254, -0.4272045457213613, 0.41099576969282453, 0.435974275195693, -0.4947517141811753, 0.5589410579791683, 0.1652474308641266, 
0.40923547743201916, 0.8096533414423498, -0.1614737073645072, 0.1666180159560079, 0.11988387794171795, -0.07807848102820575, -0.7442135781834419, -0.34406828342523743, -0.05221941010307851, 0.3010681255245467, -0.5477604345796725, -0.48790074433118147, 0.5210477108826119, 0.8054612805115671, -0.06963557072433436, -0.15839013687807313, -0.8487419175268571, 0.10322897440577106, 0.28014718041568193, -0.34631485812371793, 0.9430752549792686, 0.47027663953356336, -0.5794821932554436, 0.4692452807674205, -0.11158040045677747, 0.27992277267081245, 0.3776130667252946, -0.6369352430800613, -0.2996620853089707, 0.15891663821065996, -0.2687942812407239, -0.4471464630512332, 0.12797248611817225, 0.03778787082978413, -0.04817441866063496, -0.14364337110596323, 0.2456276684919766, -0.2193460148147342, 0.309322461049816, 0.8838971330552886, 0.9139067446448306, 0.7520794039116794, 0.3506023525822065, -0.25510377807771967, 0.16331308210797202, 0.8879935062411524, -0.5959693041241926, -0.20759495151814522, 0.2108418708456552, -0.7452310360910046, 0.12664881554927482, -0.047080807460529284, 0.4692849434023498, 0.7458543281348985, 0.8107049435101739, 0.7656829635908563, 0.2585428684080011, 0.9841469724781623, 0.8620949405960008, 0.4870555312544564, -1.122938689793093, -0.35529359973828545, -0.327327233145238, -0.3160934335831116, 0.39591363428761267, -1.0579727707301056, -0.31077112675144547, -0.7938933327626483, -0.3590902641595922, 0.19493924076014235, 0.5890886736882655, -0.009036129790468118, 0.596132945850858, -0.03994141013499769, 0.07806084523888342, 0.5692918377135482, 0.4974953937971758, 0.3775889672978954, 0.12873405457093853, 0.3895539355393124, -0.014462890465162359, -0.55425962690001, -0.6132736305766234, -0.41102081373534033, 0.10835255920191245, -0.39706623995337986, -0.05463237156533987, -0.8821361662438925, -0.4293498179030984, -0.44754327412891415, -0.1880089783045697, -0.7609470792799706, -0.3607734401106461, 0.7228114557048017, -0.32350458406045907, 
0.003881601223850173, 0.4645169228853948, 0.30933674367871833, -0.5511008060190785, 0.019215497706641993, -0.1901675903409386, -0.2365943643295504, -0.8261367539601678, -0.5582895426381002, 0.8430889595735926, -0.10476902705139379, -0.7224029312799556, 0.27571772103786507, 0.0885536238095041, 0.015217124065685712, 0.3616330436036972, 0.4495807925732856, -0.25846532959546853, 0.02789168870877641, -0.30766351675851644, 0.4048762627077507, 0.27121677550071394, -0.15171546326311514, -0.7757535339112696, 0.07895603889692605, 0.3394112118661255, 0.2762139076230077, -0.8609718868797998, 0.6072119320035823, 0.5713716255007126, -0.6528469102896424, -0.10679737275320846, 0.05933745485535778, 1.0330880833994396, 0.3798427665421174, -0.05376631490501693, 0.35154721956479057, -0.2278546286708691, 1.1215415803557947, 0.37551720594302973, 0.20578900503472414, -1.101421780287494, -0.16904074302935618, -0.3137973325657055, -0.31107743259952253, -0.5054336950181983, -0.6015459471353947, -0.30583499460877933, -0.5710653400947024, -0.9093940918050203, 0.4258975120803453, -0.21935467585339385, 0.7752057901473814, 0.9048914510700778, -0.7846269882405049, 0.42094152311983585, 0.579829888624975, -0.17584369852195275, 0.3434019156849982, 0.4575688137719436, -0.14604013456644133, -0.3645678519465499, 0.369472539160814, 0.8790080502109889, -0.19650675936686374, -0.18752237139683797, 0.5586386030306622, 0.9508801265095614, 0.9411240544168558, -0.345415755136577, -0.14676816884141242, -0.6025122074152638, 1.0662310264379242, 0.18388246055206153, -0.07562722289757418, -0.6679498245650091, 0.18818487201688333])
runGait([0.9348218778087588, 146.91267362984482, 0.8744564653177251, -0.2311034676019011, -0.0162125073980263, 0.23441739730394234, -0.04068722081101439, -0.01314909472419457, 0.4223694738439635, -0.25574717281863013, 0.11241756397861355, 0.863412621923713, -0.5039035218187172, 0.8097751196961653, 0.5091863473931626, -0.10148835532593689, -0.28922562386776374, 0.5858340382268268, 0.43524750474759666, 0.23821865739642933, 0.46385381613992593, 0.9239038510623541, -0.4014932024069757, -0.7366198054221755, -0.26224195420212393, -0.26567675307257604, -0.6406999273924185, -0.3947291539512541, -0.13202897393736873, -0.36868075869905625, -0.05946923101403889, -0.27726495980956967, -0.5626455668474689, -0.011352949007235264, 0.229512162107839, 0.39705904834877165, -0.4006127993614362, 0.45741277302226363, -0.04872220530283761, 0.939231875796499, 0.9244874997661925, 0.421354026869032, -0.30719169912092076, -0.28374626104543155, 0.036224616108006924, -0.25044075786835196, 0.45388624765306124, -0.2717804606894174, -0.4275921887976257, -0.6646840055520035, -0.14474013946854764, 0.13888294858271502, -0.6177532030009414, 0.2746798600901093, -0.12203515742935869, -0.2628136886221271, 0.38679528157113163, -0.19822470557065894, -0.8884013180032637, 0.9228748319499014, -0.4928839251159022, 0.27037805010739346, -0.36086268564047347, -0.24969367306344575, -0.3336638120272727, -0.19129594916608794, -0.5412798207097986, -0.16378037787753816, 0.8380380076966655, -0.5602998817983854, 0.18838739261107923, 0.5039137894474521, 0.21024282762391727, 0.08436579427566918, 0.6355601001466065, 0.5267180582633789, 0.15087882211657277, 0.2588579542917172, 0.8159804644068003, -0.38735187036573715, -0.40374868821921456, 0.3176228472810542, -0.29327017193023885, -0.5501866453799408, -0.7389217625605805, -0.5615715453807273, 0.3712336142992907, -0.41428358751812816, -0.10726134390360059, -0.4099102222197733, -0.09159826087452744, 0.19893389971361133, -0.13132545262706974, -0.7570399899916764, 
-0.04170327254544387, 0.6914142826643184, 0.398856999424931, 0.9233641685403612, 0.3554904000885851, 0.07458506209142986, 0.21135000375679375, -0.23744305079600145, -0.013330649851585474, 0.01157025728448935, -0.7142922193662176, -0.057521311631223385, -0.6442594978866643, 0.1356536272774043, 0.9557393672331027, 0.5233808828963422, 0.11374875444427712, 0.09649391854350325, -0.4504172430652743, 0.6539212469538538, 0.1418178498877025, -1.0285250393762446, 0.8140662921364629, 0.22032729153232467, -0.3131857222772149, -0.48190358196718097, -0.6137839859402877, 0.4530943681506018, -0.24490264309157425, -0.7036205243667922, 0.22880630883881176, -0.9353768616975839, -0.22729920059617795, -0.3986687624180637, 0.3771370805444293, 0.2962185862198067, 0.2904151502090839, 0.41187261747985526, 0.5568412571554673, -0.12906786905832562, -0.29816090327711275, 0.6695654120828216, 0.6511164416374783, 0.2354423280825553, -0.2735118074560536, -0.09228336036973724, -0.8324667409784399, -0.5181961380658293, -0.29006499483710846, 0.20790847562845022, -0.5417341319611597, 0.38491216942962997, -0.26276464862820204, -0.4194681593642782, 0.18074940042710627, 0.3028577666580481, 0.7988109336459481, 0.3168960795244507, -0.06708903960865052, 1.065337334032617, 0.5121231247483485, 0.4516447688051512, -1.1319252709654564, 0.36013046523298803, -0.34875120783540486, -0.4784054810104189, -0.6383425055163587, -0.6044113298680207, 0.44898870301162985, -0.8409890804695659, -0.13518983270380686, 0.7206151228912457, 0.6402573302424278, 0.10412067706361405, 0.054761428387725804, 0.7078674741419084, 0.6835347252533357, 0.21226096258043248, -0.3590529214883623, 0.4262896753265209, -0.048765644390856705, 0.38707830574861446, 0.084150534306334, -0.3647673644633279, -0.33591157304158037, 0.543209090584881, -0.9227930221772509, 0.2793779445429621, 0.11437608190319354, -0.7320684310072323, -0.6016577518295898, -0.45131872796737754, 0.05815823303022799, 0.8858937450029887, -0.0019911283316581506, 
0.9972537913440365, -0.03610523718702252, 0.7079473862344268, 0.2573924393198488, -0.04475989790206414, -0.046463632383485554, -0.5773905293012538, -0.21619326250264764, -0.18188721111452147, -0.07658954437605886, -0.7681077839396901, 0.09322698951176928, 0.1445427413363699, -0.26358485945073823, -0.35945630531652395, 0.6607553408730602, 0.5395749627096668, -0.06565247358481555, -0.13117986498165218, 0.6166842950843242, 0.4062530279955314, -0.32413286459352975, 0.3382214897259326, 0.060478191467047124, -0.30111192010527943, -0.6852023659365898, -0.6491951557870825, -0.6082929168718781, 1.0411226013092971, -0.6704184953876647, 0.2345803856056618, 0.4849343305291035, -0.18728401603681968, -0.19357983579761226, 0.6070055818353226, -0.1077936670840348, 0.7443380029642911, -0.17385870614262244, 0.4822669757853692, 0.389397991023952, -0.5111936026821581, 0.20633197759969002, 0.1718434261981503, -0.4672185062811099, -0.17236565565445167, -0.13557244311953168, 0.15571859432119767, -0.7500101603166999, -0.23515066300234422, 0.13714967863221472, -0.5267606306271126, 0.13519136956396957, 0.02524328959004487, 0.5235642763509845, 0.8384859468187118, 0.8448970827991501, 0.8107454533312748, 0.5496009157719298, -0.7861009121069743, -0.6234808705813863, 0.4696984323447554, 0.47429018972121284, 0.623971023164932, 0.19594794245176425, -0.08196145489238126, -0.44583013816552275, 0.331803658493219, -0.8110909742267071, -0.21348457194384118, -0.2793726604175222, 0.14931602985100503, 0.12574273195266228, -0.09011644679991501, 0.42322007305737674, 0.4167395715073748, -0.6212842551478526, 0.7246903575782073, -0.06965771001708732, 0.6997039268852637])
runGait([0.9580170156701137, 156.55023833080287, 0.8697263186451583, 0.41527680177951753, -0.3091878185605595, -0.6992501726886724, -0.3621632626756743, -0.08866639808357406, 0.04011602722874327, 0.07026709630165444, 0.08310069123845333, 0.9771964256154151, -0.400666267244531, 1.0176212186192009, 0.4686604002686661, -0.05444682325152628, -0.3446318452762033, 1.167921573447584, -0.24084550251954484, 0.11520414858809458, -0.9064091144422616, 0.6287846280182579, 0.4042274363728242, -0.17984894184261202, 1.1430310808904414, 0.12829599754811477, -0.2870163134708252, -0.18392037110494033, -0.1249514559839385, 0.40824155792459593, -1.0366104970327679, -0.21924341218226873, -0.10471741599076292, -0.039709787387529435, 0.24308506785995695, 0.5029106171652411, 0.00735981801132328, 0.46310261520766255, 0.2962573675541653, 0.5716571352437573, 0.9229307062858603, 0.4495458291143761, -0.32374844918493895, -0.5428033664800668, -0.1061657887085444, 0.09327418752618749, 0.1360933416106767, -0.05729803839333989, -0.5721972886843723, -0.7501829092993302, -0.15349800908427572, 0.18791462737535045, -0.5470482537350623, 0.0694805637919472, 0.11109949906739805, -0.05080609217009568, -0.10310782753879975, -0.11306331572492027, -0.7342942829959108, 0.5899346008666371, 0.15418825045439946, -0.02081659679571593, 0.23629696966788344, -0.20051899964600228, -0.1124416281658899, -0.2846997071631564, -0.7192552571252601, 0.37433250100587284, 0.4563424915786525, -0.6333746841090807, 0.48199161211899844, 0.7988546437125186, 0.3226337048037727, 0.08751293246797012, 0.6319836666961702, 0.5183905403464221, -0.03681080646589069, 0.32577945075501186, 0.6121356022112711, 0.3422973617966078, -0.38300865284018476, 0.3869972769037436, -0.3438931711267918, -0.2925667840718638, 0.02073915908080158, -0.6601106786744819, 0.18786226365552441, -0.4674216311409396, -0.20596050805709923, -0.6805376409410507, -0.11035629757866405, 0.128064195205727, 0.25006628117465546, -0.3911802588296395, 0.2206671476325982, 
0.09977946265209521, -0.8307214146667822, 0.9171381291965639, -0.4940453191512136, 0.1984116913539024, 0.16554188896519878, -0.1928373027542588, 0.035841851144624626, 0.19266064738694355, -0.5850119457302836, 0.2215164403315185, -0.5896064710472241, -0.5102020534325775, 0.7417367519970856, 0.6225121973006423, 0.19164293014838338, -0.05737129492026963, 0.6121162351865662, 0.6866768284728066, 0.14965833690587552, -0.4193640570236367, 0.8160069586540502, 0.4684910259589942, -0.3030223187406086, -0.22660524229484055, -0.26703340415352106, 0.2211243087352533, -0.34540536962148954, -0.49431357086808453, 0.4597744966917602, -1.1664564277056044, -0.42563404464543353, -0.2676448037922594, 0.10874078586225963, 0.30510521880764874, 0.0201911678551102, 0.06799655432754058, 0.2696266035980911, -0.022093390708932178, -0.23885757257157703, 0.23240754800646202, 0.3154235780471931, 0.41222261068120575, 0.21621816215661968, -0.49383086489683936, 0.22028919935866093, 0.3085302761259844, -0.7607054242258245, -0.05524612679499659, -0.4005994571597372, -0.0005409548233603068, 0.09356401716698916, -0.28019790711798187, 0.11366960643707247, 0.11455280438074295, 0.23238882534231647, 0.38151138367739235, -0.6805858547100075, 1.0336614238387605, 0.3031048722576475, 0.3326128459698742, -0.4841094623424372, -0.14572762596592148, -0.6989095508218981, 0.08856510384007225, -0.7833815783352288, -0.6051206720201369, 0.05183879858837576, 0.8525690594373536, -0.21238052918801256, 0.3064353096452067, 0.5227279651783818, -0.26970368473406214, 0.30040315174676463, 0.8044560092955977, -0.31738694194412304, -0.03793873042044421, -0.43780967817899613, 0.44695577264660596, 0.07431440259337181, 0.30468167123379775, -0.17018353015014637, -0.3717841160485929, -0.38018867550367835, -0.8319113906639981, -0.3172654371086346, 0.19103501813376841, 0.7907537210342017, -0.9089959391273528, -0.296319135720049, -0.3417512761829329, -0.023084343039717133, 0.4040896098348129, -0.31660229354210917, 0.20977471047292015, 
0.06648686941410495, 0.569876899843004, 0.21573326092963024, 0.12794663668726255, 0.13345639453704178, 0.06166211393616905, -0.03240968584900304, 0.5204613173650747, -0.054310331255427405, -0.606521604737881, 0.1468424637174045, -0.10111301306428899, -0.28436289560722927, 0.8017857089530022, 0.31257612218556174, 0.1945880804760491, -0.6089705211035303, -0.14618931754382708, 0.39343797880425146, -0.5405366749846467, -0.8955466868745385, 0.24857173830306561, -0.18623545173861095, -0.3623886194435283, -0.09699504056721092, -0.3486736104159766, -0.5453068629962625, -0.04010192714300855, -0.5306328827959214, -0.1354335299516863, 0.44670333258196254, -0.29490113171676874, -0.09463147398839705, 0.874510578425735, -0.08994591375239581, 0.9780471833760542, -0.5336783066507795, 0.44298379683008876, -0.44674080020159146, -0.31978046647941477, 0.4387881341782599, 0.5350973549899811, -0.27423388863198106, -0.39599195531628345, -0.17873234672818766, -0.3297290606571569, 0.4175261890217202, -0.41268092911177146, -0.22703685041841498, -0.274965576004083, 0.18191455558091849, -0.4939031672186963, -0.5349104573630611, 0.11079268810336479, 0.29357806459055, -0.7619449431049254, 0.712338129607013, -0.10469146069165344, -0.6452483516150597, 0.27405977939835985, 0.3831614384076198, 0.5368152956549854, 0.1178851135066101, 0.4460847058945193, -0.3883316848615107, 0.29727456716368683, -0.9048653966482885, -0.014346612174598924, -0.30126039008201877, -0.4640418178366639, 0.7986905413889673, -0.07702758460544154, 0.2259118808338296, 0.04026857681135493, -0.26928463274492503, 0.3490075592061821, -0.0973833708982666, 0.6135129639803163])
runGait([0.7788908289099306, 159.1275018887873, 0.3675002143633652, 0.29358082034076927, -0.15802555334813242, -0.10120259171082222, -0.30648918293561567, -0.05565177235370438, 0.3049336061735364, 0.019485785927701088, 0.07832249749716115, 0.7871716752724793, -0.11324944128855793, 0.33911735703936136, 0.5918403744719638, 0.023601544667689423, -0.28854114361833516, 0.3817510406516811, -0.7632588163737368, 0.022344201194842275, -0.9273228127530271, 0.23720909885208666, 0.15504539382606097, -0.06533263447758221, 0.6586022121665115, -0.29344233757111227, -0.6100745589428764, 0.17959646981207406, -0.2567640532742428, 0.3822693241621865, -0.7185816271396638, -0.17076272358383804, -0.19398205231627866, 0.030812971237587354, 0.13867994513962895, 0.3166621409958447, 0.22733110475404655, 0.5283963851565332, 0.3000850798497766, 0.6660952670105973, 0.78894440862353, 0.5157348667100712, -0.38489800997374557, -0.5993480088686595, -0.18953762119493234, -0.142888877844017, -0.2487995360903048, -0.2553184465203176, -0.6835072531097912, -0.08818770700477133, -0.36234611577801024, 0.1882081759188938, -0.5403677941379689, 0.12135448182929069, 0.13736905269298347, -0.19255224947663963, 0.40446549296965023, -0.12911247255617248, -0.8830555265743198, 0.5421547081545663, 0.19311176458866605, 0.32721151153007055, 0.2615659111455483, -0.3394759597454976, -0.06908691052009064, -0.4945679102133535, -0.664437236127388, 0.455900172021705, 0.9711538928263904, -0.6122014695612167, 0.5055842831599157, 1.0056138878174767, -0.12461961762482054, 0.07270199762420153, 0.1945368404538606, 0.6057165296982737, -0.036218178124653, 0.47457880179122397, 0.561082214156418, 0.5518568617850256, -0.5389721874824412, 0.018553983034344634, -0.33762141187925576, -0.2910578505162613, 0.14535357070447755, -0.5450888267216742, 0.3147936923887069, -0.8630308630441204, -0.35532598921412856, -0.6465004633853288, -0.13894664477940558, -0.05983048923837514, 0.365281504661632, -0.5068437133042195, 0.27739826556422653, 
0.19813563846913917, 0.4204274997035509, 0.8205768952006834, 0.20430739373514373, 0.3485830076102606, 0.19097357428020537, 0.11039935013513971, -0.05699918798476541, 0.17873025134016074, -0.5020918329264104, 0.06913567443773855, 0.824576743344076, -0.5181143051990277, 0.5324747932677494, 0.4047169187725307, 0.17512694920182592, 0.09027062872679636, -0.020184312316256805, 0.45759775735791614, 0.3541811364432514, -0.42810856403905084, 0.34330793691017164, 0.37803887536859593, -0.6335568031247926, 0.7451449971471023, -0.422905787574568, 0.1108746870208986, -0.48281598513746043, -0.8845798783329725, 0.2848773437400216, -1.024048537916983, -0.3564009884656577, -0.12474613038765203, -0.20868110635282452, 0.34321844967647513, 0.026989145418742592, 0.0056243347284405665, 0.2157974836543368, -0.13733758298445253, -0.33659375632067434, 0.22898564273416638, 0.5087202771881477, 0.5372813556304213, 0.2763875601801632, -0.0931809119638537, -0.15966469444157633, 0.6929648835534945, -0.6910669985856048, -0.19389406110887974, -0.3902847322396408, 0.14551156290910644, 0.16618646822053293, -0.8039787804703373, 0.15428898913228964, 0.2190997039624487, 0.6297767323586855, 0.679561926817978, -0.25269142672699957, 1.073396465709869, 0.29029445313069896, 0.3205284197358836, -0.8690640106312963, -0.28879170523811604, -0.3779137324309013, 0.11936316568503534, 0.12976155632213082, -0.6265683661237571, -0.4855309257343185, -0.5955282235528421, -0.6845815418683121, 0.4737288107745596, 0.9252056200075451, 0.34567749439018725, 0.0363068008180909, 0.5036072080080863, 0.7640949628326337, 0.23505279305957724, -0.7167717818166955, 0.22850447613489278, -0.030879680605631232, 0.8293535643185203, -0.12780675301767844, -0.4412923860207338, -0.4755533837206397, -0.7447986150788337, -0.351029516667939, 0.14495494618770427, 0.4535847436338252, -0.3531591185986031, -0.3501206760556444, -0.34008350299413603, -0.1719031304459102, 0.5791280201924914, -0.1307266287188878, 0.5722057672119683, 
-0.15710148018631528, 0.8971534994186912, 0.2136354954365922, 0.10736052402292397, -0.09859594062374435, 0.029800704664000633, -0.036340884369813475, 0.13376827373262254, 0.06508877177327688, 0.3209725597538162, 0.25401652110289125, 0.306412096915022, -0.4062443766586342, 0.8116135654848087, 0.5038625296989976, 0.31304069230057774, 0.27907345633577596, -0.0628432232149184, 0.7488103240677573, 0.26866161782653364, -0.3422913295247829, 0.2620956604170435, 0.3869320718994709, -0.31031616120110883, -0.6430657281164895, -0.22156900833723173, -0.38501223572432874, 0.253328681493752, -0.43126896631860384, 0.13122580788583768, 0.38236533839599784, -0.41315518332835866, -0.06881562913283318, -0.06011531751913962, -0.1786891777692833, 0.5379347036881686, -0.40503481008181036, 0.5715406089598921, 0.09206279188231829, -0.06971523980576558, 0.2873942092356773, 0.5853487864435241, -0.2276481518074298, 0.024954174691643023, -0.45483892434063, -0.24065990358463835, 0.009443952578838826, -0.4484870705490043, 0.020876918478014064, -0.3827342793037868, 0.08765966633266016, -0.27582852508027256, -0.31975146101964735, 0.23584006084015846, 0.29568147715946885, -0.8673046059997449, 0.731200312180833, 0.023929423446759857, -0.573758689689352, 0.20993143702530428, 0.47001185232098075, 0.5565009861719601, 0.4404197794305241, 0.185040383009213, -0.4131600591525496, 0.16851972924086844, -0.4306339258923647, -0.08726456746018109, -0.4968797363532158, 0.11631152950365159, 0.12129503091276284, -0.2797069038719245, 0.28964257172500313, 0.30047606726857523, 0.7797290276248695, 0.6214545112158738, -0.05292732565014542, 0.7730879197180376])
runGait([0.5798525952403486, 159.40058262811087, 0.2565373666541486, 0.4318803359566548, -0.12670809771496028, -0.18389697331663157, -0.2107828186822762, -0.08822530542530635, 0.22285787517794267, -0.14626266057699905, -0.010708372490804578, 0.8284588168920229, -0.4722713879921887, 0.19208169499854316, 0.7411503431106623, 0.040109802039862204, -0.09388586098031043, 0.3021068442167352, -0.7632253195827811, 0.7506268013480695, 0.9817795923219715, 0.24702479164747443, 0.18619723055382778, -0.01944816101487032, 0.48047453154015674, 0.044027151050185204, -0.3811611523444315, 0.07043423693100055, -0.3519428921487882, 0.06993952938115677, -0.5223378593335369, -0.22654293849301965, -0.3119867211980983, 0.02203965621516251, 0.13948563872031947, 0.303700575374144, -0.38645242345574016, 0.28023524831943847, 0.48072548736928467, 0.23302786453056437, 0.5986122931605656, 0.6980983847283087, -0.34928346917889913, -0.7471794250508657, -0.17324487759716678, -0.17474742917438432, -0.016090144920211283, -0.3939766926275115, -0.22629036214619017, 0.16700221777337937, -0.37273771647989734, 0.09062261530742664, -0.37891402716818134, -0.06917514694333406, 0.17291180959585456, -0.04844904642867835, 0.4909898309834386, -0.12380149375214647, -0.9111999943893617, 0.5239305389825415, 0.18171587810764148, 0.38432773481912763, 0.2485646260803057, -0.34491165699137327, 0.006209690525156852, -0.37412477201907574, -0.4604269737237233, 0.18972851127462684, 0.9223517018397529, -0.6210098084521306, 0.49640907383479516, 1.0891123792650426, 0.49418162637573465, 0.06420244660188437, -0.12089596773664718, 0.550748464763964, -0.06108552913804381, 0.5552559735763302, 0.5522750091665136, 0.4683531478559523, -0.49173267096532985, -0.03517143099932757, 0.06100743363377523, -0.1902922529235461, 1.1122974946275956, -0.6276776253711288, 0.18178541719252822, -0.8132832667298763, -0.4537338576551622, -0.5808862777121856, -0.23668011060600833, 0.09569218539264038, 0.304579054169755, -0.4494551817405288, 
0.15870763199473473, 0.3069755905115654, 0.3959705547451262, 0.7213258637748241, 0.3763728637422603, 0.40391108748034943, 0.37339498399229337, 0.01831140131395486, -0.07811735353228262, 0.13357830561871784, -0.3758537743243868, 0.05195547431005445, 0.9379615461508354, -0.5620520288627262, 0.4539455849665125, 0.8143570677050026, 0.004283675773725867, 0.11729610271034435, 0.25800504348420195, 0.3861845305371114, 0.041281706706534374, -0.5754739036307105, 0.25496830847083185, 0.5497065484163852, -0.6197402288256176, 0.4497922477657804, -0.4509130489838098, 0.08492188046586338, -0.3745067142483794, -0.6326481434329293, 0.35798335360645445, -1.0756026955774058, -0.1658013536974587, -0.7407148571190698, -0.4210965495924469, 0.31622190189278027, -0.037060520648369094, 0.027025421714000526, 0.12225503371751616, -0.1873821138347538, -0.39270249305607063, 0.20600245925762312, 0.6985349231049772, 0.538141419017502, 0.28367589673434807, 0.2408451341677153, -0.4857764076187511, 0.3515662648369627, -0.5970692688174171, 0.02074320821791737, -0.4780204081199166, 0.1369717613278151, 0.1389681583857107, -0.7410246577802894, -0.026563078155500804, 0.15750473450145544, -0.13448669931945875, 0.6551277580017717, -0.15601141002050287, 0.7448104569077326, 0.2902755482562471, 0.5364234542988243, -0.7792137180797938, -0.2846168758998948, -0.5839320530147617, 0.0628617276240415, 0.04002665260768596, -0.5479117597172838, -0.34196183099322613, 0.7056701204997842, -0.6647297748826204, 0.42806286178547925, 1.036676035059638, 0.642474334419992, -0.007131427190290577, 0.4639036629286082, 0.7267327436184402, 0.10043705596129168, -0.6440769086811172, 0.21343864782202604, -0.10329009445544304, 0.6425250240843889, 0.05230900542901544, -0.11251073882320739, -0.35085266645906615, -0.4199713623669293, -0.3351922780693797, -0.1672466366670506, 0.5379892464340184, -0.16330627356393349, -0.056804450777555304, -0.4017389922325012, -0.1611244393278351, 0.5367215850055974, 0.26665926782883664, 
0.7219467754507272, -0.23576690967651864, 0.9079195952027548, 0.2062387616905374, -0.46534175178439574, -0.10399688011724603, 0.03668728875610021, -0.036926473584827625, -0.10113397006234737, -0.0394881387655676, 0.350606385291519, 0.13403276401659836, 0.5123948398917316, -0.35772789686003825, 0.9668024245923357, 1.027298011515463, 0.48938829842316445, -0.016483670605963077, -0.11757722414303058, 0.2537310530675592, 0.2784246188624572, 0.7695997113771061, 0.20066387160146376, 0.41486141094226053, -0.41256985585307027, -1.0477965177073278, -0.3200262420965195, -0.4739320902039974, 0.1565114856750497, -0.4241862276994479, -0.014128604687211607, 0.16050204319589437, -0.40332695432173227, -0.17516701158934914, -0.11260377701162964, -0.13167892785497093, 0.7970663227025759, -0.5351328770237976, 0.5899558689297649, 0.23041684659804587, -0.15627868369477282, 0.2137694705032495, 0.5696194997458192, -0.17336701928954443, 0.10876003399835937, -0.17072054408485435, -0.329611678752721, -0.14558125057844667, -0.5971107203993231, 0.09088625634430318, -0.9113803135864458, 0.20662562476533045, -0.1850266147294778, -0.3839648611252884, -0.06266604342717771, 0.32620231538556216, -0.4511759755602935, 0.46462930437425676, -0.19098144950012377, -0.45748534958561105, 0.21835806888483988, 0.375990472674876, 0.3152963988042483, 0.4174992880580025, 0.2417839091584328, -0.24397794422280902, 0.35917942428087724, -0.3968249675417042, -0.020614707571517774, -0.5895590906441834, -0.4121546533845332, 0.7169006234784104, 0.00017719522626339322, -0.009118568606292298, 0.43285003350721596, 0.9231244888866221, 0.6350781716028882, 0.025592267149331688, 0.8892290172542283])
runGait([0.5833710183294218, 159.94477382941363, 0.274991231117698, 0.238089263008252, -0.1417409643619611, -0.12761521449834923, -0.19289461538039873, -0.10856112933014755, 0.15968577636162862, -0.17368631551323016, 0.07203736702130702, 0.881941236232128, -0.49131125475461257, 0.41963359660824545, 1.0286266905353563, 0.08461166310190373, -0.0930092118460774, 0.6688730971003121, -0.5272671341802382, 0.3759165556754689, 1.0284225286200426, 0.281022494747565, 0.0009195019049819536, -0.0879260351696233, 0.36182544936252586, 0.1544240116221797, -0.3379165966729512, -0.07107860607401342, -0.35347372811378175, 0.24474628711192828, -0.9554210776881508, -0.2446425071911576, -0.21834542364787896, 0.02941224160898587, 0.19860309964710376, 0.32015885118565973, -0.38463037767537034, 0.2721652517032083, 0.4498871803091191, 0.2843738257583236, 0.501327523282536, 0.669013035550511, -0.37715689084783993, -0.7193636388118547, -0.2383013342521316, -0.17489396233602697, 0.06083890600102712, -0.4034525364101677, -0.24583511137643727, 0.05016350547975096, -0.5231072760701854, 0.0920601174252217, -0.3798879187489547, -0.06425162833626528, 0.1175629295006112, 0.02682125752619795, 0.5075090858782456, -0.16206766607753353, -0.9027943006438802, 0.5191380547248162, 0.1922772367714138, 0.3722573359482723, 0.27824784135917774, -0.36599087581441164, -0.06620007679763394, -0.37707864764924415, -0.3432745401212583, 0.1890972969239655, 0.9771314779636118, -0.6379190437437263, 0.5327515128308239, 1.1266802573644779, 0.4853618562003829, 0.03715655903182573, 0.07311510274068314, 0.5423300464375319, -0.0658356136420452, 0.6733211262326829, 0.5412515512543659, 0.475841545559648, -0.5369352656406974, -0.026774867624149132, -0.27366768812094183, -0.21535394823513682, 1.1272641607914005, -0.6324828192170618, 0.22992240750612652, -0.8332942275738103, -0.4448812583609043, -0.5639998583724821, -0.28504303170819406, -0.13715306369785674, 0.3349484025718961, -0.3700368781101578, 0.20300787227134326, 
0.22374862961672667, 0.027970795265832554, 0.7014861172229404, -0.04926493320095343, 0.4402290874741377, 0.3860194514832391, 0.11569030596073443, -0.06036997313965854, 0.1497256975919505, -0.377481545800565, 0.08298090520892161, 0.9438244188255105, -0.48021805376469584, 0.4543715274216308, 0.8678356926851799, -0.003915278924756722, 0.10352872557178089, 0.3358865719916397, 0.4211155579389066, -0.030249314775129762, -0.5658285551195321, 0.2548939424634452, 0.5745275199121783, -0.7796534931283465, 0.3451123282022226, -0.5444761756627212, 0.12200790829540269, -0.25898916669720645, -0.6724214809633824, 0.34635133694786935, -1.0685493620290625, -0.166454962800517, -0.8051985252291386, -0.4306033386198576, 0.3621432335285329, 0.014468787338504891, 0.141080510173102, 0.13964744684544284, -0.15421615523049945, -0.4317859615807832, 0.225587551388641, 0.693207792900198, 0.5533948757767216, 0.20097437713556277, 0.23256665133179352, -0.4990635733731684, 0.37724815041759296, -0.8484710927328951, 0.052329062943848995, -0.6454186305205749, 0.01709338435440333, 0.1426615820133712, -0.7496362823830726, -0.024592492969917387, 0.07160640453502068, -0.2474844962946594, 0.5941575845367926, -0.20960304431184107, 0.6424578239764861, 0.2920273219156567, 0.7036560308915455, -0.8121144845665177, -0.2789410770162129, -0.7413476580353197, 0.08188596178827257, 0.07931227840034549, -0.7207975890283618, -0.6065813517836143, 0.3983191376566657, -0.5635381703226274, 0.4088177741187736, 0.8161358908559947, 0.6554301845419963, 0.04547395492205422, 0.08051995385752733, 0.7945827746307063, 0.11087351442670304, -0.590752837198396, 0.2065658076101474, 0.0751712923684167, 0.6709125887262557, 0.1373187383960103, -0.18183312802940133, -0.4350057499267376, -0.3766430661862623, -0.8199596582372628, -0.14153603961297806, 0.590381220135425, -0.16508543450631305, -0.20708569485397604, -0.34591459093209215, -0.16651848898298874, 0.5178287410957361, -0.03657852374819068, 0.7219509009910949, -0.22937310869060928, 
1.1464596068133195, 0.21233031874020497, -0.3609307120798186, -0.41136793770748015, 0.16347336752562386, -0.04569336581669571, -0.12320285070861678, 0.08240315222323638, 0.4579164525630148, 0.10194743670878537, 0.5748048740706077, -0.38484763478500494, 0.8009525610955103, 0.7333605699869847, 0.37124845323434263, -0.03501117975518403, 0.012948485735009754, 0.29139932552493186, 0.34343670572619495, 0.8542624160112243, 0.2047939987143607, 0.3903803959743837, -0.20493239305818473, -1.1287011492813999, -0.32936672671984124, -0.36581898984821176, 0.2451494881151558, -0.5756460181132672, -0.030221322542399086, 0.16449751038968874, -0.3567999251278406, -0.1618212236300447, -0.11207687799559582, 0.05735981109003743, 0.9415542138674963, -0.3554473551841668, 0.5357750527639715, 0.21498207309781378, 0.4532008047694893, 0.21329882952215284, 0.5859846457864504, -0.16362093353740018, 0.1319546289160159, -0.2194715016448026, -0.266878066809855, 0.19007538470038587, -0.6214579041470789, 0.07758190484905561, -0.7515667963793465, 0.24700843522334995, -0.292447662725082, -0.4181253106644778, 0.19564903243421003, 0.19724000917384554, -0.2063833311816462, 0.46455125472211967, -0.0899164519697946, -0.4859666940225116, 0.2204956850628324, 0.5537344147667811, 0.3710020504693896, 0.42084808456566025, 0.22826893049722402, -0.3009973798887208, 0.3133299056345898, -0.5362470634819437, -0.07363025268708201, -0.4903844709824772, -0.4212031706808154, 0.593200663984306, 0.03428638943992187, 0.24491294188014479, 0.46221509482741235, -0.20849095803967968, 0.6337473393725238, -0.05747450930384633, 0.8875435750416844])
p.disconnect(physicsClient)
if __name__ == "__main__":
main()
| true | true |
f7224ef99c59f74d5cbe11ac856e1a968e88db76 | 190 | py | Python | image_turk.py | kostyaev/image-turk | aa2a3519e74d23612f699e4b9cbe5ed5f1fdcba6 | [
"BSD-2-Clause"
] | 7 | 2016-06-18T11:22:43.000Z | 2019-08-28T23:28:41.000Z | image_turk.py | kostyaev/image-turk | aa2a3519e74d23612f699e4b9cbe5ed5f1fdcba6 | [
"BSD-2-Clause"
] | null | null | null | image_turk.py | kostyaev/image-turk | aa2a3519e74d23612f699e4b9cbe5ed5f1fdcba6 | [
"BSD-2-Clause"
] | 2 | 2016-12-12T07:40:42.000Z | 2018-02-19T13:26:07.000Z | from web import app
from gevent.pywsgi import WSGIServer
if __name__ == '__main__':
    # Serve the Flask app on every interface, port 5000, through
    # gevent's WSGI server; serve_forever() blocks until the process dies.
    server = WSGIServer(('', 5000), app)
    server.start()
    server.serve_forever()
| 17.272727 | 45 | 0.705263 | from web import app
from gevent.pywsgi import WSGIServer
if __name__ == '__main__':
    # Bind gevent's WSGI server to all interfaces on port 5000.
    http_server = WSGIServer(('', 5000), app)
    http_server.start()
    # Block here, handling requests until the process is terminated.
    http_server.serve_forever()
| true | true |
f7224f396301cfc76ade2e118f525a3fe4ace8c6 | 827 | py | Python | pixelflow/distributions/normal.py | didriknielsen/pixelcnn_flow | 9030f6a66d5ff83d7d299541ed55b20b20bb9a15 | [
"MIT"
] | 25 | 2020-02-12T00:35:48.000Z | 2021-09-18T14:30:43.000Z | pixelflow/distributions/normal.py | didriknielsen/pixelcnn_flow | 9030f6a66d5ff83d7d299541ed55b20b20bb9a15 | [
"MIT"
] | 1 | 2021-08-05T10:00:04.000Z | 2021-08-10T11:11:16.000Z | pixelflow/distributions/normal.py | didriknielsen/pixelcnn_flow | 9030f6a66d5ff83d7d299541ed55b20b20bb9a15 | [
"MIT"
] | null | null | null | import math
import torch
from pixelflow.distributions import Distribution
from pixelflow.utils import sum_except_batch
from torch.distributions import Normal
class StandardNormal(Distribution):
    """Standard multivariate Gaussian N(0, I) over tensors of a fixed event shape."""

    def __init__(self, shape):
        super(StandardNormal, self).__init__()
        self.shape = torch.Size(shape)
        # Log normalizing constant, -(D/2) * log(2*pi); registered as a
        # buffer so it follows the module across devices and dtypes.
        two_pi = torch.tensor(2 * math.pi)
        self.register_buffer('base_measure', - 0.5 * self.shape.numel() * torch.log(two_pi))

    def log_prob(self, x):
        """Log-density of x under N(0, I), reduced over all non-batch dims."""
        quadratic = 0.5 * sum_except_batch(x**2)
        return self.base_measure - quadratic

    def sample(self, num_samples):
        """Draw i.i.d. samples shaped (num_samples, *event_shape)."""
        out_shape = (num_samples,) + self.shape
        return torch.randn(out_shape, device=self.base_measure.device, dtype=self.base_measure.dtype)

    def sample_shape(self, num_samples):
        """Shape of a batch of `num_samples` samples."""
        return (num_samples,) + self.shape
| 34.458333 | 119 | 0.71705 | import math
import torch
from pixelflow.distributions import Distribution
from pixelflow.utils import sum_except_batch
from torch.distributions import Normal
class StandardNormal(Distribution):
    """Standard multivariate Gaussian N(0, I) over tensors of a fixed event shape."""
    def __init__(self, shape):
        # shape: event shape of a single sample (iterable of ints).
        super(StandardNormal, self).__init__()
        self.shape = torch.Size(shape)
        # Log normalizing constant -(D/2)*log(2*pi); a buffer so it moves
        # with the module's device/dtype.
        self.register_buffer('base_measure', - 0.5 * self.shape.numel() * torch.log(torch.tensor(2 * math.pi)))
    def log_prob(self, x):
        # Log-density, summed over all non-batch dimensions.
        return self.base_measure - 0.5 * sum_except_batch(x**2)
    def sample(self, num_samples):
        # i.i.d. draws shaped (num_samples, *event_shape), on the buffer's device/dtype.
        return torch.randn((num_samples,) + self.shape, device=self.base_measure.device, dtype=self.base_measure.dtype)
    def sample_shape(self, num_samples):
        # Shape of a batch of `num_samples` samples.
        return (num_samples,) + self.shape
| true | true |
f7224fab731b69e54808a625abdf671ae47b423f | 1,776 | py | Python | aliyun-python-sdk-workbench-ide/aliyunsdkworkbench_ide/request/v20210121/SetAppEnvConfigRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-workbench-ide/aliyunsdkworkbench_ide/request/v20210121/SetAppEnvConfigRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-workbench-ide/aliyunsdkworkbench_ide/request/v20210121/SetAppEnvConfigRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class SetAppEnvConfigRequest(RpcRequest):
    """Request object for the Workbench-ide `SetAppEnvConfig` RPC (API version 2021-01-21).

    Carries the deploy stage, environment-config list, application id and
    the caller's organization id as query parameters of a POST request.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Workbench-ide', '2021-01-21', 'SetAppEnvConfig')
        self.set_method('POST')

    def get_DeployStage(self):  # String
        params = self.get_query_params()
        return params.get('DeployStage')

    def set_DeployStage(self, DeployStage):  # String
        self.add_query_param('DeployStage', DeployStage)

    def get_EnvConfigList(self):  # String
        params = self.get_query_params()
        return params.get('EnvConfigList')

    def set_EnvConfigList(self, EnvConfigList):  # String
        self.add_query_param('EnvConfigList', EnvConfigList)

    def get_AppId(self):  # Long
        params = self.get_query_params()
        return params.get('AppId')

    def set_AppId(self, AppId):  # Long
        self.add_query_param('AppId', AppId)

    def get_CurrentOrgId(self):  # String
        params = self.get_query_params()
        return params.get('CurrentOrgId')

    def set_CurrentOrgId(self, CurrentOrgId):  # String
        self.add_query_param('CurrentOrgId', CurrentOrgId)
| 37 | 78 | 0.756757 |
from aliyunsdkcore.request import RpcRequest
class SetAppEnvConfigRequest(RpcRequest):
	"""Request object for the Workbench-ide `SetAppEnvConfig` RPC (API version 2021-01-21)."""
	def __init__(self):
		RpcRequest.__init__(self, 'Workbench-ide', '2021-01-21', 'SetAppEnvConfig')
		self.set_method('POST')
	def get_DeployStage(self): # String
		return self.get_query_params().get('DeployStage')
	def set_DeployStage(self, DeployStage): # String
		self.add_query_param('DeployStage', DeployStage)
	def get_EnvConfigList(self): # String
		return self.get_query_params().get('EnvConfigList')
	def set_EnvConfigList(self, EnvConfigList): # String
		self.add_query_param('EnvConfigList', EnvConfigList)
	def get_AppId(self): # Long
		return self.get_query_params().get('AppId')
	def set_AppId(self, AppId): # Long
		self.add_query_param('AppId', AppId)
	def get_CurrentOrgId(self): # String
		return self.get_query_params().get('CurrentOrgId')
	def set_CurrentOrgId(self, CurrentOrgId): # String
		self.add_query_param('CurrentOrgId', CurrentOrgId)
| true | true |
f7224fb7697b3ac73408439748e830cf29800f8a | 3,097 | py | Python | AWS_Deployment/serve/predict.py | srimanthtenneti/Deep-Learning-NanoDegree | d99b09530a96f4aeca7adf3b9188e1d5fc4104d4 | [
"Apache-2.0"
] | 1 | 2021-04-25T08:29:39.000Z | 2021-04-25T08:29:39.000Z | AWS_Deployment/serve/predict.py | srimanthtenneti/Deep-Learning-NanoDegree | d99b09530a96f4aeca7adf3b9188e1d5fc4104d4 | [
"Apache-2.0"
] | null | null | null | AWS_Deployment/serve/predict.py | srimanthtenneti/Deep-Learning-NanoDegree | d99b09530a96f4aeca7adf3b9188e1d5fc4104d4 | [
"Apache-2.0"
] | null | null | null | import argparse
import json
import os
import pickle
import sys
import sagemaker_containers
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
from model import LSTMClassifier
from utils import review_to_words, convert_and_pad
def model_fn(model_dir):
    """Load the trained PyTorch model, its hyperparameters and its vocabulary
    from `model_dir` and return the ready-to-infer model.

    Args:
        model_dir (str): Directory containing model_info.pth, model.pth and
            word_dict.pkl (as written by the training job).

    Returns:
        LSTMClassifier: Model in eval mode, moved to GPU when available,
        with the vocabulary attached as `model.word_dict`.
    """
    print("Loading model.")
    # First, load the hyperparameters that were used to construct the model.
    model_info = {}
    model_info_path = os.path.join(model_dir, 'model_info.pth')
    with open(model_info_path, 'rb') as f:
        model_info = torch.load(f)
    print("model_info: {}".format(model_info))
    # Determine the device and construct the model.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])
    # Load the stored model parameters (state dict).
    model_path = os.path.join(model_dir, 'model.pth')
    with open(model_path, 'rb') as f:
        model.load_state_dict(torch.load(f))
    # Load the saved word_dict (word -> integer id mapping from training).
    word_dict_path = os.path.join(model_dir, 'word_dict.pkl')
    with open(word_dict_path, 'rb') as f:
        model.word_dict = pickle.load(f)
    model.to(device).eval()
    print("Done loading model.")
    return model
def input_fn(serialized_input_data, content_type):
    """Deserialize a request payload into a plain-text review string.

    Args:
        serialized_input_data: Raw request body; ``bytes`` (the usual
            SageMaker case) or an already-decoded ``str`` (local testing).
        content_type (str): MIME type of the payload; only 'text/plain'
            is supported.

    Returns:
        str: The review text.

    Raises:
        Exception: If ``content_type`` is not 'text/plain'.
    """
    print('Deserializing the input data.')
    if content_type == 'text/plain':
        # Robustness fix: accept both bytes and str -- the original called
        # .decode() unconditionally and crashed on str input.
        if isinstance(serialized_input_data, bytes):
            return serialized_input_data.decode('utf-8')
        return serialized_input_data
    raise Exception('Requested unsupported ContentType in content_type: ' + content_type)
def output_fn(prediction_output, accept):
    """Serialize the model's prediction to the response body as a string.

    The `accept` content type is not inspected; the output is always the
    plain `str()` form of the prediction.
    """
    print('Serializing the generated output.')
    serialized = str(prediction_output)
    return serialized
def predict_fn(input_data, model):
    """Run sentiment inference on a single plain-text review.

    Args:
        input_data (str): Raw review text (as produced by `input_fn`).
        model (LSTMClassifier): Model returned by `model_fn`, with its
            `word_dict` vocabulary attached.

    Returns:
        numpy.ndarray: Array containing a single rounded score,
        1.0 (positive) or 0.0 (negative).

    Raises:
        Exception: If the model was loaded without a word_dict.
    """
    print('Inferring sentiment of input data.')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')
    # Tokenize the review and convert it to a fixed-length (500) sequence of
    # word ids, also obtaining the original (pre-padding) length.
    data_X, data_len = convert_and_pad(model.word_dict, review_to_words(input_data))
    # The model expects each row in the form [length, word_id_1 ... word_id_500].
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)
    data = torch.from_numpy(data_pack)
    data = data.to(device)
    # Evaluation mode: disables dropout / batch-norm updates.
    model.eval()
    with torch.no_grad():
        y = model.forward(data)
    # Bug fix: move the output to the CPU before converting to numpy --
    # .numpy() raises on a CUDA tensor, and this function explicitly moves
    # the input to CUDA when available. .cpu() is a no-op on CPU tensors.
    result = np.round(y.cpu().numpy())
    return result
| 32.946809 | 107 | 0.698418 | import argparse
import json
import os
import pickle
import sys
import sagemaker_containers
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
from model import LSTMClassifier
from utils import review_to_words, convert_and_pad
def model_fn(model_dir):
    """Load the trained PyTorch model, its hyperparameters and vocabulary
    from `model_dir`; return the model in eval mode on the chosen device.
    """
    print("Loading model.")
    # Hyperparameters saved by the training job.
    model_info = {}
    model_info_path = os.path.join(model_dir, 'model_info.pth')
    with open(model_info_path, 'rb') as f:
        model_info = torch.load(f)
    print("model_info: {}".format(model_info))
    # Build the network with the stored hyperparameters, on GPU if available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])
    # Restore the trained weights.
    model_path = os.path.join(model_dir, 'model.pth')
    with open(model_path, 'rb') as f:
        model.load_state_dict(torch.load(f))
    # Attach the word -> id vocabulary used at training time.
    word_dict_path = os.path.join(model_dir, 'word_dict.pkl')
    with open(word_dict_path, 'rb') as f:
        model.word_dict = pickle.load(f)
    model.to(device).eval()
    print("Done loading model.")
    return model
def input_fn(serialized_input_data, content_type):
    """Decode the request body; only 'text/plain' payloads are accepted."""
    print('Deserializing the input data.')
    if content_type != 'text/plain':
        raise Exception('Requested unsupported ContentType in content_type: ' + content_type)
    return serialized_input_data.decode('utf-8')
def output_fn(prediction_output, accept):
    """Serialize the prediction for the HTTP response.

    Args:
        prediction_output: Result returned by `predict_fn`.
        accept: Requested response content type (unused; the output is
            always the plain `str()` form).

    Returns:
        str: String form of the prediction.
    """
    print('Serializing the generated output.')
    return str(prediction_output)
def predict_fn(input_data, model):
    """Run sentiment inference on one plain-text review; returns a numpy
    array holding a single rounded score (1.0 positive / 0.0 negative).
    """
    print('Inferring sentiment of input data.')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')
    # Convert the review into (fixed-length word-id sequence, original length).
    data_X , data_len = convert_and_pad(model.word_dict, review_to_words(input_data))
    # Model input rows have the form [length, word_id_1 ... word_id_N].
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)
    data = torch.from_numpy(data_pack)
    data = data.to(device)
    # Evaluation mode disables dropout etc.
    model.eval()
    with torch.no_grad():
        y = model.forward(data)
    # NOTE(review): y.numpy() raises if y lives on a CUDA device, yet the
    # input is moved to CUDA when available above -- confirm CPU-only
    # deployment, or insert y.cpu() before .numpy().
    result = np.round(y.numpy())
    return result
| true | true |
f72250448cf99a09119d7986eaa24621dace8924 | 100 | py | Python | src/scraping/__init__.py | tuokri/ArgumentWinnerBot | 3a8befc4fba5e50cb64e05df4527499a1a7625fc | [
"MIT"
] | null | null | null | src/scraping/__init__.py | tuokri/ArgumentWinnerBot | 3a8befc4fba5e50cb64e05df4527499a1a7625fc | [
"MIT"
] | null | null | null | src/scraping/__init__.py | tuokri/ArgumentWinnerBot | 3a8befc4fba5e50cb64e05df4527499a1a7625fc | [
"MIT"
] | null | null | null | from .scraping import evaluate_comment_reply_pair
# Names re-exported as the package's public API.
__all__ = [
    "evaluate_comment_reply_pair",
]
| 16.666667 | 49 | 0.79 | from .scraping import evaluate_comment_reply_pair
__all__ = [
"evaluate_comment_reply_pair",
]
| true | true |
f72250b5bb01fc66955a6a49d7aaeab3b753695e | 48,823 | py | Python | epochServiceApi2/api_service_workspace.py | exastro-suite/epoch | c8ad14e9b1812310811320c9c099364917538021 | [
"Apache-2.0"
] | 5 | 2021-04-15T07:12:28.000Z | 2022-02-03T12:36:20.000Z | epochServiceApi2/api_service_workspace.py | exastro-suite/epoch | c8ad14e9b1812310811320c9c099364917538021 | [
"Apache-2.0"
] | 29 | 2021-07-20T06:54:46.000Z | 2022-03-25T01:49:34.000Z | epochServiceApi2/api_service_workspace.py | exastro-suite/epoch | c8ad14e9b1812310811320c9c099364917538021 | [
"Apache-2.0"
] | 10 | 2021-04-15T07:50:19.000Z | 2021-12-08T05:40:37.000Z | # Copyright 2021 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask, request, abort, jsonify, render_template
from datetime import datetime
from datetime import timedelta, timezone
import inspect
import os
import json
import tempfile
import subprocess
import time
import re
from urllib.parse import urlparse
import base64
import requests
from requests.auth import HTTPBasicAuth
import traceback
import globals
import common
import const
import multi_lang
import api_service_current
# Load the configuration file and initialize globals
app = Flask(__name__)
app.config.from_envvar('CONFIG_API_SERVICE_PATH')
globals.init(app)
def create_workspace():
    """Create a workspace.

    Forwards the JSON request body to the workspace resource service (RS),
    then assigns workspace roles and configures the authentication
    infrastructure for the newly created workspace.

    Returns:
        Response: HTTP Response (JSON: {"result": status, "rows": rows})
    """
    app_name = "ワークスペース情報:"
    exec_stat = "作成"
    error_detail = ""
    try:
        globals.logger.debug('#' * 50)
        globals.logger.debug('CALL {}'.format(inspect.currentframe().f_code.co_name))
        globals.logger.debug('#' * 50)
        # Request header information
        post_headers = {
            'Content-Type': 'application/json',
        }
        # Take the JSON request body and pass it through unchanged
        post_data = request.json.copy()
        # Get the calling user's id
        user_id = common.get_current_user(request.headers)
        # POST the workspace record to the resource service
        api_url = "{}://{}:{}/workspace".format(os.environ['EPOCH_RS_WORKSPACE_PROTOCOL'],
                                                os.environ['EPOCH_RS_WORKSPACE_HOST'],
                                                os.environ['EPOCH_RS_WORKSPACE_PORT'])
        response = requests.post(api_url, headers=post_headers, data=json.dumps(post_data))
        if response.status_code == 200:
            # On success the body holds the created record; return it as-is
            ret = json.loads(response.text)
            rows = ret['rows']
        else:
            if common.is_json_format(response.text):
                ret = json.loads(response.text)
                # Keep the detailed error message when one is provided
                if ret["errorDetail"] is not None:
                    error_detail = ret["errorDetail"]
            raise common.UserException("{} Error post workspace db status:{}".format(inspect.currentframe().f_code.co_name, response.status_code))
        # Get the workspace ID
        workspace_id=rows[0]["workspace_id"]
        # Set workspace roles
        create_workspace_setting_roles(workspace_id, user_id)
        # exastro-authentication-infra setting
        create_workspace_setting_auth_infra(workspace_id, user_id)
        ret_status = response.status_code
        # Return the RS result unchanged
        return jsonify({"result": ret_status, "rows": rows}), ret_status
    except common.UserException as e:
        return common.server_error_to_message(e, app_name + exec_stat, error_detail)
    except Exception as e:
        return common.server_error_to_message(e, app_name + exec_stat, error_detail)
def create_workspace_setting_roles(workspace_id, user_id):
    """Set workspace roles.

    Creates the full set of workspace role definitions on the
    epoch-system client, then grants the workspace owner role to the
    creating user.

    Args:
        workspace_id (int): workspace id
        user_id (str): login user id

    Raises:
        common.UserException: when either authentication-API call fails.
    """
    try:
        # HTTP headers for the JSON requests below
        post_headers = {
            'Content-Type': 'application/json',
        }

        api_url = "{}://{}:{}/{}/client/epoch-system/role".format(os.environ['EPOCH_EPAI_API_PROTOCOL'],
                                                                  os.environ['EPOCH_EPAI_API_HOST'],
                                                                  os.environ['EPOCH_EPAI_API_PORT'],
                                                                  os.environ["EPOCH_EPAI_REALM_NAME"]
                                                                  )

        # Build the role definitions shared by all clients
        post_data = set_roles(workspace_id)

        response = requests.post(api_url, headers=post_headers, data=json.dumps(post_data))
        # Bug fix: the response was previously ignored, so role-creation
        # failures were silently swallowed (the sibling
        # create_workspace_setting_auth_infra checks every call).
        if response.status_code != 200:
            globals.logger.debug(response.text)
            raise common.UserException("{} Error create workspace roles status:{}".format(inspect.currentframe().f_code.co_name, response.status_code))

        #
        # append workspace owner role - grant the owner role to the creating user
        #
        api_url = "{}://{}:{}/{}/user/{}/roles/epoch-system".format(os.environ['EPOCH_EPAI_API_PROTOCOL'],
                                                                    os.environ['EPOCH_EPAI_API_HOST'],
                                                                    os.environ['EPOCH_EPAI_API_PORT'],
                                                                    os.environ["EPOCH_EPAI_REALM_NAME"],
                                                                    user_id,
                                                                    )

        post_data = {
            "roles" : [
                {
                    "name": const.ROLE_WS_OWNER[0].format(workspace_id),
                    "enabled": True,
                }
            ]
        }

        response = requests.post(api_url, headers=post_headers, data=json.dumps(post_data))
        # Bug fix: also check the owner-role grant instead of ignoring it
        if response.status_code != 200:
            globals.logger.debug(response.text)
            raise common.UserException("{} Error grant workspace owner role status:{}".format(inspect.currentframe().f_code.co_name, response.status_code))

    except common.UserException as e:
        globals.logger.debug(e.args)
        raise
    except Exception as e:
        globals.logger.debug(e.args)
        raise
def create_workspace_setting_auth_infra(workspace_id, user_id):
    """exastro-authentication-infra setting.

    Registers the OAuth clients the workspace tools need (ITA, ArgoCD,
    SonarQube), grants the epoch-system client a route for the workspace
    URLs, and finally reloads the gateway (Apache) configuration.

    Args:
        workspace_id (int): workspace id
        user_id (str): login user id

    Raises:
        common.UserException: when any authentication-infra API call fails.
    """
    try:
        # HTTP headers for the JSON requests below
        post_headers = {
            'Content-Type': 'application/json',
        }

        # Base URL of the authentication-infra API
        api_url_epai = "{}://{}:{}/".format(os.environ["EPOCH_EPAI_API_PROTOCOL"],
                                            os.environ["EPOCH_EPAI_API_HOST"],
                                            os.environ["EPOCH_EPAI_API_PORT"])

        # get namespace
        namespace = common.get_namespace_name(workspace_id)
        # get pipeline name
        pipeline_name = common.get_pipeline_name(workspace_id)

        #
        # Generate the clients for use in the workspace - one per tool.
        # require_claim ties access to the corresponding workspace role name.
        #
        clients = [
            {
                "client_id" : 'epoch-ws-{}-ita'.format(workspace_id),
                "client_host" : os.environ["EPOCH_EPAI_HOST"],
                "client_protocol" : "https",
                "conf_template" : "epoch-ws-ita-template.conf",
                "backend_url" : "http://it-automation.{}.svc:8084/".format(namespace),
                "require_claim" : const.ROLE_WS_ROLE_CD_EXECUTE_RESULT[0].format(workspace_id),
                "mapping_client_id" : "epoch-system",
            },
            {
                "client_id" : 'epoch-ws-{}-argocd'.format(workspace_id),
                "client_host" : os.environ["EPOCH_EPAI_HOST"],
                "client_protocol" : "https",
                "conf_template" : "epoch-ws-argocd-template.conf",
                "backend_url" : "https://argocd-server.{}.svc/".format(namespace),
                "require_claim" : const.ROLE_WS_ROLE_CD_EXECUTE_RESULT[0].format(workspace_id),
                "mapping_client_id" : "epoch-system",
            },
            {
                # NOTE: SonarQube runs in the pipeline namespace, not the workspace namespace
                "client_id" : 'epoch-ws-{}-sonarqube'.format(workspace_id),
                "client_host" : os.environ["EPOCH_EPAI_HOST"],
                "client_protocol" : "https",
                "conf_template" : "epoch-ws-sonarqube-template.conf",
                "backend_url" : "http://sonarqube.{}.svc:9000/".format(pipeline_name),
                "require_claim" : const.ROLE_WS_ROLE_CI_PIPELINE_RESULT[0].format(workspace_id),
                "mapping_client_id" : "epoch-system",
            },
        ]

        # Post each client definition (generates the access settings).
        # NOTE(review): exec_stat is assigned here and below but never read in
        # this function — looks like leftover from the request-handler pattern.
        exec_stat = "認証基盤 client設定"
        for client in clients:
            response = requests.post("{}{}/{}/{}".format(api_url_epai, 'settings', os.environ["EPOCH_EPAI_REALM_NAME"], 'clients'), headers=post_headers, data=json.dumps(client))

            # Anything other than 200 aborts the whole setup
            if response.status_code != 200:
                globals.logger.debug(response.text)
                error_detail = "認証基盤 client設定に失敗しました。 {}".format(response.status_code)
                raise common.UserException(error_detail)

        #
        # Set usage authority of the urls used in the workspace
        #
        exec_stat = "認証基盤 route設定"
        post_data = {
            "route_id" : namespace,
            "template_file" : "epoch-system-ws-template.conf",
            "render_params" : {
                "workspace_id" : workspace_id,
            }
        }
        response = requests.post(
            "{}settings/{}/clients/epoch-system/route".format(api_url_epai, os.environ["EPOCH_EPAI_REALM_NAME"]),
            headers=post_headers,
            data=json.dumps(post_data)
        )

        # Anything other than 200 aborts the whole setup
        if response.status_code != 200:
            globals.logger.debug(response.text)
            error_detail = "認証基盤 route設定に失敗しました。 {}".format(response.status_code)
            raise common.UserException(error_detail)

        #
        # Do "httpd graceful" - reload the Apache gateway configuration
        #
        exec_stat = "認証基盤 設定読み込み"
        response = requests.put("{}{}".format(api_url_epai, 'apply_settings'), headers=post_headers, data="{}")

        if response.status_code != 200:
            globals.logger.debug(response.text)
            error_detail = "認証基盤 設定読み込みに失敗しました。 {}".format(response.status_code)
            raise common.UserException(error_detail)

    except common.UserException as e:
        globals.logger.debug(e.args)
        raise
    except Exception as e:
        globals.logger.debug(e.args)
        raise
def _role_entry(role_const, workspace_id, composite_consts=None):
    """Build one role-definition dict for the authentication API.

    Args:
        role_const (tuple): (name_format, display_text_id, display_default)
            constant from ``const``.
        workspace_id (int): workspace id embedded into the role name.
        composite_consts (list, optional): role constants this role bundles;
            omitted for plain permission roles.

    Returns:
        dict: role definition in the shape the authentication API expects.
    """
    return {
        "name": role_const[0].format(workspace_id),
        "composite_roles": [c[0].format(workspace_id) for c in (composite_consts or [])],
        "attributes": {
            "display": [ role_const[1] ],
            "display_default": [ role_const[2] ],
        },
    }


def set_roles(workspace_id):
    """Edit the JSON content that sets the roles.

    (A role set is required for each client, but the content is identical,
    so it is built once here and shared.)

    Args:
        workspace_id (int): workspace id

    Returns:
        json: {"roles": [ ... ]} — every permission role followed by every
        composite role, in the authentication API's expected order.
    """
    try:
        # All fine-grained permission roles, in definition order.
        permission_roles = [
            const.ROLE_WS_ROLE_WS_REFERENCE,
            const.ROLE_WS_ROLE_WS_NAME_UPDATE,
            const.ROLE_WS_ROLE_WS_CI_UPDATE,
            const.ROLE_WS_ROLE_WS_CD_UPDATE,
            const.ROLE_WS_ROLE_WS_DELETE,
            const.ROLE_WS_ROLE_OWNER_ROLE_SETTING,
            const.ROLE_WS_ROLE_MEMBER_ADD,
            const.ROLE_WS_ROLE_MEMBER_ROLE_UPDATE,
            const.ROLE_WS_ROLE_CI_PIPELINE_RESULT,
            const.ROLE_WS_ROLE_MANIFEST_UPLOAD,
            const.ROLE_WS_ROLE_MANIFEST_SETTING,
            const.ROLE_WS_ROLE_CD_EXECUTE,
            const.ROLE_WS_ROLE_CD_EXECUTE_RESULT,
        ]

        # Composite (user-facing) roles and the permission roles each bundles.
        composite_roles = [
            # The owner gets every permission role.
            (const.ROLE_WS_OWNER, permission_roles),
            (const.ROLE_WS_MANAGER, [
                const.ROLE_WS_ROLE_WS_REFERENCE,
                const.ROLE_WS_ROLE_WS_NAME_UPDATE,
                const.ROLE_WS_ROLE_WS_CI_UPDATE,
                const.ROLE_WS_ROLE_WS_CD_UPDATE,
                const.ROLE_WS_ROLE_MEMBER_ADD,
                const.ROLE_WS_ROLE_MEMBER_ROLE_UPDATE,
                const.ROLE_WS_ROLE_CI_PIPELINE_RESULT,
                const.ROLE_WS_ROLE_MANIFEST_UPLOAD,
                const.ROLE_WS_ROLE_MANIFEST_SETTING,
                const.ROLE_WS_ROLE_CD_EXECUTE,
                const.ROLE_WS_ROLE_CD_EXECUTE_RESULT,
            ]),
            (const.ROLE_WS_MEMBER_MG, [
                const.ROLE_WS_ROLE_WS_REFERENCE,
                const.ROLE_WS_ROLE_MEMBER_ADD,
                const.ROLE_WS_ROLE_MEMBER_ROLE_UPDATE,
            ]),
            (const.ROLE_WS_CI_SETTING, [
                const.ROLE_WS_ROLE_WS_REFERENCE,
                const.ROLE_WS_ROLE_WS_CI_UPDATE,
                const.ROLE_WS_ROLE_CI_PIPELINE_RESULT,
                const.ROLE_WS_ROLE_MANIFEST_UPLOAD,
            ]),
            (const.ROLE_WS_CI_RESULT, [
                const.ROLE_WS_ROLE_WS_REFERENCE,
                const.ROLE_WS_ROLE_CI_PIPELINE_RESULT,
            ]),
            (const.ROLE_WS_CD_SETTING, [
                const.ROLE_WS_ROLE_WS_REFERENCE,
                const.ROLE_WS_ROLE_WS_CD_UPDATE,
                const.ROLE_WS_ROLE_MANIFEST_SETTING,
                const.ROLE_WS_ROLE_CD_EXECUTE_RESULT,
            ]),
            (const.ROLE_WS_CD_EXECUTE, [
                const.ROLE_WS_ROLE_WS_REFERENCE,
                const.ROLE_WS_ROLE_CD_EXECUTE,
                const.ROLE_WS_ROLE_CD_EXECUTE_RESULT,
            ]),
            (const.ROLE_WS_CD_RESULT, [
                const.ROLE_WS_ROLE_WS_REFERENCE,
                const.ROLE_WS_ROLE_CD_EXECUTE_RESULT,
            ]),
        ]

        # Permission roles first (empty composite_roles), then composites —
        # same order and content as the previous hand-written literal.
        roles = [_role_entry(r, workspace_id) for r in permission_roles]
        roles.extend(_role_entry(r, workspace_id, members) for r, members in composite_roles)

        return {"roles": roles}

    except common.UserException as e:
        globals.logger.debug(e.args)
        raise
    except Exception as e:
        globals.logger.debug(e.args)
        raise
def get_workspace_list():
    """Get the list of workspaces visible to the current user.

    Fetches all workspace records from the resource service, then filters
    them down to the ones the logged-in user's roles grant reference
    permission for, attaching the user's display roles and the member
    count of each workspace.

    Returns:
        Response: HTTP Response
    """
    app_name = "ワークスペース情報:"
    exec_stat = "一覧取得"
    error_detail = ""

    try:
        globals.logger.debug('#' * 50)
        globals.logger.debug('CALL {}'.format(inspect.currentframe().f_code.co_name))
        globals.logger.debug('#' * 50)

        # HTTP headers
        post_headers = {
            'Content-Type': 'application/json',
        }

        # get workspace info from the resource service
        api_url = "{}://{}:{}/workspace".format(os.environ['EPOCH_RS_WORKSPACE_PROTOCOL'],
                                                os.environ['EPOCH_RS_WORKSPACE_HOST'],
                                                os.environ['EPOCH_RS_WORKSPACE_PORT'])
        response = requests.get(api_url, headers=post_headers)

        # get the id of the requesting user
        user_id = common.get_current_user(request.headers)

        # get the user's client-role info from the authentication API
        epai_api_url = "{}://{}:{}/{}/user/{}/roles/epoch-system".format(os.environ['EPOCH_EPAI_API_PROTOCOL'],
                                                                         os.environ['EPOCH_EPAI_API_HOST'],
                                                                         os.environ['EPOCH_EPAI_API_PORT'],
                                                                         os.environ["EPOCH_EPAI_REALM_NAME"],
                                                                         user_id)
        epai_resp_user_role = requests.get(epai_api_url, headers=post_headers)

        rows = []
        if response.status_code == 200 and common.is_json_format(response.text) \
        and epai_resp_user_role.status_code == 200 and common.is_json_format(epai_resp_user_role.text):
            user_roles = json.loads(epai_resp_user_role.text)

            # Edit and return only the necessary part of the acquired information
            ret = json.loads(response.text)
            for data_row in ret["rows"]:
                # Check if the logged-in user's role grants read permission
                # for this workspace
                find = False
                roles = []
                for user_role in user_roles["rows"]:
                    if user_role["name"] == const.ROLE_WS_ROLE_WS_REFERENCE[0].format(data_row["workspace_id"]):
                        find = True
                        # get the client-role display name
                        epai_api_url = "{}://{}:{}/{}/client/epoch-system/role/{}".format(os.environ['EPOCH_EPAI_API_PROTOCOL'],
                                                                                          os.environ['EPOCH_EPAI_API_HOST'],
                                                                                          os.environ['EPOCH_EPAI_API_PORT'],
                                                                                          os.environ["EPOCH_EPAI_REALM_NAME"],
                                                                                          user_role["name"])
                        epai_resp_role_disp_name = requests.get(epai_api_url, headers=post_headers)
                        role_info = json.loads(epai_resp_role_disp_name.text)["rows"]
                        globals.logger.debug("role_info:{}".format(role_info))

                        disp_row = {
                            "id": user_role["name"],
                            "name": "",
                        }
                        # If a display attribute exists, resolve it through
                        # multi-language conversion.
                        # NOTE(review): role_info is taken from ["rows"]; if the API
                        # returns a list here, `"attributes" in role_info` tests list
                        # membership, not dict keys — verify the response shape.
                        if "attributes" in role_info:
                            if "display" in role_info["attributes"]:
                                disp_row["name"] = multi_lang.get_text(role_info["attributes"]["display"], role_info["attributes"]["display_default"])
                        roles.append(disp_row)
                        break

                # Display in the list only with the reference role present
                if find:
                    # get the number of members holding the reference role
                    epai_api_url = "{}://{}:{}/{}/client/epoch-system/roles/{}/users".format(os.environ['EPOCH_EPAI_API_PROTOCOL'],
                                                                                             os.environ['EPOCH_EPAI_API_HOST'],
                                                                                             os.environ['EPOCH_EPAI_API_PORT'],
                                                                                             os.environ["EPOCH_EPAI_REALM_NAME"],
                                                                                             const.ROLE_WS_ROLE_WS_REFERENCE[0].format(data_row["workspace_id"]))
                    epai_resp_role_users = requests.get(epai_api_url, headers=post_headers)
                    role_users = json.loads(epai_resp_role_users.text)

                    # Return value JSON formatting
                    row = {
                        "workspace_id": data_row["workspace_id"],
                        "workspace_name": data_row["common"]["name"],
                        "roles": roles,
                        "members": len(role_users["rows"]),
                        "workspace_remarks": data_row["common"]["note"],
                        "update_at": data_row["update_at"],
                    }
                    rows.append(row)

        elif not response.status_code == 404:
            # Other than 404 is an error; 404 means no records and is returned
            # as an empty list (not an error)
            raise Exception('{} Error:{}'.format(inspect.currentframe().f_code.co_name, response.status_code))

        return jsonify({"result": "200", "rows": rows}), 200

    except common.UserException as e:
        return common.server_error_to_message(e, app_name + exec_stat, error_detail)
    except Exception as e:
        return common.server_error_to_message(e, app_name + exec_stat, error_detail)
def get_workspace(workspace_id):
    """Get one workspace record.

    Fetches the workspace from the resource service and masks credential
    fields the current user lacks the matching update permission for.

    Args:
        workspace_id (int): workspace ID

    Returns:
        Response: HTTP Response
    """
    app_name = "ワークスペース情報:"
    exec_stat = "取得"
    error_detail = ""

    try:
        globals.logger.debug('#' * 50)
        globals.logger.debug('CALL {}'.format(inspect.currentframe().f_code.co_name))
        globals.logger.debug('#' * 50)

        # HTTP headers
        post_headers = {
            'Content-Type': 'application/json',
        }

        # GET the workspace from the resource service
        api_url = "{}://{}:{}/workspace/{}".format(os.environ['EPOCH_RS_WORKSPACE_PROTOCOL'],
                                                   os.environ['EPOCH_RS_WORKSPACE_HOST'],
                                                   os.environ['EPOCH_RS_WORKSPACE_PORT'],
                                                   workspace_id)
        response = requests.get(api_url, headers=post_headers)

        if response.status_code == 200 and common.is_json_format(response.text):
            ret = json.loads(response.text)
            rows = ret["rows"]
            ret_status = response.status_code

            # Get the role values of the current user
            user = api_service_current.user_get()
            roles = user["composite_roles"]

            # Mask secure items the user is not permitted to see.
            # Without workspace update (CI) permission:
            if const.ROLE_WS_ROLE_WS_CI_UPDATE[0].format(workspace_id) not in roles:
                rows[0]["ci_config"]["pipelines_common"]["git_repositry"]["password"] = common.str_mask(rows[0]["ci_config"]["pipelines_common"]["git_repositry"]["password"])
                rows[0]["ci_config"]["pipelines_common"]["git_repositry"]["token"] = common.str_mask(rows[0]["ci_config"]["pipelines_common"]["git_repositry"]["token"])
                rows[0]["ci_config"]["pipelines_common"]["container_registry"]["password"] = common.str_mask(rows[0]["ci_config"]["pipelines_common"]["container_registry"]["password"])

            # Without workspace update (CD) permission:
            # Bug fix: these masks previously read the ci_config values while
            # overwriting the cd_config fields, so the CD credentials were
            # replaced by masked CI credentials instead of being masked
            # themselves. Mask the cd_config's own values.
            if const.ROLE_WS_ROLE_WS_CD_UPDATE[0].format(workspace_id) not in roles:
                rows[0]["cd_config"]["environments_common"]["git_repositry"]["password"] = common.str_mask(rows[0]["cd_config"]["environments_common"]["git_repositry"]["password"])
                rows[0]["cd_config"]["environments_common"]["git_repositry"]["token"] = common.str_mask(rows[0]["cd_config"]["environments_common"]["git_repositry"]["token"])

        elif response.status_code == 404:
            # Not found is returned as an empty result, not an error
            rows = []
            ret_status = response.status_code
        else:
            if response.status_code == 500 and common.is_json_format(response.text):
                ret = json.loads(response.text)
                # Propagate any error detail supplied by the resource service
                if ret["errorDetail"] is not None:
                    error_detail = ret["errorDetail"]
            raise common.UserException("{} Error get workspace db status:{}".format(inspect.currentframe().f_code.co_name, response.status_code))

        return jsonify({"result": ret_status, "rows": rows}), ret_status

    except common.UserException as e:
        return common.server_error_to_message(e, app_name + exec_stat, error_detail)
    except Exception as e:
        return common.server_error_to_message(e, app_name + exec_stat, error_detail)
def put_workspace(workspace_id):
    """Update workspace information (PUT).

    Reads the current record, merges in only the parts of the request body
    that the caller's roles permit them to change, and writes the merged
    record back to the resource service.

    Args:
        workspace_id (int): workspace ID

    Returns:
        Response: HTTP Response
    """
    app_name = multi_lang.get_text("EP020-0003", "ワークスペース情報:")
    exec_stat = multi_lang.get_text("EP020-0016", "更新")
    error_detail = ""
    return_code = 500

    try:
        globals.logger.debug('#' * 50)
        globals.logger.debug('CALL {}'.format(inspect.currentframe().f_code.co_name))
        globals.logger.debug('#' * 50)

        # HTTP headers
        post_headers = {
            'Content-Type': 'application/json',
        }

        # Temporarily save the request body as the candidate update
        req_data = request.json.copy()

        # Get the workspace record as it is before the change
        api_url = "{}://{}:{}/workspace/{}".format(os.environ['EPOCH_RS_WORKSPACE_PROTOCOL'],
                                                   os.environ['EPOCH_RS_WORKSPACE_HOST'],
                                                   os.environ['EPOCH_RS_WORKSPACE_PORT'],
                                                   workspace_id)
        response = requests.get(api_url)

        # Returns an error if not normal
        if response.status_code != 200:
            if common.is_json_format(response.text):
                ret = json.loads(response.text)
                # Propagate any error detail supplied by the resource service
                if ret["errorDetail"] is not None:
                    error_detail = ret["errorDetail"]
            raise common.UserException("{} Error put workspace db status:{}".format(inspect.currentframe().f_code.co_name, response.status_code))

        rows = json.loads(response.text)
        row = rows["rows"][0]

        # Get the role values of the current user
        # NOTE(review): "cuurent" typo below is in a runtime log string; left as-is.
        user = api_service_current.user_get()
        globals.logger.debug(f'cuurent user:{user}')
        roles = user["composite_roles"]

        # Based on the stored record, apply the update information sent from
        # the screen, gated per permission.
        #
        # If the caller may update the workspace name:
        #
        if const.ROLE_WS_ROLE_WS_NAME_UPDATE[0].format(workspace_id) in roles:
            row["common"] = req_data["common"]

        #
        # If the caller may update the workspace CI settings:
        #
        if const.ROLE_WS_ROLE_WS_CI_UPDATE[0].format(workspace_id) in roles:
            row["ci_config"]["pipelines_common"] = req_data["ci_config"]["pipelines_common"]
            row["ci_config"]["pipelines"] = req_data["ci_config"]["pipelines"]

        #
        # Update of the Manifest parameters
        #
        if const.ROLE_WS_ROLE_MANIFEST_SETTING[0].format(workspace_id) in roles:
            # With manifest-setting permission, replace the parameter contents
            row["ci_config"]["environments"] = req_data["ci_config"]["environments"]
        elif const.ROLE_WS_ROLE_WS_CD_UPDATE[0].format(workspace_id) in roles:
            # Without manifest permission but with workspace update (CD):
            # only add/remove environments, never change existing contents.
            save_env = []
            # Keep an existing environment unchanged when its ID is still in
            # the request; drop it when it is not; add request entries whose
            # ID is new.
            for src_env in row["ci_config"]["environments"]:
                # Check if the ID still exists in the request
                if common.search_array_dict(req_data["ci_config"]["environments"], "environment_id", src_env["environment_id"]) is not None:
                    save_env.append(src_env)

            for dest_env in req_data["ci_config"]["environments"]:
                # Add the entry when its ID does not exist in the stored record
                if common.search_array_dict(row["ci_config"]["environments"], "environment_id", dest_env["environment_id"]) is None:
                    save_env.append(dest_env)

            row["ci_config"]["environments"] = save_env

        #
        # Update of the CD settings
        #
        # Determine the user/token for the IaC repository
        if req_data["cd_config"]["environments_common"]["git_repositry"]["account_select"] == "applicationCode":
            # Same as the application code repository
            iac_git_user = row["ci_config"]["pipelines_common"]["git_repositry"]["user"]
            iac_git_token = row["ci_config"]["pipelines_common"]["git_repositry"]["token"]
        else:
            # Separately entered: use the input values
            iac_git_user = req_data["cd_config"]["environments_common"]["git_repositry"]["user"]
            iac_git_token = req_data["cd_config"]["environments_common"]["git_repositry"]["token"]

        if const.ROLE_WS_ROLE_WS_CD_UPDATE[0].format(workspace_id) in roles and const.ROLE_WS_ROLE_MEMBER_ROLE_UPDATE[0].format(workspace_id) in roles:
            # With both workspace update (CD) and member update permission,
            # update every CD item
            row["cd_config"] = req_data["cd_config"]
            row["cd_config"]["environments_common"]["git_repositry"]["user"] = iac_git_user
            # NOTE(review): both password and token are set to the token value
            # here and below — presumably intentional (token used as password);
            # confirm against the CD consumer.
            row["cd_config"]["environments_common"]["git_repositry"]["password"] = iac_git_token
            row["cd_config"]["environments_common"]["git_repositry"]["token"] = iac_git_token
        elif const.ROLE_WS_ROLE_WS_CD_UPDATE[0].format(workspace_id) in roles:
            # With workspace update (CD) but without member update permission,
            # update everything except the deploy-permission items.
            # Update of the common parts
            row["cd_config"]["system_config"] = req_data["cd_config"]["system_config"]
            row["cd_config"]["environments_common"] = req_data["cd_config"]["environments_common"]
            row["cd_config"]["environments_common"]["git_repositry"]["user"] = iac_git_user
            row["cd_config"]["environments_common"]["git_repositry"]["password"] = iac_git_token
            row["cd_config"]["environments_common"]["git_repositry"]["token"] = iac_git_token

            # Update of the per-environment items
            environments_new =[]
            for env_before in row["cd_config"]["environments"]:
                # Get the updated environment information for this ID
                env_new = common.search_array_dict(req_data["cd_config"]["environments"], "environment_id", env_before["environment_id"])
                if env_new is not None:
                    # Update all items except deploy_member (cd_exec_users)
                    env_new = env_new.copy()
                    env_new["cd_exec_users"] = env_before["cd_exec_users"]
                    environments_new.append(env_new)
                else:
                    # The environment was deleted: not carried over (disappears)
                    pass

            for env in req_data["cd_config"]["environments"]:
                if common.search_array_dict(row["cd_config"]["environments"], "environment_id", env["environment_id"]) is None:
                    # New environment: take all items except deploy_member,
                    # which is set to its default value
                    env_new = env.copy()
                    env_new["cd_exec_users"] = {"user_select":"all", "user_id": ""}
                    environments_new.append(env_new)

            row["cd_config"]["environments"] = environments_new

        elif const.ROLE_WS_ROLE_MEMBER_ROLE_UPDATE[0].format(workspace_id) in roles:
            # With member update permission only, update just the
            # deploy-permission item of each environment.
            # NOTE(review): env_new may be None here if an environment was
            # removed from the request — that would raise TypeError; confirm
            # the intended behavior.
            for env in row["cd_config"]["environments"]:
                env_new = common.search_array_dict(req_data["cd_config"]["environments"], "environment_id", env["environment_id"])
                env["cd_exec_users"] = env_new["cd_exec_users"]
        else:
            # With neither permission, leave the CD settings unchanged
            pass

        post_data = row

        # PUT the merged record back to the resource service
        api_url = "{}://{}:{}/workspace/{}".format(os.environ['EPOCH_RS_WORKSPACE_PROTOCOL'],
                                                   os.environ['EPOCH_RS_WORKSPACE_HOST'],
                                                   os.environ['EPOCH_RS_WORKSPACE_PORT'],
                                                   workspace_id)
        response = requests.put(api_url, headers=post_headers, data=json.dumps(post_data))

        if response.status_code == 200:
            # On success the resource service echoes the record
            ret = json.loads(response.text)
            rows = ret['rows']
        elif response.status_code == 404:
            # 404 from the PUT is treated as an optimistic-lock conflict
            return_code = 400
            error_detail = multi_lang.get_text("EP000-0023", "対象の情報(workspace)が他で更新されたため、更新できません\n画面更新後、再度情報を入力・選択して実行してください", "workspace")
            raise common.UserException("{} Exclusive check error".format(inspect.currentframe().f_code.co_name))
        else:
            if common.is_json_format(response.text):
                ret = json.loads(response.text)
                # Propagate any error detail supplied by the resource service
                if ret["errorDetail"] is not None:
                    error_detail = ret["errorDetail"]
            raise common.UserException("{} Error put workspace db status:{}".format(inspect.currentframe().f_code.co_name, response.status_code))

        ret_status = response.status_code

        return jsonify({"result": ret_status}), ret_status

    except common.UserException as e:
        return common.user_error_to_message(e, app_name + exec_stat, error_detail, return_code)
    except Exception as e:
        return common.server_error_to_message(e, app_name + exec_stat, error_detail)
def patch_workspace(workspace_id):
    """Partially update workspace information (PATCH).

    Forwards the JSON request body as-is to the resource-service
    workspace PATCH endpoint.

    Args:
        workspace_id (int): workspace ID

    Returns:
        Response: HTTP Response
    """
    app_name = multi_lang.get_text("EP020-0003", "ワークスペース情報:")
    exec_stat = multi_lang.get_text("EP020-0016", "更新")
    error_detail = ""

    try:
        globals.logger.debug('#' * 50)
        globals.logger.debug('CALL {}'.format(inspect.currentframe().f_code.co_name))
        globals.logger.debug('#' * 50)

        # HTTP headers
        post_headers = {
            'Content-Type': 'application/json',
        }

        # Forward the request body unchanged
        post_data = request.json.copy()

        # workspace patch send
        api_url = "{}://{}:{}/workspace/{}".format(os.environ['EPOCH_RS_WORKSPACE_PROTOCOL'],
                                                   os.environ['EPOCH_RS_WORKSPACE_HOST'],
                                                   os.environ['EPOCH_RS_WORKSPACE_PORT'],
                                                   workspace_id)
        response = requests.patch(api_url, headers=post_headers, data=json.dumps(post_data))

        if response.status_code == 200:
            # Parse the echoed record; only the status is returned to the caller
            ret = json.loads(response.text)
            rows = ret['rows']
        else:
            if common.is_json_format(response.text):
                ret = json.loads(response.text)
                # Propagate any error detail supplied by the resource service
                if ret["errorDetail"] is not None:
                    error_detail = ret["errorDetail"]
            # Bug fix: the message said "put" although this is the PATCH handler
            raise common.UserException("{} Error patch workspace db status:{}".format(inspect.currentframe().f_code.co_name, response.status_code))

        ret_status = response.status_code

        return jsonify({"result": ret_status}), ret_status

    except common.UserException as e:
        return common.server_error_to_message(e, app_name + exec_stat, error_detail)
    except Exception as e:
        return common.server_error_to_message(e, app_name + exec_stat, error_detail)
def post_pod(workspace_id):
    """Provision the workspace pods.

    Calls the control APIs in order — workspace, argocd, it-automation,
    then it-automation/settings — forwarding the request body to each;
    any non-200 response aborts the sequence.

    Args:
        workspace_id (int): workspace ID

    Returns:
        Response: HTTP Response
    """
    app_name = "ワークスペース情報:"
    exec_stat = "作成"
    error_detail = ""

    try:
        globals.logger.debug('#' * 50)
        globals.logger.debug('CALL {}'.format(inspect.currentframe().f_code.co_name))
        globals.logger.debug('#' * 50)

        # HTTP headers
        post_headers = {
            'Content-Type': 'application/json',
        }

        # Forward the request body unchanged to every control API
        post_data = request.json.copy()

        # (env-var prefix, URL path, label used in log and error messages);
        # the four copy-paste call sequences are collapsed into this table.
        targets = [
            ('EPOCH_CONTROL_WORKSPACE', 'workspace/{}'.format(workspace_id), 'workspace'),
            ('EPOCH_CONTROL_ARGOCD', 'workspace/{}/argocd'.format(workspace_id), 'argocd'),
            ('EPOCH_CONTROL_ITA', 'workspace/{}/it-automation'.format(workspace_id), 'it-automation'),
            ('EPOCH_CONTROL_ITA', 'workspace/{}/it-automation/settings'.format(workspace_id), 'it-automation/settings'),
        ]

        for env_prefix, path, label in targets:
            api_url = "{}://{}:{}/{}".format(os.environ[env_prefix + '_PROTOCOL'],
                                             os.environ[env_prefix + '_HOST'],
                                             os.environ[env_prefix + '_PORT'],
                                             path)
            response = requests.post(api_url, headers=post_headers, data=json.dumps(post_data))
            globals.logger.debug("post {} response:{}".format(label, response.text))

            # Any failure aborts the remaining provisioning steps
            if response.status_code != 200:
                error_detail = '{} post処理に失敗しました'.format(label)
                raise common.UserException(error_detail)

        ret_status = 200

        return jsonify({"result": ret_status}), ret_status

    except common.UserException as e:
        return common.server_error_to_message(e, app_name + exec_stat, error_detail)
    except Exception as e:
        return common.server_error_to_message(e, app_name + exec_stat, error_detail)
| 46.365622 | 184 | 0.543473 |
from flask import Flask, request, abort, jsonify, render_template
from datetime import datetime
from datetime import timedelta, timezone
import inspect
import os
import json
import tempfile
import subprocess
import time
import re
from urllib.parse import urlparse
import base64
import requests
from requests.auth import HTTPBasicAuth
import traceback
import globals
import common
import const
import multi_lang
import api_service_current
# NOTE(review): this re-initialization duplicates the identical block earlier in
# the file — this region looks like a comment-stripped copy of the same source
# (likely a dataset/merge artifact); confirm the duplication is intentional.
app = Flask(__name__)
app.config.from_envvar('CONFIG_API_SERVICE_PATH')
globals.init(app)
def create_workspace():
    """Create a new workspace record and set up its roles and auth infra.

    Forwards the request JSON to the workspace resource service (RS); on
    success, creates the per-workspace client roles (granting the creator
    the owner role) and configures the auth-infra clients/routes.

    Returns:
        (flask.Response, int): JSON body {"result", "rows"} and HTTP status.
    """
    app_name = "ワークスペース情報:"
    exec_stat = "作成"
    error_detail = ""
    try:
        globals.logger.debug('#' * 50)
        globals.logger.debug('CALL {}'.format(inspect.currentframe().f_code.co_name))
        globals.logger.debug('#' * 50)
        # Header info.
        post_headers = {
            'Content-Type': 'application/json',
        }
        # Pass the request body through to the resource service unchanged.
        post_data = request.json.copy()
        user_id = common.get_current_user(request.headers)
        # Workspace resource-service endpoint.
        api_url = "{}://{}:{}/workspace".format(os.environ['EPOCH_RS_WORKSPACE_PROTOCOL'],
                                                os.environ['EPOCH_RS_WORKSPACE_HOST'],
                                                os.environ['EPOCH_RS_WORKSPACE_PORT'])
        response = requests.post(api_url, headers=post_headers, data=json.dumps(post_data))
        if response.status_code == 200:
            ret = json.loads(response.text)
            rows = ret['rows']
        else:
            # On failure, surface the service's errorDetail when the body is JSON.
            if common.is_json_format(response.text):
                ret = json.loads(response.text)
                if ret["errorDetail"] is not None:
                    error_detail = ret["errorDetail"]
            raise common.UserException("{} Error post workspace db status:{}".format(inspect.currentframe().f_code.co_name, response.status_code))
        workspace_id=rows[0]["workspace_id"]
        # Create the per-workspace roles and grant the creator the owner role.
        create_workspace_setting_roles(workspace_id, user_id)
        # Configure auth-infra reverse-proxy clients/routes for the workspace.
        create_workspace_setting_auth_infra(workspace_id, user_id)
        ret_status = response.status_code
        return jsonify({"result": ret_status, "rows": rows}), ret_status
    except common.UserException as e:
        return common.server_error_to_message(e, app_name + exec_stat, error_detail)
    except Exception as e:
        return common.server_error_to_message(e, app_name + exec_stat, error_detail)
def create_workspace_setting_roles(workspace_id, user_id):
    """Register the workspace's client roles and grant the creator ownership.

    Args:
        workspace_id: id of the newly created workspace (used to namespace roles).
        user_id: id of the creating user; receives the per-workspace owner role.
    Raises:
        Exception: re-raised after logging if a request cannot be sent.
    """
    try:
        post_headers = {
            'Content-Type': 'application/json',
        }
        # Endpoint that creates client roles on the epoch-system client.
        api_url = "{}://{}:{}/{}/client/epoch-system/role".format(os.environ['EPOCH_EPAI_API_PROTOCOL'],
                                                            os.environ['EPOCH_EPAI_API_HOST'],
                                                            os.environ['EPOCH_EPAI_API_PORT'],
                                                            os.environ["EPOCH_EPAI_REALM_NAME"]
                                                        )
        post_data = set_roles(workspace_id)
        # NOTE(review): the response status is not checked here — a failed
        # role registration is silently ignored. Confirm this is intended.
        response = requests.post(api_url, headers=post_headers, data=json.dumps(post_data))
        # Grant the creating user the per-workspace owner role.
        api_url = "{}://{}:{}/{}/user/{}/roles/epoch-system".format(os.environ['EPOCH_EPAI_API_PROTOCOL'],
                                                            os.environ['EPOCH_EPAI_API_HOST'],
                                                            os.environ['EPOCH_EPAI_API_PORT'],
                                                            os.environ["EPOCH_EPAI_REALM_NAME"],
                                                            user_id,
                                                        )
        post_data = {
            "roles" : [
                {
                    "name": const.ROLE_WS_OWNER[0].format(workspace_id),
                    "enabled": True,
                }
            ]
        }
        # NOTE(review): response status also unchecked here.
        response = requests.post(api_url, headers=post_headers, data=json.dumps(post_data))
    except common.UserException as e:
        globals.logger.debug(e.args)
        raise
    except Exception as e:
        globals.logger.debug(e.args)
        raise
def create_workspace_setting_auth_infra(workspace_id, user_id):
    """Configure auth-infra reverse-proxy clients and routes for a workspace.

    Registers one proxied client per workspace-scoped tool (ITA, ArgoCD,
    SonarQube), adds the workspace route on the epoch-system client, then
    reloads the auth-infra settings.

    Args:
        workspace_id: id of the new workspace.
        user_id: id of the creating user (not used in this function).
    Raises:
        common.UserException: when any auth-infra API call returns non-200.
    """
    try:
        post_headers = {
            'Content-Type': 'application/json',
        }
        # Base URL of the auth-infra (EPAI) API.
        api_url_epai = "{}://{}:{}/".format(os.environ["EPOCH_EPAI_API_PROTOCOL"],
                                            os.environ["EPOCH_EPAI_API_HOST"],
                                            os.environ["EPOCH_EPAI_API_PORT"])
        namespace = common.get_namespace_name(workspace_id)
        pipeline_name = common.get_pipeline_name(workspace_id)
        # One client definition per tool; each is gated by the role claim
        # (require_claim) a user must hold to reach the proxied backend.
        clients = [
            {
                "client_id" : 'epoch-ws-{}-ita'.format(workspace_id),
                "client_host" : os.environ["EPOCH_EPAI_HOST"],
                "client_protocol" : "https",
                "conf_template" : "epoch-ws-ita-template.conf",
                "backend_url" : "http://it-automation.{}.svc:8084/".format(namespace),
                "require_claim" : const.ROLE_WS_ROLE_CD_EXECUTE_RESULT[0].format(workspace_id),
                "mapping_client_id" : "epoch-system",
            },
            {
                "client_id" : 'epoch-ws-{}-argocd'.format(workspace_id),
                "client_host" : os.environ["EPOCH_EPAI_HOST"],
                "client_protocol" : "https",
                "conf_template" : "epoch-ws-argocd-template.conf",
                "backend_url" : "https://argocd-server.{}.svc/".format(namespace),
                "require_claim" : const.ROLE_WS_ROLE_CD_EXECUTE_RESULT[0].format(workspace_id),
                "mapping_client_id" : "epoch-system",
            },
            {
                "client_id" : 'epoch-ws-{}-sonarqube'.format(workspace_id),
                "client_host" : os.environ["EPOCH_EPAI_HOST"],
                "client_protocol" : "https",
                "conf_template" : "epoch-ws-sonarqube-template.conf",
                "backend_url" : "http://sonarqube.{}.svc:9000/".format(pipeline_name),
                "require_claim" : const.ROLE_WS_ROLE_CI_PIPELINE_RESULT[0].format(workspace_id),
                "mapping_client_id" : "epoch-system",
            },
        ]
        # Register each client; abort on the first failure.
        exec_stat = "認証基盤 client設定"
        for client in clients:
            response = requests.post("{}{}/{}/{}".format(api_url_epai, 'settings', os.environ["EPOCH_EPAI_REALM_NAME"], 'clients'), headers=post_headers, data=json.dumps(client))
            if response.status_code != 200:
                globals.logger.debug(response.text)
                error_detail = "認証基盤 client設定に失敗しました。 {}".format(response.status_code)
                raise common.UserException(error_detail)
        # Register the workspace route on the epoch-system client.
        exec_stat = "認証基盤 route設定"
        post_data = {
            "route_id" : namespace,
            "template_file" : "epoch-system-ws-template.conf",
            "render_params" : {
                "workspace_id" : workspace_id,
            }
        }
        response = requests.post(
            "{}settings/{}/clients/epoch-system/route".format(api_url_epai, os.environ["EPOCH_EPAI_REALM_NAME"]),
            headers=post_headers,
            data=json.dumps(post_data)
        )
        if response.status_code != 200:
            globals.logger.debug(response.text)
            error_detail = "認証基盤 route設定に失敗しました。 {}".format(response.status_code)
            raise common.UserException(error_detail)
        # Reload the auth-infra so the new clients/route take effect.
        exec_stat = "認証基盤 設定読み込み"
        response = requests.put("{}{}".format(api_url_epai, 'apply_settings'), headers=post_headers, data="{}")
        if response.status_code != 200:
            globals.logger.debug(response.text)
            error_detail = "認証基盤 設定読み込みに失敗しました。 {}".format(response.status_code)
            raise common.UserException(error_detail)
    except common.UserException as e:
        globals.logger.debug(e.args)
        raise
    except Exception as e:
        globals.logger.debug(e.args)
        raise
def set_roles(workspace_id):
    """Build the role-definition JSON for a workspace.

    Produces one client role per fine-grained permission plus the composite
    roles (owner, manager, member-management, CI/CD setting/result/execute)
    that bundle them. Every role name is namespaced per workspace via
    ``const.ROLE_WS_*[0].format(workspace_id)``.

    Args:
        workspace_id: id used to namespace every generated role name.
    Returns:
        dict: {"roles": [...]} ready to POST to the auth-infra role API.
    """
    try:
        def _role(role_const, composite_consts=()):
            # Each const.ROLE_WS_* entry is a tuple of
            # (name format string, display text id, display default text).
            return {
                "name": role_const[0].format(workspace_id),
                "composite_roles": [c[0].format(workspace_id) for c in composite_consts],
                "attributes": {
                    "display": [ role_const[1] ],
                    "display_default": [ role_const[2] ],
                }
            }

        # Fine-grained permission roles (no composites). Order matters: it is
        # preserved in the output and reused for the owner composite below.
        permission_consts = [
            const.ROLE_WS_ROLE_WS_REFERENCE,
            const.ROLE_WS_ROLE_WS_NAME_UPDATE,
            const.ROLE_WS_ROLE_WS_CI_UPDATE,
            const.ROLE_WS_ROLE_WS_CD_UPDATE,
            const.ROLE_WS_ROLE_WS_DELETE,
            const.ROLE_WS_ROLE_OWNER_ROLE_SETTING,
            const.ROLE_WS_ROLE_MEMBER_ADD,
            const.ROLE_WS_ROLE_MEMBER_ROLE_UPDATE,
            const.ROLE_WS_ROLE_CI_PIPELINE_RESULT,
            const.ROLE_WS_ROLE_MANIFEST_UPLOAD,
            const.ROLE_WS_ROLE_MANIFEST_SETTING,
            const.ROLE_WS_ROLE_CD_EXECUTE,
            const.ROLE_WS_ROLE_CD_EXECUTE_RESULT,
        ]
        roles = [_role(c) for c in permission_consts]

        # Owner: every fine-grained permission.
        roles.append(_role(const.ROLE_WS_OWNER, permission_consts))
        # Manager: everything except delete and owner-role setting.
        roles.append(_role(const.ROLE_WS_MANAGER, [
            const.ROLE_WS_ROLE_WS_REFERENCE,
            const.ROLE_WS_ROLE_WS_NAME_UPDATE,
            const.ROLE_WS_ROLE_WS_CI_UPDATE,
            const.ROLE_WS_ROLE_WS_CD_UPDATE,
            const.ROLE_WS_ROLE_MEMBER_ADD,
            const.ROLE_WS_ROLE_MEMBER_ROLE_UPDATE,
            const.ROLE_WS_ROLE_CI_PIPELINE_RESULT,
            const.ROLE_WS_ROLE_MANIFEST_UPLOAD,
            const.ROLE_WS_ROLE_MANIFEST_SETTING,
            const.ROLE_WS_ROLE_CD_EXECUTE,
            const.ROLE_WS_ROLE_CD_EXECUTE_RESULT,
        ]))
        # Member management: reference + member add/update.
        roles.append(_role(const.ROLE_WS_MEMBER_MG, [
            const.ROLE_WS_ROLE_WS_REFERENCE,
            const.ROLE_WS_ROLE_MEMBER_ADD,
            const.ROLE_WS_ROLE_MEMBER_ROLE_UPDATE,
        ]))
        # CI setting: reference + CI update/result + manifest upload.
        roles.append(_role(const.ROLE_WS_CI_SETTING, [
            const.ROLE_WS_ROLE_WS_REFERENCE,
            const.ROLE_WS_ROLE_WS_CI_UPDATE,
            const.ROLE_WS_ROLE_CI_PIPELINE_RESULT,
            const.ROLE_WS_ROLE_MANIFEST_UPLOAD,
        ]))
        # CI result: reference + pipeline result.
        roles.append(_role(const.ROLE_WS_CI_RESULT, [
            const.ROLE_WS_ROLE_WS_REFERENCE,
            const.ROLE_WS_ROLE_CI_PIPELINE_RESULT,
        ]))
        # CD setting: reference + CD update + manifest setting + CD result.
        roles.append(_role(const.ROLE_WS_CD_SETTING, [
            const.ROLE_WS_ROLE_WS_REFERENCE,
            const.ROLE_WS_ROLE_WS_CD_UPDATE,
            const.ROLE_WS_ROLE_MANIFEST_SETTING,
            const.ROLE_WS_ROLE_CD_EXECUTE_RESULT,
        ]))
        # CD execute: reference + CD execute/result.
        roles.append(_role(const.ROLE_WS_CD_EXECUTE, [
            const.ROLE_WS_ROLE_WS_REFERENCE,
            const.ROLE_WS_ROLE_CD_EXECUTE,
            const.ROLE_WS_ROLE_CD_EXECUTE_RESULT,
        ]))
        # CD result: reference + CD result.
        roles.append(_role(const.ROLE_WS_CD_RESULT, [
            const.ROLE_WS_ROLE_WS_REFERENCE,
            const.ROLE_WS_ROLE_CD_EXECUTE_RESULT,
        ]))

        return {"roles": roles}
    except common.UserException as e:
        globals.logger.debug(e.args)
        raise
    except Exception as e:
        globals.logger.debug(e.args)
        raise
def get_workspace_list():
    """List workspaces visible to the current user.

    Fetches all workspace rows from the resource service plus the current
    user's epoch-system roles, then returns only workspaces for which the
    user holds the per-workspace reference role, enriched with role display
    names and member counts.

    Returns:
        (flask.Response, int): JSON body {"result", "rows"} and HTTP 200.
    """
    app_name = "ワークスペース情報:"
    exec_stat = "一覧取得"
    error_detail = ""
    try:
        globals.logger.debug('#' * 50)
        globals.logger.debug('CALL {}'.format(inspect.currentframe().f_code.co_name))
        globals.logger.debug('#' * 50)
        # Header info.
        post_headers = {
            'Content-Type': 'application/json',
        }
        # Fetch every workspace row from the resource service.
        api_url = "{}://{}:{}/workspace".format(os.environ['EPOCH_RS_WORKSPACE_PROTOCOL'],
                                                os.environ['EPOCH_RS_WORKSPACE_HOST'],
                                                os.environ['EPOCH_RS_WORKSPACE_PORT'])
        response = requests.get(api_url, headers=post_headers)
        # Fetch the current user's epoch-system roles from the auth-infra API.
        user_id = common.get_current_user(request.headers)
        epai_api_url = "{}://{}:{}/{}/user/{}/roles/epoch-system".format(os.environ['EPOCH_EPAI_API_PROTOCOL'],
                                                                os.environ['EPOCH_EPAI_API_HOST'],
                                                                os.environ['EPOCH_EPAI_API_PORT'],
                                                                os.environ["EPOCH_EPAI_REALM_NAME"],
                                                                user_id)
        epai_resp_user_role = requests.get(epai_api_url, headers=post_headers)
        rows = []
        # Proceed only when both calls succeeded and returned JSON bodies.
        if response.status_code == 200 and common.is_json_format(response.text) \
        and epai_resp_user_role.status_code == 200 and common.is_json_format(epai_resp_user_role.text):
            user_roles = json.loads(epai_resp_user_role.text)
            ret = json.loads(response.text)
            for data_row in ret["rows"]:
                find = False
                roles = []
                # Look for the per-workspace reference role among the user's roles.
                for user_role in user_roles["rows"]:
                    if user_role["name"] == const.ROLE_WS_ROLE_WS_REFERENCE[0].format(data_row["workspace_id"]):
                        find = True
                        # クライアントロール表示名取得 get client role display name
                        epai_api_url = "{}://{}:{}/{}/client/epoch-system/role/{}".format(os.environ['EPOCH_EPAI_API_PROTOCOL'],
                                                                                os.environ['EPOCH_EPAI_API_HOST'],
                                                                                os.environ['EPOCH_EPAI_API_PORT'],
                                                                                os.environ["EPOCH_EPAI_REALM_NAME"],
                                                                                user_role["name"])
                        epai_resp_role_disp_name = requests.get(epai_api_url, headers=post_headers)
                        role_info = json.loads(epai_resp_role_disp_name.text)["rows"]
                        globals.logger.debug("role_info:{}".format(role_info))
                        disp_row = {
                            "id": user_role["name"],
                            "name": "",
                        }
                        # 表示する項目が存在した際は、多言語変換を実施して値を設定
                        # If there is an item to be displayed, perform multilingual conversion and set the value.
                        if "attributes" in role_info:
                            if "display" in role_info["attributes"]:
                                disp_row["name"] = multi_lang.get_text(role_info["attributes"]["display"], role_info["attributes"]["display_default"])
                        roles.append(disp_row)
                        break
                # 参照ロールありで一覧に表示する Display in list with reference role
                if find:
                    # メンバー数取得 get the number of members
                    epai_api_url = "{}://{}:{}/{}/client/epoch-system/roles/{}/users".format(os.environ['EPOCH_EPAI_API_PROTOCOL'],
                                                                            os.environ['EPOCH_EPAI_API_HOST'],
                                                                            os.environ['EPOCH_EPAI_API_PORT'],
                                                                            os.environ["EPOCH_EPAI_REALM_NAME"],
                                                                            const.ROLE_WS_ROLE_WS_REFERENCE[0].format(data_row["workspace_id"]))
                    epai_resp_role_users = requests.get(epai_api_url, headers=post_headers)
                    role_users = json.loads(epai_resp_role_users.text)
                    # 返り値 JSON整形 Return value JSON formatting
                    row = {
                        "workspace_id": data_row["workspace_id"],
                        "workspace_name": data_row["common"]["name"],
                        "roles": roles,
                        "members": len(role_users["rows"]),
                        "workspace_remarks": data_row["common"]["note"],
                        "update_at": data_row["update_at"],
                    }
                    rows.append(row)
        elif not response.status_code == 404:
            # 404以外の場合は、エラー、404はレコードなしで返却(エラーにはならない) If it is other than 404, it is an error, and 404 is returned without a record (it does not become an error)
            raise Exception('{} Error:{}'.format(inspect.currentframe().f_code.co_name, response.status_code))
        return jsonify({"result": "200", "rows": rows}), 200
    except common.UserException as e:
        return common.server_error_to_message(e, app_name + exec_stat, error_detail)
    except Exception as e:
        return common.server_error_to_message(e, app_name + exec_stat, error_detail)
def get_workspace(workspace_id):
    """Return a single workspace record, masking secrets the caller may not see.

    Fetches the workspace row from the resource service and, depending on the
    caller's per-workspace roles, masks the CI/CD git and registry credentials
    before returning the record.

    Args:
        workspace_id: id of the workspace to fetch.
    Returns:
        (flask.Response, int): JSON body {"result", "rows"} and HTTP status
        (404 with empty rows when the workspace does not exist).
    """
    app_name = "ワークスペース情報:"
    exec_stat = "取得"
    error_detail = ""
    try:
        # Restored: these separator strings were corrupted in the source.
        globals.logger.debug('#' * 50)
        globals.logger.debug('CALL {}'.format(inspect.currentframe().f_code.co_name))
        globals.logger.debug('#' * 50)
        # Header info.
        post_headers = {
            'Content-Type': 'application/json',
        }
        # Fetch the workspace row from the resource service.
        api_url = "{}://{}:{}/workspace/{}".format(os.environ['EPOCH_RS_WORKSPACE_PROTOCOL'],
                                                   os.environ['EPOCH_RS_WORKSPACE_HOST'],
                                                   os.environ['EPOCH_RS_WORKSPACE_PORT'],
                                                   workspace_id)
        response = requests.get(api_url, headers=post_headers)
        if response.status_code == 200 and common.is_json_format(response.text):
            ret = json.loads(response.text)
            rows = ret["rows"]
            ret_status = response.status_code
            # Get the roles of the current user to decide which secure
            # fields must be masked before returning the record.
            user = api_service_current.user_get()
            roles = user["composite_roles"]
            # Without workspace update (CI) permission, mask CI credentials.
            if const.ROLE_WS_ROLE_WS_CI_UPDATE[0].format(workspace_id) not in roles:
                rows[0]["ci_config"]["pipelines_common"]["git_repositry"]["password"] = common.str_mask(rows[0]["ci_config"]["pipelines_common"]["git_repositry"]["password"])
                rows[0]["ci_config"]["pipelines_common"]["git_repositry"]["token"] = common.str_mask(rows[0]["ci_config"]["pipelines_common"]["git_repositry"]["token"])
                rows[0]["ci_config"]["pipelines_common"]["container_registry"]["password"] = common.str_mask(rows[0]["ci_config"]["pipelines_common"]["container_registry"]["password"])
            # Without workspace update (CD) permission, mask CD credentials.
            # Bug fix: these previously masked using the CI config's values;
            # mask the CD config's own password/token instead.
            if const.ROLE_WS_ROLE_WS_CD_UPDATE[0].format(workspace_id) not in roles:
                rows[0]["cd_config"]["environments_common"]["git_repositry"]["password"] = common.str_mask(rows[0]["cd_config"]["environments_common"]["git_repositry"]["password"])
                rows[0]["cd_config"]["environments_common"]["git_repositry"]["token"] = common.str_mask(rows[0]["cd_config"]["environments_common"]["git_repositry"]["token"])
        elif response.status_code == 404:
            # Not found: return an empty result set with the 404 status.
            rows = []
            ret_status = response.status_code
        else:
            if response.status_code == 500 and common.is_json_format(response.text):
                # When the body is JSON, surface its errorDetail.
                ret = json.loads(response.text)
                if ret["errorDetail"] is not None:
                    error_detail = ret["errorDetail"]
            raise common.UserException("{} Error get workspace db status:{}".format(inspect.currentframe().f_code.co_name, response.status_code))
        return jsonify({"result": ret_status, "rows": rows}), ret_status
    except common.UserException as e:
        return common.server_error_to_message(e, app_name + exec_stat, error_detail)
    except Exception as e:
        return common.server_error_to_message(e, app_name + exec_stat, error_detail)
def put_workspace(workspace_id):
    """Update a workspace record, applying only the sections the caller's
    per-workspace roles permit (name / CI / manifest / CD / member).

    Loads the stored record, merges the request body onto it section by
    section gated by role checks, then writes it back via PUT. A 404 from
    the write is treated as an exclusive (concurrent-update) check failure.

    Args:
        workspace_id: id of the workspace to update.
    Returns:
        (flask.Response, int): JSON body {"result"} and HTTP status.
    """
    app_name = multi_lang.get_text("EP020-0003", "ワークスペース情報:")
    exec_stat = multi_lang.get_text("EP020-0016", "更新")
    error_detail = ""
    return_code = 500
    try:
        # Restored: these separator strings were corrupted in the source.
        globals.logger.debug('#' * 50)
        globals.logger.debug('CALL {}'.format(inspect.currentframe().f_code.co_name))
        globals.logger.debug('#' * 50)
        # Header info.
        post_headers = {
            'Content-Type': 'application/json',
        }
        # Temporarily save the request body as the update candidate.
        req_data = request.json.copy()
        # Get the workspace record before the change.
        api_url = "{}://{}:{}/workspace/{}".format(os.environ['EPOCH_RS_WORKSPACE_PROTOCOL'],
                                                   os.environ['EPOCH_RS_WORKSPACE_HOST'],
                                                   os.environ['EPOCH_RS_WORKSPACE_PORT'],
                                                   workspace_id)
        response = requests.get(api_url)
        # Returns an error if not normal.
        if response.status_code != 200:
            if common.is_json_format(response.text):
                ret = json.loads(response.text)
                # Set details if there is a detailed error.
                if ret["errorDetail"] is not None:
                    error_detail = ret["errorDetail"]
            raise common.UserException("{} Error put workspace db status:{}".format(inspect.currentframe().f_code.co_name, response.status_code))
        rows = json.loads(response.text)
        row = rows["rows"][0]
        # Get the role value of the current user.
        user = api_service_current.user_get()
        globals.logger.debug(f'current user:{user}')
        roles = user["composite_roles"]
        # Based on the acquired information, set the update information sent
        # from the screen for each authority.
        #
        # If workspace update (name) is permitted
        #
        if const.ROLE_WS_ROLE_WS_NAME_UPDATE[0].format(workspace_id) in roles:
            row["common"] = req_data["common"]
        #
        # If workspace update (CI) is permitted
        #
        if const.ROLE_WS_ROLE_WS_CI_UPDATE[0].format(workspace_id) in roles:
            row["ci_config"]["pipelines_common"] = req_data["ci_config"]["pipelines_common"]
            row["ci_config"]["pipelines"] = req_data["ci_config"]["pipelines"]
        #
        # Update Manifest parameters
        #
        if const.ROLE_WS_ROLE_MANIFEST_SETTING[0].format(workspace_id) in roles:
            # If you have permission to update Manifest parameters, replace the contents of the parameters.
            row["ci_config"]["environments"] = req_data["ci_config"]["environments"]
        elif const.ROLE_WS_ROLE_WS_CD_UPDATE[0].format(workspace_id) in roles:
            # If you do not have the manifest update authority and have a workspace
            # update (CD), only increase or decrease the environments.
            save_env = []
            # Delete the original one, leave the environment contents as they are if
            # the IDs match, and make them new if they do not match.
            for src_env in row["ci_config"]["environments"]:
                # Check if the ID exists.
                if common.search_array_dict(req_data["ci_config"]["environments"], "environment_id", src_env["environment_id"]) is not None:
                    save_env.append(src_env)
            for dest_env in req_data["ci_config"]["environments"]:
                # Add information if it does not exist.
                if common.search_array_dict(row["ci_config"]["environments"], "environment_id", dest_env["environment_id"]) is None:
                    save_env.append(dest_env)
            row["ci_config"]["environments"] = save_env
        #
        # Update CD settings
        #
        # Determine user, token of IaC repository.
        if req_data["cd_config"]["environments_common"]["git_repositry"]["account_select"] == "applicationCode":
            # Same as application code.
            iac_git_user = row["ci_config"]["pipelines_common"]["git_repositry"]["user"]
            iac_git_token = row["ci_config"]["pipelines_common"]["git_repositry"]["token"]
        else:
            # Use the input value when inputting separately.
            iac_git_user = req_data["cd_config"]["environments_common"]["git_repositry"]["user"]
            iac_git_token = req_data["cd_config"]["environments_common"]["git_repositry"]["token"]
        if const.ROLE_WS_ROLE_WS_CD_UPDATE[0].format(workspace_id) in roles and const.ROLE_WS_ROLE_MEMBER_ROLE_UPDATE[0].format(workspace_id) in roles:
            # Update all items if you have workspace update (CD) permission and
            # member update permission.
            row["cd_config"] = req_data["cd_config"]
            row["cd_config"]["environments_common"]["git_repositry"]["user"] = iac_git_user
            row["cd_config"]["environments_common"]["git_repositry"]["password"] = iac_git_token
            row["cd_config"]["environments_common"]["git_repositry"]["token"] = iac_git_token
        elif const.ROLE_WS_ROLE_WS_CD_UPDATE[0].format(workspace_id) in roles:
            # If you have workspace update (CD) permission and you do not have member
            # update permission, update items other than Deploy permission items.
            # Update of common parts.
            row["cd_config"]["system_config"] = req_data["cd_config"]["system_config"]
            row["cd_config"]["environments_common"] = req_data["cd_config"]["environments_common"]
            row["cd_config"]["environments_common"]["git_repositry"]["user"] = iac_git_user
            row["cd_config"]["environments_common"]["git_repositry"]["password"] = iac_git_token
            row["cd_config"]["environments_common"]["git_repositry"]["token"] = iac_git_token
            # Update items for each environment.
            environments_new =[]
            for env_before in row["cd_config"]["environments"]:
                # Get updated environment information.
                env_new = common.search_array_dict(req_data["cd_config"]["environments"], "environment_id", env_before["environment_id"])
                if env_new is not None:
                    # Update items other than deploy_member.
                    env_new = env_new.copy()
                    env_new["cd_exec_users"] = env_before["cd_exec_users"]
                    environments_new.append(env_new)
                else:
                    # Not set (disappears) when the environment is deleted.
                    pass
            for env in req_data["cd_config"]["environments"]:
                if common.search_array_dict(row["cd_config"]["environments"], "environment_id", env["environment_id"]) is None:
                    # For a new environment, set items other than deploy_member and
                    # set deploy_member to the default value.
                    env_new = env.copy()
                    env_new["cd_exec_users"] = {"user_select":"all", "user_id": ""}
                    environments_new.append(env_new)
            row["cd_config"]["environments"] = environments_new
        elif const.ROLE_WS_ROLE_MEMBER_ROLE_UPDATE[0].format(workspace_id) in roles:
            # If you do not have workspace update (CD) permission and you have member
            # update permission, update only the item of Deploy permission.
            for env in row["cd_config"]["environments"]:
                env_new = common.search_array_dict(req_data["cd_config"]["environments"], "environment_id", env["environment_id"])
                env["cd_exec_users"] = env_new["cd_exec_users"]
        else:
            # Do not update if you have neither workspace update (CD) nor member
            # update permissions.
            pass
        post_data = row
        # Persist the merged record back to the resource service.
        api_url = "{}://{}:{}/workspace/{}".format(os.environ['EPOCH_RS_WORKSPACE_PROTOCOL'],
                                                   os.environ['EPOCH_RS_WORKSPACE_HOST'],
                                                   os.environ['EPOCH_RS_WORKSPACE_PORT'],
                                                   workspace_id)
        response = requests.put(api_url, headers=post_headers, data=json.dumps(post_data))
        if response.status_code == 200:
            # On success the return value is the record, so take it as is.
            ret = json.loads(response.text)
            rows = ret['rows']
        elif response.status_code == 404:
            # Record changed elsewhere: report as a client error (exclusive check).
            return_code = 400
            error_detail = multi_lang.get_text("EP000-0023", "対象の情報(workspace)が他で更新されたため、更新できません\n画面更新後、再度情報を入力・選択して実行してください", "workspace")
            raise common.UserException("{} Exclusive check error".format(inspect.currentframe().f_code.co_name))
        else:
            if common.is_json_format(response.text):
                ret = json.loads(response.text)
                # Set details if there is a detailed error.
                if ret["errorDetail"] is not None:
                    error_detail = ret["errorDetail"]
            raise common.UserException("{} Error put workspace db status:{}".format(inspect.currentframe().f_code.co_name, response.status_code))
        ret_status = response.status_code
        # Return the status as is.
        return jsonify({"result": ret_status}), ret_status
    except common.UserException as e:
        return common.user_error_to_message(e, app_name + exec_stat, error_detail, return_code)
    except Exception as e:
        return common.server_error_to_message(e, app_name + exec_stat, error_detail)
def patch_workspace(workspace_id):
    """Partially update a workspace record by forwarding the request body.

    Args:
        workspace_id: id of the workspace to patch.
    Returns:
        (flask.Response, int): JSON body {"result"} and HTTP status.
    """
    app_name = multi_lang.get_text("EP020-0003", "ワークスペース情報:")
    exec_stat = multi_lang.get_text("EP020-0016", "更新")
    error_detail = ""
    try:
        # Restored: these separator strings were corrupted in the source.
        globals.logger.debug('#' * 50)
        globals.logger.debug('CALL {}'.format(inspect.currentframe().f_code.co_name))
        globals.logger.debug('#' * 50)
        # Header info.
        post_headers = {
            'Content-Type': 'application/json',
        }
        # Forward the request JSON unchanged.
        post_data = request.json.copy()
        # workspace patch send
        api_url = "{}://{}:{}/workspace/{}".format(os.environ['EPOCH_RS_WORKSPACE_PROTOCOL'],
                                                   os.environ['EPOCH_RS_WORKSPACE_HOST'],
                                                   os.environ['EPOCH_RS_WORKSPACE_PORT'],
                                                   workspace_id)
        response = requests.patch(api_url, headers=post_headers, data=json.dumps(post_data))
        if response.status_code == 200:
            # On success the return value is the record, so take it as is.
            ret = json.loads(response.text)
            rows = ret['rows']
        else:
            if common.is_json_format(response.text):
                ret = json.loads(response.text)
                # Set details if there is a detailed error.
                if ret["errorDetail"] is not None:
                    error_detail = ret["errorDetail"]
            raise common.UserException("{} Error put workspace db status:{}".format(inspect.currentframe().f_code.co_name, response.status_code))
        ret_status = response.status_code
        # Return the status as is.
        return jsonify({"result": ret_status}), ret_status
    except common.UserException as e:
        return common.server_error_to_message(e, app_name + exec_stat, error_detail)
    except Exception as e:
        return common.server_error_to_message(e, app_name + exec_stat, error_detail)
def post_pod(workspace_id):
    """Provision the workspace runtime: workspace pods, ArgoCD, and ITA.

    Sequentially POSTs to the workspace, ArgoCD, IT-Automation, and
    IT-Automation settings control APIs, aborting on the first failure.

    Args:
        workspace_id: id of the workspace to provision.
    Returns:
        (flask.Response, int): JSON body {"result": 200} and HTTP 200 on success.
    """
    app_name = "ワークスペース情報:"
    exec_stat = "作成"
    error_detail = ""
    try:
        # Restored: these separator strings were corrupted in the source.
        globals.logger.debug('#' * 50)
        globals.logger.debug('CALL {}'.format(inspect.currentframe().f_code.co_name))
        globals.logger.debug('#' * 50)
        # Header info.
        post_headers = {
            'Content-Type': 'application/json',
        }
        # Forward the request JSON unchanged to each control API.
        post_data = request.json.copy()
        # workspace post送信
        api_url = "{}://{}:{}/workspace/{}".format(os.environ['EPOCH_CONTROL_WORKSPACE_PROTOCOL'],
                                                   os.environ['EPOCH_CONTROL_WORKSPACE_HOST'],
                                                   os.environ['EPOCH_CONTROL_WORKSPACE_PORT'],
                                                   workspace_id)
        response = requests.post(api_url, headers=post_headers, data=json.dumps(post_data))
        globals.logger.debug("post workspace response:{}".format(response.text))
        if response.status_code != 200:
            error_detail = 'workspace post処理に失敗しました'
            raise common.UserException(error_detail)
        # argocd post送信
        api_url = "{}://{}:{}/workspace/{}/argocd".format(os.environ['EPOCH_CONTROL_ARGOCD_PROTOCOL'],
                                                          os.environ['EPOCH_CONTROL_ARGOCD_HOST'],
                                                          os.environ['EPOCH_CONTROL_ARGOCD_PORT'],
                                                          workspace_id)
        response = requests.post(api_url, headers=post_headers, data=json.dumps(post_data))
        globals.logger.debug("post argocd response:{}".format(response.text))
        if response.status_code != 200:
            error_detail = 'argocd post処理に失敗しました'
            raise common.UserException(error_detail)
        # ita post送信
        api_url = "{}://{}:{}/workspace/{}/it-automation".format(os.environ['EPOCH_CONTROL_ITA_PROTOCOL'],
                                                                 os.environ['EPOCH_CONTROL_ITA_HOST'],
                                                                 os.environ['EPOCH_CONTROL_ITA_PORT'],
                                                                 workspace_id)
        response = requests.post(api_url, headers=post_headers, data=json.dumps(post_data))
        globals.logger.debug("post it-automation response:{}".format(response.text))
        if response.status_code != 200:
            error_detail = 'it-automation post処理に失敗しました'
            raise common.UserException(error_detail)
        # epoch-control-ita-api settings endpoint.
        api_url = "{}://{}:{}/workspace/{}/it-automation/settings".format(os.environ['EPOCH_CONTROL_ITA_PROTOCOL'],
                                                                          os.environ['EPOCH_CONTROL_ITA_HOST'],
                                                                          os.environ['EPOCH_CONTROL_ITA_PORT'],
                                                                          workspace_id)
        # it-automation/settings post送信
        response = requests.post(api_url, headers=post_headers, data=json.dumps(post_data))
        globals.logger.debug("post it-automation/settings response:{}".format(response.text))
        if response.status_code != 200:
            error_detail = 'it-automation/settings post処理に失敗しました'
            raise common.UserException(error_detail)
        ret_status = 200
        # Return the status as is.
        return jsonify({"result": ret_status}), ret_status
    except common.UserException as e:
        return common.server_error_to_message(e, app_name + exec_stat, error_detail)
    except Exception as e:
        return common.server_error_to_message(e, app_name + exec_stat, error_detail)
| true | true |
f722512a52d85c2f133dde222b5d65e575bea90e | 13,065 | py | Python | qa/rpc-tests/test_framework/util.py | lambocoin-core/lambocoin | aa851cb2b9b8a4c733ef26d71e4502f45890859e | [
"MIT"
] | null | null | null | qa/rpc-tests/test_framework/util.py | lambocoin-core/lambocoin | aa851cb2b9b8a4c733ef26d71e4502f45890859e | [
"MIT"
] | null | null | null | qa/rpc-tests/test_framework/util.py | lambocoin-core/lambocoin | aa851cb2b9b8a4c733ef26d71e4502f45890859e | [
"MIT"
] | null | null | null | # Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
    # P2P listen port for node n: fixed base plus the node index, shifted by
    # a PID-derived offset so concurrent test runs pick disjoint port ranges.
    pid_offset = os.getpid() % 999
    return 11000 + n + pid_offset
def rpc_port(n):
    """Return the RPC listen port for node *n*, offset by PID so parallel test runs don't clash."""
    pid_offset = os.getpid() % 999
    return 12000 + n + pid_offset
def check_json_precision():
    """Raise RuntimeError if the json library loses precision converting BTC values."""
    amount = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(amount)))
    # 20000000.00000003 BTC is exactly 2000000000000003 satoshis.
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections, wait=1):
    """
    Block until every RPC connection reports the same block count,
    polling every *wait* seconds.
    """
    while True:
        heights = [conn.getblockcount() for conn in rpc_connections]
        if heights.count(heights[0]) == len(heights):
            break
        time.sleep(wait)
def sync_mempools(rpc_connections, wait=1):
    """
    Block until every node reports an identical raw mempool,
    polling every *wait* seconds.
    """
    while True:
        # Compare every other node's mempool against the first node's.
        reference = set(rpc_connections[0].getrawmempool())
        matching = 1
        for conn in rpc_connections[1:]:
            if set(conn.getrawmempool()) == reference:
                matching += 1
        if matching == len(rpc_connections):
            break
        time.sleep(wait)
bitcoind_processes = {}  # node index -> subprocess.Popen handle for each running lambocoind
def initialize_datadir(dirname, n):
    """Create node *n*'s datadir under *dirname* and write its lambocoin.conf.

    The conf enables regtest, sets fixed RPC credentials, and pins the P2P
    and RPC ports for this node index. Returns the datadir path.
    """
    datadir = os.path.join(dirname, "node" + str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    settings = [
        "regtest=1",
        "rpcuser=rt",
        "rpcpassword=rt",
        "port=" + str(p2p_port(n)),
        "rpcport=" + str(rpc_port(n)),
    ]
    with open(os.path.join(datadir, "lambocoin.conf"), 'w') as conf_file:
        conf_file.write("\n".join(settings) + "\n")
    return datadir
def initialize_chain(test_dir):
    """
    Create (or copy from cache) a 200-block-long chain and
    4 wallets.
    bitcoind and bitcoin-cli must be in search path.

    The expensive chain generation happens once into ./cache; later calls
    just copy the cached datadirs into *test_dir* and rewrite the ports.
    """
    # Only regenerate when the cache is missing.
    if not os.path.isdir(os.path.join("cache", "node0")):
        devnull = open("/dev/null", "w+")
        # Create cache directories, run bitcoinds:
        for i in range(4):
            datadir=initialize_datadir("cache", i)
            args = [ os.getenv("LAMBOCOIND", "lambocoind"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
            if i > 0:
                # Nodes 1-3 dial node 0 so the four nodes form one network.
                args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
            bitcoind_processes[i] = subprocess.Popen(args)
            if os.getenv("PYTHON_DEBUG", ""):
                print "initialize_chain: lambocoind started, calling lambocoin-cli -rpcwait getblockcount"
            # -rpcwait blocks until the daemon's RPC interface is up.
            subprocess.check_call([ os.getenv("LAMBOCOINCLI", "lambocoin-cli"), "-datadir="+datadir,
                                    "-rpcwait", "getblockcount"], stdout=devnull)
            if os.getenv("PYTHON_DEBUG", ""):
                print "initialize_chain: lambocoin-cli -rpcwait getblockcount completed"
        devnull.close()
        rpcs = []
        for i in range(4):
            try:
                url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),)
                rpcs.append(AuthServiceProxy(url))
            except:
                sys.stderr.write("Error connecting to "+url+"\n")
                sys.exit(1)
        # Create a 200-block-long chain; each of the 4 nodes
        # gets 25 mature blocks and 25 immature.
        # blocks are created with timestamps 10 minutes apart, starting
        # at 1 Jan 2014
        # NOTE(review): the loops below generate 2*4*15 = 120 blocks, which
        # does not match the 200-block / 25-per-node description above —
        # confirm the intended block count.
        block_time = 1388534400
        for i in range(2):
            for peer in range(4):
                for j in range(15):
                    set_node_times(rpcs, block_time)
                    rpcs[peer].generate(1)
                    block_time += 10*60
                # Must sync before next peer starts generating blocks
                sync_blocks(rpcs)
        # Shut them down, and clean up cache directories:
        stop_nodes(rpcs)
        wait_bitcoinds()
        # Remove per-run state so the cached datadirs are clean templates.
        for i in range(4):
            os.remove(log_filename("cache", i, "debug.log"))
            os.remove(log_filename("cache", i, "db.log"))
            os.remove(log_filename("cache", i, "peers.dat"))
            os.remove(log_filename("cache", i, "fee_estimates.dat"))
    # Copy the cached datadirs into the test directory.
    for i in range(4):
        from_dir = os.path.join("cache", "node"+str(i))
        to_dir = os.path.join(test_dir, "node"+str(i))
        shutil.copytree(from_dir, to_dir)
        initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf
def initialize_chain_clean(test_dir, num_nodes):
    """
    Create an empty blockchain and num_nodes wallets.
    Useful if a test case wants complete control over initialization.
    """
    for node_index in range(num_nodes):
        initialize_datadir(test_dir, node_index)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
    """
    Start a bitcoind and return RPC connection to it.

    i         -- node index (selects datadir and ports)
    dirname   -- parent directory holding node<i> datadirs
    extra_args-- optional extra command-line arguments for the daemon
    rpchost   -- optional "host[:port]" the RPC interface listens on
    timewait  -- optional RPC timeout passed to AuthServiceProxy
    binary    -- optional daemon executable (defaults to $LAMBOCOIND)
    """
    datadir = os.path.join(dirname, "node"+str(i))
    if binary is None:
        binary = os.getenv("LAMBOCOIND", "lambocoind")
    args = [ binary, "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
    if extra_args is not None: args.extend(extra_args)
    bitcoind_processes[i] = subprocess.Popen(args)
    devnull = open("/dev/null", "w+")
    if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: lambocoind started, calling lambocoin-cli -rpcwait getblockcount"
    # -rpcwait makes the CLI block until the daemon's RPC interface is ready.
    subprocess.check_call([ os.getenv("LAMBOCOINCLI", "lambocoin-cli"), "-datadir="+datadir] +
                          _rpchost_to_args(rpchost)  +
                          ["-rpcwait", "getblockcount"], stdout=devnull)
    if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: calling lambocoin-cli -rpcwait getblockcount returned"
    devnull.close()
    url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
    if timewait is not None:
        proxy = AuthServiceProxy(url, timeout=timewait)
    else:
        proxy = AuthServiceProxy(url)
    proxy.url = url # store URL on proxy for info
    return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
    """
    Start multiple bitcoinds; return a list of RPC connections to them.

    extra_args/binary, when given, are per-node lists indexed like the nodes.
    """
    if extra_args is None:
        extra_args = [None] * num_nodes
    if binary is None:
        binary = [None] * num_nodes
    return [start_node(i, dirname, extra_args[i], rpchost, binary=binary[i])
            for i in range(num_nodes)]
def log_filename(dirname, n_node, logname):
    """Return the path to *logname* inside node *n_node*'s regtest directory."""
    node_dir = "node" + str(n_node)
    return os.path.join(dirname, node_dir, "regtest", logname)
def stop_node(node, i):
    """Ask node *i* to stop via RPC, then reap and forget its process handle."""
    node.stop()
    process = bitcoind_processes[i]
    process.wait()
    del bitcoind_processes[i]
def stop_nodes(nodes):
    """Send a stop RPC to every node, then empty the caller's list.

    Emptying the list drops the proxy references, which closes the
    underlying connections as a side effect.
    """
    for node in nodes:
        node.stop()
    nodes[:] = []
def set_node_times(nodes, t):
    # Apply the same mock timestamp to every node so generated block times
    # stay consistent across the network.
    for node in nodes:
        node.setmocktime(t)
def wait_bitcoinds():
    """Block until every spawned bitcoind has exited cleanly, then drop the handles."""
    for process in list(bitcoind_processes.values()):
        process.wait()
    bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
    """Tell *from_connection* to dial node *node_num* once and wait for the handshake."""
    ip_port = "127.0.0.1:" + str(p2p_port(node_num))
    from_connection.addnode(ip_port, "onetry")
    # Poll until the version handshake has completed with every peer, to
    # avoid race conditions with transaction relaying.
    while not all(peer['version'] != 0 for peer in from_connection.getpeerinfo()):
        time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
    # Connect in both directions so relay works regardless of which side
    # initiated the connection.
    connect_nodes(nodes[a], b)
    connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
    """
    Return the index of the output of *txid* whose value equals *amount*.
    Raises RuntimeError if there is none.
    """
    # Verbosity 1 returns the decoded transaction with outputs under "vout".
    txdata = node.getrawtransaction(txid, 1)
    for index, txout in enumerate(txdata["vout"]):
        if txout["value"] == amount:
            return index
    raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return (total_in, inputs): a random set of unspent txouts from
    *from_node* whose values sum to at least *amount_needed*.
    Raises RuntimeError when the wallet cannot cover the amount.
    """
    assert confirmations_required >= 0
    candidates = from_node.listunspent(confirmations_required)
    random.shuffle(candidates)
    selected = []
    total_in = Decimal("0.00000000")
    while total_in < amount_needed and candidates:
        utxo = candidates.pop()
        total_in += utxo["amount"]
        selected.append({"txid": utxo["txid"], "vout": utxo["vout"], "address": utxo["address"]})
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
    return (total_in, selected)
def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s) for a transaction spending *amount_out* + *fee*
    out of *amount_in*; return them as an {address: amount} dict.
    """
    outputs = {}
    spent = amount_out + fee
    change = amount_in - spent
    if change > spent * 2:
        # Break up big inputs with an extra change output, splitting the
        # change in two while respecting satoshi rounding.
        change_address = from_node.getnewaddress()
        half = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        outputs[change_address] = half
        change = amount_in - spent - half
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
    """
    Create&broadcast a zero-priority transaction.
    Returns (txid, hex-encoded-txdata)
    Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output
    """
    # Create a send-to-self with confirmed inputs:
    self_address = from_node.getnewaddress()
    # fee*2 covers the fees of both the send-to-self and the final spend.
    (total_in, inputs) = gather_inputs(from_node, amount+fee*2)
    outputs = make_change(from_node, total_in, amount+fee, fee)
    outputs[self_address] = float(amount+fee)
    self_rawtx = from_node.createrawtransaction(inputs, outputs)
    self_signresult = from_node.signrawtransaction(self_rawtx)
    self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
    # Locate which output index of the send-to-self carries amount+fee.
    vout = find_output(from_node, self_txid, amount+fee)
    # Now immediately spend the output to create a 1-input, 1-output
    # zero-priority transaction:
    inputs = [ { "txid" : self_txid, "vout" : vout } ]
    outputs = { to_node.getnewaddress() : float(amount) }
    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random zero-priority transaction between two randomly chosen nodes.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    sender = random.choice(nodes)
    receiver = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    (txid, txhex) = send_zeropri_transaction(sender, receiver, amount, fee)
    return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random transaction between two randomly chosen nodes, with a
    randomized fee of min_fee + fee_increment * [0..fee_variants].
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    sender = random.choice(nodes)
    receiver = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    (total_in, inputs) = gather_inputs(sender, amount + fee)
    outputs = make_change(sender, total_in, amount, fee)
    outputs[receiver.getnewaddress()] = float(amount)
    rawtx = sender.createrawtransaction(inputs, outputs)
    signresult = sender.signrawtransaction(rawtx)
    txid = sender.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
    """Raise AssertionError when thing1 != thing2."""
    if thing1 != thing2:
        raise AssertionError("{0!s} != {1!s}".format(thing1, thing2))
def assert_greater_than(thing1, thing2):
    """Raise AssertionError when thing1 <= thing2."""
    if thing1 <= thing2:
        raise AssertionError("{0!s} <= {1!s}".format(thing1, thing2))
def assert_raises(exc, fun, *args, **kwds):
    """Raise AssertionError unless fun(*args, **kwds) raises *exc*."""
    try:
        fun(*args, **kwds)
    except exc:
        # Expected exception type: success.
        return
    except Exception as e:
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    raise AssertionError("No exception raised")
| 36.596639 | 110 | 0.647608 |
import os
import sys
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections, wait=1):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(wait)
def sync_mempools(rpc_connections, wait=1):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(wait)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "lambocoin.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(p2p_port(n))+"\n");
f.write("rpcport="+str(rpc_port(n))+"\n");
return datadir
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
bitcoind and bitcoin-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("LAMBOCOIND", "lambocoind"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: lambocoind started, calling lambocoin-cli -rpcwait getblockcount"
subprocess.check_call([ os.getenv("LAMBOCOINCLI", "lambocoin-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: lambocoin-cli -rpcwait getblockcount completed"
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(15):
set_node_times(rpcs, block_time)
rpcs[peer].generate(1)
block_time += 10*60
sync_blocks(rpcs)
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i)
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['):
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""
Start a bitcoind and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
if binary is None:
binary = os.getenv("LAMBOCOIND", "lambocoind")
args = [ binary, "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
if os.getenv("PYTHON_DEBUG", ""):
print "start_node: lambocoind started, calling lambocoin-cli -rpcwait getblockcount"
subprocess.check_call([ os.getenv("LAMBOCOINCLI", "lambocoin-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
if os.getenv("PYTHON_DEBUG", ""):
print "start_node: calling lambocoin-cli -rpcwait getblockcount returned"
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
if timewait is not None:
proxy = AuthServiceProxy(url, timeout=timewait)
else:
proxy = AuthServiceProxy(url)
proxy.url = url
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
"""
Start multiple bitcoinds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
if binary is None: binary = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost, binary=binary[i]) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:]
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
change_address = from_node.getnewaddress()
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
| false | true |
f72251726696c1e896ff03b3100c48683916efdc | 92 | py | Python | Software_University/python_basics/for_loop/lecture/numbers_n_to_1.py | Ivanazzz/SoftUni-W3resource-Python | 892321a290e22a91ff2ac2fef5316179a93f2f17 | [
"MIT"
] | 1 | 2022-01-26T07:38:11.000Z | 2022-01-26T07:38:11.000Z | Software_University/python_basics/for_loop/lecture/numbers_n_to_1.py | Ivanazzz/SoftUni-W3resource-Python | 892321a290e22a91ff2ac2fef5316179a93f2f17 | [
"MIT"
] | null | null | null | Software_University/python_basics/for_loop/lecture/numbers_n_to_1.py | Ivanazzz/SoftUni-W3resource-Python | 892321a290e22a91ff2ac2fef5316179a93f2f17 | [
"MIT"
] | null | null | null | number = int(input("Enter an integer number: "))
for i in range(number, 0, -1):
print(i) | 30.666667 | 48 | 0.641304 | number = int(input("Enter an integer number: "))
for i in range(number, 0, -1):
print(i) | true | true |
f722528cdceebb50fb10ce727ffbd969111d6c3e | 3,113 | py | Python | youtube_dl/extractor/sbs.py | ajaivasudeve/youtube-dl | 6f8cb72f689c0aac630e06e91785f8428f1ba3f3 | [
"Unlicense"
] | 55 | 2015-01-18T08:44:39.000Z | 2021-09-15T19:49:52.000Z | youtube_dl/extractor/sbs.py | ajaivasudeve/youtube-dl | 6f8cb72f689c0aac630e06e91785f8428f1ba3f3 | [
"Unlicense"
] | 14 | 2019-11-30T00:01:34.000Z | 2019-12-17T16:04:53.000Z | youtube_dl/extractor/sbs.py | ajaivasudeve/youtube-dl | 6f8cb72f689c0aac630e06e91785f8428f1ba3f3 | [
"Unlicense"
] | 36 | 2015-10-07T18:32:33.000Z | 2021-01-28T10:06:15.000Z | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
smuggle_url,
ExtractorError,
)
class SBSIE(InfoExtractor):
    """Extractor for sbs.com.au on-demand and news videos.

    Resolves the numeric SBS video id to a ThePlatform release URL and
    delegates the actual media extraction via a transparent URL result.
    """
    IE_DESC = 'sbs.com.au'
    _VALID_URL = r'https?://(?:www\.)?sbs\.com\.au/(?:ondemand(?:/video/(?:single/)?|.*?\bplay=)|news/(?:embeds/)?video/)(?P<id>[0-9]+)'

    _TESTS = [{
        # Original URL is handled by the generic IE which finds the iframe:
        # http://www.sbs.com.au/thefeed/blog/2014/08/21/dingo-conservation
        'url': 'http://www.sbs.com.au/ondemand/video/single/320403011771/?source=drupal&vertical=thefeed',
        'md5': '3150cf278965eeabb5b4cea1c963fe0a',
        'info_dict': {
            'id': '_rFBPRPO4pMR',
            'ext': 'mp4',
            'title': 'Dingo Conservation (The Feed)',
            'description': 'md5:f250a9856fca50d22dec0b5b8015f8a5',
            'thumbnail': r're:http://.*\.jpg',
            'duration': 308,
            'timestamp': 1408613220,
            'upload_date': '20140821',
            'uploader': 'SBSC',
        },
    }, {
        'url': 'http://www.sbs.com.au/ondemand/video/320403011771/Dingo-Conservation-The-Feed',
        'only_matching': True,
    }, {
        'url': 'http://www.sbs.com.au/news/video/471395907773/The-Feed-July-9',
        'only_matching': True,
    }, {
        'url': 'https://www.sbs.com.au/ondemand/?play=1836638787723',
        'only_matching': True,
    }, {
        'url': 'https://www.sbs.com.au/ondemand/program/inside-windsor-castle?play=1283505731842',
        'only_matching': True,
    }, {
        'url': 'https://www.sbs.com.au/news/embeds/video/1840778819866',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # The video_pdkvars endpoint returns the player parameters (including
        # ThePlatform release URLs) for the given video id.
        player_params = self._download_json(
            'http://www.sbs.com.au/api/video_pdkvars/id/%s?form=json' % video_id, video_id)

        error = player_params.get('error')
        if error:
            # Map the API's errorCode to a human-readable message.
            error_message = 'Sorry, The video you are looking for does not exist.'
            video_data = error.get('results') or {}
            error_code = error.get('errorCode')
            if error_code == 'ComingSoon':
                error_message = '%s is not yet available.' % video_data.get('title', '')
            elif error_code in ('Forbidden', 'intranetAccessOnly'):
                error_message = 'Sorry, This video cannot be accessed via this website'
            elif error_code == 'Expired':
                error_message = 'Sorry, %s is no longer available.' % video_data.get('title', '')
            raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message), expected=True)

        urls = player_params['releaseUrls']
        # Prefer progressive, then html, then standard release URLs, falling
        # back to the related-items URL when none is present.
        theplatform_url = (urls.get('progressive') or urls.get('html')
                           or urls.get('standard') or player_params['relatedItemsURL'])

        return {
            '_type': 'url_transparent',
            'ie_key': 'ThePlatform',
            'id': video_id,
            'url': smuggle_url(self._proto_relative_url(theplatform_url), {'force_smil_url': True}),
        }
| 40.960526 | 136 | 0.585609 |
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
smuggle_url,
ExtractorError,
)
class SBSIE(InfoExtractor):
IE_DESC = 'sbs.com.au'
_VALID_URL = r'https?://(?:www\.)?sbs\.com\.au/(?:ondemand(?:/video/(?:single/)?|.*?\bplay=)|news/(?:embeds/)?video/)(?P<id>[0-9]+)'
_TESTS = [{
'url': 'http://www.sbs.com.au/ondemand/video/single/320403011771/?source=drupal&vertical=thefeed',
'md5': '3150cf278965eeabb5b4cea1c963fe0a',
'info_dict': {
'id': '_rFBPRPO4pMR',
'ext': 'mp4',
'title': 'Dingo Conservation (The Feed)',
'description': 'md5:f250a9856fca50d22dec0b5b8015f8a5',
'thumbnail': r're:http://.*\.jpg',
'duration': 308,
'timestamp': 1408613220,
'upload_date': '20140821',
'uploader': 'SBSC',
},
}, {
'url': 'http://www.sbs.com.au/ondemand/video/320403011771/Dingo-Conservation-The-Feed',
'only_matching': True,
}, {
'url': 'http://www.sbs.com.au/news/video/471395907773/The-Feed-July-9',
'only_matching': True,
}, {
'url': 'https://www.sbs.com.au/ondemand/?play=1836638787723',
'only_matching': True,
}, {
'url': 'https://www.sbs.com.au/ondemand/program/inside-windsor-castle?play=1283505731842',
'only_matching': True,
}, {
'url': 'https://www.sbs.com.au/news/embeds/video/1840778819866',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
player_params = self._download_json(
'http://www.sbs.com.au/api/video_pdkvars/id/%s?form=json' % video_id, video_id)
error = player_params.get('error')
if error:
error_message = 'Sorry, The video you are looking for does not exist.'
video_data = error.get('results') or {}
error_code = error.get('errorCode')
if error_code == 'ComingSoon':
error_message = '%s is not yet available.' % video_data.get('title', '')
elif error_code in ('Forbidden', 'intranetAccessOnly'):
error_message = 'Sorry, This video cannot be accessed via this website'
elif error_code == 'Expired':
error_message = 'Sorry, %s is no longer available.' % video_data.get('title', '')
raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message), expected=True)
urls = player_params['releaseUrls']
theplatform_url = (urls.get('progressive') or urls.get('html')
or urls.get('standard') or player_params['relatedItemsURL'])
return {
'_type': 'url_transparent',
'ie_key': 'ThePlatform',
'id': video_id,
'url': smuggle_url(self._proto_relative_url(theplatform_url), {'force_smil_url': True}),
}
| true | true |
f72252ba555ae2cebc8bddb12841fa7a995f772f | 1,498 | py | Python | code/fastai_ext/fastai_ext/hyperparameter.py | jandremarais/TabularLearner | 7905b06a31fc6e0b0adf6a13a0fb445bdfe00c96 | [
"MIT"
] | 14 | 2019-07-15T07:38:36.000Z | 2021-12-08T23:54:15.000Z | code/fastai_ext/fastai_ext/hyperparameter.py | jandremarais/TabularLearner | 7905b06a31fc6e0b0adf6a13a0fb445bdfe00c96 | [
"MIT"
] | 2 | 2019-05-07T13:56:51.000Z | 2019-08-08T01:27:58.000Z | code/fastai_ext/fastai_ext/hyperparameter.py | jandremarais/TabularLearner | 7905b06a31fc6e0b0adf6a13a0fb445bdfe00c96 | [
"MIT"
] | 2 | 2021-03-23T11:06:45.000Z | 2021-03-23T11:34:48.000Z | import pandas as pd
import itertools
from functools import partial
from fastai.callbacks import CSVLogger
def get_config_df(config):
    """Build a DataFrame with one row per hyperparameter combination.

    Rows are the Cartesian product of the value lists in *config* (one
    column per key); the index labels the runs model_1, model_2, ...
    """
    combos = itertools.product(*config.values())
    frame = pd.DataFrame(list(combos), columns=config.keys())
    frame.index = ['model_{}'.format(row + 1) for row in range(len(frame))]
    return frame
def create_experiment(nm, path, folder='results'):
    """Ensure the directory path/folder/nm exists; return (nm, that path).

    Note: the parent path/folder must already exist (mkdir has no parents=True).
    """
    experiment_dir = path / folder / nm
    experiment_dir.mkdir(exist_ok=True)
    return nm, experiment_dir
def record_experiment(learn, fn, exp_path):
    # Attach a fastai CSVLogger callback factory to *learn* so that training
    # metrics are written to exp_path/fn on each fit.
    learn.callback_fns.append(partial(CSVLogger, filename=exp_path/fn))
def load_results(exp_path):
    """Load an experiment's config table and all per-fold recorder CSVs.

    Returns (config_df, recorder_df, param_names, metric_names), where
    recorder_df has the config columns merged in per model index.
    """
    config_df = pd.read_csv(exp_path/'config.csv', index_col=0)
    param_names = config_df.columns.values
    recorder_df=[]
    # Recorder files are named "<model_id>-fold_<k>.csv"; pick up every file
    # whose name starts with one of the configured model ids.
    # NOTE(review): .ls() is fastai's pathlib monkey-patch, not stdlib Path.
    for p in exp_path.ls():
        if p.name.startswith(tuple(config_df.index.values)):
            df = pd.read_csv(p)
            ind_name, fold_name = p.stem.split('-')
            df['index']=ind_name
            df['fold']=int(fold_name.split('_')[-1].split('.')[0])
            recorder_df.append(df)
    recorder_df = pd.concat(recorder_df)
    # Everything that is not bookkeeping (index/epoch/train_loss/fold) is a metric.
    metric_names = list(set(recorder_df.columns).symmetric_difference(['index', 'epoch', 'train_loss', 'fold']))
    recorder_df = recorder_df.merge(config_df.reset_index())
    return config_df, recorder_df, param_names, metric_names
def summarise_results(recorder_df, param_names, metric_names):
    """Mean and std of each metric per (index, *param_names, epoch) group."""
    group_cols = ['index'] + list(param_names) + ['epoch']
    aggregations = {metric: ['mean', 'std'] for metric in metric_names}
    return recorder_df.groupby(group_cols, as_index=False).agg(aggregations)
import itertools
from functools import partial
from fastai.callbacks import CSVLogger
def get_config_df(config):
df = pd.DataFrame(list(itertools.product(*config.values())), columns=config.keys())
df.index = [f'model_{i+1}' for i in range(len(df))]
return df
def create_experiment(nm, path, folder='results'):
exp_path = (path/folder/nm)
exp_path.mkdir(exist_ok=True)
return nm, exp_path
def record_experiment(learn, fn, exp_path):
learn.callback_fns.append(partial(CSVLogger, filename=exp_path/fn))
def load_results(exp_path):
config_df = pd.read_csv(exp_path/'config.csv', index_col=0)
param_names = config_df.columns.values
recorder_df=[]
for p in exp_path.ls():
if p.name.startswith(tuple(config_df.index.values)):
df = pd.read_csv(p)
ind_name, fold_name = p.stem.split('-')
df['index']=ind_name
df['fold']=int(fold_name.split('_')[-1].split('.')[0])
recorder_df.append(df)
recorder_df = pd.concat(recorder_df)
metric_names = list(set(recorder_df.columns).symmetric_difference(['index', 'epoch', 'train_loss', 'fold']))
recorder_df = recorder_df.merge(config_df.reset_index())
return config_df, recorder_df, param_names, metric_names
def summarise_results(recorder_df, param_names, metric_names):
return (recorder_df.groupby(['index', *param_names, 'epoch'], as_index=False)
.agg({k:['mean', 'std'] for k in metric_names})) | true | true |
f72252fb17ff4e53251c9c88acde167184bfaac9 | 410 | py | Python | supriya/assets/synthdefs/simple_sine.py | butayama/supriya | 0c197324ecee4232381221880d1f40e109bb756c | [
"MIT"
] | 191 | 2015-11-13T02:28:42.000Z | 2022-03-29T10:26:44.000Z | supriya/assets/synthdefs/simple_sine.py | butayama/supriya | 0c197324ecee4232381221880d1f40e109bb756c | [
"MIT"
] | 130 | 2016-01-04T16:59:02.000Z | 2022-02-26T15:37:20.000Z | supriya/assets/synthdefs/simple_sine.py | butayama/supriya | 0c197324ecee4232381221880d1f40e109bb756c | [
"MIT"
] | 22 | 2016-05-04T10:32:16.000Z | 2022-02-26T19:22:45.000Z | import supriya
def _build_synthdef():
    """Build the "simple_sine" SynthDef: a sine oscillator at ``frequency``,
    scaled by ``amplitude`` and written to audio bus ``bus``."""
    with supriya.SynthDefBuilder(amplitude=0, bus=0, frequency=440) as builder:
        supriya.ugens.Out.ar(
            bus=builder["bus"],
            source=supriya.ugens.SinOsc.ar(frequency=builder["frequency"])
            * builder["amplitude"],
        )
    # Compile the builder's graph into the named SynthDef.
    return builder.build(name="simple_sine")


# Module-level singleton: importing this module builds the SynthDef once.
simple_sine = _build_synthdef()

__all__ = ["simple_sine"]
| 22.777778 | 79 | 0.64878 | import supriya
def _build_synthdef():
with supriya.SynthDefBuilder(amplitude=0, bus=0, frequency=440) as builder:
supriya.ugens.Out.ar(
bus=builder["bus"],
source=supriya.ugens.SinOsc.ar(frequency=builder["frequency"])
* builder["amplitude"],
)
return builder.build(name="simple_sine")
simple_sine = _build_synthdef()
__all__ = ["simple_sine"]
| true | true |
f7225437013ea810afe0c5483e568e663d0f6c87 | 1,027 | py | Python | my-third-web-app.py | thunderboltsid/snake-taming-101 | 2f3fef8303d20fd026adc5b995510f375bcaba22 | [
"MIT"
] | 1 | 2020-07-25T19:21:47.000Z | 2020-07-25T19:21:47.000Z | talks/snake-taming-101/my-third-web-app.py | thunderboltsid/sshukla.de | a39d05cc3d3c74277b0e64d49847c1d7e3053a80 | [
"CC-BY-3.0"
] | null | null | null | talks/snake-taming-101/my-third-web-app.py | thunderboltsid/sshukla.de | a39d05cc3d3c74277b0e64d49847c1d7e3053a80 | [
"CC-BY-3.0"
] | null | null | null | #!/usr/bin/env python
from flask import Flask, request
app = Flask(__name__)
# Shared storage for our list of tasks
tasks = ["GenCS 1"]
form = ("<form action='/' method='POST'>"
"<input autofocus type='text' name='task' />"
"<input type='submit' />"
"</form>")
def delete_form(idx):
    """Render the per-task delete form; the hidden field carries the task index."""
    parts = [
        "<form action='/delete' method='POST'>",
        "<input type='hidden' name='task' value='" + str(idx) + "' />",
        "<input type='submit' value='Delete'/>",
        "</form>",
    ]
    return "".join(parts)
def task_list():
    """Render the add-task form followed by all current tasks as an HTML list."""
    items = []
    for position, label in enumerate(tasks):
        items.append("<li>" + label + delete_form(position) + "</li>")
    return form + "<ul>" + "".join(items) + "</ul>"
@app.route("/", methods=['GET', 'POST'])
def home():
    """Show the task list; on POST, first append the submitted 'task' field."""
    if request.method == 'POST':
        tasks.append(request.form['task'])
    return task_list()
@app.route("/delete", methods=['POST'])
def delete():
    """Remove the task at the submitted index, then re-render the list."""
    index = int(request.form['task'])
    tasks.pop(index)
    return task_list()
app.run()
| 24.452381 | 94 | 0.552093 |
from flask import Flask, request
app = Flask(__name__)
tasks = ["GenCS 1"]
form = ("<form action='/' method='POST'>"
"<input autofocus type='text' name='task' />"
"<input type='submit' />"
"</form>")
def delete_form(idx):
return ("<form action='/delete' method='POST'>"
"<input type='hidden' name='task' value='" + str(idx) + "' />"
"<input type='submit' value='Delete'/>"
"</form>"
)
def task_list():
task_list = ["<li>" + task + delete_form(idx) + "</li>" for idx, task in enumerate(tasks)]
return form + "<ul>" + "".join(task_list) + "</ul>"
@app.route("/", methods=['GET', 'POST'])
def home():
if request.method == 'POST':
new_task = request.form['task']
tasks.append(new_task)
return task_list()
@app.route("/delete", methods=['POST'])
def delete():
tasks.pop(int(request.form['task']))
return task_list()
app.run()
| true | true |
f7225447221fae0751d91a0b3b5cbbb1de9be1d8 | 3,535 | py | Python | alipay/aop/api/domain/AlipayOpenPublicPartnerMenuOperateModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/AlipayOpenPublicPartnerMenuOperateModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/AlipayOpenPublicPartnerMenuOperateModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenPublicPartnerMenuOperateModel(object):
    """Request model for alipay.open.public.partner.menu.operate.

    Plain data holder with property accessors plus to/from-dict helpers
    matching the AliPay SDK serialization convention: only truthy fields
    are serialized, and nested models expose to_alipay_dict().
    """

    # Serialization order matters: this tuple mirrors the field declaration order.
    _FIELD_NAMES = ('action_param', 'action_type', 'agreement_id',
                    'public_id', 'third_account_id', 'user_id')

    def __init__(self):
        self._action_param = None
        self._action_type = None
        self._agreement_id = None
        self._public_id = None
        self._third_account_id = None
        self._user_id = None

    @property
    def action_param(self):
        return self._action_param

    @action_param.setter
    def action_param(self, value):
        self._action_param = value

    @property
    def action_type(self):
        return self._action_type

    @action_type.setter
    def action_type(self, value):
        self._action_type = value

    @property
    def agreement_id(self):
        return self._agreement_id

    @agreement_id.setter
    def agreement_id(self, value):
        self._agreement_id = value

    @property
    def public_id(self):
        return self._public_id

    @public_id.setter
    def public_id(self, value):
        self._public_id = value

    @property
    def third_account_id(self):
        return self._third_account_id

    @third_account_id.setter
    def third_account_id(self, value):
        self._third_account_id = value

    @property
    def user_id(self):
        return self._user_id

    @user_id.setter
    def user_id(self, value):
        self._user_id = value

    def to_alipay_dict(self):
        """Serialize truthy fields to a dict, recursing into nested models."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for a falsy input."""
        if not d:
            return None
        o = AlipayOpenPublicPartnerMenuOperateModel()
        for name in AlipayOpenPublicPartnerMenuOperateModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
| 30.474138 | 83 | 0.603395 |
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenPublicPartnerMenuOperateModel(object):
def __init__(self):
self._action_param = None
self._action_type = None
self._agreement_id = None
self._public_id = None
self._third_account_id = None
self._user_id = None
@property
def action_param(self):
return self._action_param
@action_param.setter
def action_param(self, value):
self._action_param = value
@property
def action_type(self):
return self._action_type
@action_type.setter
def action_type(self, value):
self._action_type = value
@property
def agreement_id(self):
return self._agreement_id
@agreement_id.setter
def agreement_id(self, value):
self._agreement_id = value
@property
def public_id(self):
return self._public_id
@public_id.setter
def public_id(self, value):
self._public_id = value
@property
def third_account_id(self):
return self._third_account_id
@third_account_id.setter
def third_account_id(self, value):
self._third_account_id = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.action_param:
if hasattr(self.action_param, 'to_alipay_dict'):
params['action_param'] = self.action_param.to_alipay_dict()
else:
params['action_param'] = self.action_param
if self.action_type:
if hasattr(self.action_type, 'to_alipay_dict'):
params['action_type'] = self.action_type.to_alipay_dict()
else:
params['action_type'] = self.action_type
if self.agreement_id:
if hasattr(self.agreement_id, 'to_alipay_dict'):
params['agreement_id'] = self.agreement_id.to_alipay_dict()
else:
params['agreement_id'] = self.agreement_id
if self.public_id:
if hasattr(self.public_id, 'to_alipay_dict'):
params['public_id'] = self.public_id.to_alipay_dict()
else:
params['public_id'] = self.public_id
if self.third_account_id:
if hasattr(self.third_account_id, 'to_alipay_dict'):
params['third_account_id'] = self.third_account_id.to_alipay_dict()
else:
params['third_account_id'] = self.third_account_id
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenPublicPartnerMenuOperateModel()
if 'action_param' in d:
o.action_param = d['action_param']
if 'action_type' in d:
o.action_type = d['action_type']
if 'agreement_id' in d:
o.agreement_id = d['agreement_id']
if 'public_id' in d:
o.public_id = d['public_id']
if 'third_account_id' in d:
o.third_account_id = d['third_account_id']
if 'user_id' in d:
o.user_id = d['user_id']
return o
| true | true |
f72254bb72393c444db26b25e01795cdaaecd70c | 1,377 | py | Python | annif/backend/dummy.py | ooduor/Annif | 2fd7db9c709ef018e15671d8fafe3aa9cbd4f2cf | [
"Apache-2.0"
] | null | null | null | annif/backend/dummy.py | ooduor/Annif | 2fd7db9c709ef018e15671d8fafe3aa9cbd4f2cf | [
"Apache-2.0"
] | null | null | null | annif/backend/dummy.py | ooduor/Annif | 2fd7db9c709ef018e15671d8fafe3aa9cbd4f2cf | [
"Apache-2.0"
] | null | null | null | """Dummy backend for testing basic interaction of projects and backends"""
from annif.suggestion import SubjectSuggestion, ListSuggestionResult
from . import backend
class DummyBackend(backend.AnnifLearningBackend):
    """Trivial backend that always suggests one fixed subject.

    Exists so tests can exercise project/backend wiring without a real model.
    """

    name = "dummy"
    initialized = False
    uri = 'http://example.org/dummy'
    label = 'dummy'

    def default_params(self):
        return backend.AnnifBackend.DEFAULT_PARAMETERS

    def initialize(self):
        self.initialized = True

    def _suggest(self, text, params):
        # Score/notation may be overridden via backend params; text is ignored.
        suggestion = SubjectSuggestion(
            uri=self.uri,
            label=self.label,
            notation=params.get('notation', None),
            score=float(params.get('score', 1.0)))
        return ListSuggestionResult([suggestion], self.project.subjects)

    def _learn(self, corpus, params):
        # "Learn" by adopting the URI/label of the first subject of the first
        # document; subsequent _suggest() calls then echo that subject back.
        for doc in corpus.documents:
            if doc.uris and doc.labels:
                self.uri = list(doc.uris)[0]
                self.label = list(doc.labels)[0]
                break
| 36.236842 | 74 | 0.571532 |
from annif.suggestion import SubjectSuggestion, ListSuggestionResult
from . import backend
class DummyBackend(backend.AnnifLearningBackend):
name = "dummy"
initialized = False
uri = 'http://example.org/dummy'
label = 'dummy'
def default_params(self):
return backend.AnnifBackend.DEFAULT_PARAMETERS
def initialize(self):
self.initialized = True
def _suggest(self, text, params):
score = float(params.get('score', 1.0))
notation = params.get('notation', None)
return ListSuggestionResult([SubjectSuggestion(uri=self.uri,
label=self.label,
notation=notation,
score=score)],
self.project.subjects)
def _learn(self, corpus, params):
for doc in corpus.documents:
if doc.uris and doc.labels:
self.uri = list(doc.uris)[0]
self.label = list(doc.labels)[0]
break
| true | true |
f72255d3f9ce93464b0a41607b812236a3af47db | 931 | py | Python | tests/test_classical_names.py | hramezani/inflect | c16192f0197890f6e439a1186f3296c2ce4d7303 | [
"MIT"
] | null | null | null | tests/test_classical_names.py | hramezani/inflect | c16192f0197890f6e439a1186f3296c2ce4d7303 | [
"MIT"
] | null | null | null | tests/test_classical_names.py | hramezani/inflect | c16192f0197890f6e439a1186f3296c2ce4d7303 | [
"MIT"
] | null | null | null | from nose.tools import eq_
import inflect
def test_ancient_1():
    """classical(names=True) is a no-op (already the default) and leaves other modes off."""
    p = inflect.engine()
    # Default behaviour: classical 'names' handling is already on.
    eq_(p.plural_noun("Sally"), "Sallys", msg="classical 'names' active")
    eq_(p.plural_noun("Jones", 0), "Joneses", msg="classical 'names' active")
    # Explicitly enabling names=True must not change anything.
    p.classical(names=True)
    eq_(p.plural_noun("Sally"), "Sallys", msg="classical 'names' active")
    eq_(p.plural_noun("Jones", 0), "Joneses", msg="classical 'names' active")
    # The remaining classical modes stay inactive.
    cases = (
        ("wildebeest", None, "wildebeests", "classical 'herd' not active"),
        ("formula", None, "formulas", "classical 'ancient' active"),
        ("error", 0, "errors", "classical 'zero' not active"),
        ("brother", None, "brothers", "classical 'all' not active"),
        ("person", None, "people", "classical 'persons' not active"),
    )
    for word, count, expected, note in cases:
        if count is None:
            eq_(p.plural_noun(word), expected, msg=note)
        else:
            eq_(p.plural_noun(word, count), expected, msg=note)
| 34.481481 | 86 | 0.663802 | from nose.tools import eq_
import inflect
def test_ancient_1():
p = inflect.engine()
eq_(p.plural_noun("Sally"), "Sallys", msg="classical 'names' active")
eq_(p.plural_noun("Jones", 0), "Joneses", msg="classical 'names' active")
p.classical(names=True)
eq_(p.plural_noun("Sally"), "Sallys", msg="classical 'names' active")
eq_(p.plural_noun("Jones", 0), "Joneses", msg="classical 'names' active")
eq_(p.plural_noun("wildebeest"), "wildebeests", msg="classical 'herd' not active")
eq_(p.plural_noun("formula"), "formulas", msg="classical 'ancient' active")
eq_(p.plural_noun("error", 0), "errors", msg="classical 'zero' not active")
eq_(p.plural_noun("brother"), "brothers", msg="classical 'all' not active")
eq_(p.plural_noun("person"), "people", msg="classical 'persons' not active")
| true | true |
f722572e191342082a2d695c14606225fcf5fbef | 2,485 | py | Python | pages/base_page.py | bloodmilk44/stepik_final_task | 897b32b5dba3a07e5cf720c1c3eb57887a0c24cb | [
"Apache-2.0"
] | null | null | null | pages/base_page.py | bloodmilk44/stepik_final_task | 897b32b5dba3a07e5cf720c1c3eb57887a0c24cb | [
"Apache-2.0"
] | null | null | null | pages/base_page.py | bloodmilk44/stepik_final_task | 897b32b5dba3a07e5cf720c1c3eb57887a0c24cb | [
"Apache-2.0"
] | null | null | null | import math
import time
from .locators import BasePageLocators
from selenium.common.exceptions import NoSuchElementException, NoAlertPresentException, TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class BasePage():
    """Page Object base: wraps a WebDriver with shared navigation and wait helpers."""
    def __init__(self, browser, url, timeout=10):
        # timeout configures the driver-wide implicit wait (seconds).
        self.browser = browser
        self.url = url
        self.browser.implicitly_wait(timeout)
    def go_to_basket_page(self):
        """Click the basket button in the page header."""
        link = self.browser.find_element(*BasePageLocators.BASKET_BUTTON)
        link.click()
    def go_to_login_page(self):
        """Click the login link in the page header."""
        link = self.browser.find_element(*BasePageLocators.LOGIN_LINK)
        link.click()
    def is_disappeared(self, how, what, timeout=4):
        """Return True once the located element disappears within *timeout* seconds.

        NOTE(review): TimeoutException is passed as WebDriverWait's
        ignored_exceptions argument (4th positional) — presumably intentional,
        but worth confirming against the Selenium docs.
        """
        try:
            WebDriverWait(self.browser, timeout, 1, TimeoutException).\
                until_not(EC.presence_of_element_located((how, what)))
        except TimeoutException:
            return False
        return True
    def is_element_present(self, how, what):
        """Return True if the element can be found (subject to the implicit wait)."""
        try:
            self.browser.find_element(how, what)
        except NoSuchElementException:
            return False
        return True
    def is_not_element_present(self, how, what, timeout=4):
        """Return True if the element does NOT appear within *timeout* seconds."""
        try:
            WebDriverWait(self.browser, timeout).until(EC.presence_of_element_located((how, what)))
        except TimeoutException:
            return True
        return False
    def open(self):
        """Navigate the browser to this page's URL."""
        self.browser.get(self.url)
    def should_be_authorized_user(self):
        """Assert the logged-in user icon is visible."""
        assert self.is_element_present(*BasePageLocators.USER_ICON), "User icon is not presented," \
                                                                    " probably unauthorised user"
    def should_be_basket_button(self):
        """Assert the basket button is visible."""
        assert self.is_element_present(*BasePageLocators.BASKET_BUTTON)
    def should_be_login_link(self):
        """Assert the login link is visible."""
        assert self.is_element_present(*BasePageLocators.LOGIN_LINK), "Login link is not presented"
    def solve_quiz_and_get_code(self):
        """Answer the math-quiz JS alert, then print the code shown by the second alert."""
        alert = self.browser.switch_to.alert
        # The third whitespace-separated token of the alert text is the number to use.
        x = alert.text.split(" ")[2]
        answer = str(math.log(abs((12 * math.sin(float(x))))))
        alert.send_keys(answer)
        alert.accept()
        try:
            alert = self.browser.switch_to.alert
            alert_text = alert.text
            print(f"Your code: {alert_text}")
            alert.accept()
        except NoAlertPresentException:
            print("No second alert presented")
| 35 | 104 | 0.654326 | import math
import time
from .locators import BasePageLocators
from selenium.common.exceptions import NoSuchElementException, NoAlertPresentException, TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class BasePage():
def __init__(self, browser, url, timeout=10):
self.browser = browser
self.url = url
self.browser.implicitly_wait(timeout)
def go_to_basket_page(self):
link = self.browser.find_element(*BasePageLocators.BASKET_BUTTON)
link.click()
def go_to_login_page(self):
link = self.browser.find_element(*BasePageLocators.LOGIN_LINK)
link.click()
def is_disappeared(self, how, what, timeout=4):
try:
WebDriverWait(self.browser, timeout, 1, TimeoutException).\
until_not(EC.presence_of_element_located((how, what)))
except TimeoutException:
return False
return True
def is_element_present(self, how, what):
try:
self.browser.find_element(how, what)
except NoSuchElementException:
return False
return True
def is_not_element_present(self, how, what, timeout=4):
try:
WebDriverWait(self.browser, timeout).until(EC.presence_of_element_located((how, what)))
except TimeoutException:
return True
return False
def open(self):
self.browser.get(self.url)
def should_be_authorized_user(self):
assert self.is_element_present(*BasePageLocators.USER_ICON), "User icon is not presented," \
" probably unauthorised user"
def should_be_basket_button(self):
assert self.is_element_present(*BasePageLocators.BASKET_BUTTON)
def should_be_login_link(self):
assert self.is_element_present(*BasePageLocators.LOGIN_LINK), "Login link is not presented"
def solve_quiz_and_get_code(self):
alert = self.browser.switch_to.alert
x = alert.text.split(" ")[2]
answer = str(math.log(abs((12 * math.sin(float(x))))))
alert.send_keys(answer)
alert.accept()
try:
alert = self.browser.switch_to.alert
alert_text = alert.text
print(f"Your code: {alert_text}")
alert.accept()
except NoAlertPresentException:
print("No second alert presented")
| true | true |
f722574172b83587f0c7ea3c96cb8a8b1454e86c | 1,935 | py | Python | aliyun-python-sdk-codeup/aliyunsdkcodeup/request/v20200414/UpdateRepositoryMemberRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-codeup/aliyunsdkcodeup/request/v20200414/UpdateRepositoryMemberRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-codeup/aliyunsdkcodeup/request/v20200414/UpdateRepositoryMemberRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
class UpdateRepositoryMemberRequest(RoaRequest):
    """ROA request for Codeup's UpdateRepositoryMember API (PUT, version 2020-04-14)."""
    def __init__(self):
        RoaRequest.__init__(self, 'codeup', '2020-04-14', 'UpdateRepositoryMember')
        # ProjectId/UserId are substituted into the URI as path parameters below.
        self.set_uri_pattern('/api/v3/projects/[ProjectId]/members/[UserId]')
        self.set_method('PUT')
    def get_OrganizationId(self):
        # Query parameter: Codeup organization ID.
        return self.get_query_params().get('OrganizationId')
    def set_OrganizationId(self,OrganizationId):
        self.add_query_param('OrganizationId',OrganizationId)
    def get_SubUserId(self):
        # Query parameter: RAM sub-user ID.
        return self.get_query_params().get('SubUserId')
    def set_SubUserId(self,SubUserId):
        self.add_query_param('SubUserId',SubUserId)
    def get_AccessToken(self):
        # Query parameter: Codeup personal access token.
        return self.get_query_params().get('AccessToken')
    def set_AccessToken(self,AccessToken):
        self.add_query_param('AccessToken',AccessToken)
    def get_ProjectId(self):
        # Path parameter: repository (project) ID in the URI pattern.
        return self.get_path_params().get('ProjectId')
    def set_ProjectId(self,ProjectId):
        self.add_path_param('ProjectId',ProjectId)
    def get_UserId(self):
        # Path parameter: member user ID in the URI pattern.
        return self.get_path_params().get('UserId')
    def set_UserId(self,UserId):
        self.add_path_param('UserId',UserId)
from aliyunsdkcore.request import RoaRequest
class UpdateRepositoryMemberRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'codeup', '2020-04-14', 'UpdateRepositoryMember')
self.set_uri_pattern('/api/v3/projects/[ProjectId]/members/[UserId]')
self.set_method('PUT')
def get_OrganizationId(self):
return self.get_query_params().get('OrganizationId')
def set_OrganizationId(self,OrganizationId):
self.add_query_param('OrganizationId',OrganizationId)
def get_SubUserId(self):
return self.get_query_params().get('SubUserId')
def set_SubUserId(self,SubUserId):
self.add_query_param('SubUserId',SubUserId)
def get_AccessToken(self):
return self.get_query_params().get('AccessToken')
def set_AccessToken(self,AccessToken):
self.add_query_param('AccessToken',AccessToken)
def get_ProjectId(self):
return self.get_path_params().get('ProjectId')
def set_ProjectId(self,ProjectId):
self.add_path_param('ProjectId',ProjectId)
def get_UserId(self):
return self.get_path_params().get('UserId')
def set_UserId(self,UserId):
self.add_path_param('UserId',UserId) | true | true |
f72257798e083e50f24091ddc6e6f591079e6a2a | 829 | py | Python | home/urls.py | Adyyousf/ToDo-List | a5d200b0eab14702c927dc451b47535b80be3093 | [
"MIT"
] | 1 | 2021-07-08T05:37:05.000Z | 2021-07-08T05:37:05.000Z | home/urls.py | Adyyousf/ToDo-List | a5d200b0eab14702c927dc451b47535b80be3093 | [
"MIT"
] | null | null | null | home/urls.py | Adyyousf/ToDo-List | a5d200b0eab14702c927dc451b47535b80be3093 | [
"MIT"
] | null | null | null | """todoList URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from home import views
# Route the site index and the task-listing endpoint to the home app's views.
urlpatterns = [
    path('', views.home, name='home'),
    path('tasks', views.tasks, name='tasks')
]
| 34.541667 | 77 | 0.705669 | from django.contrib import admin
from django.urls import path, include
from home import views
urlpatterns = [
path('', views.home, name='home'),
path('tasks', views.tasks, name='tasks')
]
| true | true |
f722586083347dd4e8df04b1cab2405526be06e0 | 396 | py | Python | python/st.py | waytai/open | 00fc6891a8fe8e7dc84067ebea8e69895e14e912 | [
"BSD-2-Clause"
] | null | null | null | python/st.py | waytai/open | 00fc6891a8fe8e7dc84067ebea8e69895e14e912 | [
"BSD-2-Clause"
] | null | null | null | python/st.py | waytai/open | 00fc6891a8fe8e7dc84067ebea8e69895e14e912 | [
"BSD-2-Clause"
] | null | null | null | class Student(object):
def __init__(self, name):
self.name = name
def __repr__(self):
print "8"*20
return 'Student object (name: %s)' % self.name
def __str__(self):
print "*"*20
return 'Student object str (name: %s)' % self.name
def __call__(self):
print "my name is __call__"
print Student('liu')
s = Student("how are you")
s()
| 22 | 58 | 0.578283 | class Student(object):
def __init__(self, name):
self.name = name
def __repr__(self):
print "8"*20
return 'Student object (name: %s)' % self.name
def __str__(self):
print "*"*20
return 'Student object str (name: %s)' % self.name
def __call__(self):
print "my name is __call__"
print Student('liu')
s = Student("how are you")
s()
| false | true |
f72258f0a94deb8548e6e4572af13c548a6b4b9e | 13,349 | py | Python | lib2to3/main.py | wrye-bash/2to3 | a2cf203dc5bb7eb77a1b32d9138aa2f579cd94ba | [
"0BSD"
] | null | null | null | lib2to3/main.py | wrye-bash/2to3 | a2cf203dc5bb7eb77a1b32d9138aa2f579cd94ba | [
"0BSD"
] | null | null | null | lib2to3/main.py | wrye-bash/2to3 | a2cf203dc5bb7eb77a1b32d9138aa2f579cd94ba | [
"0BSD"
] | null | null | null | """
Main program for 2to3.
"""
from __future__ import with_statement, print_function
import sys
import os
import difflib
import logging
import shutil
import operator
import optparse
from . import refactor
def diff_texts(a, b, filename):
    """Return a unified diff (iterator of lines, no trailing newlines) of two strings."""
    old_lines, new_lines = a.splitlines(), b.splitlines()
    return difflib.unified_diff(
        old_lines, new_lines,
        fromfile=filename, tofile=filename,
        fromfiledate="(original)", tofiledate="(refactored)",
        lineterm="")
class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool):
    """
    A refactoring tool that can avoid overwriting its input files.
    Prints output to stdout.
    Output files can optionally be written to a different directory and or
    have an extra file suffix appended to their name for use in situations
    where you do not want to replace the input files.
    """

    def __init__(self, fixers, options, explicit, nobackups, show_diffs,
                 input_base_dir='', output_dir='', append_suffix=''):
        """
        Args:
            fixers: A list of fixers to import.
            options: A dict with RefactoringTool configuration.
            explicit: A list of fixers to run even if they are explicit.
            nobackups: If true no backup '.bak' files will be created for those
                files that are being refactored.
            show_diffs: Should diffs of the refactoring be printed to stdout?
            input_base_dir: The base directory for all input files.  This class
                will strip this path prefix off of filenames before substituting
                it with output_dir.  Only meaningful if output_dir is supplied.
                All files processed by refactor() must start with this path.
            output_dir: If supplied, all converted files will be written into
                this directory tree instead of input_base_dir.
            append_suffix: If supplied, all files output by this tool will have
                this appended to their filename.  Useful for changing .py to
                .py3 for example by passing append_suffix='3'.
        """
        self.nobackups = nobackups
        self.show_diffs = show_diffs
        # Normalize the base dir to end with a separator so the prefix strip
        # in write_file() aligns on a path-component boundary.
        if input_base_dir and not input_base_dir.endswith(os.sep):
            input_base_dir += os.sep
        self._input_base_dir = input_base_dir
        self._output_dir = output_dir
        self._append_suffix = append_suffix
        super(StdoutRefactoringTool, self).__init__(fixers, options, explicit)

    def log_error(self, msg, *args, **kwargs):
        """Record the error for later reporting, then log it immediately."""
        self.errors.append((msg, args, kwargs))
        self.logger.error(msg, *args, **kwargs)

    def write_file(self, new_text, filename, old_text, encoding):
        """Write the refactored text, honoring output_dir/suffix/backup options."""
        orig_filename = filename
        if self._output_dir:
            if filename.startswith(self._input_base_dir):
                # Re-root the file under output_dir, preserving its relative path.
                filename = os.path.join(self._output_dir,
                                        filename[len(self._input_base_dir):])
            else:
                raise ValueError('filename %s does not start with the '
                                 'input_base_dir %s' % (
                                         filename, self._input_base_dir))
        if self._append_suffix:
            filename += self._append_suffix
        if orig_filename != filename:
            output_dir = os.path.dirname(filename)
            if not os.path.isdir(output_dir) and output_dir:
                os.makedirs(output_dir)
            self.log_message('Writing converted %s to %s.', orig_filename,
                             filename)
        if not self.nobackups:
            # Make backup
            backup = filename + ".bak"
            if os.path.lexists(backup):
                try:
                    os.remove(backup)
                except OSError:
                    self.log_message("Can't remove backup %s", backup)
            try:
                os.rename(filename, backup)
            except OSError:
                self.log_message("Can't rename %s to %s", filename, backup)
        # Actually write the new file
        write = super(StdoutRefactoringTool, self).write_file
        write(new_text, filename, old_text, encoding)
        if not self.nobackups:
            shutil.copymode(backup, filename)
        if orig_filename != filename:
            # Preserve the file mode in the new output directory.
            shutil.copymode(orig_filename, filename)

    def print_output(self, old, new, filename, equal):
        """Log the refactoring outcome and, when enabled, print a unified diff."""
        if equal:
            self.log_message("No changes to %s", filename)
        else:
            self.log_message("Refactored %s", filename)
            if self.show_diffs:
                diff_lines = diff_texts(old, new, filename)
                try:
                    # Serialize diff output across worker processes when a
                    # shared lock exists (multiprocess refactoring).
                    if self.output_lock is not None:
                        with self.output_lock:
                            for line in diff_lines:
                                print(line)
                            sys.stdout.flush()
                    else:
                        for line in diff_lines:
                            print(line)
                except UnicodeEncodeError:
                    warn("couldn't encode %s's diff for your terminal" %
                         (filename,))
                    return
def warn(msg):
    """Emit a single warning line on stderr."""
    sys.stderr.write("WARNING: %s\n" % (msg,))
def main(fixer_pkg, args=None):
"""Main program.
Args:
fixer_pkg: the name of a package where the fixers are located.
args: optional; a list of command line arguments. If omitted,
sys.argv[1:] is used.
Returns a suggested exit status (0, 1, 2).
"""
# Set up option parser
parser = optparse.OptionParser(usage="2to3 [options] file|dir ...")
parser.add_option("-d", "--doctests_only", action="store_true",
help="Fix up doctests only")
parser.add_option("-f", "--fix", action="append", default=[],
help="Each FIX specifies a transformation; default: all")
parser.add_option("-j", "--processes", action="store", default=1,
type="int", help="Run 2to3 concurrently")
parser.add_option("-x", "--nofix", action="append", default=[],
help="Prevent a transformation from being run")
parser.add_option("-l", "--list-fixes", action="store_true",
help="List available transformations")
parser.add_option("-p", "--print-function", action="store_true",
help="Modify the grammar so that print() is a function")
parser.add_option("-e", "--exec-function", action="store_true",
help="Modify the grammar so that exec() is a function")
parser.add_option("-v", "--verbose", action="store_true",
help="More verbose logging")
parser.add_option("--no-diffs", action="store_true",
help="Don't show diffs of the refactoring")
parser.add_option("-w", "--write", action="store_true",
help="Write back modified files")
parser.add_option("-n", "--nobackups", action="store_true", default=False,
help="Don't write backups for modified files")
parser.add_option("-o", "--output-dir", action="store", type="str",
default="", help="Put output files in this directory "
"instead of overwriting the input files. Requires -n.")
parser.add_option("-W", "--write-unchanged-files", action="store_true",
help="Also write files even if no changes were required"
" (useful with --output-dir); implies -w.")
parser.add_option("--add-suffix", action="store", type="str", default="",
help="Append this string to all output filenames."
" Requires -n if non-empty. "
"ex: --add-suffix='3' will generate .py3 files.")
# Parse command line arguments
refactor_stdin = False
flags = {}
options, args = parser.parse_args(args)
if options.write_unchanged_files:
flags["write_unchanged_files"] = True
if not options.write:
warn("--write-unchanged-files/-W implies -w.")
options.write = True
# If we allowed these, the original files would be renamed to backup names
# but not replaced.
if options.output_dir and not options.nobackups:
parser.error("Can't use --output-dir/-o without -n.")
if options.add_suffix and not options.nobackups:
parser.error("Can't use --add-suffix without -n.")
if not options.write and options.no_diffs:
warn("not writing files and not printing diffs; that's not very useful")
if not options.write and options.nobackups:
parser.error("Can't use -n without -w")
if options.print_function:
flags["print_function"] = True
if options.exec_function:
flags["exec_function"] = True
if options.list_fixes:
fixer_opts = {"print_function": False, "exec_function": False,
"write_unchanged_files": False}
if flags:
fixer_opts.update(flags)
fixer_log = []
def get_fixers(fix_names):
"""Stolen from RefactoringTool.get_fixers."""
pre_order_fixers = []
post_order_fixers = []
for fix_mod_path in fix_names:
mod = __import__(fix_mod_path, {}, {}, ["*"])
fix_name = fix_mod_path.rsplit(".", 1)[-1]
if fix_name.startswith("fix_"):
fix_name = fix_name[len("fix_"):]
parts = fix_name.split("_")
class_name = "Fix" + "".join(
[p.title() for p in parts])
fix_class = getattr(mod, class_name)
fixer = fix_class(fixer_opts, fixer_log)
if fixer.order == "pre":
pre_order_fixers.append(fixer)
elif fixer.order == "post":
post_order_fixers.append(fixer)
key_func = operator.attrgetter("run_order")
pre_order_fixers.sort(key=key_func)
post_order_fixers.sort(key=key_func)
return pre_order_fixers + post_order_fixers
print("Available transformations for the -f/--fix option, sorted by "
"the order in which they will run:")
for fixer in get_fixers(refactor.get_fixers_from_package(fixer_pkg)):
# Strip off the fixer_pkg and fix_ from the beginning of the module
print(type(fixer).__module__[len(fixer_pkg) + len('fix_') + 1:])
if not args:
return 0
if not args:
print("At least one file or directory argument required.", file=sys.stderr)
print("Use --help to show usage.", file=sys.stderr)
return 2
if "-" in args:
refactor_stdin = True
if options.write:
print("Can't write to stdin.", file=sys.stderr)
return 2
# Set up logging handler
level = logging.DEBUG if options.verbose else logging.INFO
logging.basicConfig(format='%(name)s: %(message)s', level=level)
logger = logging.getLogger('2to3.main')
# Initialize the refactoring tool
avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg))
unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix)
explicit = set()
if options.fix:
all_present = False
for fix in options.fix:
if fix == "all":
all_present = True
else:
explicit.add(fixer_pkg + ".fix_" + fix)
requested = avail_fixes.union(explicit) if all_present else explicit
else:
requested = avail_fixes.union(explicit)
fixer_names = requested.difference(unwanted_fixes)
input_base_dir = os.path.commonprefix(args)
if (input_base_dir and not input_base_dir.endswith(os.sep)
and not os.path.isdir(input_base_dir)):
# One or more similar names were passed, their directory is the base.
# os.path.commonprefix() is ignorant of path elements, this corrects
# for that weird API.
input_base_dir = os.path.dirname(input_base_dir)
if options.output_dir:
input_base_dir = input_base_dir.rstrip(os.sep)
logger.info('Output in %r will mirror the input directory %r layout.',
options.output_dir, input_base_dir)
rt = StdoutRefactoringTool(
sorted(fixer_names), flags, sorted(explicit),
options.nobackups, not options.no_diffs,
input_base_dir=input_base_dir,
output_dir=options.output_dir,
append_suffix=options.add_suffix)
# Refactor all files and directories passed as arguments
if not rt.errors:
if refactor_stdin:
rt.refactor_stdin()
else:
try:
rt.refactor(args, options.write, options.doctests_only,
options.processes)
except refactor.MultiprocessingUnsupported:
assert options.processes > 1
print("Sorry, -j isn't supported on this platform.",
file=sys.stderr)
return 1
rt.summarize()
# Return error status (0 if rt.errors is zero)
return int(bool(rt.errors))
| 44.056106 | 83 | 0.589183 |
from __future__ import with_statement, print_function
import sys
import os
import difflib
import logging
import shutil
import operator
import optparse
from . import refactor
def diff_texts(a, b, filename):
    """Return a unified diff between texts *a* (original) and *b*
    (refactored), both attributed to *filename*."""
    old_lines = a.splitlines()
    new_lines = b.splitlines()
    return difflib.unified_diff(
        old_lines, new_lines,
        filename, filename,
        "(original)", "(refactored)",
        lineterm="")
class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool):
    """A refactoring tool that reports progress and diffs on stdout/stderr.

    Adds to MultiprocessRefactoringTool: optional ``.bak`` backups, writing
    converted files into a separate output directory, appending a suffix to
    output filenames, and printing unified diffs of each change.
    """

    def __init__(self, fixers, options, explicit, nobackups, show_diffs,
                 input_base_dir='', output_dir='', append_suffix=''):
        """
        Args:
            fixers: list of fixer names to run.
            options: dict of refactoring-tool configuration flags.
            explicit: list of fixers to run even though they are explicit.
            nobackups: if true, no '.bak' backup files are created.
            show_diffs: if true, print a unified diff for every change.
            input_base_dir: path prefix stripped off filenames before they
                are re-rooted under output_dir (only meaningful with
                output_dir; all refactored files must start with it).
            output_dir: if supplied, converted files are written into this
                directory tree instead of in place.
            append_suffix: if supplied, appended to every output filename
                (e.g. '3' turns foo.py into foo.py3).
        """
        self.nobackups = nobackups
        self.show_diffs = show_diffs
        # Normalize so the prefix test in write_file() works on whole path
        # components.
        if input_base_dir and not input_base_dir.endswith(os.sep):
            input_base_dir += os.sep
        self._input_base_dir = input_base_dir
        self._output_dir = output_dir
        self._append_suffix = append_suffix
        super(StdoutRefactoringTool, self).__init__(fixers, options, explicit)

    def log_error(self, msg, *args, **kwargs):
        """Record the error for the final summary, then log it."""
        self.errors.append((msg, args, kwargs))
        self.logger.error(msg, *args, **kwargs)

    def write_file(self, new_text, filename, old_text, encoding):
        """Write *new_text* out, honouring output_dir/append_suffix and
        the backup policy, then delegate the actual write to the parent."""
        orig_filename = filename
        if self._output_dir:
            if filename.startswith(self._input_base_dir):
                # Re-root the file under the output directory.
                filename = os.path.join(self._output_dir,
                                        filename[len(self._input_base_dir):])
            else:
                raise ValueError('filename %s does not start with the '
                                 'input_base_dir %s' % (
                                 filename, self._input_base_dir))
        if self._append_suffix:
            filename += self._append_suffix
        if orig_filename != filename:
            output_dir = os.path.dirname(filename)
            if not os.path.isdir(output_dir) and output_dir:
                os.makedirs(output_dir)
            self.log_message('Writing converted %s to %s.', orig_filename,
                             filename)
        if not self.nobackups:
            # Make (or refresh) a .bak copy of the target before writing.
            backup = filename + ".bak"
            if os.path.lexists(backup):
                try:
                    os.remove(backup)
                except OSError:
                    self.log_message("Can't remove backup %s", backup)
            try:
                os.rename(filename, backup)
            except OSError:
                self.log_message("Can't rename %s to %s", filename, backup)
        # Actual write of the output file.
        write = super(StdoutRefactoringTool, self).write_file
        write(new_text, filename, old_text, encoding)
        if not self.nobackups:
            shutil.copymode(backup, filename)
        if orig_filename != filename:
            # Preserve the file mode in the new output location.
            shutil.copymode(orig_filename, filename)

    def print_output(self, old, new, filename, equal):
        """Report whether *filename* changed; print the diff if enabled."""
        if equal:
            self.log_message("No changes to %s", filename)
        else:
            self.log_message("Refactored %s", filename)
            if self.show_diffs:
                diff_lines = diff_texts(old, new, filename)
                try:
                    if self.output_lock is not None:
                        # Serialize diff output when refactoring in parallel.
                        with self.output_lock:
                            for line in diff_lines:
                                print(line)
                            sys.stdout.flush()
                    else:
                        for line in diff_lines:
                            print(line)
                except UnicodeEncodeError:
                    warn("couldn't encode %s's diff for your terminal" %
                         (filename,))
                    return
def warn(msg):
    """Emit *msg* on standard error with a warning prefix."""
    text = "WARNING: %s" % (msg,)
    print(text, file=sys.stderr)
def main(fixer_pkg, args=None):
    """Main program.

    Args:
        fixer_pkg: the name of a package where the fixers are located.
        args: optional; a list of command line arguments. If omitted,
              sys.argv[1:] is used.

    Returns a suggested exit status (0, 1, 2).
    """
    # Set up option parser
    parser = optparse.OptionParser(usage="2to3 [options] file|dir ...")
    parser.add_option("-d", "--doctests_only", action="store_true",
                      help="Fix up doctests only")
    parser.add_option("-f", "--fix", action="append", default=[],
                      help="Each FIX specifies a transformation; default: all")
    parser.add_option("-j", "--processes", action="store", default=1,
                      type="int", help="Run 2to3 concurrently")
    parser.add_option("-x", "--nofix", action="append", default=[],
                      help="Prevent a transformation from being run")
    parser.add_option("-l", "--list-fixes", action="store_true",
                      help="List available transformations")
    parser.add_option("-p", "--print-function", action="store_true",
                      help="Modify the grammar so that print() is a function")
    parser.add_option("-e", "--exec-function", action="store_true",
                      help="Modify the grammar so that exec() is a function")
    parser.add_option("-v", "--verbose", action="store_true",
                      help="More verbose logging")
    parser.add_option("--no-diffs", action="store_true",
                      help="Don't show diffs of the refactoring")
    parser.add_option("-w", "--write", action="store_true",
                      help="Write back modified files")
    parser.add_option("-n", "--nobackups", action="store_true", default=False,
                      help="Don't write backups for modified files")
    parser.add_option("-o", "--output-dir", action="store", type="str",
                      default="", help="Put output files in this directory "
                      "instead of overwriting the input files. Requires -n.")
    parser.add_option("-W", "--write-unchanged-files", action="store_true",
                      help="Also write files even if no changes were required"
                      " (useful with --output-dir); implies -w.")
    parser.add_option("--add-suffix", action="store", type="str", default="",
                      help="Append this string to all output filenames."
                      " Requires -n if non-empty. "
                      "ex: --add-suffix='3' will generate .py3 files.")
    # Parse command line arguments
    refactor_stdin = False
    flags = {}
    options, args = parser.parse_args(args)
    if options.write_unchanged_files:
        flags["write_unchanged_files"] = True
        if not options.write:
            warn("--write-unchanged-files/-W implies -w.")
        options.write = True
    # If we allowed these, the original files would be renamed to backup names
    # but not replaced.
    if options.output_dir and not options.nobackups:
        parser.error("Can't use --output-dir/-o without -n.")
    if options.add_suffix and not options.nobackups:
        parser.error("Can't use --add-suffix without -n.")
    if not options.write and options.no_diffs:
        warn("not writing files and not printing diffs; that's not very useful")
    if not options.write and options.nobackups:
        parser.error("Can't use -n without -w")
    if options.print_function:
        flags["print_function"] = True
    if options.exec_function:
        flags["exec_function"] = True
    if options.list_fixes:
        # Instantiate each fixer just to report the order it would run in.
        fixer_opts = {"print_function": False, "exec_function": False,
                      "write_unchanged_files": False}
        if flags:
            fixer_opts.update(flags)
        fixer_log = []
        def get_fixers(fix_names):
            """Stolen from RefactoringTool.get_fixers."""
            pre_order_fixers = []
            post_order_fixers = []
            for fix_mod_path in fix_names:
                mod = __import__(fix_mod_path, {}, {}, ["*"])
                fix_name = fix_mod_path.rsplit(".", 1)[-1]
                if fix_name.startswith("fix_"):
                    fix_name = fix_name[len("fix_"):]
                parts = fix_name.split("_")
                class_name = "Fix" + "".join(
                    [p.title() for p in parts])
                fix_class = getattr(mod, class_name)
                fixer = fix_class(fixer_opts, fixer_log)
                if fixer.order == "pre":
                    pre_order_fixers.append(fixer)
                elif fixer.order == "post":
                    post_order_fixers.append(fixer)
            key_func = operator.attrgetter("run_order")
            pre_order_fixers.sort(key=key_func)
            post_order_fixers.sort(key=key_func)
            return pre_order_fixers + post_order_fixers
        print("Available transformations for the -f/--fix option, sorted by "
              "the order in which they will run:")
        for fixer in get_fixers(refactor.get_fixers_from_package(fixer_pkg)):
            # Strip off the fixer_pkg and fix_ from the beginning of the module
            print(type(fixer).__module__[len(fixer_pkg) + len('fix_') + 1:])
        if not args:
            return 0
    if not args:
        print("At least one file or directory argument required.", file=sys.stderr)
        print("Use --help to show usage.", file=sys.stderr)
        return 2
    if "-" in args:
        refactor_stdin = True
        if options.write:
            print("Can't write to stdin.", file=sys.stderr)
            return 2
    # Set up logging handler
    level = logging.DEBUG if options.verbose else logging.INFO
    logging.basicConfig(format='%(name)s: %(message)s', level=level)
    logger = logging.getLogger('2to3.main')
    # Initialize the refactoring tool
    avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg))
    unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix)
    explicit = set()
    if options.fix:
        all_present = False
        for fix in options.fix:
            if fix == "all":
                all_present = True
            else:
                explicit.add(fixer_pkg + ".fix_" + fix)
        requested = avail_fixes.union(explicit) if all_present else explicit
    else:
        requested = avail_fixes.union(explicit)
    fixer_names = requested.difference(unwanted_fixes)
    input_base_dir = os.path.commonprefix(args)
    if (input_base_dir and not input_base_dir.endswith(os.sep)
        and not os.path.isdir(input_base_dir)):
        # One or more similar names were passed, their directory is the base.
        # os.path.commonprefix() is ignorant of path elements, this corrects
        # for that weird API.
        input_base_dir = os.path.dirname(input_base_dir)
    if options.output_dir:
        input_base_dir = input_base_dir.rstrip(os.sep)
        logger.info('Output in %r will mirror the input directory %r layout.',
                    options.output_dir, input_base_dir)
    rt = StdoutRefactoringTool(
        sorted(fixer_names), flags, sorted(explicit),
        options.nobackups, not options.no_diffs,
        input_base_dir=input_base_dir,
        output_dir=options.output_dir,
        append_suffix=options.add_suffix)
    # Refactor all files and directories passed as arguments
    if not rt.errors:
        if refactor_stdin:
            rt.refactor_stdin()
        else:
            try:
                rt.refactor(args, options.write, options.doctests_only,
                            options.processes)
            except refactor.MultiprocessingUnsupported:
                assert options.processes > 1
                print("Sorry, -j isn't supported on this platform.",
                      file=sys.stderr)
                return 1
        rt.summarize()
    # Return error status (0 if rt.errors is zero)
    return int(bool(rt.errors))
| true | true |
f722591cc7fdff90b26972f0289a12c85a0e1d74 | 7,624 | py | Python | facilyst/mocks/mock_types/features.py | ParthivNaresh/Facilyst | 786932b0afcf07cd300b2e6ce55ccf7f9e4c49d9 | [
"MIT"
] | null | null | null | facilyst/mocks/mock_types/features.py | ParthivNaresh/Facilyst | 786932b0afcf07cd300b2e6ce55ccf7f9e4c49d9 | [
"MIT"
] | 3 | 2022-02-26T17:19:28.000Z | 2022-03-01T09:34:19.000Z | facilyst/mocks/mock_types/features.py | ParthivNaresh/facilyst | 786932b0afcf07cd300b2e6ce55ccf7f9e4c49d9 | [
"MIT"
] | null | null | null | """A mock type that returns features data."""
import pandas as pd
import woodwork as ww
from facilyst.mocks import MockBase
from facilyst.mocks.mock_types.utils import mock_features_dtypes
class Features(MockBase):
    """Class to manage mock data creation of features.

    :param num_rows: The number of observations in the final dataset. Defaults to 100.
    :type num_rows: int, optional
    :param library: The library of which the final dataset should be, options are 'pandas' and 'numpy'. Defaults to 'pandas'.
    :type library: str, optional
    :param ints: Flag that includes column with monotonically increasing incremental set of negative and positive integers. Defaults to True.
    :type ints: bool, optional
    :param rand_ints: Flag that includes column with randomly selected integers between -5 and 5. Defaults to True.
    :type rand_ints: bool, optional
    :param floats: Flag that includes column which is the float version of the 'ints' column. Defaults to True.
    :type floats: bool, optional
    :param rand_floats: Flag that includes column with randomly selected floats between -5 and 5. Defaults to True.
    :type rand_floats: bool, optional
    :param booleans: Flag that includes column with randomly selected boolean values. Defaults to False.
    :type booleans: bool, optional
    :param categoricals: Flag that includes column with four categories: 'First', 'Second', 'Third', and 'Fourth'. Defaults to False.
    :type categoricals: bool, optional
    :param dates: Flag that includes column with monotonically increasing dates from 01/01/2001 with a daily frequency. Defaults to False.
    :type dates: bool, optional
    :param texts: Flag that includes column with different text on each line. Defaults to False.
    :type texts: bool, optional
    :param ints_nullable: Flag that includes column which is the same as the 'ints' column with pd.NA included. Defaults to False.
    :type ints_nullable: bool, optional
    :param floats_nullable: Flag that includes column which is the same as the 'floats' column with pd.NA included. Defaults to False.
    :type floats_nullable: bool, optional
    :param booleans_nullable: Flag that includes column which is a randomly selected column with boolean values and pd.NA included. Defaults to False.
    :type booleans_nullable: bool, optional
    :param full_names: Flag that includes column with first and last names. Defaults to False.
    :type full_names: bool, optional
    :param phone_numbers: Flag that includes column with US-based phone numbers. Defaults to False.
    :type phone_numbers: bool, optional
    :param addresses: Flag that includes column with addresses. Defaults to False.
    :type addresses: bool, optional
    :param countries: Flag that includes column with country names. Defaults to False.
    :type countries: bool, optional
    :param email_addresses: Flag that includes column with email addresses. Defaults to False.
    :type email_addresses: bool, optional
    :param urls: Flag that includes column with URLs. Defaults to False.
    :type urls: bool, optional
    :param currencies: Flag that includes column with US dollar based amounts. Defaults to False.
    :type currencies: bool, optional
    :param file_paths: Flag that includes column with file paths at a depth of 3. Defaults to False.
    :type file_paths: bool, optional
    :param ipv4: Flag that includes column with different IPv4 addresses. Defaults to False.
    :type ipv4: bool, optional
    :param ipv6: Flag that includes column with different IPv6 addresses. Defaults to False.
    :type ipv6: bool, optional
    :param lat_longs: Flag that includes column with latitude and longitude values in a tuple. Defaults to False.
    :type lat_longs: bool, optional
    :param all_dtypes: Flag that includes all columns. Defaults to False.
    :type all_dtypes: bool, optional
    :return: Mock features data.
    :rtype: pd.DataFrame by default, can also return np.ndarray
    """

    name = "Features"

    def __init__(
        self,
        num_rows=100,
        library="pandas",
        ints=True,
        rand_ints=True,
        floats=True,
        rand_floats=True,
        booleans=False,
        categoricals=False,
        dates=False,
        texts=False,
        ints_nullable=False,
        floats_nullable=False,
        booleans_nullable=False,
        full_names=False,
        phone_numbers=False,
        addresses=False,
        countries=False,
        email_addresses=False,
        urls=False,
        currencies=False,
        file_paths=False,
        ipv4=False,
        ipv6=False,
        lat_longs=False,
        all_dtypes=False,
    ):
        # Snapshot of the call's keyword flags; taken before any other local
        # is defined so it only contains the parameters above.
        kw_args = locals()
        # Keys that describe the call itself rather than a mockable dtype.
        meta_keys = ["self", "library", "num_rows", "__class__"]
        if all_dtypes:
            parameters = {
                k: True for k, v in kw_args.items() if k not in meta_keys
            }
        else:
            parameters = {
                k: v for k, v in kw_args.items() if k not in meta_keys and v
            }
        if not any(
            parameters.values()
        ):  # All False flags results in all dtypes being included
            # Bug fix: this fallback previously used all of kw_args, so
            # 'self', 'library', 'num_rows' and '__class__' leaked into
            # self.parameters; the meta keys are now excluded, consistent
            # with the two branches above.
            parameters = {
                k: True for k, v in kw_args.items() if k not in meta_keys
            }
        super().__init__(library, num_rows, parameters)

    def create_data(self):
        """Main function to be called to create features data.

        :return: The final features data created.
        """
        data, dtypes_to_keep = self.get_data_from_dict()
        data = self.handle_library(data, dtypes_to_keep)
        return data

    def get_data_from_dict(self):
        """Returns the data based on the dtypes specified during class instantiation.

        :return: The mocked DataFrame and the list of dtypes it contains.
        """
        dtypes_to_keep = list(self.parameters.keys())
        mocked = Features._refine_dtypes(dtypes_to_keep, self.num_rows)
        mocked_df = pd.DataFrame.from_dict(mocked)
        return mocked_df, dtypes_to_keep

    def handle_library(self, data, dtypes_to_keep):
        """Handles the library that was selected to determine the format in which the data will be returned.

        :param data: The final data to be returned.
        :type data: pd.DataFrame
        :param dtypes_to_keep: All data format options from the class initialization.
        :type dtypes_to_keep: list
        :return: The final data created from the appropriate library as a pd.DataFrame or ndarray.
        """
        if self.library == "numpy":
            return data.to_numpy()
        else:
            # Nullable columns need pandas extension dtypes before Woodwork
            # typing is initialized.
            if "ints_nullable" in dtypes_to_keep:
                data["ints_nullable"] = data["ints_nullable"].astype("Int64")
            if "floats_nullable" in dtypes_to_keep:
                data["floats_nullable"] = data["floats_nullable"].astype("Float64")
            data.ww.init()
            return data

    @staticmethod
    def _refine_dtypes(dtypes, num_rows=100):
        """Internal function that selects the dtypes to be kept from the full dataset.

        :param dtypes: All data format options from the class initialization.
        :type dtypes: list
        :param num_rows: The number of observations in the final dataset. Defaults to 100.
        :type num_rows: int
        :return: A refined form of the full set of columns available.
        """
        full_mock = mock_features_dtypes(num_rows)
        return {k: v for k, v in full_mock.items() if k in dtypes}
| 45.380952 | 150 | 0.677859 | import pandas as pd
import woodwork as ww
from facilyst.mocks import MockBase
from facilyst.mocks.mock_types.utils import mock_features_dtypes
class Features(MockBase):
    """Mock features generator.

    Each boolean keyword selects a column dtype to include in the mocked
    dataset; if every flag is False (or ``all_dtypes`` is True) every
    available dtype is included.  ``library`` chooses the returned
    container ('pandas' DataFrame or 'numpy' ndarray) and ``num_rows``
    the number of observations.
    """

    name = "Features"

    def __init__(
        self,
        num_rows=100,
        library="pandas",
        ints=True,
        rand_ints=True,
        floats=True,
        rand_floats=True,
        booleans=False,
        categoricals=False,
        dates=False,
        texts=False,
        ints_nullable=False,
        floats_nullable=False,
        booleans_nullable=False,
        full_names=False,
        phone_numbers=False,
        addresses=False,
        countries=False,
        email_addresses=False,
        urls=False,
        currencies=False,
        file_paths=False,
        ipv4=False,
        ipv6=False,
        lat_longs=False,
        all_dtypes=False,
    ):
        # Snapshot of the keyword flags, taken before other locals exist.
        kw_args = locals()
        # Keys that describe the call itself rather than a mockable dtype.
        meta_keys = ["self", "library", "num_rows", "__class__"]
        if all_dtypes:
            parameters = {
                k: True for k, v in kw_args.items() if k not in meta_keys
            }
        else:
            parameters = {
                k: v for k, v in kw_args.items() if k not in meta_keys and v
            }
        # All-False flags fall back to including every dtype.  Bug fix: the
        # fallback previously used kw_args unfiltered, leaking 'self',
        # 'library', 'num_rows' and '__class__' into self.parameters.
        if not any(
            parameters.values()
        ):
            parameters = {
                k: True for k, v in kw_args.items() if k not in meta_keys
            }
        super().__init__(library, num_rows, parameters)

    def create_data(self):
        """Create and return the mocked features data."""
        data, dtypes_to_keep = self.get_data_from_dict()
        data = self.handle_library(data, dtypes_to_keep)
        return data

    def get_data_from_dict(self):
        """Build the mocked DataFrame and return it with its dtype list."""
        dtypes_to_keep = list(self.parameters.keys())
        mocked = Features._refine_dtypes(dtypes_to_keep, self.num_rows)
        mocked_df = pd.DataFrame.from_dict(mocked)
        return mocked_df, dtypes_to_keep

    def handle_library(self, data, dtypes_to_keep):
        """Return *data* as an ndarray for 'numpy', otherwise as a
        Woodwork-initialized DataFrame with nullable extension dtypes."""
        if self.library == "numpy":
            return data.to_numpy()
        else:
            if "ints_nullable" in dtypes_to_keep:
                data["ints_nullable"] = data["ints_nullable"].astype("Int64")
            if "floats_nullable" in dtypes_to_keep:
                data["floats_nullable"] = data["floats_nullable"].astype("Float64")
            data.ww.init()
            return data

    @staticmethod
    def _refine_dtypes(dtypes, num_rows=100):
        """Select only the requested dtypes from the full mocked column set."""
        full_mock = mock_features_dtypes(num_rows)
        return {k: v for k, v in full_mock.items() if k in dtypes}
| true | true |
f7225955a80a43c4c49fab791ea51586ce36ebdf | 1,887 | py | Python | sopel/modules/spellcheck.py | CrushAndRun/sopel-Bodhi | 98d9661663a59f91880df21749c4861d163020fe | [
"EFL-2.0"
] | 2 | 2016-02-02T22:24:16.000Z | 2016-02-02T22:50:05.000Z | sopel/modules/spellcheck.py | CrushAndRun/sopel-Bodhi | 98d9661663a59f91880df21749c4861d163020fe | [
"EFL-2.0"
] | null | null | null | sopel/modules/spellcheck.py | CrushAndRun/sopel-Bodhi | 98d9661663a59f91880df21749c4861d163020fe | [
"EFL-2.0"
] | null | null | null | # coding=utf-8
"""
spellcheck.py - Sopel spell check Module
Copyright © 2012, Elad Alfassa, <elad@fedoraproject.org>
Copyright © 2012, Lior Ramati
Licensed under the Eiffel Forum License 2.
http://sopel.chat
This module relies on pyenchant, on Fedora and Red Hat based system, it can be found in the package python-enchant
"""
from __future__ import unicode_literals, absolute_import, print_function, division
try:
import enchant
except ImportError:
enchant = None
from sopel.module import commands, example
@commands('spellcheck', 'spell')
@example('.spellcheck stuff')
def spellcheck(bot, trigger):
    """
    Says whether the given word is spelled correctly, and gives suggestions if
    it's not.
    """
    if not enchant:
        bot.say("Missing pyenchant module.")
        # Bug fix: previously fell through and crashed below on
        # enchant.Dict(...) because enchant is None when the import failed.
        return
    if not trigger.group(2):
        return
    word = trigger.group(2).rstrip()
    if " " in word:
        bot.say("One word at a time, please")
        return
    dictionary = enchant.Dict("en_US")
    dictionary_uk = enchant.Dict("en_GB")
    # I don't want to make anyone angry, so I check both American and British English.
    if dictionary_uk.check(word):
        if dictionary.check(word):
            bot.say(word + " is spelled correctly")
        else:
            bot.say(word + " is spelled correctly (British)")
    elif dictionary.check(word):
        bot.say(word + " is spelled correctly (American)")
    else:
        msg = word + " is not spelled correctly. Maybe you want one of these spellings:"
        sugWords = []
        for suggested_word in dictionary.suggest(word):
            sugWords.append(suggested_word)
        for suggested_word in dictionary_uk.suggest(word):
            sugWords.append(suggested_word)
        for suggested_word in sorted(set(sugWords)):  # removes duplicates
            msg = msg + " '" + suggested_word + "',"
        bot.say(msg)
| 34.309091 | 114 | 0.660837 |
from __future__ import unicode_literals, absolute_import, print_function, division
try:
import enchant
except ImportError:
enchant = None
from sopel.module import commands, example
@commands('spellcheck', 'spell')
@example('.spellcheck stuff')
def spellcheck(bot, trigger):
    """
    Says whether the given word is spelled correctly, and gives suggestions if
    it's not.
    """
    if not enchant:
        bot.say("Missing pyenchant module.")
        # Bug fix: without this return, enchant.Dict(...) below raises
        # AttributeError because enchant is None when the import failed.
        return
    if not trigger.group(2):
        return
    word = trigger.group(2).rstrip()
    if " " in word:
        bot.say("One word at a time, please")
        return
    # Check both American and British English dictionaries.
    dictionary = enchant.Dict("en_US")
    dictionary_uk = enchant.Dict("en_GB")
    if dictionary_uk.check(word):
        if dictionary.check(word):
            bot.say(word + " is spelled correctly")
        else:
            bot.say(word + " is spelled correctly (British)")
    elif dictionary.check(word):
        bot.say(word + " is spelled correctly (American)")
    else:
        msg = word + " is not spelled correctly. Maybe you want one of these spellings:"
        sugWords = []
        for suggested_word in dictionary.suggest(word):
            sugWords.append(suggested_word)
        for suggested_word in dictionary_uk.suggest(word):
            sugWords.append(suggested_word)
        for suggested_word in sorted(set(sugWords)):  # removes duplicates
            msg = msg + " '" + suggested_word + "',"
        bot.say(msg)
| true | true |
f7225a1eb55fafef881df8e0e24e41bf096ad066 | 14,358 | py | Python | ThorLabs_motor_controller/ThorLabs_controller_rs232_driver.py | vstadnytskyi/drivers | 3d4dbc43dac7d13ddec25b179252ccea18792bc7 | [
"MIT"
] | null | null | null | ThorLabs_motor_controller/ThorLabs_controller_rs232_driver.py | vstadnytskyi/drivers | 3d4dbc43dac7d13ddec25b179252ccea18792bc7 | [
"MIT"
] | null | null | null | ThorLabs_motor_controller/ThorLabs_controller_rs232_driver.py | vstadnytskyi/drivers | 3d4dbc43dac7d13ddec25b179252ccea18792bc7 | [
"MIT"
] | null | null | null | #!/bin/env python
"""
ThorLabs TDC001 and KDC001 cubes Low Level code.
This code specifies communication protocola for T snd K cubes.
Valentyn Stadnytskyi
valentyn.stadnytskyi@nih.gov
The communication protocols:
https://www.thorlabs.com/Software/Motion%20Control/APT_Communications_Protocol.pdf issue 20
"""
from time import sleep, time
from numpy import zeros, ones, mean, std, sign
from serial import Serial
from struct import pack, unpack
from pdb import pm
import logging
from logging import debug, info, warn, error
class Motor(object):
def __init__(self):
self.baudrate = 115200
self.controller = ''
self.motor = ''
self.last_communiction = {}
self.port = None
    def init(self, serial_number = '', controller_type = None, motor_type = None):
        """Connect to the controller with the given USB serial number.

        Raises Exception when controller_type or motor_type is not given.
        On success stores the open port in self.port (timeout 0.4 s) and the
        serial number in self.serial_number; otherwise self.port stays None.

        NOTE(review): the values are stored as self.controller_type and
        self.motor_type, while __init__ creates self.controller and
        self.motor (empty strings) which nothing here updates — confirm
        which attributes the rest of the class is meant to check.
        """
        if controller_type is None:
            raise Exception('The controller type is not specified!')
        else:
            self.controller_type = controller_type
        if motor_type is None:
            raise Exception('The motor type is not specified!')
        else:
            self.motor_type = motor_type
        if serial_number != "":
            # Resolve the OS port name from the USB serial number, then open it.
            port_name = self.get_port(serial_number = serial_number)
            ser = self.get_serial_object(port_name = port_name)
            if ser is not None:
                self.port = ser
                self.port.timeout = 0.4
                self.serial_number = serial_number
            else:
                self.port = None
                print('No serial device with serial number {}'.format(serial_number))
        else:
            self.port = None
            print('Serial Number has to be specified')
    def list_all(self):
        """
        lists and returns all com ports with the manufacturer field equal to 'Thorlabs'
        """
        import serial.tools.list_ports
        lst = serial.tools.list_ports.comports()
        available_ports = []
        for item in lst:
            # Logged for every port found, not only Thorlabs ones.
            debug('Manufacturer of this motor is {}'.format(item.manufacturer))
            if item.manufacturer == 'Thorlabs':
                available_ports.append(item)
        return available_ports
def get_port(self, serial_number = None):
"""
returns the name of the serial port for the ThorLabs motor controller with specified serial_number
"""
def is_port_open(port):
from serial import Serial
import platform
if platform.system() == 'Linux':
prefix = '/dev/'
else:
prefix = ''
ser = Serial(prefix+port, baudrate=115200, bytesize = 8, parity='N', stopbits=1, timeout=1)
ser.isOpen()
import platform
if platform.system() == 'Linux':
prefix = '/dev/'
else:
prefix = ''
lst = self.list_all()
port_name = ''
if serial_number != None:
for item in lst:
if item.serial_number == str(serial_number):
port_name = prefix+item.name
return port_name
    def get_serial_object(self,port_name):
        "connects to a given port name (e.g. /dev/ttyUSB1 on Linux) and returns an open pySerial Serial object (8N1, self.baudrate, 1 s timeout)"
        from serial import Serial
        # NOTE(review): this never returns None — Serial() raises on failure —
        # yet init() checks the result for None; confirm the intended contract.
        ser = Serial(port_name, baudrate=self.baudrate, bytesize = 8, parity='N', stopbits=1, timeout=1)
        return ser
    def initialization(self):
        """One-time controller configuration hook; currently a no-op — every
        command below is intentionally commented out."""
        #this will turn on message to be send upon completion of the move.
        #I ran this command and it turned off reply to identify
        #self.port.write(pack('BBBBBB',0x6C,0x04,0x00,0x00,0x80,0x01))
        #suspend end of motion message
        #self.port.write(pack('BBBBBB',0x6B,0x04,0x00,0x00,0x21,0x01))
        """MGMSG_HW_NO_FLASH_PROGRAMMING"""
        #self.port.write()
        pass
def read(self, N = 0):
if N ==0:
result = None
else:
result = self.port.read(N)
return result
def write(self,command):
self.flush()
self.port.write(command)
def query_line(self,command, length = None):
"""write read command"""
self.flush()
self.write(command)
while self.port.in_waiting < 1:
sleep(0.1)
result = self.port.readline()
return result
def query(self,command, length = None):
"""write read command"""
self.flush()
self.write(command + '\r')
if length == None:
result = None
else:
while self.port.in_waiting < length:
sleep(0.1)
result = self.read(N = length)
return result
def close(self):
self.port.close()
del self.port
def waiting(self):
return [self.port.in_waiting,self.port.out_waiting]
def flush(self):
self.port.reset_input_buffer()
self.port.reset_output_buffer()
def blink(self):
"""
submits blink command to the controller
tested for
"""
if self.controller_type == 'T':
self.write(pack('B'*6,0x23,0x02,0x00,0x00,0x50,0x01))
flag = True
else:
flag = False
warn('the controller type is not specified')
    def identify(self):
        """
        This command is independent on the controller type
        page 28-29 of the communication protocol file
        send 6 bytes 05 00 00 00 11 01
        back 90 bytes
        0-5 bytes - header 06 00 54 00 d| s
        6-9 bytes - <--Serial Number-->
        10-17 bytes- <--Model Number-->
        18-19 bytes - <--Type-->
        20-23 bytes - <--Firmware Version-->
        24-83 bytes - <--For internal use only-->
        84-85 bytes - <--HW Version-->
        86-87 bytes - <--Mod State-->
        88-89 bytes - <--nchs--> "mnumber of channels"
        tested fot TDC001 cubes
        """
        from struct import pack
        flag = True
        # Only 'T'-type controllers are handled; the guard below contradicts
        # the docstring's "independent on the controller type" claim.
        if self.controller_type == 'T':
            command = pack('BBBBBB',0x05,0x00,0x00,0x00,0x50,0x01)
        else:
            flag = False
        if flag:
            # 90-byte MGMSG_HW_GET_INFO reply; kept on the instance for debugging.
            result = self.query_line(command,90)
            self.full_result = result
            Header = result[0:6]
            SerialNumber = unpack('i',result[6:10]) #unpack('L',result[6:10])
            ModelNumber = result[10:18]
            Type = unpack('h',result[18:20])
            FirmwareVersion = self.my_unpack(result,20,23)
            HWVersion = result[84:86]
            ForInternalUseOnly = result[24:84]
            ModState = self.my_unpack(result,86,87)
            nchs = self.my_unpack(result,88,89)
            # msg is never used; left as-is to keep the code byte-identical.
            msg = ""
            debug('The result of identify command: \n Header: {} \n Serial Number: {} \
            \n Model Number: {} \n Type: {} \n Firmware Version: {} \n Mod State: {} \
            \n nchs: {} \n For Internal Use Only: \
            \n {}'.format(Header, SerialNumber,ModelNumber,Type,FirmwareVersion, ModState,nchs,ForInternalUseOnly))
            # NOTE: unpacked fields (SerialNumber, Type, ...) are 1-tuples
            # from struct.unpack, not scalars.
            res_dic = {}
            res_dic['Header'] = Header
            res_dic['SerialNumber'] = SerialNumber
            res_dic['ModelNumber'] = ModelNumber
            res_dic['Type'] = Type
            res_dic['FirmwareVersion'] = FirmwareVersion
            res_dic['HWVersion'] = HWVersion
            res_dic['ForInternalUseOnly'] = ForInternalUseOnly
            res_dic['ModState'] = ModState
            res_dic['nchs'] = nchs
        else:
            # Same keys with None values when the controller type is unsupported.
            res_dic = {}
            res_dic['Header'] = None
            res_dic['SerialNumber'] = None
            res_dic['ModelNumber'] = None
            res_dic['Type'] = None
            res_dic['FirmwareVersion'] = None
            res_dic['HWVersion'] = None
            res_dic['ForInternalUseOnly'] = None
            res_dic['ModState'] = None
            res_dic['nchs'] = None
        return res_dic
    def move_abs(self,new_pos):
        """Move to the absolute position *new_pos* in encoder counts
        (header 0x53 0x04 — presumably MGMSG_MOT_MOVE_ABSOLUTE 0x0453;
        confirm against the APT protocol document).

        Returns (flag, comment): flag is True when the move was issued,
        comment is always None here.
        NOTE(review): the unpacked reply fields below are discarded, and
        the (flag, comment) tuple differs from move_relative's dict reply —
        confirm which return convention callers expect.
        """
        flag = False
        comment = None
        if self.controller_type == 'T':
            c_header = pack('BBBBBB',0x53,0x04,0x06,0x00,0x80,0x01)
            c_channel = pack('BB',0x01,0x00)
            c_distance = pack('i',new_pos)
            command = c_header + c_channel + c_distance
            # Blocks until the 20-byte move-completed message arrives.
            response = self.query_line(command,20)
            res_pos = unpack('i',response[8:12])
            res_enc = unpack('i',response[12:16])
            res_status_bits = response[16:20]
            flag = True
        return flag, comment
    def move_relative(self,delta_pos):
        """
        move relative
        +delta_pos will move positive by that number
        -delta_pos will move negative by that number
        tested for TDC001 cube.

        :param delta_pos: signed displacement in encoder counts
        :return: dict with keys 'flag' (True when the move was issued) and
            'comment' (str(raw 20-byte reply) on success, else None)
        """
        flag = False
        comment = None
        if self.controller_type == 'T':
            # Header 0x48 0x04 -- MGMSG_MOT_MOVE_RELATIVE per the docstring's
            # TDC001 usage; 6-byte data packet follows.
            c_header = pack('B'*6,0x48, 0x04, 0x06, 0x00, 0xA2, 0x01)
            c_channel = pack('BB',0x01,0x00)
            c_distance = pack('i',delta_pos)
            command = c_header + c_channel + c_distance
            #print('command sent %r' % command)
            # Blocks until the 20-byte move-completed message arrives.
            response = self.query_line(command,20)
            res_pos = unpack('i',response[8:12])
            res_enc = unpack('i',response[12:16])
            res_status_bits = response[16:20]
            debug('res_pos:{} , res_enc:{}, res_status_bits:{}'.format(res_pos,res_enc,res_status_bits))
            flag = True
            comment = '' + str(response)
        else:
            warn('unknown controller type')
        reply = {}
        reply['flag'] = flag
        reply['comment'] = comment
        return reply
def get_position(self):
"""FIXIT: add description"""
if self.controller == 'T':
command = pack('BBBBBB',0x11,0x04,0x01,0x00,0x21,0x01)
#print('Get position command sent %r' % command)
response = self.query(command,12)
res_header = response[0:6]
res_chan_ident = response[6:8]
res_encoder_counts = response[8:12]
self.last_communiction['command'] = command
self.last_communiction['response'] = response
return unpack('i',res_encoder_counts)[0]
else:
return None
def set_position(self,value):
"""
"""
c_header = pack('BBBBBB',0x09,0x04,0x06,0x00,0xA2,0x01)
c_channel = pack('BB',0x01,0x00)
c_distance = pack('i',value)
command = c_header + c_channel + c_distance
self.write(command)
def home(self):
'''MGMSG_MOT_MOVE_HOME'''
flag = False
if self.motor == 'T':
self.write(pack('B'*6,0x43,0x04,0x00,0x00,0xA2,0x01))
while self.port.in_waiting == 0:
sleep(0.1)
res = self.read(6)
if res == pack('BBBBBB',0x44,0x04,0x01,0x00,0x01,0x80):
print('%r' % res)
flag = True
else:
print(res)
return flag, res #returns True of home was succesfully executed.
def my_unpack(self,var,f,t):
return unpack('B'*len(var[f:t+1]),var[f:t+1])
def hex_to_chr(self, var):
for i in var:
print(i)
string =+ chr(var)
return string
"""Potentially useful commands. Haven;t been used or extensively tested"""
    def set_position_2(self,value):
        """Alternate set-position command (opcode byte 0x10 variant).

        NOTE(review): ``self.query`` appends a carriage-return byte to
        the binary command and, with no expected length, returns None —
        so ``response`` here is always None.  The intermediate ``Pos``
        read also fires a position query whose result is unused;
        confirm whether that side effect is intentional.
        """
        c_header = pack('BBBBBB',0x10,0x04,0x06,0x00,0xA2,0x01)
        c_channel = pack('BB',0x01,0x00)
        Pos = self.get_position()
        c_distance = pack('i',value)
        command = c_header + c_channel + c_distance
        response = self.query(command)
        return response
def home_parameters_set(self,home_dir,limit_switch,home_velocity,offset_distance):
""" MGMSG_MOT_SET_HOMEPARAMS 0x0440 """
if self.motor == 'A':
raise ValueError('This is AutoOptics motor and it does not have homing option!')
else:
c_header = pack('B'*6,0x40,0x04,0x0E,0x00,0xA2,0x01)
c_channel = pack('BB',0x01,0x00) #<---this is always the same for TDC001 cubes.
c_home_dir = pack('h',home_dir)
c_limit_switch = pack('h',limit_switch)
c_home_velocity = pack('l',home_velocity)
c_offset_distance = pack('l',offset_distance)
command = c_header + c_channel + c_home_dir + c_limit_switch + c_home_velocity + c_offset_distance
response = self.query(command)
    def home_parameters_get(self):
        """ MGMSG_MOT_GET_HOMEPARAMS 0x0442

        Requests the homing parameters and decodes the 20-byte reply
        into a dict.
        NOTE(review): ``self.query`` appends a carriage-return byte to
        the command — confirm the controller tolerates that.
        NOTE(review): ``limit_switch`` is decoded as one signed byte
        here but packed as a 2-byte 'h' in home_parameters_set, and
        ``header`` spans 7 bytes (0:7) while replies elsewhere use
        6-byte headers — verify the offsets against the APT spec.
        """
        from struct import unpack
        command = pack('B'*6,0x41,0x04,0x01,0x00,0x64,0x73)
        response = self.query(command,20)
        res = {}
        res['header'] = response[0:7]
        res['chan_ident'] = unpack('h',response[6:8])
        res['home_dir']= unpack('h',response[8:10])
        res['limit_switch'] = unpack('b',response[10:11])
        res['home_velocity'] = unpack('i',response[12:16])
        res['offset_distance'] = unpack('i',response[16:20])
        return res
def run_test1(self,N):
from random import random
from time import sleep, time
lst = []
to = []
for i in range(N):
start = time()
prev = self.get_position()
goto = int(random()*25*512.*67.)
self.move_abs(goto)
while abs(goto - self.get_position()) > 3:
sleep(0.3)
arrived = self.get_position()
print [time()-start,round(goto/(512.*67.),2),round(arrived/(512.*67.),2),round((goto-prev)/(512.*67.*(time()-start)),2)]
sleep(5)
if __name__ == "__main__":
    # Manual smoke test: bring up one controller, blink its LED, and
    # print the identify() reply.  Extra controllers are left commented
    # out for bench setups with more than one cube.
    from tempfile import gettempdir
    import logging
    logging.basicConfig(#filename=gettempdir()+'/syringe_pump_DL.log',
                        level=logging.INFO, format="%(asctime)s %(levelname)s: %(message)s")
    print('This is a Low Level python script providing Device Level with basic communication commands')
    motors = []
    motors.append(Motor())
    #motors.append(Motor())
    #motors.append(Motor())
    #motors[0].init('27254090')
    #motors[1].init('27254025')
    motors[0].init('83825160', controller_type = 'T', motor_type = 'T')
    for motor in motors:
        motor.blink()
    for motor in motors:
        reply = motor.identify()
        print(reply)
    #for motor in motors: motor.close()
    #print('all ports are closed')
| 33.704225 | 132 | 0.566792 |
from time import sleep, time
from numpy import zeros, ones, mean, std, sign
from serial import Serial
from struct import pack, unpack
from pdb import pm
import logging
from logging import debug, info, warn, error
class Motor(object):
def __init__(self):
self.baudrate = 115200
self.controller = ''
self.motor = ''
self.last_communiction = {}
self.port = None
def init(self, serial_number = '', controller_type = None, motor_type = None):
if controller_type is None:
raise Exception('The controller type is not specified!')
else:
self.controller_type = controller_type
if motor_type is None:
raise Exception('The motor type is not specified!')
else:
self.motor_type = motor_type
if serial_number != "":
port_name = self.get_port(serial_number = serial_number)
ser = self.get_serial_object(port_name = port_name)
if ser is not None:
self.port = ser
self.port.timeout = 0.4
self.serial_number = serial_number
else:
self.port = None
print('No serial device with serial number {}'.format(serial_number))
else:
self.port = None
print('Serial Number has to be specified')
def list_all(self):
import serial.tools.list_ports
lst = serial.tools.list_ports.comports()
available_ports = []
for item in lst:
debug('Manufacturer of this motor is {}'.format(item.manufacturer))
if item.manufacturer == 'Thorlabs':
available_ports.append(item)
return available_ports
def get_port(self, serial_number = None):
def is_port_open(port):
from serial import Serial
import platform
if platform.system() == 'Linux':
prefix = '/dev/'
else:
prefix = ''
ser = Serial(prefix+port, baudrate=115200, bytesize = 8, parity='N', stopbits=1, timeout=1)
ser.isOpen()
import platform
if platform.system() == 'Linux':
prefix = '/dev/'
else:
prefix = ''
lst = self.list_all()
port_name = ''
if serial_number != None:
for item in lst:
if item.serial_number == str(serial_number):
port_name = prefix+item.name
return port_name
def get_serial_object(self,port_name):
from serial import Serial
ser = Serial(port_name, baudrate=self.baudrate, bytesize = 8, parity='N', stopbits=1, timeout=1)
return ser
def initialization(self):
pass
def read(self, N = 0):
if N ==0:
result = None
else:
result = self.port.read(N)
return result
def write(self,command):
self.flush()
self.port.write(command)
def query_line(self,command, length = None):
self.flush()
self.write(command)
while self.port.in_waiting < 1:
sleep(0.1)
result = self.port.readline()
return result
def query(self,command, length = None):
self.flush()
self.write(command + '\r')
if length == None:
result = None
else:
while self.port.in_waiting < length:
sleep(0.1)
result = self.read(N = length)
return result
def close(self):
self.port.close()
del self.port
def waiting(self):
return [self.port.in_waiting,self.port.out_waiting]
def flush(self):
self.port.reset_input_buffer()
self.port.reset_output_buffer()
def blink(self):
if self.controller_type == 'T':
self.write(pack('B'*6,0x23,0x02,0x00,0x00,0x50,0x01))
flag = True
else:
flag = False
warn('the controller type is not specified')
def identify(self):
from struct import pack
flag = True
if self.controller_type == 'T':
command = pack('BBBBBB',0x05,0x00,0x00,0x00,0x50,0x01)
else:
flag = False
if flag:
result = self.query_line(command,90)
self.full_result = result
Header = result[0:6]
SerialNumber = unpack('i',result[6:10])
ModelNumber = result[10:18]
Type = unpack('h',result[18:20])
FirmwareVersion = self.my_unpack(result,20,23)
HWVersion = result[84:86]
ForInternalUseOnly = result[24:84]
ModState = self.my_unpack(result,86,87)
nchs = self.my_unpack(result,88,89)
msg = ""
debug('The result of identify command: \n Header: {} \n Serial Number: {} \
\n Model Number: {} \n Type: {} \n Firmware Version: {} \n Mod State: {} \
\n nchs: {} \n For Internal Use Only: \
\n {}'.format(Header, SerialNumber,ModelNumber,Type,FirmwareVersion, ModState,nchs,ForInternalUseOnly))
res_dic = {}
res_dic['Header'] = Header
res_dic['SerialNumber'] = SerialNumber
res_dic['ModelNumber'] = ModelNumber
res_dic['Type'] = Type
res_dic['FirmwareVersion'] = FirmwareVersion
res_dic['HWVersion'] = HWVersion
res_dic['ForInternalUseOnly'] = ForInternalUseOnly
res_dic['ModState'] = ModState
res_dic['nchs'] = nchs
else:
res_dic = {}
res_dic['Header'] = None
res_dic['SerialNumber'] = None
res_dic['ModelNumber'] = None
res_dic['Type'] = None
res_dic['FirmwareVersion'] = None
res_dic['HWVersion'] = None
res_dic['ForInternalUseOnly'] = None
res_dic['ModState'] = None
res_dic['nchs'] = None
return res_dic
def move_abs(self,new_pos):
flag = False
comment = None
if self.controller_type == 'T':
c_header = pack('BBBBBB',0x53,0x04,0x06,0x00,0x80,0x01)
c_channel = pack('BB',0x01,0x00)
c_distance = pack('i',new_pos)
command = c_header + c_channel + c_distance
response = self.query_line(command,20)
res_pos = unpack('i',response[8:12])
res_enc = unpack('i',response[12:16])
res_status_bits = response[16:20]
flag = True
return flag, comment
def move_relative(self,delta_pos):
flag = False
comment = None
if self.controller_type == 'T':
c_header = pack('B'*6,0x48, 0x04, 0x06, 0x00, 0xA2, 0x01)
c_channel = pack('BB',0x01,0x00)
c_distance = pack('i',delta_pos)
command = c_header + c_channel + c_distance
response = self.query_line(command,20)
res_pos = unpack('i',response[8:12])
res_enc = unpack('i',response[12:16])
res_status_bits = response[16:20]
debug('res_pos:{} , res_enc:{}, res_status_bits:{}'.format(res_pos,res_enc,res_status_bits))
flag = True
comment = '' + str(response)
else:
warn('unknown controller type')
reply = {}
reply['flag'] = flag
reply['comment'] = comment
return reply
def get_position(self):
if self.controller == 'T':
command = pack('BBBBBB',0x11,0x04,0x01,0x00,0x21,0x01)
response = self.query(command,12)
res_header = response[0:6]
res_chan_ident = response[6:8]
res_encoder_counts = response[8:12]
self.last_communiction['command'] = command
self.last_communiction['response'] = response
return unpack('i',res_encoder_counts)[0]
else:
return None
def set_position(self,value):
c_header = pack('BBBBBB',0x09,0x04,0x06,0x00,0xA2,0x01)
c_channel = pack('BB',0x01,0x00)
c_distance = pack('i',value)
command = c_header + c_channel + c_distance
self.write(command)
def home(self):
flag = False
if self.motor == 'T':
self.write(pack('B'*6,0x43,0x04,0x00,0x00,0xA2,0x01))
while self.port.in_waiting == 0:
sleep(0.1)
res = self.read(6)
if res == pack('BBBBBB',0x44,0x04,0x01,0x00,0x01,0x80):
print('%r' % res)
flag = True
else:
print(res)
return flag, res
def my_unpack(self,var,f,t):
return unpack('B'*len(var[f:t+1]),var[f:t+1])
def hex_to_chr(self, var):
for i in var:
print(i)
string =+ chr(var)
return string
def set_position_2(self,value):
c_header = pack('BBBBBB',0x10,0x04,0x06,0x00,0xA2,0x01)
c_channel = pack('BB',0x01,0x00)
Pos = self.get_position()
c_distance = pack('i',value)
command = c_header + c_channel + c_distance
response = self.query(command)
return response
def home_parameters_set(self,home_dir,limit_switch,home_velocity,offset_distance):
if self.motor == 'A':
raise ValueError('This is AutoOptics motor and it does not have homing option!')
else:
c_header = pack('B'*6,0x40,0x04,0x0E,0x00,0xA2,0x01)
c_channel = pack('BB',0x01,0x00)
c_home_dir = pack('h',home_dir)
c_limit_switch = pack('h',limit_switch)
c_home_velocity = pack('l',home_velocity)
c_offset_distance = pack('l',offset_distance)
command = c_header + c_channel + c_home_dir + c_limit_switch + c_home_velocity + c_offset_distance
response = self.query(command)
def home_parameters_get(self):
from struct import unpack
command = pack('B'*6,0x41,0x04,0x01,0x00,0x64,0x73)
response = self.query(command,20)
res = {}
res['header'] = response[0:7]
res['chan_ident'] = unpack('h',response[6:8])
res['home_dir']= unpack('h',response[8:10])
res['limit_switch'] = unpack('b',response[10:11])
res['home_velocity'] = unpack('i',response[12:16])
res['offset_distance'] = unpack('i',response[16:20])
return res
def run_test1(self,N):
from random import random
from time import sleep, time
lst = []
to = []
for i in range(N):
start = time()
prev = self.get_position()
goto = int(random()*25*512.*67.)
self.move_abs(goto)
while abs(goto - self.get_position()) > 3:
sleep(0.3)
arrived = self.get_position()
print [time()-start,round(goto/(512.*67.),2),round(arrived/(512.*67.),2),round((goto-prev)/(512.*67.*(time()-start)),2)]
sleep(5)
if __name__ == "__main__":
from tempfile import gettempdir
import logging
logging.basicConfig(
level=logging.INFO, format="%(asctime)s %(levelname)s: %(message)s")
print('This is a Low Level python script providing Device Level with basic communication commands')
motors = []
motors.append(Motor())
motors[0].init('83825160', controller_type = 'T', motor_type = 'T')
for motor in motors:
motor.blink()
for motor in motors:
reply = motor.identify()
print(reply)
| true | true |
f7225f7016abfde0e482ac3eab7b4abbb27306bd | 229 | py | Python | ocumol/__init__.py | lqtza/OcuMOL_Leap | dcba934513535e3199c058a7e228603ee652b65d | [
"Apache-2.0"
] | 4 | 2015-03-24T12:05:22.000Z | 2015-09-30T12:24:09.000Z | ocumol/__init__.py | lqtza/OcuMOL_Leap | dcba934513535e3199c058a7e228603ee652b65d | [
"Apache-2.0"
] | null | null | null | ocumol/__init__.py | lqtza/OcuMOL_Leap | dcba934513535e3199c058a7e228603ee652b65d | [
"Apache-2.0"
] | null | null | null | import sys
from ocumol.leapConfig import leapPath
if leapPath not in sys.path:
sys.path.append(leapPath)
#from ocumol.src.hands.leap_only import PymolListener
# from ocumol.src.pymol.pymolHmd import PymolHmd, pymolHmdScript | 28.625 | 64 | 0.812227 | import sys
from ocumol.leapConfig import leapPath
if leapPath not in sys.path:
sys.path.append(leapPath)
| true | true |
f7225fff5d36c02275a0328a3bf2e93a987b9578 | 10,936 | py | Python | cc/engine/tests/test_util.py | Abbas-000/cc.engine | eb4b5e5f6c695a16c7ab8bcc52036cf16a0fba22 | [
"MIT"
] | 6 | 2017-12-25T08:18:43.000Z | 2021-01-02T09:02:59.000Z | cc/engine/tests/test_util.py | Abbas-000/cc.engine | eb4b5e5f6c695a16c7ab8bcc52036cf16a0fba22 | [
"MIT"
] | 39 | 2017-11-17T01:59:38.000Z | 2021-12-14T19:14:12.000Z | cc/engine/tests/test_util.py | Abbas-000/cc.engine | eb4b5e5f6c695a16c7ab8bcc52036cf16a0fba22 | [
"MIT"
] | 17 | 2017-12-25T08:18:13.000Z | 2021-04-12T12:50:35.000Z | import email
import StringIO
from nose.tools import assert_raises
from lxml import etree
from webob import Request
import nose
import cc.license
from cc.engine import util
util._activate_testing()
class FakeAcceptLanguage(object):
    """Stand-in for webob's ``accept_language`` attribute.

    Always reports the canned list of matches it was seeded with.
    """
    def __init__(self, best_matches):
        self._matches = best_matches
    def best_matches(self):
        """Return the canned language-match list."""
        return self._matches
class FakeRequest(object):
    """Minimal request double exposing only ``accept_language``."""
    def __init__(self, best_matches):
        self.accept_language = FakeAcceptLanguage(best_matches)
def test_get_target_lang_from_request():
    """Exercise locale negotiation over Accept-Language, form, and defaults."""
    def pick_lang(langs=[], form_lang=None):
        """Build a request with the given Accept-Language (and optional
        ?lang= form value) and return the negotiated locale."""
        environ = {
            "REQUEST_METHOD" : "GET",
            "PATH_INFO" : "/",
            "HTTP_ACCEPT_LANGUAGE" : ", ".join(langs),
        }
        if form_lang:
            environ["QUERY_STRING"] = "lang="+form_lang
        req = Request(environ)
        req.matchdict = {}
        return util.get_target_lang_from_request(req, default_locale='default')
    # don't crash when the environment variables are blank
    req = Request.blank("/")
    lang = util.get_target_lang_from_request(req, default_locale='default')
    assert lang == 'default'
    # default language case
    assert pick_lang() == 'default'
    # American English
    assert pick_lang(['en-us', 'en']) == 'en_US'
    # Spanish
    assert pick_lang(['es']) == 'es'
    # quality-weighted header, per
    # http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
    assert pick_lang(['da, en-gb;q=0.8, en;q=0.7']) == 'da'
    # unknown region falls back to the bare language
    assert pick_lang(['en-bs']) == 'en'
    # bogus locale falls back to the default
    assert pick_lang(['total_bs_locale']) == 'default'
    # underscore with uppercase region is preserved
    assert pick_lang(['es_ES']) == 'es_ES'
    # hyphen/lowercase region is normalised to underscore/uppercase
    assert pick_lang(['es-es']) == 'es_ES'
    # an explicit ?lang= form value wins over the header
    assert pick_lang(['es', 'el'], form_lang='jp') == 'jp'
def test_get_xpath_attribute():
    """_get_xpath_attribute returns a named attribute off the matched node."""
    tree = etree.parse(
        StringIO.StringIO('<foo><bar><baz basil="herb" /></bar></foo>'))
    assert util._get_xpath_attribute(tree, '/foo/bar/baz', 'basil') == 'herb'
def test_get_locale_identity_data():
    """A full locale string splits into language/territory/variant/script."""
    identity_data = util.get_locale_identity_data('en-US_POSIX')
    assert identity_data['language'] == 'en'
    assert identity_data['territory'] == 'US'
    assert identity_data['variant'] == 'POSIX'
    # 'en-US_POSIX' carries no script component
    assert identity_data['script'] == None
def test_get_locale_text_orientation():
    """RTL locales report 'rtl'; LTR and unknown locales report 'ltr'."""
    expectations = [
        ('he-il', u'rtl'),    # Hebrew is right-to-left
        ('en', u'ltr'),       # English is left-to-right
        ('foo-bar', u'ltr'),  # unknown locales default to ltr
    ]
    for locale, orientation in expectations:
        assert util.get_locale_text_orientation(locale) == orientation
def test_active_languages():
    """English must appear in the active-language list.

    BUG FIX: the original membership expression was a bare statement
    whose result was discarded, so this test could never fail.  It now
    actually asserts.
    """
    assert {'code': 'en', 'name': u'English'} in util.active_languages()
def test_safer_resource_filename():
    """In-package paths resolve; path-traversal attempts raise UnsafeResource."""
    assert util.safer_resource_filename(
        'cc.engine', 'templates/test/bunnies.html').endswith(
        'templates/test/bunnies.html')
    # '../..' escape attempts must be rejected
    assert_raises(
        util.UnsafeResource,
        util.safer_resource_filename,
        'cc.engine', '../../templates/test/bunnies.html')
def test_send_email():
    """send_email should land in the test inbox and the raw mbox capture."""
    util._clear_test_inboxes()
    # send the email
    util.send_email(
        "sender@creativecommons.org",
        ["amanda@example.org", "akila@example.org"],
        "Testing is so much fun!",
        """HAYYY GUYS!
I hope you like unit tests JUST AS MUCH AS I DO!""")
    # check the main inbox
    assert len(util.EMAIL_TEST_INBOX) == 1
    message = util.EMAIL_TEST_INBOX.pop()
    assert message['From'] == "sender@creativecommons.org"
    assert message['To'] == "amanda@example.org, akila@example.org"
    assert message['Subject'] == "Testing is so much fun!"
    assert message.get_payload() == """HAYYY GUYS!
I hope you like unit tests JUST AS MUCH AS I DO!"""
    # Check everything that the FakeMhost.sendmail() method got is correct
    assert len(util.EMAIL_TEST_MBOX_INBOX) == 1
    mbox_dict = util.EMAIL_TEST_MBOX_INBOX.pop()
    assert mbox_dict['from'] == "sender@creativecommons.org"
    assert mbox_dict['to'] == ["amanda@example.org", "akila@example.org"]
    mbox_message = email.message_from_string(mbox_dict['message'])
    assert mbox_message['From'] == "sender@creativecommons.org"
    assert mbox_message['To'] == "amanda@example.org, akila@example.org"
    assert mbox_message['Subject'] == "Testing is so much fun!"
    assert mbox_message.get_payload() == """HAYYY GUYS!
I hope you like unit tests JUST AS MUCH AS I DO!"""
SILLY_LICENSE_HTML = """This work available under a
<a href="http://example.org/goes/nowhere">very silly license</a>."""
def test_send_license_info_email():
    """The license-info email has the right envelope and one of two bodies."""
    util._clear_test_inboxes()
    util.send_license_info_email(
        'Creative Commons Very-Silly License 5.8',
        SILLY_LICENSE_HTML,
        'ilovesillylicenses@example.org', 'en')
    assert len(util.EMAIL_TEST_INBOX) == 1
    message = util.EMAIL_TEST_INBOX.pop()
    assert message['From'] == "info@creativecommons.org"
    assert message['To'] == "ilovesillylicenses@example.org"
    assert message['Subject'] == "Your Creative Commons License Information"
    # body when no donation campaign is active
    normal_payload = """Thank you for using a Creative Commons legal tool for your work.
You have selected Creative Commons Very-Silly License 5.8.
You should include a reference to this on the web page that includes
the work in question.
Here is the suggested HTML:
This work available under a
<a href="http://example.org/goes/nowhere">very silly license</a>.
Tips for marking your work can be found at
http://wiki.creativecommons.org/Marking. Information on the supplied HTML and
metadata can be found at http://wiki.creativecommons.org/CC_REL.
This email and tech support has been brought to you by the nonprofit folks at
Creative Commons. CC relies on donations to provide you with licenses and
services like this. Please consider a donation to our annual fund:
https://creativecommons.net/donate.
Thank you!
Creative Commons Support
info@creativecommons.org"""
    # same body as emitted during a donation campaign
    campaign_payload = """Thank you for using a Creative Commons legal tool for your work.\n\nYou have selected Creative Commons Very-Silly License 5.8.\nYou should include a reference to this on the web page that includes\nthe work in question.\n\nHere is the suggested HTML:\n\nThis work available under a\n<a href="http://example.org/goes/nowhere">very silly license</a>.\n\nTips for marking your work can be found at\nhttp://wiki.creativecommons.org/Marking. Information on the supplied HTML and\nmetadata can be found at http://wiki.creativecommons.org/CC_REL.\n\nThis email and tech support has been brought to you by the nonprofit folks at\nCreative Commons. CC relies on donations to provide you with licenses and\nservices like this. Please consider a donation to our annual fund:\nhttps://creativecommons.net/donate.\n\nThank you!\nCreative Commons Support\ninfo@creativecommons.org"""
    assert message.get_payload() in [normal_payload, campaign_payload]
def test_subset_dict():
    """subset_dict keeps only requested keys; missing keys are ignored."""
    source = {
        'keeper1': 'keepme1',
        'loser1': 'loseme1',
        'keeper2': 'keepme2',
        'loser2': 'loseme2'}
    # 'keeper3' is requested but absent from the source dict
    wanted = ['keeper1', 'keeper2', 'keeper3']
    assert util.subset_dict(source, wanted) == {
        'keeper1': 'keepme1',
        'keeper2': 'keepme2'}
def test_publicdomain_partner_get_params():
    """Only whitelisted partner-interface params survive, URL-encoded."""
    result = util.publicdomain_partner_get_params({'lang': 'en'})
    assert result == 'lang=en'
    # ignore garbage parameters
    result = util.publicdomain_partner_get_params({'lang': 'en', 'floobie': 'blech'})
    assert result == 'lang=en'
    result = util.publicdomain_partner_get_params(
        {'lang': 'en',
         'partner': 'http://nethack.org/',
         'exit_url': 'http://nethack.org/return_from_cc?license_url=[license_url]&license_name=[license_name]',
         'stylesheet': 'http://nethack.org/yendor.css',
         'extraneous_argument': 'large mimic'})
    # parameter order is not guaranteed, so compare piecewise
    result_pieces = result.split('&')
    assert len(result_pieces) == 4
    assert 'lang=en' in result_pieces
    assert 'partner=http%3A%2F%2Fnethack.org%2F' in result_pieces
    assert 'exit_url=http%3A%2F%2Fnethack.org%2Freturn_from_cc%3Flicense_url%3D%5Blicense_url%5D%26license_name%3D%5Blicense_name%5D' in result_pieces
    assert 'stylesheet=http%3A%2F%2Fnethack.org%2Fyendor.css' in result_pieces
def test_catch_license_versions_from_request():
    """All versions of a license code are found; bogus codes yield nothing."""
    # Request with just a code
    request = Request.blank('/')
    request.matchdict = {
        'code': 'by'}
    license_versions = util.catch_license_versions_from_request(request)
    license_uris = [lic.uri for lic in license_versions]
    nose.tools.assert_equal(
        license_uris,
        ['http://creativecommons.org/licenses/by/1.0/',
         'http://creativecommons.org/licenses/by/2.0/',
         'http://creativecommons.org/licenses/by/2.5/',
         'http://creativecommons.org/licenses/by/3.0/',
         'http://creativecommons.org/licenses/by/4.0/'])
    # Request with a code and valid jurisdiction
    request = Request.blank('/')
    request.matchdict = {
        'code': 'by',
        'jurisdiction': 'es'}
    license_versions = util.catch_license_versions_from_request(request)
    license_uris = [lic.uri for lic in license_versions]
    nose.tools.assert_equal(
        license_uris,
        ['http://creativecommons.org/licenses/by/2.0/es/',
         'http://creativecommons.org/licenses/by/2.1/es/',
         'http://creativecommons.org/licenses/by/2.5/es/',
         'http://creativecommons.org/licenses/by/3.0/es/'])
    # Request with a code and bogus jurisdiction: falls back to unported
    request = Request.blank('/')
    request.matchdict = {
        'code': 'by',
        'jurisdiction': 'zz'}
    license_versions = util.catch_license_versions_from_request(request)
    license_uris = [lic.uri for lic in license_versions]
    nose.tools.assert_equal(
        license_uris,
        ['http://creativecommons.org/licenses/by/1.0/',
         'http://creativecommons.org/licenses/by/2.0/',
         'http://creativecommons.org/licenses/by/2.5/',
         'http://creativecommons.org/licenses/by/3.0/',
         'http://creativecommons.org/licenses/by/4.0/'])
    # Request with a bogus code
    request = Request.blank('/')
    request.matchdict = {
        'code': 'AAAAA'}
    license_versions = util.catch_license_versions_from_request(request)
    license_uris = [lic.uri for lic in license_versions]
    nose.tools.assert_equal(
        license_uris, [])
    # Request with a bogus code and bogus jurisdiction
    request = Request.blank('/')
    request.matchdict = {
        'code': 'AAAAA', 'jurisdiction': 'FUUUUUUU'}
    license_versions = util.catch_license_versions_from_request(request)
    license_uris = [lic.uri for lic in license_versions]
    nose.tools.assert_equal(
        license_uris, [])
| 35.855738 | 896 | 0.684254 | import email
import StringIO
from nose.tools import assert_raises
from lxml import etree
from webob import Request
import nose
import cc.license
from cc.engine import util
util._activate_testing()
class FakeAcceptLanguage(object):
def __init__(self, best_matches):
self._best_matches = best_matches
def best_matches(self):
return self._best_matches
class FakeRequest(object):
def __init__(self, best_matches):
self.accept_language = FakeAcceptLanguage(best_matches)
def test_get_target_lang_from_request():
def pick_lang(langs=[], form_lang=None):
environ = {
"REQUEST_METHOD" : "GET",
"PATH_INFO" : "/",
"HTTP_ACCEPT_LANGUAGE" : ", ".join(langs),
}
if form_lang:
environ["QUERY_STRING"] = "lang="+form_lang
req = Request(environ)
req.matchdict = {}
return util.get_target_lang_from_request(req, default_locale='default')
req = Request.blank("/")
lang = util.get_target_lang_from_request(req, default_locale='default')
assert lang == 'default'
# default language case
assert pick_lang() == 'default'
# amurican english
assert pick_lang(['en-us', 'en']) == 'en_US'
# spanish
assert pick_lang(['es']) == 'es'
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
assert pick_lang(['da, en-gb;q=0.8, en;q=0.7']) == 'da'
# moar english
assert pick_lang(['en-bs']) == 'en'
# bs
assert pick_lang(['total_bs_locale']) == 'default'
# lower upper
assert pick_lang(['es_ES']) == 'es_ES'
# lower lower
assert pick_lang(['es-es']) == 'es_ES'
# specific language request
assert pick_lang(['es', 'el'], form_lang='jp') == 'jp'
def test_get_xpath_attribute():
tree = etree.parse(
StringIO.StringIO('<foo><bar><baz basil="herb" /></bar></foo>'))
assert util._get_xpath_attribute(tree, '/foo/bar/baz', 'basil') == 'herb'
def test_get_locale_identity_data():
identity_data = util.get_locale_identity_data('en-US_POSIX')
assert identity_data['language'] == 'en'
assert identity_data['territory'] == 'US'
assert identity_data['variant'] == 'POSIX'
assert identity_data['script'] == None
def test_get_locale_text_orientation():
# Make sure rtl languates are accepted as rtl
assert util.get_locale_text_orientation('he-il') == u'rtl'
# Make sure ltr languates are accepted as ltr
assert util.get_locale_text_orientation('en') == u'ltr'
# If only an unknown/imaginary language is given, default to ltr
assert util.get_locale_text_orientation('foo-bar') == u'ltr'
def test_active_languages():
{'code': 'en', 'name': u'English'} in util.active_languages()
def test_safer_resource_filename():
assert util.safer_resource_filename(
'cc.engine', 'templates/test/bunnies.html').endswith(
'templates/test/bunnies.html')
assert_raises(
util.UnsafeResource,
util.safer_resource_filename,
'cc.engine', '../../templates/test/bunnies.html')
def test_send_email():
util._clear_test_inboxes()
# send the email
util.send_email(
"sender@creativecommons.org",
["amanda@example.org", "akila@example.org"],
"Testing is so much fun!",
"""HAYYY GUYS!
I hope you like unit tests JUST AS MUCH AS I DO!""")
# check the main inbox
assert len(util.EMAIL_TEST_INBOX) == 1
message = util.EMAIL_TEST_INBOX.pop()
assert message['From'] == "sender@creativecommons.org"
assert message['To'] == "amanda@example.org, akila@example.org"
assert message['Subject'] == "Testing is so much fun!"
assert message.get_payload() == """HAYYY GUYS!
I hope you like unit tests JUST AS MUCH AS I DO!"""
# Check everything that the FakeMhost.sendmail() method got is correct
assert len(util.EMAIL_TEST_MBOX_INBOX) == 1
mbox_dict = util.EMAIL_TEST_MBOX_INBOX.pop()
assert mbox_dict['from'] == "sender@creativecommons.org"
assert mbox_dict['to'] == ["amanda@example.org", "akila@example.org"]
mbox_message = email.message_from_string(mbox_dict['message'])
assert mbox_message['From'] == "sender@creativecommons.org"
assert mbox_message['To'] == "amanda@example.org, akila@example.org"
assert mbox_message['Subject'] == "Testing is so much fun!"
assert mbox_message.get_payload() == """HAYYY GUYS!
I hope you like unit tests JUST AS MUCH AS I DO!"""
SILLY_LICENSE_HTML = """This work available under a
<a href="http://example.org/goes/nowhere">very silly license</a>."""
def test_send_license_info_email():
util._clear_test_inboxes()
util.send_license_info_email(
'Creative Commons Very-Silly License 5.8',
SILLY_LICENSE_HTML,
'ilovesillylicenses@example.org', 'en')
assert len(util.EMAIL_TEST_INBOX) == 1
message = util.EMAIL_TEST_INBOX.pop()
assert message['From'] == "info@creativecommons.org"
assert message['To'] == "ilovesillylicenses@example.org"
assert message['Subject'] == "Your Creative Commons License Information"
normal_payload = """Thank you for using a Creative Commons legal tool for your work.
You have selected Creative Commons Very-Silly License 5.8.
You should include a reference to this on the web page that includes
the work in question.
Here is the suggested HTML:
This work available under a
<a href="http://example.org/goes/nowhere">very silly license</a>.
Tips for marking your work can be found at
http://wiki.creativecommons.org/Marking. Information on the supplied HTML and
metadata can be found at http://wiki.creativecommons.org/CC_REL.
This email and tech support has been brought to you by the nonprofit folks at
Creative Commons. CC relies on donations to provide you with licenses and
services like this. Please consider a donation to our annual fund:
https://creativecommons.net/donate.
Thank you!
Creative Commons Support
info@creativecommons.org"""
campaign_payload = """Thank you for using a Creative Commons legal tool for your work.\n\nYou have selected Creative Commons Very-Silly License 5.8.\nYou should include a reference to this on the web page that includes\nthe work in question.\n\nHere is the suggested HTML:\n\nThis work available under a\n<a href="http://example.org/goes/nowhere">very silly license</a>.\n\nTips for marking your work can be found at\nhttp://wiki.creativecommons.org/Marking. Information on the supplied HTML and\nmetadata can be found at http://wiki.creativecommons.org/CC_REL.\n\nThis email and tech support has been brought to you by the nonprofit folks at\nCreative Commons. CC relies on donations to provide you with licenses and\nservices like this. Please consider a donation to our annual fund:\nhttps://creativecommons.net/donate.\n\nThank you!\nCreative Commons Support\ninfo@creativecommons.org"""
assert message.get_payload() in [normal_payload, campaign_payload]
def test_subset_dict():
expected = {
'keeper1': 'keepme1',
'keeper2': 'keepme2'}
result = util.subset_dict(
{'keeper1': 'keepme1',
'loser1': 'loseme1',
'keeper2': 'keepme2',
'loser2': 'loseme2'},
['keeper1', 'keeper2', 'keeper3'])
assert result == expected
def test_publicdomain_partner_get_params():
result = util.publicdomain_partner_get_params({'lang': 'en'})
assert result == 'lang=en'
# ignore garbage parameters
result = util.publicdomain_partner_get_params({'lang': 'en', 'floobie': 'blech'})
assert result == 'lang=en'
result = util.publicdomain_partner_get_params(
{'lang': 'en',
'partner': 'http://nethack.org/',
'exit_url': 'http://nethack.org/return_from_cc?license_url=[license_url]&license_name=[license_name]',
'stylesheet': 'http://nethack.org/yendor.css',
'extraneous_argument': 'large mimic'})
result_pieces = result.split('&')
assert len(result_pieces) == 4
assert 'lang=en' in result_pieces
assert 'partner=http%3A%2F%2Fnethack.org%2F' in result_pieces
assert 'exit_url=http%3A%2F%2Fnethack.org%2Freturn_from_cc%3Flicense_url%3D%5Blicense_url%5D%26license_name%3D%5Blicense_name%5D' in result_pieces
assert 'stylesheet=http%3A%2F%2Fnethack.org%2Fyendor.css' in result_pieces
def test_catch_license_versions_from_request():
    """catch_license_versions_from_request() maps a route matchdict (code
    plus optional jurisdiction) to all published license versions; an
    unknown jurisdiction falls back to the unported versions and an
    unknown code yields no versions at all."""
    def uris_for(matchdict):
        # Build a blank request carrying the given route match and return
        # the URIs of the license versions it resolves to.
        request = Request.blank('/')
        request.matchdict = matchdict
        versions = util.catch_license_versions_from_request(request)
        return [lic.uri for lic in versions]

    unported_by_uris = [
        'http://creativecommons.org/licenses/by/1.0/',
        'http://creativecommons.org/licenses/by/2.0/',
        'http://creativecommons.org/licenses/by/2.5/',
        'http://creativecommons.org/licenses/by/3.0/',
        'http://creativecommons.org/licenses/by/4.0/']

    # Code only: every unported version of "by".
    nose.tools.assert_equal(uris_for({'code': 'by'}), unported_by_uris)

    # Code plus a valid jurisdiction: the ported versions for that region.
    nose.tools.assert_equal(
        uris_for({'code': 'by', 'jurisdiction': 'es'}),
        ['http://creativecommons.org/licenses/by/2.0/es/',
         'http://creativecommons.org/licenses/by/2.1/es/',
         'http://creativecommons.org/licenses/by/2.5/es/',
         'http://creativecommons.org/licenses/by/3.0/es/'])

    # Bogus jurisdiction: same result as no jurisdiction at all.
    nose.tools.assert_equal(
        uris_for({'code': 'by', 'jurisdiction': 'zz'}), unported_by_uris)

    # Bogus code: nothing matches.
    nose.tools.assert_equal(uris_for({'code': 'AAAAA'}), [])

    # Bogus code and bogus jurisdiction: still nothing.
    nose.tools.assert_equal(
        uris_for({'code': 'AAAAA', 'jurisdiction': 'FUUUUUUU'}), [])
| true | true |
f72260a0d9e69cc1728afa301a3238b988fe14f2 | 3,900 | py | Python | attention_intervention_winobias.py | simassakenis/CausalMediationAnalysis | ad46c02cdda88e13e2e891e9968c365e093ecc99 | [
"MIT"
] | null | null | null | attention_intervention_winobias.py | simassakenis/CausalMediationAnalysis | ad46c02cdda88e13e2e891e9968c365e093ecc99 | [
"MIT"
] | null | null | null | attention_intervention_winobias.py | simassakenis/CausalMediationAnalysis | ad46c02cdda88e13e2e891e9968c365e093ecc99 | [
"MIT"
] | null | null | null | """Performs attention intervention on Winobias samples and saves results to JSON file."""
import json
import fire
from pandas import DataFrame
from transformers import (
GPT2Tokenizer, TransfoXLTokenizer, XLNetTokenizer,
BertTokenizer, DistilBertTokenizer, RobertaTokenizer
)
import winobias
from attention_utils import perform_interventions, get_odds_ratio
from experiment import Model
def get_interventions_winobias(gpt2_version, do_filter, split, model, tokenizer,
                               device='cuda', filter_quantile=0.25):
    """Load a Winobias split and convert its examples to attention interventions.

    When ``do_filter`` is true, only examples whose odds ratio under ``model``
    exceeds the ``filter_quantile`` quantile of the stereotype-aligned
    examples (those with odds ratio > 1) are kept.

    Args:
        gpt2_version: Model version string, recorded in the metadata dict.
        do_filter: Whether to filter examples by odds ratio.
        split: Which Winobias split to load, 'dev' or 'test'.
        model: Wrapped model used to compute odds ratios.
        tokenizer: Tokenizer used to build the interventions.
        device: Accepted for interface compatibility; unused in this function.
        filter_quantile: Quantile of aligned odds ratios used as the cutoff.

    Returns:
        A ``(interventions, json_data)`` pair: the interventions to perform
        and a metadata dict describing the loading/filtering.

    Raises:
        ValueError: If ``split`` is neither 'dev' nor 'test'.
    """
    if split == 'dev':
        examples = winobias.load_dev_examples()
    elif split == 'test':
        examples = winobias.load_test_examples()
    else:
        raise ValueError(f"Invalid split: {split}")
    json_data = {'model_version': gpt2_version,
                 'do_filter': do_filter,
                 'split': split,
                 'num_examples_loaded': len(examples)}
    if do_filter:
        interventions = [ex.to_intervention(tokenizer) for ex in examples]
        odds_ratios = [get_odds_ratio(intervention, model)
                       for intervention in interventions]
        df = DataFrame({'odds_ratio': odds_ratios})
        # The cutoff is a quantile over the "aligned" examples only
        # (odds ratio > 1), not over the full set.
        df_expected = df[df.odds_ratio > 1]
        threshold = df_expected.odds_ratio.quantile(filter_quantile)
        assert len(examples) == len(odds_ratios)
        # Pair each example with its odds ratio directly; the original
        # index loop re-fetched every ratio via a per-row DataFrame
        # `.iloc[i]` lookup, which is both non-idiomatic and slow.
        filtered_examples = [ex for ex, odds_ratio in zip(examples, odds_ratios)
                             if odds_ratio > threshold]
        print(f'Num examples with odds ratio > 1: {len(df_expected)} / {len(examples)}')
        print(
            f'Num examples with odds ratio > {threshold:.4f} ({filter_quantile} quantile): {len(filtered_examples)} / {len(examples)}')
        json_data['num_examples_aligned'] = len(df_expected)
        json_data['filter_quantile'] = filter_quantile
        json_data['threshold'] = threshold
        examples = filtered_examples
    json_data['num_examples_analyzed'] = len(examples)
    interventions = [ex.to_intervention(tokenizer) for ex in examples]
    return interventions, json_data
def intervene_attention(gpt2_version, do_filter, split, device='cuda',
                        filter_quantile=0.25, random_weights=False,
                        masking_approach=1):
    """Run attention interventions over a Winobias split and write the
    per-example results plus summary effect means to a JSON file under
    winobias_data/."""
    model = Model(output_attentions=True, gpt2_version=gpt2_version,
                  device=device, random_weights=random_weights,
                  masking_approach=masking_approach)
    # Pick the tokenizer class matching the wrapped architecture.
    if model.is_gpt2:
        tokenizer_cls = GPT2Tokenizer
    elif model.is_txl:
        tokenizer_cls = TransfoXLTokenizer
    elif model.is_xlnet:
        tokenizer_cls = XLNetTokenizer
    elif model.is_bert:
        tokenizer_cls = BertTokenizer
    elif model.is_distilbert:
        tokenizer_cls = DistilBertTokenizer
    else:
        tokenizer_cls = RobertaTokenizer
    tokenizer = tokenizer_cls.from_pretrained(gpt2_version)

    interventions, json_data = get_interventions_winobias(
        gpt2_version, do_filter, split, model, tokenizer,
        device, filter_quantile)
    results = perform_interventions(interventions, model)

    # Summarize the per-example effects into the metadata dict.
    results_frame = DataFrame(results)
    json_data['mean_total_effect'] = results_frame.total_effect.mean()
    json_data['mean_model_indirect_effect'] = results_frame.indirect_effect_model.mean()
    json_data['mean_model_direct_effect'] = results_frame.direct_effect_model.mean()

    filter_name = 'filtered' if do_filter else 'unfiltered'
    if random_weights:
        gpt2_version += '_random'
    # Autoregressive models omit masking_approach from the filename.
    if model.is_gpt2 or model.is_txl or model.is_xlnet:
        fname = f"winobias_data/attention_intervention_{gpt2_version}_{filter_name}_{split}.json"
    else:
        fname = f"winobias_data/attention_intervention_{gpt2_version}_{filter_name}_{split}_{masking_approach}.json"
    json_data['results'] = results
    with open(fname, 'w') as f:
        json.dump(json_data, f)
# Command-line entry point: python-fire maps CLI flags onto
# intervene_attention's keyword arguments.
if __name__ == "__main__":
    fire.Fire(intervene_attention)
| 45.348837 | 135 | 0.678718 |
import json
import fire
from pandas import DataFrame
from transformers import (
GPT2Tokenizer, TransfoXLTokenizer, XLNetTokenizer,
BertTokenizer, DistilBertTokenizer, RobertaTokenizer
)
import winobias
from attention_utils import perform_interventions, get_odds_ratio
from experiment import Model
def get_interventions_winobias(gpt2_version, do_filter, split, model, tokenizer,
                               device='cuda', filter_quantile=0.25):
    """Load a Winobias split and convert its examples to interventions.

    When `do_filter` is set, keeps only examples whose odds ratio under
    `model` exceeds the `filter_quantile` quantile computed over the
    examples with odds ratio > 1 (counted as "aligned" below).
    Returns (interventions, json_data), where json_data records loading
    and filtering metadata. Raises ValueError for an unknown `split`.

    NOTE(review): `device` is accepted but unused in this function.
    """
    if split == 'dev':
        examples = winobias.load_dev_examples()
    elif split == 'test':
        examples = winobias.load_test_examples()
    else:
        raise ValueError(f"Invalid split: {split}")
    json_data = {'model_version': gpt2_version,
                 'do_filter': do_filter,
                 'split': split,
                 'num_examples_loaded': len(examples)}
    if do_filter:
        interventions = [ex.to_intervention(tokenizer) for ex in examples]
        df = DataFrame({'odds_ratio': [get_odds_ratio(intervention, model) for intervention in interventions]})
        # The filter threshold is a quantile of the odds ratios of the
        # "aligned" subset (ratio > 1), not of all examples.
        df_expected = df[df.odds_ratio > 1]
        threshold = df_expected.odds_ratio.quantile(filter_quantile)
        filtered_examples = []
        assert len(examples) == len(df)
        for i in range(len(examples)):
            ex = examples[i]
            odds_ratio = df.iloc[i].odds_ratio
            if odds_ratio > threshold:
                filtered_examples.append(ex)
        print(f'Num examples with odds ratio > 1: {len(df_expected)} / {len(examples)}')
        print(
            f'Num examples with odds ratio > {threshold:.4f} ({filter_quantile} quantile): {len(filtered_examples)} / {len(examples)}')
        json_data['num_examples_aligned'] = len(df_expected)
        json_data['filter_quantile'] = filter_quantile
        json_data['threshold'] = threshold
        examples = filtered_examples
    json_data['num_examples_analyzed'] = len(examples)
    # Interventions are rebuilt from the (possibly filtered) example list.
    interventions = [ex.to_intervention(tokenizer) for ex in examples]
    return interventions, json_data
def intervene_attention(gpt2_version, do_filter, split, device='cuda',
                        filter_quantile=0.25, random_weights=False,
                        masking_approach=1):
    """Run attention interventions on a Winobias split and save results.

    Builds the model and the matching tokenizer, gathers the (optionally
    filtered) interventions, performs them, and writes the per-example
    results plus mean total/indirect/direct effects as JSON under
    winobias_data/.
    """
    model = Model(output_attentions=True, gpt2_version=gpt2_version,
                  device=device, random_weights=random_weights,
                  masking_approach=masking_approach)
    # Select the tokenizer class that matches the wrapped architecture.
    tokenizer = (GPT2Tokenizer if model.is_gpt2 else
                 TransfoXLTokenizer if model.is_txl else
                 XLNetTokenizer if model.is_xlnet else
                 BertTokenizer if model.is_bert else
                 DistilBertTokenizer if model.is_distilbert else
                 RobertaTokenizer).from_pretrained(gpt2_version)
    interventions, json_data = get_interventions_winobias(gpt2_version, do_filter, split, model, tokenizer,
                                                          device, filter_quantile)
    results = perform_interventions(interventions, model)
    # Summary statistics over the per-example effect columns.
    json_data['mean_total_effect'] = DataFrame(results).total_effect.mean()
    json_data['mean_model_indirect_effect'] = DataFrame(results).indirect_effect_model.mean()
    json_data['mean_model_direct_effect'] = DataFrame(results).direct_effect_model.mean()
    filter_name = 'filtered' if do_filter else 'unfiltered'
    if random_weights:
        # Tag randomly initialized runs in the output filename.
        gpt2_version += '_random'
    # Autoregressive models (GPT-2/TransfoXL/XLNet) omit masking_approach
    # from the output filename; the other architectures include it.
    if model.is_gpt2 or model.is_txl or model.is_xlnet:
        fname = f"winobias_data/attention_intervention_{gpt2_version}_{filter_name}_{split}.json"
    else:
        fname = f"winobias_data/attention_intervention_{gpt2_version}_{filter_name}_{split}_{masking_approach}.json"
    json_data['results'] = results
    with open(fname, 'w') as f:
        json.dump(json_data, f)
# Command-line entry point via python-fire.
if __name__ == "__main__":
    fire.Fire(intervene_attention)
| true | true |
f72260b9d6cb7e207bd2ab87653d3aa297dd46fb | 11,973 | py | Python | code/python/StocksAPIforDigitalPortals/v2/fds/sdk/StocksAPIforDigitalPortals/model/stock_notation_screener_search_data_simple_moving_average_trading_days_since_crossover_sma50vs200_number_days_maximum.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | 6 | 2022-02-07T16:34:18.000Z | 2022-03-30T08:04:57.000Z | code/python/StocksAPIforDigitalPortals/v2/fds/sdk/StocksAPIforDigitalPortals/model/stock_notation_screener_search_data_simple_moving_average_trading_days_since_crossover_sma50vs200_number_days_maximum.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | 2 | 2022-02-07T05:25:57.000Z | 2022-03-07T14:18:04.000Z | code/python/StocksAPIforDigitalPortals/v2/fds/sdk/StocksAPIforDigitalPortals/model/stock_notation_screener_search_data_simple_moving_average_trading_days_since_crossover_sma50vs200_number_days_maximum.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | null | null | null | """
Prime Developer Trial
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.StocksAPIforDigitalPortals.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.StocksAPIforDigitalPortals.exceptions import ApiAttributeError
class StockNotationScreenerSearchDataSimpleMovingAverageTradingDaysSinceCrossoverSma50vs200NumberDaysMaximum(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # No enum-constrained attributes on this model.
    allowed_values = {
    }
    # Range constraint on 'value': must lie in the closed interval
    # [0, 300] (3E+2 == 300.0).
    validations = {
        ('value',): {
            'inclusive_maximum': 3E+2,
            'inclusive_minimum': 0,
        },
    }
    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'value': (float,),  # noqa: E501
            'inclusive': (bool,),  # noqa: E501
        }
    @cached_property
    def discriminator():
        # No polymorphic discriminator on this model.
        return None
    attribute_map = {
        'value': 'value',  # noqa: E501
        'inclusive': 'inclusive',  # noqa: E501
    }
    read_only_vars = {
    }
    _composed_schemas = {}
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """StockNotationScreenerSearchDataSimpleMovingAverageTradingDaysSinceCrossoverSma50vs200NumberDaysMaximum - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            value (float): The maximum value.. [optional]  # noqa: E501
            inclusive (bool): Indicates whether the maximum value is included in the range or not.. [optional] if omitted the server will use the default value of True  # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """StockNotationScreenerSearchDataSimpleMovingAverageTradingDaysSinceCrossoverSma50vs200NumberDaysMaximum - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            value (float): The maximum value.. [optional]  # noqa: E501
            inclusive (bool): Indicates whether the maximum value is included in the range or not.. [optional] if omitted the server will use the default value of True  # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
| 45.352273 | 181 | 0.58373 |
import re
import sys
from fds.sdk.StocksAPIforDigitalPortals.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.StocksAPIforDigitalPortals.exceptions import ApiAttributeError
class StockNotationScreenerSearchDataSimpleMovingAverageTradingDaysSinceCrossoverSma50vs200NumberDaysMaximum(ModelNormal):
    """Auto-generated OpenAPI model for the "maximum" bound of a screener
    filter: a float ``value`` (validated to [0, 300]) plus an
    ``inclusive`` flag indicating whether the bound itself is in range.
    """
    # No enum-constrained attributes on this model.
    allowed_values = {
    }
    # Range constraint applied to 'value' on assignment: [0, 300].
    validations = {
        ('value',): {
            'inclusive_maximum': 3E+2,
            'inclusive_minimum': 0,
        },
    }
    @cached_property
    def additional_properties_type():
        """Types accepted for properties not declared in openapi_types.
        A method (not a class attribute) so self-referential property
        types resolve after the class is loaded.
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)
    _nullable = False
    @cached_property
    def openapi_types():
        """Map of attribute name to the tuple of accepted Python types."""
        return {
            'value': (float,),
            'inclusive': (bool,),
        }
    @cached_property
    def discriminator():
        # No polymorphic discriminator on this model.
        return None
    # Python attribute name -> JSON key (identical for this model).
    attribute_map = {
        'value': 'value',
        'inclusive': 'inclusive',
    }
    read_only_vars = {
    }
    _composed_schemas = {}
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """Deserialization constructor: build an instance from API data.
        Model attributes arrive as keyword arguments; the underscore
        kwargs control type checking, naming and deserialization context.
        Positional arguments are rejected with ApiTypeError.
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # Configured to discard unknown keys and this one is unknown.
                continue
            setattr(self, var_name, var_value)
        return self
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """User-facing constructor; mirrors _from_openapi_data but rejects
        assignment of read-only attributes (none declared on this model).
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # Configured to discard unknown keys and this one is unknown.
                continue
            setattr(self, var_name, var_value)
            # NOTE(review): generated code checks read-only status after
            # setattr, so the attribute is assigned before the error is
            # raised; left as-is since this file is machine-generated.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
| true | true |
f722630ddd2f0f9085208c0807dc71815ceb42e5 | 58,571 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_application_gateways_operations.py | praveenkuttappan/azure-sdk-for-python | 4b79413667b7539750a6c7dde15737013a3d4bd5 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_application_gateways_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_application_gateways_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Optional per-call response hook accepted by every operation: it receives
# the raw PipelineResponse, the deserialized result, and a dict (passed as
# {} by the operations in this file).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ApplicationGatewaysOperations:
"""ApplicationGatewaysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Keep references to the shared pipeline client, configuration and
        # (de)serializers supplied by the generated service client; every
        # operation below reuses them.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def _delete_initial(
        self,
        resource_group_name: str,
        application_gateway_name: str,
        **kwargs: Any
    ) -> None:
        """Issue the initial DELETE request of the long-running delete.

        Accepts 200/202/204; any other status is mapped to the matching
        azure-core exception via ``error_map``. Returns None (or the
        result of the optional ``cls`` response hook).
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        application_gateway_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified application gateway.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_gateway_name: The name of the application gateway.
        :type application_gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: kick the LRO off with the initial DELETE.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                application_gateway_name=application_gateway_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete returns no body; the optional response hook gets None.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling=True -> standard ARM polling; False -> no polling; any
        # other value is used as a caller-supplied AsyncPollingMethod.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started LRO from its continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        application_gateway_name: str,
        **kwargs: Any
    ) -> "_models.ApplicationGateway":
        """Gets the specified application gateway.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_gateway_name: The name of the application gateway.
        :type application_gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplicationGateway, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2018_07_01.models.ApplicationGateway
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ApplicationGateway"]
        # Map auth/not-found/conflict statuses onto azure-core exceptions;
        # callers may extend/override via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ApplicationGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'}  # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
application_gateway_name: str,
parameters: "_models.ApplicationGateway",
**kwargs: Any
) -> "_models.ApplicationGateway":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ApplicationGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ApplicationGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ApplicationGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'} # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        application_gateway_name: str,
        parameters: "_models.ApplicationGateway",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ApplicationGateway"]:
        """Creates or updates the specified application gateway.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_gateway_name: The name of the application gateway.
        :type application_gateway_name: str
        :param parameters: Parameters supplied to the create or update application gateway operation.
        :type parameters: ~azure.mgmt.network.v2018_07_01.models.ApplicationGateway
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ApplicationGateway or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.ApplicationGateway]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationGateway"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # The initial PUT is only issued when not resuming from a saved continuation token.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                application_gateway_name=application_gateway_name,
                parameters=parameters,
                cls=lambda x,y,z: x,  # keep the raw pipeline response; final deserialization happens below
                **kwargs
            )
        # These were consumed by the initial request and must not be forwarded to the poller.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response once polling completes.
            deserialized = self._deserialize('ApplicationGateway', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # Passed to the ARM poller so it can format any templated polling URLs.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling: True -> default ARM polling; False -> no polling; otherwise a user-supplied method.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Rehydrate a poller from previously saved state instead of a fresh response.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'} # type: ignore
async def _update_tags_initial(
self,
resource_group_name: str,
application_gateway_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.ApplicationGateway":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'} # type: ignore
    async def begin_update_tags(
        self,
        resource_group_name: str,
        application_gateway_name: str,
        parameters: "_models.TagsObject",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ApplicationGateway"]:
        """Updates the specified application gateway tags.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_gateway_name: The name of the application gateway.
        :type application_gateway_name: str
        :param parameters: Parameters supplied to update application gateway tags.
        :type parameters: ~azure.mgmt.network.v2018_07_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ApplicationGateway or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.ApplicationGateway]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationGateway"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # The initial PATCH is only issued when not resuming from a saved continuation token.
        if cont_token is None:
            raw_result = await self._update_tags_initial(
                resource_group_name=resource_group_name,
                application_gateway_name=application_gateway_name,
                parameters=parameters,
                cls=lambda x,y,z: x,  # keep the raw pipeline response; final deserialization happens below
                **kwargs
            )
        # These were consumed by the initial request and must not be forwarded to the poller.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response once polling completes.
            deserialized = self._deserialize('ApplicationGateway', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # Passed to the ARM poller so it can format any templated polling URLs.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling: True -> default ARM polling; False -> no polling; otherwise a user-supplied method.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Rehydrate a poller from previously saved state instead of a fresh response.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'} # type: ignore
    def list(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.ApplicationGatewayListResult"]:
        """Lists all application gateways in a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ApplicationGatewayListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.ApplicationGatewayListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationGatewayListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # First page: build the templated URL with api-version.
            # Subsequent pages: use the service-provided next_link as-is
            # (no extra query parameters are added).
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Split one page into (link to the next page or None, async iterable of items).
            deserialized = self._deserialize('ApplicationGatewayListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                # Give the optional cls callback a chance to transform the page's items.
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch a single page, mapping non-200 statuses onto typed exceptions.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways'} # type: ignore
    def list_all(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.ApplicationGatewayListResult"]:
        """Gets all the application gateways in a subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ApplicationGatewayListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.ApplicationGatewayListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationGatewayListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # First page: build the subscription-scoped URL with api-version.
            # Subsequent pages: use the service-provided next_link as-is
            # (no extra query parameters are added).
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Split one page into (link to the next page or None, async iterable of items).
            deserialized = self._deserialize('ApplicationGatewayListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                # Give the optional cls callback a chance to transform the page's items.
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch a single page, mapping non-200 statuses onto typed exceptions.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGateways'} # type: ignore
async def _start_initial(
self,
resource_group_name: str,
application_gateway_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
# Construct URL
url = self._start_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/start'} # type: ignore
    async def begin_start(
        self,
        resource_group_name: str,
        application_gateway_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Starts the specified application gateway.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_gateway_name: The name of the application gateway.
        :type application_gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # The initial POST is only issued when not resuming from a saved continuation token.
        if cont_token is None:
            raw_result = await self._start_initial(
                resource_group_name=resource_group_name,
                application_gateway_name=application_gateway_name,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        # These were consumed by the initial request and must not be forwarded to the poller.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Start has no response body; only the optional cls callback produces a value.
            if cls:
                return cls(pipeline_response, None, {})
        # Passed to the ARM poller so it can format any templated polling URLs.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling: True -> default ARM polling; False -> no polling; otherwise a user-supplied method.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Rehydrate a poller from previously saved state instead of a fresh response.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/start'} # type: ignore
async def _stop_initial(
self,
resource_group_name: str,
application_gateway_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
# Construct URL
url = self._stop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/stop'} # type: ignore
    async def begin_stop(
        self,
        resource_group_name: str,
        application_gateway_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Stops the specified application gateway in a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_gateway_name: The name of the application gateway.
        :type application_gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # The initial POST is only issued when not resuming from a saved continuation token.
        if cont_token is None:
            raw_result = await self._stop_initial(
                resource_group_name=resource_group_name,
                application_gateway_name=application_gateway_name,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        # These were consumed by the initial request and must not be forwarded to the poller.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Stop has no response body; only the optional cls callback produces a value.
            if cls:
                return cls(pipeline_response, None, {})
        # Passed to the ARM poller so it can format any templated polling URLs.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling: True -> default ARM polling; False -> no polling; otherwise a user-supplied method.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Rehydrate a poller from previously saved state instead of a fresh response.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/stop'} # type: ignore
async def _backend_health_initial(
self,
resource_group_name: str,
application_gateway_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> Optional["_models.ApplicationGatewayBackendHealth"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ApplicationGatewayBackendHealth"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
# Construct URL
url = self._backend_health_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ApplicationGatewayBackendHealth', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_backend_health_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/backendhealth'} # type: ignore
    async def begin_backend_health(
        self,
        resource_group_name: str,
        application_gateway_name: str,
        expand: Optional[str] = None,
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ApplicationGatewayBackendHealth"]:
        """Gets the backend health of the specified application gateway in a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_gateway_name: The name of the application gateway.
        :type application_gateway_name: str
        :param expand: Expands BackendAddressPool and BackendHttpSettings referenced in backend health.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ApplicationGatewayBackendHealth or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.ApplicationGatewayBackendHealth]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationGatewayBackendHealth"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # The initial POST is only issued when not resuming from a saved continuation token.
        if cont_token is None:
            raw_result = await self._backend_health_initial(
                resource_group_name=resource_group_name,
                application_gateway_name=application_gateway_name,
                expand=expand,
                cls=lambda x,y,z: x,  # keep the raw pipeline response; final deserialization happens below
                **kwargs
            )
        # These were consumed by the initial request and must not be forwarded to the poller.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response once polling completes.
            deserialized = self._deserialize('ApplicationGatewayBackendHealth', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # Passed to the ARM poller so it can format any templated polling URLs.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling: True -> default ARM polling; False -> no polling; otherwise a user-supplied method.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Rehydrate a poller from previously saved state instead of a fresh response.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_backend_health.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/backendhealth'} # type: ignore
async def list_available_waf_rule_sets(
self,
**kwargs: Any
) -> "_models.ApplicationGatewayAvailableWafRuleSetsResult":
"""Lists all available web application firewall rule sets.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationGatewayAvailableWafRuleSetsResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_07_01.models.ApplicationGatewayAvailableWafRuleSetsResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationGatewayAvailableWafRuleSetsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
# Construct URL
url = self.list_available_waf_rule_sets.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationGatewayAvailableWafRuleSetsResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_available_waf_rule_sets.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableWafRuleSets'} # type: ignore
async def list_available_ssl_options(
    self,
    **kwargs: Any
) -> "_models.ApplicationGatewayAvailableSslOptions":
    """Lists available Ssl options for configuring Ssl policy.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ApplicationGatewayAvailableSslOptions, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2018_07_01.models.ApplicationGatewayAvailableSslOptions
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ApplicationGatewayAvailableSslOptions"]
    handled_errors = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    handled_errors.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    accept = "application/json"

    serialize = self._serialize
    # Expand the URL template attached to this operation's metadata.
    url = self._client.format_url(
        self.list_available_ssl_options.metadata['url'],  # type: ignore
        subscriptionId=serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    )

    query_parameters = {'api-version': serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=handled_errors)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ApplicationGatewayAvailableSslOptions', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
list_available_ssl_options.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableSslOptions/default'}  # type: ignore
def list_available_ssl_predefined_policies(
    self,
    **kwargs: Any
) -> AsyncIterable["_models.ApplicationGatewayAvailableSslPredefinedPolicies"]:
    """Lists all SSL predefined policies for configuring Ssl policy.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ApplicationGatewayAvailableSslPredefinedPolicies or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.ApplicationGatewayAvailableSslPredefinedPolicies]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ApplicationGatewayAvailableSslPredefinedPolicies"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    accept = "application/json"
    def prepare_request(next_link=None):
        # Build either the first-page request or a follow-up request to next_link.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # Construct URL
            url = self.list_available_ssl_predefined_policies.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # next_link returned by the service already embeds its query string.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request
    async def extract_data(pipeline_response):
        # Split one deserialized page into (link to next page, iterable of elements).
        deserialized = self._deserialize('ApplicationGatewayAvailableSslPredefinedPolicies', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)
    async def get_next(next_link=None):
        # Fetch one page; surface HTTP errors through the ARM error format.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response
    return AsyncItemPaged(
        get_next, extract_data
    )
list_available_ssl_predefined_policies.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableSslOptions/default/predefinedPolicies'}  # type: ignore
async def get_ssl_predefined_policy(
    self,
    predefined_policy_name: str,
    **kwargs: Any
) -> "_models.ApplicationGatewaySslPredefinedPolicy":
    """Gets Ssl predefined policy with the specified policy name.

    :param predefined_policy_name: Name of Ssl predefined policy.
    :type predefined_policy_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ApplicationGatewaySslPredefinedPolicy, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2018_07_01.models.ApplicationGatewaySslPredefinedPolicy
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ApplicationGatewaySslPredefinedPolicy"]
    handled_errors = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    handled_errors.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    accept = "application/json"

    serialize = self._serialize
    # Expand the URL template attached to this operation's metadata.
    url = self._client.format_url(
        self.get_ssl_predefined_policy.metadata['url'],  # type: ignore
        subscriptionId=serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        predefinedPolicyName=serialize.url("predefined_policy_name", predefined_policy_name, 'str'),
    )

    query_parameters = {'api-version': serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=handled_errors)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ApplicationGatewaySslPredefinedPolicy', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
get_ssl_predefined_policy.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableSslOptions/default/predefinedPolicies/{predefinedPolicyName}'}  # type: ignore
| 50.842882 | 219 | 0.678407 |
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Signature of the optional ``cls`` response hook accepted by every operation:
# receives the pipeline response, the deserialized body, and response headers.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ApplicationGatewaysOperations:
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
    """Store the shared pipeline client, configuration and (de)serializers.

    :param client: Pipeline client used to build and send HTTP requests.
    :param config: Service configuration (provides subscription_id, polling_interval).
    :param serializer: Serializer used to encode URL/query/header/body values.
    :param deserializer: Deserializer used to decode responses into models.
    """
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    self._config = config
async def _delete_initial(
    self,
    resource_group_name: str,
    application_gateway_name: str,
    **kwargs: Any
) -> None:
    """Issue the initial DELETE request of the long-running delete operation.

    ``begin_delete`` drives the subsequent polling; this only sends the first
    request and validates its status code.
    """
    cls = kwargs.pop('cls', None)  # optional hook receiving the raw response
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    # Construct URL from the template attached to this method's metadata.
    url = self._delete_initial.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct query parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # No headers beyond pipeline defaults are needed for DELETE.
    header_parameters = {}
    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # 200/204: completed synchronously; 202: accepted for asynchronous processing.
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'}
async def begin_delete(
    self,
    resource_group_name: str,
    application_gateway_name: str,
    **kwargs: Any
) -> AsyncLROPoller[None]:
    """Deletes the specified application gateway (long-running operation).

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param application_gateway_name: The name of the application gateway.
    :type application_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword polling: True for ARM polling, False for no polling, or a custom polling object.
    :keyword int polling_interval: Default waiting time between two polls.
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :return: An instance of AsyncLROPoller that returns None.
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: send the initial DELETE and keep the raw pipeline
        # response (cls identity lambda) for the poller to consume.
        raw_result = await self._delete_initial(
            resource_group_name=resource_group_name,
            application_gateway_name=application_gateway_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These kwargs were consumed by the initial call; do not forward to polling.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Final deserialization callback: delete produces no body.
        if cls:
            return cls(pipeline_response, None, {})
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Rehydrate a poller from a previously saved continuation token.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'}
async def get(
    self,
    resource_group_name: str,
    application_gateway_name: str,
    **kwargs: Any
) -> "_models.ApplicationGateway":
    """Gets the specified application gateway.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param application_gateway_name: The name of the application gateway.
    :type application_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ApplicationGateway, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2018_07_01.models.ApplicationGateway
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ApplicationGateway"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    accept = "application/json"
    # Construct URL
    url = self.get.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct query parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    deserialized = self._deserialize('ApplicationGateway', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'}
async def _create_or_update_initial(
    self,
    resource_group_name: str,
    application_gateway_name: str,
    parameters: "_models.ApplicationGateway",
    **kwargs: Any
) -> "_models.ApplicationGateway":
    """Issue the initial PUT of the long-running create-or-update operation.

    :param resource_group_name: The name of the resource group.
    :param application_gateway_name: The name of the application gateway.
    :param parameters: Application gateway model to create or update.
    :return: The deserialized ApplicationGateway from the initial response.
    :raises ~azure.core.exceptions.HttpResponseError: On any status other than 200/201.
    """
    cls = kwargs.pop('cls', None)  # optional hook receiving the raw response
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"
    # Construct URL from the template attached to this method's metadata.
    url = self._create_or_update_initial.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct query parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    # Serialize the gateway model as the PUT body.
    body_content_kwargs = {}
    body_content = self._serialize.body(parameters, 'ApplicationGateway')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    # 200 (updated) and 201 (created) carry the same ApplicationGateway payload,
    # so one deserialization replaces the two duplicated per-status branches
    # and guarantees `deserialized` is always bound past the status check.
    deserialized = self._deserialize('ApplicationGateway', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'}
async def begin_create_or_update(
    self,
    resource_group_name: str,
    application_gateway_name: str,
    parameters: "_models.ApplicationGateway",
    **kwargs: Any
) -> AsyncLROPoller["_models.ApplicationGateway"]:
    """Creates or updates the specified application gateway (long-running operation).

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param application_gateway_name: The name of the application gateway.
    :type application_gateway_name: str
    :param parameters: Application gateway model to create or update.
    :type parameters: ~azure.mgmt.network.v2018_07_01.models.ApplicationGateway
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword polling: True for ARM polling, False for no polling, or a custom polling object.
    :keyword int polling_interval: Default waiting time between two polls.
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :return: An instance of AsyncLROPoller that returns ApplicationGateway.
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.ApplicationGateway]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ApplicationGateway"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: send the initial PUT and keep the raw pipeline
        # response (cls identity lambda) for the poller to consume.
        raw_result = await self._create_or_update_initial(
            resource_group_name=resource_group_name,
            application_gateway_name=application_gateway_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These kwargs were consumed by the initial call; do not forward to polling.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Deserialize the final response once the operation completes.
        deserialized = self._deserialize('ApplicationGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Rehydrate a poller from a previously saved continuation token.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'}
async def _update_tags_initial(
    self,
    resource_group_name: str,
    application_gateway_name: str,
    parameters: "_models.TagsObject",
    **kwargs: Any
) -> "_models.ApplicationGateway":
    """Issue the initial PATCH of the long-running tag-update operation.

    :param parameters: TagsObject serialized as the PATCH body.
    :return: The deserialized ApplicationGateway from the initial response.
    :raises ~azure.core.exceptions.HttpResponseError: On any non-200 status.
    """
    cls = kwargs.pop('cls', None)  # optional hook receiving the raw response
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"
    # Construct URL from the template attached to this method's metadata.
    url = self._update_tags_initial.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct query parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    # Serialize the tags object as the PATCH body.
    body_content_kwargs = {}
    body_content = self._serialize.body(parameters, 'TagsObject')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    deserialized = self._deserialize('ApplicationGateway', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'}
async def begin_update_tags(
    self,
    resource_group_name: str,
    application_gateway_name: str,
    parameters: "_models.TagsObject",
    **kwargs: Any
) -> AsyncLROPoller["_models.ApplicationGateway"]:
    """Updates the specified application gateway tags (long-running operation).

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param application_gateway_name: The name of the application gateway.
    :type application_gateway_name: str
    :param parameters: Tags to apply, sent as a PATCH body.
    :type parameters: ~azure.mgmt.network.v2018_07_01.models.TagsObject
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword polling: True for ARM polling, False for no polling, or a custom polling object.
    :keyword int polling_interval: Default waiting time between two polls.
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :return: An instance of AsyncLROPoller that returns ApplicationGateway.
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.ApplicationGateway]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ApplicationGateway"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: send the initial PATCH and keep the raw pipeline
        # response (cls identity lambda) for the poller to consume.
        raw_result = await self._update_tags_initial(
            resource_group_name=resource_group_name,
            application_gateway_name=application_gateway_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These kwargs were consumed by the initial call; do not forward to polling.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Deserialize the final response once the operation completes.
        deserialized = self._deserialize('ApplicationGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Rehydrate a poller from a previously saved continuation token.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'}
def list(
    self,
    resource_group_name: str,
    **kwargs: Any
) -> AsyncIterable["_models.ApplicationGatewayListResult"]:
    """Lists all application gateways in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ApplicationGatewayListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.ApplicationGatewayListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ApplicationGatewayListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    accept = "application/json"
    def prepare_request(next_link=None):
        # Build either the first-page request or a follow-up request to next_link.
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            url = self.list.metadata['url']
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # next_link returned by the service already embeds its query string.
            url = next_link
            query_parameters = {}
            request = self._client.get(url, query_parameters, header_parameters)
        return request
    async def extract_data(pipeline_response):
        # Split one deserialized page into (link to next page, iterable of elements).
        deserialized = self._deserialize('ApplicationGatewayListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)
    async def get_next(next_link=None):
        # Fetch one page; surface HTTP errors through the ARM error format.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response
    return AsyncItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways'}
def list_all(
    self,
    **kwargs: Any
) -> AsyncIterable["_models.ApplicationGatewayListResult"]:
    """Gets all the application gateways in a subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ApplicationGatewayListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.ApplicationGatewayListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ApplicationGatewayListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    accept = "application/json"
    def prepare_request(next_link=None):
        # Build either the first-page request or a follow-up request to next_link.
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            url = self.list_all.metadata['url']
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # next_link returned by the service already embeds its query string.
            url = next_link
            query_parameters = {}
            request = self._client.get(url, query_parameters, header_parameters)
        return request
    async def extract_data(pipeline_response):
        # Split one deserialized page into (link to next page, iterable of elements).
        deserialized = self._deserialize('ApplicationGatewayListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)
    async def get_next(next_link=None):
        # Fetch one page; surface HTTP errors through the ARM error format.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response
    return AsyncItemPaged(
        get_next, extract_data
    )
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGateways'}
async def _start_initial(
    self,
    resource_group_name: str,
    application_gateway_name: str,
    **kwargs: Any
) -> None:
    """Issue the initial POST of the long-running start operation.

    ``begin_start`` drives the subsequent polling.
    """
    cls = kwargs.pop('cls', None)  # optional hook receiving the raw response
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    # Construct URL from the template attached to this method's metadata.
    url = self._start_initial.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct query parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # No headers beyond pipeline defaults; the POST carries no body.
    header_parameters = {}
    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # 200: completed synchronously; 202: accepted for asynchronous processing.
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    if cls:
        return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/start'}
async def begin_start(
    self,
    resource_group_name: str,
    application_gateway_name: str,
    **kwargs: Any
) -> AsyncLROPoller[None]:
    """Starts the specified application gateway (long-running operation).

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param application_gateway_name: The name of the application gateway.
    :type application_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword polling: True for ARM polling, False for no polling, or a custom polling object.
    :keyword int polling_interval: Default waiting time between two polls.
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :return: An instance of AsyncLROPoller that returns None.
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: send the initial POST and keep the raw pipeline
        # response (cls identity lambda) for the poller to consume.
        raw_result = await self._start_initial(
            resource_group_name=resource_group_name,
            application_gateway_name=application_gateway_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These kwargs were consumed by the initial call; do not forward to polling.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Final deserialization callback: start produces no body.
        if cls:
            return cls(pipeline_response, None, {})
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Rehydrate a poller from a previously saved continuation token.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/start'}
async def _stop_initial(
    self,
    resource_group_name: str,
    application_gateway_name: str,
    **kwargs: Any
) -> None:
    """Send the initial POST that requests the gateway to stop.

    ``begin_stop`` is responsible for polling the long-running operation.
    """
    cls = kwargs.pop('cls', None)
    handled_errors = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    handled_errors.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"

    serialize = self._serialize
    # Expand the URL template attached to this operation's metadata.
    url = self._client.format_url(
        self._stop_initial.metadata['url'],
        resourceGroupName=serialize.url("resource_group_name", resource_group_name, 'str'),
        applicationGatewayName=serialize.url("application_gateway_name", application_gateway_name, 'str'),
        subscriptionId=serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    )

    query_parameters = {'api-version': serialize.query("api_version", api_version, 'str')}
    header_parameters = {}

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=handled_errors)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/stop'}
async def begin_stop(
    self,
    resource_group_name: str,
    application_gateway_name: str,
    **kwargs: Any
) -> AsyncLROPoller[None]:
    """Stops the specified application gateway in a resource group (long-running operation).

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param application_gateway_name: The name of the application gateway.
    :type application_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword polling: True for ARM polling, False for no polling, or a custom polling object.
    :keyword int polling_interval: Default waiting time between two polls.
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :return: An instance of AsyncLROPoller that returns None.
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: send the initial POST and keep the raw pipeline
        # response (cls identity lambda) for the poller to consume.
        raw_result = await self._stop_initial(
            resource_group_name=resource_group_name,
            application_gateway_name=application_gateway_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These kwargs were consumed by the initial call; do not forward to polling.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Final deserialization callback: stop produces no body.
        if cls:
            return cls(pipeline_response, None, {})
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Rehydrate a poller from a previously saved continuation token.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/stop'}
    async def _backend_health_initial(
        self,
        resource_group_name: str,
        application_gateway_name: str,
        expand: Optional[str] = None,
        **kwargs: Any
    ) -> Optional["_models.ApplicationGatewayBackendHealth"]:
        """Issue the initial POST for the long-running backend-health query.

        Returns the deserialized ``ApplicationGatewayBackendHealth`` on a 200
        response, ``None`` on a 202 (operation still in flight); any other
        status raises ``HttpResponseError``.
        """
        cls = kwargs.pop('cls', None)
        # Map auth / not-found / conflict statuses to specific azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        accept = "application/json"
        # Build the request URL from the metadata template and path arguments.
        url = self._backend_health_initial.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if expand is not None:
            # $expand is optional; only send it when the caller asked for it.
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            # 200 carries the finished health report; 202 means keep polling.
            deserialized = self._deserialize('ApplicationGatewayBackendHealth', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _backend_health_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/backendhealth'}
    async def begin_backend_health(
        self,
        resource_group_name: str,
        application_gateway_name: str,
        expand: Optional[str] = None,
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ApplicationGatewayBackendHealth"]:
        """Start the long-running backend-health query for an application gateway.

        :param resource_group_name: Name of the resource group.
        :param application_gateway_name: Name of the application gateway.
        :param expand: Optional $expand value forwarded to the service.
        :keyword polling: True (default) for ARM polling, False to disable,
            or a custom polling object.
        :keyword continuation_token: Token to rebuild a poller for an
            operation started earlier.
        :return: Poller yielding ``ApplicationGatewayBackendHealth``.
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # No continuation token: issue the initial request now.
            raw_result = await self._backend_health_initial(
                resource_group_name=resource_group_name,
                application_gateway_name=application_gateway_name,
                expand=expand,
                cls=lambda x,y,z: x,  # hand the raw pipeline response to the poller
                **kwargs
            )
        # Only relevant to the initial call; remove before polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO.
            deserialized = self._deserialize('ApplicationGatewayBackendHealth', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_backend_health.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/backendhealth'}
async def list_available_waf_rule_sets(
self,
**kwargs: Any
) -> "_models.ApplicationGatewayAvailableWafRuleSetsResult":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
url = self.list_available_waf_rule_sets.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationGatewayAvailableWafRuleSetsResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_available_waf_rule_sets.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableWafRuleSets'}
    async def list_available_ssl_options(
        self,
        **kwargs: Any
    ) -> "_models.ApplicationGatewayAvailableSslOptions":
        """List the SSL options available for configuring SSL policy.

        :return: Deserialized ``ApplicationGatewayAvailableSslOptions``.
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop('cls', None)
        # Map auth / not-found / conflict statuses to specific azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        accept = "application/json"
        # Build the request URL from the metadata template.
        url = self.list_available_ssl_options.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ApplicationGatewayAvailableSslOptions', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list_available_ssl_options.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableSslOptions/default'}
    def list_available_ssl_predefined_policies(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.ApplicationGatewayAvailableSslPredefinedPolicies"]:
        """Page through all SSL predefined policies for configuring SSL policy.

        Returns an ``AsyncItemPaged`` that lazily fetches pages; no request is
        sent until the iterable is consumed.

        :return: Async iterator of ``ApplicationGatewaySslPredefinedPolicy`` pages.
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop('cls', None)
        # Map auth / not-found / conflict statuses to specific azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build either the first-page request (from the URL template) or
            # a follow-up request to the server-supplied next_link.
            header_parameters = {}
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.list_available_ssl_predefined_policies.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds its query string.
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Split one page into (link to next page, items on this page).
            deserialized = self._deserialize('ApplicationGatewayAvailableSslPredefinedPolicies', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page and fail fast on any non-200 status.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_available_ssl_predefined_policies.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableSslOptions/default/predefinedPolicies'}
    async def get_ssl_predefined_policy(
        self,
        predefined_policy_name: str,
        **kwargs: Any
    ) -> "_models.ApplicationGatewaySslPredefinedPolicy":
        """Get the SSL predefined policy with the specified name.

        :param predefined_policy_name: Name of the SSL predefined policy.
        :return: Deserialized ``ApplicationGatewaySslPredefinedPolicy``.
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop('cls', None)
        # Map auth / not-found / conflict statuses to specific azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        accept = "application/json"
        # Build the request URL from the metadata template and path arguments.
        url = self.get_ssl_predefined_policy.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'predefinedPolicyName': self._serialize.url("predefined_policy_name", predefined_policy_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ApplicationGatewaySslPredefinedPolicy', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_ssl_predefined_policy.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableSslOptions/default/predefinedPolicies/{predefinedPolicyName}'}
| true | true |
f722634ba5e70ca4a34dee0817bd68f32852423b | 12,616 | py | Python | assets/tools/ipfs_pinner.py | foxdproject/foxdcoin | 9db505f6f32bd3e51bd2b2da533744c98cee23af | [
"MIT"
] | 7 | 2020-06-19T20:49:02.000Z | 2022-01-31T09:12:18.000Z | assets/tools/ipfs_pinner.py | foxdproject/foxdcoin | 9db505f6f32bd3e51bd2b2da533744c98cee23af | [
"MIT"
] | 1 | 2021-02-26T19:14:11.000Z | 2021-02-26T19:14:11.000Z | assets/tools/ipfs_pinner.py | foxdproject/foxdcoin | 9db505f6f32bd3e51bd2b2da533744c98cee23af | [
"MIT"
] | 12 | 2020-05-02T20:01:44.000Z | 2022-03-03T11:02:13.000Z | #!/usr/bin/env python3
# Install pip3 (if not there)
# sudo apt-get install python3-pip
# Install zmq with
# pip3 install pyzmq
# Install bitcoinrpc with
# pip3 install python-bitcoinrpc
# Install ipfsapi with
# pip3 install ipfsapi
import sys
import argparse
import zmq
import struct
import binascii
import codecs
import random
import os
import subprocess
import json
import signal #Used for timeout
# When True, only pin content that validates as a single JSON object.
JSON_ONLY_CHECK = False
# Maximum IPFS object size (bytes) that will be fetched and pinned.
FILESIZE_THRESHOLD = 100000000
# Set this to your foxdcoin-cli program.
cli = "foxdcoin-cli"
# mode = "-testnet"
mode = ""
rpc_port = 8766
# Set this information in your foxdcoin.conf file (in datadir, not testnet3).
rpc_user = 'rpcuser'
rpc_pass = 'rpcpass555'
def print_debug(msg):
    """Print *msg* only when --debug was passed (reads the global ``args``)."""
    # Parameter renamed from ``str``, which shadowed the builtin; all call
    # sites in this script pass positionally, so this is compatible.
    if args.debug:
        print(msg)
class timeout:
    """Context manager that raises TimeoutError after *seconds* (Unix only).

    Uses SIGALRM, so it only works in the main thread on POSIX systems.
    """
    def __init__(self, seconds=1, error_message='Timeout'):
        self.seconds = seconds
        self.error_message = error_message
    def handle_timeout(self, signum, frame):
        raise TimeoutError(self.error_message)
    def __enter__(self):
        # Remember the previous handler so nested/previous users are restored.
        self._old_handler = signal.signal(signal.SIGALRM, self.handle_timeout)
        signal.alarm(self.seconds)
    def __exit__(self, type, value, traceback):
        signal.alarm(0)
        # Restore the handler that was active before we entered.
        signal.signal(signal.SIGALRM, self._old_handler)
def get_rpc_connection():
    """Build an authenticated JSON-RPC proxy to the local foxdcoind node.

    Credentials and port come from the module-level rpc_* settings.
    """
    # Imported lazily so the script can print a friendly error elsewhere
    # if python-bitcoinrpc is not installed.
    from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
    connection = "http://%s:%s@127.0.0.1:%s"%(rpc_user, rpc_pass, rpc_port)
    rpc_conn = AuthServiceProxy(connection)
    return(rpc_conn)
rpc_connection = get_rpc_connection()
def get_blockinfo(num):
    """Return the block dict for block height *num* (hash lookup + getblock)."""
    block_hash = rpc_connection.getblockhash(num)
    return rpc_connection.getblock(block_hash)
def get_block(hash):
    """Return the block dict for the given block *hash*."""
    return rpc_connection.getblock(hash)
def get_rawtx(tx):
    """Return the raw hex of transaction *tx* from the node."""
    return rpc_connection.getrawtransaction(tx)
def get_bci():
    """Return the node's getblockchaininfo dict."""
    return rpc_connection.getblockchaininfo()
def decode_rawtx(txdata):
    """Decode a raw transaction hex string into a dict via the RPC connection."""
    # (A commented-out duplicate of this function was removed.)
    txjson = rpc_connection.decoderawtransaction(txdata)
    return(txjson)
def decode_rawtx_cli(txdata):
    """Decode a raw transaction via the foxdcoin-cli binary (not the RPC proxy).

    Used by the ZMQ watcher so very large transactions go through the CLI.
    """
    txjson_str = rpc_call('decoderawtransaction', txdata)
    return(json.loads(txjson_str))
def decode_script(script):
    """Decode an output script hex via foxdcoin-cli and return it as a dict."""
    scriptinfo_str = rpc_call('decodescript', script)
    scriptinfo_str = scriptinfo_str.decode('ascii') #Needed for Python version 3.5 compat. 3.6 works fine without it.
    return(json.loads(scriptinfo_str))
def rpc_call(command, params):
    """Run ``foxdcoin-cli <command> <params>`` and return its raw stdout bytes.

    Returns an empty string without invoking the CLI when *params* would
    exceed the OS command-line length limit.
    """
    print_debug('cli: ' + cli)
    print_debug('command: ' + command)
    print_debug('params: ' + params)
    if len(params) > 131070:  # stay under the ~128 KiB argv limit
        print_debug("Params too long for command line")
        print_debug('Len: ' + str(len(params)))
        return("")
    # subprocess.run waits for completion and closes the pipes for us,
    # replacing the old Popen/communicate/close dance.
    result = subprocess.run([cli, command, params], stdout=subprocess.PIPE)
    return(result.stdout)
def is_json_only(txt):
    """Return True if *txt* looks like (and parses as) a single JSON object."""
    if args.debug:
        print("Validating JSON")
    txt = txt.strip()
    # Bug fix: the original tested txt[1:] (everything AFTER the first
    # character) against '{'; we want the first character itself. Slicing
    # with [:1] also handles the empty string without an IndexError.
    if txt[:1] != '{':
        print("Not proper JSON - No leading {")
        return False
    if txt[-1] != '}':
        print("Not proper JSON - No trailing }")
        return False
    try:
        json.loads(txt)
    except ValueError as e:
        print('Invalid json: %s' % e)
        return False
    return(True)
def asset_handler(asset_script):
    """Fetch and pin the IPFS metadata for one decoded asset script.

    *asset_script* is the dict returned by decode_script for an
    OP_FOXD_ASSET output. If the asset carries an IPFS hash and the object
    is under FILESIZE_THRESHOLD bytes, the object is fetched via the local
    IPFS daemon, pinned, and saved as "<asset-file>=<hash>" (optionally
    under --folder). Oversized or unfetchable objects fall back to an HTTP
    gateway download, and a "<name>.MISSING" marker is written on failure.
    """
    global FILESIZE_THRESHOLD
    global args
    asset_file = asset_to_file(asset_script.get('asset_name'))
    if args.debug:
        print("Type: " + asset_script.get('type'))
        print("Asset: " + asset_script.get('asset_name'))
        print("Asset File: " + asset_file)
        print(asset_script.get('amount'))
        print(asset_script.get('units'))
        print("Reissuable: " + str(asset_script.get('reissuable')))
        print("Has IPFS: " + str(asset_script.get('hasIPFS')))
    if asset_script.get('hasIPFS') == True:
        ipfs_hash = asset_script.get('ipfs_hash')
        print_debug(ipfs_hash)
        # Default to "too large" so a failed size check skips the daemon fetch.
        size = FILESIZE_THRESHOLD + 1
        with timeout(seconds=15):
            try:
                size = check_ipfs_file_size(ipfs_hash)
            except:
                print("Couldn't get size - skipping: " + asset_script.get('asset_name'))
                size = FILESIZE_THRESHOLD + 1
        full_path_with_asset = asset_file + "=" + ipfs_hash
        if not args.folder == None:
            full_path_with_asset = add_sep(args.folder) + full_path_with_asset
        if (size <= FILESIZE_THRESHOLD):
            with timeout(seconds=20):
                try:
                    if not os.path.isfile(full_path_with_asset):
                        if JSON_ONLY_CHECK:
                            # NOTE(review): ipfs_cat is not defined anywhere in
                            # this file - this path would raise NameError when
                            # JSON_ONLY_CHECK is enabled. Confirm/restore it.
                            a_str = ipfs_cat(ipfs_hash)
                            if not is_json_only(a_str):
                                return(None)
                        # ipfs get drops the object in the cwd under its hash;
                        # pin it, then rename it to the asset-based filename.
                        atuple = ipfs_get(ipfs_hash)
                        ipfs_pin_add(ipfs_hash)
                        os.rename(ipfs_hash, full_path_with_asset)
                        if args.debug:
                            print('Saved file as: ' + full_path_with_asset)
                    else:
                        if args.debug:
                            print("Found: " + full_path_with_asset)
                except:
                    print("Unable to fetch IPFS file for asset: " + asset_script.get('asset_name'))
        else:
            # Too large (or size unknown): try the public HTTP gateway instead.
            print_debug("Failed to get " + ipfs_hash + ' via ipfs get <hash>  Trying http...')
            result = get_ipfs_file_wget(full_path_with_asset, ipfs_hash)
            if not result == 1:
                print("Unable to get file for asset " + asset_file)
                # Leave a marker so later runs don't retry forever.
                output_missing(full_path_with_asset + '.MISSING')
def output_missing(file):
    """Write a placeholder file marking an asset whose IPFS data is missing."""
    # Context manager guarantees the handle is closed even on write errors.
    with open(file, 'w') as outf:
        outf.write("MISSING")
def get_ipfs_file_wget(filename, hash):
    """Download IPFS object *hash* via the public gateway and save as *filename*.

    Returns 1 on success, 0 on any failure (non-JSON data, network error).
    """
    try:
        import urllib.request as urllib2
    except ImportError:
        import urllib2
    print("Downloading: " + hash + " as " + filename)
    try:
        filedata = urllib2.urlopen('https://ipfs.io/ipfs/' + hash, timeout=20)
        datatowrite = filedata.read()
        # Bug fix: bytes.strip() returns a new object; the original discarded it.
        datatowrite = datatowrite.strip()
        # Bug fix: urlopen().read() returns bytes, and indexing bytes yields an
        # int in Python 3, so the old comparison against the str '{' was always
        # True and the file was never written. Compare against a bytes literal
        # (slicing also tolerates an empty download).
        if (datatowrite[:1] != b'{'):
            print("Not a valid metadata file")
            return 0
        with open(filename, 'wb') as f:
            f.write(datatowrite)
        print("Saving metadata file")
    except urllib2.URLError as e:
        print(type(e))
        return 0
    except:
        print("Uncaught error while downloading")
        return 0
    return 1
# Converts Asset to valid filename
def asset_to_file(asset):
    """Percent-encode the characters /, *, &, ?, :, = so the asset name is a safe filename."""
    # str.translate does all six substitutions in a single pass; none of the
    # replacement strings contain a character that is itself replaced, so this
    # is equivalent to the chained str.replace calls it supersedes.
    return asset.translate(str.maketrans({
        '/': '%2F', '*': '%2A', '&': '%26',
        '?': '%3F', ':': '%3A', '=': '%3D',
    }))
# Converts valid filename back to asset name
def file_to_asset(file):
    """Inverse of asset_to_file: decode the percent-escapes back to the asset name."""
    # Same substitutions and the same order as the original replace chain.
    for escape, char in (('%2F', '/'), ('%2A', '*'), ('%26', '&'),
                         ('%3F', '?'), ('%3A', ':'), ('%3D', '=')):
        file = file.replace(escape, char)
    return file
def check_ipfs_file_size(hash):
    """Return the CumulativeSize (bytes) of IPFS object *hash* via the local daemon."""
    import ipfsapi
    api = ipfsapi.connect('127.0.0.1', 5001)
    res = api.object_stat(hash)
    return(res['CumulativeSize'])
def ipfs_add(file):
    """Add *file* to the local IPFS daemon and return its content hash."""
    print("Adding to IPFS")
    import ipfsapi
    api = ipfsapi.connect('127.0.0.1', 5001)
    res = api.add(file)
    if args.debug:
        print(res)
    return(res['Hash'])
def ipfs_get(hash):
    """Fetch IPFS object *hash* into the current directory.

    Returns an empty tuple; callers only rely on the side effect (the object
    is written to the working directory under its hash).
    """
    import ipfsapi
    api = ipfsapi.connect('127.0.0.1', 5001)
    res = api.get(hash)
    return()
def ipfs_pin_add(hash):
    """Pin IPFS object *hash* on the local daemon; returns the daemon's response."""
    import ipfsapi
    api = ipfsapi.connect('127.0.0.1', 5001)
    res = api.pin_add(hash)
    return(res)
def ipfs_repo_stat():
    """Return local IPFS repo stats; also serves as a liveness check for the daemon."""
    import ipfsapi
    api = ipfsapi.connect('127.0.0.1', 5001)
    res = api.repo_stat()
    if args.debug:
        print(res)
    return(res)
def ipfs_pin_ls():
    """Print and return the set of objects pinned on the local IPFS daemon."""
    import ipfsapi
    api = ipfsapi.connect('127.0.0.1', 5001)
    res = api.pin_ls()
    print(res)
    return(res)
def block_conf_filename():
    """Name of the checkpoint file that stores the last processed block height."""
    return 'saveblock.conf'
# Add OS specific folder separator
def add_sep(dir):
    """Return *dir* guaranteed to end with the OS path separator.

    Robustness fix: the original indexed dir[-1], which raised IndexError on
    an empty string; endswith handles that case cleanly.
    """
    if not dir.endswith(os.sep):
        dir = dir + os.sep
    return(dir)
def load_block():
    """Return the block height to start scanning from.

    Priority: the --block argument (if >= the first asset block), then the
    saved checkpoint file, then the first block that could contain assets.
    """
    print_debug('reading block')
    FIRST_ASSET_BLOCK = 435456
    # If we passed in an argument for the first block, prefer it.
    if args.block != None and args.block >= FIRST_ASSET_BLOCK:
        return(args.block)
    # Read the last processed block from the checkpoint file, if present.
    if os.path.isfile(block_conf_filename()):
        # 'with' closes the handle even if int() raises on corrupt contents
        # (the original leaked the handle in that case).
        with open(block_conf_filename(), 'r') as outf:
            saved_block = int(outf.read())
        if saved_block > FIRST_ASSET_BLOCK:
            return(saved_block)
    # Fall back to the first block that could contain assets.
    return(FIRST_ASSET_BLOCK)
def save_block(block_num):
    """Persist *block_num* to the checkpoint file as the scan restart point."""
    # Context manager guarantees the handle is closed (and flushed) on all paths.
    with open(block_conf_filename(), 'w') as outf:
        outf.write(str(block_num))
def scan_asset_blocks():
    """Scan the chain from the saved checkpoint, pinning every asset's IPFS data.

    For each block, every transaction output whose script contains
    OP_FOXD_ASSET is decoded and handed to asset_handler; the checkpoint is
    advanced after each fully processed block.
    """
    # Get the blockheight of the chain.
    blockheight = get_bci().get('blocks')
    start_block = load_block()
    print_debug("Starting at block: " + str(start_block))
    for i in range(start_block,blockheight):
        dta = get_blockinfo(i)
        print('Block #' + str(i) + " - " + dta.get('hash'))
        # NOTE(review): get_blockinfo already returned this block, so this
        # second getblock RPC looks redundant - confirm before removing.
        tx_in_block = get_block(dta.get('hash'))
        txs = tx_in_block.get('tx')
        print_debug(txs)
        for tx in txs:
            tx_info = get_rawtx(tx)
            print_debug("txinfo: " + tx_info)
            tx_detail = decode_rawtx(tx_info)
            for vout in tx_detail.get('vout'):
                # Columns 86-98 of the disassembly hold the asset opcode for
                # standard transfer scripts.
                if (vout.get('scriptPubKey').get('asm')[86:98] == "OP_FOXD_ASSET"):
                    print_debug("Found OP_FOXD_ASSET")
                    print_debug(vout.get('scriptPubKey').get('hex'))
                    asset_script = decode_script(vout.get('scriptPubKey').get('hex'))
                    asset_handler(asset_script)
        # Checkpoint after each block so a restart resumes here.
        save_block(i)
    print_debug(asset_script)
def monitor_zmq():
    """Watch the node's ZMQ rawtx feed forever and pin new assets as they appear.

    Subscribes to the 'rawtx' topic on the node's ZMQ endpoint and runs
    asset_handler for every OP_FOXD_ASSET output seen. Never returns.
    """
    # Socket to talk to server
    context = zmq.Context()
    socket = context.socket(zmq.SUB)
    print("Getting Foxdcoin msgs")
    socket.connect("tcp://localhost:28766")
    # Only 'rawtx' is subscribed; the other topics are kept for reference.
    #socket.setsockopt_string(zmq.SUBSCRIBE, u'hashtx')
    #socket.setsockopt_string(zmq.SUBSCRIBE, u'hashblock')
    #socket.setsockopt_string(zmq.SUBSCRIBE, u'rawblock')
    socket.setsockopt_string(zmq.SUBSCRIBE, u'rawtx')
    while True:
        msg = socket.recv_multipart()
        topic = msg[0]
        body = msg[1]
        sequence = "Unknown"
        # The last frame, when 4 bytes, is a little-endian sequence counter.
        if len(msg[-1]) == 4:
            msgSequence = struct.unpack('<I', msg[-1])[-1]
            sequence = str(msgSequence)
        if topic == b"hashblock":
            print('- HASH BLOCK ('+sequence+') -')
            print(binascii.hexlify(body))
        elif topic == b"hashtx":
            print('- HASH TX  ('+sequence+') -')
            print(binascii.hexlify(body))
        elif topic == b"rawblock":
            print('- RAW BLOCK HEADER ('+sequence+') -')
            print(binascii.hexlify(body[:80]))
        elif topic == b"rawtx":
            print('ZMQ - RAW TX - Sequence: ' + sequence)
            if args.debug:
                print('- RAW TX ('+sequence+') -')
            tx_info = binascii.hexlify(body).decode("utf-8")
            if args.debug:
                print("txinfo: " + tx_info)
            # Decode via the CLI (not the RPC proxy) to cope with large txs.
            tx_detail = decode_rawtx_cli(tx_info)
            for vout in tx_detail.get('vout'):
                # Columns 86-98 of the disassembly hold the asset opcode.
                if (vout.get('scriptPubKey').get('asm')[86:98] == "OP_FOXD_ASSET"):
                    asset_script = decode_script(vout.get('scriptPubKey').get('hex'))
                    asset_handler(asset_script)
#print(file_to_asset(asset_to_file('?*_/')))
#exit(0)
def main(argv):
    """Entry point: parse options, verify IPFS is running, then scan and/or watch.

    :param argv: command-line arguments (sys.argv[1:]).
    """
    global args
    # Bug fix: without these declarations the --safemode assignments below
    # created function locals and silently had no effect on the module-level
    # settings used by asset_handler/is_json_only.
    global FILESIZE_THRESHOLD
    global JSON_ONLY_CHECK
    parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--noblockscan', action='store_true', help='Do not scan though blocks.')
    parser.add_argument('-z', '--nozmqwatch', action='store_true', help='Do not watch zero message queue.')
    parser.add_argument('-s', '--safemode', action='store_true', help='Only store JSON files of limited size.')
    parser.add_argument('-b', '--block', type=int, help='Start at this block number.')
    parser.add_argument('-f', '--folder', type=str, help='Store files in a different folder.')
    parser.add_argument('-d', '--debug', action='store_true', help='Print debug info.')
    # Parse the argv actually passed in (the original ignored it and re-read
    # sys.argv); identical behavior when invoked from __main__ below.
    args = parser.parse_args(argv)
    if args.debug:
        print(args)
    try:
        ipfs_repo_stat()    #Make sure IPFS is running
    except:
        print("pip3 install ipfs")
        print("OR")
        print("ipfs not running.  Run: ipfs daemon")
        exit(-1)
    if args.safemode:
        # Tighten limits: only small, JSON-validated objects get pinned.
        FILESIZE_THRESHOLD = 16000
        JSON_ONLY_CHECK = True
    if not args.noblockscan:
        scan_asset_blocks()
    if not args.nozmqwatch:
        monitor_zmq()
| 28.097996 | 121 | 0.668912 |
import sys
import argparse
import zmq
import struct
import binascii
import codecs
import random
import os
import subprocess
import json
import signal
JSON_ONLY_CHECK = False
FILESIZE_THRESHOLD = 100000000
cli = "foxdcoin-cli"
mode = ""
rpc_port = 8766
rpc_user = 'rpcuser'
rpc_pass = 'rpcpass555'
def print_debug(str):
if args.debug:
print(str)
class timeout:
def __init__(self, seconds=1, error_message='Timeout'):
self.seconds = seconds
self.error_message = error_message
def handle_timeout(self, signum, frame):
raise TimeoutError(self.error_message)
def __enter__(self):
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.seconds)
def __exit__(self, type, value, traceback):
signal.alarm(0)
def get_rpc_connection():
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
connection = "http://%s:%s@127.0.0.1:%s"%(rpc_user, rpc_pass, rpc_port)
rpc_conn = AuthServiceProxy(connection)
return(rpc_conn)
rpc_connection = get_rpc_connection()
def get_blockinfo(num):
hash = rpc_connection.getblockhash(num)
blockinfo = rpc_connection.getblock(hash)
return(blockinfo)
def get_block(hash):
blockinfo = rpc_connection.getblock(hash)
return(blockinfo)
def get_rawtx(tx):
txinfo = rpc_connection.getrawtransaction(tx)
return(txinfo)
def get_bci():
bci = rpc_connection.getblockchaininfo()
return(bci)
def decode_rawtx(txdata):
txjson = rpc_connection.decoderawtransaction(txdata)
return(txjson)
def decode_rawtx_cli(txdata):
txjson_str = rpc_call('decoderawtransaction', txdata)
return(json.loads(txjson_str))
def decode_script(script):
scriptinfo_str = rpc_call('decodescript', script)
scriptinfo_str = scriptinfo_str.decode('ascii')
return(json.loads(scriptinfo_str))
def rpc_call(command, params):
print_debug('cli: ' + cli)
print_debug('command: ' + command)
print_debug('params: ' + params)
if len(params) > 131070:
print_debug("Params too long for command line")
print_debug('Len: ' + str(len(params)))
return("")
process = subprocess.Popen([cli, command, params], stdout=subprocess.PIPE)
out, err = process.communicate()
process.stdout.close()
if process.stderr:
process.stderr.close()
return(out)
def is_json_only(txt):
if args.debug:
print("Validating JSON")
txt = txt.strip()
if txt[1:] != '{':
print("Not proper JSON - No leading {")
return False
if txt[-1] != '}':
print("Not proper JSON - No trailing }")
return False
try:
json.loads(txt)
except ValueError as e:
print('Invalid json: %s' % e)
return False
return(True)
def asset_handler(asset_script):
global FILESIZE_THRESHOLD
global args
asset_file = asset_to_file(asset_script.get('asset_name'))
if args.debug:
print("Type: " + asset_script.get('type'))
print("Asset: " + asset_script.get('asset_name'))
print("Asset File: " + asset_file)
print(asset_script.get('amount'))
print(asset_script.get('units'))
print("Reissuable: " + str(asset_script.get('reissuable')))
print("Has IPFS: " + str(asset_script.get('hasIPFS')))
if asset_script.get('hasIPFS') == True:
ipfs_hash = asset_script.get('ipfs_hash')
print_debug(ipfs_hash)
size = FILESIZE_THRESHOLD + 1
with timeout(seconds=15):
try:
size = check_ipfs_file_size(ipfs_hash)
except:
print("Couldn't get size - skipping: " + asset_script.get('asset_name'))
size = FILESIZE_THRESHOLD + 1
#size = check_ipfs_file_size(asset_script.get('ipfs_hash'))
#size=1
full_path_with_asset = asset_file + "=" + ipfs_hash
if not args.folder == None:
full_path_with_asset = add_sep(args.folder) + full_path_with_asset
if (size <= FILESIZE_THRESHOLD):
with timeout(seconds=20):
try:
if not os.path.isfile(full_path_with_asset):
if JSON_ONLY_CHECK:
a_str = ipfs_cat(ipfs_hash)
if not is_json_only(a_str):
return(None)
atuple = ipfs_get(ipfs_hash)
ipfs_pin_add(ipfs_hash)
os.rename(ipfs_hash, full_path_with_asset)
if args.debug:
print('Saved file as: ' + full_path_with_asset)
else:
if args.debug:
print("Found: " + full_path_with_asset)
except:
print("Unable to fetch IPFS file for asset: " + asset_script.get('asset_name'))
else:
print_debug("Failed to get " + ipfs_hash + ' via ipfs get <hash> Trying http...')
result = get_ipfs_file_wget(full_path_with_asset, ipfs_hash)
if not result == 1:
print("Unable to get file for asset " + asset_file)
output_missing(full_path_with_asset + '.MISSING')
#print("Too large at %d bytes" % size)
def output_missing(file):
outf = open(file, 'w')
outf.write("MISSING")
outf.close()
def get_ipfs_file_wget(filename, hash):
try:
import urllib.request as urllib2
except ImportError:
import urllib2
print("Downloading: " + hash + " as " + filename)
try:
filedata = urllib2.urlopen('https://ipfs.io/ipfs/' + hash, timeout=20)
datatowrite = filedata.read()
datatowrite.strip()
if (datatowrite[0] != '{'):
print("Not a valid metadata file")
return
with open(filename, 'wb') as f:
f.write(datatowrite)
print("Saving metadata file")
except urllib2.URLError as e:
print(type(e))
return 0
except:
print("Uncaught error while downloading") #not catch
return 0
return 1
#Converts Asset to valid filename
def asset_to_file(asset):
file = asset
file = file.replace('/', r'%2F')
file = file.replace('*', r'%2A')
file = file.replace('&', r'%26')
file = file.replace('?', r'%3F')
file = file.replace(':', r'%3A')
file = file.replace('=', r'%3D')
return(file)
#Converts valid filename back to asset name
def file_to_asset(file):
asset = file
asset = asset.replace(r'%2F', '/')
asset = asset.replace(r'%2A', '*')
asset = asset.replace(r'%26', '&')
asset = asset.replace(r'%3F', '?')
asset = asset.replace(r'%3A', ':')
asset = asset.replace(r'%3D', '=')
return(asset)
def check_ipfs_file_size(hash):
#print("Checking size in IPFS")
import ipfsapi
api = ipfsapi.connect('127.0.0.1', 5001)
res = api.object_stat(hash)
#print(res)
return(res['CumulativeSize'])
def ipfs_add(file):
print("Adding to IPFS")
import ipfsapi
api = ipfsapi.connect('127.0.0.1', 5001)
res = api.add(file)
if args.debug:
print(res)
return(res['Hash'])
def ipfs_get(hash):
import ipfsapi
api = ipfsapi.connect('127.0.0.1', 5001)
res = api.get(hash)
return()
def ipfs_pin_add(hash):
import ipfsapi
api = ipfsapi.connect('127.0.0.1', 5001)
res = api.pin_add(hash)
return(res)
def ipfs_repo_stat():
import ipfsapi
api = ipfsapi.connect('127.0.0.1', 5001)
res = api.repo_stat()
if args.debug:
print(res)
return(res)
def ipfs_pin_ls():
import ipfsapi
api = ipfsapi.connect('127.0.0.1', 5001)
res = api.pin_ls()
print(res)
return(res)
def block_conf_filename():
return('saveblock.conf')
#Add OS specific folder separator
def add_sep(dir):
if (dir[-1] != os.sep):
dir = dir + os.sep
return(dir)
def load_block():
print_debug('reading block')
FIRST_ASSET_BLOCK = 435456
#If we passed in an argument for the first block
if args.block != None and args.block >= FIRST_ASSET_BLOCK:
return(args.block)
#Read from the config file for last blocks processed
if os.path.isfile(block_conf_filename()):
outf = open(block_conf_filename(), 'r')
saved_block = int(outf.read())
outf.close()
if saved_block > FIRST_ASSET_BLOCK:
return(saved_block)
#Return first block that could contain assets
return(FIRST_ASSET_BLOCK)
def save_block(block_num):
outf = open(block_conf_filename(), 'w')
outf.write(str(block_num))
outf.close()
def scan_asset_blocks():
#Get the blockheight of the chain
blockheight = get_bci().get('blocks')
start_block = load_block()
print_debug("Starting at block: " + str(start_block))
for i in range(start_block,blockheight):
dta = get_blockinfo(i)
print('Block
tx_in_block = get_block(dta.get('hash'))
txs = tx_in_block.get('tx')
print_debug(txs)
for tx in txs:
tx_info = get_rawtx(tx)
print_debug("txinfo: " + tx_info)
tx_detail = decode_rawtx(tx_info)
for vout in tx_detail.get('vout'):
if (vout.get('scriptPubKey').get('asm')[86:98] == "OP_FOXD_ASSET"):
print_debug("Found OP_FOXD_ASSET")
print_debug(vout.get('scriptPubKey').get('hex'))
asset_script = decode_script(vout.get('scriptPubKey').get('hex'))
asset_handler(asset_script)
save_block(i)
print_debug(asset_script)
def monitor_zmq():
    """Subscribe to the local foxdcoind ZMQ feed and process raw transactions.

    Connects to tcp://localhost:28766, subscribes to the 'rawtx' topic and,
    for every transaction output whose script carries the OP_FOXD_ASSET
    marker, decodes the asset script and hands it to asset_handler().
    Runs forever.
    """
    ctx = zmq.Context()
    sub = ctx.socket(zmq.SUB)
    print("Getting Foxdcoin msgs")
    sub.connect("tcp://localhost:28766")
    # Only 'rawtx' is subscribed; the other topic branches below are kept
    # in case additional subscriptions are enabled later.
    sub.setsockopt_string(zmq.SUBSCRIBE, u'rawtx')
    while True:
        parts = sub.recv_multipart()
        topic, payload = parts[0], parts[1]
        # The final frame carries a little-endian uint32 sequence number.
        sequence = "Unknown"
        if len(parts[-1]) == 4:
            sequence = str(struct.unpack('<I', parts[-1])[-1])
        if topic == b"hashblock":
            print('- HASH BLOCK ('+sequence+') -')
            print(binascii.hexlify(payload))
        elif topic == b"hashtx":
            print('- HASH TX ('+sequence+') -')
            print(binascii.hexlify(payload))
        elif topic == b"rawblock":
            # Only the 80-byte block header is shown.
            print('- RAW BLOCK HEADER ('+sequence+') -')
            print(binascii.hexlify(payload[:80]))
        elif topic == b"rawtx":
            print('ZMQ - RAW TX - Sequence: ' + sequence)
            if args.debug:
                print('- RAW TX ('+sequence+') -')
            raw_hex = binascii.hexlify(payload).decode("utf-8")
            if args.debug:
                print("txinfo: " + raw_hex)
            decoded = decode_rawtx_cli(raw_hex)
            for out in decoded.get('vout'):
                script = out.get('scriptPubKey')
                # The asset opcode sits at a fixed offset in the asm string.
                if script.get('asm')[86:98] == "OP_FOXD_ASSET":
                    asset_handler(decode_script(script.get('hex')))
#print(file_to_asset(asset_to_file('?*_/')))
#exit(0)
def main(argv):
    """Command-line entry point.

    Parses options, verifies a local IPFS daemon is reachable, then runs the
    historic block scan and/or the live ZMQ monitor depending on the flags.

    argv -- command-line arguments (excluding the program name).
    """
    global args, FILESIZE_THRESHOLD, JSON_ONLY_CHECK
    parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--noblockscan', action='store_true', help='Do not scan though blocks.')
    parser.add_argument('-z', '--nozmqwatch', action='store_true', help='Do not watch zero message queue.')
    parser.add_argument('-s', '--safemode', action='store_true', help='Only store JSON files of limited size.')
    parser.add_argument('-b', '--block', type=int, help='Start at this block number.')
    parser.add_argument('-f', '--folder', type=str, help='Store files in a different folder.')
    parser.add_argument('-d', '--debug', action='store_true', help='Print debug info.')
    # Fix: honor the argv parameter instead of implicitly re-reading sys.argv.
    args = parser.parse_args(argv)
    if args.debug:
        print(args)
    try:
        ipfs_repo_stat()  # Make sure IPFS is running
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit pass through.
        print("pip3 install ipfs")
        print("OR")
        print("ipfs not running. Run: ipfs daemon")
        exit(-1)
    if args.safemode:
        # Fix: these were previously assigned as dead locals; the module-level
        # settings must be updated for safe mode to have any effect.
        FILESIZE_THRESHOLD = 16000
        JSON_ONLY_CHECK = True
    if not args.noblockscan:
        scan_asset_blocks()
    if not args.nozmqwatch:
        monitor_zmq()
# Script entry point: forward the CLI arguments (minus the program name) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
| true | true |
f7226478feaa40df2f49df721dfaa0ea3f1d2930 | 95 | py | Python | diary/utils.py | sjwhole/diary-backend | 5e72972e39c6417c394da44643c2c9054193eef5 | [
"MIT"
] | 1 | 2021-07-09T16:26:30.000Z | 2021-07-09T16:26:30.000Z | diary/utils.py | sjwhole/diary-backend | 5e72972e39c6417c394da44643c2c9054193eef5 | [
"MIT"
] | 1 | 2021-07-22T06:48:13.000Z | 2021-07-22T07:09:02.000Z | diary/utils.py | sjwhole/diary-backend | 5e72972e39c6417c394da44643c2c9054193eef5 | [
"MIT"
] | null | null | null | def set_message(message):
content = {
"message": message
}
return content
| 13.571429 | 26 | 0.578947 | def set_message(message):
content = {
"message": message
}
return content
| true | true |
f722649afa920019c781bd8ddba28544b387b710 | 766 | py | Python | flamesense4.py | saaketporay/Stovetop-Flame-Notifier | 2cc66e8f51046b14160cfd74e97e69647e95c526 | [
"MIT"
] | 2 | 2016-08-18T13:06:26.000Z | 2019-12-17T00:31:31.000Z | flamesense4.py | saaketporay/Stovetop-Flame-Notifier | 2cc66e8f51046b14160cfd74e97e69647e95c526 | [
"MIT"
] | null | null | null | flamesense4.py | saaketporay/Stovetop-Flame-Notifier | 2cc66e8f51046b14160cfd74e97e69647e95c526 | [
"MIT"
] | null | null | null |
# Poll a serial flame sensor and fire a desktop notification whenever the
# flame state changes.  Runs forever.
import os
import serial
from time import sleep

ser = serial.Serial('/dev/ttyACM0', 9600)

flame = False      # last reported flame state
newflame = False   # state derived from the most recent reading
while True:
    value = ser.readline()
    print(value)
    try:
        # Fix: the original mixed Python-2 ``print 'x'`` statements with
        # Python-3 ``print(...)`` calls, which is a SyntaxError on Python 3.
        # Threshold 400 distinguishes flame from no-flame on this sensor.
        newflame = int(value) > 400
        if newflame != flame:
            flame = newflame
            if newflame:
                print('Flame detected')
                os.system('bash /usr/bin/notify.sh "Flame Detected"')
            else:
                print('No Flame')
                os.system('bash /usr/bin/notify.sh "No Flame Detected"')
    except ValueError:
        # Fix: narrowed from ``except Exception`` -- only the int() parse of a
        # garbled serial line is expected to fail here.
        print('No value detected.')
| 20.157895 | 72 | 0.498695 |
import os
import serial
from time import sleep
ser = serial.Serial('/dev/ttyACM0',9600)
flame = False
newflame = False
while True:
value = ser.readline()
print(value)
try:
if int(value) > 400:
newflame = True
else:
newflame = False
if newflame != flame:
flame = newflame
if newflame == True:
print 'Flame detected'
os.system('bash /usr/bin/notify.sh "Flame Detected"')
else:
print 'No Flame'
os.system('bash /usr/bin/notify.sh "No Flame Detected"')
except Exception as e:
print('No value detected.')
| false | true |
f72267a505fe7d3afa184c0f7c8d907922327ad9 | 3,718 | py | Python | api/app/tools/202104.py | yunfei07/vue-flask-in-action | 8695f9a252bb3e2136609f421e02a0d3f01c0e58 | [
"MIT"
] | null | null | null | api/app/tools/202104.py | yunfei07/vue-flask-in-action | 8695f9a252bb3e2136609f421e02a0d3f01c0e58 | [
"MIT"
] | null | null | null | api/app/tools/202104.py | yunfei07/vue-flask-in-action | 8695f9a252bb3e2136609f421e02a0d3f01c0e58 | [
"MIT"
] | null | null | null | from math import ceil
def chunk(lst, size):
    """Split *lst* into consecutive pieces of *size* items (last may be shorter)."""
    return [lst[start:start + size] for start in range(0, len(lst), size)]
def find_str_index(str1, str2):
    """Return the index in *str1* of the first character of *str2* present there.

    Returns the sentinel string "str2 not none" when *str2* is empty, and
    None when no character of *str2* occurs in *str1*.
    """
    if not str2:
        return "str2 not none"
    return next((str1.index(ch) for ch in str2 if ch in str1), None)
def find_sub_string(s, words):
    """Collect ``s.index`` of every growing concatenation of *words* found in *s*.

    The prefixes of ``words`` joined in order are tried first, then (after
    reversing *words* in place -- a side effect the original had as well)
    the prefixes of the reversed order.  Returns the matching indices.
    """
    if not words:
        return []
    hits = []
    for pass_no in range(2):
        prefix = ''
        for word in words:
            prefix += word
            if prefix in s:
                hits.append(s.index(prefix))
        if pass_no == 0:
            words.reverse()  # deliberate in-place mutation, kept for parity
    return hits
def longest_valid_parentheses(s: str) -> int:
    """Return the length of the longest valid (well-formed) parentheses substring.

    Fix: the original compared the loop *index* to '(' (``if x == left``),
    which is always false, so it never pushed and always returned 0.  This
    is the standard stack-of-indices scan: the stack bottom holds the index
    just before the current candidate run, so ``i - stack[-1]`` is the
    length of the valid run ending at i.
    """
    best = 0
    stack = [-1]  # sentinel: index just before the current valid run
    for i, ch in enumerate(s):
        if ch == '(':
            stack.append(i)
        else:
            stack.pop()
            if not stack:
                # Unmatched ')': any valid run must start after this index.
                stack.append(i)
            else:
                best = max(best, i - stack[-1])
    return best
def search(nums, target) -> int:
    """Return the index of *target* in *nums*, or -1 when absent."""
    try:
        return nums.index(target)
    except ValueError:
        return -1
def search_range(nums, target):
    """Return every index at which *target* occurs, or [-1, -1] if it never does."""
    hits = []
    for i, value in enumerate(nums):
        if value == target:
            hits.append(i)
    return hits or [-1, -1]
def binary_search(l, item):
    """Iterative binary search over sorted *l*; returns the index or None."""
    lo, hi = 0, len(l) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        probe = l[mid]
        if probe == item:
            return mid
        if probe > item:
            hi = mid - 1
        else:
            lo = mid + 1
    return None
def bin_search(l, item):
    """Binary search (recursive formulation); returns the index of *item* or None."""
    def _go(lo, hi):
        if lo > hi:
            return None
        mid = (lo + hi) // 2
        if l[mid] == item:
            return mid
        if item > l[mid]:
            return _go(mid + 1, hi)
        return _go(lo, mid - 1)

    return _go(0, len(l) - 1)
def b_search(l, item):
    """Binary search over sorted *l*; returns the index of *item*, else None."""
    low, high = 0, len(l) - 1
    while low <= high:
        # Overflow-safe midpoint form (equivalent to (low + high) // 2 here).
        mid = low + (high - low) // 2
        if item == l[mid]:
            return mid
        elif item < l[mid]:
            high = mid - 1
        else:
            low = mid + 1
    return None
def b_sort(l):
    """In-place bubble sort; returns the same list for convenience.

    Fixes vs. original: removes the no-op manual ``k += 1`` inside the for
    loop, shrinks each inner pass (the tail is already in place) and stops
    early once a full pass makes no swaps.
    """
    n = len(l)
    for done in range(n - 1):
        swapped = False
        for k in range(n - 1 - done):
            if l[k] > l[k + 1]:
                l[k], l[k + 1] = l[k + 1], l[k]
                swapped = True
        if not swapped:
            break  # already sorted
    return l
# Insertion sort -- stub, not implemented yet.
def i_sort():
    """Placeholder for an insertion-sort implementation."""
    pass
# Selection sort.
def s_sort(l):
    """In-place selection sort; returns the same list.

    Fixes vs. original: the loop guard was inverted (``while low >= high``),
    so the sort never ran for lists of length >= 2 and raised ValueError on
    shorter ones, and the swap overwrote l[low] without clearing the
    minimum's original slot.
    """
    for i in range(len(l) - 1):
        # Position of the smallest remaining element.
        m = min(range(i, len(l)), key=l.__getitem__)
        l[i], l[m] = l[m], l[i]
    return l
# Quicksort -- stub, not implemented yet.
def q_sort(l):
    """Placeholder for a quicksort implementation; currently returns None."""
    pass
# Parenthesis matching: counts the characters taken up by matched "()" pairs.
def find_k(strings):
    """Return twice the number of matched "()" pairs in *strings*.

    Mirrors the original's quirks exactly: an unmatched ')' or any character
    other than '(' / ')' aborts immediately with 0, while leftover unmatched
    '(' are silently ignored.
    """
    open_stack = []
    pairs = 0
    for ch in strings:
        if ch == '(':
            open_stack.append(ch)
        elif open_stack and ch == ')':
            open_stack.pop()
            pairs += 1
        else:
            return 0
    return pairs * 2
def insert_index(l, target):
    """Insert *target* into *l* keeping it sorted and return its index.

    NOTE: mutates *l* in place (it ends up sorted with *target* included),
    matching the original behavior.
    """
    l[:] = sorted(l + [target])
    return l.index(target)
def multiply(n1, n2):
    """Multiply two integer strings and return their product as a string.

    Fix: the original evaluated ``eval(f'{n1}*{n2}')`` on caller-supplied
    text, which executes arbitrary Python expressions (code injection).
    Explicit int conversion removes that risk for the intended digit-string
    inputs.
    """
    return str(int(n1) * int(n2))
if __name__ == '__main__':
    # Ad-hoc smoke checks for the helpers defined above.
    a = find_str_index('hello', 'l')
    b = find_sub_string("barfoothefoobarman", ["foo", "bar", 'aaa'])
    l = [1, 2, 3, 4, 5, 6]
    k = "(()()())()"
    nums = [4, 5, 6, 7, 0, 1, 2]
    s = b_sort(nums)  # sorts nums in place
    print(s)
    f = find_k(k)
    print(f)
    select = s_sort(nums)  # nums is already sorted at this point
    print(select)
    print(multiply("12", "12"))
| 17.961353 | 68 | 0.461001 | from math import ceil
def chunk(lst, size):
return list(
map(lambda x: lst[x * size:x * size + size],
list(range(0, ceil(len(lst) / size)))))
def find_str_index(str1, str2):
if not str2:
return "str2 not none"
for x in str2:
if x in str1:
return str1.index(x)
def find_sub_string(s, words):
if not words:
return []
tmp = []
str1 = ''
str2 = ''
for x in words:
str1 += x
if str1 in s:
tmp.append(s.index(str1))
words.reverse()
for x in words:
str2 += x
if str2 in s:
tmp.append(s.index(str2))
return tmp
def longest_valid_parentheses(s: str) -> int:
left = '('
right = ')'
n = 0
stack = [-1]
for x in range(len(s)):
if x == left:
stack.append(x)
else:
stack.pop()
if not stack:
stack.append(x)
if stack:
n = max(n, x - stack[-1])
return n
def search(nums, target) -> int:
if target in nums:
return nums.index(target)
else:
return -1
def search_range(nums, target):
indices = [i for i, x in enumerate(nums) if x == target]
if not indices:
return [-1, -1]
return indices
def binary_search(l, item):
low = 0
high = len(l) - 1
while low <= high:
mid = (low + high) // 2
if l[mid] == item:
return mid
if l[mid] > item:
high = mid - 1
else:
low = mid + 1
return None
def bin_search(l, item):
low = 0
high = len(l) - 1
while low <= high:
mid = (low + high) // 2
if item == l[mid]:
return mid
if item > l[mid]:
low = mid + 1
else:
high = mid - 1
return None
def b_search(l, item):
low = 0
high = len(l) - 1
while low <= high:
mid = (low + high) // 2
if item == l[mid]:
return mid
if item > l[mid]:
low = mid + 1
else:
high = mid - 1
return None
def b_sort(l):
for index in range(len(l) - 1):
for k in range(len(l) - 1):
if l[k] > l[k + 1]:
l[k], l[k + 1] = l[k + 1], l[k]
k += 1
return l
def i_sort():
pass
def s_sort(l):
low = 0
high = len(l) - 1
while low >= high:
n = min(l[low:])
n, l[low] = l[low], n
low += 1
return l
def q_sort(l):
pass
def find_k(strings):
stack = []
count = 0
for s in strings:
if s == '(':
stack.append(s)
elif len(stack) > 0 and s == ')':
stack.pop()
count += 1
else:
return 0
return count * 2
def insert_index(l, target):
l.append(target)
l.sort()
return l.index(target)
def multiply(n1, n2):
return f"{eval(f'{n1}*{n2}')}"
if __name__ == '__main__':
a = find_str_index('hello', 'l')
b = find_sub_string("barfoothefoobarman", ["foo", "bar", 'aaa'])
l = [1, 2, 3, 4, 5, 6]
k = "(()()())()"
nums = [4, 5, 6, 7, 0, 1, 2]
_sort(nums)
print(s)
f = find_k(k)
print(f)
select = s_sort(nums)
print(select)
print(multiply("12", "12"))
| true | true |
f72267d179fcc6174846127af025da7c0e86fefe | 4,008 | py | Python | cookbook/c02/p19_descent_parser.py | Xiao-jiuguan/python3-cookbook | 95d5a1d5cb59b5d88e816f6f10eb1e5befc25b05 | [
"Apache-2.0"
] | 3 | 2018-05-10T01:13:08.000Z | 2018-06-17T12:34:07.000Z | cookbook/c02/p19_descent_parser.py | Xiao-jiuguan/python3-cookbook | 95d5a1d5cb59b5d88e816f6f10eb1e5befc25b05 | [
"Apache-2.0"
] | 2 | 2020-09-19T17:10:23.000Z | 2020-10-17T16:43:52.000Z | cookbook/c02/p19_descent_parser.py | Xiao-jiuguan/python3-cookbook | 95d5a1d5cb59b5d88e816f6f10eb1e5befc25b05 | [
"Apache-2.0"
] | 1 | 2020-12-22T06:33:18.000Z | 2020-12-22T06:33:18.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Topic: 下降解析器
Desc :
"""
import re
import collections
# Token specification
NUM = r'(?P<NUM>\d+)'
PLUS = r'(?P<PLUS>\+)'
MINUS = r'(?P<MINUS>-)'
TIMES = r'(?P<TIMES>\*)'
DIVIDE = r'(?P<DIVIDE>/)'
LPAREN = r'(?P<LPAREN>\()'
RPAREN = r'(?P<RPAREN>\))'
WS = r'(?P<WS>\s+)'
master_pat = re.compile('|'.join([NUM, PLUS, MINUS, TIMES,
DIVIDE, LPAREN, RPAREN, WS]))
# Tokenizer
Token = collections.namedtuple('Token', ['type', 'value'])
def generate_tokens(text):
scanner = master_pat.scanner(text)
for m in iter(scanner.match, None):
tok = Token(m.lastgroup, m.group())
if tok.type != 'WS':
yield tok
# Parser
class ExpressionEvaluator:
'''
Implementation of a recursive descent parser. Each method
implements a single grammar rule. Use the ._accept() method
to test and accept the current lookahead token. Use the ._expect()
method to exactly match and discard the next token on on the input
(or raise a SyntaxError if it doesn't match).
'''
def parse(self, text):
self.tokens = generate_tokens(text)
self.tok = None # Last symbol consumed
self.nexttok = None # Next symbol tokenized
self._advance() # Load first lookahead token
return self.expr()
def _advance(self):
'Advance one token ahead'
self.tok, self.nexttok = self.nexttok, next(self.tokens, None)
def _accept(self, toktype):
'Test and consume the next token if it matches toktype'
if self.nexttok and self.nexttok.type == toktype:
self._advance()
return True
else:
return False
def _expect(self, toktype):
'Consume next token if it matches toktype or raise SyntaxError'
if not self._accept(toktype):
raise SyntaxError('Expected ' + toktype)
# Grammar rules follow
def expr(self):
"expression ::= term { ('+'|'-') term }*"
exprval = self.term()
while self._accept('PLUS') or self._accept('MINUS'):
op = self.tok.type
right = self.term()
if op == 'PLUS':
exprval += right
elif op == 'MINUS':
exprval -= right
return exprval
def term(self):
"term ::= factor { ('*'|'/') factor }*"
termval = self.factor()
while self._accept('TIMES') or self._accept('DIVIDE'):
op = self.tok.type
right = self.factor()
if op == 'TIMES':
termval *= right
elif op == 'DIVIDE':
termval /= right
return termval
def factor(self):
"factor ::= NUM | ( expr )"
if self._accept('NUM'):
return int(self.tok.value)
elif self._accept('LPAREN'):
exprval = self.expr()
self._expect('RPAREN')
return exprval
else:
raise SyntaxError('Expected NUMBER or LPAREN')
def descent_parser():
e = ExpressionEvaluator()
print(e.parse('2'))
print(e.parse('2 + 3'))
print(e.parse('2 + 3 * 4'))
print(e.parse('2 + (3 + 4) * 5'))
# print(e.parse('2 + (3 + * 4)'))
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# File "exprparse.py", line 40, in parse
# return self.expr()
# File "exprparse.py", line 67, in expr
# right = self.term()
# File "exprparse.py", line 77, in term
# termval = self.factor()
# File "exprparse.py", line 93, in factor
# exprval = self.expr()
# File "exprparse.py", line 67, in expr
# right = self.term()
# File "exprparse.py", line 77, in term
# termval = self.factor()
# File "exprparse.py", line 97, in factor
# raise SyntaxError("Expected NUMBER or LPAREN")
# SyntaxError: Expected NUMBER or LPAREN
if __name__ == '__main__':
descent_parser() | 30.363636 | 71 | 0.558633 |
import re
import collections
NUM = r'(?P<NUM>\d+)'
PLUS = r'(?P<PLUS>\+)'
MINUS = r'(?P<MINUS>-)'
TIMES = r'(?P<TIMES>\*)'
DIVIDE = r'(?P<DIVIDE>/)'
LPAREN = r'(?P<LPAREN>\()'
RPAREN = r'(?P<RPAREN>\))'
WS = r'(?P<WS>\s+)'
master_pat = re.compile('|'.join([NUM, PLUS, MINUS, TIMES,
DIVIDE, LPAREN, RPAREN, WS]))
Token = collections.namedtuple('Token', ['type', 'value'])
def generate_tokens(text):
scanner = master_pat.scanner(text)
for m in iter(scanner.match, None):
tok = Token(m.lastgroup, m.group())
if tok.type != 'WS':
yield tok
class ExpressionEvaluator:
def parse(self, text):
self.tokens = generate_tokens(text)
self.tok = None
self.nexttok = None
self._advance()
return self.expr()
def _advance(self):
self.tok, self.nexttok = self.nexttok, next(self.tokens, None)
def _accept(self, toktype):
if self.nexttok and self.nexttok.type == toktype:
self._advance()
return True
else:
return False
def _expect(self, toktype):
if not self._accept(toktype):
raise SyntaxError('Expected ' + toktype)
def expr(self):
exprval = self.term()
while self._accept('PLUS') or self._accept('MINUS'):
op = self.tok.type
right = self.term()
if op == 'PLUS':
exprval += right
elif op == 'MINUS':
exprval -= right
return exprval
def term(self):
termval = self.factor()
while self._accept('TIMES') or self._accept('DIVIDE'):
op = self.tok.type
right = self.factor()
if op == 'TIMES':
termval *= right
elif op == 'DIVIDE':
termval /= right
return termval
def factor(self):
if self._accept('NUM'):
return int(self.tok.value)
elif self._accept('LPAREN'):
exprval = self.expr()
self._expect('RPAREN')
return exprval
else:
raise SyntaxError('Expected NUMBER or LPAREN')
def descent_parser():
e = ExpressionEvaluator()
print(e.parse('2'))
print(e.parse('2 + 3'))
print(e.parse('2 + 3 * 4'))
print(e.parse('2 + (3 + 4) * 5'))
if __name__ == '__main__':
descent_parser() | true | true |
f72268fc7af81ab7a500353a54e46fd7610982d8 | 1,517 | py | Python | tools/GraphClust/Preprocessing/splitSHAPE.py | pavanvidem/galaxytools | 339363f6c9d817bc2c35997b4dfdd3ca99a37055 | [
"MIT"
] | null | null | null | tools/GraphClust/Preprocessing/splitSHAPE.py | pavanvidem/galaxytools | 339363f6c9d817bc2c35997b4dfdd3ca99a37055 | [
"MIT"
] | null | null | null | tools/GraphClust/Preprocessing/splitSHAPE.py | pavanvidem/galaxytools | 339363f6c9d817bc2c35997b4dfdd3ca99a37055 | [
"MIT"
] | null | null | null | import re
import sys

# Usage: splitSHAPE.py <reactivity-file>
#
# Re-maps whole-sequence SHAPE reactivities onto the windowed fragments
# listed in FASTA/data.names and writes them to shape_data_split.react.
# Assumed formats (inferred from the parsing below -- verify with producer):
#   data.names line: "<num_id> <window-string> <?> <orig_id>_<n> ..."
#     where <window-string> embeds at least three integers, the 2nd and 3rd
#     being the 1-based start/end of the window;
#   reactivity file: ">name" headers followed by "<pos>\t<value>" lines.

shape_file = sys.argv[1]
header_pattern = re.compile("^>.*$")

# Parse FASTA/data.names: collect (numeric id, window string, original id).
seq_id = []
seq_string = []
orig_id = []
with open("FASTA/data.names", "r") as f:
    for line in f:
        if not line.strip():
            continue
        fields = line.split()  # fix: split once instead of three times per line
        seq_id.append(int(fields[0]))
        seq_string.append(fields[1])
        # Drop the trailing "_<n>" fragment suffix to recover the source id.
        orig_id.append(fields[3].rsplit("_", 1)[0])

# Group reactivity lines under the sequence name of the preceding ">" header.
react_dict = {}
react_arr = []
with open(shape_file, "r") as shape:
    for line in shape.read().split("\n"):
        if header_pattern.match(line):
            react_arr = []
            react_dict[line.replace(">", "").split()[0]] = react_arr
        else:
            react_arr.append(line)

# Emit each fragment's slice of its parent's reactivities, renumbered so the
# window starts at position 1.
parts = []  # fix: joined once at the end, avoiding quadratic str concatenation
for i, (sid, window, oid) in enumerate(zip(seq_id, seq_string, orig_id)):
    if oid not in react_dict:
        raise RuntimeError("Error key {} {} not found".format(i, oid))
    react_val = react_dict[oid]
    parts.append(">" + str(sid) + " " + window + "\n")
    bounds = re.findall(r"\d+", window)
    start, end = int(bounds[1]), int(bounds[2])  # 1-based window bounds
    for entry in react_val[start - 1: end]:
        cols = entry.split()
        parts.append(str(int(cols[0]) - start + 1) + "\t" + cols[1] + "\n")

with open("shape_data_split.react", "w") as out:
    out.write("".join(parts))
| 24.467742 | 77 | 0.566908 | import re
import sys
shape_file = sys.argv[1]
pattern = re.compile("^>.*$")
toWrite = ""
count_for_id = 1
seq_counter = 0
new_id = ""
seq_id = []
seq_string = []
orig_id = []
name_file = "FASTA/data.names"
array_all_chunks = []
with open(name_file, "r") as f:
for line in f:
if len(line.strip()) == 0:
continue
seq_id.append(int(line.split()[0]))
seq_string.append(line.split()[1])
orig_id_srt = line.split()[3]
orig_id_srt = orig_id_srt.rsplit("_", 1)[0]
orig_id.append(orig_id_srt)
react_dict = {}
react_arr = []
with open(shape_file, "r") as shape:
content = shape.read()
lines = content.split("\n")
for line in lines:
if pattern.match(line):
line = line.replace(">", "").split()[0]
react_arr = []
react_dict[line] = react_arr
continue
else:
react_arr.append(line)
toWrite = ""
chunks = []
for i in range(len(orig_id)):
if not orig_id[i] in react_dict:
raise RuntimeError("Error key {} {} not found".format(i, orig_id[i]))
react_val = react_dict[orig_id[i]]
toWrite += ">" + str(seq_id[i]) + " " + seq_string[i] + "\n"
chunks = re.findall(r"\d+", seq_string[i])
for j in react_val[int(chunks[1]) - 1: int(chunks[2])]:
id_s = int(j.split()[0])
id_s = id_s - int(chunks[1]) + 1
toWrite += str(id_s) + "\t" + j.split()[1] + "\n"
with open("shape_data_split.react", "w") as out:
out.write(toWrite)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.